机器学习-采用正态贝叶斯分类器、决策树、随机森林对wine数据集分类
来源:互联网 发布:枪械3d模型数据 编辑:程序博客网 时间:2024/06/05 14:53
关于wine数据集描述:http://archive.ics.uci.edu/ml/datasets/Wine
// Classify the UCI "wine" dataset with three OpenCV ml models:
// a Normal Bayes classifier, a decision tree (DTrees) and a random forest (RTrees).
// Dataset description: http://archive.ics.uci.edu/ml/datasets/Wine
#include "opencv2/ml/ml.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/core/utility.hpp"
#include <stdio.h>
#include <string.h>   // strcmp (was missing in the original)
#include <string>
#include <map>
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::ml;

// Print command-line usage.
static void help()
{
    printf("\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees.\n"
        "Usage:\n\t./tree_engine [-r <response_column>] [-ts type_spec] <csv filename>\n"
        "where -r <response_column> specified the 0-based index of the response (0 by default)\n"
        "-ts specifies the var type spec in the form ord[n1,n2-n3,n4-n5,...]cat[m1-m2,m3,m4-m5,...]\n"
        "<csv filename> is the name of training data file in comma-separated value format\n\n");
}

// Train `model` on `data`, then report the error on the train split
// (calcError(..., false, ...)) and on the test split (calcError(..., true, ...)).
static void train_and_print_errs(Ptr<StatModel> model, const Ptr<TrainData>& data)
{
    bool ok = model->train(data);
    if (!ok)
    {
        printf("Training failed\n");
    }
    else
    {
        printf("train error: %f\n", model->calcError(data, false, noArray()));
        printf("test error: %f\n\n", model->calcError(data, true, noArray()));
    }
}

int main(int argc, char** argv)
{
    if (argc < 2)
    {
        help();
        return 0;
    }

    const char* filename = 0;   // csv file with the training data
    int response_idx = 0;       // 0-based column index of the class label
    std::string typespec;       // optional ord[...]cat[...] variable type spec

    // Parse the command line. Options that take a value (-r, -ts) must not be
    // the last argument; the original `argv[++i]` read past the end of argv.
    for (int i = 1; i < argc; i++)
    {
        if (strcmp(argv[i], "-r") == 0)
        {
            if (++i >= argc || sscanf(argv[i], "%d", &response_idx) != 1)
            {
                printf("Error. Option -r requires an integer argument\n");
                help();
                return -1;
            }
        }
        else if (strcmp(argv[i], "-ts") == 0)
        {
            if (++i >= argc)
            {
                printf("Error. Option -ts requires an argument\n");
                help();
                return -1;
            }
            typespec = argv[i];
        }
        else if (argv[i][0] != '-')
            filename = argv[i];
        else
        {
            printf("Error. Invalid option %s\n", argv[i]);
            help();
            return -1;
        }
    }

    // The original passed a possibly-null filename straight to printf/loadFromCSV.
    if (!filename)
    {
        printf("Error. No csv filename given\n");
        help();
        return -1;
    }

    printf("\nReading in %s...\n\n", filename);
    const double train_test_split_ratio = 0.5;

    // Load the training data; column response_idx holds the class label.
    Ptr<TrainData> data = TrainData::loadFromCSV(filename, 0, response_idx, response_idx + 1, typespec);
    if (data.empty())
    {
        printf("ERROR: File %s can not be read\n", filename);
        return 0;
    }
    data->setTrainTestSplitRatio(train_test_split_ratio);

    // Three hand-picked wine samples (13 features each) used for prediction below.
    float test1[] = { 14.23, 1.71, 2.43, 15.6, 127, 2.8, 3.06, .28, 2.29, 5.64, 1.04, 3.92, 1065 };
    float test2[] = { 12.37, .94, 1.36, 10.6, 88, 1.98, .57, .28, .42, 1.95, 1.05, 1.82, 520 };
    float test3[] = { 12.86, 1.35, 2.32, 18, 122, 1.51, 1.25, .21, .94, 4.1, .76, 1.29, 630 };
    Mat test1Map(1, 13, CV_32FC1, test1);
    Mat test2Map(1, 13, CV_32FC1, test2);
    Mat test3Map(1, 13, CV_32FC1, test3);

    printf("============正态贝叶斯分类器================\n");
    // Create the Normal Bayes classifier.
    Ptr<NormalBayesClassifier> bayes = NormalBayesClassifier::create();
    // Train the model.
    train_and_print_errs(bayes, data);
    // Save the model, then reload it — deliberately redundant here, the
    // round-trip only demonstrates the save/load API.
    bayes->save("bayes_result.xml");
    Ptr<NormalBayesClassifier> bayes2 = NormalBayesClassifier::load<NormalBayesClassifier>("bayes_result.xml");
    cout << bayes2->predict(test1Map) << endl;
    cout << bayes2->predict(test2Map) << endl;
    cout << bayes2->predict(test3Map) << endl;
    cout << "============================================" << endl;

    printf("======DTREE=====\n");
    // Create and configure the decision tree.
    Ptr<DTrees> dtree = DTrees::create();
    dtree->setMaxDepth(10);              // maximum tree depth
    dtree->setMinSampleCount(2);         // minimum samples in a leaf node
    dtree->setRegressionAccuracy(0);     // regression accuracy (unused for classification)
    dtree->setUseSurrogates(false);      // no surrogate splits
    dtree->setMaxCategories(16);         // maximum number of categories
    dtree->setCVFolds(0);                // no cross-validation pruning
    dtree->setUse1SERule(false);         // do not apply the 1SE rule
    dtree->setTruncatePrunedTree(false); // keep pruned branches
    dtree->setPriors(Mat());             // uniform class priors
    train_and_print_errs(dtree, data);
    // Same deliberately redundant save/load round-trip as above.
    dtree->save("dtree_result.xml");
    Ptr<DTrees> dtree2 = DTrees::load<DTrees>("dtree_result.xml");
    cout << dtree2->predict(test1Map) << endl;
    cout << dtree2->predict(test2Map) << endl;
    cout << dtree2->predict(test3Map) << endl;
    cout << "============================================" << endl;

    // Boosting only supports regression / 2-class problems, so it is disabled
    // for the 3-class wine dataset (kept from the original sample):
    //if ((int)data->getClassLabels().total() <= 2)
    //{
    //    printf("======BOOST=====\n");
    //    Ptr<Boost> boost = Boost::create();
    //    boost->setBoostType(Boost::GENTLE);
    //    boost->setWeakCount(100);
    //    boost->setWeightTrimRate(0.95);
    //    boost->setMaxDepth(2);
    //    boost->setUseSurrogates(false);
    //    boost->setPriors(Mat());
    //    train_and_print_errs(boost, data);
    //}

    printf("======RTREES=====\n");
    // Create and configure the random forest.
    Ptr<RTrees> rtrees = RTrees::create();
    rtrees->setMaxDepth(10);
    rtrees->setMinSampleCount(2);
    rtrees->setRegressionAccuracy(0);
    rtrees->setUseSurrogates(false);
    rtrees->setMaxCategories(16);
    rtrees->setPriors(Mat());
    rtrees->setCalculateVarImportance(false); // skip variable-importance computation
    rtrees->setActiveVarCount(0);             // default sqrt(nvars) active variables per split
    rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0)); // 100 trees
    train_and_print_errs(rtrees, data);
    cout << rtrees->predict(test1Map) << endl;
    cout << rtrees->predict(test2Map) << endl;
    cout << rtrees->predict(test3Map) << endl;
    cout << "============================================" << endl;
    return 0;
}
此处可以看出,对于wine数据集的分类,效果比较 rtrees > dtree > normalbayes
wine数据集包含178条数据
阅读全文
0 0
- 机器学习-采用正态贝叶斯分类器、决策树、随机森林对wine数据集分类
- 机器学习-采用正态贝叶斯分类器、决策树、随机森林对abalone数据集分类
- 机器学习-采用决策树对wine分类
- 机器学习-采用正态贝叶斯分类器对wine分类
- 数据挖掘笔记-分类-决策树-随机森林
- 随机森林分类器
- 利用随机森林和梯度提升决策树对titanic数据进行分类,并对结果进行分析
- [Java][机器学习]用决策树分类算法对Iris花数据集进行处理
- 【机器学习算法模型】分类算法——随机森林
- R语言使用随机森林方法对数据分类
- 机器学习概念总结笔记(三)——分类决策树C4.5、集成学习Bagging算法Boosting算法随机森林算法迭代决策树算法、
- 机器学习:决策树之随机森林
- 机器学习之决策树和随机森林
- 【机器学习】决策树与随机森林
- 机器学习-决策树和随机森林
- 机器学习--决策树和随机森林简介
- 机器学习之决策树与随机森林
- 单一决策树与集成模型(随机森林分类器、梯度提升决策树)的比较
- Tensorflow:模型保存/模型恢复?
- (转)【Unity3D游戏开发】—— iTween笔记 一(战棋寻路)
- 爬虫系列16.urlparse模块
- CMB标量功率谱第一个谱指数跑动项n(1)跑动带来的影响
- JQ放大镜插件的调用
- 机器学习-采用正态贝叶斯分类器、决策树、随机森林对wine数据集分类
- Ubuntu系统备份和还原,从此避免系统重装
- UVA 11988 破损的键盘
- flask-on-iis
- MFC:判断复选框的选中状态
- jsp/servlet第四章第四节session补充
- BMP图片格式简介
- Kafka消息时间戳(kafka message timestamp)
- 【小白的CFD之旅】10 敲门实例