Python/sklearn: the RandomForestClassifier function and plotting ROC curves


First, reposted from the blogger at http://blog.itpub.net/12199764/viewspace-1572056/

A brief introduction to the basic usage of the RandomForestClassifier function:

# -*- coding: utf-8 -*-
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from matplotlib.pyplot import *
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals.joblib import Parallel, delayed
from sklearn.tree import export_graphviz

final = open('c:/test/final.dat', 'r')
data = [line.strip().split('\t') for line in final]
feature = [[float(x) for x in row[3:]] for row in data]
target = [int(row[0]) for row in data]

# split the data into training and test sets
feature_train, feature_test, target_train, target_test = train_test_split(
    feature, target, test_size=0.1, random_state=42)

# random forest classifier
clf = RandomForestClassifier(n_estimators=8)

# train the model
s = clf.fit(feature_train, target_train)
print s

# evaluate accuracy on the test set
r = clf.score(feature_test, target_test)
print r

print 'Prediction: %s' % clf.predict(feature_test[0])
#print clf.predict_proba(feature_test[0])
print 'All trees: %s' % clf.estimators_
print clf.classes_
print clf.n_classes_
print 'Feature importances: %s' % clf.feature_importances_
print clf.n_outputs_

def _parallel_helper(obj, methodname, *args, **kwargs):
    return getattr(obj, methodname)(*args, **kwargs)

# collect the per-tree probability estimates in parallel
all_proba = Parallel(n_jobs=10, verbose=clf.verbose, backend="threading")(
    delayed(_parallel_helper)(e, 'predict_proba', feature_test[0]) for e in clf.estimators_)
print 'Per-tree predictions: %s' % all_proba

# average the per-tree probabilities
proba = all_proba[0]
for j in range(1, len(all_proba)):
    proba += all_proba[j]
proba /= len(clf.estimators_)
print 'Number of trees: %s , fraction voting "not cheating": %s' % (clf.n_estimators, proba[0, 0])
print 'Number of trees: %s , fraction voting "cheating": %s' % (clf.n_estimators, proba[0, 1])

# when more trees vote "cheating" than "not cheating", the final decision is "cheating"
print 'Final decision: %s' % clf.classes_.take(np.argmax(proba, axis=1), axis=0)

# export every tree in the forest to a .dot file
for i in xrange(len(clf.estimators_)):
    export_graphviz(clf.estimators_[i], '%d.dot' % i)
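The snippet above targets Python 2 and an older scikit-learn; sklearn.cross_validation and sklearn.externals.joblib have since been removed. As a rough sketch of the same workflow on Python 3 with a current scikit-learn (the data file path and column layout are kept from the original, and the per-tree voting details are omitted):

# Minimal Python 3 sketch of the same workflow on a current scikit-learn.
# Assumes the same file layout as the original: label in column 0, features from column 3 on.
from sklearn.model_selection import train_test_split   # replaces sklearn.cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import export_graphviz

with open('c:/test/final.dat', 'r') as final:
    data = [line.strip().split('\t') for line in final]
feature = [[float(x) for x in row[3:]] for row in data]
target = [int(row[0]) for row in data]

feature_train, feature_test, target_train, target_test = train_test_split(
    feature, target, test_size=0.1, random_state=42)

clf = RandomForestClassifier(n_estimators=8)
clf.fit(feature_train, target_train)
print(clf.score(feature_test, target_test))     # accuracy on the held-out split
print(clf.predict([feature_test[0]]))           # predict() now expects a 2-D array
print(clf.feature_importances_)

# export every tree in the forest to a .dot file, as in the original
for i, tree in enumerate(clf.estimators_):
    export_graphviz(tree, out_file='%d.dot' % i)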

The blogger above saves the trained trees to .dot files; the model can also be saved to a .pkl file. This part is taken from https://www.zhihu.com/question/31604690/answer/52708757
import pickle
model = RandomForestClassifier(n_estimators=100)
model.fit(x, y)  # fit the random forest; x and y are the training set and its labels

'''
The trained random forest can then be stored in a pickle file, which saves retraining it every time. Not every class can be pickled, but a random forest can. You are also not limited to storing a single model: you can put the training set, its labels, and the random forest into one list and dump that list into the pickle file.
'''
with open("data.pkl", "wb") as f:
    pickle.dump(model, f)
# to reuse the model later, just read it back
with open("data.pkl", "rb") as f:
    model = pickle.load(f)
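As the note above says, you are not limited to a single object. A minimal sketch, reusing the x, y, and model names from the snippet above, that bundles the training data and the fitted forest into one pickle file:

# store the training data together with the fitted forest in one file
with open("data.pkl", "wb") as f:
    pickle.dump([x, y, model], f)

# read everything back in one step
with open("data.pkl", "rb") as f:
    x, y, model = pickle.load(f)

For models holding large numpy arrays, scikit-learn's documentation also recommends joblib.dump / joblib.load as a more efficient alternative to plain pickle.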

Below is a code snippet I wrote myself.

import matplotlib.pyplot as plt
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, auc
import pickle
import utils  # project-local module holding the CSV paths used below

df_train = pd.read_csv(utils.HEATMAP_FEATURE_CSV_TRAIN)
df_validation = pd.read_csv(utils.HEATMAP_FEATURE_CSV_VALIDATION)

n_columns = len(df_train.columns)
feature_column_names = df_train.columns[:n_columns - 1]  # every column except the last one holds a feature
label_column_name = df_train.columns[n_columns - 1]      # the last column holds the label

train_x = df_train[feature_column_names]
train_y = df_train[label_column_name]
validation_x = df_validation[feature_column_names]
validation_y = df_validation[label_column_name]

clf = RandomForestClassifier(n_estimators=50, n_jobs=2)  # random forest classifier
s = clf.fit(train_x, train_y)                            # train the model
r = clf.score(validation_x, validation_y)                # accuracy on the validation set
print r

# predict() returns the class label directly; internally it still calls predict_proba()
predict_y_validation = clf.predict(validation_x)
# print(predict_y_validation)

# predict_proba() returns the probability of each label; the probabilities for one sample sum to 1
prob_predict_y_validation = clf.predict_proba(validation_x)
predictions_validation = prob_predict_y_validation[:, 1]  # probability of the positive class

fpr, tpr, _ = roc_curve(validation_y, predictions_validation)
roc_auc = auc(fpr, tpr)

plt.title('ROC Validation')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
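If only the AUC value is needed rather than the full curve, sklearn.metrics.roc_auc_score computes it directly from the positive-class probabilities. A minimal sketch, reusing validation_y and predictions_validation from the snippet above:

from sklearn.metrics import roc_auc_score

# equivalent to auc(fpr, tpr) above, computed in one call
auc_value = roc_auc_score(validation_y, predictions_validation)
print(auc_value)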