scikit-learn from Basics to Mastery (4): Model Selection

K-fold cross-validation

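The snippets in this section keep using X_digits, y_digits, and the svc estimator introduced in the earlier parts of the series. As a reminder, a minimal setup along those lines might look like the sketch below; the digits dataset and the C=1, linear-kernel SVC are assumptions carried over from those earlier parts, not defined in this post.

# Assumed setup: digits dataset plus a linear SVC, as in earlier parts of the series
from sklearn import datasets, svm

digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
svc = svm.SVC(C=1, kernel='linear')
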
'''K-fold cross-validation, used to measure prediction accuracy'''
import numpy as np

X_folds = np.array_split(X_digits, 3)
y_folds = np.array_split(y_digits, 3)
scores = list()
for k in range(3):
    # Use fold k as the test set and concatenate the remaining folds as the training set
    X_train = list(X_folds)
    X_test = X_train.pop(k)
    X_train = np.concatenate(X_train)
    y_train = list(y_folds)
    y_test = y_train.pop(k)
    y_train = np.concatenate(y_train)
    scores.append(svc.fit(X_train, y_train).score(X_test, y_test))
print(scores)
# [0.93489148580968284, 0.95659432387312182, 0.93989983305509184]
# The code above is rather long-winded; in fact scikit-learn ships cross-validation generators:
'''
KFold(n, k)
StratifiedKFold(y, k)
LeaveOneOut(n)
LeaveOneLabelOut(labels)
'''
from sklearn import cross_validation

k_fold = cross_validation.KFold(n=6, n_folds=3)
for train_indices, test_indices in k_fold:
    print('Train: %s | test: %s' % (train_indices, test_indices))
# Train: [2 3 4 5] | test: [0 1]
# Train: [0 1 4 5] | test: [2 3]
# Train: [0 1 2 3] | test: [4 5]
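Note that the sklearn.cross_validation module shown above belongs to older scikit-learn releases; it was later removed in favour of sklearn.model_selection, where KFold takes n_splits and yields the indices through its split() method. A rough sketch of the same three-fold demo with that newer API:

# Same three-fold index demo, written against sklearn.model_selection
import numpy as np
from sklearn.model_selection import KFold

k_fold = KFold(n_splits=3)
X_toy = np.zeros((6, 1))  # only the number of samples matters here
for train_indices, test_indices in k_fold.split(X_toy):
    print('Train: %s | test: %s' % (train_indices, test_indices))
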
# Now k-fold cross-validation is easy to implement
k_fold = cross_validation.KFold(len(X_digits), n_folds=3)
[svc.fit(X_digits[train], y_digits[train]).score(X_digits[test], y_digits[test])
 for train, test in k_fold]
# [0.93489148580968284, 0.95659432387312182, 0.93989983305509184]

# cross_val_score does the same thing in one call; n_jobs=-1 uses all CPU cores
cross_validation.cross_val_score(svc, X_digits, y_digits, cv=k_fold, n_jobs=-1)
# array([ 0.93489149,  0.95659432,  0.93989983])
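With the newer API, the one-call version uses cross_val_score from sklearn.model_selection instead; a sketch, reusing the svc, X_digits, and y_digits defined above:

# One-call three-fold scoring with the newer import path
from sklearn.model_selection import KFold, cross_val_score

scores = cross_val_score(svc, X_digits, y_digits, cv=KFold(n_splits=3), n_jobs=-1)
print(scores)
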

Grid search and cross-validated estimators

'''Grid search and cross-validated estimators
1. Grid search
'''
from sklearn.grid_search import GridSearchCV

# Search a log-spaced grid of C values for the SVC
Cs = np.logspace(-6, -1, 10)
clf = GridSearchCV(estimator=svc, param_grid=dict(C=Cs), n_jobs=-1)
clf.fit(X_digits[:1000], y_digits[:1000])
clf.best_score_
# 0.92500000000000004
clf.best_estimator_.C
# 0.0077426368268112772
# Prediction performance on the held-out samples
clf.score(X_digits[1000:], y_digits[1000:])
# 0.94353826850690092

'''2. Cross-validated estimators'''
from sklearn import linear_model, datasets

lasso = linear_model.LassoCV()
diabetes = datasets.load_diabetes()
X_diabetes = diabetes.data
y_diabetes = diabetes.target
lasso.fit(X_diabetes, y_diabetes)
# The estimator picks its regularization parameter (lambda) automatically
lasso.alpha_
# 0.012291895087486173
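As with cross_validation, the sklearn.grid_search module has since been replaced by sklearn.model_selection; GridSearchCV keeps the same best_score_, best_estimator_, and score interface there. A sketch of the grid-search step under that assumption, again reusing svc, X_digits, and y_digits from above:

# Grid search over C with the newer import path; attributes are unchanged
import numpy as np
from sklearn.model_selection import GridSearchCV

Cs = np.logspace(-6, -1, 10)
clf = GridSearchCV(estimator=svc, param_grid={'C': Cs}, n_jobs=-1)
clf.fit(X_digits[:1000], y_digits[:1000])
print(clf.best_score_, clf.best_estimator_.C)
print(clf.score(X_digits[1000:], y_digits[1000:]))
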