Feature Extraction with Python

# -*- coding: utf-8 -*-
"""
Created on Mon Aug 21 10:57:29 2017
@author: 飘的心
"""

# Filter-based feature selection
# Select by variance: the smaller a feature's variance, the weaker its
# discriminative power, so it can be dropped.
from sklearn.feature_selection import VarianceThreshold

x = [[100, 1, 2, 3],
     [100, 4, 5, 6],
     [100, 7, 8, 9],
     [101, 11, 12, 13]]
selector = VarianceThreshold(threshold=1)   # variance threshold
selector.fit(x)
selector.variances_                          # variance of each feature
selector.transform(x)                        # perform the feature selection
selector.get_support(True)                   # indices of the selected features in the original data
selector.inverse_transform(selector.transform(x))  # map the selected features back to the original
                                                   # shape; dropped columns are filled with 0

# Univariate feature selection
from sklearn.feature_selection import SelectKBest, f_classif

x = [[1, 2, 3, 4, 5],
     [5, 4, 3, 2, 1],
     [3, 3, 3, 3, 3],
     [1, 1, 1, 1, 1]]
y = [0, 1, 0, 1]
selector = SelectKBest(score_func=f_classif, k=3)  # keep 3 features, scored with the ANOVA F-value
selector.fit(x, y)
selector.scores_              # score of each feature
selector.pvalues_             # p-value of each feature
selector.get_support(True)    # True: return the indices of the selected features;
                              # False: return a boolean mask marking which features were selected
selector.transform(x)

# Wrapper-based feature selection
from sklearn.feature_selection import RFE
from sklearn.svm import LinearSVC        # use a linear SVM as the underlying estimator
from sklearn.datasets import load_iris   # load the dataset

iris = load_iris()
x = iris.data
y = iris.target
estimator = LinearSVC()
selector = RFE(estimator=estimator, n_features_to_select=2)  # keep 2 features
selector.fit(x, y)
selector.n_features_   # number of selected features
selector.support_      # mask of the selected features
selector.ranking_      # feature ranking; selected features are ranked 1

# Note: feature selection does not necessarily improve predictive performance;
# the following compares training with and without it.
from sklearn.feature_selection import RFE
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split   # sklearn.cross_validation has been removed
from sklearn.datasets import load_iris

# load the data
iris = load_iris()
X = iris.data
y = iris.target
# feature selection
estimator = LinearSVC()
selector = RFE(estimator=estimator, n_features_to_select=2)
X_t = selector.fit_transform(X, y)
# split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.25, random_state=0, stratify=y)
x_train_t, x_test_t, y_train_t, y_test_t = train_test_split(
    X_t, y, test_size=0.25, random_state=0, stratify=y)
clf = LinearSVC()
clf_t = LinearSVC()
clf.fit(x_train, y_train)
clf_t.fit(x_train_t, y_train_t)
print('origin dataset test score:', clf.score(x_test, y_test))
# origin dataset test score: 0.973684210526
print('selected dataset test score:', clf_t.score(x_test_t, y_test_t))
# selected dataset test score: 0.947368421053

# RFE with cross-validation to choose the number of features automatically
from sklearn.feature_selection import RFECV
from sklearn.svm import LinearSVC
from sklearn.datasets import load_iris

iris = load_iris()
x = iris.data
y = iris.target
estimator = LinearSVC()
selector = RFECV(estimator=estimator, cv=3)
selector.fit(x, y)
selector.n_features_
selector.support_
selector.ranking_
selector.grid_scores_   # per-feature-count CV scores (cv_results_ in newer scikit-learn versions)

# Embedded feature selection
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
from sklearn.datasets import load_digits

digits = load_digits()
x = digits.data
y = digits.target
estimator = LinearSVC(penalty='l1', dual=False)   # the L1 penalty produces sparse coefficients
selector = SelectFromModel(estimator=estimator, threshold='mean')
selector.fit(x, y)
selector.transform(x)
selector.threshold_                  # the threshold actually used
selector.get_support(indices=True)   # indices of the selected features
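SelectFromModel works with any estimator that exposes coef_ or feature_importances_ after fitting, not only the L1 linear SVM used above. As a minimal sketch (the estimator choice and its parameters are illustrative assumptions, not part of the original post), a tree ensemble can serve as the embedded selector:

# Sketch: embedded selection with a tree ensemble instead of an L1 SVM (illustrative).
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.datasets import load_digits

digits = load_digits()
rf_selector = SelectFromModel(RandomForestClassifier(n_estimators=100, random_state=0),
                              threshold='mean')
x_reduced = rf_selector.fit_transform(digits.data, digits.target)
print(x_reduced.shape)   # keeps only the columns whose importance is above the mean importance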
# scikit-learn provides Pipeline to chain several learners into a workflow. A typical
# pipeline has the form: data standardisation -> feature-selection learner -> predicting
# learner. Every learner except the last one must provide a transform method, which does
# the data transformation (e.g. normalisation, regularisation, feature selection).

# Learner pipeline
from sklearn.svm import LinearSVC
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectFromModel
from sklearn.pipeline import Pipeline

def test_Pipeline(data):
    x_train, x_test, y_train, y_test = data
    # Intermediate pipeline steps must implement transform, so the L1 linear SVM is
    # wrapped in SelectFromModel and acts as the feature-selection step.
    steps = [('linear_svm', SelectFromModel(LinearSVC(C=1, penalty='l1', dual=False))),
             ('logisticregression', LogisticRegression(C=1))]
    pipeline = Pipeline(steps)
    pipeline.fit(x_train, y_train)
    print('named steps', pipeline.named_steps)
    print('pipeline score', pipeline.score(x_test, y_test))

if __name__ == '__main__':
    data = load_digits()
    x = data.data
    y = data.target
    test_Pipeline(train_test_split(x, y, test_size=0.25,
                                   random_state=0, stratify=y))
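To illustrate the full form described in the comment above (standardise -> select features -> predict), here is a minimal sketch; the step names, C values and max_iter below are illustrative assumptions rather than settings from the original post:

# Sketch: standardisation -> embedded feature selection -> classifier, as one pipeline.
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits

data = load_digits()
x_train, x_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=0.25, random_state=0, stratify=data.target)

full_pipeline = Pipeline([
    ('scaler', StandardScaler()),                                           # standardisation
    ('select', SelectFromModel(LinearSVC(C=1, penalty='l1', dual=False))),  # feature selection
    ('clf', LogisticRegression(C=1, max_iter=1000)),                        # predictor
])
full_pipeline.fit(x_train, y_train)
print('full pipeline score', full_pipeline.score(x_test, y_test))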

