Spark 机器学习构建回归模型 (Building regression models with Spark MLlib)

来源:互联网 发布:linux服务器架设书 编辑:程序博客网 时间:2024/05/22 10:37
"""Build and evaluate regression models on the Bike Sharing dataset with
Spark MLlib: a linear regression trained via SGD (on one-hot-encoded
features) and a decision tree (on raw features), plus log-transformed
target variants, a train/test split, and simple hyper-parameter sweeps.

Expected input: hour.csv from the UCI Bike-Sharing-Dataset, columns (0-based):
    0 instant  1 dteday  2 season  3 yr  4 mnth  5 hr  6 holiday
    7 weekday  8 workingday  9 weathersit  10 temp  11 atemp  12 hum
    13 windspeed  14 casual  15 registered  16 cnt  (regression target = cnt)
"""
import numpy as np
import matplotlib.pyplot as plt
from pyspark import SparkContext
from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
from pyspark.mllib.tree import DecisionTree

sc = SparkContext("local[4]", "first spark app")
# Raw string so the Windows backslashes can never be read as escape sequences.
raw_data = sc.textFile(r"E:\sparkLearning\Bike-Sharing-Dataset\hour.csv")
records = raw_data.map(lambda line: line.split(","))
num_data = raw_data.count()
first = records.first()
print(first)
print(num_data)
records.cache()

# Categorical columns 2..9 and numerical columns 10..13 of hour.csv.
# Defined once so the mapping construction and feature extraction cannot
# disagree about which columns they cover (they did in the original).
CAT_COLS = list(range(2, 10))
NUM_COLS = list(range(10, 14))


def get_mapping(rdd, idx):
    """Map each distinct value of column `idx` to a dense 0-based index."""
    return rdd.map(lambda fields: fields[idx]).distinct().zipWithIndex().collectAsMap()


print("Mapping of first categorical feature column: %s" % get_mapping(records, 2))
mappings = [get_mapping(records, i) for i in CAT_COLS]
cat_len = sum(map(len, mappings))
# BUGFIX: was len(records.first()[11:15]); the numeric block is columns
# 10..13 (same length 4, but the slice pointed at the wrong columns).
num_len = len(NUM_COLS)
total_len = num_len + cat_len
print("Feature vector length for categorical features: %d" % cat_len)
print("Feature vector length for numerical features: %d" % num_len)
print("Total feature vector length: %d" % total_len)


def extract_features(record):
    """One-hot encode the eight categorical columns and append the four
    numeric columns, yielding a vector of length `total_len`.

    BUGFIX: the original looped over record[2:9] (7 fields) while `mappings`
    was built for columns 2..9 (8 fields), so the final one-hot block of the
    vector stayed permanently zero.
    """
    cat_vec = np.zeros(cat_len)
    step = 0
    for mapping, col in zip(mappings, CAT_COLS):
        cat_vec[step + mapping[record[col]]] = 1
        step += len(mapping)
    num_vec = np.array([float(record[col]) for col in NUM_COLS])
    return np.concatenate((cat_vec, num_vec))


def extract_label(record):
    """The regression target is the last column (total rentals, `cnt`)."""
    return float(record[-1])


data = records.map(lambda r: LabeledPoint(extract_label(r), extract_features(r)))
first_point = data.first()
print("原始特征向量Raw data: " + str(first[2:]))
print("原始特征标签Label: " + str(first_point.label))
print("对特征进行热编码之后的特征向量Linear Model feature vector :\n" + str(first_point.features))
print("对特征进行热编码之后的特征向量的长度Linear Model feature vector length:" + str(len(first_point.features)))


def extract_features_dt(record):
    """Decision trees consume the raw categorical + numeric columns directly."""
    return np.array(record[2:14])


data_dt = records.map(lambda point: LabeledPoint(extract_label(point), extract_features_dt(point)))
first_point_dt = data_dt.first()
print("决策树特征向量:\n" + str(first_point_dt.features))
print("决策树特征向量长度:" + str(len(first_point_dt.features)))

# --- Train a linear model and a decision tree on the full dataset ----------
linear_model = LinearRegressionWithSGD.train(data, iterations=10, step=0.1, intercept=False)
true_vs_predicted = data.map(lambda p: (p.label, linear_model.predict(p.features)))
print("线性回归对前五个样本的预测:\n" + str(true_vs_predicted.take(5)))

dt_model = DecisionTree.trainRegressor(data_dt, {})
preds = dt_model.predict(data_dt.map(lambda p: p.features))
actual = data.map(lambda p: p.label)
true_vs_predicted_dt = actual.zip(preds)
print(" 决策树预测值:" + str(true_vs_predicted_dt.take(5)))
print("决策树深度:" + str(dt_model.depth()))
print("决策树节点个数:" + str(dt_model.numNodes()))


def squared_error(actual, pred):
    """Squared error of a single prediction."""
    return (pred - actual) ** 2


def abs_error(actual, pred):
    """Absolute error of a single prediction."""
    return np.abs(pred - actual)


def squared_log_error(actual, pred):
    """Squared log error (the RMSLE term).  Symmetric in its two arguments,
    so the original's swapped (pred, actual) parameter order returned the
    same value; renamed here for consistency with the other two metrics."""
    return (np.log(pred + 1) - np.log(actual + 1)) ** 2


mse = true_vs_predicted.map(lambda p: squared_error(p[0], p[1])).mean()
mae = true_vs_predicted.map(lambda p: abs_error(p[0], p[1])).mean()
rmsle = np.sqrt(true_vs_predicted.map(lambda p: squared_log_error(p[0], p[1])).mean())
print("Linear Model - Mean Squared Error : %2.4f" % mse)
print("Linear Model - Mean Absolute Error : %2.4f" % mae)
print("Linear Model - Root Mean Squared Log Error : %2.4f" % rmsle)

mse_dt = true_vs_predicted_dt.map(lambda p: squared_error(p[0], p[1])).mean()
mae_dt = true_vs_predicted_dt.map(lambda p: abs_error(p[0], p[1])).mean()
rmsle_dt = np.sqrt(true_vs_predicted_dt.map(lambda p: squared_log_error(p[0], p[1])).mean())
print("Decision Tree - Mean Squared Error : %2.4f" % mse_dt)
print("Decision Tree - Mean Absolute Error : %2.4f" % mae_dt)
print("Decision Tree - Root Mean Squared Log Error : %2.4f" % rmsle_dt)

# Target-distribution histograms (raw / log / sqrt), kept for reference:
# targets = records.map(lambda r: float(r[-1])).collect()
# plt.hist(targets, bins=40, color="lightblue", normed=True)
# fig = plt.gcf(); fig.set_size_inches(16, 10); plt.show()
# log_targets = records.map(lambda r: np.log(float(r[-1]))).collect()
# plt.hist(log_targets, bins=40, color="lightblue", normed=True)
# fig = plt.gcf(); fig.set_size_inches(12, 6); plt.show()
# sqrt_targets = records.map(lambda r: np.sqrt(float(r[-1]))).collect()
# plt.hist(sqrt_targets, bins=40, color="lightblue", normed=True)
# fig = plt.gcf(); fig.set_size_inches(16, 10); plt.show()

# --- Log-transformed targets ------------------------------------------------
data_log = data.map(lambda lp: LabeledPoint(np.log(lp.label), lp.features))
model_log = LinearRegressionWithSGD.train(data_log, iterations=10, step=0.1)
# Predictions are exponentiated back to the original scale before scoring.
true_vs_predicted_log = data_log.map(
    lambda p: (np.exp(p.label), np.exp(model_log.predict(p.features))))
mse_log = true_vs_predicted_log.map(lambda p: squared_error(p[0], p[1])).mean()
mae_log = true_vs_predicted_log.map(lambda p: abs_error(p[0], p[1])).mean()
rmsle_log = np.sqrt(true_vs_predicted_log.map(lambda p: squared_log_error(p[0], p[1])).mean())
print("Linear Model - Mean Squared Error : %2.4f" % mse_log)
print("Linear Model - Mean Absolute Error : %2.4f" % mae_log)
print("Linear Model - Root Mean Squared Log Error : %2.4f" % rmsle_log)
print("Non log-transfromed predictions:\n" + str(true_vs_predicted.take(3)))
print("Log_transformed predictions:\n" + str(true_vs_predicted_log.take(3)))

data_dt_log = data_dt.map(lambda lp: LabeledPoint(np.log(lp.label), lp.features))
dt_model_log = DecisionTree.trainRegressor(data_dt_log, {})
preds_log = dt_model_log.predict(data_dt_log.map(lambda p: p.features))
actual_log = data_dt_log.map(lambda p: p.label)
true_vs_predicted_dt_log = actual_log.zip(preds_log).map(lambda p: (np.exp(p[0]), np.exp(p[1])))
mse_log_dt = true_vs_predicted_dt_log.map(lambda p: squared_error(p[0], p[1])).mean()
mae_log_dt = true_vs_predicted_dt_log.map(lambda p: abs_error(p[0], p[1])).mean()
rmsle_log_dt = np.sqrt(true_vs_predicted_dt_log.map(lambda p: squared_log_error(p[0], p[1])).mean())
print("Decision Tree - Mean Squared Error : %2.4f" % mse_log_dt)
print("Decision Tree  - Mean Absolute Error : %2.4f" % mae_log_dt)
print("Decision Tree - Root Mean Squared Log Error : %2.4f" % rmsle_log_dt)
print("Non log-transfromed predictions:\n" + str(true_vs_predicted_dt.take(3)))
print("Log_transformed predictions:\n" + str(true_vs_predicted_dt_log.take(3)))

# --- Train / test split -----------------------------------------------------
data_with_idx = data.zipWithIndex().map(lambda k_v: (k_v[1], k_v[0]))
test = data_with_idx.sample(False, 0.2, 42)
train = data_with_idx.subtractByKey(test)
train_data = train.map(lambda idx_p: idx_p[1])
test_data = test.map(lambda idx_p: idx_p[1])
train_size = train_data.count()
test_size = test_data.count()
print("训练集大小:%d" % train_size)
print("测试集大小: %d" % test_size)
print("总数据大小: %d" % num_data)
print("训练集+测试集大小:%d" % (train_size + test_size))

data_with_idx_dt = data_dt.zipWithIndex().map(lambda k_v: (k_v[1], k_v[0]))
test_dt = data_with_idx_dt.sample(False, 0.2, 42)
# BUGFIX: was subtractByKey(test) — subtracting the *linear-model* test set's
# keys; only correct by accident because both samples use the same seed (42).
train_dt = data_with_idx_dt.subtractByKey(test_dt)
train_data_dt = train_dt.map(lambda idx_p: idx_p[1])
test_data_dt = test_dt.map(lambda idx_p: idx_p[1])


def evaluate(train, test, iterations, step, regParam, regType, intercept):
    """Train a linear SGD model with the given hyper-parameters on `train`
    and return its RMSLE on `test`."""
    model = LinearRegressionWithSGD.train(
        train, iterations, step, regParam=regParam, regType=regType, intercept=intercept)
    tp = test.map(lambda p: (p.label, model.predict(p.features)))
    return np.sqrt(tp.map(lambda p: squared_log_error(p[0], p[1])).mean())


# Sweep the number of SGD iterations.
params = [1, 5, 10, 20, 50, 100, 125, 150, 175, 200]
metrics = [evaluate(train_data, test_data, param, 0.01, 0.0, 'l2', False) for param in params]
print(params)
print(metrics)
# plt.plot(params, metrics); plt.xscale('log'); plt.show()

# Sweep the step size.
params = [0.01, 0.025, 0.05, 0.075, 0.1, 1.0]
metrics = [evaluate(train_data, test_data, 10, param, 0.0, 'l2', False) for param in params]
print(params)
print(metrics)
# plt.plot(params, metrics); plt.xscale('log'); plt.show()

# Sweep the L2 regularization strength.
params = [0.0, 0.01, 0.1, 1.0, 5.0, 10.0, 20.0]
metrics = [evaluate(train_data, test_data, 10, 0.1, param, 'l2', False) for param in params]
print(params)
print(metrics)
# plt.plot(params, metrics); plt.xscale('log'); plt.show()

# Sweep the L1 regularization strength.
params = [0.0, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
metrics = [evaluate(train_data, test_data, 10, 0.1, param, 'l1', False) for param in params]
print(params)
print(metrics)
# plt.plot(params, metrics); plt.xscale('log'); plt.show()

# With / without an intercept term.
params = [False, True]
metrics = [evaluate(train_data, test_data, 10, 0.1, 10, 'l2', param) for param in params]
print(params)
print(metrics)
# plt.bar(params, metrics, color="lightblue"); plt.show()


def evaluate_dt(train, test, maxDepth, maxBins):
    """Train a variance-impurity regression tree with the given depth/bins
    limits on `train` and return its RMSLE on `test`."""
    model = DecisionTree.trainRegressor(
        train, {}, impurity='variance', maxDepth=maxDepth, maxBins=maxBins)
    preds = model.predict(test.map(lambda p: p.features))
    actual = test.map(lambda p: p.label)
    tp = actual.zip(preds)
    return np.sqrt(tp.map(lambda p: squared_log_error(p[0], p[1])).mean())


# Sweep the maximum tree depth.
params = [1, 2, 3, 4, 5, 10, 20]
metrics = [evaluate_dt(train_data_dt, test_data_dt, param, 32) for param in params]
print(params)
print(metrics)
# plt.plot(params, metrics); plt.show()

# Sweep the maximum number of bins per split.
params = [2, 4, 8, 16, 32, 64, 100]
metrics = [evaluate_dt(train_data_dt, test_data_dt, 5, param) for param in params]
print(params)
print(metrics)
plt.plot(params, metrics)
fig = plt.gcf()
plt.show()

原创粉丝点击