pyspark-评估指标

来源: 互联网  编辑: 程序博客网  时间: 2024/06/05 16:07

参考地址:

1、http://spark.apache.org/docs/latest/ml-guide.html

2、https://github.com/apache/spark/tree/v2.2.0

3、http://spark.apache.org/docs/latest/mllib-evaluation-metrics.html



Classification model evaluation

Binary classification

from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.mllib.util import MLUtils

# Evaluate a binary classifier with BinaryClassificationMetrics.
# NOTE: several metrics available in the Scala API are currently missing
# from pyspark, so only the area-under-curve summaries are printed here.
# Assumes an active SparkContext bound to `sc` (as in the Spark shell).

# Training data in LIBSVM format.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_binary_classification_data.txt")

# 60/40 train/test split; the seed makes the split reproducible.
training, test = data.randomSplit([0.6, 0.4], seed=11)
training.cache()  # reused across LBFGS iterations

# Fit a logistic-regression model with L-BFGS.
model = LogisticRegressionWithLBFGS.train(training)

# Pair each test point's predicted score with its true label.
predictionAndLabels = test.map(
    lambda point: (float(model.predict(point.features)), point.label))

# Metrics object over the (score, label) pairs.
metrics = BinaryClassificationMetrics(predictionAndLabels)

# Area under the precision-recall curve.
print("Area under PR = %s" % metrics.areaUnderPR)

# Area under the ROC curve.
print("Area under ROC = %s" % metrics.areaUnderROC)
Find full example code at "examples/src/main/python/mllib/binary_classification_metrics_example.py" in the Spark repo.


Multiclass classification

from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.util import MLUtils
from pyspark.mllib.evaluation import MulticlassMetrics

# Evaluate a 3-class logistic-regression model with MulticlassMetrics:
# overall stats, per-class stats, and label-weighted averages.
# Assumes an active SparkContext bound to `sc`.

# Training data in LIBSVM format.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_multiclass_classification_data.txt")

# 60/40 train/test split with a fixed seed for reproducibility.
training, test = data.randomSplit([0.6, 0.4], seed=11)
training.cache()

# Fit the multiclass model.
model = LogisticRegressionWithLBFGS.train(training, numClasses=3)

# (prediction, true label) pairs on the held-out test set.
predictionAndLabels = test.map(
    lambda point: (float(model.predict(point.features)), point.label))

metrics = MulticlassMetrics(predictionAndLabels)

# Overall statistics.
# NOTE(review): the no-argument precision()/recall()/fMeasure() forms are
# deprecated in Spark 2.x in favor of metrics.accuracy (and were removed
# later) — kept here to preserve this example's output; verify against the
# Spark version in use.
precision = metrics.precision()
recall = metrics.recall()
f1Score = metrics.fMeasure()
print("Summary Stats")
print("Precision = %s" % precision)
print("Recall = %s" % recall)
print("F1 Score = %s" % f1Score)

# Per-class statistics, over every label seen in the full dataset.
labels = data.map(lambda point: point.label).distinct().collect()
for label in sorted(labels):
    print("Class %s precision = %s" % (label, metrics.precision(label)))
    print("Class %s recall = %s" % (label, metrics.recall(label)))
    print("Class %s F1 Measure = %s" % (label, metrics.fMeasure(label, beta=1.0)))

# Averages weighted by each class's frequency.
print("Weighted recall = %s" % metrics.weightedRecall)
print("Weighted precision = %s" % metrics.weightedPrecision)
print("Weighted F(1) Score = %s" % metrics.weightedFMeasure())
print("Weighted F(0.5) Score = %s" % metrics.weightedFMeasure(beta=0.5))
print("Weighted false positive rate = %s" % metrics.weightedFalsePositiveRate)
Find full example code at "examples/src/main/python/mllib/multi_class_metrics_example.py" in the Spark repo.


Multilabel classification

from pyspark.mllib.evaluation import MultilabelMetrics

# Evaluate multilabel predictions with MultilabelMetrics using a small
# hand-built RDD of (predicted labels, true labels) pairs.
# Assumes an active SparkContext bound to `sc`.
scoreAndLabels = sc.parallelize([
    ([0.0, 1.0], [0.0, 2.0]),
    ([0.0, 2.0], [0.0, 1.0]),
    ([], [0.0]),
    ([2.0], [2.0]),
    ([2.0, 0.0], [2.0, 0.0]),
    ([0.0, 1.0, 2.0], [0.0, 1.0]),
    ([1.0], [1.0, 2.0])])

metrics = MultilabelMetrics(scoreAndLabels)

# Document-averaged summary statistics.
print("Recall = %s" % metrics.recall())
print("Precision = %s" % metrics.precision())
print("F1 measure = %s" % metrics.f1Measure())
print("Accuracy = %s" % metrics.accuracy)

# Per-label statistics, over every label present in the ground truth.
labels = scoreAndLabels.flatMap(lambda pair: pair[1]).distinct().collect()
for label in labels:
    print("Class %s precision = %s" % (label, metrics.precision(label)))
    print("Class %s recall = %s" % (label, metrics.recall(label)))
    print("Class %s F1 Measure = %s" % (label, metrics.f1Measure(label)))

# Micro-averaged statistics (pooled over all label instances).
print("Micro precision = %s" % metrics.microPrecision)
print("Micro recall = %s" % metrics.microRecall)
print("Micro F1 measure = %s" % metrics.microF1Measure)

# Fraction of label assignments that are wrong.
print("Hamming loss = %s" % metrics.hammingLoss)

# Fraction of documents whose predicted label set matches exactly.
print("Subset accuracy = %s" % metrics.subsetAccuracy)


Ranking systems

from pyspark.mllib.recommendation import ALS, Rating
from pyspark.mllib.evaluation import RegressionMetrics, RankingMetrics

# Train an ALS recommender on MovieLens-style data and score its rating
# reconstructions with RegressionMetrics.
# NOTE(review): RankingMetrics is imported but never used below — this
# mirrors the upstream Python example, where the ranking-metric portion
# exists only in the Scala/Java versions; confirm against the Spark repo.
# Assumes an active SparkContext bound to `sc`.

# Ratings file with "user::product::rating" records.
lines = sc.textFile("data/mllib/sample_movielens_data.txt")


def parseLine(line):
    """Parse one 'user::product::rating' record into a centered Rating."""
    fields = line.split("::")
    # Ratings are shifted by -2.5 to center them around zero.
    return Rating(int(fields[0]), int(fields[1]), float(fields[2]) - 2.5)


ratings = lines.map(lambda record: parseLine(record))

# Train an ALS model to predict user-product ratings
# (rank=10, 10 iterations, lambda=0.01).
model = ALS.train(ratings, 10, 10, 0.01)

# Predict ratings for every observed user-product pair.
testData = ratings.map(lambda r: (r.user, r.product))
predictions = model.predictAll(testData).map(
    lambda r: ((r.user, r.product), r.rating))

# Join predictions with the true ratings, keyed by (user, product),
# keeping just the (predicted, actual) value pairs.
ratingsTuple = ratings.map(lambda r: ((r.user, r.product), r.rating))
scoreAndLabels = predictions.join(ratingsTuple).map(lambda tup: tup[1])

# Regression metrics comparing predicted vs. actual ratings.
metrics = RegressionMetrics(scoreAndLabels)

# Root mean squared error.
print("RMSE = %s" % metrics.rootMeanSquaredError)

# Coefficient of determination.
print("R-squared = %s" % metrics.r2)
Find full example code at "examples/src/main/python/mllib/ranking_metrics_example.py" in the Spark repo.


Regression model evaluation

from pyspark.mllib.regression import LabeledPoint, LinearRegressionWithSGD
from pyspark.mllib.evaluation import RegressionMetrics
from pyspark.mllib.linalg import DenseVector

# Fit a linear model with SGD and report the standard regression metrics.
# NOTE(review): LinearRegressionWithSGD is deprecated in Spark 2.x in
# favor of ml.regression.LinearRegression — confirm against the Spark
# version in use. Assumes an active SparkContext bound to `sc`.


def parsePoint(line):
    """Parse one LIBSVM-style line into a LabeledPoint.

    The first token is the label; the rest are 'index:value' features.
    """
    values = line.split()
    features = DenseVector([float(x.split(':')[1]) for x in values[1:]])
    return LabeledPoint(float(values[0]), features)


data = sc.textFile("data/mllib/sample_linear_regression_data.txt")
parsedData = data.map(parsePoint)

# Fit the model on the full dataset.
model = LinearRegressionWithSGD.train(parsedData)

# (prediction, true label) pairs for every training point.
valuesAndPreds = parsedData.map(
    lambda point: (float(model.predict(point.features)), point.label))

metrics = RegressionMetrics(valuesAndPreds)

# Squared-error metrics.
print("MSE = %s" % metrics.meanSquaredError)
print("RMSE = %s" % metrics.rootMeanSquaredError)

# Coefficient of determination.
print("R-squared = %s" % metrics.r2)

# Mean absolute error.
print("MAE = %s" % metrics.meanAbsoluteError)

# Explained variance.
print("Explained variance = %s" % metrics.explainedVariance)
Find full example code at "examples/src/main/python/mllib/regression_metrics_example.py" in the Spark repo.


原创粉丝点击