Spark reduce Operator


reduce aggregates all the elements of an RDD: the first and second elements are combined, that result is combined with the third element, that result with the fourth, and so on.
collect fetches all the elements of an RDD back to the local client (the driver).
count returns the total number of elements in an RDD.
take(n) returns the first n elements of an RDD.
saveAsTextFile saves the RDD's elements to a file, calling toString on each element.
countByKey counts the number of values for each key.
foreach iterates over each element of an RDD on the executors (see the sketch just below).
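
foreach is listed above but not demonstrated in the Java or Scala examples that follow, so here is a minimal Scala sketch of it (ForeachDemo is a hypothetical standalone object written to mirror the style of those examples):

import org.apache.spark.{SparkConf, SparkContext}

object ForeachDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("foreach").setMaster("local")
    val sc = new SparkContext(conf)
    val numbers = sc.parallelize(Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), 1)
    // foreach runs the function on the executors, not the driver; on a real cluster
    // the println output therefore appears in the executor logs rather than locally.
    numbers.foreach(num => println(num))
    sc.stop()
  }
}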

import java.util.Arrays;
import java.util.List;
import java.util.Map;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;

import scala.Tuple2;

public class ActionOperation {

    public static void main(String[] args) {
        reduce();
        collect();
        count();
        take();
        saveAsTextFile();
        countByKey();
    }

    private static void reduce() {
        SparkConf conf = new SparkConf().setAppName("reduce").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);
        // Use reduce to sum the numbers in the collection. How reduce works: the first and
        // second elements are passed to call() and produce a result; that result is then
        // passed to call() together with the next element, and so on. In essence, reduce
        // aggregates many elements into a single one.
        int sum = numbers.reduce(new Function2<Integer, Integer, Integer>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        System.out.println(sum);
        sc.close();
    }

    private static void collect() {
        SparkConf conf = new SparkConf().setAppName("collect").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);
        JavaRDD<Integer> doubleNumbers = numbers.map(new Function<Integer, Integer>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Integer call(Integer v1) throws Exception {
                return v1 * 2;
            }
        });
        // The foreach action iterates over the RDD's elements on the remote cluster, whereas
        // collect pulls the distributed doubleNumbers RDD back to the driver. Collecting is
        // generally discouraged: with a large RDD it performs poorly because of the heavy
        // network transfer, and it can even cause an OOM (out of memory) error on the driver.
        // Normally foreach is the recommended way to process the final RDD.
        List<Integer> doubleNumberList = doubleNumbers.collect();
        for (Integer num : doubleNumberList) {
            System.out.println(num);
        }
        sc.close();
    }

    private static void count() {
        SparkConf conf = new SparkConf().setAppName("count").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);
        long count = numbers.count();
        System.out.println(count);
        sc.close();
    }

    private static void take() {
        SparkConf conf = new SparkConf().setAppName("take").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);
        // take is similar to collect: it also pulls RDD data from the remote cluster to the
        // driver. The difference is that collect fetches all of the data, while take only
        // fetches the first n elements.
        List<Integer> top3Numbers = numbers.take(3);
        for (Integer num : top3Numbers) {
            System.out.println(num);
        }
        sc.close();
    }

    private static void saveAsTextFile() {
        SparkConf conf = new SparkConf().setAppName("saveAsTextFile").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<Integer> numberList = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
        JavaRDD<Integer> numbers = sc.parallelize(numberList);
        JavaRDD<Integer> doubleNumbers = numbers.map(new Function<Integer, Integer>() {
            private static final long serialVersionUID = 1L;
            @Override
            public Integer call(Integer v1) throws Exception {
                return v1 * 2;
            }
        });
        // Saves the RDD's data directly to an HDFS file. Only a directory path can be
        // specified; the actual output files are written inside that directory.
        doubleNumbers.saveAsTextFile("hdfs://spark1:9000/double_number");
        sc.close();
    }

    private static void countByKey() {
        SparkConf conf = new SparkConf().setAppName("countByKey").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        List<Tuple2<String, String>> studentList = Arrays.asList(
                new Tuple2<String, String>("class1", "leo"),
                new Tuple2<String, String>("class2", "jack"),
                new Tuple2<String, String>("class1", "marry"),
                new Tuple2<String, String>("class2", "david"));
        JavaPairRDD<String, String> students = sc.parallelizePairs(studentList);
        // Apply countByKey to the RDD; its return type is simply Map<String, Object>.
        Map<String, Object> studentCounts = students.countByKey();
        for (Map.Entry<String, Object> studentCount : studentCounts.entrySet()) {
            System.out.println(studentCount.getKey() + ": " + studentCount.getValue());
        }
        sc.close();
    }
}

import org.apache.spark.{SparkConf, SparkContext}

object ActionOperation {
def main(args:Array[String]):Unit={
reduce()
collect()
count()
take()
saveAsTextFile()
countByKey()
}
def reduce():Unit={
val conf = new SparkConf().setAppName("reduce").setMaster("local")
val sc=new SparkContext(conf)
val numberArray=Array(1,2,3,4,5,6,7,8,9,10)
val numbers=sc.parallelize(numberArray,1)
val sum = numbers.reduce(_ + _)
println(sum)
}

def collect():Unit={
val conf = new SparkConf().setAppName("collect").setMaster("local")
val sc=new SparkContext(conf)
val numberArray=Array(1,2,3,4,5,6,7,8,9,10)
val numbers=sc.parallelize(numberArray,1)
val doubleNumbers=numbers.map{num=>num*2}
val doubleNumberArray=doubleNumbers.collect()
for(num<- doubleNumberArray){
println(num)
}
}

def count():Unit={
val conf = new SparkConf().setAppName("count").setMaster("local")
val sc=new SparkContext(conf)
val numberArray=Array(1,2,3,4,5,6,7,8,9,10)
val numbers=sc.parallelize(numberArray,1)
val count=numbers.count()
println(count)
sc.stop()
}

def take():Unit={
val conf = new SparkConf().setAppName("take").setMaster("local")
val sc=new SparkContext(conf)
val numberArray=Array(1,2,3,4,5,6,7,8,9,10)
val numbers=sc.parallelize(numberArray,1)
val top3Numbers=numbers.take(3)
for(num<- top3Numbers){
println(num)
}
}

def saveAsTextFile():Unit={
val conf = new SparkConf().setAppName("saveAsTextFile").setMaster("local")
val sc=new SparkContext(conf)
val numberArray=Array(1,2,3,4,5,6,7,8,9,10)
val numbers=sc.parallelize(numberArray,1)
val doubleNumbers = numbers.map{num => num * 2}
doubleNumbers.saveAsTextFile("hdfs://spark1:9000/double_number")
}
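
// saveAsTextFile only accepts a directory path: the output lands in part files under
// hdfs://spark1:9000/double_number. A minimal sketch of how that output could be read
// back with sc.textFile (readBackTextFile is a hypothetical helper, not called from
// main above, and it assumes the directory already exists):
def readBackTextFile(): Unit = {
val conf = new SparkConf().setAppName("readBackTextFile").setMaster("local")
val sc = new SparkContext(conf)
val lines = sc.textFile("hdfs://spark1:9000/double_number")
lines.collect().foreach(println)
sc.stop()
}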

def countByKey():Unit={
val conf = new SparkConf().setAppName("countByKey").setMaster("local")
val sc=new SparkContext(conf)
val studentList = Array(new Tuple2("class1", "leo"), new Tuple2("class2", "jack"), new Tuple2("class1", "tom"), new Tuple2("class2", "jen"))
val students=sc.parallelize(studentList,1)
val studentCounts=students.countByKey()
println(studentCounts)
}

}
