Spark学习笔记(1)--------基本函数

来源:互联网 发布:ubuntu查看分区挂载点 编辑:程序博客网 时间:2024/05/29 17:02
Spark学习笔记(1)--------基本函数
 前言:

         spark之精髓远未领略,基本的函数和指令只能说是初体验。希望日后可以将这个工具熟练掌握。。。

         作者:Leige_Smart

         运行环境:Linux
         语言:scala
         说明:    (#)指大标题         (@)指命令        (@@)指运行结果

#基本函数命令
 #内容
         %scala> rdd.foreach(println)
         %leige;ddf;dfe;efefe;sdcd;
         %dfe;eff;
         %fsdfe;fe;frgr;dcdc;
         %eff;leige;dfe;
         %efefe;dcdc;

 @命令和执行结果

        @scala> val rddlength=rdd.map(s=>s.length).collect
        @@rddlength: Array[Int] = Array(25, 19, 8, 14, 11, 0)
        @统计 rdd 中每一行字符串的长度(字符数),并放在 Array 向量中

  _.reduce()--
        @scala> val rddtotallength=rddlength.reduce((a,b)=>a+b)
        @rddtotallength: Int = 77
        @将Array中的数字累加起来

  _.map(_XX)
        @val rdd = sc.parallelize(List(1,2,3,4,5,6))
        @val mapRdd = rdd.map(_*2)  //这是典型的函数式编程
        @mapRdd.collect()
        @@Array(2,4,6,8,10,12)
 _.map(x=>(x._2,x._1))
        @将元组前后两个元素调换
        @val l1=sc.parallelize(List(('a',1),('a',2),('b',3)))
        @val l2=l1.map(x=>(x._2,x._1))
        @l2.collect
        @@ Array[(Int, Char)] = Array((1,a), (2,a), (3,b))

  _.filter(_XX)
        @val filterRdd = mapRdd.filter(_ > 5)
        @filterRdd.collect()
        @@Array(6,8,10,12)

  _.count
        @计算行数
  _.cache
        @把内容保存到内存中(如果在保存到内存后操作会快很多)

  _.flatMap(_.split(";"))
@按分号(;)将每行拆分成多个单词,并展平为一个集合
        @scala> val rdd2=rdd.flatMap(_.split(";"))
        @rdd2.collect
        @@Array[String] = Array(leige, ddf, dfe, efefe, sdcd, fsdfe, fe, frgr, dcdc, dfe, eff, eff, leige, dfe, efefe, dcdc, "")

  _.flatMap(_.split(";").map((_,1)))
        @将每个元素变成一个元组
        @val rdd3=rdd.flatMap(_.split(";").map((_,1)))
        @@Array[(String, Int)] = Array((leige,1), (ddf,1), (dfe,1), (efefe,1), (sdcd,1), (fsdfe,1), (fe,1), (frgr,1), (dcdc,1), (dfe,1), (eff,1), (eff,1), (leige,1), (dfe,1), (efefe,1), (dcdc,1), ("",1))

  _.flatMap(_.split(";").map((_,1))).reduceByKey(_+_)
  _.flatMap(_.split(";").map((_,1))).reduceByKey((a,b)=>a+b)
        @将元组统计求和
        @Array[(String, Int)] = Array((sdcd,1), (dcdc,2), (fsdfe,1), ("",1), (ddf,1), (leige,2), (efefe,2), (frgr,1), (fe,1), (eff,2), (dfe,3))

rdd1 join rdd2
        @按 key 对两个 RDD 做(内)连接:相同 key 的 value 两两组合成元组;注意这不是笛卡尔积(笛卡尔积是 cartesian)
        @val rdd1 = sc.parallelize(List(('a',1),('a',2),('b',3)))
        @val rdd2 = sc.parallelize(List(('a',4),('b',5)))
        @@val result_join = rdd1 join rdd2 //结果是按 key 连接,Array(('a',(1,4)), ('a',(2,4)), ('b',(3,5)))

 rdd1 union rdd2
        @把两个list合并
        @val rdd1 = sc.parallelize(List(('a',1),('a',2)))
        @val rdd2 = sc.parallelize(List(('b',1),('b',2)))
        @@val result_union = rdd1 union rdd2 //结果是把两个list合并成一个,List(('a',1),('a',2),('b',1),('b',2))

_.lookup('x')
        @把x对应value提出来组成一个seq
        @ val rdd=sc.parallelize(List(('a',1),('a',2),('b',1),('b',2)))
        @val result=rdd.lookup('a')
        @result.foreach(println)
        @@1 2

  _.sortByKey()/_.sortByKey(false)
        @按照键值排序/降序
        @val l=sc.parallelize(List(('a',2),('b',4),('a',3),('b',1)))
        @val l2=l.sortByKey()
        @@Array[(Char, Int)] = Array((a,2), (a,3), (b,4), (b,1))
0 0