Spark DataFrame: Operations on Two Tables


How do you work with two files in Spark? The example below loads each file into a DataFrame, registers the two DataFrames as temporary tables, and then joins them with a SQL statement.
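The parsing code assumes each input line is a comma-separated pair. The file contents shown here are hypothetical, just to make the split/map steps concrete:

    people.txt (hypothetical contents):
        Michael,29
        Andy,30

    peoplescore.txt (hypothetical contents):
        Michael,95
        Andy,80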

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

class vistorcount extends Serializable {

  // Define the case classes here at class level, not inside the method;
  // otherwise toDF() fails to compile (no TypeTag is available for
  // method-local case classes).
  case class person(name: String, age: Int)
  case class personscore(name: String, score: Int)
  //case class mobiledata(mobile: String, longtitude: String, latitude: String, status: Int, createtime: String)
  case class mobiledata(mobile: String, longtitude: String, latitude: String)
  case class mobilecity(mobilerange: String, city: String)

  def getpeople(): Unit = {
    val conf = new SparkConf().setAppName("traveldata").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    import sqlContext.implicits._

    // Load the first file, parse each comma-separated line into a person,
    // and register the resulting DataFrame as a temp table.
    val df = sc.textFile("c://traveldata//people.txt", 1)
      .map(_.split(","))
      .map(p => person(p(0), p(1).trim.toInt))
      .toDF()
    df.registerTempTable("people")
    //sqlContext.cacheTable("people")

    // Load the second file the same way and register it as another temp table.
    val ps = sc.textFile("c://traveldata//peoplescore.txt", 1)
      .map(_.split(","))
      .map(p => personscore(p(0), p(1).trim.toInt))
      .toDF()
    ps.registerTempTable("personscore")

    // Join the two temp tables on name with a SQL statement.
    val resultDF = sqlContext.sql(
      "select people.name, people.age, personscore.score from people, personscore where people.name = personscore.name")
    //resultDF.show()
    resultDF.collect().foreach(println)
    sc.stop()
  }

}
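The same join can also be expressed directly with the DataFrame API, without registering temp tables or writing SQL. This is a minimal sketch assuming the same df and ps DataFrames from the example above:

    // Inner join on the shared "name" column; the using-column form of
    // join keeps a single name column in the result.
    val joined = df.join(ps, "name").select("name", "age", "score")
    joined.collect().foreach(println)

Note that on Spark 2.x, SQLContext and registerTempTable are deprecated; SparkSession and createOrReplaceTempView are the current equivalents.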
