Spark on Hive: writing CSV-format files to HDFS

Exception in thread "main" java.lang.ClassNotFoundException: Failed to find data source: csv. Please find packages at http://spark-packages.org
        at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.lookupDataSource(ResolvedDataSource.scala:77)
        at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:219)
        at org.apache.spark.sql.DataFrameWriter.dataSource$lzycompute$1(DataFrameWriter.scala:181)
        at org.apache.spark.sql.DataFrameWriter.org$apache$spark$sql$DataFrameWriter$$dataSource$1(DataFrameWriter.scala:181)
        at org.apache.spark.sql.DataFrameWriter$$anonfun$save$1.apply$mcV$sp(DataFrameWriter.scala:188)
        at org.apache.spark.sql.DataFrameWriter.executeAndCallQEListener(DataFrameWriter.scala:154)
        at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:188)
        at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:172)
        at xiangqi_spark.MysqlImport$.main(MysqlImport.scala:30)
        at xiangqi_spark.MysqlImport.main(MysqlImport.scala)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:730)
        at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.ClassNotFoundException: csv.DefaultSource
        at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
        at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
        at org.apache.spark.sql.execution.datasources.ResolvedDataSource$$anonfun$4$$anonfun$apply$1.apply(ResolvedDataSource.scala:62)
        at org.apache.spark.sql.execution.datasources.ResolvedDataSource$$anonfun$4$$anonfun$apply$1.apply(ResolvedDataSource.scala:62)
        at scala.util.Try$.apply(Try.scala:161)
        at org.apache.spark.sql.execution.datasources.ResolvedDataSource$$anonfun$4.apply(ResolvedDataSource.scala:62)
        at org.apache.spark.sql.execution.datasources.ResolvedDataSource$$anonfun$4.apply(ResolvedDataSource.scala:62)
        at scala.util.Try.orElse(Try.scala:82)
        at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.lookupDataSource(ResolvedDataSource.scala:62)
Solution:

The csv data source is not built into Spark 1.6 (built-in CSV support only arrived in Spark 2.0), so the external spark-csv package from Databricks has to be put on the classpath.

Add the following dependency to the pom file (the Spark version here is 1.6.0, built against Scala 2.10):

<!-- spark on hive: write files in CSV format -->
<dependency>
    <groupId>com.databricks</groupId>
    <artifactId>spark-csv_2.10</artifactId>
    <version>1.4.0</version>
</dependency>
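If changing the pom is not convenient, the same package can also be supplied at submit time, e.g. spark-submit --packages com.databricks:spark-csv_2.10:1.4.0 ... (keep the version in step with your Scala and Spark build).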


Code:
package xiangqi_spark

import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
import utils.TimeUtil

/**
  * Created by Administrator on 2017/10/24.
  */
object MysqlImport extends Serializable {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setAppName("MysqlImport")
//      .setMaster("local")
    val sc: SparkContext = new SparkContext(conf)
    val sqlcontext: SQLContext = new SQLContext(sc)
    val hiveContext: HiveContext = new HiveContext(sc)
//    hiveContext.sql("use bdp_dw")  // production database
    hiveContext.sql("use default")   // test database
    val date: String = TimeUtil.getOneDaysAgoTime(0) // current date, format yyyyMMdd
    val resultsql: DataFrame = hiveContext.sql("select * from bi_device_under_vol")
//    production cluster
//    resultsql.write.format("csv").save("hdfs://10.27.227.160:8020/tmp/" + date)
    import com.databricks.spark.csv._
    resultsql.saveAsCsvFile("hdfs://10.27.227.160:8020/tmp/" + date)
//    test cluster
//    resultsql.repartition(1).saveAsCsvFile("hdfs://192.168.0.118:8020/tmp/" + date + "_1")
//    resultsql.write.format("csv").save("hdfs://192.168.0.118:8020/tmp/" + date + "_2")
//    resultsql.write.format("csv").save("D:\\tt")
    sc.stop()
  }
}
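For reference, once spark-csv is on the classpath the same write can also go through the DataFrameWriter with per-write options. A minimal sketch, continuing from the resultsql and date values above (the header and delimiter settings are illustrative assumptions, not part of the original job):

    // Equivalent write via the spark-csv data source (Spark 1.6-era API).
    // "com.databricks.spark.csv" is the fully qualified source name; spark-csv
    // also registers the short name "csv", which is why the commented-out
    // write.format("csv") calls above work once the package is present.
    resultsql.write
      .format("com.databricks.spark.csv")
      .option("header", "true")   // assumption: emit column names as the first row
      .option("delimiter", ",")   // "," is the spark-csv default separator
      .save("hdfs://10.27.227.160:8020/tmp/" + date)

Note that each save produces a directory of part files; the commented-out repartition(1) variant collapses the output into a single part file, at the cost of pulling all rows through one task.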