Spark 1.5.1: after starting a Master and one Slave locally, reading the stack traces in spark-shell to see where sc.textFile looks for data

The whole session below was run from /home/spark/spark-1.5.1/bin. The three failed reads show where sc.textFile (and sqlContext.read.json) actually look for data. Note that textFile itself always appears to succeed, because RDDs are lazy; the error only surfaces when count() forces a job.

1. A relative path is resolved against the working directory of the spark-shell process, not against the Spark home directory, so README.md is looked up as file:/home/spark/spark-1.5.1/bin/README.md and is not found:

scala> val textFile = sc.textFile("README.md")
textFile: org.apache.spark.rdd.RDD[String] = MapPartitionsRDD[5] at textFile at <console>:21

scala> textFile.count()
org.apache.hadoop.mapred.InvalidInputException: Input path does not exist: file:/home/spark/spark-1.5.1/bin/README.md
    at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:251)
    at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:270)
    at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:207)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1919)
    at org.apache.spark.rdd.RDD.count(RDD.scala:1121)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:24)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:29)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:31)
    at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:33)
    at $iwC$$iwC$$iwC$$iwC.<init>(<console>:35)
    at $iwC$$iwC$$iwC.<init>(<console>:37)
    at $iwC$$iwC.<init>(<console>:39)
    at $iwC.<init>(<console>:41)
    at <init>(<console>:43)
    at .<init>(<console>:47)
    at .<clinit>(<console>)
    at .<init>(<console>:7)
    at .<clinit>(<console>)
    at $print(<console>)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
    at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1340)
    at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
    at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
    at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
    at org.apache.spark.repl.SparkILoop.reallyInterpret$1(SparkILoop.scala:857)
    at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:902)
    at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:814)
    at org.apache.spark.repl.SparkILoop.processLine$1(SparkILoop.scala:657)
    at org.apache.spark.repl.SparkILoop.innerLoop$1(SparkILoop.scala:665)
    at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$loop(SparkILoop.scala:670)
    at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply$mcZ$sp(SparkILoop.scala:997)
    at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
    at org.apache.spark.repl.SparkILoop$$anonfun$org$apache$spark$repl$SparkILoop$$process$1.apply(SparkILoop.scala:945)
    at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
    at org.apache.spark.repl.SparkILoop.org$apache$spark$repl$SparkILoop$$process(SparkILoop.scala:945)
    at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1059)
    at org.apache.spark.repl.Main$.main(Main.scala:31)
    at org.apache.spark.repl.Main.main(Main.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:672)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:180)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:205)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:120)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

2. An hdfs:// URI is sent to the NameNode named in the URI; nothing is listening on localhost:9000 here, so the RPC is refused:

scala> val file = sc.textFile("hdfs://localhost:9000/sigmoid/input.txt")
file: org.apache.spark.rdd.RDD[String] = MapPartitionsRDD[7] at textFile at <console>:21

scala> file.count()
java.net.ConnectException: Call From localhost.localdomain/127.0.0.1 to localhost:9000 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:783)
    at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:730)
    at org.apache.hadoop.ipc.Client.call(Client.java:1351)
    at org.apache.hadoop.ipc.Client.call(Client.java:1300)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
    at com.sun.proxy.$Proxy19.getFileInfo(Unknown Source)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:186)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
    at com.sun.proxy.$Proxy19.getFileInfo(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getFileInfo(ClientNamenodeProtocolTranslatorPB.java:651)
    at org.apache.hadoop.hdfs.DFSClient.getFileInfo(DFSClient.java:1679)
    at org.apache.hadoop.hdfs.DistributedFileSystem$17.doCall(DistributedFileSystem.java:1106)
    at org.apache.hadoop.hdfs.DistributedFileSystem$17.doCall(DistributedFileSystem.java:1102)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.getFileStatus(DistributedFileSystem.java:1102)
    at org.apache.hadoop.fs.FileSystem.globStatusInternal(FileSystem.java:1701)
    at org.apache.hadoop.fs.FileSystem.globStatus(FileSystem.java:1647)
    at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:222)
    at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:270)
    at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:207)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1919)
    at org.apache.spark.rdd.RDD.count(RDD.scala:1121)
    ... ($iwC constructor, reflection, REPL, and SparkSubmit frames identical to the first trace)
Caused by: java.net.ConnectException: Connection refused
    at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:529)
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:493)
    at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:547)
    at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:642)
    at org.apache.hadoop.ipc.Client$Connection.access$2600(Client.java:314)
    at org.apache.hadoop.ipc.Client.getConnection(Client.java:1399)
    at org.apache.hadoop.ipc.Client.call(Client.java:1318)
    ... 78 more

3. sqlContext.read.json hits the same relative-path problem: schema inference runs a Hadoop job over the given path, and examples/src/main/resources/people.json does not exist under bin/. Going one directory up makes the read succeed:

scala> val dataframe = sqlContext.read.json("examples/src/main/resources/people.json")
java.io.IOException: No input paths specified in job
    at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:198)
    at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:270)
    at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:207)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:239)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:237)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:237)
    at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1.apply(RDD.scala:1093)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:147)
    at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:108)
    at org.apache.spark.rdd.RDD.withScope(RDD.scala:306)
    at org.apache.spark.rdd.RDD.treeAggregate(RDD.scala:1091)
    at org.apache.spark.sql.execution.datasources.json.InferSchema$.apply(InferSchema.scala:58)
    at org.apache.spark.sql.execution.datasources.json.JSONRelation$$anonfun$6.apply(JSONRelation.scala:105)
    at org.apache.spark.sql.execution.datasources.json.JSONRelation$$anonfun$6.apply(JSONRelation.scala:100)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.sql.execution.datasources.json.JSONRelation.dataSchema$lzycompute(JSONRelation.scala:100)
    at org.apache.spark.sql.execution.datasources.json.JSONRelation.dataSchema(JSONRelation.scala:99)
    at org.apache.spark.sql.sources.HadoopFsRelation.schema$lzycompute(interfaces.scala:561)
    at org.apache.spark.sql.sources.HadoopFsRelation.schema(interfaces.scala:560)
    at org.apache.spark.sql.execution.datasources.LogicalRelation.<init>(LogicalRelation.scala:31)
    at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:120)
    at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:104)
    at org.apache.spark.sql.DataFrameReader.json(DataFrameReader.scala:219)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:19)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:24)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:26)
    at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:28)
    at $iwC$$iwC$$iwC$$iwC.<init>(<console>:30)
    at $iwC$$iwC$$iwC.<init>(<console>:32)
    at $iwC$$iwC.<init>(<console>:34)
    at $iwC.<init>(<console>:36)
    at <init>(<console>:38)
    at .<init>(<console>:42)
    at .<clinit>(<console>)
    at .<init>(<console>:7)
    at .<clinit>(<console>)
    at $print(<console>)
    ... (reflection, REPL, and SparkSubmit frames identical to the first trace)

scala> val dataframe = sqlContext.read.json("../examples/src/main/resources/people.json")
dataframe: org.apache.spark.sql.DataFrame = [age: bigint, name: string]
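
To avoid depending on wherever spark-shell happens to be started, the paths can be made explicit. Below is a minimal sketch, assuming the same /home/spark/spark-1.5.1 layout as above and a NameNode on localhost:9000; the file locations are examples, adjust them to your own setup.

// Paste into spark-shell; sc and sqlContext are provided by the shell.

// Local file: an absolute path (optionally with the file:// scheme)
// does not depend on the shell's working directory.
val readme = sc.textFile("file:///home/spark/spark-1.5.1/README.md")   // example path
println(readme.count())

// HDFS file: requires a NameNode actually listening on localhost:9000
// (e.g. started with start-dfs.sh) and the file already uploaded, e.g.
//   hdfs dfs -mkdir -p /sigmoid && hdfs dfs -put input.txt /sigmoid/input.txt
val hdfsFile = sc.textFile("hdfs://localhost:9000/sigmoid/input.txt")
println(hdfsFile.count())

// JSON through SQLContext: same rule, an absolute path sidesteps the
// "No input paths specified in job" error seen above.
val people = sqlContext.read.json("file:///home/spark/spark-1.5.1/examples/src/main/resources/people.json")
people.printSchema()

Alternatively, starting spark-shell from the Spark home directory (one level above bin/) should make the relative paths used in the official examples work as written, which is what the final ../examples/... read above demonstrates.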