Hadoop: fixing Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/util/Apps


Running WordCount locally from Eclipse on Linux throws the following exception:

Exception in thread "main" java.lang.NoClassDefFoundError: org/apache/hadoop/yarn/util/Apps

Solution: the yarn jars and the jars under yarn's lib directory were never added to the project's build path. Add all of them (in a typical Hadoop 2.x layout they live under $HADOOP_HOME/share/hadoop/yarn and $HADOOP_HOME/share/hadoop/yarn/lib) and the class resolves.
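To confirm the jars actually made it onto the classpath before resubmitting the job, a quick check is to load the missing class by name. This is a minimal sketch, not part of the original post; the class name YarnClasspathCheck is made up for illustration:

package cn.itheima.bigdata.hadoop.mr.wordcount;

// Hypothetical helper, not from the original post: fails fast with
// ClassNotFoundException if the yarn jars are still missing from the build path.
public class YarnClasspathCheck {
    public static void main(String[] args) throws Exception {
        // The NoClassDefFoundError above means exactly this lookup would fail
        Class.forName("org.apache.hadoop.yarn.util.Apps");
        System.out.println("org.apache.hadoop.yarn.util.Apps is on the classpath");
    }
}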

package cn.itheima.bigdata.hadoop.mr.wordcount;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * Describes a job (which mapper class, which reducer class, where the input
 * files are, where the output goes, ...) and submits it to the Hadoop cluster.
 * @author duanhaitao@itcast.cn
 */
// cn.itheima.bigdata.hadoop.mr.wordcount.WordCountRunner
public class WordCountRunner {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Name the jar that ships the job's classes. This must be set before
        // Job.getInstance(conf): getInstance copies the configuration, so
        // setting it afterwards (as in the original code) has no effect.
        conf.set("mapreduce.job.jar", "wcount.jar");
        Job wcjob = Job.getInstance(conf);
        // The jar that contains this job's resources
        wcjob.setJarByClass(WordCountRunner.class);
        // Which mapper class the job uses
        wcjob.setMapperClass(WordCountMapper.class);
        // Which reducer class the job uses
        wcjob.setReducerClass(WordCountReducer.class);
        // Key/value types emitted by the mapper
        wcjob.setMapOutputKeyClass(Text.class);
        wcjob.setMapOutputValueClass(LongWritable.class);
        // Key/value types emitted by the reducer
        wcjob.setOutputKeyClass(Text.class);
        wcjob.setOutputValueClass(LongWritable.class);
        // Where the raw input data lives
        FileInputFormat.setInputPaths(wcjob, "hdfs://yun12-01:9000/wc/srcdata");
        // Where the results are written
        FileOutputFormat.setOutputPath(wcjob, new Path("hdfs://yun12-01:9000/wc/output"));
        boolean res = wcjob.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
package cn.itheima.bigdata.hadoop.mr.wordcount;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WordCountReducer extends Reducer<Text, LongWritable, Text, LongWritable> {

    // key: hello, values: {1,1,1,1,1.....}
    @Override
    protected void reduce(Text key, Iterable<LongWritable> values, Context context)
            throws IOException, InterruptedException {
        // Accumulator for this word's occurrences
        long count = 0;
        for (LongWritable value : values) {
            count += value.get();
        }
        // Emit the <word, count> pair
        context.write(key, new LongWritable(count));
    }
}

package cn.itheima.bigdata.hadoop.mr.wordcount;

import java.io.IOException;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WordCountMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Get the content of one line of the file
        String line = value.toString();
        // Split the line into an array of words
        String[] words = StringUtils.split(line, " ");
        // Emit <word, 1> for each word
        for (String word : words) {
            context.write(new Text(word), new LongWritable(1));
        }
    }
}
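To see how the three classes fit together, trace a tiny hypothetical input file (contents invented for illustration):

hello world
hello hadoop

The mapper emits <hello,1>, <world,1>, <hello,1>, <hadoop,1>; the shuffle groups these by key into <hello,{1,1}>, <world,{1}>, <hadoop,{1}>; and the reducer sums each group, so the output directory ends up containing:

hadoop	1
hello	2
world	1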


