Setting up a Hadoop development environment with Maven


I won't go over how to use Maven again; there are plenty of tutorials online and it has changed little over the years. This article only covers how to set up a Hadoop development environment with it.
1. Create the project

mvn archetype:generate -DgroupId=my.hadoopstudy -DartifactId=hadoopstudy -DarchetypeArtifactId=maven-archetype-quickstart -DinteractiveMode=false
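For reference, the maven-archetype-quickstart archetype generates a standard Maven layout roughly like the one below (App.java and AppTest.java are just placeholder classes created by the archetype and can be deleted):

hadoopstudy/
    pom.xml
    src/main/java/my/hadoopstudy/App.java
    src/test/java/my/hadoopstudy/AppTest.java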
2. Add the Hadoop dependencies hadoop-common, hadoop-hdfs, and hadoop-client to pom.xml. The resulting pom.xml looks like this:
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>my.hadoopstudy</groupId>
    <artifactId>hadoopstudy</artifactId>
    <packaging>jar</packaging>
    <version>1.0-SNAPSHOT</version>
    <name>hadoopstudy</name>
    <url>http://maven.apache.org</url>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>
    </dependencies>
</project>
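Note: hadoop-client is an umbrella artifact that should already pull in the common and HDFS client libraries transitively, so listing hadoop-common and hadoop-hdfs explicitly is mainly for clarity; keeping all three, as above, does no harm.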
3. Test
3.1 First, test HDFS access. This assumes the Hadoop cluster set up in the previous Hadoop article. The test class is as follows:
package my.hadoopstudy.dfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.InputStream;
import java.net.URI;

public class Test {
    public static void main(String[] args) throws Exception {
        String uri = "hdfs://9.111.254.189:9000/";
        Configuration config = new Configuration();
        FileSystem fs = FileSystem.get(URI.create(uri), config);

        // List all files and directories under /user/fkong/ on HDFS
        FileStatus[] statuses = fs.listStatus(new Path("/user/fkong"));
        for (FileStatus status : statuses) {
            System.out.println(status);
        }

        // Create a file under /user/fkong/ on HDFS and write one line of text to it
        FSDataOutputStream os = fs.create(new Path("/user/fkong/test.log"));
        os.write("Hello World!".getBytes());
        os.flush();
        os.close();

        // Print the content of the file just created under /user/fkong/
        InputStream is = fs.open(new Path("/user/fkong/test.log"));
        IOUtils.copyBytes(is, System.out, 1024, true);
    }
}
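If you rerun the test you may want to remove the file it created. The following is a minimal sketch (not from the original article, the helper class name is made up) showing the corresponding FileSystem calls, assuming the same cluster URI and path as above:

package my.hadoopstudy.dfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

// Hypothetical helper: removes the test file created by the class above.
public class Cleanup {
    public static void main(String[] args) throws Exception {
        Configuration config = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://9.111.254.189:9000/"), config);
        Path testFile = new Path("/user/fkong/test.log");
        if (fs.exists(testFile)) {
            // Second argument means "recursive"; false is enough for a single file
            fs.delete(testFile, false);
        }
        fs.close();
    }
}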

3.2 Test a MapReduce job
The test code is fairly simple:

package my.hadoopstudy.mapreduce;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.GenericOptionsParser;

import java.io.IOException;

public class EventCount {

    public static class MyMapper extends Mapper<Object, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text event = new Text();

        public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
            // The event name is everything before the first space in the line
            int idx = value.toString().indexOf(" ");
            if (idx > 0) {
                String e = value.toString().substring(0, idx);
                event.set(e);
                context.write(event, one);
            }
        }
    }

    public static class MyReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        private IntWritable result = new IntWritable();

        public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException, InterruptedException {
            // Sum up the counts for each event name
            int sum = 0;
            for (IntWritable val : values) {
                sum += val.get();
            }
            result.set(sum);
            context.write(key, result);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
        if (otherArgs.length < 2) {
            System.err.println("Usage: EventCount <in> <out>");
            System.exit(2);
        }
        Job job = Job.getInstance(conf, "event count");
        job.setJarByClass(EventCount.class);
        job.setMapperClass(MyMapper.class);
        job.setCombinerClass(MyReducer.class);
        job.setReducerClass(MyReducer.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);
        FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
        FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
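Note that MyReducer is registered both as the combiner and as the reducer. This works because summing counts is associative and commutative: partial sums computed on the map side give the same final totals while cutting down the amount of data shuffled to the reducers.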

Run "mvn package" to build the jar hadoopstudy-1.0-SNAPSHOT.jar (it is produced under the project's target/ directory), then copy the jar to the Hadoop installation directory.
Assume we want to analyze the event information in a few log files and count how many times each type of event occurs, so create the following directory and files:

/tmp/input/event.log.1
/tmp/input/event.log.2
/tmp/input/event.log.3

Since this is just an example, all three files can have the same content. Suppose each file looks like this:

JOB_NEW ...
JOB_NEW ...
JOB_FINISH ...
JOB_NEW ...
JOB_FINISH ...

Then copy these files to HDFS:
$ bin/hdfs dfs -put /tmp/input /user/fkong/input

Run the MapReduce job:

$ bin/hadoop jar hadoopstudy-1.0-SNAPSHOT.jar my.hadoopstudy.mapreduce.EventCount /user/fkong/input /user/fkong/output

Check the results:

$ bin/hdfs dfs -cat /user/fkong/output/part-r-00000  
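Assuming each of the three event.log files contains exactly the five sample lines shown earlier, the output should look roughly like this (keys and counts are tab-separated):

JOB_FINISH	6
JOB_NEW	9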