

Processing XML Files with a Java MapReduce Job

  1. Define a custom XML handler class that extends `DefaultHandler`
  2. Parse the XML with SAX
  3. Write the MR (Mapper/Reducer) program; a sample of the input it expects follows this list
  4. Write the Job driver
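
The mapper below reads its input with Hadoop's default `TextInputFormat`, which hands `map()` one line at a time, so each XML record must sit on a single line. A hypothetical input file (the contents here are an illustration, not from the original post) might look like this:

```xml
<words><word>hello world</word></words>
<words><word>hello hadoop</word></words>
```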
XmlSaxParser.class
```java
package cn.fywspring.hadoopd.wc_xml;

import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.helpers.DefaultHandler;

/**
 * Custom XML handler that extracts the word from each node.
 * @author Yiwan
 */
public class XmlSaxParser extends DefaultHandler {

    // stores the extracted word
    private String word;
    // remembers the tag currently being processed
    private String tagName;

    public String getTagName() {
        return tagName;
    }

    public void setTagName(String tagName) {
        this.tagName = tagName;
    }

    public String getWord() {
        return word;
    }

    public void setWord(String word) {
        this.word = word;
    }

    // called once; initialization can go here
    @Override
    public void startDocument() throws SAXException {
    }

    // called once per opening tag; parsing starts here
    @Override
    public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
        this.tagName = qName;
    }

    // called once per closing tag
    @Override
    public void endElement(String uri, String localName, String qName) throws SAXException {
        this.tagName = null;
    }

    // called once
    @Override
    public void endDocument() throws SAXException {
    }

    // called repeatedly; extracts the text inside a tag, e.g. <a>value</a> --> value
    @Override
    public void characters(char[] ch, int start, int length) throws SAXException {
        if (this.tagName != null) {
            String data = new String(ch, start, length);
            if ("word".equals(this.tagName)) {
                this.word = data; // store the value so callers can fetch it via getWord()
            }
        }
    }
}
```
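
One caveat, added here as a note beyond the original post: the SAX specification allows the text of a single element to arrive across several `characters()` calls, so a handler that keeps only the last chunk it sees can truncate long values. A more defensive variant accumulates chunks in a `StringBuilder` and records the word when the element closes; this is a minimal sketch, and the class name `BufferedXmlSaxParser` is chosen here purely for illustration:

```java
package cn.fywspring.hadoopd.wc_xml;

import org.xml.sax.Attributes;
import org.xml.sax.helpers.DefaultHandler;

/** Sketch of a chunk-safe variant of XmlSaxParser (hypothetical name). */
public class BufferedXmlSaxParser extends DefaultHandler {

    private final StringBuilder buffer = new StringBuilder();
    private String tagName;
    private String word;

    public String getWord() {
        return word;
    }

    @Override
    public void startElement(String uri, String localName, String qName, Attributes attributes) {
        this.tagName = qName;
        buffer.setLength(0); // reset the buffer for each new element
    }

    @Override
    public void characters(char[] ch, int start, int length) {
        if ("word".equals(tagName)) {
            buffer.append(ch, start, length); // may run several times per element
        }
    }

    @Override
    public void endElement(String uri, String localName, String qName) {
        if ("word".equals(qName)) {
            word = buffer.toString(); // element complete, store its full text
        }
        tagName = null;
    }
}
```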

MyXMLReader.class

```java
package cn.fywspring.hadoopd.wc_xml;

import java.io.ByteArrayInputStream;

import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;

/**
 * Parses an XML fragment and pulls the target field out of its tags.
 * @author Yiwan
 */
public class MyXMLReader {

    public static void main(String[] args) {
        System.out.println(getValue("<words><word>hello</word></words>"));
    }

    /**
     * @param str the XML fragment to parse
     * @return the field extracted from the XML tags
     */
    public static String getValue(String str) {
        SAXParser parser = null;
        try {
            // create a SAX parser, which reads the XML as a stream of events
            parser = SAXParserFactory.newInstance().newSAXParser();
            // create an instance of the custom handler defined above
            XmlSaxParser xml = new XmlSaxParser();
            // parse() takes an InputStream and the handler instance
            parser.parse(new ByteArrayInputStream(str.getBytes()), xml);
            // fetch the extracted field through the getter and return it
            return xml.getWord();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return "";
    }
}
```
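
Running the `main` method above prints `hello`, confirming that the handler extracts the text between the `<word>` tags.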

WCMapper.class

```java
package cn.fywspring.hadoopd.wc_xml;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class WCMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        try {
            // each value is one line of the input file
            String string = value.toString();
            // extract the text between the <word> tags
            String text = MyXMLReader.getValue(string);
            if (!"".equals(text)) {
                // split the extracted text on spaces and emit (word, 1) pairs
                String[] words = text.split(" ");
                for (String word : words) {
                    context.write(new Text(word), new IntWritable(1));
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
```
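
For a line such as `<words><word>hello world</word></words>`, `getValue` returns `hello world` and the mapper emits `(hello, 1)` and `(world, 1)`. Allocating a new `Text` and `IntWritable` per word works, but a common Hadoop idiom (not used in the original) is to reuse a single instance of each and call `set()` before every `write`.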

WCReducer.class

```java
package cn.fywspring.hadoopd.wc_xml;

import java.io.IOException;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Reducer;

public class WCReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

    @Override
    protected void reduce(Text key, Iterable<IntWritable> values, Context context)
            throws IOException, InterruptedException {
        // sum the counts emitted for this word by all mappers
        int count = 0;
        for (IntWritable value : values) {
            count += value.get();
        }
        context.write(key, new IntWritable(count));
    }
}
```
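
Because the reduce step is a pure sum, the same class can double as a combiner to shrink the shuffle. This is an optional addition, not in the original driver: calling `job.setCombinerClass(WCReducer.class);` in `WCDriver` would enable it.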

WCDriver.class

```java
package cn.fywspring.hadoopd.wc_xml;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WCDriver {

    public static void main(String[] args) {
        try {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf);

            job.setJarByClass(WCDriver.class);
            job.setMapperClass(WCMapper.class);
            job.setReducerClass(WCReducer.class);

            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(IntWritable.class);

            // HDFS input and output paths; the output directory
            // must not exist before the job runs
            FileInputFormat.setInputPaths(job, new Path("hdfs://hadoop01:9000/wc_xml"));
            FileOutputFormat.setOutputPath(job, new Path("hdfs://hadoop01:9000/wc_xml/result"));

            job.waitForCompletion(true);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
```
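
To run the job, package the classes into a jar and submit it with `hadoop jar` on a cluster that can reach `hdfs://hadoop01:9000`. Assuming the hypothetical two-line input shown earlier, the result directory would contain a file such as `part-r-00000` with tab-separated counts, sorted by key:

```
hadoop	1
hello	2
world	1
```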