【云星数据---Apache Flink实战系列(精品版)】:Apache Flink批处理API详解与编程实战026--DataSet实用API详解026

来源:互联网 发布:夏宽大师 淘宝 编辑:程序博客网 时间:2024/06/07 23:55

一、Flink DataSet定制API详解(JAVA版) -003

Reduce

以element为粒度,对element逐个进行合并操作,最后只能形成一个结果。

执行程序:

package code.book.batch.dataset.advance.api;import org.apache.flink.api.common.functions.ReduceFunction;import org.apache.flink.api.java.DataSet;import org.apache.flink.api.java.ExecutionEnvironment;public class ReduceFunction001java {    public static void main(String[] args) throws Exception {        // 1.设置运行环境,准备运行的数据        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();        DataSet<Integer> text = env.fromElements(1, 2, 3, 4, 5, 6,7);        //2.对DataSet的元素进行合并,这里是计算累加和        DataSet<Integer> text2 = text.reduce(new ReduceFunction<Integer>() {            @Override            public Integer reduce(Integer intermediateResult, Integer next) throws Exception {                return intermediateResult + next;            }        });        text2.print();        //3.对DataSet的元素进行合并,这里是计算累乘积        DataSet<Integer> text3 = text.reduce(new ReduceFunction<Integer>() {            @Override            public Integer reduce(Integer intermediateResult, Integer next) throws Exception {                return intermediateResult * next;            }        });        text3.print();        //4.对DataSet的元素进行合并,逻辑可以写的很复杂        DataSet<Integer> text4 = text.reduce(new ReduceFunction<Integer>() {            @Override            public Integer reduce(Integer intermediateResult, Integer next) throws Exception {                if (intermediateResult % 2 == 0) {                    return intermediateResult + next;                } else {                    return intermediateResult * next;                }            }        });        text4.print();        //5.对DataSet的元素进行合并,可以看出intermediateResult是临时合并结果,next是下一个元素        DataSet<Integer> text5 = text.reduce(new ReduceFunction<Integer>() {            @Override            public Integer reduce(Integer intermediateResult, Integer next) throws Exception {                System.out.println("intermediateResult=" + intermediateResult + " ,next=" + next);                return intermediateResult + 
next;            }        });        text5.collect();    }}

执行结果:

text2.print():
28
text3.print():
5040
text4.print():
157
text5.collect():
intermediateResult=1 ,next=2
intermediateResult=3 ,next=3
intermediateResult=6 ,next=4
intermediateResult=10 ,next=5
intermediateResult=15 ,next=6
intermediateResult=21 ,next=7

reduceGroup

对每一组的元素分别进行合并操作。与reduce类似,不过它能为每一组产生一个结果。如果没有分组,就当作一个分组,此时和reduce一样,只会产生一个结果。

执行程序:

package code.book.batch.dataset.advance.api;import org.apache.flink.api.common.functions.GroupReduceFunction;import org.apache.flink.api.java.DataSet;import org.apache.flink.api.java.ExecutionEnvironment;import org.apache.flink.api.java.tuple.Tuple2;import org.apache.flink.util.Collector;import java.util.Iterator;public class GroupReduceFunction001java {    public static void main(String[] args) throws Exception {        // 1.设置运行环境,准备运行的数据        final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();        DataSet<Integer> text = env.fromElements(1, 2, 3, 4, 5, 6, 7);        //2.对DataSet的元素进行合并,这里是计算累加和        DataSet<Integer> text2 = text.reduceGroup(new GroupReduceFunction<Integer, Integer>() {            @Override            public void reduce(Iterable<Integer> iterable,             Collector<Integer> collector) throws Exception {                int sum = 0;                Iterator<Integer> itor = iterable.iterator();                while (itor.hasNext()) {                    sum += itor.next();                }                collector.collect(sum);            }        });        text2.print();        //3.对DataSet的元素进行分组合并,这里是分别计算偶数和奇数的累加和        DataSet<Tuple2<Integer, Integer>> text3 = text.reduceGroup(        new GroupReduceFunction<Integer, Tuple2<Integer, Integer>>() {            @Override            public void reduce(Iterable<Integer> iterable,            Collector<Tuple2<Integer, Integer>> collector)throws Exception {                int sum0 = 0;                int sum1 = 0;                Iterator<Integer> itor = iterable.iterator();                while (itor.hasNext()) {                    int v = itor.next();                    if (v % 2 == 0) {                        sum0 += v;                    } else {                        sum1 += v;                    }                }                collector.collect(new Tuple2<Integer, Integer>(sum0, sum1));            }        });        text3.print();        
//4.对DataSet的元素进行分组合并,这里是对分组后的数据进行合并操作,统计每个人的工资总和        //(每个分组会合并出一个结果)        DataSet<Tuple2<String, Integer>> data = env.fromElements(        new Tuple2("zhangsan", 1000), new Tuple2("lisi", 1001),         new Tuple2("zhangsan", 3000), new Tuple2("lisi", 1002));        //4.1根据name进行分组        DataSet<Tuple2<String, Integer>> data2 = data.groupBy(0).reduceGroup(        new GroupReduceFunction<Tuple2<String, Integer>, Tuple2<String, Integer>>() {            @Override            public void reduce(Iterable<Tuple2<String, Integer>> iterable,             Collector<Tuple2<String, Integer>> collector) throws Exception {                int salary = 0;                String name = "";                Iterator<Tuple2<String, Integer>> itor = iterable.iterator();                //4.2统计每个人的工资总和                while (itor.hasNext()) {                    Tuple2<String, Integer> t = itor.next();                    name = t.f0;                    salary += t.f1;                }                collector.collect(new Tuple2(name, salary));            }        });        data2.print();    }}

执行结果:

text2.print():
28
text3.print():
(12,16)
data2.print():
(lisi,2003)
(zhangsan,4000)
阅读全文
0 0
原创粉丝点击