Native Integration of Storm and JDBC


The package org.apache.storm.jdbc.bolt contains two JDBC bolt implementations, JdbcInsertBolt and JdbcLookupBolt, both of which extend the abstract class AbstractJdbcBolt. With raw JDBC or a C3P0 connection pool, high concurrency can exhaust the available database connections; the official storm-jdbc module solves both this connection problem and the JDBC initialization problem.

Partial source of AbstractJdbcBolt:
It extends BaseRichBolt, and its prepare method performs the initialization work.
public abstract class AbstractJdbcBolt extends BaseRichBolt {
    private static final Logger LOG = LoggerFactory.getLogger(AbstractJdbcBolt.class);

    protected OutputCollector collector;
    protected transient JdbcClient jdbcClient;
    protected String configKey;
    protected Integer queryTimeoutSecs;
    protected ConnectionProvider connectionProvider;

    public void prepare(Map map, TopologyContext topologyContext, OutputCollector collector) {
        this.collector = collector;
        this.connectionProvider.prepare();
        if (this.queryTimeoutSecs == null) {
            this.queryTimeoutSecs = Integer.valueOf(
                    Integer.parseInt(map.get("topology.message.timeout.secs").toString()));
        }
        this.jdbcClient = new JdbcClient(this.connectionProvider, this.queryTimeoutSecs.intValue());
    }

    public AbstractJdbcBolt(ConnectionProvider connectionProvider) {
        this.connectionProvider = connectionProvider;
    }

    public void cleanup() {
        this.connectionProvider.cleanup();
    }
}
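To build custom behavior on top of this base class, a subclass only needs to pass the ConnectionProvider through its constructor and implement execute() against the shared jdbcClient. Below is a minimal sketch; the class name and SQL are illustrative assumptions, not part of storm-jdbc.

import org.apache.storm.jdbc.bolt.AbstractJdbcBolt;
import org.apache.storm.jdbc.common.ConnectionProvider;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Tuple;

// Hypothetical bolt for illustration only.
public class UserDeleteBolt extends AbstractJdbcBolt {
    public UserDeleteBolt(ConnectionProvider connectionProvider) {
        super(connectionProvider);
    }

    @Override
    public void execute(Tuple tuple) {
        try {
            // jdbcClient was created once in prepare() from the pooled provider,
            // so no connection is opened per tuple.
            jdbcClient.executeSql(
                    "delete from user where user_id = " + tuple.getIntegerByField("user_id"));
            collector.ack(tuple);
        } catch (Exception e) {
            collector.fail(tuple);
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // terminal bolt: emits nothing downstream
    }
}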

Maven dependency

<dependency>
    <groupId>org.apache.storm</groupId>
    <artifactId>storm-jdbc</artifactId>
    <version>${storm.version}</version>
    <type>jar</type>
</dependency>
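Note that storm-jdbc does not bundle a JDBC driver. Since the example below uses MySQL's com.mysql.jdbc.jdbc2.optional.MysqlDataSource, the MySQL connector must be declared as well; the version shown here is illustrative.

<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <version>5.1.38</version>
</dependency>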


JdbcMapper maps a tuple to a row of the table; the column order in columnSchema must match the column order of the table.

List<Column> columnSchema = Lists.newArrayList(
        new Column("user_id", java.sql.Types.INTEGER),
        new Column("user_name", java.sql.Types.VARCHAR),
        new Column("dept_name", java.sql.Types.VARCHAR));
JdbcMapper simpleJdbcMapper = new SimpleJdbcMapper(columnSchema);
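Alternatively, when a prepared ConnectionProvider is available at build time, SimpleJdbcMapper can derive the schema from the table's metadata instead of listing the columns by hand; this is the constructor used by AbstractUserTopology below.

// Schema is read from the "user" table's metadata at construction time,
// so the provider must already have been prepared.
JdbcMapper tableMapper = new SimpleJdbcMapper("user", connectionProvider);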

The implementation code is as follows:


public class UserSpout implements IRichSpout {
    boolean isDistributed;
    SpoutOutputCollector collector;

    public static final List<Values> rows = Lists.newArrayList(
            new Values(1, "peter", System.currentTimeMillis()),
            new Values(2, "bob", System.currentTimeMillis()),
            new Values(3, "alice", System.currentTimeMillis()));

    public UserSpout() {
        this(true);
    }

    public UserSpout(boolean isDistributed) {
        this.isDistributed = isDistributed;
    }

    public boolean isDistributed() {
        return this.isDistributed;
    }

    @SuppressWarnings("rawtypes")
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
    }

    public void close() {
    }

    public void nextTuple() {
        final Random rand = new Random();
        // the bound must be rows.size(), not rows.size() - 1,
        // otherwise the last row would never be emitted
        final Values row = rows.get(rand.nextInt(rows.size()));
        System.out.println("row:" + row);
        this.collector.emit(row);
    }

    public void ack(Object msgId) {
    }

    public void fail(Object msgId) {
    }

    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("user_id", "user_name", "create_date"));
    }

    @Override
    public void activate() {
    }

    @Override
    public void deactivate() {
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }
}
Next, define an abstract class that encapsulates the topology setup. It uses HikariCPConnectionProvider, the HikariCP-backed data source that ships with storm-jdbc; it is thread-safe and implements the ConnectionProvider interface.
That interface has three methods, prepare(), getConnection(), and cleanup(); getConnection() returns a JDBC Connection.
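A sketch of that contract (comments added here; see org.apache.storm.jdbc.common.ConnectionProvider for the authoritative definition):

import java.io.Serializable;
import java.sql.Connection;

public interface ConnectionProvider extends Serializable {
    void prepare();              // initialize the underlying pool, e.g. HikariCP
    Connection getConnection();  // hand out a pooled JDBC Connection
    void cleanup();              // release pool resources
}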
public abstract class AbstractUserTopology {
    private static final List<String> setupSqls = Lists.newArrayList(
            "drop table if exists user",
            "drop table if exists department",
            "drop table if exists user_department",
            "create table if not exists user (user_id integer, user_name varchar(100), dept_name varchar(100), create_date date)",
            "create table if not exists department (dept_id integer, dept_name varchar(100))",
            "create table if not exists user_department (user_id integer, dept_id integer)",
            "insert into department values (1, 'R&D')",
            "insert into department values (2, 'Finance')",
            "insert into department values (3, 'HR')",
            "insert into department values (4, 'Sales')",
            "insert into user_department values (1, 1)",
            "insert into user_department values (2, 2)",
            "insert into user_department values (3, 3)",
            "insert into user_department values (4, 4)"
    );

    protected UserSpout userSpout;
    protected JdbcMapper jdbcMapper;
    protected JdbcLookupMapper jdbcLookupMapper;
    protected ConnectionProvider connectionProvider;

    protected static final String TABLE_NAME = "user";
    protected static final String JDBC_CONF = "jdbc.conf";
    protected static final String SELECT_QUERY =
            "select dept_name from department, user_department"
            + " where department.dept_id = user_department.dept_id"
            + " and user_department.user_id = ?";

    public void execute(String[] args) throws Exception {
        Map map = Maps.newHashMap();
        map.put("dataSourceClassName", "com.mysql.jdbc.jdbc2.optional.MysqlDataSource");
        // the data source URL must be set, otherwise the pool cannot reach the database
        map.put("dataSource.url", "jdbc:mysql://localhost:3306/sparkDB");
        map.put("dataSource.user", "hadoop");
        map.put("dataSource.password", "hadoop");

        Config config = new Config();
        config.put(JDBC_CONF, map);

        ConnectionProvider connectionProvider = new HikariCPConnectionProvider(map);
        connectionProvider.prepare();
        int queryTimeoutSecs = 60;
        JdbcClient jdbcClient = new JdbcClient(connectionProvider, queryTimeoutSecs);
        for (String sql : setupSqls) {
            System.out.println("sql:" + sql);
            jdbcClient.executeSql(sql);
        }

        this.userSpout = new UserSpout();
        this.jdbcMapper = new SimpleJdbcMapper(TABLE_NAME, connectionProvider);
        connectionProvider.cleanup();

        Fields outputFields = new Fields("user_id", "user_name", "dept_name", "create_date");
        // look up dept_name by user_id
        List<Column> queryParamColumns = Lists.newArrayList(new Column("user_id", Types.INTEGER));
        this.jdbcLookupMapper = new SimpleJdbcLookupMapper(outputFields, queryParamColumns);
        this.connectionProvider = new HikariCPConnectionProvider(map);

        if (args.length < 5) {
            LocalCluster cluster = new LocalCluster();
            cluster.submitTopology("JDBCTopo", config, getTopology());
            Thread.sleep(10000);
            cluster.shutdown();
        } else {
            // args[4] is the topology name, so a remote submit needs five arguments;
            // the original checked args.length != 4, which read args[4] out of bounds
            StormSubmitter.submitTopology(args[4], config, getTopology());
        }
    }

    public abstract StormTopology getTopology();
}
public class UserPersistanceTopology extends AbstractUserTopology {
    private static final String USER_SPOUT = "USER_SPOUT";
    private static final String LOOKUP_BOLT = "LOOKUP_BOLT";
    private static final String PERSISTANCE_BOLT = "PERSISTANCE_BOLT";

    public static void main(String[] args) throws Exception {
        new UserPersistanceTopology().execute(args);
    }

    @Override
    public StormTopology getTopology() {
        JdbcLookupBolt departmentLookupBolt =
                new JdbcLookupBolt(connectionProvider, SELECT_QUERY, this.jdbcLookupMapper);

        // must specify column schema when providing a custom query
        List<Column> schemaColumns = Lists.newArrayList(
                new Column("create_date", Types.DATE),
                new Column("dept_name", Types.VARCHAR),
                new Column("user_id", Types.INTEGER),
                new Column("user_name", Types.VARCHAR));
        JdbcMapper mapper = new SimpleJdbcMapper(schemaColumns);
        JdbcInsertBolt userPersistanceBolt = new JdbcInsertBolt(connectionProvider, mapper)
                .withInsertQuery("insert into user (create_date, dept_name, user_id, user_name) values (?,?,?,?)");

        // userSpout ==> departmentLookupBolt ==> userPersistanceBolt
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout(USER_SPOUT, this.userSpout, 1);
        builder.setBolt(LOOKUP_BOLT, departmentLookupBolt, 1).shuffleGrouping(USER_SPOUT);
        builder.setBolt(PERSISTANCE_BOLT, userPersistanceBolt, 1).shuffleGrouping(LOOKUP_BOLT);
        return builder.createTopology();
    }
}

