Calling HDFS HA from Java



To access an HA-enabled HDFS cluster from Java, the client Configuration must carry the logical nameservice, the RPC addresses of both NameNodes, and the failover proxy provider. The utility class below sets these values programmatically before obtaining the FileSystem.

HDFS utility class:

package com.yinker.tinyv.utils;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;

/**
 * HDFS utility class
 *
 * @author yangluan
 * 2017-07-26 14:45:12
 */
public class HdfsUtils {

    private static final Logger logger = Logger.getLogger(HdfsUtils.class);

    /**
     * Apply the HDFS HA client settings so the logical nameservice can be
     * resolved and the client can fail over between the two NameNodes.
     */
    private static void configureHA(Configuration conf) {
        conf.set("fs.defaultFS", ConfigUtils.FS_DEFAULTFS);
        conf.set("dfs.nameservices", ConfigUtils.DFS_NAMESERVICE);
        conf.set("dfs.ha.namenodes." + ConfigUtils.DFS_NAMESERVICE, ConfigUtils.DFS_HA_NAMENODES_NS1);
        conf.set("dfs.namenode.rpc-address." + ConfigUtils.DFS_NAMESERVICE + ".nn1", ConfigUtils.DFS_NAMENODE_RPC_ADDRESS_NS1_NN1);
        conf.set("dfs.namenode.rpc-address." + ConfigUtils.DFS_NAMESERVICE + ".nn2", ConfigUtils.DFS_NAMENODE_RPC_ADDRESS_NS1_NN2);
        conf.set("dfs.client.failover.proxy.provider." + ConfigUtils.DFS_NAMESERVICE, ConfigUtils.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_NS1);
    }

    /**
     * Upload a local file to HDFS.
     *
     * @param src  local source path
     * @param dst  HDFS destination path
     * @param conf Hadoop configuration
     * @return true on success, false on failure
     */
    public static boolean putHDFS(String src, String dst, Configuration conf) {
        configureHA(conf);
        Path dstPath = new Path(ConfigUtils.FS_DEFAULTFS + dst);
        try {
            FileSystem hdfs = dstPath.getFileSystem(conf);
            // Overwrite semantics: remove the destination first if it already exists.
            if (hdfs.exists(dstPath)) {
                hdfs.delete(dstPath, true);
            }
            hdfs.copyFromLocalFile(false, new Path(src), dstPath);
        } catch (IOException ie) {
            logger.error(ie.getMessage(), ie);
            return false;
        }
        return true;
    }

    /**
     * Recursively delete a path on HDFS.
     *
     * @param src  HDFS path to delete
     * @param conf Hadoop configuration
     * @return true on success, false on failure
     */
    public static boolean deleteHDFS(String src, Configuration conf) {
        configureHA(conf);
        Path dstPath = new Path(ConfigUtils.FS_DEFAULTFS + src);
        try {
            FileSystem hdfs = dstPath.getFileSystem(conf);
            if (hdfs.exists(dstPath)) {
                hdfs.delete(dstPath, true);
            }
        } catch (IOException ie) {
            logger.error(ie.getMessage(), ie);
            return false;
        }
        return true;
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        String src = "C:\\Users\\think\\Desktop\\hello.sh";
        String dst = "/user/oozie/root/hive-test-yla";
        boolean status = putHDFS(src, dst, conf);
        System.out.println("status=" + status);
    }
}
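The main method above only exercises putHDFS. As a hypothetical companion snippet (the class name HdfsDeleteExample is made up for illustration), deleteHDFS would be called the same way for the example path used above:

package com.yinker.tinyv.utils;

import org.apache.hadoop.conf.Configuration;

// Hypothetical usage example for deleteHDFS; path and style mirror HdfsUtils.main above.
public class HdfsDeleteExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        boolean deleted = HdfsUtils.deleteHDFS("/user/oozie/root/hive-test-yla", conf);
        System.out.println("deleted=" + deleted);
    }
}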


Configuration file:

#ha
fs.defaultFS=hdfs://xiaovservice:8020
dfs.nameservices=xiaovservice
dfs.ha.namenodes.xiaovservice=nn1,nn2
dfs.namenode.rpc.address.xiaovservice.nn1=hdfs://xiaovmaster01:8020
dfs.namenode.rpc.address.xiaovservice.nn2=hdfs://xiaovmaster02:8020
dfs.client.failover.proxy.provider.xiaovservice=org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
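The HdfsUtils class relies on a ConfigUtils class that this post does not show. A minimal sketch is given below, assuming it simply loads the properties file above from the classpath and exposes the values as the constants HdfsUtils expects; the file name "hdfs.properties" is an assumption, not something taken from the original post.

package com.yinker.tinyv.utils;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

/**
 * Hypothetical sketch of the ConfigUtils class used by HdfsUtils.
 * It loads the HA settings from a classpath properties file; the file name
 * "hdfs.properties" is an assumption.
 */
public class ConfigUtils {

    private static final Properties PROPS = new Properties();

    static {
        // Load the properties file from the classpath before the constants below are read.
        try (InputStream in = ConfigUtils.class.getClassLoader()
                .getResourceAsStream("hdfs.properties")) {
            if (in != null) {
                PROPS.load(in);
            }
        } catch (IOException e) {
            throw new ExceptionInInitializerError(e);
        }
    }

    public static final String FS_DEFAULTFS =
            PROPS.getProperty("fs.defaultFS");
    public static final String DFS_NAMESERVICE =
            PROPS.getProperty("dfs.nameservices");
    public static final String DFS_HA_NAMENODES_NS1 =
            PROPS.getProperty("dfs.ha.namenodes.xiaovservice");
    public static final String DFS_NAMENODE_RPC_ADDRESS_NS1_NN1 =
            PROPS.getProperty("dfs.namenode.rpc.address.xiaovservice.nn1");
    public static final String DFS_NAMENODE_RPC_ADDRESS_NS1_NN2 =
            PROPS.getProperty("dfs.namenode.rpc.address.xiaovservice.nn2");
    public static final String DFS_CLIENT_FAILOVER_PROXY_PROVIDER_NS1 =
            PROPS.getProperty("dfs.client.failover.proxy.provider.xiaovservice");
}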



