Java读写HDFS文件
来源:互联网 发布:淘宝怎么看自己评价的 编辑:程序博客网 时间:2024/05/20 17:10
一、依赖包maven路径
- <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-client -->
- <dependency>
- <groupId>org.apache.hadoop</groupId>
- <artifactId>hadoop-client</artifactId>
- <version>2.7.3</version>
- <scope>compile</scope> <!-- runtime scope would hide the Hadoop API from the compiler; the code below uses it at compile time -->
- </dependency>
二、针对HDFS文件的操作类HDFSOperate
package com.hdfs.util;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.net.URI;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
- /**
- * 针对HDFS文件的操作类
- */
- public class HDFSOperate {
- /**
- * 新增(创建)HDFS文件
- * @param hdfs
- */
- public void createHDFS(String hdfs){
- try {
- Configuration conf = new Configuration();
- conf.setBoolean("dfs.support.append", true);
- conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER");
- conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true");
- FileSystem fs = FileSystem.get(URI.create(hdfs), conf);
- Path path = new Path(hdfs);
- //判断HDFS文件是否存在
- if(fs.exists(path)){
- //System.out.println(hdfs + "已经存在!!!");
- }else{
- FSDataOutputStream hdfsOutStream = fs.create(new Path(hdfs));
- hdfsOutStream.close();
- }
- fs.close();
- } catch (Exception e) {
- // TODO: handle exception
- e.printStackTrace();
- }
- }
- /**
- * 在HDFS文件后面追加内容
- * @param hdfs
- * @param appendContent
- */
- public void appendHDFS(String hdfs,String appendContent){
- try {
- Configuration conf = new Configuration();
- conf.setBoolean("dfs.support.append", true);
- conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER");
- conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true");
- FileSystem fs = FileSystem.get(URI.create(hdfs), conf);
- Path path = new Path(hdfs);
- //判断HDFS文件是否存在
- if(fs.exists(path)){
- //System.out.println(hdfs + "已经存在!!!");
- }else{
- FSDataOutputStream hdfsOutStream = fs.create(new Path(hdfs));
- hdfsOutStream.close();
- }
- FSDataOutputStream hdfsOutStream = fs.append(new Path(hdfs));
- byte [] str = appendContent.getBytes("UTF-8");//防止中文乱码
- hdfsOutStream.write(str);
- hdfsOutStream.close();
- fs.close();
- } catch (Exception e) {
- // TODO: handle exception
- e.printStackTrace();
- }
- }
- /**
- * 修改HDFS文件内容 / 删除就是替换为空
- * @param hdfs : hdfs文件路径
- * @param sourceContent :要修改的hdfs文件内容
- * @param changeContent :需要修改成的文件内容
- */
- public void change(String hdfs,String sourceContent,String changeContent){
- try {
- Configuration conf = new Configuration();
- conf.setBoolean("dfs.support.append", true);
- conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER");
- conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true");
- FileSystem fs = FileSystem.get(URI.create(hdfs), conf);
- Path path = new Path(hdfs);
- //判断HDFS文件是否存在
- if(fs.exists(path)){
- //System.out.println(hdfs + "已经存在!!!");
- FSDataInputStream in = fs.open(path);
- BufferedReader bf=new BufferedReader(new InputStreamReader(in));//防止中文乱码
- String totalString = "";
- String line = null;
- while ((line = bf.readLine()) != null) {
- totalString += line;
- }
- String changeString = totalString.replace(sourceContent, changeContent);
- FSDataOutputStream hdfsOutStream = fs.create(new Path(hdfs));
- byte [] str = changeString.getBytes("UTF-8");
- hdfsOutStream.write(str);
- hdfsOutStream.close();
- }else{
- //System.out.println(hdfs + "不存在,无需操作!!!");
- }
- fs.close();
- } catch (Exception e) {
- // TODO: handle exception
- e.printStackTrace();
- }
- }
- /**
- * 判断要追加的内容是否存在
- * @param hdfs
- * @param appendContent
- * @return
- */
- public Boolean isContentExist(String hdfs,String appendContent){
- try {
- Configuration conf = new Configuration();
- conf.setBoolean("dfs.support.append", true);
- conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER");
- conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true");
- FileSystem fs = FileSystem.get(URI.create(hdfs), conf);
- Path path = new Path(hdfs);
- //判断HDFS文件是否存在
- if(fs.exists(path)){
- //System.out.println(hdfs + "已经存在!!!");
- FSDataInputStream in = fs.open(path);
- BufferedReader bf=new BufferedReader(new InputStreamReader(in));//防止中文乱码
- String totalString = "";
- String line = null;
- while ((line = bf.readLine()) != null) {
- totalString += line;
- }
- if(totalString.contains(appendContent)){
- return true;
- }
- }else{
- //System.out.println(hdfs + "不存在,无需操作!!!");
- }
- fs.close();
- } catch (Exception e) {
- // TODO: handle exception
- e.printStackTrace();
- }
- return false;
- }
- public static void main(String[] args) throws IOException {
- String hdfs = "hdfs://192.168.168.200:9000/test/tes.txt";
- HDFSOperate hdfsOperate = new HDFSOperate();
- hdfsOperate.createHDFS(hdfs);
- hdfsOperate.appendHDFS(hdfs,"测试新增内容");
- //hdfsOperate.change(hdfs, "测试新增内容", "测试修改成功");
- }
- }
阅读全文
0 0
- hdfs java 文件读写
- java读写hdfs文件
- Java读写HDFS文件
- HDFS上文件处理、Java文件读写
- HDFS读写文件的java代码
- (6)Java 读写 hdfs文件或者目录
- hdfs java读写hdfs demo
- HDFS文件读写
- hdfs随机读写文件
- HDFS读写文件BUG
- hdfs文件读写bug2
- HDFS文件读写流程
- hadoop HDFS读写文件
- HDFS读写文件流程
- HDFS文件读写过程
- HDFS读写文件操作
- HDFS读写文件流程
- HDFS文件读写流程
- 你真的了解:IIS连接数、IIS并发连接数、IIS最大并发工作线程数、应用程序池的队列长度、应用程序池的...
- Type中的3个bool属性: IsGenericType , IsGenericTypeDefinition , IsGenericParameter 标签: 泛型TypeC# 2015-05-20
- 【CSP201609-2】火车购票
- Drozer的安装和使用(-)
- javax.el.PropertyNotFoundException
- Java读写HDFS文件
- Unity:线形和球形检测
- Linux之Redis安装
- keyCode
- PHP的数据类型
- [iOS]多国语言国际化
- 在新建的Qt项目中添加显示点云的部件
- The Unique MST(次小生成树)
- js获得近六个月的时间