Hadoop的FileStatus简单使用
来源:互联网 发布:淘宝开店如何退保证金 编辑:程序博客网 时间:2024/06/05 19:46
Hadoop的FileStatus简单使用
FileStatus类继承关系:
FileStatus类的属性和方法:
输入文件信息:
blb@hadoop1:/home/blb/blb$ hdfs dfs -text /user/blb/libin/input/inputpath1.txt
hadoop a
spark a
hive a
hbase a
tachyon a
storm a
redis a
blb@hadoop1:/home/blb/blb$
代码:
import java.io.IOException;import org.apache.hadoop.conf.Configuration;import org.apache.hadoop.fs.FileStatus;import org.apache.hadoop.fs.FileSystem;import org.apache.hadoop.fs.Path;import org.apache.hadoop.fs.permission.FsPermission;import org.apache.hadoop.io.LongWritable;import org.apache.hadoop.io.NullWritable;import org.apache.hadoop.io.Text;import org.apache.hadoop.mapreduce.Job;import org.apache.hadoop.mapreduce.Mapper;import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;import org.apache.hadoop.util.GenericOptionsParser;public class GetStatusMapReduce {public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {Configuration conf = new Configuration();String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();if(otherArgs.length!=2){System.err.println("Usage databaseV1 <inputpath> <outputpath>");}conf.set("path", otherArgs[0]);Job job = Job.getInstance(conf, GetStatusMapReduce.class.getSimpleName() + "1");job.setJarByClass(GetStatusMapReduce.class);job.setMapOutputKeyClass(Text.class);job.setMapOutputValueClass(Text.class);job.setOutputKeyClass(Text.class);job.setOutputValueClass(NullWritable.class);job.setMapperClass(MyMapper1.class);job.setNumReduceTasks(0);job.setInputFormatClass(TextInputFormat.class);job.setOutputFormatClass(TextOutputFormat.class);FileInputFormat.addInputPath(job, new Path(otherArgs[0]));FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));job.waitForCompletion(true);}public static class MyMapper1 extends Mapper<LongWritable, Text, Text, NullWritable>{@Overrideprotected void map(LongWritable key, Text value, Mapper<LongWritable, Text, Text, NullWritable>.Context context)throws IOException, InterruptedException {FileSystem 
fileSystem=FileSystem.get(context.getConfiguration());FileStatus[] fileStatus=fileSystem.listStatus(new Path(context.getConfiguration().get("path")));int length = fileStatus.length;context.write(new Text("==================== 一次map函数 =============================="), NullWritable.get());context.write(new Text("length--"+length), NullWritable.get());for (FileStatus fs : fileStatus) {long accessTime = fs.getAccessTime(); long blockSize = fs.getBlockSize();Class<? extends FileStatus> class1 = fs.getClass();String group = fs.getGroup();long len = fs.getLen();long modificationTime = fs.getModificationTime();String owner = fs.getOwner();Path path = fs.getPath();FsPermission permission = fs.getPermission();short replication = fs.getReplication();//Path symlink = fs.getSymlink();String string = fs.toString();boolean directory = fs.isDirectory();boolean encrypted = fs.isEncrypted();boolean file = fs.isFile();context.write(new Text("===================================================================================="), NullWritable.get());context.write(new Text("accessTime--"+accessTime), NullWritable.get());context.write(new Text("blockSize--"+blockSize), NullWritable.get());context.write(new Text("class1--"+class1), NullWritable.get());context.write(new Text("group--"+group), NullWritable.get());context.write(new Text("len--"+len), NullWritable.get());context.write(new Text("modificationTime--"+modificationTime), NullWritable.get());context.write(new Text("owner--"+owner), NullWritable.get());context.write(new Text("path--"+path), NullWritable.get());context.write(new Text("permission--"+permission), NullWritable.get());context.write(new Text("replication--"+replication), NullWritable.get());//context.write(new Text("symlink--"+symlink), NullWritable.get());context.write(new Text("string--"+string), NullWritable.get());context.write(new Text("directory--"+directory), NullWritable.get());context.write(new Text("encrypted--"+encrypted), NullWritable.get());context.write(new 
Text("file--"+file), NullWritable.get());}}}}
输出:
==================== 一次map函数 ==============================length--1====================================================================================accessTime--1459406433657blockSize--268435456class1--class org.apache.hadoop.fs.FileStatusgroup--hadooplen--58modificationTime--1456301571884owner--blbpath--hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txtpermission--rw-r--r--replication--2string--FileStatus{path=hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txt; isDirectory=false; length=58; replication=2; blocksize=268435456; modification_time=1456301571884; access_time=1459406433657; owner=blb; group=hadoop; permission=rw-r--r--; isSymlink=false}directory--falseencrypted--falsefile--true==================== 一次map函数 ==============================length--1====================================================================================accessTime--1459406433657blockSize--268435456class1--class org.apache.hadoop.fs.FileStatusgroup--hadooplen--58modificationTime--1456301571884owner--blbpath--hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txtpermission--rw-r--r--replication--2string--FileStatus{path=hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txt; isDirectory=false; length=58; replication=2; blocksize=268435456; modification_time=1456301571884; access_time=1459406433657; owner=blb; group=hadoop; permission=rw-r--r--; isSymlink=false}directory--falseencrypted--falsefile--true==================== 一次map函数 ==============================length--1====================================================================================accessTime--1459406433657blockSize--268435456class1--class org.apache.hadoop.fs.FileStatusgroup--hadooplen--58modificationTime--1456301571884owner--blbpath--hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txtpermission--rw-r--r--replication--2string--FileStatus{path=hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txt; isDirectory=false; length=58; replication=2; blocksize=268435456; 
modification_time=1456301571884; access_time=1459406433657; owner=blb; group=hadoop; permission=rw-r--r--; isSymlink=false}directory--falseencrypted--falsefile--true==================== 一次map函数 ==============================length--1====================================================================================accessTime--1459406433657blockSize--268435456class1--class org.apache.hadoop.fs.FileStatusgroup--hadooplen--58modificationTime--1456301571884owner--blbpath--hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txtpermission--rw-r--r--replication--2string--FileStatus{path=hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txt; isDirectory=false; length=58; replication=2; blocksize=268435456; modification_time=1456301571884; access_time=1459406433657; owner=blb; group=hadoop; permission=rw-r--r--; isSymlink=false}directory--falseencrypted--falsefile--true==================== 一次map函数 ==============================length--1====================================================================================accessTime--1459406433657blockSize--268435456class1--class org.apache.hadoop.fs.FileStatusgroup--hadooplen--58modificationTime--1456301571884owner--blbpath--hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txtpermission--rw-r--r--replication--2string--FileStatus{path=hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txt; isDirectory=false; length=58; replication=2; blocksize=268435456; modification_time=1456301571884; access_time=1459406433657; owner=blb; group=hadoop; permission=rw-r--r--; isSymlink=false}directory--falseencrypted--falsefile--true==================== 一次map函数 ==============================length--1====================================================================================accessTime--1459406433657blockSize--268435456class1--class 
org.apache.hadoop.fs.FileStatusgroup--hadooplen--58modificationTime--1456301571884owner--blbpath--hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txtpermission--rw-r--r--replication--2string--FileStatus{path=hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txt; isDirectory=false; length=58; replication=2; blocksize=268435456; modification_time=1456301571884; access_time=1459406433657; owner=blb; group=hadoop; permission=rw-r--r--; isSymlink=false}directory--falseencrypted--falsefile--true==================== 一次map函数 ==============================length--1====================================================================================accessTime--1459406433657blockSize--268435456class1--class org.apache.hadoop.fs.FileStatusgroup--hadooplen--58modificationTime--1456301571884owner--blbpath--hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txtpermission--rw-r--r--replication--2string--FileStatus{path=hdfs://hadoop1:9000/user/blb/libin/input/inputpath1.txt; isDirectory=false; length=58; replication=2; blocksize=268435456; modification_time=1456301571884; access_time=1459406433657; owner=blb; group=hadoop; permission=rw-r--r--; isSymlink=false}directory--falseencrypted--falsefile--true
0 0
- Hadoop的FileStatus简单使用
- FileStatus的主要方法的使用(源码)
- Hadoop 用FileStatus类来查看HDFS中文件或目录的元信息
- Hadoop中的FileStatus、BlockLocation、LocatedBlocks、InputSplit
- hdfs FileStatus的accesstime的变更
- hadoop的Context简单使用
- hadoop的FileSplit简单使用
- hadoop之hdfs api的简单使用
- hadoop中conbine的简单使用《转》
- HDFS之filestatus(查看hdfs里的数据信息)
- Hadoop的基本配置及HDFS的简单使用
- hadoop的简单了解
- Hadoop 的简单介绍
- Hadoop-简单的MapReduce
- Hadoop的简单介绍
- Hadoop 的简单描述
- hadoop命令及hive数据库操作语句的简单使用
- hadoop所封装的RPC框架简单使用
- 关于JVM的常见问题(二)
- 如何实现友盟第三方登录与分享
- uboot下gpio驱动移植
- Android 上传头像自定义(剪切、平移,缩放)
- storm实战入门一
- Hadoop的FileStatus简单使用
- shiny-server 安装与配置、问题汇总
- 聚类分析
- [转载]Linux中profile、bashrc、bash_profile之间的区别和联系
- 使用O-LLVM和NDK对Android应用进行混淆
- List<E>类
- svn更改登录用户和密码
- Location 定位获取用户许可
- python3抓取百度贴吧帖子