Hadoop 2.2.0 core-site.xml — file system properties

来源:互联网 发布:亚马逊选择centos系统 编辑:程序博客网 时间:2024/06/01 07:44

<!-- file system properties -->

<property>
  <name>fs.defaultFS</name>
  <value>file:///</value>
  <description>The name of the default file system. A URI whose scheme and
    authority determine the FileSystem implementation. The uri's scheme
    determines the config property (fs.SCHEME.impl) naming the FileSystem
    implementation class. The uri's authority is used to determine the host,
    port, etc. for a filesystem.</description>
</property>

注释:

<property>
  <name>fs.default.name</name>
  <value>file:///</value>
  <description>Deprecated. Use (fs.defaultFS) property instead</description>
</property>

注释:

 

<property>
  <name>fs.trash.interval</name>
  <value>0</value>
  <description>Number of minutes after which the checkpoint gets deleted.
    If zero, the trash feature is disabled.</description>
</property>

注释:

<property>
  <name>fs.trash.checkpoint.interval</name>
  <value>0</value>
  <description>Number of minutes between trash checkpoints. Should be smaller
    or equal to fs.trash.interval. Every time the checkpointer runs it creates
    a new checkpoint out of current and removes checkpoints created more than
    fs.trash.interval minutes ago.</description>
</property>

注释:

<property>
  <name>fs.file.impl</name>
  <value>org.apache.hadoop.fs.LocalFileSystem</value>
  <description>The FileSystem for file: uris.</description>
</property>

注释:

<property>
  <name>fs.hdfs.impl</name>
  <value>org.apache.hadoop.hdfs.DistributedFileSystem</value>
  <description>The FileSystem for hdfs: uris.</description>
</property>

注释:

<property>
  <name>fs.viewfs.impl</name>
  <value>org.apache.hadoop.fs.viewfs.ViewFileSystem</value>
  <description>The FileSystem for view file system for viewfs: uris
    (ie client side mount table:).</description>
</property>

注释:

<property>
  <name>fs.AbstractFileSystem.file.impl</name>
  <value>org.apache.hadoop.fs.local.LocalFs</value>
  <description>The AbstractFileSystem for file: uris.</description>
</property>

注释:

<!-- Fixed description: this key binds an AbstractFileSystem (like the
     fs.AbstractFileSystem.file.impl / .viewfs.impl entries), not a
     FileSystem; the original text said "The FileSystem". -->
<property>
  <name>fs.AbstractFileSystem.hdfs.impl</name>
  <value>org.apache.hadoop.fs.Hdfs</value>
  <description>The AbstractFileSystem for hdfs: uris.</description>
</property>

注释:

<property>
  <name>fs.AbstractFileSystem.viewfs.impl</name>
  <value>org.apache.hadoop.fs.viewfs.ViewFs</value>
  <description>The AbstractFileSystem for view file system for viewfs: uris
    (ie client side mount table:).</description>
</property>

注释:

<property>
  <name>fs.s3.impl</name>
  <value>org.apache.hadoop.fs.s3.S3FileSystem</value>
  <description>The FileSystem for s3: uris.</description>
</property>

注释:

<property>
  <name>fs.s3n.impl</name>
  <value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value>
  <description>The FileSystem for s3n: (Native S3) uris.</description>
</property>

注释:

<property>
  <name>fs.kfs.impl</name>
  <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value>
  <description>The FileSystem for kfs: uris.</description>
</property>

注释:

<!-- Added a description for consistency: every other fs.*.impl entry in
     this file documents which URI scheme it serves. -->
<property>
  <name>fs.hftp.impl</name>
  <value>org.apache.hadoop.hdfs.HftpFileSystem</value>
  <description>The FileSystem for hftp: uris.</description>
</property>

注释:

<!-- Added a description for consistency with the other fs.*.impl entries. -->
<property>
  <name>fs.hsftp.impl</name>
  <value>org.apache.hadoop.hdfs.HsftpFileSystem</value>
  <description>The FileSystem for hsftp: uris.</description>
</property>

注释:

<!-- Added a description for consistency with the other fs.*.impl entries. -->
<property>
  <name>fs.webhdfs.impl</name>
  <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value>
  <description>The FileSystem for webhdfs: uris.</description>
</property>

注释:

<property>
  <name>fs.ftp.impl</name>
  <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value>
  <description>The FileSystem for ftp: uris.</description>
</property>

注释:

<property>
  <name>fs.ftp.host</name>
  <value>0.0.0.0</value>
  <description>FTP filesystem connects to this server</description>
</property>

注释:

<property>
  <name>fs.ftp.host.port</name>
  <value>21</value>
  <description>FTP filesystem connects to fs.ftp.host on this port</description>
</property>

注释:

<property>
  <name>fs.har.impl</name>
  <value>org.apache.hadoop.fs.HarFileSystem</value>
  <description>The filesystem for Hadoop archives.</description>
</property>

注释:

<property>
  <name>fs.har.impl.disable.cache</name>
  <value>true</value>
  <description>Don't cache 'har' filesystem instances.</description>
</property>

注释:

<property>
  <name>fs.df.interval</name>
  <value>60000</value>
  <description>Disk usage statistics refresh interval in msec.</description>
</property>

注释:

<property>
  <name>fs.s3.block.size</name>
  <value>67108864</value>
  <description>Block size to use when writing files to S3.</description>
</property>

注释:

<property>
  <name>fs.s3.buffer.dir</name>
  <value>${hadoop.tmp.dir}/s3</value>
  <description>Determines where on the local filesystem the S3 filesystem
    should store files before sending them to S3
    (or after retrieving them from S3).</description>
</property>

注释:

<property>
  <name>fs.s3.maxRetries</name>
  <value>4</value>
  <description>The maximum number of retries for reading or writing files to
    S3, before we signal failure to the application.</description>
</property>

注释:

<property>
  <name>fs.s3.sleepTimeSeconds</name>
  <value>10</value>
  <description>The number of seconds to sleep between each S3 retry.</description>
</property>

注释:

<property>
  <name>fs.automatic.close</name>
  <value>true</value>
  <description>By default, FileSystem instances are automatically closed at
    program exit using a JVM shutdown hook. Setting this property to false
    disables this behavior. This is an advanced option that should only be
    used by server applications requiring a more carefully orchestrated
    shutdown sequence.</description>
</property>

注释:

<property>
  <name>fs.s3n.block.size</name>
  <value>67108864</value>
  <description>Block size to use when reading files using the native S3
    filesystem (s3n: URIs).</description>
</property>

注释:

<property>
  <name>io.seqfile.compress.blocksize</name>
  <value>1000000</value>
  <description>The minimum block size for compression in block compressed
    SequenceFiles.</description>
</property>

注释:

<property>
  <name>io.seqfile.lazydecompress</name>
  <value>true</value>
  <description>Should values of block-compressed SequenceFiles be decompressed
    only when necessary.</description>
</property>

注释:

<property>
  <name>io.seqfile.sorter.recordlimit</name>
  <value>1000000</value>
  <description>The limit on number of records to be kept in memory in a spill
    in SequenceFiles.Sorter</description>
</property>

注释:

 <property>  <name>io.mapfile.bloom.size</name>  <value>1048576</value>  <description>The size of BloomFilter-s used in BloomMapFile. Each time this many  keys is appended the next BloomFilter will be created (inside a DynamicBloomFilter).  Larger values minimize the number of filters, which slightly increases the performance,  but may waste too much space if the total number of keys is usually much smaller  than this number.  </description></property>

注释:

<property>
  <name>io.mapfile.bloom.error.rate</name>
  <value>0.005</value>
  <description>The rate of false positives in BloomFilter-s used in
    BloomMapFile. As this value decreases, the size of BloomFilter-s increases
    exponentially. This value is the probability of encountering false
    positives (default is 0.5%).</description>
</property>

注释:

<property>
  <name>hadoop.util.hash.type</name>
  <value>murmur</value>
  <description>The default implementation of Hash. Currently this can take one
    of the two values: 'murmur' to select MurmurHash and 'jenkins' to select
    JenkinsHash.</description>
</property>

注释:

0 0
原创粉丝点击