package my.test;

import java.util.Map.Entry;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
 
// ConfigPrinter: prints all Hadoop configuration settings and properties.
public class ConfigPrinter extends Configured implements Tool {
  // Alternative: register the config files as default resources explicitly.
  // Commented out here, because creating a Job in run() already loads them.
  /*
  static {
    Configuration.addDefaultResource("hdfs-default.xml");
    Configuration.addDefaultResource("hdfs-site.xml");
    Configuration.addDefaultResource("mapred-default.xml");
    Configuration.addDefaultResource("mapred-site.xml");
    //Configuration.addDefaultResource("test.xml");
  }
  */
  @Override
  public int run(String[] args) throws Exception {
    Configuration conf = getConf();

    // Load an extra resource file on top of the defaults.
    conf.addResource("test2.xml");

    // mapred-site.xml and hdfs-site.xml are only loaded once a Job is set up.
    Job job = new Job(conf, "print args");
    job.setJarByClass(ConfigPrinter.class);

    //System.setProperty("test1", "mytest");
    for (Entry<String, String> entry : conf) {
      System.out.printf("%s=%s\n", entry.getKey(), entry.getValue());
    }

    return 0;
  }
 
  public static void main(String[] args) throws Exception {
    int exitCode = ToolRunner.run(new ConfigPrinter(), args);
    System.exit(exitCode);
  }
}
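A note on why the -D mechanism works at all: ToolRunner.run() puts a GenericOptionsParser in front of the tool, which strips -Dkey=value arguments off the command line and injects them into the Configuration handed to run(). That is also why mapred.used.genericoptionsparser=true appears in the output below.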

Build the jar and run it; it prints the resulting environment. Note that the -D parameter I add on the command line overrides the value from the XML config files. Put test2.xml in the src root directory, or on any path where HADOOP_CONF_DIR can find it.
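The contents of test2.xml are not shown here; it is an ordinary Hadoop configuration file, and judging from the test2=tdata2 line in the output below it presumably looks something like this (property name and value inferred from the run output):

<?xml version="1.0"?>
<configuration>
  <property>
    <!-- hypothetical: inferred from test2=tdata2 in the output -->
    <name>test2</name>
    <value>tdata2</value>
  </property>
</configuration>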

[zhouhh@Hadoop48 ~]$ hadoop jar ConfigPrinter.jar -Dmapred.map.tasks=6
io.map.index.skip=0
io.seqfile.compress.blocksize=1000000
mapred.task.profile.maps=0-2
keep.failed.task.files=false
mapred.tasktracker.map.tasks.maximum=2
mapreduce.reduce.input.limit=-1
mapred.task.tracker.http.address=0.0.0.0:55060
mapred.map.tasks.speculative.execution=true
mapred.used.genericoptionsparser=true
mapred.userlog.retain.hours=24
webinterface.private.actions=false
fs.s3.impl=org.apache.hadoop.fs.s3.S3FileSystem
mapred.local.dir.minspacestart=0
hadoop.native.lib=true
fs.checkpoint.edits.dir=${fs.checkpoint.dir}
ipc.server.listen.queue.size=128
mapred.cluster.reduce.memory.mb=-1
io.sort.spill.percent=0.80
mapred.reduce.parallel.copies=5
tasktracker.http.threads=40
mapred.reduce.tasks=1
mapreduce.tasktracker.outofband.heartbeat=false
hadoop.security.authorization=false
io.file.buffer.size=4096
mapred.min.split.size=0
hadoop.logfile.size=10000000
mapred.job.queue.name=default
mapred.submit.replication=10
mapred.local.dir.minspacekill=0
fs.webhdfs.impl=org.apache.hadoop.hdfs.web.WebHdfsFileSystem
mapred.task.profile=false
ipc.client.kill.max=10
mapred.acls.enabled=false
mapred.heartbeats.in.second=100
mapreduce.reduce.shuffle.read.timeout=180000
mapred.output.compress=false
ipc.server.tcpnodelay=false
mapred.healthChecker.interval=60000
mapred.jobtracker.blacklist.fault-bucket-width=15
mapred.task.timeout=600000
mapred.temp.dir=${hadoop.tmp.dir}/mapred/temp
mapred.jobtracker.taskScheduler=org.apache.hadoop.mapred.JobQueueTaskScheduler
mapred.max.tracker.blacklists=4
mapred.skip.reduce.max.skip.groups=0
mapred.tasktracker.indexcache.mb=10
mapreduce.jobtracker.staging.root.dir=${hadoop.tmp.dir}/mapred/staging
mapred.queue.default.state=RUNNING
mapred.tasktracker.dns.nameserver=default
hadoop.logfile.count=10
mapred.tasktracker.taskmemorymanager.monitoring-interval=5000
mapred.tasktracker.expiry.interval=600000
hadoop.security.uid.cache.secs=14400
mapred.skip.attempts.to.start.skipping=2
mapreduce.reduce.shuffle.connect.timeout=180000
map.sort.class=org.apache.hadoop.util.QuickSort
mapred.job.tracker.persist.jobstatus.active=false
mapred.tasktracker.reduce.tasks.maximum=2
fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem
mapred.max.tracker.failures=4
mapred.output.compression.codec=org.apache.hadoop.io.compress.DefaultCodec
jobclient.output.filter=FAILED
mapred.job.tracker.http.address=0.0.0.0:55030
fs.file.impl=org.apache.hadoop.fs.LocalFileSystem
mapred.jobtracker.restart.recover=false
mapred.healthChecker.script.timeout=600000
ipc.client.connection.maxidletime=10000
mapred.local.dir=${hadoop.tmp.dir}/mapred/local
mapreduce.job.complete.cancel.delegation.tokens=true
mapred.job.tracker.persist.jobstatus.dir=/jobtracker/jobsInfo
mapred.job.tracker=Hadoop48:54311
io.sort.record.percent=0.05
job.end.retry.attempts=0
mapred.job.shuffle.merge.percent=0.66
mapred.map.max.attempts=4
mapred.reduce.tasks.speculative.execution=true
mapreduce.job.counters.limit=120
fs.checkpoint.size=67108864
io.skip.checksum.errors=false
mapred.job.reduce.input.buffer.percent=0.0
fs.s3n.impl=org.apache.hadoop.fs.s3native.NativeS3FileSystem
fs.s3.maxRetries=4
mapred.output.compression.type=RECORD
mapred.task.cache.levels=2
mapred.task.tracker.task-controller=org.apache.hadoop.mapred.DefaultTaskController
mapred.job.reuse.jvm.num.tasks=1
mapred.system.dir=${hadoop.mydata.dir}/mapred/system
test1=tdata1
io.sort.factor=10
mapred.userlog.limit.kb=0
mapred.jobtracker.maxtasks.per.job=-1
fs.default.name=hdfs://Hadoop48:54310
mapred.job.tracker.retiredjobs.cache.size=1000
ipc.client.idlethreshold=4000
fs.hsftp.impl=org.apache.hadoop.hdfs.HsftpFileSystem
hadoop.tmp.dir=/tmp/hadoop-${user.name}
fs.checkpoint.dir=${hadoop.tmp.dir}/dfs/namesecondary
mapred.skip.map.auto.incr.proc.count=true
fs.s3.block.size=67108864
io.serializations=org.apache.hadoop.io.serializer.WritableSerialization
mapred.inmem.merge.threshold=1000
hadoop.util.hash.type=murmur
io.seqfile.lazydecompress=true
mapred.job.reduce.memory.mb=-1
mapred.skip.map.max.skip.records=0
mapred.job.map.memory.mb=-1
io.mapfile.bloom.size=1048576
hadoop.mydata.dir=/home/zhouhh/myhadoop
fs.s3.buffer.dir=${hadoop.tmp.dir}/s3
mapred.tasktracker.dns.interface=default
mapred.reduce.max.attempts=4
test2=tdata2
io.compression.codecs=org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec
mapred.task.profile.reduces=0-2
mapred.job.tracker.jobhistory.lru.cache.size=5
mapred.cluster.map.memory.mb=-1
topology.script.number.args=100
mapred.skip.reduce.auto.incr.proc.count=true
fs.har.impl=org.apache.hadoop.fs.HarFileSystem
mapred.cluster.max.map.memory.mb=-1
mapred.job.tracker.persist.jobstatus.hours=0
io.seqfile.sorter.recordlimit=1000000
mapred.reduce.slowstart.completed.maps=0.05
fs.trash.interval=0
hadoop.security.authentication=simple
local.cache.size=10737418240
hadoop.security.group.mapping=org.apache.hadoop.security.ShellBasedUnixGroupsMapping
mapred.job.tracker.handler.count=10
hadoop.security.token.service.use_ip=true
ipc.client.connect.max.retries=10
fs.ramfs.impl=org.apache.hadoop.fs.InMemoryFileSystem
hadoop.rpc.socket.factory.class.default=org.apache.hadoop.net.StandardSocketFactory
fs.kfs.impl=org.apache.hadoop.fs.kfs.KosmosFileSystem
mapreduce.job.acl-view-job=
fs.checkpoint.period=3600
mapred.map.output.compression.codec=org.apache.hadoop.io.compress.DefaultCodec
topology.node.switch.mapping.impl=org.apache.hadoop.net.ScriptBasedMapping
job.end.retry.interval=30000
mapred.tasktracker.tasks.sleeptime-before-sigkill=5000
mapred.job.shuffle.input.buffer.percent=0.70
mapred.jobtracker.completeuserjobs.maximum=100
mapred.user.jobconf.limit=5242880
mapred.compress.map.output=false
mapred.queue.names=default
fs.hdfs.impl=org.apache.hadoop.hdfs.DistributedFileSystem
mapred.child.java.opts=-Xmx200m
mapred.jobtracker.blacklist.fault-timeout-window=180
mapred.merge.recordsBeforeProgress=10000
mapred.jobtracker.job.history.block.size=3145728
mapreduce.reduce.shuffle.maxfetchfailures=10
io.mapfile.bloom.error.rate=0.005
mapreduce.job.split.metainfo.maxsize=10000000
io.bytes.per.checksum=512
mapred.child.tmp=./tmp
fs.har.impl.disable.cache=true
ipc.client.tcpnodelay=false
fs.hftp.impl=org.apache.hadoop.hdfs.HftpFileSystem
io.sort.mb=100
mapred.cluster.max.reduce.memory.mb=-1
mapred.line.input.format.linespermap=1
mapreduce.tasktracker.outofband.heartbeat.damper=1000000
mapreduce.job.acl-modify-job=
mapred.combine.recordsBeforeProgress=10000
fs.s3.sleepTimeSeconds=10
mapred.map.tasks=6
mapred.task.tracker.report.address=127.0.0.1:0
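
Two lines in this output are worth pointing out: mapred.map.tasks=6 shows the -D command-line override took effect, and test2=tdata2 shows the extra resource file test2.xml was loaded on top of the built-in defaults.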