一个根据所给资源自动配置CDH中Hadoop等参数的工具
来源:互联网 发布:上海电信网络安装电话 编辑:程序博客网 时间:2024/05/16 06:46
分享一个根据所给内存,CPU等资源,自动配置Hadoop等参数的工具。
假设Python的名字为:cdh_auto_configuration.py,完整内容如下:
#!/usr/bin/env python
import optparse
from pprint import pprint
import logging
import sys
import math
import ast
# Memory (GB) reserved for OS + DataNode + NodeManager,
# keyed by total host memory in GB.
reservedStack = {
    4: 1, 7: 2, 8: 2, 16: 2, 24: 4, 48: 6, 64: 8, 72: 8,
    96: 12, 128: 24, 256: 32, 512: 64,
}

# Memory (GB) reserved for HBase, keyed by total host memory in GB.
reservedHBase = {
    4: 1, 8: 1, 16: 2, 24: 4, 48: 8, 64: 8, 72: 8,
    96: 16, 128: 24, 256: 32, 512: 64,
}

# Megabytes per gigabyte.
GB = 1024
def getMinContainerSize(memory):
    """Return the minimum YARN container size (MB) for a host with
    *memory* GB of RAM: smaller hosts get smaller minimum containers."""
    # (upper bound in GB, container size in MB), checked in ascending order.
    for upper_bound, container_mb in ((4, 256), (8, 512), (24, 1024)):
        if memory <= upper_bound:
            return container_mb
    return 2048
def getReservedStackMemory(memory, table=None):
    """Return the memory (GB) to reserve for OS + DataNode + NodeManager.

    memory -- total host memory in GB.
    table  -- optional reservation mapping {memory_gb: reserved_gb};
              defaults to the module-level ``reservedStack`` table.
    """
    if table is None:
        table = reservedStack
    # Fix: dict.has_key() was removed in Python 3; use the `in` operator.
    if memory in table:
        return table[memory]
    # Fallback for sizes not listed in the table.
    if memory <= 4:
        ret = 1
    elif memory >= 512:
        ret = 64
    else:
        ret = 1
    return ret
def getReservedHBaseMem(memory, table=None):
    """Return the memory (GB) to reserve for HBase on this host.

    memory -- total host memory in GB.
    table  -- optional reservation mapping {memory_gb: reserved_gb};
              defaults to the module-level ``reservedHBase`` table.
    """
    if table is None:
        table = reservedHBase
    # Fix: dict.has_key() was removed in Python 3; use the `in` operator.
    if memory in table:
        return table[memory]
    # Fallback for sizes not listed in the table.
    if memory <= 4:
        ret = 1
    elif memory >= 512:
        ret = 64
    else:
        ret = 2
    return ret
def getRoundedMemory(memory):
    """Round *memory* (MB) down to an allocation granularity that grows
    with the amount: 128 MB up to 1 GB, then 256/512/1024 MB steps."""
    if memory > 4096:
        step = 1024
    elif memory > 2048:
        step = 512
    elif memory > 1024:
        step = 256
    else:
        step = 128
    # Floor-divide then scale back up: largest multiple of `step` <= memory.
    return int(memory // step) * step
def main():
    """Parse host resources (-c/-m/-d/-k) from the command line and print
    suggested YARN / MapReduce / Tez / Hive memory settings."""
    log = logging.getLogger(__name__)
    out_hdlr = logging.StreamHandler(sys.stdout)
    out_hdlr.setFormatter(logging.Formatter(' %(message)s'))
    out_hdlr.setLevel(logging.INFO)
    log.addHandler(out_hdlr)
    log.setLevel(logging.INFO)

    parser = optparse.OptionParser()
    parser.add_option('-c', '--cores', default = 16, help = 'Number of cores on each host')
    parser.add_option('-m', '--memory', default = 64, help = 'Amount of Memory on each host in GB')
    parser.add_option('-d', '--disks', default = 4, help = 'Number of disks on each host')
    parser.add_option('-k', '--hbase', default ='True', help = 'True if HBase is installed, False is not')
    (options, args) = parser.parse_args()

    cores = int(options.cores)
    memory = int(options.memory)
    disks = int(options.disks)
    # literal_eval safely parses 'True'/'False' without evaluating code.
    hbaseEnabled = ast.literal_eval(options.hbase)
    log.info("Using cores=" + str(cores) + " memory=" + str(memory) + "GB" + " disks=" + str(disks) + " hbase=" + str(hbaseEnabled))

    minContainerSize = getMinContainerSize(memory)
    reservedStackMemory = getReservedStackMemory(memory)
    reservedHBaseMemory = 0
    if hbaseEnabled:
        reservedHBaseMemory = getReservedHBaseMem(memory)
    reservedMem = reservedStackMemory + reservedHBaseMemory
    usableMem = memory - reservedMem
    memory -= reservedMem
    if memory < 2:
        # Tiny hosts: guarantee at least 2 GB for containers and recompute
        # the reported reservation. Fix: this recomputation belongs inside
        # the small-memory branch (the original paste lost its indentation);
        # on normal hosts reserved must stay stack+HBase, as the sample
        # output (reserved=48GB for a 128 GB host) shows.
        memory = 2
        reservedMem = max(0, memory - reservedMem)
    memory *= GB

    # Container count: at least 3, bounded by cores, disk parallelism and
    # available memory. Fix: use // so the memory bound stays integral
    # under Python 3's true division.
    containers = int(max(3, min(2 * cores,
                                min(math.ceil(1.8 * float(disks)),
                                    memory // minContainerSize))))
    log.info("Profile: cores=" + str(cores) + " memory=" + str(memory) + "MB"
             + " reserved=" + str(reservedMem) + "GB" + " usableMem="
             + str(usableMem) + "GB" + " disks=" + str(disks))

    container_ram = getRoundedMemory(abs(memory // containers))
    log.info("Num Container=" + str(containers))
    log.info("Container Ram=" + str(container_ram) + "MB")
    log.info("Used Ram=" + str(int(containers * container_ram / float(GB))) + "GB")
    log.info("Unused Ram=" + str(reservedMem) + "GB")

    map_memory = container_ram
    reduce_memory = container_ram
    if container_ram < 2048:
        # Small containers: give reducers twice the map memory.
        reduce_memory = 2 * container_ram
    am_memory = min(map_memory, reduce_memory)

    ''' MapReduce Configuration '''
    log.info("***** mapred-site.xml *****")
    log.info("mapreduce.map.memory.mb=" + str(int(map_memory)))
    log.info("mapreduce.map.java.opts=-Xmx" + str(getRoundedMemory(int(0.8 * map_memory))) + "m")
    log.info("mapreduce.reduce.memory.mb=" + str(int(reduce_memory)))
    log.info("mapreduce.reduce.java.opts=-Xmx" + str(getRoundedMemory(int(0.8 * reduce_memory))) + "m")
    ''' io.sort.mb cannot be greater than 2047 '''
    log.info("mapreduce.task.io.sort.mb=" + str(getRoundedMemory(int(min(0.4 * map_memory, 2047)))))

    ''' YARN Configuration '''
    log.info("***** yarn-site.xml *****")
    log.info("yarn.scheduler.minimum-allocation-mb=" + str(container_ram))
    log.info("yarn.scheduler.maximum-allocation-mb=" + str(containers * container_ram))
    log.info("yarn.nodemanager.resource.memory-mb=" + str(containers * container_ram))
    log.info("yarn.app.mapreduce.am.resource.mb=" + str(int(am_memory)))
    log.info("yarn.app.mapreduce.am.command-opts=-Xmx" + str(getRoundedMemory(int(0.8 * am_memory))) + "m")

    ''' Tez Configuration '''
    log.info("***** tez-site.xml *****")
    am_memory = max(map_memory, reduce_memory)
    log.info("tez.am.resource.memory.mb=" + str(int(am_memory)))
    log.info("tez.am.java.opts=-Xmx" + str(getRoundedMemory(int(0.8 * am_memory))) + "m")

    heap_size = getRoundedMemory(int(0.8 * container_ram))
    ''' Hive Configuration '''
    log.info("***** hive-site.xml *****")
    log.info("hive.tez.container.size=" + str(int(container_ram)))
    log.info("hive.tez.java.opts=-Xmx" + str(heap_size) + "m")
    hive_noconditional_task_size = int(getRoundedMemory(int(heap_size * 0.33)) * 1024 * 1024)
    # Fix: // keeps the truncate-to-thousands integer semantics from
    # Python 2 (true division would print a float like 2684354560.0).
    log.info("hive.auto.convert.join.noconditionaltask.size=" + str(hive_noconditional_task_size // 1000 * 1000))
# Script entry point: run main() and exit with status 1 on Ctrl-C / EOF.
if __name__ == '__main__':
    try:
        main()
    except (KeyboardInterrupt, EOFError):
        print("\nAborting ... Keyboard Interrupt.")
        sys.exit(1)
下面我们来看一下这个脚本的常用参数:
执行帮助命令:
python cdh_auto_configuration.py -h
返回结果:
Usage: cdh_auto_configuration.py [options]

Options:
  -h, --help            show this help message and exit
-c CORES, --cores=CORES
Number of cores on each host
-m MEMORY, --memory=MEMORY
Amount of Memory on each host in GB
-d DISKS, --disks=DISKS
Number of disks on each host
-k HBASE, --hbase=HBASE
True if HBase is installed, False is not
根据参数提示,下面我们来演示一下脚本的执行情况:
python cdh_auto_configuration.py -c 64 -m 128 -d 4 -k True
返回结果:
Using cores=64 memory=128GB disks=4 hbase=True
Profile: cores=64 memory=81920MB reserved=48GB usableMem=80GB disks=4
Num Container=8
Container Ram=10240MB
Used Ram=80GB
Unused Ram=48GB
***** mapred-site.xml *****
mapreduce.map.memory.mb=10240
mapreduce.map.java.opts=-Xmx8192m
mapreduce.reduce.memory.mb=10240
mapreduce.reduce.java.opts=-Xmx8192m
mapreduce.task.io.sort.mb=1792
***** yarn-site.xml *****
yarn.scheduler.minimum-allocation-mb=10240
yarn.scheduler.maximum-allocation-mb=81920
yarn.nodemanager.resource.memory-mb=81920
yarn.app.mapreduce.am.resource.mb=10240
yarn.app.mapreduce.am.command-opts=-Xmx8192m
***** tez-site.xml *****
tez.am.resource.memory.mb=10240
tez.am.java.opts=-Xmx8192m
***** hive-site.xml *****
hive.tez.container.size=10240
hive.tez.java.opts=-Xmx8192m
hive.auto.convert.join.noconditionaltask.size=2684354000
然后根据实际情况进行查看分析,如果不满足你的要求,可以对上面的Python源码进行修改并重新生成配置。
0 1
- 一个根据所给资源自动配置CDH中Hadoop等参数的工具
- 一个根据所给资源自动配置CDH中Hadoop等参数的工具
- CDH集群中YARN的参数配置
- CDH集群中配置Yarn资源
- Mybatis中根据数据库表结构自动生成dao层等代码的工具及其步骤
- c#中自动配置存储过程所需参数
- Hadoop的CDH简介
- CDH中服务的配置及启动
- Makefile中如何根据源文件自动生成其所需要的头文件
- hadoop-cdh的伪分布式
- CDH 中hbase的Regionserver自动退出问题
- 一个根据数据库中数据,并且页面中用户的勾选所达到的效果
- 所有我所收集或者工作中积累的代码以及程序等资源
- Oracle中根据条件参数插入自增的(带参数自动获取DPNO)
- 将材质编辑器中选择的材质自动赋予给所创建的对象
- CDH配置(Zookeeper,HADOOP,Hive)
- cdh 中 spark 配置 lzo
- Eclipse配置maven所需的资源
- eclipse原有项目导入到新的svn
- 七牛-资源列举(c#)
- php5.6关于curl以@方式上传文件的解决方案
- jar包详解和META-INF作用(转)
- 关于 html中的title属性隐藏 办法
- 一个根据所给资源自动配置CDH中Hadoop等参数的工具
- HTML锚点定位偏移
- 非常全面的Bitmap梳理
- vim格式化代码
- 算法_TrueSkill_Python
- 使用PHP导入和导出CSV文件
- 天纵智能软件快速开发平台状态图查询分析插件
- 无法解析的外部符号 __imp__MessageBoxA
- Spring的helloworld的缺包错误