Production Spark in Practice (9): Installing the Spark Cluster on a 5-Machine Distributed Cluster




1. Upload the Spark package to the master node and check the files

[root@master rhzf_spark_setupTools]# ls
hadoop-2.6.5.tar.gz  jdk-8u121-linux-x64.tar.gz  scala-2.11.8.zip  spark-2.1.0-bin-hadoop2.6.tgz
[root@master rhzf_spark_setupTools]#
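Optionally, before unpacking, a quick check that the archive arrived intact; a truncated upload will make the listing fail with a gzip or tar error:

# List the first few entries without extracting anything
tar -tzf spark-2.1.0-bin-hadoop2.6.tgz | head
# Compare the size against the original download
ls -lh spark-2.1.0-bin-hadoop2.6.tgz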
 

2. Unpack and install Spark

[root@master rhzf_spark_setupTools]# tar -zxvf spark-2.1.0-bin-hadoop2.6.tgz
[root@master rhzf_spark_setupTools]# ls
hadoop-2.6.5.tar.gz  jdk-8u121-linux-x64.tar.gz  scala-2.11.8.zip  spark-2.1.0-bin-hadoop2.6  spark-2.1.0-bin-hadoop2.6.tgz
[root@master rhzf_spark_setupTools]# mv spark-2.1.0-bin-hadoop2.6 /usr/local
[root@master rhzf_spark_setupTools]# cd /usr/local
[root@master local]# ls
bin  games         include       lib    libexec             rhzf_spark_setupTools  scala-2.11.8  spark-2.1.0-bin-hadoop2.6
etc  hadoop-2.6.5  jdk1.8.0_121  lib64  rhzf_setup_scripts  sbin                   share         src
[root@master local]#

3. Edit /etc/profile

export JAVA_HOME=/usr/local/jdk1.8.0_121
export SCALA_HOME=/usr/local/scala-2.11.8
export HADOOP_HOME=/usr/local/hadoop-2.6.5
export SPARK_HOME=/usr/local/spark-2.1.0-bin-hadoop2.6
export PATH=.:$PATH:$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin

Reload the profile so the changes take effect:
[root@master spark-2.1.0-bin-hadoop2.6]# source  /etc/profile
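A quick way to confirm the variables took effect in the current shell (paths as configured above):

# Print the resolved Spark location and the version picked up from $PATH
echo $SPARK_HOME
which spark-submit
spark-submit --version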



4. Modify the Spark configuration files

[root@master spark-2.1.0-bin-hadoop2.6]# cd ..
[root@master local]# ls
bin  games         include       lib    libexec             rhzf_spark_setupTools  scala-2.11.8  spark-2.1.0-bin-hadoop2.6
etc  hadoop-2.6.5  jdk1.8.0_121  lib64  rhzf_setup_scripts  sbin                   share         src
[root@master local]# cd spark-2.1.0-bin-hadoop2.6
[root@master spark-2.1.0-bin-hadoop2.6]# ls
bin  conf  data  examples  jars  LICENSE  licenses  NOTICE  python  R  README.md  RELEASE  sbin  yarn
[root@master spark-2.1.0-bin-hadoop2.6]# cd conf
[root@master conf]# ls
docker.properties.template  log4j.properties.template    slaves.template               spark-env.sh.template
fairscheduler.xml.template  metrics.properties.template  spark-defaults.conf.template
[root@master conf]# mv spark-env.sh.template spark-env.sh
[root@master conf]# ls
docker.properties.template  log4j.properties.template    slaves.template               spark-env.sh
fairscheduler.xml.template  metrics.properties.template  spark-defaults.conf.template
[root@master conf]# vi spark-env.sh


export JAVA_HOME=/usr/local/jdk1.8.0_121
export SCALA_HOME=/usr/local/scala-2.11.8
export SPARK_MASTER_IP=10. 0.237
export SPARK_WORKER_MEMORY=2g
export HADOOP_CONF_DIR=/usr/local/hadoop-2.6.5/etc/hadoop
"spark-env.sh" 82L, 4180C written
[root@master conf]#
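Note: Spark 2.x prefers SPARK_MASTER_HOST over SPARK_MASTER_IP; the latter is still honored, but the Master logs it as deprecated at startup. An equivalent spark-env.sh entry, with a placeholder instead of the real address, would be:

# Preferred spelling in Spark 2.x; substitute the master's actual IP or hostname
export SPARK_MASTER_HOST=<master-ip>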

[root@master conf]# ls
docker.properties.template  log4j.properties.template    slaves.template               spark-env.sh
fairscheduler.xml.template  metrics.properties.template  spark-defaults.conf.template
[root@master conf]# mv slaves.template slaves
[root@master conf]# ls
docker.properties.template  fairscheduler.xml.template  log4j.properties.template  metrics.properties.template  slaves  spark-defaults.conf.template  spark-env.sh
[root@master conf]# vi slaves
worker01
worker02
worker03
worker04
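Since start-slaves.sh logs in to every host listed in slaves over SSH, it is worth confirming up front that the worker hostnames resolve and that passwordless SSH works. A minimal sketch, assuming the four hostnames above are known to /etc/hosts or DNS:

for h in worker01 worker02 worker03 worker04; do
  # Each line should print the worker's hostname without asking for a password
  ssh -o BatchMode=yes root@$h hostname
done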



5. Distribute Spark to the worker nodes with a script

[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh  rhzf_hosts_scp.sh  rhzf_jdk.sh  rhzf_scala.sh  rhzf_ssh.sh
[root@master rhzf_setup_scripts]# vi rhzf_spark.sh
#!/bin/sh
for i in 238 239 240 241
do
scp -rq /usr/local/spark-2.1.0-bin-hadoop2.6  root@10 .$i:/usr/local/spark-2.1.0-bin-hadoop2.6
scp -rq /etc/profile  root@10 .$i:/etc/profile
ssh root@10. 0.$i source /etc/profile
done


[root@master rhzf_setup_scripts]# ls
rhzf_hadoop.sh  rhzf_hosts_scp.sh  rhzf_jdk.sh  rhzf_scala.sh  rhzf_spark.sh  rhzf_ssh.sh
[root@master rhzf_setup_scripts]# chmod u+x rhzf_spark.sh
[root@master rhzf_setup_scripts]# ./rhzf_spark.sh
[root@master rhzf_setup_scripts]#
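Note that running "source /etc/profile" over ssh only affects that one remote shell; new login sessions on the workers will source /etc/profile on their own. To confirm the copy landed and the variables resolve on each worker, a quick sketch (worker hostnames taken from the slaves file above):

for h in worker01 worker02 worker03 worker04; do
  echo -n "$h: "
  # A login shell (-l) sources /etc/profile, so this shows what a fresh session will see
  ssh root@$h "bash -lc 'echo \$SPARK_HOME'"
done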

6. Start the Spark cluster

[root@master bin]# pwd
/usr/local/spark-2.1.0-bin-hadoop2.6/bin
[root@master bin]# cd ..
[root@master spark-2.1.0-bin-hadoop2.6]# cd sbin
[root@master sbin]# ls
slaves.sh         start-all.sh               start-mesos-shuffle-service.sh  start-thriftserver.sh   stop-mesos-dispatcher.sh       stop-slaves.sh
spark-config.sh   start-history-server.sh    start-shuffle-service.sh        stop-all.sh             stop-mesos-shuffle-service.sh  stop-thriftserver.sh
spark-daemon.sh   start-master.sh            start-slave.sh                  stop-history-server.sh  stop-shuffle-service.sh
spark-daemons.sh  start-mesos-dispatcher.sh  start-slaves.sh                 stop-master.sh          stop-slave.sh
[root@master sbin]# start-all.sh
starting org.apache.spark.deploy.master.Master, logging to /usr/local/spark-2.1.0-bin-hadoop2.6/logs/spark-root-org.apache.spark.deploy.master.Master-1-master.out
worker03: starting org.apache.spark.deploy.worker.Worker, logging to /usr/local/spark-2.1.0-bin-hadoop2.6/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-worker03.out
worker04: starting org.apache.spark.deploy.worker.Worker, logging to /usr/local/spark-2.1.0-bin-hadoop2.6/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-worker04.out
worker01: starting org.apache.spark.deploy.worker.Worker, logging to /usr/local/spark-2.1.0-bin-hadoop2.6/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-worker01.out
worker02: starting org.apache.spark.deploy.worker.Worker, logging to /usr/local/spark-2.1.0-bin-hadoop2.6/logs/spark-root-org.apache.spark.deploy.worker.Worker-1-worker02.out
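After start-all.sh returns, the daemons can be double-checked with jps, and the standalone Master also serves a web UI on port 8080 by default. A minimal verification sketch, assuming passwordless SSH to the workers:

# On the master: exactly one Master JVM should be listed
jps | grep -w Master

# On each worker: exactly one Worker JVM should be listed
for h in worker01 worker02 worker03 worker04; do
  echo -n "$h: "
  ssh root@$h "bash -lc 'jps | grep -w Worker'"
done

# The cluster overview is also available at http://<master-ip>:8080 (placeholder address)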

The startup log above shows the Master and all four Workers (worker01 through worker04) starting successfully.