1.txt Linux usage, running JAR packages, Kafka, and Flume

First, on host 238, run:
cd /usr/local/hbase-1.2.1/bin/
./stop-hbase.sh 


If you hit errors, do the following on hosts 238, 239, and 240:
cd /usr/local
./stophbase.sh
After it finishes, use
ps aux | grep hbase
to check whether any leftover processes remain. If there are, kill each one by its PID:
kill -9 $pid    ($pid is the PID number you saw)
Then run
zkServer.sh restart
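
A minimal sketch automating that check-and-kill step (the [h]base bracket trick keeps grep from matching its own process):

for pid in $(ps aux | grep '[h]base' | awk '{print $2}'); do
    kill -9 "$pid"
done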


./restartzookeeper.sh    (run on all three nodes) to restart ZooKeeper
 
Then run
./starthbase.sh      to start HBase
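
As a quick check (an extra step, not in the original notes), jps should show the HBase and ZooKeeper daemons after startup: HMaster on the master node, HRegionServer on region servers, QuorumPeerMain for ZooKeeper:

jps | grep -E 'HMaster|HRegionServer|QuorumPeerMain'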
 
 
scan 'DW_USER_BIRTHDAY', {LIMIT => 5}

get 'DW_INVEST_CZTZ', '201612300111330000463029'

delete 'DW_INVEST_CZTZ', '201612300111330000463029'

deleteall 'DW_DX_P2P', '2017031311152418787523932'
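
The commands above run inside the HBase shell. Note that delete targets a single cell and also needs a column (delete 'table', 'row', 'column'), while deleteall removes the whole row. For one-off commands you can pipe into the shell instead of opening an interactive session (HBase path taken from the steps above):

echo "scan 'DW_USER_BIRTHDAY', {LIMIT => 5}" | /usr/local/hbase-1.2.1/bin/hbase shell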



[root@mnode spark-2.0.0-bin-hadoop2.7]# ./bin/spark-submit  --class "planJob" --master local[4]    myApp/BigData.jar
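
The same job can be submitted to the standalone cluster instead of 4 local threads; the master URL below is assumed from the spark-sql example further down:

./bin/spark-submit --class "planJob" --master spark://192.168.100.110:7077 myApp/BigData.jar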


[/usr/local/kafka_2.11-0.10.0.0/bin]# kafka-topics.sh --create --zookeeper "192.168.100.110:2181" --topic "producer_test" --partitions 10 --replication-factor 1




JVM memory options (these PermGen flags apply to Java 7 and earlier):
-server -XX:PermSize=128M -XX:MaxPermSize=256m
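
On Java 8 and later PermGen no longer exists; the rough Metaspace equivalent, carrying the same sizes over as an assumption, is:

-server -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=256m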


deleteall 'DW_GET_HONEYBEES','201701081406280000608674'
deleteall 'DW_DX_P2P','20160290000000022'






deleteall 'DW_DX_P2P','20160290000450405'

kafka-topics.sh --create --zookeeper 192.168.100.110:2181 --topic "producer_test" --partitions 10 --replication-factor 2


kafka-topics.sh --zookeeper 192.168.100.110:2181 --list


./kafka-topics.sh --describe --zookeeper "192.168.100.110:2181" --topic "producer_test"
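
To remove a topic entirely (only takes effect when delete.topic.enable=true in server.properties):

./kafka-topics.sh --delete --zookeeper 192.168.100.110:2181 --topic producer_test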


kafka-console-producer.sh --broker-list 192.168.100.110:9092 --topic producer_test    (--broker-list takes a Kafka broker address, default port 9092, not the ZooKeeper port 2181)
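
To confirm messages flow end to end, a matching console consumer (the old-consumer form, which still points at ZooKeeper on this Kafka 0.10 install):

kafka-console-consumer.sh --zookeeper 192.168.100.110:2181 --topic producer_test --from-beginning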


./kafka-server-start.sh ../config/server.properties & ./kafka-server-start.sh ../config/server-1.properties & ./kafka-server-start.sh ../config/server-2.properties &
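
For the three brokers above to coexist on one machine, each server-N.properties must differ in at least broker.id, the listener port, and log.dirs; e.g. for the second broker (exact values here are assumptions): broker.id=1, listeners=PLAINTEXT://:9093, log.dirs=/tmp/kafka-logs-1.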


bin/kafka-server-start.sh config/server.properties &  






netstat -tpln | grep <port>    find the process listening on a given port
netstat -tpln                  list all listening ports
kill -9 <pid>


Before restarting a service, first kill whatever is holding its port, then start it again.
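
For example, as a single line (9092 here is just a placeholder port):

kill -9 $(netstat -tpln | grep ':9092 ' | awk '{print $7}' | cut -d/ -f1)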


Receive messages:
bin/flume-ng agent --conf conf --conf-file conf/receive.conf --name producer -Dflume.root.logger=INFO,console  


Collect messages:
bin/flume-ng agent --conf conf --conf-file conf/collectsp2p.conf --name a1 -Dflume.root.logger=INFO,console 
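
The contents of receive.conf and collectsp2p.conf are site-specific and not shown in these notes. Purely to illustrate the source/channel/sink wiring a Flume agent config needs, a minimal sketch for an agent named producer (netcat source to logger sink; all component names and the port are hypothetical, written to a separate example file so nothing real is overwritten):

cat > conf/receive.example.conf <<'EOF'
# hypothetical minimal config -- the real receive.conf is site-specific
producer.sources  = r1
producer.channels = c1
producer.sinks    = k1

producer.sources.r1.type = netcat
producer.sources.r1.bind = 0.0.0.0
producer.sources.r1.port = 44444
producer.sources.r1.channels = c1

producer.channels.c1.type = memory

producer.sinks.k1.type = logger
producer.sinks.k1.channel = c1
EOF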


Enter the spark-sql shell:
./spark-sql --name "test" --master spark://192.168.100.110:7077
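
A single statement can also be run non-interactively; spark-sql accepts -e like the Hive CLI:

./spark-sql --master spark://192.168.100.110:7077 -e "show databases"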

\\192.168.100.253\dzjr\公共盘\大众金融内部通讯录    (internal contact list on the shared drive)


-- Per-table data size (MB) for schema sp2p628; filter numerically in HAVING
-- instead of comparing the 'MB' string, which would sort lexicographically:
select table_schema, table_name,
       concat(round(sum(data_length)/1024/1024, 2), 'MB') as data
from information_schema.tables
where table_schema = 'sp2p628' and table_type = 'BASE TABLE'
group by table_schema, table_name
having sum(data_length)/1024/1024 > 0.02;
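
To count index space as well, replace data_length with data_length + index_length in both the select and the having clause (a variant, not in the original query).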




