06 sample02


Collecting data

Capture data with the tail command and sink it into HDFS.
Start the agent with:

bin/flume-ng agent -c conf -f conf/tail-hdfs.conf -n a1

tail-hdfs.conf

# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /home/hadoop/log/test.log

# Describe the sink
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /flume/events/%y-%m-%d/
a1.sinks.k1.hdfs.filePrefix = events-
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = minute
a1.sinks.k1.hdfs.rollInterval = 3
a1.sinks.k1.hdfs.rollSize = 20
a1.sinks.k1.hdfs.rollCount = 5
a1.sinks.k1.hdfs.batchSize = 1
a1.sinks.k1.hdfs.useLocalTimeStamp = true
# Output file type; the default is SequenceFile, use DataStream for plain text
a1.sinks.k1.hdfs.fileType = DataStream

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
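
To exercise the exec source locally, it is enough to keep appending lines to the tailed file. A minimal sketch, assuming the same /home/hadoop/log/test.log path as the config above (the file content and one-second interval are arbitrary test values):

#!/bin/bash
# Append one timestamped line per second to the file tailed by the Flume exec source.
# Adjust the path if your agent tails a different file.
mkdir -p /home/hadoop/log
while true
do
    echo "$(date +%s) test-event" >> /home/hadoop/log/test.log
    sleep 1
done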

Moving the files to the processing directory

move_to_preworkdir.sh

#!/bin/bash
# Description:   move Flume output files to the preprocessing work directory
# Input param:   run date
# Target path:   /data/weblog/preprocess/input
# Source path:   /flume/events/

#set java env
export JAVA_HOME=/home/hadoop/apps/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

#set hadoop env
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.1
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

#directory where the log files generated by the Flume agent are stored
log_flume_dir=/flume/events/

#work directory of the preprocessing program
log_pre_input=/data/weblog/preprocess/input

#date information (yesterday)
day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

#list the log directory and check whether there are files to move
files=`hadoop fs -ls $log_flume_dir | grep $day_01 | wc -l`
if [ $files -gt 0 ]; then
    hadoop fs -mv ${log_flume_dir}/${day_01} ${log_pre_input}
    echo "successfully moved ${log_flume_dir}/${day_01} to ${log_pre_input} ....."
fi

Preprocessing the raw logs

log_preprocess.sh

#!/bin/bash
# Description:   preprocess the raw logs
# Input param:   run date
# Source path:   /data/weblog/preprocess/input
# Target path:   /data/weblog/preprocess/output

#set java env
export JAVA_HOME=/home/hadoop/apps/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

#set hadoop env
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.1
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

#class name of the preprocessing program
preprocess_class="com.hive.mr.pre.WeblogPreProcess"
#class name of the preprocessing program that outputs only valid records
pre_valid_class="com.hive.mr.pre.WeblogPreValid"
#directory holding the logs waiting to be processed
log_pre_input=/data/weblog/preprocess/input
#preprocessing output (raw) directory
log_pre_output=/data/weblog/preprocess/output
#preprocessing output (valid) directory
log_pre_valid_output=/data/weblog/preprocess/valid_output

#date information (yesterday)
day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

#check whether there is a directory to process for the day (e.g. 2016-03-18)
files=`hadoop fs -ls $log_pre_input | grep $day_01 | wc -l`
ret=1
if [ $files -gt 0 ]; then
    #submit the MapReduce job
    echo "running..    hadoop jar weblog.jar $preprocess_class $log_pre_input/$day_01 $log_pre_output/$day_01"
    hadoop jar weblog.jar $preprocess_class $log_pre_input/$day_01 $log_pre_output/$day_01
    #capture the exit code before any other command overwrites $?
    ret=$?
fi
echo "raw preprocessing exit code: $ret"
if [ $ret -eq 0 ]; then
    #submit the MapReduce job
    echo "running..    hadoop jar weblog.jar $pre_valid_class $log_pre_input/$day_01 $log_pre_valid_output/$day_01"
    hadoop jar weblog.jar $pre_valid_class $log_pre_input/$day_01 $log_pre_valid_output/$day_01
fi
#on failure
#send mail or SMS for manual intervention

log_click.sh

#!/bin/bash
# Description:   preprocess the clickstream model data
# Input param:   run date

#set java env
export JAVA_HOME=/home/hadoop/apps/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

#set hadoop env
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.1
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

#class name of the clickstream pageviews model program
click_pv_class="com.hive.mr.ClickStreamThree"
#input directory of the pageviews model program, i.e. the preprocessing output directory
log_pre_output=/data/weblog/preprocess/output
#output directory of the pageviews model program
click_pvout=/data/weblog/preprocess/click_pv_out

#class name of the clickstream visit model program
click_visit_class="com.hive.mr.ClickStreamVisit"
#input directory of the visit model program, i.e. the pageviews output directory $click_pvout
#output directory of the visit model program
click_vstout=/data/weblog/preprocess/click_visit_out

#date information (yesterday)
day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

#check whether there is a directory to process for the day (e.g. 2016-03-18)
files=`hadoop fs -ls $log_pre_output | grep $day_01 | wc -l`
ret=1
if [ $files -gt 0 ]; then
    #submit the MapReduce job
    echo "running..    hadoop jar weblog.jar $click_pv_class $log_pre_output/$day_01 $click_pvout/$day_01"
    hadoop jar weblog.jar $click_pv_class $log_pre_output/$day_01 $click_pvout/$day_01
    #capture the exit code before any other command overwrites $?
    ret=$?
fi
echo "pageviews processing exit code: $ret"
if [ $ret -eq 0 ]; then
    #submit the MapReduce job
    echo "running..    hadoop jar weblog.jar $click_visit_class $click_pvout/$day_01 $click_vstout/$day_01"
    hadoop jar weblog.jar $click_visit_class $click_pvout/$day_01 $click_vstout/$day_01
fi

Loading into the ODS

load_ods_table.sh

#!/bin/bash
# Description:   load data into the ODS
# Input param:   run date
# Data path:     /data/weblog/preprocess/output
# Target hive:   ods_weblog_origin

#set java env
export JAVA_HOME=/home/hadoop/apps/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

#set hadoop env
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.1
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

#date information (yesterday)
day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

#preprocessing output (raw) directory
log_pre_output=/data/weblog/preprocess/output
#clickstream pageviews model output directory
click_pvout="/data/weblog/preprocess/click_pv_out"
#clickstream visit model output directory
click_vstout="/data/weblog/preprocess/click_visit_out"

#target hive tables
ods_weblog_origin="ods_weblog_origin"
ods_click_pageviews="ods_click_pageviews"
ods_click_visit="ods_click_visit"

#load the raw data into ods_weblog_origin
HQL_origin="load data inpath '$log_pre_output/$day_01' into table $ods_weblog_origin partition(datestr='$day_01')"
echo $HQL_origin
/home/hadoop/apps/hive/bin/hive -e "$HQL_origin"

#load the clickstream pageviews data into ods_click_pageviews
HQL_pvs="load data inpath '$click_pvout/$day_01' into table $ods_click_pageviews partition(datestr='$day_01')"
echo $HQL_pvs
/home/hadoop/apps/hive/bin/hive -e "$HQL_pvs"

#load the clickstream visit data into ods_click_visit
HQL_vst="load data inpath '$click_vstout/$day_01' into table $ods_click_visit partition(datestr='$day_01')"
echo $HQL_vst
/home/hadoop/apps/hive/bin/hive -e "$HQL_vst"
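
The load statements above assume the partitioned ODS tables already exist. A minimal sketch of the DDL for ods_weblog_origin, with the column names taken from the detail ETL below; the string types, the target database, and the '\001' field delimiter are assumptions and must match whatever the preprocessing MapReduce job actually writes:

#!/bin/bash
# One-off setup sketch: create the raw ODS table partitioned by datestr.
/home/hadoop/apps/hive/bin/hive -e "
create table if not exists ods_weblog_origin(
  valid string,
  remote_addr string,
  remote_user string,
  time_local string,
  request string,
  status string,
  body_bytes_sent string,
  http_referer string,
  http_user_agent string)
partitioned by (datestr string)
row format delimited fields terminated by '\001';"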

ETL

etl_detail.sh

#!/bin/bash
# Target table:  zs.ods_weblog_detail
# Source table:  zs.ods_weblog_origin

### 1. load parameters
exe_hive="/home/hadoop/apps/hive/bin/hive"
if [ $# -eq 1 ]
then
    day_01=`date --date="${1}" +%Y-%m-%d`
else
    day_01=`date -d'-1 day' +%Y-%m-%d`
fi
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`
TARGET_DB=zs
TARGET_TABLE=ods_weblog_detail

### 2. define the HQL to run
HQL="insert into table zs.ods_weblog_detail partition(datestr='$day_01')
select c.valid,c.remote_addr,c.remote_user,c.time_local,
substring(c.time_local,0,10) as daystr,
substring(c.time_local,12) as tmstr,
substring(c.time_local,6,2) as month,
substring(c.time_local,9,2) as day,
substring(c.time_local,11,3) as hour,
c.request,c.status,c.body_bytes_sent,c.http_referer,c.ref_host,c.ref_path,c.ref_query,c.ref_query_id,c.http_user_agent
from
(SELECT a.valid,a.remote_addr,a.remote_user,a.time_local,
a.request,a.status,a.body_bytes_sent,a.http_referer,a.http_user_agent,
b.ref_host,b.ref_path,b.ref_query,b.ref_query_id
FROM zs.ods_weblog_origin a
LATERAL VIEW parse_url_tuple(regexp_replace(http_referer, \"\\\"\", \"\"), 'HOST', 'PATH','QUERY', 'QUERY:id') b
as ref_host, ref_path, ref_query, ref_query_id) c"

#run the hql
$exe_hive -e "$HQL"

#error handling
#on failure, send mail or SMS
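
A quick sanity check after the ETL run is to count the rows loaded into yesterday's partition. A minimal sketch, reusing the hive path and table name from the script above:

#!/bin/bash
# Count the rows written into the detail table for yesterday's partition.
day_01=`date -d'-1 day' +%Y-%m-%d`
/home/hadoop/apps/hive/bin/hive -e "select count(1) from zs.ods_weblog_detail where datestr='$day_01';"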

etl_pvs_hour.sh

#!/bin/bash
# Description:   aggregate page views (pvs) by hour
# Input param:   run date
# Target table:  zs.dw_pvs_hour
# Source table:  zs.ods_weblog_detail

### 1. load parameters
exe_hive="/home/hadoop/apps/hive/bin/hive"
if [ $# -eq 1 ]
then
    day_01=`date --date="${1}" +%Y-%m-%d`
else
    day_01=`date -d'-1 day' +%Y-%m-%d`
fi
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`
TARGET_DB=zs
TARGET_TABLE=dw_pvs_hour

### 2. define the HQL to run
HQL="insert into table zs.dw_pvs_hour partition(datestr='$day_01')
select a.month as month,a.day as day,a.hour as hour,count(1) as pvs
from zs.ods_weblog_detail a
where a.datestr='$day_01' group by a.month,a.day,a.hour;"

#run the hql
$exe_hive -e "$HQL"
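
The scripts above have to run in order: move, preprocess, clickstream, load to ODS, detail ETL, hourly pvs. If Azkaban (shown further down) is not used, a simple wrapper is enough to chain them. A minimal sketch; the script directory /home/hadoop/bin is an assumption, and the wrapper stops at the first failing step:

#!/bin/bash
# run_daily.sh - hypothetical wrapper that runs the daily chain in order.
cd /home/hadoop/bin
for step in move_to_preworkdir.sh log_preprocess.sh log_click.sh load_ods_table.sh etl_detail.sh etl_pvs_hour.sh
do
    bash $step || { echo "step $step failed"; exit 1; }
done

Scheduled from cron, for example: 0 1 * * * /bin/bash /home/hadoop/bin/run_daily.sh >> /home/hadoop/logs/run_daily.log 2>&1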

Exporting the results

sqoop_export.sh

#!/bin/bash
if [ $# -eq 1 ]
then
    cur_date=`date --date="${1}" +%Y-%m-%d`
else
    cur_date=`date -d'-1 day' +%Y-%m-%d`
fi
echo "cur_date:"${cur_date}
year=`date --date=$cur_date +%Y`
month=`date --date=$cur_date +%m`
day=`date --date=$cur_date +%d`

#target MySQL table and column list (fill in before running)
table_name=""
table_columns=""

#HDFS directory to export
hadoop_dir=/user/rd/bi_dm/app_user_experience_d/year=${year}/month=${month}/day=${day}

#MySQL connection info
mysql_db_pwd=biall_pwd2015
mysql_db_name=bi_tag_all

echo 'sqoop start'
$SQOOP_HOME/bin/sqoop export \
--connect "jdbc:mysql://hadoop03:3306/biclick" \
--username $mysql_db_name \
--password $mysql_db_pwd \
--table $table_name \
--columns $table_columns \
--fields-terminated-by '\001' \
--export-dir $hadoop_dir
echo 'sqoop end'
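
The script does not check whether the export succeeded. A hedged sketch of a check that could be placed immediately after the sqoop export command (the alerting hook is only a placeholder):

#capture sqoop's exit code before any other command overwrites $?
ret=$?
if [ $ret -ne 0 ]; then
    echo "sqoop export of $hadoop_dir failed with exit code $ret"
    #send mail / SMS here for manual intervention
    exit $ret
fi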

Azkaban jobs

load-weblog.job

# load-weblog.job
type=command
command=bash load_weblog.sh

load_weblog.sh

#!/bin/bash
#partition key in nginx time_local format, e.g. 18/Sep/2013
datestr=`date +%d/%b/%Y`
HQL="load data inpath '/azweblog/preprocessedlog/' into table ods_origin_weblog partition(datestr='${datestr:0:11}')"
echo $HQL
/home/hadoop/apps/hive/bin/hive -e "$HQL"

detail.job

# detail.job
type=command
command=bash detail.sh

detail.sh

#!/bin/bashHQL="insert into table ods_weblog_detail partition(datestr='18/Sep/2013')select c.valid,c.remote_addr,c.remote_user,c.time_local,substring(c.time_local,0,11) as daystr,substring(c.time_local,13) as timestr,substring(c.time_local,4,3) as month,substring(c.time_local,0,2) as day,substring(c.time_local,13,2) as hour,c.request,c.status,c.body_bytes_sent,c.http_referer,c.ref_host,c.ref_path,c.ref_query,c.ref_query_id,c.http_user_agentfrom(SELECT a.valid,a.remote_addr,a.remote_user,a.time_local,a.request,a.status,a.body_bytes_sent,a.http_referer,a.http_user_agent,b.ref_host,b.ref_path,b.ref_query,b.ref_query_id FROM ods_origin_weblog a LATERAL VIEW parse_url_tuple(regexp_replace(http_referer, \"\\\"\", \"\"), 'HOST', 'PATH','QUERY', 'QUERY:id') b as ref_host, ref_path, ref_query, ref_query_id) c"echo $HQL/home/hadoop/apps/hive/bin/hive -e "$HQL"

pv.job

# pv.job
type=command
command=bash pv.sh

pv.sh

#!/bin/bashHQL="insert into table dw_pvs_hour partition(datestr='18/Sep/2013')select a.month as month,a.day as day,a.hour as hour,count(1) as pvs from azdw.v_time ajoin ods_weblog_detail b on b.datestr='18/Sep/2013' and a.month=b.month and a.day=b.day and a.hour=b.hourgroup by a.month,a.day,a.hour;"echo $HQL/home/hadoop/apps/hive/bin/hive -e "$HQL"

hive-etl template

xxx.sh

#!/bin/bash
### 1. load parameters
exe_hive="hive"
if [ $# -eq 1 ]
then
    day_01=`date --date="${1}" +%Y-%m-%d`
else
    day_01=`date -d'-1 day' +%Y-%m-%d`
fi
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`
TARGET_DB=app.db
TARGET_TABLE=app_order_city_d

#load the wait4FlagFile helper defined in wait4FlagFile.sh (assumed to sit next to this script)
source ./wait4FlagFile.sh

### 2. define the HQL to run
HQL="insert overwrite table app.app_order_city_d partition (dt='${day_01}')
SELECT city_id,COUNT(1) FROM dw_order WHERE dt='${day_01}' AND order_status=5 GROUP BY city_id;"

### 3. check dependencies
wait4FlagFile HDFS /user/hive/warehouse/dw.db/dw_order/dt=${day_01} _SUCCESS 18612039282

#### 4. run the HQL
$exe_hive -e "$HQL"

#### 5. check whether the job succeeded, then touch the flag file
result=`hadoop fs -ls /user/hive/warehouse/${TARGET_DB}/${TARGET_TABLE}/dt=${day_01} | wc -l`
if [[ $result -gt 0 ]]; then
    hadoop fs -touchz /user/hive/warehouse/${TARGET_DB}/${TARGET_TABLE}/dt=${day_01}/_SUCCESS
else
    echo "failed: send warning SMS and mail"
fi

wait4FlagFile.sh

#!/bin/bash
function SendText
{
   phoneList=($1)
   msg=$2
   for iPhone in ${phoneList[@]}
   do
      php ~/global_tools/sms.php $iPhone "$msg"
   done
}

function wait4FlagFile()
{
        FILE_SYS=$1
        INPUT_PATH=$2
        FLAG_FILE_NAME=$3
        TARGET_MONITORS=$4
        clock=0
        waitingHours=0
        while true
        do
                if [ "$FILE_SYS" == "HDFS" ]
                then
                        hadoop fs -ls ${INPUT_PATH}/${FLAG_FILE_NAME} && result=$? || result=$?
                        echo ${INPUT_PATH}/${FLAG_FILE_NAME}
                else
                        if [ -e ${INPUT_PATH}/${FLAG_FILE_NAME} ]
                        then
                                result=0
                        else
                                result=1
                        fi
                fi
                if [ $result -eq 0 ]
                then
                        if [ $waitingHours \> 0.5 ]
                        then
                                SendText "$TARGET_MONITORS" "file ${INPUT_PATH}/${FLAG_FILE_NAME} has arrived"
                        fi
                        break
                else
                        echo 'Source file not ready, sleep 10 secs'
                        sleep 10
                        ((clock=$clock+10))
                        if [ $[$clock%1500] -eq 0 ]
                        then
                                waitingHours=$(echo "scale=1;$clock/3600"|bc)
                                echo "Already waiting for ${waitingHours} hours!"
                                #SendText "$TARGET_MONITORS" "file ${INPUT_PATH}/${FLAG_FILE_NAME} has been waiting for ${waitingHours} hours"
                        fi
                fi
        done
}

#wait4FlagFile HDFS /user/hive/warehouse/dw.db/t_order/dt=2015-10-10 _SUCCESS 18612039282

uploadFile2Hdfs.v2.sh

#!/bin/bash

#set java env
export JAVA_HOME=/home/hadoop/app/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

#set hadoop env
export HADOOP_HOME=/home/hadoop/app/hadoop-2.6.4
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

#Problems with version 1:
#  the files get uploaded to the Hadoop cluster, but the originals are left behind. How to handle that?
#  the log files are all named xxxx.log1, so uploading again fails because the file already exists on hdfs. How to handle that?
#How version 2 solves them:
#  1. first move the files that need uploading into a "to upload" directory
#  2. while moving them, rename each file to a fixed pattern
#     /export/software/hadoop.log1   /export/data/click_log/xxxxx_click_log_{date}

#directory where the log files are written
log_src_dir=/home/hadoop/logs/log/
#directory holding the files waiting to be uploaded
log_toupload_dir=/home/hadoop/logs/toupload/
#hdfs root path the log files are uploaded to
hdfs_root_dir=/data/clickLog/20151226/

#print environment info
echo "envs: hadoop_home: $HADOOP_HOME"

#scan the log directory for files that need uploading
echo "log_src_dir:"$log_src_dir
ls $log_src_dir | while read fileName
do
    if [[ "$fileName" == access.log.* ]]; then
    # if [ "access.log" = "$fileName" ];then
        date=`date +%Y_%m_%d_%H_%M_%S`
        #move the file to the "to upload" directory and rename it
        echo "moving $log_src_dir$fileName to $log_toupload_dir"xxxxx_click_log_$fileName"$date"
        mv $log_src_dir$fileName $log_toupload_dir"xxxxx_click_log_$fileName"$date
        #record the new path in the day's "willDoing" list
        echo $log_toupload_dir"xxxxx_click_log_$fileName"$date >> $log_toupload_dir"willDoing."$date
    fi
done

#walk the "willDoing" lists that have not been copied or finished yet
ls $log_toupload_dir | grep will | grep -v "_COPY_" | grep -v "_DONE_" | while read line
do
    echo "toupload is in file:"$line
    #mark the list as being copied
    mv $log_toupload_dir$line $log_toupload_dir$line"_COPY_"
    #upload every file recorded in the list (inner variable renamed so it does not shadow the outer $line)
    cat $log_toupload_dir$line"_COPY_" | while read upload_file
    do
        echo "puting...$upload_file to hdfs path.....$hdfs_root_dir"
        hadoop fs -put $upload_file $hdfs_root_dir
    done
    #mark the list as done
    mv $log_toupload_dir$line"_COPY_" $log_toupload_dir$line"_DONE_"
done
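
To try the uploader without real rolled logs, a few fake files matching its access.log.* pattern are enough. A minimal sketch, assuming the same /home/hadoop/logs/log/ source directory as the script above; the file contents are throwaway test data:

#!/bin/bash
# Create a few fake rolled log files so the uploader has something to pick up.
mkdir -p /home/hadoop/logs/log /home/hadoop/logs/toupload
for i in 1 2 3
do
    echo "$(date) test click event $i" > /home/hadoop/logs/log/access.log.$i
done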