Clickstream Log Analysis Project: Hands-on Development Workflow


step-1: collect the data with Flume
Use Flume to collect the logs into /flume/events/%y-%m-%d/ on HDFS.
Flume configuration:

tail-hdfs.conf reads the data with the tail command and sinks it to HDFS.
Start command: bin/flume-ng agent -c conf -f conf/tail-hdfs.conf -n a1

# Name the components on this agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# Describe/configure the source
a1.sources.r1.type = exec
a1.sources.r1.command = tail -F /home/hadoop/log/test.log
a1.sources.r1.channels = c1

# Describe the sink
a1.sinks.k1.type = hdfs
a1.sinks.k1.channel = c1
a1.sinks.k1.hdfs.path = /flume/events/%y-%m-%d/
a1.sinks.k1.hdfs.filePrefix = events-
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
a1.sinks.k1.hdfs.roundUnit = minute
a1.sinks.k1.hdfs.rollInterval = 3
a1.sinks.k1.hdfs.rollSize = 20
a1.sinks.k1.hdfs.rollCount = 5
a1.sinks.k1.hdfs.batchSize = 1
a1.sinks.k1.hdfs.useLocalTimeStamp = true
# file type of the generated files; the default is SequenceFile, DataStream writes plain text
a1.sinks.k1.hdfs.fileType = DataStream

# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
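Before wiring up the rest of the pipeline it helps to confirm that events are actually landing in HDFS. A quick smoke test is sketched below; the test.log path comes from the exec source above, while the generator loop is only an assumed way to produce sample lines.

# append some test lines so tail -F has something to ship (hypothetical generator)
while true; do echo "$(date) test click event" >> /home/hadoop/log/test.log; sleep 1; done &

# check that Flume is rolling files into today's directory and peek at one of them
hadoop fs -ls /flume/events/$(date +%y-%m-%d)/
hadoop fs -cat /flume/events/$(date +%y-%m-%d)/events-* | head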

step-2: move the data to the preprocessing working directory
Move script:

#!/bin/bash
#
# ===========================================================================
# Program name:
# Description:     move files to the preprocessing working directory
# Input param:     run date
# Target path:     /data/weblog/preprocess/input
# Data source:     /data/flumedata
# Author:          ljt
# Created:         2016-12-21
# Version:         v1.0
# Code review:
# Modified by:
# Modified date:
# Modified reason:
# Change list:
# ===========================================================================

# set java env
export JAVA_HOME=/home/hadoop/apps/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

# set hadoop env
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.1
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

# directory where the Flume-collected log files are stored
log_flume_dir=/data/flumedata/

# working directory of the preprocessing program
log_pre_input=/data/weblog/preprocess/input

# get the date information
day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

# check the log directory for files that need to be moved
files=`hadoop fs -ls $log_flume_dir | grep $day_01 | wc -l`
if [ $files -gt 0 ]; then
    hadoop fs -mv ${log_flume_dir}/${day_01} ${log_pre_input}
    echo "successfully moved ${log_flume_dir}/${day_01} to ${log_pre_input} ....."
fi
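A manual sanity check before trusting the daily run (paths taken from the script above; these are just standard hadoop fs listings, not part of the original project):

day_01=$(date -d'-1 day' +%Y-%m-%d)
hadoop fs -ls /data/flumedata/ | grep "$day_01"        # is yesterday's directory still waiting?
hadoop fs -ls /data/weblog/preprocess/input            # did it arrive after the move?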

step-3: run the preprocessing MapReduce jobs
In the daily run, log_preprocess.sh (further below) executes first to clean the raw logs; log_click.sh then builds the pageviews and visit click-stream models from its output.
log_click.sh:

#!/bin/bash
#
# ===========================================================================
# Program name:
# Description:     preprocess the click-stream model data (pageviews and visit)
# Input param:     run date
# Data source:     /data/weblog/preprocess/output
# Target paths:    /data/weblog/preprocess/click_pv_out, /data/weblog/preprocess/click_visit_out
# Author:          ljt
# Created:         2016-12-21
# Version:         v1.0
# Code review:
# Modified by:
# Modified date:
# Modified reason:
# Change list:
# ===========================================================================

# set java env
export JAVA_HOME=/home/hadoop/apps/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

# set hadoop env
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.1
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

# class name of the click-stream pageviews model preprocessing program
click_pv_class="cn.itcast.bigdata.hive.mr.ClickStreamThree"

# input directory of the pageviews model program, i.e. the preprocessing (valid) output directory
log_pre_output=/data/weblog/preprocess/output

# output directory of the pageviews model preprocessing program
click_pvout=/data/weblog/preprocess/click_pv_out

# class name of the click-stream visit model preprocessing program
click_visit_class="cn.itcast.bigdata.hive.mr.ClickStreamVisit"

# the input directory of the visit model program is the pageviews output directory $click_pvout
# output directory of the visit model preprocessing program
click_vstout=/data/weblog/preprocess/click_visit_out

# get the date information
day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

# check whether there is a directory to process for the day (e.g. 2016-03-18)
files=`hadoop fs -ls $log_pre_output | grep $day_01 | wc -l`
if [ $files -gt 0 ]; then
    # submit the MapReduce job
    echo "running..    hadoop jar weblog.jar $click_pv_class $log_pre_output/$day_01 $click_pvout/$day_01"
    hadoop jar weblog.jar $click_pv_class $log_pre_output/$day_01 $click_pvout/$day_01
fi
pv_ret=$?
echo "pageviews job exit code: $pv_ret"
if [ $pv_ret -eq 0 ]; then
    # submit the MapReduce job
    echo "running..    hadoop jar weblog.jar $click_visit_class $click_pvout/$day_01 $click_vstout/$day_01"
    hadoop jar weblog.jar $click_visit_class $click_pvout/$day_01 $click_vstout/$day_01
fi

log_preprocess.sh

#!/bin/bash
#
# ===========================================================================
# Program name:
# Description:     preprocess the raw logs
# Input param:     run date
# Data source:     /data/weblog/preprocess/input
# Target paths:    /data/weblog/preprocess/output, /data/weblog/preprocess/valid_output
# Author:          ljt
# Created:         2016-12-21
# Version:         v1.0
# Code review:
# Modified by:
# Modified date:
# Modified reason:
# Change list:
# ===========================================================================

# set java env
export JAVA_HOME=/home/hadoop/apps/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

# set hadoop env
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.1
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

# class name of the preprocessing program
preprocess_class="cn.itcast.bigdata.hive.mr.pre.WeblogPreProcess"

# class name of the preprocessing program that outputs only valid records
pre_valid_class="cn.itcast.bigdata.hive.mr.pre.WeblogPreValid"

# directory holding the logs waiting to be processed
log_pre_input=/data/weblog/preprocess/input

# preprocessing output (raw) directory
log_pre_output=/data/weblog/preprocess/output

# preprocessing output (valid) directory
log_pre_valid_output=/data/weblog/preprocess/valid_output

# get the date information
day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

# check whether there is a directory to process for the day (e.g. 2016-03-18)
files=`hadoop fs -ls $log_pre_input | grep $day_01 | wc -l`
if [ $files -gt 0 ]; then
    # submit the MapReduce job
    echo "running..    hadoop jar weblog.jar $preprocess_class $log_pre_input/$day_01 $log_pre_output/$day_01"
    hadoop jar weblog.jar $preprocess_class $log_pre_input/$day_01 $log_pre_output/$day_01
fi
raw_ret=$?
echo "raw preprocessing exit code: $raw_ret"
if [ $raw_ret -eq 0 ]; then
    # submit the MapReduce job
    echo "running..    hadoop jar weblog.jar $pre_valid_class $log_pre_input/$day_01 $log_pre_valid_output/$day_01"
    hadoop jar weblog.jar $pre_valid_class $log_pre_input/$day_01 $log_pre_valid_output/$day_01
fi

# on failure: send an email or SMS so someone can intervene manually

step-4: load the data into the ODS (Operational Data Store) layer of the data warehouse
Build the warehouse: connect to Hive, create the database, and define the ODS and DW tables.

hive> create database click_streaming;
OK
Time taken: 0.521 seconds
hive> show databases;
OK
click_streaming
default
Time taken: 0.022 seconds, Fetched: 2 row(s)
hive> use click_streaming;

-- Data warehouse DDL

-- ODS source-mirroring table
drop table if exists ods_weblog_origin;
create table ods_weblog_origin(
valid string,
remote_addr string,
remote_user string,
time_local string,
request string,
status string,
body_bytes_sent string,
http_referer string,
http_user_agent string)
partitioned by (datestr string)
row format delimited
fields terminated by '\001';

-- ODS click-stream pageviews table
drop table if exists ods_click_pageviews;
create table ods_click_pageviews(
Session string,
remote_addr string,
-- one extra field
user string,
time_local string,
request string,
visit_step string,
page_staylong string,
http_referer string,
http_user_agent string,
body_bytes_sent string,
status string)
partitioned by (datestr string)
row format delimited
fields terminated by '\001';

-- click-stream visit table
drop table if exists ods_click_visit;
create table ods_click_visit(
session     string,
remote_addr string,
inTime      string,
outTime     string,
inPage      string,
outPage     string,
referal     string,
pageVisits  int)
partitioned by (datestr string);

-- ETL detail wide table
drop table ods_weblog_detail;
create table ods_weblog_detail(
valid           string, -- valid flag
remote_addr     string, -- source IP
remote_user     string, -- user identifier
time_local      string, -- full access time
daystr          string, -- access date
timestr         string, -- access time
month           string, -- access month
day             string, -- access day
hour            string, -- access hour
request         string, -- requested url
status          string, -- response code
body_bytes_sent string, -- bytes transferred
http_referer    string, -- referer url
ref_host        string, -- referer host
ref_path        string, -- referer path
ref_query       string, -- referer query string
ref_query_id    string, -- value of the referer query parameter
http_user_agent string  -- client user agent
)
partitioned by(datestr string);

-- time dimension table
create table v_time(year string,month string,day string,hour string)
row format delimited
fields terminated by ',';
load data local inpath '/home/hadoop/v_time.txt' into table v_time;

-- hourly PV statistics table
drop table dw_pvs_hour;
create table dw_pvs_hour(month string,day string,hour string,pvs bigint) partitioned by(datestr string);

-- daily average PV per user
drop table dw_avgpv_user_d;
create table dw_avgpv_user_d(day string,avgpv string);

-- referer-dimension PV statistics table (hourly granularity)
drop table zs.dw_pvs_referer_h;
create table zs.dw_pvs_referer_h(
referer_url string,
referer_host string,
month string,
day string,
hour string,
pv_referer_cnt bigint) partitioned by(datestr string);

-- hourly top-N referer hosts by PV
drop table zs.dw_pvs_refhost_topn_h;
create table zs.dw_pvs_refhost_topn_h(
hour string,
toporder string,
ref_host string,
ref_host_cnts string) partitioned by(datestr string);
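The DDL defines dw_avgpv_user_d, but none of the step-5 scripts in this post populate it. As a rough idea of the kind of statement that could fill it, here is a hedged sketch in the same hive -e style as the other scripts; using remote_addr as the "user" key and the zs database prefix are assumptions, not something the original spells out.

#!/bin/bash
# Sketch only: average PV per user for one day, derived from the detail wide table.
exe_hive="/home/hadoop/apps/hive/bin/hive"
day_01=`date -d'-1 day' +%Y-%m-%d`
HQL="insert into table zs.dw_avgpv_user_d
select '$day_01', cast(avg(t.pvs) as string)
from (select remote_addr, count(1) as pvs
      from zs.ods_weblog_detail
      where datestr='$day_01'
      group by remote_addr) t"
$exe_hive -e "$HQL"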
Load-into-ODS script:

#!/bin/bash
#
# ===========================================================================
# Program name:
# Description:     load data into the ODS layer
# Input param:     run date
# Data path:       /data/weblog/preprocess/output
# Target hive:     shizhan.ods_weblog_origin
# Author:          ljt
# Created:         2016-12-21
# Version:         v1.0
# Code review:
# Modified by:
# Modified date:
# Modified reason:
# Change list:
# ===========================================================================

# set java env
export JAVA_HOME=/home/hadoop/apps/jdk1.7.0_51
export JRE_HOME=${JAVA_HOME}/jre
export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
export PATH=${JAVA_HOME}/bin:$PATH

# set hadoop env
export HADOOP_HOME=/home/hadoop/apps/hadoop-2.6.1
export PATH=${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH

# get the date information
day_01=`date -d'-1 day' +%Y-%m-%d`
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`

# preprocessing output (raw) directory
log_pre_output=/data/weblog/preprocess/output

# output directory of the click-stream pageviews model preprocessing program
click_pvout="/data/weblog/preprocess/click_pv_out"

# output directory of the click-stream visit model preprocessing program
click_vstout="/data/weblog/preprocess/click_visit_out"

# target hive tables
ods_weblog_origin="shizhan.ods_weblog_origin"
ods_click_pageviews="shizhan.ods_click_pageviews"
ods_click_visit="shizhan.ods_click_visit"

# load the raw data into the ods_weblog_origin table
HQL_origin="load data inpath '$log_pre_output/$day_01' into table $ods_weblog_origin partition(datestr='$day_01')"
echo $HQL_origin
/home/hadoop/apps/hive/bin/hive -e "$HQL_origin"

# load the click-stream pageviews model data
HQL_pvs="load data inpath '$click_pvout/$day_01' into table $ods_click_pageviews partition(datestr='$day_01')"
echo $HQL_pvs
/home/hadoop/apps/hive/bin/hive -e "$HQL_pvs"

# load the click-stream visit model data
HQL_vst="load data inpath '$click_vstout/$day_01' into table $ods_click_visit partition(datestr='$day_01')"
echo $HQL_vst
/home/hadoop/apps/hive/bin/hive -e "$HQL_vst"
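After the load script runs, a quick way to confirm the partitions landed is with standard Hive commands; the shizhan database name is taken from the script above.

day_01=`date -d'-1 day' +%Y-%m-%d`
/home/hadoop/apps/hive/bin/hive -e "show partitions shizhan.ods_weblog_origin"
/home/hadoop/apps/hive/bin/hive -e "select count(1) from shizhan.ods_weblog_origin where datestr='$day_01'"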

step-5: ETL (extract, transform, load) processing

etl_detail.sh

#!/bin/bash
# . /home/anjianbing/soft/functions/wait4FlagFile.sh
# ===========================================================================
# Program name:
# Description:     extract the detail wide table
# Input param:     run date
# Target table:    zs.ods_weblog_detail
# Source table:    zs.ods_weblog_origin
# Author:          ljt
# Created:         2016-12-21
# Version:         v1.0
# Code review:
# Modified by:
# Modified date:
# Modified reason:
# Change list:
# ===========================================================================

### 1. load parameters
exe_hive="/home/hadoop/apps/hive/bin/hive"
if [ $# -eq 1 ]
then
    day_01=`date --date="${1}" +%Y-%m-%d`
else
    day_01=`date -d'-1 day' +%Y-%m-%d`
fi
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`
TARGET_DB=zs
TARGET_TABLE=ods_weblog_detail

### 2. define the HQL to execute
HQL="insert into table zs.ods_weblog_detail partition(datestr='$day_01')
select c.valid,c.remote_addr,c.remote_user,c.time_local,
substring(c.time_local,0,10) as daystr,
substring(c.time_local,12) as tmstr,
substring(c.time_local,6,2) as month,
substring(c.time_local,9,2) as day,
substring(c.time_local,11,3) as hour,
c.request,c.status,c.body_bytes_sent,c.http_referer,c.ref_host,c.ref_path,c.ref_query,c.ref_query_id,c.http_user_agent
from
(SELECT a.valid,a.remote_addr,a.remote_user,a.time_local,a.request,a.status,a.body_bytes_sent,a.http_referer,a.http_user_agent,
b.ref_host,b.ref_path,b.ref_query,b.ref_query_id
FROM zs.ods_weblog_origin a
LATERAL VIEW parse_url_tuple(regexp_replace(http_referer, \"\\\"\", \"\"), 'HOST', 'PATH','QUERY', 'QUERY:id') b
as ref_host, ref_path, ref_query, ref_query_id) c"

# execute the hql
$exe_hive -e "$HQL"

# error handling
# on failure, send an email or SMS
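A note on the query: parse_url_tuple is a built-in Hive UDTF that splits a URL into the requested components (here HOST, PATH, QUERY and the id query parameter), and LATERAL VIEW joins those columns back onto every row of ods_weblog_origin; the regexp_replace call simply strips the double quotes that the log format leaves around the referer value.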

etl_pvs_hour.sh

#!/bin/bash
# . /home/anjianbing/soft/functions/wait4FlagFile.sh
# ===========================================================================
# Program name:
# Description:     compute hourly PV statistics
# Input param:     run date
# Target table:    zs.dw_pvs_hour
# Source table:    zs.ods_weblog_detail
# Author:          ljt
# Created:         2016-12-21
# Version:         v1.0
# Code review:
# Modified by:
# Modified date:
# Modified reason:
# Change list:
# ===========================================================================

### 1. load parameters
exe_hive="/home/hadoop/apps/hive/bin/hive"
if [ $# -eq 1 ]
then
    day_01=`date --date="${1}" +%Y-%m-%d`
else
    day_01=`date -d'-1 day' +%Y-%m-%d`
fi
syear=`date --date=$day_01 +%Y`
smonth=`date --date=$day_01 +%m`
sday=`date --date=$day_01 +%d`
TARGET_DB=zs
TARGET_TABLE=dw_pvs_hour

HQL="insert into table zs.dw_pvs_hour partition(datestr='$day_01')
select a.month as month,a.day as day,a.hour as hour,count(1) as pvs
from zs.ods_weblog_detail a
where a.datestr='$day_01' group by a.month,a.day,a.hour;"

# execute the hql
$exe_hive -e "$HQL"
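The step-4 DDL also defines zs.dw_pvs_referer_h, but the post stops at the hourly PV script. Following the same pattern, a possible script for the referer-dimension table might look like the sketch below; the column mapping from ods_weblog_detail is an assumption, not part of the original project code.

#!/bin/bash
# Sketch only: hourly PV counts per referer, modeled on etl_pvs_hour.sh above.
exe_hive="/home/hadoop/apps/hive/bin/hive"
day_01=`date -d'-1 day' +%Y-%m-%d`
HQL="insert into table zs.dw_pvs_referer_h partition(datestr='$day_01')
select a.http_referer, a.ref_host, a.month, a.day, a.hour, count(1) as pv_referer_cnt
from zs.ods_weblog_detail a
where a.datestr='$day_01' and a.ref_host is not null
group by a.http_referer, a.ref_host, a.month, a.day, a.hour"
$exe_hive -e "$HQL"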

step-6: export the results with Sqoop

sqoop_export.sh

#!/bin/bash

if [ $# -eq 1 ]
then
    cur_date=`date --date="${1}" +%Y-%m-%d`
else
    cur_date=`date -d'-1 day' +%Y-%m-%d`
fi
echo "cur_date:"${cur_date}
year=`date --date=$cur_date +%Y`
month=`date --date=$cur_date +%m`
day=`date --date=$cur_date +%d`

table_name=""
table_columns=""
hadoop_dir=/user/rd/bi_dm/app_user_experience_d/year=${year}/month=${month}/day=${day}
mysql_db_pwd=biall_pwd2015
mysql_db_name=bi_tag_all

echo 'sqoop start'
$SQOOP_HOME/bin/sqoop export \
--connect "jdbc:mysql://hadoop03:3306/biclick" \
--username $mysql_db_name \
--password $mysql_db_pwd \
--table $table_name \
--columns $table_columns \
--fields-terminated-by '\001' \
--export-dir $hadoop_dir
echo 'sqoop end'
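To tie the steps together, the daily run order implied by the scripts above is: move the Flume output, preprocess the raw logs, build the click-stream models, load the ODS tables, run the ETL scripts, and export with Sqoop. A wrapper such as the following could drive that sequence; the move and load script file names and the BIN path are assumptions, since the post never names those files.

#!/bin/bash
# Sketch of a daily driver; file names and BIN path are assumed, not from the original post.
BIN=/home/hadoop/bin
set -e                                    # stop at the first failing step
bash $BIN/move_to_preprocess.sh           # step-2: move Flume output to the input dir (assumed name)
bash $BIN/log_preprocess.sh               # step-3: clean the raw logs
bash $BIN/log_click.sh                    # step-3: build pageviews/visit models
bash $BIN/load_ods.sh                     # step-4: load the ODS tables (assumed name)
bash $BIN/etl_detail.sh                   # step-5: build the detail wide table
bash $BIN/etl_pvs_hour.sh                 # step-5: hourly PV statistics
bash $BIN/sqoop_export.sh                 # step-6: export results to MySQL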