Linux Server Performance Log Collection and Analysis Scripts


Recently my boss asked me to analyze our server's performance data and track down its runtime bottlenecks. I ended up spending two days writing two scripts: one generates the performance logs, and the other extracts the data from them. The extracted data can then be loaded into Excel to build reports. I also picked up quite a few shell programming techniques along the way.

The first script collects system performance logs, one sample roughly every 3 seconds (the two sar calls each block for one second, plus the one-second sleep); just run it in the background.

#!/bin/sh
while :
do
    iostat -x -t >> /var/log/jciostat.log
    vmstat -t -S M >> /var/log/jcvmstat.log
    free -g >> /var/log/jcfree_g.log
    top -b -n 1 | head -5 >> /var/log/jctop.log
    sar -P ALL 1 1 | grep : | grep all | cut -d: -f2 >> /var/log/jccpu.log
    sar -n DEV 1 1 | grep : | cut -d: -f2 >> /var/log/jcnetwork.log
    if [ -f "/var/log/jciostat.log" ];then
        # once the iostat log grows past 100MB, archive all the logs and truncate them
        if [ $(stat -c "%s" /var/log/jciostat.log) -gt $((100*1024*1024)) ];then
            cd /var/log/ >/dev/null 2>&1
            tar czvf jc.log.tar.gz jciostat.log jcvmstat.log jcfree_g.log jctop.log jccpu.log jcnetwork.log > /dev/null 2>&1
            echo "" > /var/log/jciostat.log
            echo "" > /var/log/jcvmstat.log
            echo "" > /var/log/jcfree_g.log
            echo "" > /var/log/jctop.log
            echo "" > /var/log/jccpu.log
            echo "" > /var/log/jcnetwork.log
            cd - > /dev/null 2>&1
        fi
    fi
    sleep 1
done
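To start the collector, save it to a file, make it executable, and launch it in the background so it keeps sampling after you log out. The filename jc_collect.sh below is only an assumed example:

chmod u+x jc_collect.sh
nohup ./jc_collect.sh >/dev/null 2>&1 &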




The second script analyzes the log files:

#!/bin/bash
# bash is required here: the script uses arrays below.

print_help()
{
    echo "usage: analyz.sh -day <day> -start <start time> -end <end time> -<option1> <colum1,colum2...> -<option2> <colum1,colum2...> -<option3> <colum1,colum2...>"
    echo "day: YYYY-MM-DD"
    echo "start time: HH:MM:SS"
    echo "end time: HH:MM:SS"
    echo "                   1    2        3       4       5       6        7        8         9         10     11     12     13  14  15  16  17"
    echo "-vmstat:           r    b        swpd    free    buff    cache    si       so        bi        bo     in     cs     us  sy  id  wa  st"
    echo "-sda:                   rrqm/s   wrqm/s  r/s     w/s     rsec/s   wsec/s   avgrq-sz  avgqu-sz  await  svctm  %util"
    echo "-sdb:                   rrqm/s   wrqm/s  r/s     w/s     rsec/s   wsec/s   avgrq-sz  avgqu-sz  await  svctm  %util"
    echo "-network                rxpck/s  txpck/s rxkB/s  txkB/s  rxcmp/s  txcmp/s  rxmcst/s"
    echo "-cpu                    us       ni      sy      wa      st       id"
    echo "-mem:                   total    used    free    shared  buffers  cached"
    echo "-swap:                  total    used    free"
    echo "-la(load average): 1min 5min     15min"
    echo "-network <netdev:[cloudbr0/bond0/eth0...]> <colum1,colum2...>"
    echo "example: $0 -sda 1,2,3 -sdb 10,11,12 -network cloudbr0 2,3,4 -swap 3,4 -day 2016-07-08 -start 07:00:00 -end 08:00:00"
}

cp /var/log/jc*.log ./

day=""
start=""
end=""
vmstat=""
sda=""
sdb=""
mem=""
swap=""
la=""
cpu=""
network=""
netdev=""

while [ -n "$1" ]
do
    case "$1" in
    "-vmstat")
        vmstat=$2
        shift
        ;;
    "-sda")
        sda=$2
        shift
        ;;
    "-sdb")
        sdb=$2
        shift
        ;;
    "-mem")
        mem=$2
        shift
        ;;
    "-swap")
        swap=$2
        shift
        ;;
    "-la")
        la=$2
        shift
        ;;
    "-day")
        day=$2
        shift
        ;;
    "-start")
        start=$2
        shift
        ;;
    "-end")
        end=$2
        shift
        ;;
    "-cpu")
        cpu=$2
        shift
        ;;
    "-network")
        netdev=$2
        network=$3
        shift
        shift
        ;;
    "--help")
        print_help
        exit 0
        ;;
    *)
        echo "$1 is not an option"
        ;;
    esac
    shift
done

# Step 1: generate a separate CSV file for each log source
if [ ! -z $vmstat ];then
    colum_name=("CST" "vmstat_r" "vmstat_b" "vmstat_swpd" "vmstat_free" "vmstat_buff" "vmstat_cache" "vmstat_si" "vmstat_so" "vmstat_bi" "vmstat_bo" "vmstat_in" "vmstat_cs" "vmstat_us" "vmstat_sy" "vmstat_id" "vmstat_wa" "vmstat_st")
    OLD_IFS="$IFS"
    IFS=","
    colums=($vmstat)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in ${colums[@]}
    do
        if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    # strip the trailing "," separator left by the loop
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo $o_colum_name > vmstat.csv1
    # gawk '{print $o_colum}' needs the shell to expand $o_colum, which the single
    # quotes prevent, so write the expanded command into a temporary script and run that
    echo '#!/bin/sh' > vmstat.sh
    echo "grep ${colum_name[0]} jcvmstat.log | gawk '{print $o_colum}' >> vmstat.csv1" >> vmstat.sh
    chmod u+x vmstat.sh
    ./vmstat.sh
    rm -rf vmstat.sh
fi

if [ ! -z $sda ];then
    colum_name=("sda" "" "sda_rrqm/s" "sda_wrqm/s" "sda_r/s" "sda_w/s" "sda_rsec/s" "sda_wsec/s" "sda_avgrq-sz" "sda_avgqu-sz" "sda_await" "sda_svctm" "sda_%util")
    OLD_IFS="$IFS"
    IFS=","
    colums=($sda)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in ${colums[@]}
    do
        if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo $o_colum_name > sda_io.csv1
    # same temporary-script workaround as above
    echo '#!/bin/sh' > sda.sh
    echo "grep ${colum_name[0]} jciostat.log | gawk '{print $o_colum}' >> sda_io.csv1" >> sda.sh
    chmod u+x sda.sh
    ./sda.sh
    rm -rf sda.sh
fi

if [ ! -z $sdb ];then
    colum_name=("sdb" "" "sdb_rrqm/s" "sdb_wrqm/s" "sdb_r/s" "sdb_w/s" "sdb_rsec/s" "sdb_wsec/s" "sdb_avgrq-sz" "sdb_avgqu-sz" "sdb_await" "sdb_svctm" "sdb_%util")
    OLD_IFS="$IFS"
    IFS=","
    colums=($sdb)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in ${colums[@]}
    do
        if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo $o_colum_name > sdb_io.csv1
    echo '#!/bin/sh' > sdb.sh
    echo "grep ${colum_name[0]} jciostat.log | gawk '{print $o_colum}' >> sdb_io.csv1" >> sdb.sh
    chmod u+x sdb.sh
    ./sdb.sh
    rm -rf sdb.sh
fi

if [ ! -z $mem ];then
    colum_name=("Mem" "" "mem_total" "mem_used" "mem_free" "shared" "buffers" "cached")
    OLD_IFS="$IFS"
    IFS=","
    colums=($mem)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in ${colums[@]}
    do
        if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo $o_colum_name > mem_used.csv1
    echo '#!/bin/sh' > mem.sh
    echo "grep ${colum_name[0]} jcfree_g.log | gawk '{print $o_colum}' >> mem_used.csv1" >> mem.sh
    chmod u+x mem.sh
    ./mem.sh
    rm -rf mem.sh
fi

if [ ! -z $swap ];then
    colum_name=("Swap" "" "swap_total" "swap_used" "swap_free")
    OLD_IFS="$IFS"
    IFS=","
    colums=($swap)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in ${colums[@]}
    do
        if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo $o_colum_name > swap_used.csv1
    echo '#!/bin/sh' > swap.sh
    echo "grep ${colum_name[0]} jcfree_g.log | gawk '{print $o_colum}' >> swap_used.csv1" >> swap.sh
    chmod u+x swap.sh
    ./swap.sh
    rm -rf swap.sh
fi

if [ ! -z $la ];then
    # top reports the 1-, 5- and 15-minute load averages
    colum_name=("load average" "load_1min" "load_5min" "load_15min")
    OLD_IFS="$IFS"
    IFS=","
    colums=($la)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in ${colums[@]}
    do
        if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
            continue
        fi
        # no "," separator here: the cut output below keeps a comma at the end of each field
        o_colum=${o_colum}\$$c
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo $o_colum_name > load.csv1
    echo '#!/bin/sh' > la.sh
    echo "grep \"${colum_name[0]}\" jctop.log | cut -d, -f3,4,5 | cut -d: -f2 | gawk '{print $o_colum}' >> load.csv1" >> la.sh
    chmod u+x la.sh
    ./la.sh
    rm -rf la.sh
fi

if [ ! -z $cpu ];then
    colum_name=("all" "" "us" "ni" "sy" "wa" "st" "id")
    OLD_IFS="$IFS"
    IFS=","
    colums=($cpu)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in ${colums[@]}
    do
        if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]},
    done
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo $o_colum_name > cpu.csv1
    echo '#!/bin/sh' > cpu.sh
    echo "grep \"${colum_name[0]}\" jccpu.log | gawk '{print $o_colum}' >> cpu.csv1" >> cpu.sh
    chmod u+x cpu.sh
    ./cpu.sh
    rm -rf cpu.sh
fi

if [ ! -z $network ];then
    colum_name=("" "" "rxpck/s" "txpck/s" "rxkB/s" "txkB/s" "rxcmp/s" "txcmp/s" "rxmcst/s")
    OLD_IFS="$IFS"
    IFS=","
    colums=($network)
    IFS="$OLD_IFS"
    o_colum=""
    o_colum_name=""
    for c in ${colums[@]}
    do
        if [ -z "${colum_name[$c]}" ] || [ $c -ge ${#colum_name[@]} ];then
            continue
        fi
        o_colum=${o_colum}\$$c\",\"
        o_colum_name=${o_colum_name}${colum_name[$c]}"_"${netdev},
    done
    o_colum=${o_colum%\"}
    o_colum=${o_colum%,}
    o_colum=${o_colum%\"}
    o_colum_name=${o_colum_name%,}
    echo $o_colum_name > network.csv1
    echo '#!/bin/sh' > network.sh
    echo "grep \"$netdev\" jcnetwork.log | gawk '{print $o_colum}' >> network.csv1" >> network.sh
    chmod u+x network.sh
    ./network.sh
    rm -rf network.sh
fi

# Output the timestamp column (vmstat -t appends the date and time as fields 18 and 19)
echo time > time.csv1
grep "CST" jcvmstat.log | gawk '{print $18"/"$19}' >> time.csv1

# Step 2: merge the per-source CSV files column by column
i=0 # index of the next temp file
j=0 # index of the previous temp file
csv_files=`ls *.csv1 | grep -v "time.csv1"`
for f in $csv_files
do
    # there may be a comma at the end of each line; remove it
    sed -i 's/,$//g' $f
    if [ $i -eq 0 ];then # first file: join it with the time column
        gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' time.csv1 $f > tmp$j.csv2
        i=$(($i+1))
    else # join it with the previous merge result
        gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' tmp$j.csv2 $f > tmp$i.csv2
        i=$(($i+1))
        j=$(($j+1))
    fi
done
i=$(($i-1))
mv tmp$i.csv2 result.csv
sed -i 's/time/    /g' result.csv
#gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' time.csv swap_used.csv > tmp1.csv
#gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' tmp1.csv sda_used.csv > tmp2.csv
#gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' tmp2.csv sdb_used.csv > tmp3.csv
#gawk 'NR==FNR{a[FNR]=$0;next}{print a[FNR]","$0;next}' tmp3.csv load.csv > result.csv
#sed -i 's/time/    /g' result.csv

# Step 3: filter by day and, optionally, by a start/end time window
if [ ! -z $day ];then
    date_str=`echo $day | grep -E '^[0-9]{4}-[0-9]{2}-[0-9]{2}'`
    if [ ! -z "$date_str" ];then
        head -1 result.csv > $date_str.csv
        grep $date_str result.csv >> $date_str.csv
        sed -i 's/ //g' $date_str.csv
        if [ ! -z $start ] && [ ! -z $end ];then
            st=`echo $start | grep -E '^[0-9]{2}:[0-9]{2}:[0-9]{2}'`
            et=`echo $end | grep -E '^[0-9]{2}:[0-9]{2}:[0-9]{2}'`
            if [ ! -z $st ] && [ ! -z $et ];then
                stn=`echo $st | sed 's/://g'`
                etn=`echo $et | sed 's/://g'`
                filename=${date_str}-${stn}-${etn}.csv
                head -1 $date_str.csv > $filename
                lines=`cat $date_str.csv`
                for line in $lines
                do
                    # the first field is date/time, e.g. 2016-07-08/07:00:01
                    ctn=`echo $line | cut -d',' -f1 | cut -d'/' -f2 | sed 's/://g'`
                    if [ `expr $ctn + 0` -gt `expr $stn + 0` ] && [ `expr $ctn + 0` -lt `expr $etn + 0` ];then
                        echo $line >> $filename
                    fi
                done
            else
                echo "Time format error. Please input HH:MM:SS"
            fi
        fi
    else
        echo "Date format error. Please input YYYY-MM-DD"
    fi
fi

rm -rf *.csv1
rm -rf *.csv2
rm -rf jc*.log
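One thing worth noting: the temporary-script trick above exists because the single quotes in gawk '{print $o_colum}' stop the shell from expanding $o_colum. A shorter alternative (a minimal sketch of mine, not from the original post, using a hypothetical hand-built column spec) is to use double quotes: the shell expands $o_colum exactly once, and the $3/$4 inside the expanded value are not re-expanded, so gawk receives them as its own field references:

#!/bin/sh
# Hypothetical column spec, in the same format the script builds into o_colum:
o_colum='$3","$4'
# Double quotes: the shell turns this into gawk '{print $3","$4}' in one step,
# so no temporary helper script is needed.
grep Mem jcfree_g.log | gawk "{print $o_colum}" >> mem_used.csv1

On the merging side, the gawk 'NR==FNR{...}' one-liner in step 2 joins two files column by column; for files with the same number of lines it behaves like paste -d, left right.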

For example, to extract memory used and cached, swap used and free, and sda's %util for July 8, 2016 between 7:00 and 8:00 AM, run:

./analyz.sh -swap 3,4 -sda 12 -mem 3,7 -day 2016-07-08 -start 07:00:00 -end 08:00:00
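Tracing through the script, this run should end with a file named 2016-07-08-070000-080000.csv. Because the per-source CSVs are merged in ls order (alphabetical under the usual locales) and the time header is blanked out at the end, the header row should look roughly like this inferred example:

,mem_used,cached,sda_%util,swap_used,swap_free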

Open the generated CSV file in Excel and use its charting features to plot the performance curves.

