elk的安装使用(二)
来源:互联网 发布:电子滚动屏幕软件 编辑:程序博客网 时间:2024/05/29 04:54
错误1:
[2017-11-25T13:58:31,795][ERROR][o.e.b.Bootstrap ] [node131] node validation exception
[4] bootstrap checks failed
[1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
[2]: max number of threads [1024] for user [elkuser] is too low, increase to at least [4096]
[3]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
[4]: system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk
[2017-11-25T13:58:31,827][INFO ][o.e.n.Node ] [node131] stopping ...
vm.max_map_count 不低于 262144
vm.max_map_count 表示一个进程可以拥有的最大内存映射区域(memory map areas)数量,它是一个内核参数。elasticsearch 默认要求 vm.max_map_count 不低于 262144。
[root@hadoop001 ~]# sysctl -w vm.max_map_count=262144
[root@hadoop001 ~]# echo "vm.max_map_count=262144" >> /etc/sysctl.conf
[root@hadoop001 ~]# sysctl -p
nofile 不低于 65536
nofile 表示进程允许打开的最大文件数。elasticsearch 进程要求可以打开的最大文件数不低于 65536。
[root@hadoop001 ~]# echo "* soft nofile 65536" >> /etc/security/limits.conf
[root@hadoop001 ~]# echo "* hard nofile 131072" >> /etc/security/limits.conf
nproc 不低于 4096
nproc 表示最大线程数。elasticsearch 要求该用户可创建的最大线程数不低于 4096(与上面错误 [2] 的提示一致)。
[root@hadoop001 ~]# echo "elkuser soft nproc 4096" >> /etc/security/limits.conf
[root@hadoop001 ~]# echo "elkuser hard nproc 4096" >> /etc/security/limits.conf
[elkuser@hadoop001 elasticsearch-6.0.0]$ ulimit -u 4096
[root@hadoop001 ~]# reboot
4.Logstash安装
[root@hadoop001 software]# tar -xzvf logstash-6.0.0.tar.gz
[root@hadoop001 software]# cd logstash-6.0.0
[root@hadoop001 logstash-6.0.0]# ll
total 100
drwxr-xr-x. 2 root root 4096 Nov 25 14:55 bin
drwxr-xr-x. 2 root root 4096 Nov 25 14:55 config
-rw-r--r--. 1 root root 2276 Nov 11 03:59 CONTRIBUTORS
drwxr-xr-x. 2 root root 4096 Nov 11 03:59 data
-rw-r--r--. 1 root root 3959 Nov 11 04:02 Gemfile
-rw-r--r--. 1 root root 21265 Nov 11 03:59 Gemfile.jruby-2.3.lock
drwxr-xr-x. 5 root root 4096 Nov 25 14:55 lib
-rw-r--r--. 1 root root 589 Nov 11 03:59 LICENSE
drwxr-xr-x. 4 root root 4096 Nov 25 14:55 logstash-core
drwxr-xr-x. 3 root root 4096 Nov 25 14:55 logstash-core-plugin-api
drwxr-xr-x. 4 root root 4096 Nov 25 14:55 modules
-rw-rw-r--. 1 root root 26953 Nov 11 04:02 NOTICE.TXT
drwxr-xr-x. 3 root root 4096 Nov 25 14:55 tools
drwxr-xr-x. 4 root root 4096 Nov 25 14:55 vendor
[root@hadoop001 logstash-6.0.0]#
[root@hadoop001 logstash-6.0.0]# vi logstash-nginx-access-log.conf
input {
file {
path => ["/usr/local/nginx/logs/access.log"]
type => "nginx_access"
start_position => "beginning"
}
}
filter {
grok {
match => {
"message" => '%{IPORHOST:remote_ip} - %{DATA:user_name} \[%{HTTPDATE:time}\] "%{WORD:request_action} %{DATA:request} HTTP/%{NUMBER:http_version}" %{NUMBER:response} %{NUMBER:bytes} "%{DATA:referrer}" "%{DATA:agent}"'
}
}
date {
match => [ "time", "dd/MMM/yyyy:HH:mm:ss Z" ]
locale => "en"
}}
output {
elasticsearch {
hosts => ["192.168.137.131:9200"]
index => "logstash-nginx-access-log"
}
}
启动logstash:
[root@hadoop001 logstash-6.0.0]# nohup bin/logstash -f logstash-nginx-access-log.conf &
[1] 3547
[root@hadoop001 logstash-6.0.0]# nohup: ignoring input and appending output to `nohup.out'
[root@hadoop001 logstash-6.0.0]# kill -9 $(pgrep -f logstash)
[root@hadoop001 logstash-6.0.0]#
[root@hadoop001 logstash-6.0.0]# tail -f nohup.out
5.Kibana安装
[root@hadoop001 software]# tar -xzvf kibana-6.0.0-linux-x86_64.tar.gz
[root@hadoop001 software]# cd kibana-6.0.0-linux-x86_64
[root@hadoop001 kibana-6.0.0-linux-x86_64]# ll
total 864
drwxr-xr-x. 2 1000 1000 4096 Nov 11 02:50 bin
drwxrwxr-x. 2 1000 1000 4096 Nov 11 02:50 config
drwxrwxr-x. 2 1000 1000 4096 Nov 11 02:50 data
-rw-rw-r--. 1 1000 1000 562 Nov 11 02:50 LICENSE.txt
drwxrwxr-x. 6 1000 1000 4096 Nov 11 02:50 node
drwxrwxr-x. 620 1000 1000 24576 Nov 11 02:50 node_modules
-rw-rw-r--. 1 1000 1000 799543 Nov 11 02:50 NOTICE.txt
drwxrwxr-x. 3 1000 1000 4096 Nov 11 02:50 optimize
-rw-rw-r--. 1 1000 1000 721 Nov 11 02:50 package.json
drwxrwxr-x. 2 1000 1000 4096 Nov 11 02:50 plugins
-rw-rw-r--. 1 1000 1000 4654 Nov 11 02:50 README.txt
drwxr-xr-x. 14 1000 1000 4096 Nov 11 02:50 src
drwxrwxr-x. 5 1000 1000 4096 Nov 11 02:50 ui_framework
drwxr-xr-x. 2 1000 1000 4096 Nov 11 02:50 webpackShims
[root@hadoop001 kibana-6.0.0-linux-x86_64]# cd config/
[root@hadoop001 config]# ll
total 8
-rw-r--r--. 1 1000 1000 4649 Nov 11 02:50 kibana.yml
[root@hadoop001 config]# vi kibana.yml
server.host: "192.168.137.131"
elasticsearch.url: "http://192.168.137.131:9200"
[root@hadoop001 config]# cd ../
[root@hadoop001 kibana-6.0.0-linux-x86_64]# bin/kibana
http://192.168.137.131:5601
[2017-11-25T13:58:31,795][ERROR][o.e.b.Bootstrap ] [node131] node validation exception
[4] bootstrap checks failed
[1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
[2]: max number of threads [1024] for user [elkuser] is too low, increase to at least [4096]
[3]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
[4]: system call filters failed to install; check the logs and fix your configuration or disable system call filters at your own risk
[2017-11-25T13:58:31,827][INFO ][o.e.n.Node ] [node131] stopping ...
vm.max_map_count 不低于 262144
vm.max_map_count 表示一个进程可以拥有的最大内存映射区域(memory map areas)数量,它是一个内核参数。elasticsearch 默认要求 vm.max_map_count 不低于 262144。
[root@hadoop001 ~]# sysctl -w vm.max_map_count=262144
[root@hadoop001 ~]# echo "vm.max_map_count=262144" >> /etc/sysctl.conf
[root@hadoop001 ~]# sysctl -p
nofile 不低于 65536
nofile 表示进程允许打开的最大文件数。elasticsearch 进程要求可以打开的最大文件数不低于 65536。
[root@hadoop001 ~]# echo "* soft nofile 65536" >> /etc/security/limits.conf
[root@hadoop001 ~]# echo "* hard nofile 131072" >> /etc/security/limits.conf
nproc 不低于 4096
nproc 表示最大线程数。elasticsearch 要求该用户可创建的最大线程数不低于 4096(与上面错误 [2] 的提示一致)。
[root@hadoop001 ~]# echo "elkuser soft nproc 4096" >> /etc/security/limits.conf
[root@hadoop001 ~]# echo "elkuser hard nproc 4096" >> /etc/security/limits.conf
[elkuser@hadoop001 elasticsearch-6.0.0]$ ulimit -u 4096
[root@hadoop001 ~]# reboot
4.Logstash安装
[root@hadoop001 software]# tar -xzvf logstash-6.0.0.tar.gz
[root@hadoop001 software]# cd logstash-6.0.0
[root@hadoop001 logstash-6.0.0]# ll
total 100
drwxr-xr-x. 2 root root 4096 Nov 25 14:55 bin
drwxr-xr-x. 2 root root 4096 Nov 25 14:55 config
-rw-r--r--. 1 root root 2276 Nov 11 03:59 CONTRIBUTORS
drwxr-xr-x. 2 root root 4096 Nov 11 03:59 data
-rw-r--r--. 1 root root 3959 Nov 11 04:02 Gemfile
-rw-r--r--. 1 root root 21265 Nov 11 03:59 Gemfile.jruby-2.3.lock
drwxr-xr-x. 5 root root 4096 Nov 25 14:55 lib
-rw-r--r--. 1 root root 589 Nov 11 03:59 LICENSE
drwxr-xr-x. 4 root root 4096 Nov 25 14:55 logstash-core
drwxr-xr-x. 3 root root 4096 Nov 25 14:55 logstash-core-plugin-api
drwxr-xr-x. 4 root root 4096 Nov 25 14:55 modules
-rw-rw-r--. 1 root root 26953 Nov 11 04:02 NOTICE.TXT
drwxr-xr-x. 3 root root 4096 Nov 25 14:55 tools
drwxr-xr-x. 4 root root 4096 Nov 25 14:55 vendor
[root@hadoop001 logstash-6.0.0]#
[root@hadoop001 logstash-6.0.0]# vi logstash-nginx-access-log.conf
input {
file {
path => ["/usr/local/nginx/logs/access.log"]
type => "nginx_access"
start_position => "beginning"
}
}
filter {
grok {
match => {
"message" => '%{IPORHOST:remote_ip} - %{DATA:user_name} \[%{HTTPDATE:time}\] "%{WORD:request_action} %{DATA:request} HTTP/%{NUMBER:http_version}" %{NUMBER:response} %{NUMBER:bytes} "%{DATA:referrer}" "%{DATA:agent}"'
}
}
date {
match => [ "time", "dd/MMM/yyyy:HH:mm:ss Z" ]
locale => "en"
}}
output {
elasticsearch {
hosts => ["192.168.137.131:9200"]
index => "logstash-nginx-access-log"
}
}
启动logstash:
[root@hadoop001 logstash-6.0.0]# nohup bin/logstash -f logstash-nginx-access-log.conf &
[1] 3547
[root@hadoop001 logstash-6.0.0]# nohup: ignoring input and appending output to `nohup.out'
[root@hadoop001 logstash-6.0.0]# kill -9 $(pgrep -f logstash)
[root@hadoop001 logstash-6.0.0]#
[root@hadoop001 logstash-6.0.0]# tail -f nohup.out
5.Kibana安装
[root@hadoop001 software]# tar -xzvf kibana-6.0.0-linux-x86_64.tar.gz
[root@hadoop001 software]# cd kibana-6.0.0-linux-x86_64
[root@hadoop001 kibana-6.0.0-linux-x86_64]# ll
total 864
drwxr-xr-x. 2 1000 1000 4096 Nov 11 02:50 bin
drwxrwxr-x. 2 1000 1000 4096 Nov 11 02:50 config
drwxrwxr-x. 2 1000 1000 4096 Nov 11 02:50 data
-rw-rw-r--. 1 1000 1000 562 Nov 11 02:50 LICENSE.txt
drwxrwxr-x. 6 1000 1000 4096 Nov 11 02:50 node
drwxrwxr-x. 620 1000 1000 24576 Nov 11 02:50 node_modules
-rw-rw-r--. 1 1000 1000 799543 Nov 11 02:50 NOTICE.txt
drwxrwxr-x. 3 1000 1000 4096 Nov 11 02:50 optimize
-rw-rw-r--. 1 1000 1000 721 Nov 11 02:50 package.json
drwxrwxr-x. 2 1000 1000 4096 Nov 11 02:50 plugins
-rw-rw-r--. 1 1000 1000 4654 Nov 11 02:50 README.txt
drwxr-xr-x. 14 1000 1000 4096 Nov 11 02:50 src
drwxrwxr-x. 5 1000 1000 4096 Nov 11 02:50 ui_framework
drwxr-xr-x. 2 1000 1000 4096 Nov 11 02:50 webpackShims
[root@hadoop001 kibana-6.0.0-linux-x86_64]# cd config/
[root@hadoop001 config]# ll
total 8
-rw-r--r--. 1 1000 1000 4649 Nov 11 02:50 kibana.yml
[root@hadoop001 config]# vi kibana.yml
server.host: "192.168.137.131"
elasticsearch.url: "http://192.168.137.131:9200"
[root@hadoop001 config]# cd ../
[root@hadoop001 kibana-6.0.0-linux-x86_64]# bin/kibana
http://192.168.137.131:5601
阅读全文
0 0
- elk的安装使用(二)
- elk安装及使用二(elasticsearch的简单使用及插件的安装)
- elk的安装使用(一)
- elk安装及使用一(elk的概述及elasticsearch安装)
- ELK(二)安装logstash
- 日志系统ELK使用详解(二)--Logstash安装和使用
- Doker ELK 的安装部署使用教程
- elk安装及使用四(logstash的安装及使用)
- ELK(二)ElasticSearch
- elk的安装部署三(kibana的安装及使用filebeat收集日志)
- Docker实战(十一):Docker安装ELK环境(二)
- Docker实战(十一):Docker安装ELK环境(二)
- Docker实战(十一):Docker安装ELK环境(二)
- ELK收集Nginx日志,使用grok正则表达式(二)
- Linux操作系统安装ELK stack日志管理系统--(1)Logstash和Filebeat的安装与使用
- ELK中logstash的使用
- ELK中filebeat的使用
- ubuntu环境下的elk安装指南(草稿)
- MVP基类,Retrofit,OkHttp拦截器,RxJava封装
- 关于eclipse中xml文件的注释快捷键
- STM32 HAL 库, 配置串口DMA接收及空闲中断
- CentOS7 安装 MongoDB3.6
- springboot(四、springboot+mybatis)
- elk的安装使用(二)
- Java切割wav音频文件
- Java并发编程札记-(三)JUC原子类-04原子方式更新引用
- pandas基础:Series、和NumPy里的random.x()
- Josn在资源加载中的使用
- vs2013+NetCDF三维数据的读取方法
- 使用navicat for mysql 写一个简单的定时任务。
- intellij idea 显示Arraylist 扩容过程 解决not showing null elements
- delphi运行一个bat文件