在 elk1 节点上操作:
cd /opt
# Copy the filebeat tarball into /opt first (e.g. via scp/rz), then unpack it.
tar -xf filebeat-6.7.2-linux-x86_64.tar.gz
mv filebeat-6.7.2-linux-x86_64 filebeat
cd filebeat/
# Install and start nginx so there are access/error logs to ship.
yum -y install nginx
systemctl restart nginx
# Give the default page recognizable content (non-interactive; replaces vim).
echo 'this is nginx' > /usr/share/nginx/html/index.html
# Keep a pristine copy of the stock config before editing.
cp filebeat.yml filebeat.yml.bak
# Edit filebeat.yml with the input/output settings shown below.
vim filebeat.yml
# filebeat.inputs entry: tail the nginx access and error logs.
# (Indentation restored — the flattened form is not valid YAML.)
- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
    - /var/log/nginx/error.log
  tags: ["nginx"]
  # Custom fields carried on every event; service_name is later used
  # as the Elasticsearch index name by the Logstash pipeline.
  fields:
    service_name: 192.168.66.17_nginx_kafka
    log_type: nginx
    from: 192.168.66.17

# Ship events directly to the Kafka cluster (comment out the default
# output.elasticsearch section — only one output may be enabled).
output.kafka:
  enabled: true
  hosts: ["192.168.66.18:9092","192.168.66.19:9092","192.168.66.20:9092"]
  topic: "nginx"
# Filebeat only needs READ access to the nginx logs; 644 is sufficient.
# (Avoid the original chmod 777 — world-writable log files are a security hole.)
chmod 644 /var/log/nginx/access.log /var/log/nginx/error.log
cd /opt/filebeat
# -e makes filebeat log to stderr, so redirect BOTH streams into filebeat.out
# (without 2>&1 the file stays empty and tail -f shows nothing).
nohup ./filebeat -e -c filebeat.yml > filebeat.out 2>&1 &
# Watch startup output to confirm the Kafka connection succeeds.
tail -f filebeat.out
# --- On the ELK (Logstash) node: create the pipeline config below ---
cd /etc/logstash/conf.d
# kafka.conf — consume nginx events from Kafka and index them into ES.
input {
  kafka {
    bootstrap_servers => "192.168.66.18:9092,192.168.66.19:9092,192.168.66.20:9092"
    topics            => "nginx"
    type              => "nginx_kafka"
    # Filebeat publishes JSON; decode it back into event fields.
    codec             => "json"
    # Start from the beginning of the topic on first run.
    auto_offset_reset => "earliest"
    # Attach Kafka metadata (topic/partition/offset) to each event.
    decorate_events   => true
  }
}

output {
  # Only index events tagged "nginx" by the filebeat input config.
  if "nginx" in [tags] {
    elasticsearch {
      hosts => ["192.168.66.15:9200","192.168.66.16:9200"]
      # Daily index named after the custom service_name field.
      index => "%{[fields][service_name]}-%{+YYYY.MM.dd}"
    }
  }
}
codec => "json"
将消息按 JSON 格式解码(filebeat 输出的就是 JSON)
auto_offset_reset => "earliest"
earliest:从头拉取;latest:只拉取实时新增的消息
decorate_events => true
传递给 ES 的事件中附加 kafka 的元数据(topic、partition、offset 等)
可在 kafka 节点上查看 topic 的消费情况进行验证
在 elk 节点上操作:
# Start Logstash with the Kafka pipeline in the background; a dedicated
# --path.data lets this instance run alongside others without fighting
# over the default data directory lock.
logstash -f kafka.conf --path.data /opt/test8 &