机器分配
192.168.77.136 docker-compose
192.168.77.137 log-test cron
# Install Docker Engine and docker-compose.
# NOTE(review): docker-ce is not in the default distro repos — the Docker CE
# repository must be configured first; confirm for your distro version.
# CentOS:
yum -y install docker-ce docker-compose
# Debian/Ubuntu (refresh package lists first):
apt update
apt -y install docker-ce docker-compose
编写docker-compose.yaml
# Create the ELK working directory (idempotent with -p; plain mkdir fails
# if the directory already exists) and edit the compose file there.
mkdir -p /opt/elk
cd /opt/elk || exit 1
vim docker-compose.yaml
内容如下
# /opt/elk/docker-compose.yaml — single-node ELK stack (7.14.0).
# NOTE: the original paste had lost all indentation; YAML indentation is
# mandatory, so the structure is restored here.
version: '3'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.14.0
    environment:
      - node.name=elasticsearch
      # Single-node bootstrap: this node elects itself master.
      - cluster.initial_master_nodes=elasticsearch
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    volumes:
      - data01:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
  logstash:
    image: docker.elastic.co/logstash/logstash:7.14.0
    depends_on:
      - elasticsearch
    volumes:
      # Host logs are exposed read-only inside the container under /host/var/log.
      - /var/log:/host/var/log
      - /opt/elk/pipeline:/usr/share/logstash/pipeline
    ports:
      - 5000:5000
  kibana:
    image: docker.elastic.co/kibana/kibana:7.14.0
    depends_on:
      - elasticsearch
    environment:
      # Explicit, though this matches the image default.
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - 5601:5601
volumes:
  data01:
    driver: local
## Grant Logstash read access to the collected logs: directories need the
## execute (search) bit to be traversed, files need the read bit.
## The original `chmod -R o+r` left subdirectories (e.g. /var/log/nginx)
## without o+x, so the container could not descend into them; the symbolic
## `X` mode adds execute to directories only, not to regular files.
chmod o+x /var/log
chmod -R o+rX /var/log
## Create the Logstash pipeline configuration directory (idempotent with -p).
mkdir -p /opt/elk/pipeline
cd /opt/elk/pipeline || exit 1
[root@localhost pipeline]# cat logstash-cron.conf
# /opt/elk/pipeline/logstash-cron.conf — ship host cron logs to Elasticsearch.
input {
file {
path => "/host/var/log/cron*" # adjust to the actual cron log path (host /var/log is mounted at /host/var/log)
start_position => "beginning"
sincedb_path => "/dev/null" # do not persist read position; files are re-read on every restart
type => "cron" # adds a field to each event marking the log type
}
}
filter {
if [type] == "cron" {
grok {
match => { "message" => "%{GREEDYDATA:cron_message}" } # copies the whole line into cron_message
add_field => { "source_ip" => "192.168.77.136" } # change this to the actual source server IP
}
}
}
output {
if [type] == "cron" {
elasticsearch {
hosts => ["192.168.77.136:9200"]
index => "cron_%{source_ip}-%{+YYYY.MM.dd}" # one index per source host per day
}
}
stdout { codec => rubydebug } # also echo events to the container log for debugging
}
[root@localhost pipeline]# cat logstash-nginx.conf
# /opt/elk/pipeline/logstash-nginx.conf — ship host nginx logs to Elasticsearch.
input {
  file {
    path => "/host/var/log/nginx/*"   # adjust to the actual nginx log path
    start_position => "beginning"
    sincedb_path => "/dev/null"       # do not persist read position
    type => "nginx"                   # adds a field marking the log type
  }
}
filter {
  if [type] == "nginx" {
    grok {
      # Fixed copy-paste from the cron pipeline: capture into nginx_message,
      # not cron_message.
      match => { "message" => "%{GREEDYDATA:nginx_message}" }
      add_field => { "source_ip" => "192.168.77.136" }   # change to the actual source server IP
    }
  }
}
output {
  if [type] == "nginx" {
    elasticsearch {
      hosts => ["192.168.77.136:9200"]
      index => "nginx_%{source_ip}-%{+YYYY.MM.dd}"   # one index per source host per day
    }
  }
  stdout { codec => rubydebug }   # also echo events to the container log for debugging
}
注意权限问题:Logstash 需要对日志目录(及其所有子目录)有执行权限,对日志文件有读取权限;同时关闭 SELinux(setenforce 0),否则容器无法读取挂载的日志文件。
cd /opt/elk || exit 1
# Start the stack detached. Redirect stderr too — docker-compose writes its
# progress output to stderr, which the original `> elk.log` missed.
docker-compose up -d > elk.log 2>&1
# Teardown (commented: running it immediately after `up` would stop the
# stack; execute it only when you want to shut everything down):
# docker-compose down
## On the log-shipping host (192.168.77.137): standalone Logstash settings.
mkdir -p /opt/elk
cd /opt/elk || exit 1
# Create logstash.yml. The original transcript only `cat`-ed a file that did
# not exist yet; the heredoc actually writes it (quoted delimiter: no shell
# expansion of the contents).
cat > logstash.yml <<'EOF'
http.host: "0.0.0.0"
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.77.136:9200" ]
EOF
## Create the pipeline configuration. The heredoc delimiter is quoted so the
## shell does not expand Logstash's %{...} references.
mkdir -p /opt/elk/pipeline
cd /opt/elk/pipeline || exit 1
cat > logstash-cron.conf <<'EOF'
input {
  file {
    path => "/host/var/log/cron*"   # adjust to the actual cron log path
    start_position => "beginning"
    sincedb_path => "/dev/null"     # do not persist read position
    type => "cron"                  # adds a field marking the log type
  }
}
filter {
  if [type] == "cron" {
    grok {
      match => { "message" => "%{GREEDYDATA:cron_message}" }
      add_field => { "source_ip" => "192.168.77.137" }   # this host's IP
    }
  }
}
output {
  if [type] == "cron" {
    elasticsearch {
      hosts => ["192.168.77.136:9200"]   # central Elasticsearch on .136
      index => "cron_%{source_ip}-%{+YYYY.MM.dd}"
    }
  }
  stdout { codec => rubydebug }
}
EOF
## Start the standalone Logstash container on the shipper host: expose port
## 5000, mount the host logs, the pipeline directory, and the settings file.
docker run -d \
  --name elk_logstash_1 \
  -p 5000:5000 \
  -v /var/log:/host/var/log \
  -v /opt/elk/pipeline:/usr/share/logstash/pipeline \
  -v /opt/elk/logstash.yml:/usr/share/logstash/config/logstash.yml \
  docker.elastic.co/logstash/logstash:7.14.0