Simple ELK Deployment with Docker

This is my ops colleague's documentation for deploying ELK, reposted here for my own records.

Service Planning

Architecture: Filebeat -> Kafka -> Logstash -> ES

  • Kafka cluster deployment: see the separate Kafka cluster deployment guide

    Service         Program Path / Data Directory   Port   Config File
    elasticsearch   /data/elasticsearch             9200   /data/elasticsearch/config/elasticsearch.yml
    logstash        /data/logstash                  -      /data/logstash/config/logstash.yml
    kibana          /data/kibana                    5601   /data/kibana/config/kibana.yml
    filebeat        /data/filebeat                  -      /data/filebeat/config/filebeat.yml

Indexing Service - Elasticsearch

Create data directories

mkdir -pv /data/elasticsearch/{config,data,logs}
chown 1000 /data/elasticsearch/{data,logs}

Modify host configuration

vim /etc/sysctl.conf
Add:
vm.max_map_count=655360
sysctl -p

vim /etc/security/limits.conf
Add:
* soft memlock unlimited
* hard memlock unlimited
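
A quick verification that both changes took effect (run ulimit -l in a new login shell, since limits.conf applies at login):

sysctl vm.max_map_count
# expect: vm.max_map_count = 655360
ulimit -l
# expect: unlimited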

Configuration file

cat > /data/elasticsearch/config/elasticsearch.yml << 'EOF'
cluster.name: ccms-es-cluster
node.name: ccms-es1
network.host: 172.16.20.51
http.port: 9200
bootstrap.memory_lock: true

# Allow cross-origin (CORS) access
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: "OPTIONS, HEAD, GET, POST, PUT, DELETE"
http.cors.allow-headers: "Authorization, X-Requested-With, Content-Type, Content-Length, X-User"

# Cluster
node.master: true
node.data: true
transport.tcp.port: 9300
discovery.seed_hosts: ["172.16.20.51","172.16.20.52","172.16.20.53"]
cluster.initial_master_nodes: ["ccms-es1","ccms-es2","ccms-es3"]

cluster.routing.allocation.same_shard.host: true
cluster.routing.allocation.node_initial_primaries_recoveries: 4
cluster.routing.allocation.node_concurrent_recoveries: 4

# X-Pack
xpack.security.enabled: true
xpack.license.self_generated.type: basic
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
EOF

chown 1000 /data/elasticsearch/config/*
# After the container starts, generate the certificate first, distribute it to each node's config directory, then restart the ES container

The discovery.zen.minimum_master_nodes formula is (master-eligible nodes / 2) + 1; with the three nodes here that gives 3/2 + 1 = 2. Note that in ES 7.x this setting is deprecated and ignored — cluster.initial_master_nodes above handles cluster bootstrap instead.

# Set the ES passwords:
# generate random passwords automatically
elasticsearch-setup-passwords auto
# or
# set custom passwords interactively
elasticsearch-setup-passwords interactive

# es-head login
http://172.16.20.52:9200/?auth_user=elastic&auth_password=elastic123456

# Generate the certificates (do not set a password on them).
# Run from the ES home directory so the config/ output path resolves;
# first create a CA, then the node certificate that elasticsearch.yml references:
cd /usr/share/elasticsearch
elasticsearch-certutil ca -out config/elastic-stack-ca.p12 -pass ""
elasticsearch-certutil cert --ca config/elastic-stack-ca.p12 --ca-pass "" -out config/elastic-certificates.p12 -pass ""
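# Distribution: the config directory is bind-mounted, so the generated files land in
# /data/elasticsearch/config on the host and can be copied to the other nodes
# (hosts taken from the cluster config above), for example:
scp /data/elasticsearch/config/elastic-certificates.p12 172.16.20.52:/data/elasticsearch/config/
scp /data/elasticsearch/config/elastic-certificates.p12 172.16.20.53:/data/elasticsearch/config/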

docker-compose orchestration

mkdir -pv /data/docker-compose/elasticsearch/
cat > /data/docker-compose/elasticsearch/docker-compose.yml << EOF
version: "3"
services:
  es:
    container_name: es
    image: elasticsearch:7.11.1
    network_mode: host
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/elasticsearch/config:/usr/share/elasticsearch/config
      - /data/elasticsearch/data:/usr/share/elasticsearch/data
      - /data/elasticsearch/logs:/usr/share/elasticsearch/logs
    environment:
      TZ: Asia/Shanghai
      bootstrap.memory_lock: true
      ES_JAVA_OPTS: "-Xmx8G -Xms8G"
      ELASTIC_PASSWORD: "G1T@es2022#ccms"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    deploy:
      resources:
        limits:
           memory: 10G
EOF
# 1. Fix the es-head CORS error (browser reports: Request header field Content-Type is not allowed by Access-Control-Allow-Headers)
# Add to the ES config file:
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-methods: "OPTIONS, HEAD, GET, POST, PUT, DELETE"
http.cors.allow-headers: "X-Requested-With, Content-Type, Content-Length, X-User"

# 2. Fix the blank data browser tab in es-head (browser reports: 406 Not Acceptable)
# Edit the es-head source file vendor.js
# around line 6886, change:
contentType: "application/x-www-form-urlencoded" --> contentType: "application/json;charset=UTF-8"

Start

docker-compose up -d
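
A quick cluster check after startup (a sketch, using the elastic password set in the compose file):

curl -u 'elastic:G1T@es2022#ccms' 'http://172.16.20.51:9200/_cluster/health?pretty'
# "status" should be green once all three nodes have joined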

Log Collection - Filebeat

Create data directories

mkdir -pv /data/filebeat/{config,data}

Configuration file

Send to Kafka

cat > /data/filebeat/config/filebeat.yml << 'EOF'
###################### Filebeat Configuration Example #########################
filebeat.name: ccms-test-08
filebeat.idle_timeout: 5s
filebeat.spool_size: 2048

#----------------------------------input from ccms servers--------------------------------#
filebeat.inputs:
- type: log
  enabled: true
  paths:
   - /opt/ccms-auto-deploy/credit-business/*/*/target/logs/*.log
   - /opt/ccms-auto-deploy/credit-support/*/*/target/logs/*.log
  fields:
    kafka_topic: topic-ccms-dev
  fields_under_root: true

  # multiline handling: lines not starting with '[' are appended to the previous event
  multiline.pattern: '^\['
  multiline.negate: true
  multiline.match: after

  encoding: plain
  tail_files: false

  # how often to scan the configured paths for file changes
  scan_frequency: 3s
  # check an open file every 1s; if it stays unchanged, the interval grows by backoff_factor up to max_backoff (5s)
  backoff: 1s
  max_backoff: 5s
  backoff_factor: 2

#----------------------------------input from nginx access_log--------------------------------#
- type: log
  enabled: true
  paths:
   - /data/nginx/logs/ccms-access.log
  fields:
    kafka_topic: topic-nginx-access
  fields_under_root: true

  encoding: plain
  tail_files: false

  json.keys_under_root: true
  json.overwrite_keys: true
  json.add_error_key: false

  # how often to scan the configured paths for file changes
  scan_frequency: 3s
  # check an open file every 1s; if it stays unchanged, the interval grows by backoff_factor up to max_backoff (5s)
  backoff: 1s
  max_backoff: 5s
  backoff_factor: 2

#----------------------------------Kafka output--------------------------------#
output.kafka:
  enabled: true
  hosts: ['3.1.101.33:9092','3.1.101.34:9092','3.1.101.35:9092']
  topic: '%{[kafka_topic]}'
EOF
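
The config can be sanity-checked with a one-off container before wiring up docker-compose (a sketch; assumes the official elastic/filebeat:7.11.1 image is available):

docker run --rm -v /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml elastic/filebeat:7.11.1 test config
# verify connectivity to the Kafka brokers
docker run --rm -v /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml elastic/filebeat:7.11.1 test output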

docker-compose orchestration

mkdir -pv /data/docker-compose/filebeat
cat > /data/docker-compose/filebeat/docker-compose.yml << EOF
version: "3"
services:
  filebeat:
    container_name: filebeat
    image: elastic/filebeat:7.11.1
    user: root
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml
      - /data/filebeat/data:/usr/share/filebeat/data/registry
      - /opt/ccms-auto-deploy:/opt/ccms-auto-deploy
      - /data/nginx/logs:/data/nginx/logs/
    deploy:
      resources:
        limits:
           memory: 4G
        reservations:
           memory: 1G
EOF

Start

docker-compose up -d

Install the Kibana dashboards

docker-compose exec filebeat filebeat setup --dashboards
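
Note: filebeat setup --dashboards loads the dashboards through Kibana, which Filebeat expects at localhost:5601 by default. If Kibana runs elsewhere, something like the following would need to be added to filebeat.yml first (host assumed from the planning table above):

setup.kibana:
  host: "172.16.20.51:5601"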

Filtering Service - Logstash

Create data directories

mkdir -pv /data/logstash/{config,data,pipeline,logs}
chown 1000:1000 /data/logstash/{config,data,pipeline,logs}

Configuration files

logstash.yml

cat > /data/logstash/config/logstash.yml << 'EOF'
node.name: logstash-node1
http.host: "0.0.0.0"
path.data: data
path.logs: /usr/share/logstash/logs
config.reload.automatic: true
config.reload.interval: 5s
config.test_and_exit: false
EOF

If you use pipelines (pipelines.yml), do not set path.config in logstash.yml.

pipelines.yml

cat > /data/logstash/config/pipelines.yml << 'EOF'
- pipeline.id: ccms-credit-java
  path.config: "/usr/share/logstash/pipeline/ccms-credit-java.conf"
- pipeline.id: ccms-credit-nginx-access
  path.config: "/usr/share/logstash/pipeline/ccms-credit-nginx-access.conf"
- pipeline.id: ccms-credit-nginx-error
  path.config: "/usr/share/logstash/pipeline/ccms-credit-nginx-error.conf"
EOF

Pipeline configuration files

pipeline/ccms-credit-java.conf

cat > /data/logstash/pipeline/ccms-credit-java.conf << 'EOF'
input {
   kafka {
    topics_pattern => "topic-ccms-credit-sit-java"
    bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
    consumer_threads => 4
    decorate_events => true
    group_id => "kafka-ccms-credit-sit-java"
    add_field => {"logstash-server" => "172.16.20.51"}
   }
}

filter {
    json {
      source => "message"
    }

    grok {
      match => { "message" => "\[%{TIMESTAMP_ISO8601:currentDateTime}\] \[%{LOGLEVEL:level}\] \[%{DATA:traceInfo}\] \[%{NOTSPACE:class}\] \[%{DATA:hostName}\] \[%{IP:hostIp}\] \[%{DATA:applicationName}\] \[%{DATA:location}\] \[%{DATA:messageInfo}\] ## %{QUOTEDSTRING:throwable}" }
    }

    mutate{
      enable_metric => "false"
      remove_field => ["ecs","tags","input","agent","@version","log","port","host","message"]
    }

    date {
      match => [ "currentDateTime", "ISO8601" ]
    }
}

output {
    elasticsearch {
        hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
        user => "elastic"
        password => "G1T@es2022#ccms"
        index => "index-ccms-credit-sit-java_%{+YYYY-MM-dd}"
        sniffing => true
        template_overwrite => true
    }
}
EOF
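
For reference, a log line of the shape this grok pattern expects looks like the following (hypothetical values):

[2022-11-01 10:00:00.123] [INFO] [traceId=abc123] [com.ccms.credit.DemoService] [ccms-app-01] [172.16.20.61] [credit-business] [DemoService.java:42] [request processed] ## ""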

pipeline/ccms-credit-nginx-access.conf

cat > /data/logstash/pipeline/ccms-credit-nginx-access.conf << 'EOF'
input {
   kafka {
    topics_pattern => "topic-ccms-credit-sit-nginx-access"
    bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
    codec => "json"
    consumer_threads => 4
    decorate_events => true
    group_id => "kafka-ccms-credit-sit-nginx-access"
    add_field => {"logstash-server" => "172.16.20.51"}
   }
}

filter {
  geoip {
      source => "client_ip"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
      remove_field => [ "[geoip][latitude]", "[geoip][longitude]", "[geoip][country_code2]","[geoip][country_code3]", "[geoip][timezone]", "[geoip][continent_code]", "[geoip][dma_code]", "[geoip][region_code]" ]
  }

  mutate {
    convert => [ "size", "integer" ]
    convert => [ "status", "integer" ]
    convert => [ "responsetime", "float" ]
    convert => [ "upstreamtime", "float" ]
    convert => [ "[geoip][coordinates]", "float" ]
    # drop Filebeat fields we don't need; be sure none of the removed fields are still needed in ES, since once removed they can't be used in conditionals
    remove_field => [ "ecs","agent","host","cloud","@version","input","logs_type" ]
  }


  useragent {
    source => "http_user_agent"
    target => "ua"
    # drop useragent fields we don't need
    remove_field => [ "[ua][minor]","[ua][major]","[ua][build]","[ua][patch]","[ua][os_minor]","[ua][os_major]" ]
  }

}

output {
    elasticsearch {
        hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
        user => "elastic"
        password => "G1T@es2022#ccms"
        index => "logstash-ccms-credit-sit-nginx-access_%{+YYYY-MM-dd}"
        sniffing => true
        template_overwrite => true
    }
}
EOF
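
The codec => "json" input assumes nginx already writes the access log as JSON. A matching log_format on the nginx side might look like this (a sketch; field names mirror the filters above, and escape=json requires nginx 1.11.8+):

log_format json_access escape=json '{"@timestamp":"$time_iso8601",'
    '"client_ip":"$remote_addr",'
    '"status":"$status",'
    '"size":"$body_bytes_sent",'
    '"responsetime":"$request_time",'
    '"upstreamtime":"$upstream_response_time",'
    '"http_user_agent":"$http_user_agent",'
    '"request":"$request"}';
access_log /data/nginx/logs/ccms-access.log json_access;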

pipeline/ccms-credit-nginx-error.conf

cat > /data/logstash/pipeline/ccms-credit-nginx-error.conf << 'EOF'
input {
   kafka {
    topics_pattern => "topic-ccms-credit-sit-nginx-error"
    bootstrap_servers => "172.16.20.51:9092,172.16.20.52:9092,172.16.20.53:9092"
    consumer_threads => 4
    decorate_events => true
    group_id => "kafka-ccms-credit-sit-nginx-error"
    add_field => {"logstash-server" => "172.16.20.51"}
    enable_metric => true
   }
}

filter {
    json {
      source => "message"
    }

    grok {
      match => [
        "message", "%{DATESTAMP:currentDateTime}\s{1,}\[%{LOGLEVEL:level}\]\s{1,}(%{NUMBER:pid:int}#%{NUMBER}:\s{1,}\*%{NUMBER})\s{1,}(%{GREEDYDATA:messageInfo})(?:,\s{1,}client:\s{1,}(?<client>%{IP}|%{HOSTNAME}))(?:,\s{1,}server:\s{1,}%{IPORHOST:server})(?:, request: %{QS:request})?(?:, upstream: \"%{URI:endpoint}\")?(?:, host: \"%{HOSTPORT:host}\")?(?:, referrer: \"%{URI:referrer}\")?",
        "message", "%{DATESTAMP:currentDateTime}\s{1,}\[%{DATA:level}\]\s{1,}%{GREEDYDATA:messageInfo}"]
    }

    date{
      match => ["currentDateTime", "yy/MM/dd HH:mm:ss", "ISO8601"]
      timezone => "+08:00"
      target => "@timestamp"
    }

    mutate{
      enable_metric => "false"
      remove_field => [ "ecs","tags","input","agent","@version","log","port","host","message" ]
    }
}

output {
    elasticsearch {
        hosts => ["172.16.20.51:9200","172.16.20.52:9200","172.16.20.53:9200"]
        user => "elastic"
        password => "G1T@es2022#ccms"
        index => "logstash-ccms-credit-sit-nginx-error_%{+YYYY-MM-dd}"
        sniffing => true
        template_overwrite => true
    }
}
EOF
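
The pipeline syntax can be checked before starting the container (a sketch; uses the public logstash:7.11.1 image rather than the private-registry one in the compose file below):

docker run --rm -v /data/logstash/pipeline:/usr/share/logstash/pipeline logstash:7.11.1 \
  logstash -f '/usr/share/logstash/pipeline/*.conf' --config.test_and_exit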

docker-compose orchestration

mkdir -pv /data/docker-compose/logstash
cat > /data/docker-compose/logstash/docker-compose.yml << EOF
version: "3"
services:
  logstash:
    container_name: logstash
    image: 172.16.20.50:8005/public/logstash:7.11.1
    user: root
    network_mode: host
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
      - /data/logstash/config:/usr/share/logstash/config
      - /data/logstash/data:/usr/share/logstash/data
      - /data/logstash/pipeline:/usr/share/logstash/pipeline
    environment:
      TZ: Asia/Shanghai
      LS_JAVA_OPTS: "-Xmx8G -Xms8G"
    deploy:
      resources:
        limits:
           memory: 10G
EOF

Start

docker-compose up -d
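
With http.host set to 0.0.0.0 and host networking, the Logstash monitoring API answers on port 9600 for a quick check:

curl -s 'http://localhost:9600/_node/stats/pipelines?pretty'
# per-pipeline event counts should start climbing once messages arrive from Kafka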

Visualization Service - Kibana

Create data directories

mkdir -pv /data/kibana/{config,logs}
chown 1000 /data/kibana/{config,logs}

Configuration file

cat > /data/kibana/config/kibana.yml << 'EOF'
# Default Kibana configuration for docker target
server.name: ccms-kibana
server.port: 5601
server.host: "0"
elasticsearch.hosts: [ "http://172.16.20.51:9200","http://172.16.20.52:9200","http://172.16.20.53:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"
map.tilemap.url: 'http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=7&x={x}&y={y}&z={z}'

xpack.security.enabled: true
xpack.security.encryptionKey: "fhjskloppd678ehkdfdlliverpoolfcr"
elasticsearch.username: "elastic"
elasticsearch.password: "G1T@es2022#ccms"
EOF

docker-compose orchestration

mkdir -pv /data/docker-compose/kibana/
cat > /data/docker-compose/kibana/docker-compose.yml << EOF
version: "3"
services:
  kibana:
    container_name: kibana
    image: kibana:7.11.1
    restart: always
    ports:
      - "5601:5601"
    volumes:
      - /data/kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml
EOF

Start

docker-compose up -d
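
A quick status check once the container is up (using the elastic credentials from kibana.yml):

curl -u 'elastic:G1T@es2022#ccms' 'http://localhost:5601/api/status'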
