Elk

简介

ELK 是一套由 Elasticsearch、Logstash 和 Kibana 组成的开源工具,用于搜索、分析和可视化数据。

部署

创建es数据目录data和插件目录plugins

# Create the Elasticsearch data and plugin directories (-p makes this
# idempotent, so re-running the setup does not fail).
mkdir -p data plugins
# 777 lets the container's elasticsearch user (uid 1000) write here;
# NOTE(review): `chown 1000:1000 data plugins` would be a tighter fix
# than world-writable permissions.
chmod 777 data plugins

创建elasticsearch.yml

# Write elasticsearch.yml. The quoted delimiter ('EOF') keeps the
# content literal — no accidental shell variable/command expansion.
cat > elasticsearch.yml <<'EOF'
network.host: 0.0.0.0
EOF

创建es-jvm.options文件

# Write the Elasticsearch JVM options file. With a quoted delimiter the
# heredoc is literal, so ${ES_TMPDIR} no longer needs a backslash escape
# and reaches the file verbatim (the output is identical to before).
# The "8-13:" / "14-:" prefixes are JVM-version conditionals understood
# by Elasticsearch's options parser.
cat > es-jvm.options <<'EOF'
-Xms1g
-Xmx1g
8-13:-XX:+UseConcMarkSweepGC
8-13:-XX:CMSInitiatingOccupancyFraction=75
8-13:-XX:+UseCMSInitiatingOccupancyOnly
14-:-XX:+UseG1GC
14-:-XX:G1ReservePercent=25
14-:-XX:InitiatingHeapOccupancyPercent=30
-Djava.io.tmpdir=${ES_TMPDIR}
-XX:+HeapDumpOnOutOfMemoryError
-XX:HeapDumpPath=data
-XX:ErrorFile=logs/hs_err_pid%p.log
8:-XX:+PrintGCDetails
8:-XX:+PrintGCDateStamps
8:-XX:+PrintTenuringDistribution
8:-XX:+PrintGCApplicationStoppedTime
8:-Xloggc:logs/gc.log
8:-XX:+UseGCLogFileRotation
8:-XX:NumberOfGCLogFiles=32
8:-XX:GCLogFileSize=64m
9-:-Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m
EOF

创建kibana.yml

# Write kibana.yml. Quoted delimiter keeps the content literal.
cat > kibana.yml <<'EOF'
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"
EOF

创建kibana-jvm.options文件(注意:虽以 kibana 命名,该文件实际会挂载到 logstash 容器中作为其 jvm.options 使用;Kibana 本身是 Node.js 应用,不读取 JVM 参数)

# NOTE(review): despite the name, this file is mounted into the
# *logstash* container as its jvm.options (see docker-compose.yaml);
# the JRuby flags below are Logstash JVM settings. Kibana is a Node.js
# application and does not read JVM options at all. The file name is
# kept as-is to match the compose volume mapping.
cat > kibana-jvm.options <<'EOF'
-Xms1g
-Xmx1g
-XX:+UseConcMarkSweepGC
-XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly
-Djava.awt.headless=true
-Dfile.encoding=UTF-8
-Djruby.compile.invokedynamic=true
-Djruby.jit.threshold=0
-Djruby.regexp.interruptible=true
-XX:+HeapDumpOnOutOfMemoryError
-Djava.security.egd=file:/dev/urandom
-Dlog4j2.isThreadContextMapInheritable=true
EOF

创建logstash.conf

# Write the initial Logstash pipeline: Beats in on 5044, events out to
# Elasticsearch plus a debug copy on stdout. Quoted delimiter keeps the
# content literal.
cat > logstash.conf <<'EOF'
input {
  # 来源beats
  beats {
      # 端口
      port => "5044"
  }
}
output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    index => "test"
  }
  stdout { codec => rubydebug }
}
EOF

创建docker-compose.yaml

# Write docker-compose.yaml. The quoted delimiter keeps the content
# literal. Port mappings are quoted: YAML 1.1 parsers can read an
# unquoted HOST:CONTAINER pair as a base-60 integer, so quoting them is
# the safe convention for Compose files.
cat > docker-compose.yaml <<'EOF'
services:
  elasticsearch:
    image: elasticsearch:7.7.0       # 镜像版本
    container_name: elasticsearch    # 容器名称
    restart: always                  # 自动重启
    environment:
      - "discovery.type=single-node" # 以单一节点模式启动
      - "TZ=Asia/Shanghai"           # 设置时区
    volumes:                         # 持久化数据
      - ./data:/usr/share/elasticsearch/data
      - ./elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./es-jvm.options:/usr/share/elasticsearch/config/jvm.options
      - ./plugins:/usr/share/elasticsearch/plugins
    ports:                           # 端口映射
      - "9200:9200"
      - "9300:9300"
    deploy:
      resources:
        limits:
          cpus: "1"              # 限制 CPU 核心数
          memory: "2G"           # 限制内存大小
  kibana:
    image: kibana:7.7.0
    container_name: kibana
    restart: always
    depends_on:
      - elasticsearch #kibana在elasticsearch启动之后再启动
    environment:
      - i18n.locale=zh-CN
      - TZ=Asia/Shanghai
    volumes:
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    ports:
      - "5601:5601"
    deploy:
      resources:
        limits:
          cpus: "1"
          memory: "1G"
  logstash:
    image: logstash:7.7.0
    container_name: logstash
    restart: always
    volumes:
      - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf
      - ./kibana-jvm.options:/usr/share/logstash/config/jvm.options
    ports:
      - "5044:5044"
    environment:
      - TZ=Asia/Shanghai
    depends_on:
      - elasticsearch
    deploy:
      resources:
        limits:
          cpus: "1"
          memory: "1G"
EOF

启动

# Start the whole stack in the background (detached mode)
docker compose up -d

访问

# Smoke-test Kibana from the host (port 5601 mapped by compose)
curl http://127.0.0.1:5601

设置密码

修改配置elasticsearch.yml

elasticsearch.yml
# 1. Enable X-Pack security (the core switch; required on 7.7.0)
xpack.security.enabled: true
# 2. Disable transport SSL verification (key on 7.7.0 — startup fails
#    otherwise; authentication is by username/password only)
xpack.security.transport.ssl.enabled: false
# 3. Allow remote access so Kibana can reach ES (skip if already set)
network.host: 0.0.0.0
discovery.type: single-node

重启elasticsearch容器,并进入容器执行命令设置密码。

# Run the password-initialization command (fixed command on 7.7.x;
# execute it inside the elasticsearch container)
elasticsearch-setup-passwords interactive

Initiating the setup of passwords for reserved users elastic,apm_system,kibana,logstash_system,beats_system.
You will be prompted to enter passwords as the process progresses.
Please confirm that you would like to continue [y/N] → 输入 y 回车

Enter password for [elastic]: → 输入你的密码(如Elk@7700),回车
Reenter password for [elastic]: → 重复输入密码,回车

Enter password for [apm_system]: → 输入密码,回车
...(依次为kibana、logstash_system、beats_system设置密码,建议统一设为同一个)

设置kibana密码

server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"

# ES superuser account (the built-in name is always "elastic")
elasticsearch.username: "elastic"
# The password you just set for the elastic user
elasticsearch.password: "你设置的ES密码"
# Enable Kibana login authentication (recommended on 7.7.0)
xpack.security.enabled: true

设置logstash密码

input {
  # Receive events from Beats shippers
  beats {
      # Listening port
      port => "5044"
  }
}
output {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]
    # One index per day
    index => "log-%{+YYYY.MM.dd}"
    # ES credentials (same password as set for the elastic user)
    user => "elastic"
    password => "你设置的ES密码"
  }
  stdout { codec => rubydebug }
}

重启elk服务

# Restart all services so the new security settings take effect
docker compose restart

logstash配置示例

input {
  # Events arrive from Beats shippers on port 5044
  beats {
    port => 5044
  }
}

filter {
  # Parse the raw log line: numeric level, timestamp, level string,
  # client IP, connection id, API path/response, response time (ms) and
  # a trailing JSON payload.
  grok {
    match => { 
      "message" => "^<(?<log_level>\d+)> %{TIMESTAMP_ISO8601:log_timestamp} \[%{LOGLEVEL:log_level_str}\] %{IP:client_ip} #%{NUMBER:connection_id} \[%{DATA:api_path}\] \[%{DATA:api_response}\] %{NUMBER:response_time}ms (?<json_data>{.*})$"
    }
    tag_on_failure => ["_grokparsefailure"]
  }

  # Promote the parsed log timestamp to @timestamp.
  # Note: the source format is "2026-01-15 14:45:17" (space in the middle).
  date {
    match => ["log_timestamp", "yyyy-MM-dd HH:mm:ss"]
    target => "@timestamp"
    timezone => "Asia/Shanghai"
    locale => "zh_CN"
    tag_on_failure => ["_dateparsefailure"]
  }

  # If date parsing failed, fall back to the current (ingest) time.
  if "_dateparsefailure" in [tags] {
    mutate {
      remove_tag => ["_dateparsefailure"]
      add_tag => ["timestamp_fallback"]
    }
    # NOTE(review): this date filter re-parses @timestamp, which Logstash
    # has already populated — effectively a no-op that keeps the ingest
    # time. Confirm that is the intended fallback behavior.
    date {
      match => ["@timestamp", "ISO8601"]
      timezone => "Asia/Shanghai"
    }
  }
  
  # Derived fields.
  # NOTE(review): "status" is assigned the same %{response_time} value as
  # "api_performance" — looks like a copy-paste slip; verify the intended
  # source field for "status".
  mutate {
    add_field => {
      "api_performance" => "%{response_time}"
      "status" => "%{response_time}"
    }
  }
  
  # Drop Beats metadata and the raw message once it has been parsed.
  mutate {
    remove_field => [
      "[beat][hostname]",
      "[beat][name]",
      "[beat][version]",
      "[message]"
    ]
  }
}

output {
    elasticsearch {
        # Use the compose service name: inside the logstash container,
        # 127.0.0.1 refers to logstash itself, not Elasticsearch. This
        # also matches the earlier pipeline examples in this document.
        hosts => ["http://elasticsearch:9200"]
        # NOTE(review): %{index} must be present on each event (set by
        # Beats or a filter); events without it produce a literal,
        # invalid index name. Date pattern aligned with the earlier
        # log-%{+YYYY.MM.dd} example.
        index => "%{index}-%{+YYYY.MM.dd}"
        # Logstash config values must be quoted strings — the original
        # bare words (user => elastic) are a configuration syntax error.
        user => "elastic"
        password => "aaabbbccc"
    }
}