Environment
Host IP: 192.168.0.9
Docker version: 19.03.2
docker-compose version: 1.24.0-rc1
elasticsearch version: 6.6.1
kibana version: 6.6.1
logstash version: 6.6.1
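As a quick sanity check before starting, the versions above can be confirmed on the host with the standard CLI calls (shown here for reference only):

docker -v            # should report Docker version 19.03.2
docker-compose -v    # should report docker-compose version 1.24.0-rc1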
I. ELK Dockerfile and configuration file preparation
● elasticsearch
1 、 elasticsearch-dockerfile
FROM centos:latest
ADD elasticsearch-6.6.1.tar.gz /usr/local/
COPY elasticsearch.yml /usr/local/elasticsearch-6.6.1/config/
COPY jdk1.8 /usr/local/
ENV JAVA_HOME=/usr/local/jdk1.8
ENV CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
ENV PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HOME/bin
RUN groupadd elsearch && \
    useradd elsearch -g elsearch -p elasticsearch && \
    chown -R elsearch:elsearch /usr/local/elasticsearch-6.6.1 && \
    cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
    echo "Asia/Shanghai" > /etc/timezone && \
    yum install which -y && \
    mkdir /opt/data && \
    mkdir /opt/logs
EXPOSE 9200 9300
# switch to the elsearch user to start es
USER elsearch
WORKDIR /usr/local/elasticsearch-6.6.1/bin/
ENTRYPOINT ["./elasticsearch"]
2 、 elasticsearch.yml
[root@localhost elasticsearch]# egrep "^[^#]" elasticsearch.yml
cluster.name: es-cluster
node.name: node-1
path.data: /opt/data
path.logs: /opt/logs
network.host: 0.0.0.0
http.port: 9200
cluster.routing.allocation.disk.threshold_enabled: true
cluster.routing.allocation.disk.watermark.low: 94%
cluster.routing.allocation.disk.watermark.high: 96%
cluster.routing.allocation.disk.watermark.flood_stage: 98%
discovery.zen.minimum_master_nodes: 1
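Once the stack from section II is running, the disk watermark values above can be verified from inside the es container. This is only an optional verification sketch; it assumes curl is available in the base image (it is in stock centos):

docker exec es curl -s "http://localhost:9200/_cluster/settings?include_defaults=true&flat_settings=true&pretty" | grep watermark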
● logstash
1 、 logstash-dockerfile
FROM centos:latest
ADD logstash-6.6.1.tar.gz /usr/local/
COPY logstash.yml /usr/local/logstash-6.6.1/config/
COPY logstash.conf /usr/local/logstash-6.6.1/config/
COPY jdk1.8 /usr/local/
COPY start.sh /start.sh
ENV JAVA_HOME=/usr/local/jdk1.8
ENV CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
ENV PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH:$HOME/bin
RUN mkdir /opt/data && \
    mkdir /opt/logs && \
    chmod +x /start.sh
ENTRYPOINT ["/start.sh"]
2 、 logstash-start.sh
#!/bin/bash
/usr/local/logstash-6.6.1/bin/logstash -f /usr/local/logstash-6.6.1/config/logstash.conf
3 、 logstash.yml
[root@localhost logstash]# egrep "^[^#]" logstash.yml
path.data: /opt/data
path.logs: /opt/logs
pipeline.batch.size: 200
4 、 logstash.conf
input {
  file {
    path => "/usr/local/nginx/logs/access.log"
    type => "nginx"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
  file {
    path => "/var/log/secure"
    type => "secure"
    start_position => "beginning"
    sincedb_path => "/dev/null"
  }
}

filter {
  grok {
    # custom regex with named captures for the nginx access log (client IP, timestamp,
    # request method, URL, HTTP version, status, bytes sent, referer, user agent);
    # details can be found in my previous blog. The capture field names below are illustrative.
    match => { "message" => '(?<client_ip>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}) - - (?<access_time>\[[0-9]{1,2}\/[A-Za-z]+\/[0-9]{4}\:[0-9]{2}\:[0-9]{2}\:[0-9]{2} \+[0-9]*\]) "(?<method>[A-Z]+) (?<request_url>[^ ]+) (?<http_version>HTTP/\d\.\d)" (?<status>[0-9]+) (?<bytes>[0-9]+) "(?<referer>[^ ]+|(http|https)://[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/)" "(?<agent>([A-Za-z0-9]|.)+)"' }
    remove_field => ["message", "log", "beat", "offset", "prospector", "host", "@version"]
  }
}

# output points to the es container
output {
  if [type] == "nginx" {
    elasticsearch {
      hosts => ["es:9200"]
      index => "nginx-%{+YYYY.MM.dd}"
    }
  }
  else if [type] == "secure" {
    elasticsearch {
      hosts => ["es:9200"]
      index => "secure-%{+YYYY.MM.dd}"
    }
  }
}
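After the logstash:elk image is built (a build sketch follows the kibana files below), the pipeline syntax can be validated without starting the full pipeline. This is an optional check, not part of the original walkthrough; it overrides the image entrypoint and uses logstash's built-in config test:

docker run --rm --entrypoint /usr/local/logstash-6.6.1/bin/logstash \
    logstash:elk -f /usr/local/logstash-6.6.1/config/logstash.conf --config.test_and_exit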
● kibana
1 、 kibana-dockerfile
FROM centos:latest
ADD kibana-6.6.1-linux-x86_64.tar.gz /usr/local/
COPY kibana.yml /usr/local/kibana-6.6.1-linux-x86_64/config/
COPY start.sh /start.sh
RUN chmod +x /start.sh
EXPOSE 5601
ENTRYPOINT ["/start.sh"]
2 、 kibana.yml
[root@localhost kibana]# egrep "^[^#]" kibana.yml
server.port: 5601
server.host: "0.0.0.0"
# points to port 9200 of the es container
elasticsearch.hosts: ["http://es:9200"]
3 、 kibana-start.sh
#!/bin/bash
/usr/local/kibana-6.6.1-linux-x86_64/bin/kibana
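The compose file in the next section references images tagged elasticsearch:elk, logstash:elk and kibana:elk, so the three images need to be built first. A minimal build sketch, assuming each component's Dockerfile, tarball and config files sit together in per-component sub-directories of /opt/elk/elk_dockerfile (matching the shell prompts above) and that the Dockerfiles are saved under the names used in the headings:

cd /opt/elk/elk_dockerfile
docker build -t elasticsearch:elk -f elasticsearch/elasticsearch-dockerfile elasticsearch/
docker build -t logstash:elk -f logstash/logstash-dockerfile logstash/
docker build -t kibana:elk -f kibana/kibana-dockerfile kibana/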
II. Writing the docker-compose.yml file
[root@localhost elk_dockerfile]# cat docker-compose.yml
version: '3.7'
services:
  elasticsearch:
    image: elasticsearch:elk
    container_name: es
    networks:
      - elk
    volumes:
      - /opt/data:/opt/data
      - /opt/logs:/opt/logs
    expose:
      - 9200
      - 9300
    restart: always
    depends_on:
      - logstash
      - kibana
  logstash:
    image: logstash:elk
    container_name: logstash
    networks:
      - elk
    volumes:
      - /opt/logstash/data/:/opt/data
      - /opt/logstash/logs/:/opt/logs
      - /opt/elk/elk_dockerfile/logstash/logstash.conf:/usr/local/logstash-6.6.1/config/logstash.conf
      - /usr/local/nginx/logs:/usr/local/nginx/logs
      - /var/log/secure:/var/log/secure
    restart: always
  kibana:
    image: kibana:elk
    container_name: kibana
    ports:
      - "5601:5601"
    networks:
      - elk
    volumes:
      - /opt/elk/elk_dockerfile/kibana/kibana.yml:/usr/local/kibana-6.6.1-linux-x86_64/config/kibana.yml
networks:
  elk:
The version field at the top of the compose file has to be compatible with the Docker Engine release on the host; compose file format 3.7 requires Docker Engine 18.06.0 or newer, which the 19.03.2 installation above satisfies.
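With the images built and the compose file in place, the stack can be brought up and checked from the same directory using standard docker-compose commands (shown as a reference sketch):

cd /opt/elk/elk_dockerfile
docker-compose up -d        # start es, logstash and kibana in the background
docker-compose ps           # all three containers should be in the "Up" state
docker-compose logs -f es   # follow the elasticsearch container logs if anything fails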
III. Access the interface
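With the stack running, Kibana is published on port 5601 of the host, so the interface should be reachable at http://192.168.0.9:5601 (the host IP from the environment section). Elasticsearch itself is only exposed on the internal elk network, so a quick check of the indices has to go through the container, for example:

docker exec es curl -s "http://localhost:9200/_cat/indices?v"

Once the nginx-* and secure-* indices appear, they can be added as index patterns in Kibana under Management > Index Patterns and browsed in Discover.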
The above is the whole content of this article. I hope it is helpful to your study, and thank you for your support.