小生博客:http://xsboke.blog.51cto.com
創(chuàng)新互聯(lián)是一家從事企業(yè)網(wǎng)站建設、成都網(wǎng)站建設、做網(wǎng)站、行業(yè)門戶網(wǎng)站建設、網(wǎng)頁設計制作的專業(yè)網(wǎng)站建設公司,擁有經(jīng)驗豐富的網(wǎng)站建設工程師和網(wǎng)頁設計人員,具備各種規(guī)模與類型網(wǎng)站建設的實力,在網(wǎng)站建設領域樹立了自己獨特的設計風格。自公司成立以來曾獨立設計制作的站點近1000家。 -------謝謝您的參考,如有疑問,歡迎交流
目錄:
WEB
配置Elasticsearch
配置nginx
訪問elasticsearch
和kibana
filebeat input
配置filebeat收集日志 -> logstash過濾/格式化 -> elasticsearch存儲 -> kibana展示
# 個人理解
其實logstash和filebeat都可以收集日志并且直接輸出到elasticsearch.
只不過logstash功能比filebeat更多,比如:過濾,格式化
filebeat比logstash更輕,所以filebeat收集日志速度更快.
#基于ELK7.4,通過收集Nginx日志示例.
centos7.2-web 172.16.100.251 nginx/filebeat/logstash
centos7.2-elasticsearch 172.16.100.252 elasticsearch/kibana
WEB
配置Nginx
# Install yum-utils (provides the yum-config-manager command used below)
yum -y install yum-utils
# Add the official nginx yum repositories
vim /etc/yum.repos.d/nginx.repo
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
[nginx-mainline]
name=nginx mainline repo
baseurl=http://nginx.org/packages/mainline/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
# Prefer the mainline repo, then install and start nginx
yum-config-manager --enable nginx-mainline
yum -y install nginx
nginx
JDK
# Install Oracle JDK 8 under /usr/local/jdk1.8 and export it system-wide
tar zxf jdk-8u202-linux-x64.tar.gz
mv jdk1.8.0_202 /usr/local/jdk1.8
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8
export JRE_HOME=/usr/local/jdk1.8/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
# FIX: the original expanded $JAVE_HOME (typo), leaving the JDK bin dir off PATH
export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
source /etc/profile
# Without this symlink logstash still errors that it cannot find the openJDK
ln -s /usr/local/jdk1.8/bin/java /usr/bin/java
filebeat
# Download and install filebeat 7.4.0 from the Elastic artifact server
curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.4.0-x86_64.rpm
rpm -vi filebeat-7.4.0-x86_64.rpm
vim /etc/filebeat/filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/nginx/access.log # log file to watch
tags: ["access"] # tag used downstream to tell multiple log streams apart
- type: log
enabled: true
paths:
- /var/log/nginx/error.log
tags: ["error"]
output.logstash:
hosts: ["localhost:5044"] # the logstash pipeline config listens on this port
# Comment out "output.elasticsearch"; otherwise enabling the logstash module fails with: Error initializing beat: error unpacking config data: more than one namespace configured accessing 'output' (source:'/etc/filebeat/filebeat.yml')
# Enabling the logstash module actually edits "/etc/filebeat/modules.d/logstash.yml"
filebeat modules enable logstash
logstash
# Import the Elastic GPG signing key and add the 7.x yum repository
rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
vim /etc/yum.repos.d/logstash.repo
[logstash-7.x]
name=Elastic repository for 7.x packages
baseurl=https://artifacts.elastic.co/packages/7.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
yum -y install logstash
# Put the logstash binary on PATH for interactive use
ln -s /usr/share/logstash/bin/logstash /usr/local/bin/
# logstash.yml部分配置簡介
path.data: 數(shù)據(jù)存放目錄
config.reload.automatic: 是否動態(tài)加載配置文件
config.reload.interval: 動態(tài)加載配置文件間隔
http.host: 監(jiān)聽主機
http.port: 端口
# 在logstash/conf.d/ 下編寫你的配置文件
# Pipeline: receive beats events on 5044 and index into elasticsearch by tag
vim /etc/logstash/conf.d/nginx.conf
input {
beats {
port => 5044
}
}
output {
if "access" in [tags] { # route each log stream to its own index by tag
elasticsearch {
hosts => ["172.16.100.252:9200"]
index => "nginx-access-%{+YYYY.MM.dd}" # index names must be lowercase
sniffing => true
template_overwrite => true
}
}
if "error" in [tags] {
elasticsearch {
hosts => ["172.16.100.252:9200"]
index => "nginx-error-%{+YYYY.MM.dd}"
sniffing => true
template_overwrite => true
}
}
}
systemctl daemon-reload
# FIX: the original used the misspelled unit name "logstashe",
# so enable/start would fail with "Unit not found"
systemctl enable logstash
systemctl start logstash
# Open the HTTP port for nginx
firewall-cmd --permanent --add-port=80/tcp
firewall-cmd --reload
Elasticsearch
配置JDK
# Install Oracle JDK 8 on the elasticsearch host and export it system-wide
tar zxf jdk-8u202-linux-x64.tar.gz
mv jdk1.8.0_202 /usr/local/jdk1.8
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8
export JRE_HOME=/usr/local/jdk1.8/jre
export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH
# FIX: the original expanded $JAVE_HOME (typo), leaving the JDK bin dir off PATH
export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
source /etc/profile
elasticsearch
```
# Add the Elastic 7.x yum repository and install elasticsearch
vim /etc/yum.repos.d/elasticsearch.repo
[elasticsearch-7.x]
name=Elasticsearch repository for 7.x packages
baseurl=https://artifacts.elastic.co/packages/7.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
yum -y install elasticsearch
# Edit /etc/elasticsearch/elasticsearch.yml; meaning of each key:
關鍵字:
cluster.name: 群集名字
node.name: 節(jié)點名字
path.data: 數(shù)據(jù)存放路徑
path.logs: 日志存放路徑
bootstrap.memory_lock: 在啟動時侯是否鎖定內(nèi)存
network.host: 提供服務綁定的ip地址,0.0.0.0代表所有地址
http.port: 偵聽端口
discovery.seed_hosts: 集群主機
cluster.initial_master_nodes: 指定master節(jié)點
# Each sed appends the active setting right after the commented template line
sed -i "/#cluster.name: my-application/a\cluster.name: my-elk-cluster" /etc/elasticsearch/elasticsearch.yml
sed -i "/#node.name: node-1/a\node.name: node-1" /etc/elasticsearch/elasticsearch.yml
sed -i "s/path.data: \/var\/lib\/elasticsearch/path.data: \/data\/elasticsearch/g" /etc/elasticsearch/elasticsearch.yml
sed -i "/#bootstrap.memory_lock: true/a\bootstrap.memory_lock: false" /etc/elasticsearch/elasticsearch.yml
sed -i "/#network.host: 192.168.0.1/a\network.host: 0.0.0.0" /etc/elasticsearch/elasticsearch.yml
sed -i "/#http.port: 9200/a\http.port: 9200" /etc/elasticsearch/elasticsearch.yml
sed -i '/#discovery.seed_hosts: \["host1", "host2"\]/a\discovery.seed_hosts: \["172.16.100.252"\]' /etc/elasticsearch/elasticsearch.yml
sed -i '/#cluster.initial_master_nodes: \["node-1", "node-2"\]/a\cluster.initial_master_nodes: \["node-1"\]' /etc/elasticsearch/elasticsearch.yml
# Create the data directory configured above and hand it to the service user
mkdir -p /data/elasticsearch
chown elasticsearch:elasticsearch /data/elasticsearch
systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch
```
Kibana
# Import the Elastic GPG key, add the 7.x repo, and install kibana
rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
vim /etc/yum.repos.d/kibana.repo
[kibana-7.x]
name=Kibana repository for 7.x packages
baseurl=https://artifacts.elastic.co/packages/7.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
yum -y install kibana
# Append the active setting right after each commented template line
sed -i "/#server.port: 5601/a\server.port: 5601" /etc/kibana/kibana.yml
sed -i '/#server.host: "localhost"/a\server.host: "0.0.0.0"' /etc/kibana/kibana.yml
sed -i '/#elasticsearch.hosts: \["http:\/\/localhost:9200"\]/a\elasticsearch.hosts: \["http:\/\/localhost:9200"\]' /etc/kibana/kibana.yml
sed -i '/#kibana.index: ".kibana"/a\kibana.index: ".kibana"' /etc/kibana/kibana.yml
systemctl daemon-reload
systemctl enable kibana
systemctl start kibana
# Open the elasticsearch HTTP and kibana ports
firewall-cmd --permanent --add-port=9200/tcp
# firewall-cmd --permanent --add-port=9300/tcp # cluster transport port
firewall-cmd --permanent --add-port=5601/tcp
firewall-cmd --reload
nginx
訪問elasticsearch
和kibana
(使用nginx
實現(xiàn)elasticsearch
和kibana
的訪問限制)172.16.100.252
# 修改hosts
vim /etc/hosts
172.16.100.252 elk.elasticsearch
# 安裝nginx并且配置
server {
listen 80;
server_name elk.elasticsearch;
location / {
# Only localhost and the web/logstash host may reach elasticsearch
allow 127.0.0.1/32;
allow 172.16.100.251/32;
deny all;
proxy_pass http://127.0.0.1:9200;
}
}
server {
listen 80;
server_name elk.kibana;
location / {
allow "可以訪問kibana的IP";
# NOTE(review): the quoted placeholder above is not valid nginx `allow`
# syntax — substitute a concrete address or CIDR, e.g. allow 203.0.113.5;
deny all;
proxy_pass http://127.0.0.1:5601;
}
}
# elasticsearch config: bind to loopback only — nginx now fronts it on port 80
network.host: 127.0.0.1
discovery.seed_hosts: ["elk.elasticsearch"]
# kibana config: bind to loopback only
server.host: "127.0.0.1"
systemctl restart elasticsearch
systemctl restart kibana
172.16.100.251
# 修改hosts
vim /etc/hosts
172.16.100.252 elk.elasticsearch
# logstash input/output pipeline, now pointing at the nginx-proxied endpoint
vim /etc/logstash/conf.d/nginx.conf
input {
beats {
port => 5044
}
}
output {
if "access" in [tags] { # route each log stream to its own index by tag
elasticsearch {
hosts => ["elk.elasticsearch:80"] # the port must be explicit, otherwise 9200 is assumed
index => "nginx-access-%{+YYYY.MM.dd}" # index names must be lowercase
sniffing => false
template_overwrite => true
}
}
if "error" in [tags] {
elasticsearch {
hosts => ["elk.elasticsearch:80"]
index => "nginx-error-%{+YYYY.MM.dd}"
sniffing => false
template_overwrite => true
}
}
}
systemctl restart logstash
filebeat input
配置
# filebeat將多行合并為一行收集
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/nginx/access.log
tags: ["access"]
multiline.pattern: '^\[[0-9]{4}' # regex marking the start of an event: lines beginning with [YYYY
multiline.negate: true # lines that do NOT match the pattern are continuations
multiline.match: after # continuations are appended after the preceding matching line
# filebeat: collect every log under a directory, including subdirectories
filebeat.inputs:
- type: log
enabled: true
paths:
- "/var/log/**"
recursive_glob.enabled: true # enable recursive ** glob expansion
tags: ["LogAll"]
# Start filebeat in the foreground, logging to the terminal
filebeat -e
# Start logstash in the foreground with an explicit pipeline config
# FIX: the original omitted the -f flag, so the path was not read as a config file
logstash -f /etc/logstash/conf.d/nginx.conf
# Append arbitrary content to one of the collected log files
echo "1" >> /var/log/nginx/access.log
# Then inspect the filebeat and logstash terminal output to diagnose errors
另外有需要云服務器可以了解下創(chuàng)新互聯(lián)cdcxhl.cn,海內(nèi)外云服務器15元起步,三天無理由+7*72小時售后在線,公司持有idc許可證,提供“云服務器、裸金屬服務器、高防服務器、香港服務器、美國服務器、虛擬主機、免備案服務器”等云主機租用服務以及企業(yè)上云的綜合解決方案,具有“安全穩(wěn)定、簡單易用、服務可用性高、性價比高”等特點與優(yōu)勢,專為企業(yè)上云打造定制,能夠滿足用戶豐富、多元化的應用場景需求。
文章標題:ELK7.4-快速入門實現(xiàn)數(shù)據(jù)收集-創(chuàng)新互聯(lián)
標題URL:http://jinyejixie.com/article40/djedho.html
成都網(wǎng)站建設公司_創(chuàng)新互聯(lián),為您提供移動網(wǎng)站建設、關鍵詞優(yōu)化、App開發(fā)、全網(wǎng)營銷推廣、網(wǎng)站排名、網(wǎng)站內(nèi)鏈
聲明:本網(wǎng)站發(fā)布的內(nèi)容(圖片、視頻和文字)以用戶投稿、用戶轉載內(nèi)容為主,如果涉及侵權請盡快告知,我們將會在第一時間刪除。文章觀點不代表本網(wǎng)站立場,如需處理請聯(lián)系客服。電話:028-86922220;郵箱:631063699@qq.com。內(nèi)容未經(jīng)允許不得轉載,或轉載時需注明來源: 創(chuàng)新互聯(lián)
猜你還喜歡下面的內(nèi)容