系统:centos6.6   192.168.142.130主机部署elk收集192.168.142.131上的日志

192.168.142.130  jdk-8u121-linux-x64.gz  logstash-5.2.2.tar.gz  elasticsearch-5.2.2.tar.gz  kibana-5.2.2-linux-x86_64.tar.gz  redis nginx (nginx用于反向代理,直接访问kibana就不需要了)

192.168.142.131  jdk-8u121-linux-x64.gz  logstash-5.2.2.tar.gz  apache-tomcat-7.0.63.tar.gz nginx

一、elk介绍

Elasticsearch 是个开源分布式搜索引擎,它的特点有:分布式,零配置,自动发现,索引自动分片,索引副本机制,restful风格接口,多数据源,自动搜索负载等。

Logstash 是一个完全开源的工具,它可以对你的日志进行收集、分析,并将其存储供以后使用(如搜索)。

FileBeat,它是一个轻量级的日志收集处理工具(Agent),Filebeat占用资源少,适合于在各个服务器上搜集日志后传输给Logstash

Kibana 也是一个开源免费的工具,它可以为 Logstash 和 Elasticsearch 提供日志分析友好的 Web 界面,帮助您汇总、分析和搜索重要的日志数据。

二、部署elk环境

# Install JDK 8 (required by Elasticsearch and Logstash) and put it on PATH.
tar zxf jdk-8u121-linux-x64.gz -C /usr/local/
cd /usr/local/
# Version-agnostic symlink so /usr/local/java survives JDK upgrades.
ln -sv jdk1.8.0_121 java
# The two 'export' lines below are the CONTENTS of /etc/profile.d/java.sh,
# entered in the editor — they are not commands typed at the prompt.
vim /etc/profile.d/java.sh
export JAVA_HOME=/usr/local/java
export PATH=$JAVA_HOME/bin:$PATH
source /etc/profile.d/java.sh
# Verify the JDK is active before continuing.
java -version
################## nginx and epel yum repos ###########################
# The repo-file contents follow each 'vim' command below.
vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=0
enabled=1
vim /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 6 - $basearch
baseurl=http://archives.fedoraproject.org/pub/archive/epel/6/$basearch
enabled=1
gpgcheck=0
yum -y install nginx redis ntpdate
# Sync the clock now, then every 20 minutes via root's crontab —
# consistent timestamps matter when correlating logs across hosts.
ntpdate -u cn.pool.ntp.org
echo "*/20 * * * * /usr/sbin/ntpdate -u cn.pool.ntp.org >/dev/null &" >> /var/spool/cron/root

# The two directive lines below are the CONTENTS of /etc/redis.conf.
vim /etc/redis.conf
bind 192.168.142.130
# FIX: redis.conf supports whole-line comments only. The original trailing
# '#支持后台运行' ("run in the background") on this directive would be parsed
# as extra arguments and abort redis startup, so it is moved up here.
daemonize yes
service redis start

三、部署elk组件

1.安装配置elasticsearch

# Create an unprivileged user: Elasticsearch 5.x refuses to run as root.
groupadd elk
useradd es -g elk
######################### system settings required by elasticsearch on centos6 #####################
# File contents follow each 'vim' command below.
vim /etc/security/limits.conf
# FIX: the ES 5.x bootstrap check requires at least 65536 open file
# descriptors; the original soft limit of 65535 fails that check.
es soft nofile 65536
es hard nofile 65536
vim /etc/security/limits.d/90-nproc.conf
* soft nproc 2048
root soft nproc unlimited
vim /etc/sysctl.conf
# vm.max_map_count is another ES bootstrap check; low swappiness keeps
# the JVM heap resident in RAM.
vm.max_map_count=655360
vm.swappiness=1
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
sysctl -p
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.2.2.tar.gz
tar xf elasticsearch-5.2.2.tar.gz -C /usr/local/
chown -R es.elk /usr/local/elasticsearch-5.2.2/
# FIX: path corrected — the archive extracted above is 5.2.2, not 2.4.2.
# The five config lines below are the CONTENTS of elasticsearch.yml.
vim /usr/local/elasticsearch-5.2.2/config/elasticsearch.yml # edit elasticsearch config
node.name: node-130
network.host: 0.0.0.0
http.port: 9200
bootstrap.memory_lock: false
bootstrap.system_call_filter: false
# Switch to the es user and start in the background.
# FIX: 'nohub' typo corrected to nohup.out, and stderr is captured too
# so startup errors are not silently lost.
su - es
nohup /usr/local/elasticsearch-5.2.2/bin/elasticsearch >/usr/local/elasticsearch-5.2.2/nohup.out 2>&1 &


elk5.2.2+redis日志分析系统搭建_elk

2.安装配置kibana

# Unpack kibana and point it at the local elasticsearch instance.
tar zxf kibana-5.2.2-linux-x86_64.tar.gz -C /usr/local
# The four lines after 'vim' are the CONTENTS of kibana.yml.
vim /usr/local/kibana-5.2.2-linux-x86_64/config/kibana.yml # edit kibana config
elasticsearch.url: "http://192.168.142.130:9200"
server.port: 5601
server.host: "0.0.0.0"
kibana.index: ".kibana"
# Start kibana in the background; the web UI listens on port 5601.
nohup /usr/local/kibana-5.2.2-linux-x86_64/bin/kibana > /usr/local/kibana-5.2.2-linux-x86_64/nohub.out &
tar xf logstash-5.2.2.tar.gz -C /usr/local/
mkdir /usr/local/logstash-5.2.2/conf
# Everything from 'input {' to the final '}' is the CONTENTS of
# service_collect.conf: drain the three redis lists (db 6) that the
# clients push to, and index each event type into its own
# date-stamped elasticsearch index.
vim /usr/local/logstash-5.2.2/conf/service_collect.conf
input {
  redis {
    type => "system"
    host => "192.168.142.130"
    key => "system"
    data_type => 'list'
    port => "6379"
    db => "6"
  }
  redis {
    type => "tomcat"
    host => "192.168.142.130"
    key => "tomcat"
    data_type => 'list'
    port => "6379"
    db => "6"
  }
  redis {
    type => "nginx-log"
    host => "192.168.142.130"
    key => "nginx-log"
    data_type => 'list'
    port => "6379"
    db => "6"
  }
}
output {
  if [type] == "system" {
    elasticsearch {
      hosts => ["192.168.142.130:9200"]
      index => "system-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "tomcat" {
    elasticsearch {
      hosts => ["192.168.142.130:9200"]
      index => "tomcat-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "nginx-log" {
    elasticsearch {
      hosts => ["192.168.142.130:9200"]
      index => "nginx-log-%{+YYYY.MM.dd}"
    }
  }
}
# FIX: the trailing note '启动程序' ("start the program") was a bare word
# after the command and would have been passed to logstash as an extra
# argument; it is now this comment instead.
/usr/local/logstash-5.2.2/bin/logstash -f /usr/local/logstash-5.2.2/conf/service_collect.conf



四、客户端安装配置

# Client host (192.168.142.131): same JDK setup as the server.
tar zxf jdk-8u121-linux-x64.gz -C /usr/local/
cd /usr/local/
ln -sv jdk1.8.0_121 java

# The two 'export' lines below are the CONTENTS of /etc/profile.d/java.sh.
vim /etc/profile.d/java.sh
export JAVA_HOME=/usr/local/java
export PATH=$JAVA_HOME/bin:$PATH
source /etc/profile.d/java.sh
java -version
# Tomcat is the application whose logs will be shipped.
tar zxf apache-tomcat-7.0.63.tar.gz -C /usr/local/

# Repo-file contents follow the 'vim' command.
vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=0
enabled=1
yum -y install nginx ntpdate
# Keep the client clock in sync with the server for log correlation.
ntpdate -u cn.pool.ntp.org
echo "*/20 * * * * /usr/sbin/ntpdate -u cn.pool.ntp.org >/dev/null &" >> /var/spool/cron/root

# Emit the nginx access log as JSON so logstash can parse it with
# 'codec => json' instead of a grok pattern. The log_format/access_log
# lines below are CONTENTS added to nginx.conf.
vim /etc/nginx/nginx.conf # define nginx json log format
log_format access_json '{"@timestamp":"$time_iso8601",'
'"host":"$server_addr",'
'"client":"$remote_addr",'
'"size":$body_bytes_sent,'
'"responsetime":$request_time,'
'"upstreamtime":"$upstream_response_time",'
'"upstreamhost":"$upstream_addr",'
'"http_host":"$host",'
'"url":"$uri",'
'"domain":"$host",'
'"xff":"$http_x_forwarded_for",'
'"referer":"$http_referer",'
'"status":"$status"}';
access_log /etc/nginx/access_json.log access_json;

tar xf logstash-5.2.2.tar.gz -C /usr/local/
mkdir /usr/local/logstash-5.2.2/conf

# Everything from 'input {' to the final '}' is the CONTENTS of
# client_collect.conf: tail syslog, the JSON nginx access log, and the
# tomcat catalina logs, then push each event type into its matching
# redis list (db 6) on the server, where the server-side logstash
# picks it up.
vim /usr/local/logstash-5.2.2/conf/client_collect.conf
input {
  file {
    path => "/var/log/messages"
    type => "system"
    start_position => "beginning"
  }
  file {
    path => "/etc/nginx/access_json.log"
    codec => json
    type => "nginx-log"
    start_position => "beginning"
  }
  file {
    path => "/usr/local/apache-tomcat-7.0.63/logs/catalina.*"
    type => "tomcat"
    start_position => "beginning"
    # Java stack traces span lines: fold every line that does not
    # start with '[' into the previous event.
    codec => multiline {
      pattern => "^\["
      negate => true
      what => "previous"
    }
  }
}
output {
  if [type] == "system" {
    redis {
      host => "192.168.142.130"
      key => "system"
      data_type => 'list'
      port => "6379"
      db => "6"
    }
  }
  if [type] == "tomcat" {
    redis {
      host => "192.168.142.130"
      key => "tomcat"
      data_type => 'list'
      port => "6379"
      db => "6"
    }
  }
  if [type] == "nginx-log" {
    redis {
      host => "192.168.142.130"
      key => "nginx-log"
      data_type => 'list'
      port => "6379"
      db => "6"
    }
  }
}

# FIX: the trailing note '启动程序' ("start the program") was a bare word
# after the command and would have been passed to logstash as an extra
# argument; it is now this comment instead.
/usr/local/logstash-5.2.2/bin/logstash -f /usr/local/logstash-5.2.2/conf/client_collect.conf

下面的命令可以验证logstash是否可以和elasticsearch通信,elasticsearch能否获取到数据:

# Smoke test 1: echo stdin back through logstash (verifies logstash itself).
/usr/local/logstash-5.2.2/bin/logstash -e 'input { stdin{} } output { stdout{ codec => rubydebug} }' 

# Smoke test 2: ship stdin straight to elasticsearch (verifies connectivity).
/usr/local/logstash-5.2.2/bin/logstash -e 'input { stdin{} } output { elasticsearch { hosts => ["192.168.142.130:9200"] } }'

elk5.2.2+redis日志分析系统搭建_nginx_02

五、服务端使用nginx代理访问

# Reverse-proxy kibana (127.0.0.1-style backend on port 5601) behind
# nginx on port 80. Everything from 'log_format' through the closing
# '}' of the server block is CONTENT added to /etc/nginx/nginx.conf.
vim /etc/nginx/nginx.conf
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" $http_x_forwarded_for $request_length $msec $connection_requests $request_time';
log_format json '{"@timestamp":"$time_iso8601",'
'"host":"$server_addr",'
'"clientip":"$remote_addr",'
'"size":$body_bytes_sent,'
'"responsetime":$request_time,'
'"upstreamtime":"$upstream_response_time",'
'"upstreamhost":"$upstream_addr",'
'"http_host":"$host",'
'"url":"$uri",'
'"domain":"$host",'
'"xff":"$http_x_forwarded_for",'
'"referer":"$http_referer",'
'"agent":"$http_user_agent",'
'"status":"$status"}';
server {
listen 80;
index index.htm;
server_name 192.168.142.130;
access_log /etc/nginx/logs/access_json.log json;
location / {
# Preserve the original Host and client IP for kibana's logs.
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://192.168.142.130:5601;
}
}
service nginx start

六、mysql慢查询日志和tcp日志收集示例

# Logstash pipeline for mysql slow-query logs.
input {
  file {
    path => "/root/slow.log"
    type => "mysql-slowlog"
    start_position => "beginning"
    # A slow-log entry spans several lines; fold every line that does
    # not start a new '# User@Host:' header into the previous event.
    codec => multiline {
      pattern => "^# User@Host:"
      negate => true
      what => "previous"
    }
  }
}
filter {
  # Tag monitoring heartbeats ('SELECT SLEEP') and drop them so they
  # do not pollute the index.
  grok {
    match => { "message" => "SELECT SLEEP" }
    add_tag => [ "sleep_drop" ]
    tag_on_failure => []
  }
  if "sleep_drop" in [tags] {
    drop {}
  }
  # Extract user/host/timing/query fields from the slow-log entry.
  grok {
    match => [ "message", "(?m)^# Time:.*\s+# User@Host: %{USER:user}\[[^\]]+\] @ (?:(?<clienthost>\S*) )?\[(?:%{IP:clientip})?\]\s*Id: %{NUMBER:id:int}\s+# Query_time: %{NUMBER:query_time:float}\s+Lock_time: %{NUMBER:lock_time:float}\s+Rows_sent: %{NUMBER:rows_sent:int}\s+Rows_examined: %{NUMBER:rows_examined:int}\s*(?:use %{DATA:database};\s*)?SET timestamp=%{NUMBER:timestamp};\s*(?<query>(?<action>\w+)\s+.*)$" ]
  }
  # Use the slow log's own timestamp as @timestamp.
  date {
    match => [ "timestamp", "UNIX" ]
    remove_field => [ "timestamp" ]
  }
}
# FIX: the original had TWO output sections; logstash runs every output,
# so each event would have been indexed twice (once into a static
# "mysql-slowlog" index and once into the dated one). A single
# date-stamped index is kept. The workers/flush_size/idle_flush_time
# options are deprecated for the 5.x elasticsearch output and dropped.
output {
  elasticsearch {
    hosts => ["192.168.142.130:9200"]
    index => "mysql-slowlog-%{+YYYY.MM.dd}"
    template_overwrite => true
  }
}
tcp 输入插件监听示例(可用 nc 向该端口发送数据进行测试):
# Minimal tcp-input example: anything sent to 192.168.142.130:6666
# becomes an event, indexed into elasticsearch and echoed to stdout.
input {
tcp {
host => "192.168.142.130"
port => "6666"
}
}
output {
elasticsearch {
hosts => ["192.168.142.130:9200"]
}
stdout { codec => rubydebug }
}