环境准备

CentOS 6.5,JDK 1.8+,所有组件均为单节点部署

安装JDK

忽略

安装ES
下载安装
[root@localhost ~]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.rpm
[root@localhost ~]# rpm -ivh elasticsearch-6.0.0.rpm
配置ES
[root@localhost ~]# vim /etc/elasticsearch/elasticsearch.yml  # 增加或更改以下内容
cluster.name: master-node  # 集群中的名称
node.name: master  # 该节点名称
node.master: true  # 意思是该节点为主节点
node.data: true  # 表示这是数据节点
network.host: 0.0.0.0  # 监听全部ip,在实际环境中应设置为一个安全的ip
http.port: 9200  # es服务的端口号
检查
[root@localhost bin]# curl 'localhost:9200/_cluster/health?pretty'
{
  "cluster_name" : "master-node",
  "status" : "yellow",
  "timed_out" : false,
  "number_of_nodes" : 1,
  "number_of_data_nodes" : 1,
  "active_primary_shards" : 6,
  "active_shards" : 6,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 6,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0,
  "task_max_waiting_in_queue_millis" : 0,
  "active_shards_percent_as_number" : 50.0
}
安装Kibana
安装
[root@master-node ~]# wget https://artifacts.elastic.co/downloads/kibana/kibana-6.0.0-x86_64.rpm
[root@master-node ~]# rpm -ivh kibana-6.0.0-x86_64.rpm
配置
[root@master-node ~]# vim /etc/kibana/kibana.yml  # 增加以下内容
server.port: 5601  # 配置kibana的端口
server.host: xx.xx.xx.xx  # 配置监听ip
elasticsearch.url: "http://xx.xx.xx.xx:9200"  # 配置es服务器的ip,如果是集群则配置该集群中主节点的ip
logging.dest: /var/log/kibana.log  # 配置kibana的日志文件路径,不然默认是messages里记录日志
创建日志
[root@master-node ~]# touch /var/log/kibana.log; chmod 777 /var/log/kibana.log
启动查看进程
[root@localhost bin]# /etc/init.d/kibana start
访问
http://xx.xx.xx.xx:5601
安装logstash
安装
[root@data-node1 ~]# wget https://artifacts.elastic.co/downloads/logstash/logstash-6.0.0.rpm
[root@data-node1 ~]# rpm -ivh logstash-6.0.0.rpm
配置
[root@localhost conf.d]# pwd
/etc/logstash/conf.d
[root@localhost conf.d]# vi syslog.conf 
# Logstash pipeline: listen on TCP 10514 for JSON-encoded log events and
# index them into Elasticsearch, one daily index per application based on
# the event's "appname" field (supplied e.g. by the Logback LogstashEncoder).
input{
    tcp{
        type => "elk"  # sets the event's "type" field
        port => 10514  # must match the port that rsyslog/logback sends to
        mode => "server"  # accept incoming connections (as opposed to "client")
        tags => "tags"  # adds a literal "tags" tag to every event
        codec => "json"  # payload must be JSON; plain syslog text will not parse cleanly
        }
}
output{
     elasticsearch{
        action => "index"  # index each event as a new document
        hosts => ["192.168.36.137:9200"]  # Elasticsearch HTTP endpoint
        index => "%{appname}-%{+YYYY.MM.dd}"  # daily index; requires an "appname" field on the event
        }
     stdout{
        codec => rubydebug {}  # also pretty-print events to stdout for debugging
        }
}

检查配置是否有错误
[root@data-node1 ~]# cd /usr/share/logstash/bin
[root@data-node1 /usr/share/logstash/bin]# ./logstash --path.settings /etc/logstash/ -f /etc/logstash/conf.d/syslog.conf --config.test_and_exit
Sending Logstash's logs to /var/log/logstash which is now configured via log4j2.properties
Configuration OK  # 为ok则代表配置文件没有问题
配置Kibana
[root@data-node1 ~]# vim /etc/rsyslog.conf
#### RULES ####

# 注意:rsyslog 转发的是普通 syslog 文本,而上面 logstash input 配置了 codec => "json",
# 二者不匹配;若要同时接收 rsyslog 日志,应为其单独配置一个不带 json codec 的 input
*.* @@xx.xx.xxx.xx:10514
重启rsyslog
/etc/init.d/rsyslog restart
启动
[root@localhost bin]# pwd
/usr/share/logstash/bin
[root@localhost bin]#./logstash  -f /etc/logstash/conf.d/syslog.conf

如果报错参见日志/var/log/logstash

配置Springboot

application.properties
logging.config=classpath:logback.xml
在同级目录新建logback.xml

<?xml version="1.0" encoding="UTF-8"?>
<!-- Logback configuration: log to the console and ship JSON events to Logstash over TCP. -->
<configuration debug="false">
    <!-- Base directory used by the (commented-out) rolling file appender below. -->
    <property name="LOG_HOME" value="D:/eclipse-workspace" />

    <!-- Console output with a human-readable pattern. -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
        </encoder>
    </appender>

    <!-- Ship logs to the Logstash TCP input; host/port must match the logstash tcp input. -->
    <appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <remoteHost>192.168.36.137</remoteHost>
        <port>10514</port>
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder">
            <!-- "appname" is used by the Logstash output as the index name prefix. -->
            <customFields>{"appname":"kakashi"}</customFields>
        </encoder>
    </appender>

    <!--
    <appender name="FILE"  class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <FileNamePattern>${LOG_HOME}/TestWeb.log.%d{yyyy-MM-dd}.log</FileNamePattern>
            <MaxHistory>30</MaxHistory>
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
        </encoder>
        <triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
            <MaxFileSize>10MB</MaxFileSize>
        </triggeringPolicy>
    </appender>

    myibatis log configure-->
    <!-- MyBatis / JDBC SQL logging. NOTE: MyBatis classes live under org.apache.ibatis
         (the original "com.apache.ibatis" matched nothing). -->
    <logger name="org.apache.ibatis" level="TRACE"/>
    <logger name="java.sql.Connection" level="DEBUG"/>
    <logger name="java.sql.Statement" level="DEBUG"/>
    <logger name="java.sql.PreparedStatement" level="DEBUG"/>

    <root level="INFO">
        <appender-ref ref="STDOUT" />
        <appender-ref ref="logstash" />
    </root>
</configuration>

安装ELK参考的:http://blog.51cto.com/zero01/2082794
配置logstash和logback百度出来的

发表评论

电子邮件地址不会被公开。 必填项已用*标注