李玉志 2019-12-25
[ ~]# vim /etc/hosts                    //configure name resolution
192.168.80.128 node1
192.168.80.129 node2
[ ~]# java -version                     //check whether Java is installed
[ ~]# mount.cifs //192.168.80.2/LNMP-C7 /mnt/
Password for //192.168.80.2/LNMP-C7:
[ mnt]# cd /mnt/elk/
[ elk]# rpm -ivh elasticsearch-5.5.0.rpm        //install
[ elk]# systemctl daemon-reload                 //reload unit files
[ elk]# systemctl enable elasticsearch.service  //start at boot
[ elk]# cd /etc/elasticsearch/
[ elasticsearch]# cp elasticsearch.yml elasticsearch.yml.bak   //back up
[ elasticsearch]# vim elasticsearch.yml         //edit the config file
cluster.name: my-elk-cluster                    //cluster name
node.name: node1                                //node name; use node2 on the second node
path.data: /data/elk_data                       //data directory
path.logs: /var/log/elasticsearch/              //log directory
bootstrap.memory_lock: false                    //do not lock memory at startup
network.host: 0.0.0.0                           //bind the service to all addresses
http.port: 9200                                 //listen on port 9200
discovery.zen.ping.unicast.hosts: ["node1", "node2"]   //cluster discovery via unicast
[ elasticsearch]# mkdir -p /data/elk_data       //create the data directory
[ elasticsearch]# chown elasticsearch.elasticsearch /data/elk_data/   //set ownership
[ elasticsearch]# systemctl start elasticsearch.service   //start the service
[ elasticsearch]# netstat -ntap | grep 9200     //confirm it is listening
tcp6       0      0 :::9200                 :::*                    LISTEN      2166/java
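With both nodes configured and started, a quick sanity check (a sketch; run it on either node) confirms the two-node cluster actually formed:

[ elasticsearch]# curl 'http://localhost:9200/_cluster/health?pretty'   //expect "number_of_nodes" : 2 and status green or yellow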
[ elasticsearch]# yum install gcc gcc-c++ make -y   //install build tools
[ elasticsearch]# cd /mnt/elk/
[ elk]# tar zxvf node-v8.2.1.tar.gz -C /opt/        //unpack the Node.js source
[ elk]# cd /opt/node-v8.2.1/
[ node-v8.2.1]# ./configure                         //configure
[ node-v8.2.1]# make && make install                //compile and install
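A quick verification (a sketch) that the build installed cleanly before moving on:

[ node-v8.2.1]# node -v     //should print v8.2.1
[ node-v8.2.1]# npm -v      //npm ships bundled with Node.js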
[ elk]# tar jxvf phantomjs-2.1.1-linux-x86_64.tar.bz2 -C /usr/local/src/   //unpack to /usr/local/src
[ elk]# cd /usr/local/src/phantomjs-2.1.1-linux-x86_64/bin/
[ bin]# cp phantomjs /usr/local/bin/    //put the binary on the PATH so the system can find it
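To confirm the binary is found on the PATH (a sketch):

[ bin]# phantomjs --version   //should print 2.1.1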
[ bin]# cd /mnt/elk/
[ elk]# tar zxvf elasticsearch-head.tar.gz -C /usr/local/src/   //unpack
[ elk]# cd /usr/local/src/elasticsearch-head/
[ elasticsearch-head]# npm install    //install the plugin's dependencies
[ elasticsearch-head]# vim /etc/elasticsearch/elasticsearch.yml   //append at the end of the file
http.cors.enabled: true          //enable cross-origin access support (default false)
http.cors.allow-origin: "*"      //domains allowed for cross-origin access
[ elasticsearch-head]# systemctl restart elasticsearch.service    //restart
[ elasticsearch-head]# cd /usr/local/src/elasticsearch-head/
[ elasticsearch-head]# npm run start &       //run the data-visualization service in the background
[1] 82515
[ elasticsearch-head]# netstat -ntap | grep 9100
tcp        0      0 0.0.0.0:9100            0.0.0.0:*               LISTEN      82525/grunt
[ elasticsearch-head]# netstat -ntap | grep 9200
tcp6       0      0 :::9200                 :::*                    LISTEN      82981/java
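A quick way to confirm the CORS settings took effect (a sketch; the Origin value is arbitrary) is to send a request carrying an Origin header and look for the matching header in the reply:

[ elasticsearch-head]# curl -H "Origin: http://example.com" -I http://localhost:9200   //response should include Access-Control-Allow-Origin: *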
[ ~]# curl -XPUT 'localhost:9200/index-demo/test/1?pretty' -H 'Content-Type: application/json' -d '{"user":"zhangsan","mesg":"hello world"}'   //index a test document
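Reading the document back verifies the write succeeded (a sketch):

[ ~]# curl -XGET 'localhost:9200/index-demo/test/1?pretty'   //returns the stored document under _source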
[ ~]# yum install httpd -y             //install Apache
[ ~]# systemctl start httpd.service    //start the service
[ ~]# java -version
[ ~]# mount.cifs //192.168.100.8/LNMP-C7 /mnt/   //mount the share
Password for //192.168.100.8/LNMP-C7:
[ ~]# cd /mnt/elk/
[ elk]# rpm -ivh logstash-5.5.1.rpm    //install logstash
[ elk]# systemctl start logstash.service
[ elk]# systemctl enable logstash.service        //start at boot
[ elk]# ln -s /usr/share/logstash/bin/logstash /usr/local/bin/   //put the logstash command on the PATH
[ elk]# logstash -e 'input { stdin{} } output { stdout{} }'      //standard input to standard output
The stdin plugin is now waiting for input:
16:58:11.145 [Api Webserver] INFO  logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
www.baidu.com                          //typed input
2019-12-19T08:58:35.707Z apache www.baidu.com
www.sina.com.cn                        //typed input
2019-12-19T08:58:42.092Z apache www.sina.com.cn
[ elk]# logstash -e 'input { stdin{} } output { stdout{ codec=>rubydebug } }'   //the rubydebug codec prints the full event; a codec is an encoder/decoder
The stdin plugin is now waiting for input:
17:03:08.226 [Api Webserver] INFO  logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
www.baidu.com                          //formatted output
{
    "@timestamp" => 2019-12-19T09:03:80.267Z,
      "@version" => "1",
          "host" => "apache",
       "message" => "www.baidu.com"
}
[ elk]# logstash -e 'input { stdin{} } output { elasticsearch { hosts=>["192.168.80.129:9200"] } }'   //write events into elasticsearch
The stdin plugin is now waiting for input:
17:06:46.846 [Api Webserver] INFO  logstash.agent - Successfully started Logstash API endpoint {:port=>9600}
www.baidu.com                          //typed input
www.sina.com.cn
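If the pipeline is wired up correctly, the typed lines land in a logstash-YYYY.MM.dd index on the ES node; a quick check (a sketch):

[ elk]# curl 'http://192.168.80.129:9200/_cat/indices?v'   //a logstash-<date> index should appear in the list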
[ elk]# chmod o+r /var/log/messages    //grant read permission to other users
[ elk]# vim /etc/logstash/conf.d/system.conf   //create the pipeline config
input {
    file{
        path => "/var/log/messages"    //log file to read
        type => "system"
        start_position => "beginning"
    }
}
output {
    elasticsearch {                    //ship events to the ES node
        hosts => ["192.168.80.129:9200"]
        index => "system-%{+YYYY.MM.dd}"
    }
}
[ elk]# systemctl restart logstash.service   //restart the service
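To confirm events are flowing, query today's system index for a sample document (a sketch):

[ elk]# curl 'http://192.168.80.129:9200/system-*/_search?pretty&size=1'   //hits.total should be non-zero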
[ ~]# cd /mnt/elk/
[ elk]# rpm -ivh kibana-5.5.1-x86_64.rpm   //install
[ elk]# cd /etc/kibana/
[ kibana]# cp kibana.yml kibana.yml.bak    //back up
[ kibana]# vim kibana.yml                  //edit the config file
server.port: 5601                          //port
server.host: "0.0.0.0"                     //listen on all interfaces
elasticsearch.url: "http://192.168.80.129:9200"   //address of the ES node to query
kibana.index: ".kibana"                    //index name
[ kibana]# systemctl start kibana.service  //start the service
[ kibana]# systemctl enable kibana.service
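Once started, Kibana listens on port 5601; verify it (a sketch), then browse to http://<kibana-host>:5601 and create the index pattern system-*, matching the index name defined in system.conf:

[ kibana]# netstat -ntap | grep 5601   //kibana should be in LISTEN state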
[ elk]# vim /etc/logstash/conf.d/apache_log.conf   //create the pipeline config
input {
    file{
        path => "/etc/httpd/logs/access_log"   //Apache access log to read
        type => "access"
        start_position => "beginning"
    }
    file{
        path => "/etc/httpd/logs/error_log"
        type => "error"
        start_position => "beginning"
    }
}
output {
    if [type] == "access" {            //route events to an index based on their type
        elasticsearch {
            hosts => ["192.168.80.129:9200"]
            index => "apache_access-%{+YYYY.MM.dd}"
        }
    }
    if [type] == "error" {
        elasticsearch {
            hosts => ["192.168.80.129:9200"]
            index => "apache_error-%{+YYYY.MM.dd}"
        }
    }
}
[ elk]# logstash -f /etc/logstash/conf.d/apache_log.conf   //run logstash with this config file
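Hitting Apache once generates fresh access_log entries, after which the two new indices should exist on the ES node (a sketch, run on the Apache host):

[ ~]# curl http://localhost/ > /dev/null   //write an access_log entry
[ ~]# curl 'http://192.168.80.129:9200/_cat/indices?v' | grep apache   //expect apache_access-<date> and apache_error-<date>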
The other part of the data needs clustering and classification first; the resulting categories are stored in a clustering index on the ES cluster. The aggregation results produced by the data-processing layer are written to a designated ES index, and the data related to each aggregated topic is stored in a field under the corresponding document.
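As an illustration only (the index name cluster-result and the fields topic and members below are hypothetical, not part of the setup above), such a clustering result could be stored as an ordinary document whose fields carry the per-topic data:

[ ~]# curl -XPUT 'localhost:9200/cluster-result/topic/1?pretty' -H 'Content-Type: application/json' -d '{"topic":"web-errors","members":["err-doc-1","err-doc-2"]}'   //hypothetical: one document per aggregated topic, topic data kept in its fields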