Private Cloud Setup Basics (5)

Ansible Service Deployment: Deploying an ELK Cluster

Write a playbook to deploy ELK.

Using the OpenStack private cloud platform provided for the contest, create three CentOS 7.9 cloud hosts named elk-1, elk-2 and elk-3; the Ansible host from the previous exercise can be reused. On the Ansible node, write a playbook that deploys the ELK cluster service on these three nodes (create an install_elk directory under /root as the Ansible working directory, and name the playbook entry file install_elk.yaml). Specifically: install the Elasticsearch service on all three nodes and configure them as an Elasticsearch cluster; install Kibana on the first node; install Logstash on the second node. (The required packages are available from the HTTP service.) When finished, submit the Ansible node's username, password and IP address in the answer box.

[root@localhost ~]# hostnamectl set-hostname ansible
[root@localhost ~]# bash
[root@ansible ~]# vi /etc/hosts  # add entries for the ansible node and the three ELK nodes
192.168.19.25 ansible
192.168.19.29 elk-1
192.168.19.31 elk-2
192.168.19.32 elk-3   

[root@ansible ~]# ssh-keygen
[root@ansible ~]# ssh-copy-id elk-1
[root@ansible ~]# ssh-copy-id elk-2
[root@ansible ~]# ssh-copy-id elk-3
[root@ansible ~]# scp /etc/hosts elk-1:/etc/
[root@ansible ~]# scp /etc/hosts elk-2:/etc/
[root@ansible ~]# scp /etc/hosts elk-3:/etc/
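A quick sanity check that key-based login works (this check is my own addition, not part of the original transcript):

[root@ansible ~]# for h in elk-1 elk-2 elk-3; do ssh $h hostname; done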
[root@ansible ~]# vi /etc/selinux/config  # set SELINUX=disabled so the change persists across reboots
[root@ansible ~]# systemctl stop firewalld
[root@ansible ~]# systemctl disable firewalld
[root@ansible ~]# setenforce 0
[root@ansible ~]# getenforce 
Permissive
# Upload the packages at this point: besides Ansible.tar.gz you also need elasticsearch-6.0.0.rpm, kibana-6.0.0-x86_64.rpm and logstash-6.0.0.rpm
[root@ansible ~]# tar -zxvf Ansible.tar.gz 
[root@ansible ~]# scp kibana-6.0.0-x86_64.rpm elk-1:/root/
kibana-6.0.0-x86_64.rpm: No such file or directory   # the rpm had not been uploaded yet; the retry further down succeeds
[root@ansible ~]# scp elasticsearch-6.0.0.rpm elk-1:/root/
elasticsearch-6.0.0.rpm                 100%   27MB  51.6MB/s   00:00    
[root@ansible ~]# scp elasticsearch-6.0.0.rpm elk-2:/root/
elasticsearch-6.0.0.rpm                 100%   27MB  50.8MB/s   00:00    
[root@ansible ~]# scp elasticsearch-6.0.0.rpm elk-3:/root/
elasticsearch-6.0.0.rpm                 100%   27MB  54.3MB/s   00:00    
[root@ansible ~]# scp kibana-6.0.0-x86_64.rpm elk-1:/root/
kibana-6.0.0-x86_64.rpm                 100%   61MB  61.0MB/s   00:01    
[root@ansible ~]# scp kibana-6.0.0-x86_64.rpm elk-2:/root/
kibana-6.0.0-x86_64.rpm                 100%   61MB  61.0MB/s   00:01    
[root@ansible ~]# scp kibana-6.0.0-x86_64.rpm elk-3:/root/
kibana-6.0.0-x86_64.rpm                 100%   61MB  61.3MB/s   00:00    
[root@ansible ~]# scp logstash-6.0.0.rpm elk-1:/root/
logstash-6.0.0.rpm                      100%  108MB  55.4MB/s   00:01    
[root@ansible ~]# scp logstash-6.0.0.rpm elk-2:/root/
logstash-6.0.0.rpm                      100%  108MB  59.0MB/s   00:01    
[root@ansible ~]# scp logstash-6.0.0.rpm elk-3:/root/
logstash-6.0.0.rpm  
[root@ansible ~]# tar -zxvf ansible.tar.gz -C /opt/  # this directory was set up during the earlier deployment; check that the yum repo content is in place
[root@ansible ~]# mv /etc/yum.repos.d/* /media/
[root@ansible ~]# vi /etc/yum.repos.d/local.repo   # create the repo file, then verify:
[root@ansible ~]# cat /etc/yum.repos.d/local.repo 
[ansible]
name=ansible
baseurl=file:///opt/ansible
gpgcheck=0
enabled=1
[root@ansible ~]# yum -y install ansible
[root@ansible ~]# mkdir example   # note: the task text asks for /root/install_elk with entry file install_elk.yaml; this walkthrough uses example/cscc_install.yaml, so rename to match the requirement
[root@ansible ~]# cd example

[root@ansible example]# vi /etc/ansible/hosts
[elk-1]
192.168.19.29
[elk-2]
192.168.19.31
[elk-3]
192.168.19.32
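With the inventory defined, connectivity can be verified before writing any playbook (a quick check of my own, not in the original transcript):

[root@ansible example]# ansible all -m ping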
Mount the CentOS-7-x86_64-DVD-2009.iso image at /opt/centos:

[root@ansible example]# curl -O http://mirrors.douxuedu.com/competition/CentOS-7-x86_64-DVD-2009.iso  # this download can be skipped if the ISO is already on the host
[root@ansible example]# mkdir /opt/centos
[root@ansible centos]# mount -o loop CentOS-7-x86_64-DVD-2009.iso /opt/centos/
[root@ansible centos]# cat /etc/yum.repos.d/local.repo
[ansible]
name=ansible
baseurl=file:///opt/ansible
gpgcheck=0
enabled=1
[centos]
name=centos
baseurl=file:///opt/centos
gpgcheck=0
enabled=1
[root@ansible centos]# yum install -y vsftpd
[root@ansible centos]# vi /etc/vsftpd/vsftpd.conf
Add the line anon_root=/opt
[root@ansible centos]# systemctl restart vsftpd
[root@ansible centos]# cd /etc/vsftpd/
[root@ansible vsftpd]# vi ftp.repo
[centos]
name=centos
baseurl=ftp://192.168.19.25/centos
gpgcheck=0
enabled=1
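The anonymous FTP export can be spot-checked from the ansible node itself (my own check, assuming the IP above):

[root@ansible vsftpd]# curl -s ftp://192.168.19.25/centos/ | head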
[root@ansible vsftpd]# cd 
[root@ansible ~]# cd example
[root@ansible example]# rpm -ivh /root/elasticsearch-6.0.0.rpm
[root@ansible example]# cp -rf /etc/elasticsearch/elasticsearch.yml elk-1.yml
[root@ansible example]# cat elk-1.yml | grep -Ev "^$|^#"    # the defaults here differ from the manual; edit the file and set the IP to node1 (elk-1)
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
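For reference, a minimal sketch of what elk-1.yml should end up containing for a three-node Elasticsearch 6.0 cluster (the cluster name ELK is an assumption; the 192.168.19.x addresses are this walkthrough's values, and elk-2.yml/elk-3.yml differ only in node.name and network.host):

cluster.name: ELK
node.name: elk-1
node.master: true
node.data: true
network.host: 192.168.19.29
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.19.29","192.168.19.31","192.168.19.32"]
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch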
[root@ansible example]# cp elk-1.yml elk-2.yml
[root@ansible example]# cat elk-2.yml | grep -Ev "^$|^#"     # set the IP to node2 (elk-2)
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
[root@ansible example]# cp elk-1.yml elk-3.yml

[root@ansible example]# cat elk-3.yml | grep -Ev "^$|^#"    # set the IP to node3 (elk-3)
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
[root@ansible example]# rpm -ivh /root/kibana-6.0.0-x86_64.rpm
[root@ansible example]# cp -rf  /etc/kibana/kibana.yml  kibana1.yml
[root@ansible example]# cat kibana1.yml | grep -v "^#"   # the original attempt (cat kibana.yml |grep -v^#) failed: the working copy is named kibana1.yml and the grep pattern must be quoted
[root@ansible kibana]# cd /etc/kibana
[root@ansible kibana]# vi kibana.yml
Edit the settings below (in this environment the first node is 192.168.19.29; the 172.128.11.217 addresses in the original capture came from a different run):
server.port: 5601
server.host: "192.168.19.29"
elasticsearch.url: "http://192.168.19.29:9200"
[root@ansible kibana]# cp kibana.yml /root/example/kibana.yml   # keep the edited file where the playbook's src= expects it
[root@ansible kibana]# cd /root/example/
[root@ansible example]# rpm -ivh /root/logstash-6.0.0.rpm
[root@ansible example]# cp -rf /etc/logstash/logstash.yml .
[root@ansible example]# vi logstash.yml
Set http.host: "192.168.19.31", the address of the second node (elk-2).
[root@ansible example]# vi syslog.conf

input {
     file {
          path => "/var/log/messages"      # path of the log file to collect
          type => "systemlog"              # log type label, user-defined
          start_position => "beginning"    # read the file from the start
          stat_interval => "3"
     }
}
output {
    if [type] == "systemlog" {
          elasticsearch {
                hosts => ["192.168.19.29:9200"] # address of node1 (elk-1)
                index => "system-log-%{+YYYY.MM.dd}"
          }
     }
}
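Once Logstash is installed on elk-2, the pipeline can be syntax-checked before relying on it (a hedged example; --path.settings points at the rpm package's config directory):

[root@elk-2 ~]# /usr/share/logstash/bin/logstash --path.settings /etc/logstash -t -f /etc/logstash/conf.d/syslog.conf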

[root@ansible example]# vi cscc_install.yaml
- hosts: all
  remote_user: root
  tasks:
    - name: rm repo
      shell: rm -rf /etc/yum.repos.d/*
    - name: copy repo
      copy: src=ftp.repo dest=/etc/yum.repos.d/
    - name: install java
      shell: yum -y install java-1.8.0-*
    - name: install elasticsearch
      # the rpm was copied to /root/ on every node earlier; this task was
      # commented out in the original run because the package had already been installed by hand
      shell: rpm -ivh /root/elasticsearch-6.0.0.rpm
- hosts: elk-1    # group names must match /etc/ansible/hosts (the original used node1/node2/node3, which are not defined there)
  remote_user: root
  tasks:
    - name: copy elasticsearch config
      copy: src=elk-1.yml dest=/etc/elasticsearch/elasticsearch.yml
    - name: daemon-reload
      shell: systemctl daemon-reload
    - name: start elasticsearch
      shell: systemctl start elasticsearch && systemctl enable elasticsearch
    - name: install kibana
      shell: rpm -ivh /root/kibana-6.0.0-x86_64.rpm
    - name: copy kibana config
      template: src=kibana.yml dest=/etc/kibana/kibana.yml
    - name: start kibana
      shell: systemctl start kibana && systemctl enable kibana
- hosts: elk-2
  remote_user: root
  tasks:
    - name: copy elasticsearch config
      copy: src=elk-2.yml dest=/etc/elasticsearch/elasticsearch.yml
    - name: daemon-reload
      shell: systemctl daemon-reload
    - name: start elasticsearch
      shell: systemctl start elasticsearch && systemctl enable elasticsearch
    - name: install logstash
      shell: rpm -ivh /root/logstash-6.0.0.rpm
    - name: copy logstash config
      copy: src=logstash.yml dest=/etc/logstash/logstash.yml
    - name: copy syslog pipeline
      copy: src=syslog.conf dest=/etc/logstash/conf.d/syslog.conf
- hosts: elk-3
  remote_user: root
  tasks:
    - name: copy elasticsearch config
      copy: src=elk-3.yml dest=/etc/elasticsearch/elasticsearch.yml
    - name: daemon-reload
      shell: systemctl daemon-reload
    - name: start elasticsearch
      shell: systemctl start elasticsearch && systemctl enable elasticsearch
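With the playbook complete, a check-mode pass, the real run, and a quick cluster-health probe might look like this (the ansible-playbook and curl calls below are my own verification steps, not in the original transcript):

[root@ansible example]# ansible-playbook -C cscc_install.yaml   # dry run first
[root@ansible example]# ansible-playbook cscc_install.yaml
[root@ansible example]# curl http://192.168.19.29:9200/_cluster/health?pretty   # expect "number_of_nodes" : 3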

Ansible Deployment of a Kafka Service

Write a playbook to deploy ZooKeeper and Kafka.

Using the provided OpenStack private cloud platform, create four CentOS 7.5 cloud hosts: one as the Ansible control machine, named ansible, and three named node1, node2 and node3. Install the Ansible service on the ansible node from the /ansible/ansible.tar.gz package in the attachments. On the control machine, write an Ansible script (create an example directory under /root as the Ansible working directory, with the entry file named cscc_install.yaml) that uses roles to install a Kafka cluster on the other three hosts (the zookeeper and kafka archives are inside gpmall-single.tar.gz; extract them to /opt on the node machines and install from there). When finished, submit the ansible node's username, password and IP address in the answer box. (The grading system will connect to your ansible node and execute the Ansible script, so have the environment ready.)

This builds on the previous setup. (Two caveats: the task asks for a roles layout and for extraction to /opt, while the walkthrough below uses plain plays and /root; and the plays target groups node1/node2/node3, so add those groups to /etc/ansible/hosts, pointing at the three node IPs.)
[root@ansible example]# mkdir -p myid/{myid1,myid2,myid3}
[root@ansible example]# echo "1" > myid/myid1/myid
[root@ansible example]# echo "2" > myid/myid2/myid
[root@ansible example]# echo "3" > myid/myid3/myid
Upload the gpmall-single.tar.gz package, then extract it:
[root@ansible example]# tar -zxvf gpmall-single.tar.gz 

[root@ansible example]# vi ftp.repo 
Append:
[gpmall-repo]
name=gpmall
baseurl=ftp://ansible/gpmall-repo  # confirm this matches where the directory sits under the FTP anon_root
gpgcheck=0
enabled=1
[centos]
name=centos
baseurl=ftp://ansible/centos
gpgcheck=0
enabled=1
[root@ansible example]# vi zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/tmp/zookeeper
clientPort=2181
server.1=192.168.19.29:2888:3888
server.2=192.168.19.31:2888:3888
server.3=192.168.19.32:2888:3888
# Upload the gpmall-single.tar.gz package first and extract it (watch the extraction path).
Extract both the zookeeper and kafka archives, and upload the JDK package as well:
[root@ansible example]# scp jdk-8u202-linux-x64.rpm  root@192.168.19.29:/root/example
jdk-8u202-linux-x64.rpm                                                                                                                  100%  170MB  56.7MB/s   00:03    
[root@ansible example]# scp jdk-8u202-linux-x64.rpm  root@192.168.19.31:/root/example
jdk-8u202-linux-x64.rpm                                                                                                                  100%  170MB  43.3MB/s   00:03    
[root@ansible example]# scp jdk-8u202-linux-x64.rpm  root@192.168.19.32:/root/example
jdk-8u202-linux-x64.rpm 
Note: zkServer.sh reads its configuration from the conf/ directory next to bin/ in the extracted tree, so zoo.cfg must land there; the original attempt (mkdir -p /etc/zookeeper-3.4.14/conf) put the file somewhere ZooKeeper never looks.

- hosts: all
  remote_user: root
  tasks:
    - name: rm repo
      shell: rm -rf /etc/yum.repos.d/*
    - name: copy repo
      copy: src=ftp.repo dest=/etc/yum.repos.d/
    - name: install java
      shell: rpm -ivh /root/example/jdk-8u202-linux-x64.rpm
    - name: copy zookeeper
      # the tarball sits on the ansible node, so remote_src must not be set
      # (the original had remote_src=yes, which looks for the file on the target)
      copy: src=/root/example/gpmall-single/zookeeper-3.4.14.tar.gz dest=/root/zookeeper-3.4.14.tar.gz
    - name: untar zookeeper
      shell: tar -zxf /root/zookeeper-3.4.14.tar.gz -C /root/
    - name: copy zoo.cfg
      copy: src=zoo.cfg dest=/root/zookeeper-3.4.14/conf/zoo.cfg
    - name: create dataDir
      shell: mkdir -p /tmp/zookeeper
- hosts: node1
  remote_user: root
  tasks:
    - name: copy myid1
      copy: src=myid/myid1/myid dest=/tmp/zookeeper/myid
- hosts: node2
  remote_user: root
  tasks:
    - name: copy myid2
      copy: src=myid/myid2/myid dest=/tmp/zookeeper/myid
- hosts: node3
  remote_user: root
  tasks:
    - name: copy myid3
      copy: src=myid/myid3/myid dest=/tmp/zookeeper/myid
- hosts: all
  remote_user: root
  tasks:
    - name: start zookeeper
      shell: /root/zookeeper-3.4.14/bin/zkServer.sh start
  # In hindsight this is the shape that finally worked; the earlier failures came down to files not being uploaded, wrong commands in the playbook, individual nodes missing the required files, and an incomplete understanding of the FTP file service.
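After the play finishes, each node should report its quorum role (my own verification step, assuming the paths above):

[root@node1 ~]# /root/zookeeper-3.4.14/bin/zkServer.sh status
# expect Mode: follower on two nodes and Mode: leader on one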
       
Configure Kafka:
[root@ansible example]# mkdir -p server/{server1,server2,server3}
[root@ansible example]# vi server/server1/server.properties

broker.id=1

num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000

zookeeper.connect=192.168.19.29:2181,192.168.19.31:2181,192.168.19.32:2181
# node1's IP
listeners = PLAINTEXT://192.168.19.29:9092

zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
[root@ansible example]# vi server/server2/server.properties   # note: server2, not server1 as in the original
broker.id=2

num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000

zookeeper.connect=192.168.19.29:2181,192.168.19.31:2181,192.168.19.32:2181
# node2's IP (the 192.168.200.x addresses in the original capture came from a different run)
listeners = PLAINTEXT://192.168.19.31:9092

zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0

[root@ansible example]# vi server/server3/server.properties
broker.id=3

num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/tmp/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000

zookeeper.connect=192.168.19.29:2181,192.168.19.31:2181,192.168.19.32:2181
# node3's IP
listeners = PLAINTEXT://192.168.19.32:9092

zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
     
# The final run errors out if the helper script is not executable; the copy task below sets mode=0755 (the original note suggested running chmod +x /root/start_kafka.sh by hand beforehand)
       
Kafka has to be started by hand, via the start_kafka.sh helper that the playbook copies out to each node (the script itself never appears in the original notes).
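A minimal sketch of what start_kafka.sh could contain, assuming the paths used above (treat this as an illustration, not the original script):

#!/bin/bash
# start the Kafka broker in the background with the node-specific config
/root/kafka_2.11-1.1.1/bin/kafka-server-start.sh -daemon /root/kafka_2.11-1.1.1/config/server.properties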
                 
[root@ansible example]# vi cscc_install_kafka.yml

- hosts: all
  remote_user: root
  tasks:
    - name: copy kafka
      copy: src=/root/example/gpmall-single/kafka_2.11-1.1.1.tgz dest=/root/kafka_2.11-1.1.1.tgz
    - name: untar kafka
      shell: tar -zxf /root/kafka_2.11-1.1.1.tgz -C /root/
- hosts: node1
  remote_user: root
  tasks:
    - name: copy server1
      copy: src=server/server1/server.properties dest=/root/kafka_2.11-1.1.1/config/server.properties
- hosts: node2
  remote_user: root
  tasks:
    - name: copy server2
      copy: src=server/server2/server.properties dest=/root/kafka_2.11-1.1.1/config/server.properties
- hosts: node3
  remote_user: root
  tasks:
    - name: copy server3
      copy: src=server/server3/server.properties dest=/root/kafka_2.11-1.1.1/config/server.properties
- hosts: all
  remote_user: root
  tasks:
    - name: copy kafka.sh
      copy: src=start_kafka.sh dest=/root/start_kafka.sh mode=0755
    - name: start kafka
      shell: bash /root/start_kafka.sh
# Final status of the original run: Kafka failed to start because it could not find its server.properties file, while ZooKeeper appeared to work fine.
[root@ansible example]# ansible-playbook -C cscc_install_kafka.yml   # -C is check (dry-run) mode; drop it for the real run
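If the real run succeeds, the cluster can be smoke-tested from any node with the CLI tools bundled in Kafka 1.1.1 (my own check, using the ZooKeeper addresses from zoo.cfg):

[root@node1 ~]# /root/kafka_2.11-1.1.1/bin/kafka-topics.sh --create --zookeeper 192.168.19.29:2181 --replication-factor 3 --partitions 1 --topic test
[root@node1 ~]# /root/kafka_2.11-1.1.1/bin/kafka-topics.sh --list --zookeeper 192.168.19.29:2181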

That wraps up the private cloud setup basics. If you need anything else, feel free to message me; I have more exercises.
