一、配置RS主机(server2、server3)
[root@server2 ~]
[root@server2 ~]
[root@server2 ~]
Starting httpd: httpd: Could not reliably determine the server's fully qualified domain name, using 172.25.12.2 for ServerName
[ OK ]
[root@server2 ~]
<h1>server-2 </h1>
[root@server3 ~]
<h1>server-3 </h1>
二、server1主机配置nginx
1、源码安装nginx
[root@server1 ~]
nginx-1.14.0.tar.gz
[root@server1 ~]
[root@server1 ~]
nginx-1.14.0 nginx-1.14.0.tar.gz
[root@server1 ~]
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
2、调试
[root@server1 nginx-1.14.0]# nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
[root@server1 nginx-1.14.0]# nginx
[root@server1 nginx-1.14.0]# curl -I localhost
HTTP/1.1 200 OK
Server: nginx
Date: Wed, 27 Jun 2018 15:22:35 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Wed, 27 Jun 2018 15:21:00 GMT
Connection: keep-alive
ETag: "5b33ab5c-264"
Accept-Ranges: bytes
三、实现nginx负载均衡
1、配置nginx
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
2 user nginx nginx;
3 worker_processes 1;
18 upstream test{
19 server 172.25.12.2:80;
20 server 172.25.12.3:80;
21 }
123 server {
124 listen 80;
125 server_name www.test.org;
126 location / {
127 proxy_pass http://test;
128 }
129
130 }
[root@server1 nginx-1.14.0]
nginx: [emerg] unexpected "}" in /usr/local/nginx/conf/nginx.conf:128
nginx: configuration file /usr/local/nginx/conf/nginx.conf test failed
[root@server1 nginx-1.14.0]
[root@server1 nginx-1.14.0]
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
[root@server1 nginx-1.14.0]
2、物理主机测试ok
[root@foundation12 ~]
172.25.12.1 www.test.org
[root@foundation12 ~]
<h1>server-2 </h1>
[root@foundation12 ~]
<h1>server-3 </h1>
[root@foundation12 ~]
<h1>server-2 </h1>
[root@foundation12 ~]
<h1>server-3 </h1>
四、实现高可用
1、server4主机配置nginx
[root@server1 nginx-1.14.0]
[root@server4 local]
[root@server4 local]
[root@server4 local]
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
2、server1和server4配置pacemaker
注意：nginx关闭，采用服务管理；注意：配置yum源；注意：server1和server4主机配置要一致
yum install -y corosync pacemaker
cd /etc/corosync/
cp corosync.conf.example corosync.conf
vim corosync.conf
#################
10 bindnetaddr: 172.25.12.0
11 mcastaddr: 226.94.1.12
12 mcastport: 5405
34 service {
35 name: pacemaker
36 ver: 0
37 }
#################
scp corosync.conf server4:/etc/corosync/
/etc/init.d/corosync start (server1和server4主机都做)
yum install -y crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm
3、配置crm
[root@server1 cluster]
error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Errors found during check: config not valid
[root@server1 cluster]
crm(live)
crm(live)configure
crm(live)configure
[root@server1 cluster]
4、实现高可用
[root@server1 ~]
crm(live)
crm(live)configure
crm(live)configure
crm(live)configure
crm(live)configure
crm(live)configure
crm(live)configure
crm(live)configure
crm(live)configure
crm(live)configure
crm(live)configure
fence_legacy fence_pcmk
5、排错
[root@foundation12 rhel6.5]# systemctl status fence_virtd.service
● fence_virtd.service - Fence-Virt system host daemon
Loaded: loaded (/usr/lib/systemd/system/fence_virtd.service; enabled; vendor preset: disabled)
Active: active (running) since Wed 2018-06-27 20:03:49 CST; 3h 59min ago
Process: 1640 ExecStart=/usr/sbin/fence_virtd $FENCE_VIRTD_ARGS (code=exited, status=0/SUCCESS)
Main PID: 2297 (fence_virtd)
CGroup: /system.slice/fence_virtd.service
└─2297 /usr/sbin/fence_virtd -w
server1主机 (配置fence_xvm.key文件)
[root@server1 cluster]# pwd
/etc/cluster
[root@server1 cluster]# ls
fence_xvm.key
server4主机 (配置fence_xvm.key文件)
[root@server4 cluster]# pwd
/etc/cluster
[root@server4 cluster]# ls
fence_xvm.key
注意:Server1和server4主机安装fence-virt
##fence_virt可在 www.pkgs.org 下载
yum install -y fence-virt-0.2.3-24.el6.x86_64.rpm
[root@server1 ~]
crm(live)
crm(live)configure
crm(live)configure
crm(live)configure
crm(live)configure
6、高可用状态
Online: [ server1 server4 ]
Resource Group: nginxgroup
vip (ocf::heartbeat:IPaddr2): Started server1
nginx (lsb:nginx): Started server1
vmfence (stonith:fence_xvm): Started server1
[root@server1 cluster]
crm(live)
crm(live)resource
Cleaning up vmfence on server1
Cleaning up vmfence on server4
Waiting for 1 replies from the CRMd. OK
Online: [ server1 server4 ]
Resource Group: nginxgroup
vip (ocf::heartbeat:IPaddr2): Started server1
nginx (lsb:nginx): Started server1
vmfence (stonith:fence_xvm): Started server4