使用keepalived在centos7环境下搭建nginx的HA集群:
nginx1:172.16.12.104
nginx2:172.16.12.105
关闭selinux iptables
主机名
/etc/hosts
联网
同步时间
nginx1&2:安装软件
[root@jq-nginx1 ~]# yum install keepalived -y
[root@jq-nginx1 ~]# yum install elinks -y
nginx1:
[root@jq-nginx1 ~]# echo "172.16.12.104 nginx1.jq.com" >> /etc/hosts
[root@jq-nginx1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.12.104 nginx1.jq.com
nginx2:
[root@jq-nginx2 ~]# echo "172.16.12.105 nginx2.jq.com" >> /etc/hosts
[root@jq-nginx2 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.12.105 nginx2.jq.com
[root@jq-nginx1 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! nginx HA pair: BOTH nodes run state BACKUP with nopreempt, so the
! node that currently holds the VIP keeps it even after the failed
! peer recovers (no flap-back).
global_defs {
! unique identifier for this keepalived node
router_id Nginx_HA_1
}
! health-check script, executed every 2 seconds; the script itself
! stops keepalived on repeated failure, releasing the VIP to the peer
vrrp_script check_run {
script "/root/check_nginx.sh"
interval 2
}
! group the instances so they fail over together
vrrp_sync_group VG1 {
group {
VI_1
}
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
! must be identical on both nodes of the pair
virtual_router_id 12
priority 100
advert_int 1
! do not preempt: a recovering node does not take the VIP back
nopreempt
authentication {
auth_type PASS
auth_pass nginxha
}
! tie the health-check script to this instance
track_script {
check_run
}
virtual_ipaddress {
! floating IP (VIP), bound as alias ens33:0
172.16.12.230 dev ens33 label ens33:0
}
}
[root@jq-nginx1 ~]# vim /root/check_nginx.sh
#!/bin/bash
# nginx health-check script for keepalived (run via vrrp_script).
# Exits 0 as soon as one probe succeeds; after CHECK_TIME consecutive
# failures it stops keepalived so the VIP fails over, then exits 1.
ELINKS="/usr/bin/links"   # NOTE(review): the tutorial installs the 'elinks'
                          # package — confirm the binary path (often /usr/bin/elinks)
HOST="172.16.12.104"      # local nginx address to probe (layer 7)
GW="172.16.0.254"         # gateway to probe (layer 3)
CHECK_TIME=3              # number of attempts before giving up

# Sets NGINX_OK=1 when both the gateway answers ping and nginx serves
# a page; NGINX_OK=0 otherwise.
function check_nginx_health () {
    # layer-3 check: is the network path up?
    if ping -W 1 -c 1 "${GW}" &> /dev/null
    then
        # layer-7 check: does nginx actually return content?
        if "${ELINKS}" -dump 1 "${HOST}" &> /dev/null
        then
            NGINX_OK=1
        else
            NGINX_OK=0
        fi
    else
        NGINX_OK=0
    fi
}

while [ "${CHECK_TIME}" -ne 0 ]
do
    CHECK_TIME=$((CHECK_TIME - 1))
    check_nginx_health
    if [ "${NGINX_OK}" -eq 1 ]
    then
        exit 0                      # healthy — explicit success status
    fi
    if [ "${NGINX_OK}" -eq 0 ] && [ "${CHECK_TIME}" -eq 0 ]
    then
        systemctl stop keepalived   # release the VIP to the peer node
        exit 1
    fi
    sleep 1
done
[root@jq-nginx1 ~]# scp check_nginx.sh 172.16.12.105:/root
[root@jq-nginx1 ~]# scp /etc/keepalived/keepalived.conf 172.16.12.105:/etc/keepalived/
nginx2:
[root@jq-nginx2 ~]# vim check_nginx.sh
HOST="172.16.12.105"
修改IP
nginx1:
[root@jq-nginx1 ~]# echo "/usr/local/nginx/sbin/nginx" >> /etc/rc.local  #开机时nginx尚未运行,不能用 -s reload(reload需要已有master进程),应直接启动
[root@jq-nginx1 ~]# systemctl restart keepalived
[root@jq-nginx1 ~]# echo "systemctl restart keepalived" >> /etc/rc.local
[root@jq-nginx1 ~]# chmod +x /etc/rc.d/rc.local
nginx2:
[root@jq-nginx2 ~]# echo "/usr/local/nginx/sbin/nginx" >> /etc/rc.local  #开机时nginx尚未运行,不能用 -s reload,应直接启动
[root@jq-nginx2 ~]# systemctl restart keepalived
[root@jq-nginx2 ~]# echo "systemctl restart keepalived" >> /etc/rc.local
[root@jq-nginx2 ~]# chmod +x /etc/rc.d/rc.local
看到浮动IP
[root@jq-nginx1 ~]# ifconfig ens33:0
ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.230 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:b5:77:44 txqueuelen 1000 (Ethernet)
测试集群可靠性:
1、nginx1服务停掉:
[root@jq-nginx1 ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.104 netmask 255.255.0.0 broadcast 172.16.255.255
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
浮动IP消失了。
nginx2:
[root@jq-nginx2 ~]# ifconfig ens33:0
ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.230 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:aa:0d:c1 txqueuelen 1000 (Ethernet)
nginx2就能查到浮动IP。
2、重启nginx2:
启动nginx1的nginx服务。
nginx2:
reboot
nginx1:
[root@jq-nginx1 ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.104 netmask 255.255.0.0 broadcast 172.16.255.255
ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.230 netmask 255.255.255.255 broadcast 0.0.0.0
nginx1能够看到浮动IP了。
3、重启nginx1:
nginx1:
reboot
nginx2:
[root@jq-nginx2 ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.105 netmask 255.255.0.0 broadcast 172.16.255.255
ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.230 netmask 255.255.255.255 broadcast 0.0.0.0
nginx2上发现浮动IP!
mysql的HA集群:
mysql不能做负载均衡集群但是可以做高可用集群。
mysql1:172.16.12.106
mysql2:172.16.12.107
首先mysql1 和 mysql2 要做双A
关闭selinux iptables
主机名
/etc/hosts
联网
同步时间
MySQL1:
[root@ha-mysql1 keepalived]# echo "172.16.12.106 ha-mysql1.jq.com" >> /etc/hosts
[root@ha-mysql1 keepalived]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.12.106 ha-mysql1.jq.com
MySQL2:
[root@ha-mysql2 ~]# echo "172.16.12.107 ha-mysql2.jq.com" >> /etc/hosts
[root@ha-mysql2 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.16.12.107 ha-mysql2.jq.com
双A配置步骤:https://blog.youkuaiyun.com/n_u_l_l_/article/details/103168099
MySQL1&2:安装软件
[root@ha-mysql1 ~]# yum install keepalived -y
MySQL1:
[root@ha-mysql1 ~]# cd /etc/keepalived/
[root@ha-mysql1 keepalived]# vim keepalived.conf
! Configuration File for keepalived
! MySQL HA pair: BOTH nodes run state BACKUP with nopreempt, so the
! node holding the VIP keeps it after the failed peer recovers.
global_defs {
! unique identifier for this keepalived node
router_id HA_MySQL_1
}
! health-check script, executed every 2 seconds
! (fix: keepalived's parser requires whitespace between the script
! name and the opening brace — "check_run{" is not tokenized correctly)
vrrp_script check_run {
script "/root/check_mysql.sh"
interval 2
}
! group the instances so they fail over together
vrrp_sync_group VG1 {
group {
VI_1
}
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
! must be identical on both nodes of the pair
virtual_router_id 106
priority 100
advert_int 1
! do not preempt: a recovering node does not take the VIP back
nopreempt
authentication {
auth_type PASS
auth_pass hamysql
}
! tie the health-check script to this instance
track_script {
check_run
}
virtual_ipaddress {
! floating IP (VIP), bound as alias ens33:0
172.16.12.100 dev ens33 label ens33:0
}
}
[root@ha-mysql1 keepalived]# vim /root/check_mysql.sh
#!/bin/bash
# MySQL health-check script for keepalived (run via vrrp_script).
# Exits 0 as soon as one probe succeeds; after CHECK_TIME consecutive
# failures it stops keepalived so the VIP fails over, then exits 1.
HOST="172.16.12.106"   # NOTE(review): defined but never used below — confirm intent
GW="172.16.0.254"      # gateway to probe (layer 3)
passwd="000000"        # NOTE(review): password on the command line is visible in 'ps'
CHECK_TIME=3           # number of attempts before giving up

# Sets MySQL_status=0 when both the gateway answers ping and mysql
# accepts a login; MySQL_status=1 otherwise.
function mysql_health () {
    if ping -W 1 -c 1 "${GW}" &> /dev/null
    then
        # Use the absolute path even if PATH is configured — keepalived
        # runs scripts with a minimal environment.
        # (fix: the original put a C-style "//..." note after the command;
        # '//' is not a shell comment, so those words were passed to mysql
        # as extra arguments and broke the check)
        if /usr/local/mysql/bin/mysql -u root -p"${passwd}" -e "exit" &> /dev/null
        then
            MySQL_status=0
        else
            MySQL_status=1
        fi
    else
        MySQL_status=1
    fi
}

while [ "${CHECK_TIME}" -ne 0 ]
do
    CHECK_TIME=$((CHECK_TIME - 1))
    mysql_health
    if [ "${MySQL_status}" -eq 0 ]
    then
        exit 0
    fi
    if [ "${MySQL_status}" -eq 1 ] && [ "${CHECK_TIME}" -eq 0 ]
    then
        systemctl stop keepalived   # release the VIP to the peer node
        exit 1
    fi
    sleep 1
done
MySQL1:
[root@ha-mysql1 ~]# scp check_mysql.sh 172.16.12.107:/root
[root@ha-mysql1 ~]# scp /etc/keepalived/keepalived.conf 172.16.12.107:/etc/keepalived/
MySQL2:
[root@ha-mysql2 ~]# vim check_mysql.sh
HOST="172.16.12.107"
修改IP为自己的IP
MySQL1:
[root@ha-mysql1 ~]# systemctl restart keepalived
看到虚拟的网卡和IP就基本可以了:
[root@ha-mysql1 ~]# ifconfig ens33:0
ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.100 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:1b:ad:ac txqueuelen 1000 (Ethernet)
[root@ha-mysql2 ~]# ps -ef | grep keepalived
root 881 1 0 02:40 ? 00:00:00 /usr/sbin/keepalived -D
root 882 881 0 02:40 ? 00:00:00 /usr/sbin/keepalived -D
root 883 881 0 02:40 ? 00:00:00 /usr/sbin/keepalived -D
root 1540 1468 0 02:43 pts/0 00:00:00 grep --color=auto keepalived
[root@ha-mysql2 ~]# netstat -antlup | grep mysql
tcp 0 0 172.16.12.107:41394 172.16.12.106:3306 ESTABLISHED 1467/mysqld
tcp6 0 0 :::3306 :::* LISTEN 1467/mysqld
tcp6 0 0 172.16.12.107:3306 172.16.12.106:34280 ESTABLISHED 1467/mysqld
然后设置开机自启动:
MySQL1:
[root@ha-mysql1 ~]# echo "/usr/local/mysql/bin/mysqld_safe --defaults-file=/etc/my.cnf --user=mysql &" >> /etc/rc.local
[root@ha-mysql1 ~]# chmod +x /etc/rc.d/rc.local
[root@ha-mysql1 ~]# echo "systemctl restart keepalived" >> /etc/rc.local
MySQL2
[root@ha-mysql2 ~]# echo "/usr/local/mysql/bin/mysqld_safe --defaults-file=/etc/my.cnf --user=mysql &" >> /etc/rc.local
[root@ha-mysql2 ~]# chmod +x /etc/rc.d/rc.local
[root@ha-mysql2 ~]# echo "systemctl restart keepalived" >> /etc/rc.local
注意:服务一定要在keepalive之前。
注意:服务一定要在keepalive之前。
注意:服务一定要在keepalive之前。
然后测试集群可靠性:
1、停掉1 的服务
[root@ha-mysql1 ~]# kill 3233
[root@ha-mysql1 ~]# netstat -antlup | grep mysql
[root@ha-mysql1 ~]#
[root@ha-mysql1 ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.106 netmask 255.255.0.0 broadcast 172.16.255.255
inet6 fe80::20c:29ff:fe1b:adac prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:1b:ad:ac txqueuelen 1000 (Ethernet)
RX packets 18954 bytes 4045013 (3.8 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 8137 bytes 1318823 (1.2 MiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1 (Local Loopback)
RX packets 24 bytes 2060 (2.0 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 24 bytes 2060 (2.0 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@ha-mysql1 ~]#
查IP 发现浮动IP消失了。
mysql2:
[root@ha-mysql2 ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.107 netmask 255.255.0.0 broadcast 172.16.255.255
inet6 fe80::20c:29ff:fe36:4f2c prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:36:4f:2c txqueuelen 1000 (Ethernet)
RX packets 10132 bytes 3390120 (3.2 MiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3598 bytes 418053 (408.2 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.100 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:36:4f:2c txqueuelen 1000 (Ethernet)
发现浮动IP到2上了。
2、启动MySQL1 的服务,重启MySQL2
MySQL1
[root@ha-mysql1 ~]# netstat -antlup | grep mysql
tcp 0 0 172.16.12.106:59074 172.16.12.107:3306 ESTABLISHED 4260/mysqld
tcp6 0 0 :::3306 :::* LISTEN 4260/mysqld
tcp6 0 0 172.16.12.106:3306 172.16.12.107:54266 ESTABLISHED 4260/mysqld
MySQL2:
reboot
MySQL1:
[root@ha-mysql1 ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.106 netmask 255.255.0.0 broadcast 172.16.255.255
inet6 fe80::20c:29ff:fe1b:adac prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:1b:ad:ac txqueuelen 1000 (Ethernet)
RX packets 2033 bytes 126875 (123.9 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 104 bytes 12183 (11.8 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.100 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:1b:ad:ac txqueuelen 1000 (Ethernet)
发现浮动IP到MySQL1上了。
3、然后MySQL1再重启
MySQL1:
reboot
MySQL2:
[root@ha-mysql2 ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.107 netmask 255.255.0.0 broadcast 172.16.255.255
inet6 fe80::20c:29ff:fe36:4f2c prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:36:4f:2c txqueuelen 1000 (Ethernet)
RX packets 752 bytes 50091 (48.9 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 94 bytes 11371 (11.1 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
ens33:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.16.12.100 netmask 255.255.255.255 broadcast 0.0.0.0
ether 00:0c:29:36:4f:2c txqueuelen 1000 (Ethernet)
浮动IP到MySQL2上了。
到此,可以认为集群的可靠性ok。
配置文件解读
第一部分:全局定义块
1、email通知。作用:有故障,发邮件报警。
2、Lvs负载均衡器标识(lvs_id)。在一个网络内,它应该是唯一的。
3、花括号“{}”。用来分隔定义块,因此必须成对出现。如果写漏了,keepalived运行时,不会得到预期的结果。由于定义块内存在嵌套关系,因此很容易遗漏结尾处的花括号,这点要特别注意。
global_defs {
notification_email { //指定keepalived在发生切换时需要发送email到的对象,一行一个
sysadmin@fire.loc
}
notification_email_from Alexandre.Cassen@firewall.loc //指定发件人
smtp_server localhost //指定smtp服务器地址
smtp_connect_timeout 30 //指定smtp连接超时时间
router_id LVS_DEVEL //运行keepalived机器的一个标识
}
第二部分:vrrp_sync_group作用:确定失败切换(FailOver)包含的路由实例个数。即在有2个负载均衡器的场景,一旦某个负载均衡器失效,需要自动切换到另外一个负载均衡器的实例是哪些? 实例组group{}至少包含一个vrrp实例
vrrp_sync_group VG_1{ //监控多个网段的实例
group {
VI_1 //实例名
VI_2
......
}
notify_master /path/xx.sh //指定当切换到master时,执行的脚本
notify_backup /path/xx.sh //指定当切换到backup时,执行的脚本
notify_fault "path/xx.sh VG_1" //故障时执行的脚本
notify /path/xx.sh
smtp_alert //使用global_defs中提供的邮件地址和smtp服务器发送邮件通知
}
第三部分:vrrp_instance,实例名出自实例组group所包含的那些名字。
vrrp_instance VI_1 {
state BACKUP //指定哪个为master,哪个为backup;如果设置了nopreempt这个值不起作用,主备靠priority决定
interface eth0 //设置实例绑定的网卡
dont_track_primary //忽略vrrp的interface错误(默认不设置)
track_interface{ //设置额外的监控,里面那个网卡出现问题都会切换
eth0
eth1
}
mcast_src_ip //发送多播包的地址,如果不设置默认使用绑定网卡的primary ip
garp_master_delay //在切换到master状态后,延迟进行gratuitous ARP请求
virtual_router_id 50 //VRID标记,同一实例的主备节点必须一致
priority 99 //优先级,高优先级竞选为master
advert_int 1 //检查间隔,默认1秒
nopreempt //设置为不抢占 注:这个配置只能设置在backup主机上,而且这个主机优先级要比另外一台高
preempt_delay //抢占延时,默认5分钟
debug //debug级别
authentication { //设置认证
auth_type PASS //认证方式
auth_pass 111111 //认证密码
}
virtual_ipaddress { //设置浮动IP 也叫VIP
192.168.202.200
}
}