一、核心概念
-
LVS(Linux Virtual Server)
-
工作在网络层(四层负载均衡),基于 IP 和端口分发请求。
-
支持多种调度算法(如轮询、加权轮询、最小连接等)。
-
三种工作模式:NAT、DR(Direct Routing)、TUN(IP Tunneling)。
-
-
Keepalived
-
基于 VRRP(Virtual Router Redundancy Protocol)协议实现高可用。
-
监控 LVS 节点和后端服务器的健康状态,自动切换故障节点。
-
管理虚拟 IP(VIP)的漂移。
-
二、架构示例(DR 模式)
三、实现步骤
1. 环境准备
-
两台 LVS 节点(主备):
LVS_01
和LVS_02
,配置虚拟 IP(VIP),添加仅主机网卡。 -
两台后端 Real Server(如 Nginx/Web 服务器)。
-
所有机器需在同一局域网内。
2.两台LVS节点的配置
LVS_01的配置:
[root@LVS_01 ~]# yum install ipvsadm keepalived -y
[root@LVS_01 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens160
    lvs_sync_daemon_interface ens224    # NIC used for heartbeat / sync traffic
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.8.10
    }
}
virtual_server 192.168.8.10 80 {        # virtual service: VIP address and port
    delay_loop 6                        # interval (s) between real-server health checks
    lb_algo rr                          # scheduling algorithm: round-robin
    lb_kind DR                          # LVS forwarding mode: Direct Routing
    # persistence_timeout 50
    protocol TCP                        # forwarding protocol
    real_server 192.168.8.158 80 {
        weight 1
        TCP_CHECK {                     # real-server health-check section
            connect_timeout 3           # fail the probe after 3 s without a connection
            retry 3                     # number of retries (comment was missing '#', breaking the config)
            delay_before_retry 3        # delay (s) between retries (duplicate "3" removed)
            connect_port 80             # port probed by the health check
        }
    }
    real_server 192.168.8.160 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
LVS_02的配置:
[root@LVS_02 ~]# yum install ipvsadm keepalived -y
[root@LVS_02 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_2
}
vrrp_instance VI_2 {
    state BACKUP
    interface ens160
    lvs_sync_daemon_interface ens224    # NIC used for heartbeat / sync traffic
    virtual_router_id 51
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.8.10
    }
}
virtual_server 192.168.8.10 80 {        # virtual service: VIP address and port
    delay_loop 6                        # interval (s) between real-server health checks
    lb_algo rr                          # scheduling algorithm: round-robin
    lb_kind DR                          # LVS forwarding mode: Direct Routing
    # persistence_timeout 50
    protocol TCP                        # forwarding protocol
    real_server 192.168.8.158 80 {
        weight 1
        TCP_CHECK {                     # real-server health-check section
            connect_timeout 3           # fail the probe after 3 s without a connection
            retry 3                     # number of retries (comment was missing '#', breaking the config)
            delay_before_retry 3        # delay (s) between retries (duplicate "3" removed)
            connect_port 80             # port probed by the health check
        }
    }
    real_server 192.168.8.160 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}
3.后端 Real Server 的配置
web1和web2分别写入脚本 lvs_rs,并赋予执行权限
#!/bin/sh
#
# Startup script handling the initialisation of an LVS DR real server:
# binds the VIP on a loopback alias (lo:10) and tunes the ARP sysctls
# so this host accepts DR-mode traffic for the VIP without answering
# ARP requests for it.
# chkconfig: - 28 72
# description: Initialise the Linux Virtual Server for DR
#
### BEGIN INIT INFO
# Provides: ipvsadm
# Required-Start: $local_fs $network $named
# Required-Stop: $local_fs $remote_fs $network
# Short-Description: Initialise the Linux Virtual Server
# Description: The Linux Virtual Server is a highly scalable and highly
# available server built on a cluster of real servers, with the load
# balancer running on Linux.
### END INIT INFO
LOCK=/var/lock/ipvsadm.lock
VIP=192.168.8.10
. /etc/rc.d/init.d/functions
start() {
    # Count existing lo:10 aliases; non-zero means the VIP is already bound.
    PID=$(ifconfig | grep -c lo:10)
    if [ "$PID" -ne 0 ]; then
        echo "The LVS-DR-RIP Server is already running !"
    else
        # Bind the VIP on a loopback alias with a /32 mask so the real
        # server owns the address locally without advertising it.
        /sbin/ifconfig lo:10 "$VIP" netmask 255.255.255.255 broadcast "$VIP" up
        /sbin/route add -host "$VIP" dev lo:10
        # Suppress ARP replies/announcements for the VIP (required by DR
        # mode so only the director answers ARP for the VIP).
        echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
        echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
        echo "1" >/proc/sys/net/ipv4/conf/ens160/arp_ignore
        echo "2" >/proc/sys/net/ipv4/conf/ens160/arp_announce
        echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
        echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce
        /bin/touch "$LOCK"
        echo "starting LVS-DR-RIP server is ok !"
    fi
}
stop() {
    # Tear down the VIP binding and restore default ARP behaviour.
    /sbin/route del -host "$VIP" dev lo:10
    /sbin/ifconfig lo:10 down >/dev/null
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/ens160/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/ens160/arp_announce
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore
    echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce
    rm -f -- "$LOCK"
    echo "stopping LVS-DR-RIP server is ok !"
}
status() {
    # The lock file is the running marker created by start().
    if [ -e "$LOCK" ]; then
        echo "The LVS-DR-RIP Server is already running !"
    else
        echo "The LVS-DR-RIP Server is not running !"
    fi
}
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        stop
        start
        ;;
    status)
        status
        ;;
    *)
        # Was "$1": the usage line must show the script name, not the argument.
        echo "Usage: $0 {start|stop|restart|status}"
        exit 1
        ;;
esac
exit 0
4.启动服务
systemctl start keepalived
systemctl enable keepalived
5.验证
-
检查 VIP 绑定
在主节点执行 ip addr show ens160
,应看到 VIP。 -
测试故障转移
关闭主节点的 Keepalived,备节点应接管 VIP。 -
验证负载均衡
使用 curl http://192.168.8.10
多次请求,观察后端服务器的日志。