Author: Jin
Date: 20130613
Title: Nginx + Keepalived Dual-Master Test
Preface: I ran this test over a year ago; after so long I had forgotten the details, so I am redoing it now.
I. Environment
1. Basic information and planning
pxe-svr.skylog.cn CentOS release 6.2 (Final)
eth0 192.168.78.250
vip 192.168.78.215
vrrp_instance NGINX1
client0601.skylog.cn CentOS release 6.2 (Final)
eth0 192.168.78.110
vip 192.168.78.115
vrrp_instance NGINX2
2. Web servers
[root@pxe-svr ~]# nginx -v
nginx version: nginx/1.0.15
http://192.168.78.250/index.html
[root@client0601 ~]# nginx -v
nginx version: nginx/1.0.15
http://192.168.78.110/index.html
II. Keepalived installation and configuration
1. Installation
[root@client0601 ~]# yum -y install keepalived
[root@pxe-svr ~]# yum -y install keepalived
2. Check script
Check the local HTTP status; if it is unhealthy, stop keepalived.
On 192.168.78.250:
[root@pxe-svr ~]# cat /root/bin/check_http.sh
#!/bin/bash
url="http://192.168.78.250/index.html"
status=$(/usr/bin/curl -s --head "$url" | awk '/HTTP/ {print $2}')
if [ "$status" != "200" ]; then
    /etc/init.d/keepalived stop
fi
On 192.168.78.110:
[root@client0601 ~]# cat /root/bin/check_http.sh
#!/bin/bash
url="http://192.168.78.110/index.html"
status=$(/usr/bin/curl -s --head "$url" | awk '/HTTP/ {print $2}')
if [ "$status" != "200" ]; then
    /etc/init.d/keepalived stop
fi
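Before wiring the script into keepalived, it helps to make it executable and run it once by hand. A minimal manual check (using the paths above) could look like this:
chmod +x /root/bin/check_http.sh
/root/bin/check_http.sh; echo "exit status: $?"               # run the check the way keepalived will
curl -s --head http://192.168.78.250/index.html | head -1     # the status line that the script parses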
3. Configure keepalived
pxe-svr.skylog.cn
[root@pxe-svr ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id NGINX_ID_1 #ID of this node
}
vrrp_script chk_nginx { #health-check script
script "/root/bin/check_http.sh"
interval 2 #check interval (seconds)
weight 2
}
vrrp_instance NGINX1 { #define an instance; this node is its master
state MASTER #defined as master
interface eth0
virtual_router_id 51 #0-255; must match within an instance and be unique across the whole VRRP domain
priority 150 #the higher priority becomes master
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script { #run the check script
chk_nginx
}
virtual_ipaddress { #floating IP (VIP) for this instance
192.168.78.215
}
}
vrrp_instance NGINX2 {
state BACKUP
interface eth0
virtual_router_id 52
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nginx
}
virtual_ipaddress {
192.168.78.115
}
}
client0601.skylog.cn
On this node, NGINX1 (mastered by pxe-svr.skylog.cn) is set to BACKUP, and NGINX2 becomes the MASTER.
[root@client0601 ~]#
[root@client0601 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id NGINX_ID_2
}
vrrp_script chk_nginx {
script "/root/bin/check_http.sh"
interval 2
weight 2
}
vrrp_instance NGINX1 {
state BACKUP #this instance is BACKUP on this node
interface eth0
virtual_router_id 51 #0-255; must match within an instance and be unique across the VRRP domain; unchanged
priority 110 #the higher priority becomes master; lowered here (changed)
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script { #run the check script
chk_nginx
}
virtual_ipaddress { #floating IP (VIP) for this instance
192.168.78.215
}
}
vrrp_instance NGINX2 { #this node's own instance
state MASTER #changed to MASTER
interface eth0
virtual_router_id 52 #same as on the other node
priority 150 #raised to 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nginx
}
virtual_ipaddress {
192.168.78.115
}
}
Configuration notes
man keepalived.conf
From the example in the man page:
# interface for inside_network, bound by vrrp
interface eth0
virtual_ipaddress {
<IPADDR>/<MASK> brd <IPADDR> dev <STRING> scope <SCOPE> label <LABEL>
192.168.200.17/24 dev eth1
192.168.200.18/24 dev eth2 label eth2:1
}
Note that the devices used for the VIPs (eth1, eth2) differ from the interface bound by VRRP (eth0).
4. Start the service
[root@pxe-svr ~]# /etc/init.d/keepalived start
Starting keepalived: [ OK ]
[root@pxe-svr ~]# /etc/init.d/keepalived status
keepalived (pid 2896) is running...
[root@client0601 ~]# /etc/init.d/keepalived restart
Stopping keepalived: [ OK ]
Starting keepalived: [ OK ]
[root@client0601 ~]# /etc/init.d/keepalived status
keepalived (pid 3024) is running...
[root@client0601 ~]# ps xua|grep keepalived
root 3024 0.0 0.0 16456 1020 ? Ss 12:20 0:00 /usr/sbin/keepalived -D
root 3026 0.0 0.2 16512 2432 ? S 12:20 0:00 /usr/sbin/keepalived -D
root 3027 0.1 0.1 16512 1916 ? S 12:20 0:12 /usr/sbin/keepalived -D
The three processes correspond to the following hierarchy:
PID
111 Keepalived <-- Parent process monitoring children
112 \_ Keepalived <-- VRRP child
113 \_ Keepalived <-- Healthchecking child
III. Testing
State before testing:
[root@pxe-svr ~]# ip addr show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:81:78:c1 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.250/24 brd 192.168.78.255 scope global eth0
inet 192.168.1.251/24 brd 192.168.1.255 scope global eth0:1
inet 192.168.78.215/32 scope global eth0
inet6 fe80::20c:29ff:fe81:78c1/64 scope link
valid_lft forever preferred_lft forever
[root@client0601 ~]# ip addr show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:97:c0:f8 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.110/24 brd 192.168.78.255 scope global eth0
inet 192.168.78.115/32 scope global eth0
inet6 fe80::20c:29ff:fe97:c0f8/64 scope link
valid_lft forever preferred_lft forever
C:\Users\diege>ping 192.168.78.215
Pinging 192.168.78.215 with 32 bytes of data:
Reply from 192.168.78.215: bytes=32 time<1ms TTL=64
Ping statistics for 192.168.78.215:
    Packets: Sent = 1, Received = 1, Lost = 0 (0% loss),
Approximate round trip times in milli-seconds:
    Minimum = 0ms, Maximum = 0ms, Average = 0ms
Control-C
^C
C:\Users\diege>ping 192.168.78.115
Pinging 192.168.78.115 with 32 bytes of data:
Reply from 192.168.78.115: bytes=32 time<1ms TTL=64
Reply from 192.168.78.115: bytes=32 time<1ms TTL=64
Ping statistics for 192.168.78.115:
    Packets: Sent = 2, Received = 2, Lost = 0 (0% loss),
Approximate round trip times in milli-seconds:
    Minimum = 0ms, Maximum = 0ms, Average = 0ms
HTTP access:
http://192.168.78.115/
This is client0601
http://192.168.78.215/
This is pxe-svr.skylog.cn
1. Kill nginx on pxe-svr.skylog.cn and observe
[root@pxe-svr ~]# pkill nginx
First, access:
http://192.168.78.115/
This is client0601
No change.
http://192.168.78.215/
now switches to:
This is client0601
Both URLs remain reachable; ping behaves the same way.
Check the IP addresses:
[root@pxe-svr ~]# ip addr show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:81:78:c1 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.250/24 brd 192.168.78.255 scope global eth0
inet 192.168.1.251/24 brd 192.168.1.255 scope global eth0:1
inet6 fe80::20c:29ff:fe81:78c1/64 scope link
valid_lft forever preferred_lft forever
The IP 192.168.78.215 has been removed from pxe-svr.skylog.cn.
[root@pxe-svr ~]# /etc/init.d/keepalived status
keepalived is stopped
The keepalived service has also stopped.
[root@client0601 ~]# ip addr show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:97:c0:f8 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.110/24 brd 192.168.78.255 scope global eth0
inet 192.168.78.115/32 scope global eth0
inet 192.168.78.215/32 scope global eth0
inet6 fe80::20c:29ff:fe97:c0f8/64 scope link
valid_lft forever preferred_lft forever
192.168.78.215 is now bound to eth0 on client0601.skylog.cn.
Note the line: inet 192.168.78.215/32 scope global eth0
Now start nginx and see what happens:
[root@pxe-svr ~]# /etc/init.d/nginx start
Starting nginx alone does not bring the VIP back.
2. Start nginx, then start keepalived
[root@pxe-svr ~]# /etc/init.d/keepalived start
Starting keepalived: [ OK ]
Access http://192.168.78.215/
This is pxe-svr.skylog.cn
[root@pxe-svr ~]# ip addr show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:81:78:c1 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.250/24 brd 192.168.78.255 scope global eth0
inet 192.168.1.251/24 brd 192.168.1.255 scope global eth0:1
inet 192.168.78.215/32 scope global eth0
inet6 fe80::20c:29ff:fe81:78c1/64 scope link
valid_lft forever preferred_lft forever
[root@client0601 ~]# ip addr show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:97:c0:f8 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.110/24 brd 192.168.78.255 scope global eth0
inet 192.168.78.115/32 scope global eth0
inet6 fe80::20c:29ff:fe97:c0f8/64 scope link
valid_lft forever preferred_lft forever
The VIP has switched back from client0601.skylog.cn to pxe-svr.skylog.cn.
3. Shut down one of the servers directly
[root@pxe-svr ~]# shutdown -h now
Access:
http://192.168.78.215/
This is client0601
In this case keepalived was not stopped by the check script; shutting the server down stopped keepalived along with everything else.
[root@client0601 ~]# ip addr show eth0
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:97:c0:f8 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.110/24 brd 192.168.78.255 scope global eth0
inet 192.168.78.115/32 scope global eth0
inet 192.168.78.215/32 scope global eth0
inet6 fe80::20c:29ff:fe97:c0f8/64 scope link
valid_lft forever preferred_lft forever
[root@client0601 ~]#
Failover works correctly here as well.
Logs
When keepalived was first started:
[root@pxe-svr ~]#
Jun 13 12:19:13 pxe-svr Keepalived_healthcheckers[2897]: Opening file '/etc/keepalived/keepalived.conf'.
Jun 13 12:19:13 pxe-svr Keepalived_vrrp[2899]: Configuration is using : 42289 Bytes
Jun 13 12:19:13 pxe-svr Keepalived_healthcheckers[2897]: Configuration is using : 5609 Bytes
Jun 13 12:19:13 pxe-svr Keepalived_vrrp[2899]: Using LinkWatch kernel netlink reflector...
Jun 13 12:19:14 pxe-svr Keepalived_healthcheckers[2897]: Using LinkWatch kernel netlink reflector... #healthcheckers = the health-check child
Jun 13 12:19:14 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) Entering BACKUP STATE
Jun 13 12:19:14 pxe-svr Keepalived_vrrp[2899]: VRRP sockpool: [ifindex(2), proto(112), fd(11,12)]
Jun 13 12:19:14 pxe-svr Keepalived_vrrp[2899]: VRRP_Script(chk_nginx) succeeded #the check script succeeded
Jun 13 12:19:15 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX1) Transition to MASTER STATE
Jun 13 12:19:16 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX1) Entering MASTER STATE
Jun 13 12:19:16 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX1) setting protocol VIPs.
Jun 13 12:19:16 pxe-svr Keepalived_healthcheckers[2897]: Netlink reflector reports IP 192.168.78.215 added
Jun 13 12:19:16 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX1) Sending gratuitous ARPs on eth0 for 192.168.78.215
Jun 13 12:19:17 pxe-svr ntpd[1534]: Listening on interface #9 eth0, 192.168.78.215#123 Enabled
Jun 13 12:19:17 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) Transition to MASTER STATE
Jun 13 12:19:18 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) Entering MASTER STATE
Jun 13 12:19:18 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) setting protocol VIPs.
Jun 13 12:19:18 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) Sending gratuitous ARPs on eth0 for 192.168.78.115
Jun 13 12:19:18 pxe-svr Keepalived_healthcheckers[2897]: Netlink reflector reports IP 192.168.78.115 added
Jun 13 12:19:20 pxe-svr ntpd[1534]: Listening on interface #10 eth0, 192.168.78.115#123 Enabled
Jun 13 12:19:21 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX1) Sending gratuitous ARPs on eth0 for 192.168.78.215
Jun 13 12:19:23 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) Sending gratuitous ARPs on eth0 for 192.168.78.115
Jun 13 12:19:33 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) Received higher prio advert
Jun 13 12:19:33 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) Entering BACKUP STATE
Jun 13 12:19:33 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX2) removing protocol VIPs.
Jun 13 12:19:33 pxe-svr Keepalived_healthcheckers[2897]: Netlink reflector reports IP 192.168.78.115 removed
The VIP is removed:
Jun 13 12:24:52 pxe-svr Keepalived[2896]: Stopping Keepalived v1.2.7 (02/21,2013)
Jun 13 12:24:52 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX1) sending 0 priority
Jun 13 12:24:52 pxe-svr Keepalived_vrrp[2899]: VRRP_Instance(NGINX1) removing protocol VIPs.
Jun 13 12:24:54 pxe-svr ntpd[1534]: Deleting interface #9 eth0, 192.168.78.215#123, interface stats: received=0, sent=0, dropped=0, active_time=337 secs
Jun 13 13:53:10 pxe-svr Keepalived[4402]: Starting Keepalived v1.2.7 (02/21,2013)
On the other node the VIP is added:
Jun 13 12:20:21 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX2) Sending gratuitous ARPs on eth0 for 192.168.78.115
Jun 13 12:25:27 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX1) Transition to MASTER STATE
Jun 13 12:25:28 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX1) Entering MASTER STATE
Jun 13 12:25:28 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX1) setting protocol VIPs.
Jun 13 12:25:28 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX1) Sending gratuitous ARPs on eth0 for 192.168.78.215
Jun 13 12:25:28 client0601 Keepalived_healthcheckers[3026]: Netlink reflector reports IP 192.168.78.215 added
Jun 13 12:25:29 client0601 ntpd[1299]: Listening on interface #10 eth0, 192.168.78.215#123 Enabled
Jun 13 12:25:33 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX1) Sending gratuitous ARPs on eth0 for 192.168.78.215
Jun 13 13:18:10 client0601 kernel: hrtimer: interrupt took 2965114 ns
When the original master recovers, the VIP is removed again:
Jun 13 13:53:45 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX1) Received higher prio advert
Jun 13 13:53:45 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX1) Entering BACKUP STATE
Jun 13 13:53:45 client0601 Keepalived_vrrp[3027]: VRRP_Instance(NGINX1) removing protocol VIPs.
Jun 13 13:53:45 client0601 Keepalived_healthcheckers[3026]: Netlink reflector reports IP 192.168.78.215 removed
Jun 13 13:53:46 client0601 ntpd[1299]: Deleting interface #10 eth0, 192.168.78.215#123, interface stats: received=0, sent=0, dropped=0, active_time=5297 secs
On the front end, a simple DNS round-robin over these two VIPs is enough, as sketched below.
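For example, assuming a BIND-style zone for skylog.cn and a hypothetical record name www, round-robin across the two VIPs is just two A records:
; hypothetical zone snippet: www.skylog.cn resolves to the two VIPs in turn
www    IN    A    192.168.78.215
www    IN    A    192.168.78.115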
IV. Binding the VIP to a different NIC
As noted in the configuration notes above (man keepalived.conf), a virtual_ipaddress entry can specify its own dev, different from the interface bound by VRRP.
[root@pxe-svr ~]#
vrrp_instance NGINX1 { #define an instance; this node is its master
state MASTER #defined as master
interface eth0
virtual_router_id 51 #0-255; must match within an instance and be unique across the VRRP domain
priority 150 #the higher priority becomes master
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script { #run the check script
chk_nginx
}
virtual_ipaddress { #floating IP (VIP) for this instance
#192.168.78.215
172.16.9.70/24 dev eth1
}
}
vrrp_instance NGINX2 { #the other node's instance
state BACKUP
interface eth0
virtual_router_id 52
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nginx
}
virtual_ipaddress {
#192.168.78.115
172.16.9.69/24 dev eth1
}
}
[root@client0601 ~]#
vrrp_instance NGINX1 {
state BACKUP #this instance is BACKUP on this node
interface eth0
virtual_router_id 51 #0-255; must match within an instance and be unique across the VRRP domain; unchanged
priority 110 #the higher priority becomes master; lowered here (changed)
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script { #run the check script
chk_nginx
}
virtual_ipaddress { #floating IP (VIP) for this instance
#192.168.78.215
172.16.9.70/24 dev eth1
}
}
vrrp_instance NGINX2 { #this node's own instance
state MASTER #changed to MASTER
interface eth0
virtual_router_id 52 #same as on the other node
priority 150 #raised to 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nginx
}
virtual_ipaddress {
#192.168.78.115
172.16.9.69/24 dev eth1
}
}
Start keepalived:
[root@pxe-svr ~]# /etc/init.d/keepalived start
Starting keepalived: [ OK ]
[root@pxe-svr ~]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:81:78:c1 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.250/24 brd 192.168.78.255 scope global eth0
inet 192.168.1.251/24 brd 192.168.1.255 scope global eth0:1
inet6 fe80::20c:29ff:fe81:78c1/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:81:78:cb brd ff:ff:ff:ff:ff:ff
inet 172.16.9.204/24 brd 172.16.9.255 scope global eth1
inet 172.16.9.70/24 scope global secondary eth1
inet 172.16.9.69/24 scope global secondary eth1
inet6 fe80::20c:29ff:fe81:78cb/64 scope link
valid_lft forever preferred_lft forever
After starting, access:
http://172.16.9.70/
http://172.16.9.69/
Both return:
This is pxe-svr.skylog.cn
[root@client0601 ~]# /etc/init.d/keepalived start
Starting keepalived: [ OK ]
After keepalived is started on the second node as well:
[root@client0601 ~]# ip addr show
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:97:c0:f8 brd ff:ff:ff:ff:ff:ff
inet 192.168.78.110/24 brd 192.168.78.255 scope global eth0
inet6 fe80::20c:29ff:fe97:c0f8/64 scope link
valid_lft forever preferred_lft forever
3: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
link/ether 00:0c:29:97:c0:02 brd ff:ff:ff:ff:ff:ff
inet 172.16.9.251/16 brd 172.16.255.255 scope global eth1
inet 172.16.9.69/24 scope global eth1
inet6 fe80::20c:29ff:fe97:c002/64 scope link
valid_lft forever preferred_lft forever
http://172.16.9.70/
This is pxe-svr.skylog.cn
http://172.16.9.69/
This is client0601
Summary
# interface for inside_network, bound by vrrp
interface eth0
If no dev is given inside virtual_ipaddress{}, the VIP is added to the interface defined by the interface directive above.
V. Improvements
1. Log when keepalived is stopped
This way we know when nginx became unhealthy and keepalived was stopped.
#!/bin/bash
url="http://192.168.78.250/index.html"
status=$(/usr/bin/curl -s --head "$url" | awk '/HTTP/ {print $2}')
if [ "$status" != "200" ]; then
    /etc/init.d/keepalived stop
    echo "$(date) keepalived has been stopped" >> $(pwd -P)/$0.log
fi
[root@pxe-svr ~]# pkill nginx
[root@pxe-svr ~]# cat /root/bin/check_http.sh.log
Thu Jun 13 14:44:04 CST 2013 keepalived has been stopped
2. Add mail notification, or call an HTTP interface to send an alert, for example as sketched below.
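A minimal sketch of such a notification, added at the point where the script stops keepalived; the mail recipient and the HTTP endpoint are placeholders, not part of this setup, and the mail variant assumes mailx is installed:
# placeholder recipient; assumes the mailx 'mail' command is available
echo "$(date) nginx check failed on $(hostname), keepalived stopped" | mail -s "keepalived stopped on $(hostname)" admin@example.com
# or call a hypothetical HTTP notification interface instead
/usr/bin/curl -s -d "host=$(hostname)&msg=keepalived_stopped" http://monitor.example.com/alert >/dev/null 2>&1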
3. Start nginx whenever keepalived starts
During recovery, nginx has to be started first and keepalived second.
Starting keepalived on its own does not start nginx, and keepalived then stops itself again because the check fails.
If nginx should come up when keepalived starts, the check script needs to be improved: detect the nginx process and start it if it is not running.
http://deidara.blog.51cto.com/400447/302402/
#!/bin/bash
url="http://192.168.78.250/index.html"
# if nginx is not running, try to start it
if [ $(ps -C nginx --no-header | wc -l) -eq 0 ]; then
    /etc/init.d/nginx start
    sleep 3
    # if it still does not answer with 200, give up and stop keepalived
    status=$(/usr/bin/curl -s --head "$url" | awk '/HTTP/ {print $2}')
    if [ "$status" != "200" ]; then
        /etc/init.d/keepalived stop
        echo "$(date) keepalived has been stopped" >> $(pwd -P)/$0.log
    fi
fi
How it works: starting keepalived runs the check script, and the check script starts nginx; if nginx fails to start properly, keepalived cannot stay up.
With this script, killing nginx brings it back automatically: keepalived notices the process is gone and the script starts it again.
Whether to couple keepalived and nginx with a script like this depends on the specific situation.
VI. Summary
This is really a test of keepalived; the web server could just as well be Apache instead of nginx.
In dual-master mode both servers carry traffic and each acts as the backup for the other.
VII. Extension
Use nginx as a reverse proxy in front of multiple backend servers.
Both nodes carry the same reverse-proxy configuration, proxying to the same set of backends (a sketch follows below).
This approach requires editing the configuration file and reloading manually, so a tool such as Puppet may be needed to manage it.
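A minimal sketch of such a reverse-proxy server block, identical on both nodes; the backend addresses below are placeholders for the real web servers:
upstream backend {
    server 192.168.78.201:80;    # placeholder backend 1
    server 192.168.78.202:80;    # placeholder backend 2
}
server {
    listen 80;
    server_name www.skylog.cn;
    location / {
        proxy_pass http://backend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}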
VIII. Full configuration files
master
#vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
notification_email {
wangkaijin@pxe-svr.skylog.cn
}
notification_email_from root@pxe-svr.skylog.cn
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_ID_1
}
vrrp_script chk_nginx { #check script
script "/root/bin/check_http.sh"
interval 2 #check interval
weight 2
}
vrrp_instance NGINX1 { #define an instance; this node is its master
state MASTER #defined as master
interface eth0
virtual_router_id 51 #0-255; must match within an instance and be unique across the VRRP domain
priority 150 #the higher priority becomes master
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script { #run the check script
chk_nginx
}
virtual_ipaddress { #floating IP (VIP) for this instance
#192.168.78.215
172.16.9.70/24 dev eth1
}
smtp_alert
}
vrrp_instance NGINX2 { #the other node's instance
state BACKUP
interface eth0
virtual_router_id 52
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nginx
}
virtual_ipaddress {
#192.168.78.115
172.16.9.69/24 dev eth1
}
smtp_alert
}
slave
! Configuration File for keepalived
global_defs {
notification_email {
wangkaijin@pxe-svr.skylog.cn
}
notification_email_from root@pxe-svr.skylog.cn
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_ID_2
}
vrrp_script chk_nginx {
script "/root/bin/check_http.sh"
interval 2
weight 2
}
vrrp_instance NGINX1 {
state BACKUP #this instance is BACKUP on this node
interface eth0
virtual_router_id 51 #0-255; must match within an instance and be unique across the VRRP domain; unchanged
priority 110 #the higher priority becomes master; lowered here
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script { #run the check script
chk_nginx
}
virtual_ipaddress { #floating IP (VIP) for this instance
#192.168.78.215
172.16.9.70/24 dev eth1
}
}
vrrp_instance NGINX2 { #this node's own instance
state MASTER #changed to MASTER
interface eth0
virtual_router_id 52 #same as on the other node
priority 150 #raised to 150
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nginx
}
virtual_ipaddress {
#192.168.78.115
172.16.9.69/24 dev eth1
}
}