k8s High-Availability Architecture Deployment

Reference:
KubeSphere documentation
https://www.kubesphere.io/zh/docs/v3.3/installing-on-linux/on-premises/install-kubesphere-on-vmware-vsphere/

Deploy Keepalived + HAProxy on the lb01 and lb02 hosts.

yum install keepalived haproxy psmisc -y
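
The psmisc package is included because it provides the killall command, which the Keepalived health-check script configured later relies on. A quick sanity check that the three packages are in place:

rpm -q keepalived haproxy psmisc
which killall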

Configure the services

Configure HAProxy

On the servers at 172.16.10.11 and 172.16.10.12, configure HAProxy as follows (the configuration is identical on both lb machines; just mind the backend server addresses).

# HAProxy configuration: /etc/haproxy/haproxy.cfg
global
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    log     global
    option  httplog
    option  dontlognull
    timeout connect 5000
    timeout client  5000
    timeout server  5000

#---------------------------------------------------------------------
# main frontend which proxies to the backends
#---------------------------------------------------------------------
frontend kube-apiserver
    bind *:6443
    mode tcp
    option tcplog
    default_backend kube-apiserver

#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kube-apiserver
    mode tcp
    option tcplog
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 172.16.11.11:6443 check
    server kube-apiserver-2 172.16.11.21:6443 check

Check the configuration syntax for errors before starting:

haproxy -f /etc/haproxy/haproxy.cfg -c
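
If the configuration is valid, the check typically prints something like the following (the exact wording may differ between HAProxy versions):

Configuration file is valid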

Start HAProxy and enable it on boot:

systemctl restart haproxy && systemctl enable haproxy
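
To confirm HAProxy is actually running and listening on the frontend port, a quick check such as the following should do (ss is part of the standard iproute toolset):

systemctl status haproxy
ss -tlnp | grep 6443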

To stop HAProxy:

systemctl stop haproxy

Configure Keepalived

MASTER node (priority 100): lb01 - 172.16.10.11 (/etc/keepalived/keepalived.conf)

global_defs {
    notification_email {
    }

    smtp_connect_timeout 30      # SMTP connection timeout
    router_id LVS_DEVEL01        # a nickname identifying this server
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 20
}

vrrp_instance haproxy-vip {
    state MASTER                 # the primary server is MASTER
    priority 100                 # the primary's priority must be higher than the backup's
    interface eth0               # network interface this instance is bound to
    virtual_router_id 60         # VRRP group ID; think of this as hot-standby group number 60
    advert_int 1                 # advertise once per second to detect whether the peer is down
    authentication {
        auth_type PASS           # authentication type
        auth_pass password       # authentication password shared by both peers
    }

    unicast_src_ip 172.16.10.11  # IP address of this machine
    unicast_peer {
        172.16.10.12             # IP addresses of the other peers
    }

    virtual_ipaddress {
        # the VIP address
        172.16.10.1
    }

    track_script {
        chk_haproxy
    }
}

BACKUP node (priority 90): lb02 - 172.16.10.12 (/etc/keepalived/keepalived.conf)

global_defs {
    notification_email {
    }

    router_id LVS_DEVEL02        # a nickname identifying this server
    vrrp_skip_check_adv_addr
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
    weight 20
}

vrrp_instance haproxy-vip {
    state BACKUP                 # the standby server is BACKUP
    priority 90                  # lower priority than the MASTER's 100
    interface eth0               # network interface this instance is bound to
    virtual_router_id 60
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass password
    }

    unicast_src_ip 172.16.10.12  # IP address of this machine
    unicast_peer {
        172.16.10.11             # IP addresses of the other peers
    }

    virtual_ipaddress {
        # the VIP address
        172.16.10.1
    }

    track_script {
        chk_haproxy
    }
}
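
Both instances track the same chk_haproxy script: killall -0 haproxy exits 0 while a haproxy process exists, so Keepalived adds the weight of 20 to that node's priority (MASTER 100 + 20 = 120, BACKUP 90 + 20 = 110). When haproxy dies on the MASTER, its effective priority drops back to 100, below the BACKUP's 110, and the VIP moves over. The check itself can be tried by hand:

killall -0 haproxy && echo "haproxy is running" || echo "haproxy is down"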

Start and enable the services on both lb01 and lb02.
Start keepalived and enable it on boot (the stop command is listed for reference):

systemctl restart keepalived && systemctl enable keepalived
systemctl stop keepalived

Start the keepalived service:

systemctl start keepalived
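
To verify that keepalived started cleanly and entered the expected VRRP state, the service status and recent journal entries are usually enough:

systemctl status keepalived
journalctl -u keepalived --no-pager | tail -n 20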

Verify availability

Run the following on the lb01 host:

ip a s

Check which node the VIP is bound to:

[root@lb01 ~]# ip a s
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:45:33:52 brd ff:ff:ff:ff:ff:ff
    inet 172.16.10.11/16 brd 172.16.255.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.10.1/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe45:3352/64 scope link
       valid_lft forever preferred_lft forever

The output shows that 172.16.10.1 is currently bound to lb01.
Next, stop HAProxy on the node holding the VIP, i.e. stop the haproxy service on lb01:

lb01
[root@lb01 ~]# systemctl stop haproxy.service
[root@lb01 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:45:33:52 brd ff:ff:ff:ff:ff:ff
    inet 172.16.10.11/16 brd 172.16.255.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fe45:3352/64 scope link
       valid_lft forever preferred_lft forever

lb02
[root@lb02 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether fa:16:3e:e6:c9:2c brd ff:ff:ff:ff:ff:ff
    inet 172.16.10.12/16 brd 172.16.255.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 172.16.10.1/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f816:3eff:fee6:c92c/64 scope link
       valid_lft forever preferred_lft forever

After haproxy was stopped on lb01, the VIP moved over to lb02.
After restarting haproxy on lb01, the VIP moved back to lb01.
Verification successful.
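
As a final check, once the kube-apiserver backends at 172.16.11.11 and 172.16.11.21 are actually serving on port 6443, the whole chain can be exercised end to end through the VIP. This is only a sketch: an unauthenticated request will usually get a 401/403 JSON error rather than the version, but any HTTP response proves that the VIP, HAProxy, and a backend apiserver are all reachable:

curl -k https://172.16.10.1:6443/version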