Deploying keepalived + LVS for multi-master high availability (minimum spec per node: 2 CPU cores, 2 GB RAM)

192.168.1.11  master1
192.168.1.12  master2
192.168.1.13  master3
192.168.1.20  node1
1. Set the hostname, configure the hosts file, configure the yum repos, disable the firewall and SELinux, set up time synchronization, and turn off swap (repeat on master2, master3, and node1 with the matching hostname):

[root@master1 ~]# hostnamectl set-hostname master1
[root@master1 ~]# vim /etc/hosts
192.168.1.11 master1
192.168.1.12 master2
192.168.1.13 master3
192.168.1.20 node1
[root@master1 ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
[root@master1 ~]# curl -o /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master1 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
EOF
[root@master1 ~]# yum clean all
[root@master1 ~]# yum makecache fast
[root@master1 ~]# systemctl stop firewalld && systemctl disable firewalld
[root@master1 ~]# ntpdate cn.pool.ntp.org
[root@master1 ~]# crontab -e
* */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
[root@master1 ~]# systemctl restart crond
[root@master1 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@master1 ~]# swapoff -a
[root@master1 ~]# sed -i '/swap/s/^/#/g' /etc/fstab
[root@master1 ~]# reboot -f
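Before moving on to step 2, it is worth verifying that the firewall, SELinux, and swap are really off after the reboot. These checks are an addition, not in the original notes; expected results are shown as comments:

[root@master1 ~]# systemctl is-active firewalld    # expect: inactive (or unknown)
[root@master1 ~]# getenforce                       # expect: Disabled
[root@master1 ~]# swapon --summary                 # expect: no output (swap is off)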
2. Tune the kernel parameters and configure Docker:

[root@master1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@master1 ~]# modprobe br_netfilter
[root@master1 ~]# sysctl --system
[root@master1 ~]# mkdir -p /etc/docker
[root@master1 ~]# cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
[root@master1 ~]# yum install -y docker-ce-19.03.7-3.el7
[root@master1 ~]# systemctl enable docker && systemctl start docker
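A quick way to confirm the Docker cgroup driver and the bridge sysctls took effect (not in the original notes; output wording varies slightly by Docker version):

[root@master1 ~]# docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: Cgroup Driver: systemd
[root@master1 ~]# sysctl net.bridge.bridge-nf-call-iptables           # expect: net.bridge.bridge-nf-call-iptables = 1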
3. Make the bridge configuration persistent and enable IPVS:
[root@master1 ~]# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
[root@master1 ~]# echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
[root@master1 ~]# echo """
vm.swappiness = 0
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
""" > /etc/sysctl.conf
[root@master1 ~]# sysctl -p
[root@master1 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
  /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
  if [ \$? -eq 0 ]; then
    /sbin/modprobe \${kernel_module}
  fi
done
EOF
[root@master1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
[root@master1 ~]# lsmod | grep ip_vs
nf_nat                 26583  4 ip_vs_ftp,nf_nat_ipv4,nf_nat_ipv6,nf_nat_masquerade_ipv4
ip_vs_sed              12519  0
ip_vs_nq               12516  0
ip_vs_sh               12688  0
ip_vs_dh               12688  0
ip_vs_lblcr            12922  0
ip_vs_lblc             12819  0
ip_vs_wrr              12697  0
ip_vs_rr               12600  35
ip_vs_wlc              12519  0
ip_vs_lc               12516  0
ip_vs                 145458  59 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          139264  9 ip_vs,nf_nat,nf_nat_ipv4,nf_nat_ipv6,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4,nf_conntrack_ipv6
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
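The kernel modules loaded above are what keepalived's LVS and, later, kube-proxy in IPVS mode rely on. Installing the ipvsadm userspace tool makes the resulting LVS tables inspectable; this helper step is an addition, not part of the original notes:

[root@master1 ~]# yum install -y ipvsadm
[root@master1 ~]# ipvsadm -Ln    # lists IPVS virtual servers and their real servers (empty until keepalived/kube-proxy create them)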
[root@master1 ~]# yum -y install kubelet-1.18.2 kubeadm-1.18.2
Installed:
  kubeadm.x86_64 0:1.18.2-0        kubelet.x86_64 0:1.18.2-0
Dependency Installed:
  cri-tools.x86_64 0:1.19.0-0      kubectl.x86_64 0:1.23.0-0      kubernetes-cni.x86_64 0:0.8.7-0

Note that yum resolved kubectl as a dependency at 1.23.0, which is outside the supported version skew for a v1.18 cluster; installing kubectl-1.18.2 explicitly avoids this.
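The notes do not show it, but kubeadm expects the kubelet service to be enabled so it starts at boot and is picked up by kubeadm init/join:

[root@master1 ~]# systemctl enable kubelet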
Upload the images to master1, master2, master3, and node1, then load them on each host:

[root@master1 ~]# for i in `ls *.gz`; do docker load -i $i; done
[root@master1 ~]# docker images
REPOSITORY                           TAG       IMAGE ID       CREATED         SIZE
k8s.gcr.io/etcd                      3.4.3-0   303ce5db0e90   2 years ago     288MB
k8s.gcr.io/kube-apiserver            v1.18.2   6ed75ad404bd   20 months ago   173MB
k8s.gcr.io/kube-scheduler            v1.18.2   a3099161e137   20 months ago   95.3MB
k8s.gcr.io/kube-controller-manager   v1.18.2   ace0a8c17ba9   20 months ago   162MB
k8s.gcr.io/pause                     3.2       80d28bedfe5d   22 months ago   683kB
k8s.gcr.io/coredns                   1.6.7     67da37a9a360   22 months ago   43.8MB
k8s.gcr.io/kube-proxy                v1.18.2   0d40868643c6   20 months ago   117MB

On node1 only the last three are needed (pause, coredns, kube-proxy).
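One way to get the image archives onto the other hosts, assuming the .gz files sit in /root on master1 and SSH access to the other machines is set up (this loop is an illustration, not part of the original notes):

[root@master1 ~]# for h in master2 master3 node1; do scp /root/*.gz $h:/root/; done

Each host then loads them with the same docker load loop shown above.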
Deploy keepalived + LVS to make the master nodes highly available, i.e. to provide HA for the apiserver:

[root@master1 ~]# vim /etc/keepalived/keepalived.conf
...
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
...

A fuller version of this file is sketched below.
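Only the health-check fragment of keepalived.conf survives above; a minimal complete file consistent with it might look as follows. This is a sketch, assuming keepalived is already installed (e.g. yum install -y keepalived), the NIC is ens33, the VIP is 192.168.1.188, and DR-mode LVS balances across the three apiservers on port 6443; router_id, virtual_router_id, the auth_pass secret, and the rr scheduler are illustrative choices, not from the original notes:

global_defs {
   router_id LVS_K8S                # illustrative identifier
}
vrrp_instance VI_1 {
    state BACKUP
    nopreempt
    interface ens33                  # line 7: the actual NIC name
    virtual_router_id 80             # illustrative; must match on all masters
    priority 100                     # line 9: differs per master (see note below)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s_vip            # illustrative shared secret
    }
    virtual_ipaddress {
        192.168.1.188
    }
}
virtual_server 192.168.1.188 6443 {
    delay_loop 6
    lb_algo rr                       # illustrative scheduler
    lb_kind DR
    persistence_timeout 0
    protocol TCP
    real_server 192.168.1.11 6443 {
        weight 1
        SSL_GET {                    # the apiserver serves HTTPS
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.1.12 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.1.13 6443 {
        weight 1
        SSL_GET {
            url {
              path /healthz
              status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

With this layout, line 7 (interface) and line 9 (priority) are exactly the two lines the notes say to adjust on the other masters.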
The only differences in keepalived.conf on the other two master nodes are:

line 7:  interface ens33   # use the actual NIC name
line 9:  priority 100      # the weights differ across the three masters: 100 / 110 / 90

[root@master2 ~]# vim /etc/keepalived/keepalived.conf
[root@master3 ~]# vim /etc/keepalived/keepalived.conf

Run the following on master1, master2, and master3 in turn:
[root@master1 ~]# systemctl enable keepalived.service && systemctl start keepalived.service
[root@master1 ~]# systemctl status keepalived.service
[root@master1 ~]# ip addr
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:9d:7b:09 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.11/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.1.188/32 scope global ens33
[root@master1 ~]# ping 192.168.1.188
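A simple failover test, not in the original notes: stop keepalived on the node currently holding the VIP and confirm that 192.168.1.188 moves to another master within a few seconds:

[root@master1 ~]# systemctl stop keepalived
[root@master2 ~]# ip addr show ens33 | grep 192.168.1.188    # the VIP should now appear here
[root@master1 ~]# systemctl start keepalived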
Initialize the k8s cluster on master1:

[root@master1 ~]# vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
controlPlaneEndpoint: 192.168.1.188:6443
apiServer:
  certSANs:        # the nodes the certificate is generated for
  - 192.168.1.11
  - 192.168.1.12
  - 192.168.1.13
  - 192.168.1.20
  - 192.168.1.188
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
[root@master1 ~]# kubeadm init --config kubeadm-config.yaml
...
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
    --discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750 \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.188:6443 --token h13awv.1mkmz2majgn4gesg \
    --discovery-token-ca-cert-hash sha256:32414237c63a9f7b70fa1f8e2c644a8c328a010208d909897cd1db33d45c8750
Run the following on master1 so that kubectl has permission to operate on k8s resources:

[root@master1 ~]# mkdir -p $HOME/.kube
[root@master1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

Do the same on master2 and master3 once they join. Because no network plugin is installed yet, the node is still NotReady and coredns stays Pending; Calico or Flannel must be installed.
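A quick way to see this state before the network plugin goes in (illustrative output; the AGE will differ):

[root@master1 ~]# kubectl get nodes
NAME      STATUS     ROLES    AGE   VERSION
master1   NotReady   master   2m    v1.18.2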
Install Calico:

[root@master1 ~]# docker load -i ...; docker load -i ...
[root@master1 ~]# vim calico.yaml
167             value: "can-reach=192.168.1.11"
181             value: "10.244.0.0/16"
[root@master1 ~]# kubectl apply -f calico.yaml
[root@master1 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
master1   Ready    master   37m   v1.18.2
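Once the Calico pods come up, coredns should leave Pending as well; a check such as the following confirms it (pod names and counts will differ per cluster):

[root@master1 ~]# kubectl get pods -n kube-system | grep -E 'calico|coredns'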
Copy the certificates from master1 to master2 and master3.

(1) Create the certificate directories on master2 and master3:

[root@master2 ~]# cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/
[root@master3 ~]# cd /root && mkdir -p /etc/kubernetes/pki/etcd && mkdir -p ~/.kube/

(2) On master1, copy the certificates to master2 and master3:
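A sketch of the usual copy commands, assuming default kubeadm certificate paths (run again with master3 in place of master2):

[root@master1 ~]# scp /etc/kubernetes/pki/ca.crt master2:/etc/kubernetes/pki/
[root@master1 ~]# scp /etc/kubernetes/pki/ca.key master2:/etc/kubernetes/pki/
[root@master1 ~]# scp /etc/kubernetes/pki/sa.key master2:/etc/kubernetes/pki/
[root@master1 ~]# scp /etc/kubernetes/pki/sa.pub master2:/etc/kubernetes/pki/
[root@master1 ~]# scp /etc/kubernetes/pki/front-proxy-ca.crt master2:/etc/kubernetes/pki/
[root@master1 ~]# scp /etc/kubernetes/pki/front-proxy-ca.key master2:/etc/kubernetes/pki/
[root@master1 ~]# scp /etc/kubernetes/pki/etcd/ca.crt master2:/etc/kubernetes/pki/etcd/
[root@master1 ~]# scp /etc/kubernetes/pki/etcd/ca.key master2:/etc/kubernetes/pki/etcd/

master2 and master3 can then join the control plane with the kubeadm join ... --control-plane command printed by kubeadm init above.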