1. Building the etcd cluster
Install the etcd database on all nodes:
[root@k8s-master ~]# yum install etcd -y
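Before configuring anything, it is worth confirming that all three nodes got the same etcd build, since mixed versions in one cluster can behave unpredictably:
etcd --version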
Config file on the master node (10.0.0.11):
[root@k8s-master ~]# grep -Ev '^$|#' /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="node1"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.11:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379"
ETCD_INITIAL_CLUSTER="node1=http://10.0.0.11:2380,node2=http://10.0.0.12:2380,node3=http://10.0.0.13:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
Config file on node k8s-node1 (10.0.0.12):
[root@k8s-node1 ~]# grep -Ev '^$|#' /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="node2"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.12:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.12:2379"
ETCD_INITIAL_CLUSTER="node1=http://10.0.0.11:2380,node2=http://10.0.0.12:2380,node3=http://10.0.0.13:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
Config file on node k8s-node2 (10.0.0.13):
[root@k8s-node2 xiaoniao]# grep -Ev '^$|#' /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/"
ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_NAME="node3"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.13:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.13:2379"
ETCD_INITIAL_CLUSTER="node1=http://10.0.0.11:2380,node2=http://10.0.0.12:2380,node3=http://10.0.0.13:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
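The three files are identical except for ETCD_NAME and the two advertise URLs. A minimal sketch of patching just those lines with sed, assuming NAME and IP are set to the right values on each node (both variables are illustrative):
NAME=node2; IP=10.0.0.12   # example values for k8s-node1; adjust per node
sed -ri "s|^ETCD_NAME=.*|ETCD_NAME=\"$NAME\"|" /etc/etcd/etcd.conf
sed -ri "s|^ETCD_INITIAL_ADVERTISE_PEER_URLS=.*|ETCD_INITIAL_ADVERTISE_PEER_URLS=\"http://$IP:2380\"|" /etc/etcd/etcd.conf
sed -ri "s|^ETCD_ADVERTISE_CLIENT_URLS=.*|ETCD_ADVERTISE_CLIENT_URLS=\"http://$IP:2379\"|" /etc/etcd/etcd.conf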
Stop the running services on the master node:
systemctl stop flanneld.service
systemctl stop kube-apiserver.service
systemctl stop kube-controller-manager.service
systemctl stop kube-scheduler.service
Stop the running services on all nodes:
systemctl stop flanneld.service
systemctl stop kubelet.service kube-proxy.service
On the master, wipe the data left over from the earlier single-node etcd installation:
rm -fr /var/lib/etcd/*
Then start etcd on all nodes at the same time, e.g. with Xshell's send-to-all-sessions feature:
systemctl start etcd
Verify the cluster:
[root@k8s-master ~]# etcdctl member list
9e80988e833ccb43: name=node1 peerURLs=http://10.0.0.11:2380 clientURLs=http://10.0.0.11:2379 isLeader=false
a10d8f7920cc71c7: name=node3 peerURLs=http://10.0.0.13:2380 clientURLs=http://10.0.0.13:2379 isLeader=false
abdc532bc0516b2d: name=node2 peerURLs=http://10.0.0.12:2380 clientURLs=http://10.0.0.12:2379 isLeader=true
[root@k8s-master ~]# etcdctl cluster-health
member 9e80988e833ccb43 is healthy: got healthy result from http://10.0.0.11:2379
member a10d8f7920cc71c7 is healthy: got healthy result from http://10.0.0.13:2379
member abdc532bc0516b2d is healthy: got healthy result from http://10.0.0.12:2379
cluster is healthy
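As an extra smoke test, write a key through one member and read it back through another; the key name below is arbitrary:
[root@k8s-master ~]# etcdctl set /test/hello world
[root@k8s-master ~]# etcdctl --endpoints http://10.0.0.12:2379 get /test/hello
# both commands should print: world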
The etcd cluster is now installed and healthy.
2. Writing the flannel network config into etcd
etcdctl mk /atomic.io/network/config '{ "Network": "172.18.0.0/16" }'
Then restart flanneld and docker on every node that runs them, so they pick up the new network:
systemctl restart flanneld
systemctl restart docker
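To confirm the key is visible cluster-wide, read it back through a different member before restarting anything:
etcdctl --endpoints http://10.0.0.13:2379 get /atomic.io/network/config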
3. Reconfiguring the master's api-server, controller-manager, and scheduler (127.0.0.1:8080)
vim /etc/kubernetes/apiserver
KUBE_ETCD_SERVERS="--etcd-servers=http://10.0.0.11:2379,http://10.0.0.12:2379,http://10.0.0.13:2379"
vim /etc/kubernetes/config
KUBE_MASTER="--master=http://127.0.0.1:8080"
systemctl restart kube-apiserver.service
systemctl restart kube-controller-manager.service kube-scheduler.service
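As a sanity check, kubectl on the master should now report every etcd endpoint as healthy:
[root@k8s-master ~]# kubectl get componentstatuses
# expect scheduler, controller-manager, and etcd-0/1/2 all Healthy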
4. Setting up master02's api-server, controller-manager, and scheduler (127.0.0.1:8080)
yum install kubernetes-master.x86_64 -y
scp -rp 10.0.0.11:/etc/kubernetes/apiserver /etc/kubernetes/apiserver
scp -rp 10.0.0.11:/etc/kubernetes/config /etc/kubernetes/config
systemctl stop kubelet.service
systemctl disable kubelet.service
systemctl stop kube-proxy.service
systemctl disable kube-proxy.service
systemctl enable kube-apiserver.service
systemctl restart kube-apiserver.service
systemctl enable kube-controller-manager.service
systemctl restart kube-controller-manager.service
systemctl enable kube-scheduler.service
systemctl restart kube-scheduler.service
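Before moving on, verify that master02's apiserver answers on its insecure port; /healthz returns "ok" when the apiserver is up:
curl -s http://127.0.0.1:8080/healthz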
5. Installing keepalived
Install keepalived on both masters:
yum install keepalived.x86_64 -y
# master01 /etc/keepalived/keepalived.conf:
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL_11
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.10
    }
}
# master02 /etc/keepalived/keepalived.conf:
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL_12
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.10
    }
}
Enable and start keepalived on both masters:
systemctl enable keepalived
systemctl start keepalived
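Both instances are configured as state BACKUP, so ownership of the VIP is decided by priority alone, and master01 (priority 100) should win the election. To see which machine currently holds 10.0.0.10:
ip addr show eth0 | grep 10.0.0.10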
Point kubelet and kube-proxy on all nodes at the api-server VIP:
vim /etc/kubernetes/kubelet
KUBELET_API_SERVER="--api-servers=http://10.0.0.10:8080"
vim /etc/kubernetes/config
KUBE_MASTER="--master=http://10.0.0.10:8080"
systemctl restart kubelet.service kube-proxy.service
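Once kubelet has re-registered, the same query should also work through the VIP:
kubectl -s 10.0.0.10:8080 get nodes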
Test from a node against each master directly:
[root@k8s-node2 xiaoniao]# kubectl -s 10.0.0.11:8080 get nodes
NAME STATUS AGE
10.0.0.13 Ready 5m
[root@k8s-node2 xiaoniao]# kubectl -s 10.0.0.12:8080 get nodes
NAME STATUS AGE
10.0.0.13 Ready 5m
Create a test deployment (two nginx replicas) through the VIP:
[root@k8s-node2 xiaoniao]# kubectl -s 10.0.0.10:8080 run nginx --image=10.0.0.11:5000/nginx:1.13 --replicas=2
View the resources through the VIP:
[root@k8s-node2 xiaoniao]# kubectl -s 10.0.0.10:8080 get all
Shut down master01, then query through the VIP again:
[root@k8s-node2 xiaoniao]# kubectl -s 10.0.0.10:8080 get all
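If keepalived failed over correctly, the command above keeps working and the VIP has moved to master02, which you can confirm there (master02's hostname is assumed here):
[root@k8s-master02 ~]# ip addr show eth0 | grep 10.0.0.10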
If you run into problems, add the blogger on WeChat to discuss!