# Disable swap now and on reboot (kubelet refuses to start with swap enabled).
swapoff -a
# BUG FIX: the original ran `sed -i'/swap/s/^/#/g'` — with no space after -i,
# GNU sed treats the expression as a backup suffix and the edit never happens.
sed -i '/swap/s/^/#/' /etc/fstab
# Load kernel modules required by kube-proxy IPVS mode.
for mod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh; do
  modprobe -- "$mod"
done
# nf_conntrack_ipv4 was merged into nf_conntrack in kernel 4.19+; try both so
# the script works on older and newer kernels.
modprobe -- nf_conntrack_ipv4 2>/dev/null || modprobe -- nf_conntrack
# Verify the modules actually loaded.
lsmod | grep ip_vs
# Enable IP forwarding immediately and persist kernel settings.
# br_netfilter must be loaded first, otherwise the bridge-nf-call-* keys do not
# exist and applying them fails.
modprobe br_netfilter
sysctl -w net.ipv4.ip_forward=1
# Write all settings once to a dedicated file instead of blindly appending to
# /etc/sysctl.conf (the original duplicated lines on every re-run).
cat > /etc/sysctl.d/99-kubernetes.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness = 0
EOF
# --system loads /etc/sysctl.d/*.conf as well as /etc/sysctl.conf.
sysctl --system
# Kubelet extra flags. BUG FIX: the original wrote KUBELETEXTRAARGS with the
# misspelled flag --fail-swa-on; the variable the kubelet unit actually reads
# is KUBELET_EXTRA_ARGS and the flag is --fail-swap-on. Likewise the proxy
# mode variable is KUBE_PROXY_MODE.
cat > /etc/sysconfig/kubelet << EOF
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
KUBE_PROXY_MODE=ipvs
EOF
# Configure Aliyun mirrors for Docker CE and Kubernetes packages.
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# BUG FIX: the repo directory is /etc/yum.repos.d/ — the original wrote
# /etc/yum.repo.d/, so yum never saw the Kubernetes repo and the install failed.
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Pin kubelet/kubeadm/kubectl to the target cluster version.
yum -y install docker-ce kubelet-1.21.5 kubeadm-1.21.5 kubectl-1.21.5
# Docker daemon config: systemd cgroup driver (must match the kubelet's) plus
# json-file log rotation.
mkdir -p /etc/docker
# BUG FIX: "graph" is a long-deprecated key that newer Docker engines reject
# outright, preventing the daemon from starting; the supported key is
# "data-root".
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "data-root": "/data/docker",
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "256m",
    "max-file": "3"
  }
}
EOF
# Enable kubelet but do not start it yet — it crash-loops until
# `kubeadm init`/`kubeadm join` writes its configuration; that is expected.
systemctl enable kubelet
systemctl enable docker --now
# Map the control-plane endpoint name locally. Guarded with grep so re-running
# the script does not append duplicate entries to /etc/hosts.
grep -q 'lb.local.kube' /etc/hosts || cat >> /etc/hosts << EOF
127.0.0.1 lb.local.kube k8s-master01
EOF
# Generate the kubeadm init configuration. BUG FIXES vs. the original:
#   * "/~/kubeadm-init.yaml" is a literal directory named "~" under / — use
#     $HOME instead.
#   * ">>" appended a second copy of the document on every re-run — use ">".
#   * nested keys (timeoutForControlPlane, type, local/dataDir, dnsDomain,
#     podSubnet) were written flush-left, which parses as the wrong structure;
#     they must be indented under their parent keys.
cat > "$HOME/kubeadm-init.yaml" << EOF
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: lb.local.kube:6443
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.21.5
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
scheduler: {}
EOF
kubeadm init --config="$HOME/kubeadm-init.yaml"
# Install the cluster-admin kubeconfig for the current user and sanity-check
# API access.
mkdir -p "$HOME/.kube"
cp /etc/kubernetes/admin.conf "$HOME/.kube/config"
kubectl get nodes
# --- Copy the first master's certificates to masters 2 and 3 ---
# (BUG FIX: these instructions were bare, un-commented Chinese prose lines;
# executed as shell they would abort the script. Translated to comments.)
# Run on k8s-master02 and k8s-master03:
mkdir -p /etc/kubernetes/pki/etcd/
# Run on k8s-master01:
cd /etc/kubernetes/pki/
scp ca.* front-proxy-ca.* sa.* k8s-master02:/etc/kubernetes/pki/
scp ca.* front-proxy-ca.* sa.* k8s-master03:/etc/kubernetes/pki/
scp etcd/ca.* k8s-master02:/etc/kubernetes/pki/etcd/
scp etcd/ca.* k8s-master03:/etc/kubernetes/pki/etcd/
# On k8s-master01: print the control-plane join command, then paste its output
# on k8s-master02 and k8s-master03.
echo "$(kubeadm token create --print-join-command) --control-plane"
# On k8s-master01: print the worker join command, then paste its output on
# k8s-worker-001, k8s-worker-002, ...
kubeadm token create --print-join-command
# Confirm all masters and workers have joined the cluster.
kubectl get node
# Install bash completion and enable kubectl completion for this and future
# shell sessions.
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
# Check control-plane component health. On kubeadm 1.21 both kube-scheduler
# and kube-controller-manager report Unhealthy because their static-pod
# manifests pass --port=0 (the insecure health port is disabled).
kubectl get cs
# BUG FIX: the original only patched kube-controller-manager; kube-scheduler
# needs the same change or it stays Unhealthy.
sed -i "/- --port=0/s/^/#/" /etc/kubernetes/manifests/kube-controller-manager.yaml
sed -i "/- --port=0/s/^/#/" /etc/kubernetes/manifests/kube-scheduler.yaml
# kubelet re-reads the static-pod manifests; ~30s later both report Healthy.
kubectl get cs
# Optional: mount the host's /etc/localtime into kube-controller-manager so
# its log timestamps use the host timezone. Add the following to the
# container spec in /etc/kubernetes/manifests/kube-controller-manager.yaml.
# (BUG FIX: the original pasted this YAML as bare, unindented script lines —
# invalid as both shell and YAML; shown here properly indented, as a comment.)
#
#     volumeMounts:
#     - mountPath: /etc/localtime
#       name: date-config
#       readOnly: true
#     ...
#     volumes:
#     - hostPath:
#         path: /etc/localtime
#       name: date-config