# Put the worker node into maintenance mode
[root@vms10 ~]# kubectl drain vms12 --delete-emptydir-data --force --ignore-daemonsets
node/vms12 cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-87846, kube-system/kube-proxy-7f2jx
evicting pod kube-system/coredns-7f89b7bc75-q24dt
evicting pod kube-system/calico-kube-controllers-7f4f5bf95d-9c95m
pod/calico-kube-controllers-7f4f5bf95d-9c95m evicted
pod/coredns-7f89b7bc75-q24dt evicted
node/vms12 evicted
[root@vms10 ~]# kubectl get nodes
NAME    STATUS                     ROLES                  AGE   VERSION
vms10   Ready                      control-plane,master   17m   v1.20.1
vms11   Ready                      <none>                 12m   v1.20.1
vms12   Ready,SchedulingDisabled   <none>                 12m   v1.20.1
# Delete the node
[root@vms10 ~]# kubectl delete node vms12
node "vms12" deleted
[root@vms10 ~]# kubectl get nodes
NAME    STATUS   ROLES                  AGE   VERSION
vms10   Ready    control-plane,master   18m   v1.20.1
vms11   Ready    <none>                 13m   v1.20.1
# Wipe the node's Kubernetes configuration
[root@vms12 ~]# kubeadm reset
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
[preflight] Running pre-flight checks
W0520 01:30:17.934650   13451 removeetcdmember.go:79] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] No etcd config found. Assuming external etcd
[reset] Please, manually reset etcd to prevent further issues
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /var/lib/dockershim /var/run/kubernetes /var/lib/cni]
The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d
The reset process does not reset or clean up iptables rules or IPVS tables. If you wish to reset iptables, you must do so manually by using the "iptables" command.
If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar) to reset your system's IPVS tables.
The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.
[root@vms12 ~]# rm -rf /etc/cni/net.d/*
# Rejoin the cluster
[root@vms12 ~]# kubeadm join 172.16.10.10:6443 --token rd4el2.q4mfotxvwm195p7m --discovery-token-ca-cert-hash sha256:0021ae786417d2a3202b42d580a71218ba17fa563ce7fe2b56d21624ffa389f9
[root@vms10 ~]# kubectl get nodes
NAME    STATUS   ROLES                  AGE   VERSION
vms10   Ready    control-plane,master   21m   v1.20.1
vms11   Ready    <none>                 16m   v1.20.1
vms12   Ready    <none>                 53s   v1.20.1
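The transcript only clears /etc/cni/net.d before rejoining. To honor the remaining reset warnings as well, a minimal cleanup sketch for the reset node, run before rejoining, might look like this (the iptables flush assumes no other rules on the host are worth preserving):

# Flush iptables rules left behind by kube-proxy/CNI
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# If kube-proxy ran in IPVS mode, clear the IPVS tables as well
ipvsadm --clear
# Remove any admin kubeconfig that was copied to this node
rm -rf $HOME/.kube/config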
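Note that the join command above reuses the original bootstrap token. kubeadm tokens expire after 24 hours by default, so a node rejoining later needs a fresh join command, which can be printed on the control plane:

[root@vms10 ~]# kubeadm token create --print-join-command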
# Upgrade the master
# Check node status
[root@vms10 ~]# kubectl get nodes
NAME    STATUS   ROLES                  AGE   VERSION
vms10   Ready    control-plane,master   15h   v1.20.1
vms11   Ready    <none>                 15h   v1.20.1
vms12   Ready    <none>                 15h   v1.20.1
# Check the current versions
[root@vms10 ~]# kubectl version --short
Client Version: v1.20.1
Server Version: v1.20.1
# Remove the version lock on kubeadm
[root@vms10 ~]# yum versionlock del kubeadm
Loaded plugins: fastestmirror, langpacks, versionlock
Deleting versionlock for: 0:kubeadm-1.20.1-0.*
versionlock deleted: 1
# List the available versions; we will upgrade to 1.21.14 (upgrading across more than one minor version at a time is not recommended)
[root@vms10 ~]# yum list --showduplicates kubeadm
kubeadm.x86_64  1.21.14-0
kubeadm.x86_64  1.22.0-0
# Upgrade kubeadm
[root@vms10 ~]# yum install -y kubeadm-1.21.14-0
# Verify the kubeadm version
[root@vms10 ~]# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.14", GitCommit:"0f77da5bd4809927e15d1658fb4aa8f13ad890a5", GitTreeState:"clean", BuildDate:"2022-06-15T14:16:13Z", GoVersion:"go1.16.15", Compiler:"gc", Platform:"linux/amd64"}
# Review the upgrade plan (this also checks the current cluster state)
[root@vms10 ~]# kubeadm upgrade plan
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade] Fetching available versions to upgrade to
[upgrade/versions] Cluster version: v1.20.1
[upgrade/versions] kubeadm version: v1.21.14
I0520 17:54:43.190274   54385 version.go:254] remote version is much newer: v1.27.2; falling back to: stable-1.21
[upgrade/versions] Target version: v1.21.14
[upgrade/versions] Latest version in the v1.20 series: v1.20.15
Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT   CURRENT       TARGET
kubelet     3 x v1.20.1   v1.20.15
Upgrade to the latest version in the v1.20 series:
Components that must be upgraded manually after you have upgraded the control plane with 'kubeadm upgrade apply':
COMPONENT   CURRENT       TARGET
kubelet     3 x v1.20.1   v1.21.14
The table below shows the current state of component configs as understood by this version of kubeadm. Configs that have a "yes" mark in the "MANUAL UPGRADE REQUIRED" column require manual config upgrade or resetting to kubeadm defaults before a successful upgrade can be performed. The version to manually upgrade to is denoted in the "PREFERRED VERSION" column.
API GROUP                 CURRENT VERSION   PREFERRED VERSION   MANUAL UPGRADE REQUIRED
kubeproxy.config.k8s.io   v1alpha1          v1alpha1            no
kubelet.config.k8s.io     v1beta1           v1beta1             no
_____________________________________________________________________
# Put the master into maintenance mode and drain its pods
[root@vms10 ~]# kubectl drain vms10 --ignore-daemonsets
node/vms10 cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/calico-node-tjswd, kube-system/kube-proxy-9cfhk
node/vms10 drained
[root@vms10 ~]# kubectl get nodes
NAME    STATUS                     ROLES                  AGE   VERSION
vms10   Ready,SchedulingDisabled   control-plane,master   16h   v1.20.1
vms11   Ready                      <none>                 16h   v1.20.1
vms12   Ready                      <none>                 16h   v1.20.1
# Apply the upgrade
[root@vms10 ~]# kubeadm upgrade apply 1.21.14
[upgrade/config] Making sure the configuration is correct:
[upgrade/config] Reading configuration from the cluster...
[upgrade/config] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[preflight] Running pre-flight checks.
[upgrade] Running cluster health checks
[upgrade/successful] SUCCESS! Your cluster was upgraded to "v1.21.14". Enjoy!
[upgrade/kubelet] Now that your control plane is upgraded, please proceed with upgrading your kubelets if you haven't already done so.
# Take the master out of maintenance mode
[root@vms10 ~]# kubectl uncordon vms10
node/vms10 uncordoned
# Re-lock kubeadm
[root@vms10 ~]# yum versionlock add kubeadm
Loaded plugins: fastestmirror, langpacks, versionlock
Adding versionlock on: 0:kubeadm-1.21.14-0
versionlock added: 1
# Upgrade kubelet and kubectl
# Unlock kubelet and kubectl
[root@vms10 ~]# yum versionlock del kubelet kubectl
Loaded plugins: fastestmirror, langpacks, versionlock
Deleting versionlock for: 0:kubectl-1.20.1-0.*
Deleting versionlock for: 0:kubelet-1.20.1-0.*
versionlock deleted: 2
[root@vms10 ~]# yum install -y kubelet-1.21.14-0 kubectl-1.21.14-0
[root@vms10 ~]# systemctl daemon-reload && systemctl restart kubelet
[root@vms10 ~]# kubectl version --short
Client Version: v1.21.14
Server Version: v1.21.14
# Lock kubelet and kubectl again to prevent accidental upgrades
[root@vms10 ~]# yum versionlock add kubelet kubectl
Loaded plugins: fastestmirror, langpacks, versionlock
Adding versionlock on: 0:kubelet-1.21.14-0
Adding versionlock on: 0:kubectl-1.21.14-0
versionlock added: 2
# The master upgrade is complete
[root@vms10 ~]# kubectl get nodes
NAME    STATUS   ROLES                  AGE   VERSION
vms10   Ready    control-plane,master   17h   v1.21.14
vms11   Ready    <none>                 17h   v1.20.1
vms12   Ready    <none>                 16h   v1.20.1
# Upgrade the worker nodes
# Create the upgrade script (see the note below about drain credentials)
[root@vms11 ~]# tee ./upgrade-k8s.sh <<-'EOF'
#!/bin/bash
# set the upgrade target
version="1.21.14"
# unlock kubeadm kubelet kubectl
yum versionlock del kubeadm kubelet kubectl
# upgrade kubeadm
yum install -y kubeadm-$version
# upgrade the node; workers use 'kubeadm upgrade node', not 'kubeadm upgrade apply'
kubectl drain $(hostname) --ignore-daemonsets
kubeadm upgrade node
# upgrade kubelet and kubectl, restart the kubelet, then bring the node back
yum install -y kubelet-$version kubectl-$version
systemctl daemon-reload && systemctl restart kubelet
kubectl uncordon $(hostname)
yum versionlock add kubeadm kubelet kubectl
EOF
[root@vms11 ~]# chmod +x upgrade-k8s.sh
[root@vms11 ~]# ./upgrade-k8s.sh
# After all worker nodes are upgraded, check the result
[root@vms10 ~]# kubectl get nodes
NAME    STATUS   ROLES                  AGE   VERSION
vms10   Ready    control-plane,master   17h   v1.21.14
vms11   Ready    <none>                 17h   v1.21.14
vms12   Ready    <none>                 16h   v1.21.14
# The upgrade is complete
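One caveat with the script: kubectl drain/uncordon need cluster-admin credentials, which worker nodes normally lack. If kubectl is not configured on the worker, drop those two lines from the script and cordon the node from the control plane around the script run instead, e.g.:

[root@vms10 ~]# kubectl drain vms11 --ignore-daemonsets
# run ./upgrade-k8s.sh on vms11 (with its drain/uncordon lines removed), then:
[root@vms10 ~]# kubectl uncordon vms11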
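Besides the final `kubectl get nodes`, a custom-columns query is a compact way to confirm every kubelet landed on the target version (the column names here are arbitrary):

[root@vms10 ~]# kubectl get nodes -o custom-columns=NAME:.metadata.name,KUBELET:.status.nodeInfo.kubeletVersion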
# Install metrics-server: adjust the container args in components.yaml before deploying
    containers:
    - args:
      - --cert-dir=/tmp
      - --secure-port=443
      - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
      - --kubelet-use-node-status-port
      - --metric-resolution=15s
      - --kubelet-insecure-tls    # skip TLS verification of kubelet serving certs (usually self-signed in kubeadm clusters)
      image: ccr.ccs.tencentyun.com/mirrors/metrics-server:v0.5.0    # switched to a mirror reachable from mainland China
# Deploy
[root@vms10 ~]# kubectl apply -f components.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
# Check the running pod
[root@vms10 ~]# kubectl get pod -n kube-system | grep metrics-server
metrics-server-c44f75469-ltpgz   0/1   Running   0   34s
# View node resource usage
[root@vms10 ~]# kubectl top nodes
W0520 18:59:54.167920  103768 top_node.go:119] Using json format to get metrics. Next release will switch to protocol-buffers, switch early by passing --use-protocol-buffers flag
NAME    CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
vms10   153m         7%     1669Mi          43%
vms11   96m          4%     787Mi           20%
vms12   86m          4%     767Mi           20%
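The pod reports 0/1 right after creation because metrics-server needs a scrape cycle or two before its readiness probe passes. Two stock kubectl checks while waiting:

[root@vms10 ~]# kubectl -n kube-system rollout status deployment metrics-server
[root@vms10 ~]# kubectl get apiservice v1beta1.metrics.k8s.io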
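Once the v1beta1.metrics.k8s.io APIService shows Available, pod-level metrics are served as well, for example:

[root@vms10 ~]# kubectl top pods -n kube-system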