In essence, it is just a Docker container running on top of Docker.

Concepts

Kind

kind stands for Kubernetes IN Docker. As the name suggests, it packs all the components Kubernetes needs into a Docker container, giving you an out-of-the-box way to stand up a Kubernetes environment, well suited for quickly building local development and test setups. kind consists of the following components:

  • Go packages implementing cluster creation, image build, etc.
  • A command line interface (kind) built on these packages.
  • Docker image(s) written to run systemd, Kubernetes, etc.
  • kubetest integration also built on these packages (WIP)
kind uses kubeadm to create and bootstrap the cluster nodes. It uses containerd as the container runtime inside those nodes, so the removal of Dockershim from Kubernetes has no impact on kind.
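A minimal sketch of the day-to-day workflow once kind is installed (the cluster name my-test is just an example):

# Create, inspect, and tear down a throwaway single-node cluster
kind create cluster --name my-test
kubectl cluster-info --context kind-my-test
kind get clusters
kind delete cluster --name my-test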

Deployment

Environment preparation

# This deployment is based on RockyLinux 9.2
# Host spec: 2-core CPU / 2 GB RAM / 40 GB disk
# Docker and kubectl need to be installed in advance
setenforce 0
# Remove Podman
dnf remove -y podman*
dnf install -y yum-utils device-mapper-persistent-data lvm2 jq

# Add the package repositories
dnf config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Refresh the cache and install Docker CE and kubectl
dnf makecache
dnf install -y docker-ce docker-ce-cli docker-compose-plugin kubectl

# Install the versionlock plugin and pin the package versions
dnf install -y python3-dnf-plugin-versionlock
dnf versionlock add docker-ce kubectl

# Disable swap
swapoff -a ; sed -i '/swap/d' /etc/fstab

# Configure the kernel modules to load at boot
cat > /etc/modules-load.d/containerd.conf <<EOF
overlay
br_netfilter
EOF

cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
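# Load the modules and apply the sysctl settings immediately (the files above otherwise only take effect at boot)
modprobe overlay
modprobe br_netfilter
sysctl --system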

# Configure the registry mirror (accelerator)
mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "group": "docker",
  "registry-mirrors": ["https://37y8py0j.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

# Set the firewall default zone to trusted
firewall-cmd --set-default-zone=trusted
firewall-cmd --reload

# Permanently disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

# Reload systemd and enable Docker to start at boot
systemctl daemon-reload
systemctl enable --now docker

# Load GitHub host entries (GitHub520) into /etc/hosts
sh -c 'sed -i "/# GitHub520 Host Start/Q" /etc/hosts && curl https://raw.hellogithub.com/hosts >> /etc/hosts'

# Flush disk buffers and refresh the linker cache (reboot if necessary)
sync
ldconfig

Deploy a single-node Kind cluster

# Download the kind binary (x86_64)
[ $(uname -m) = x86_64 ] && curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64

# Install kind
chmod +x ./kind
mv ./kind /usr/local/bin/kind
kind version

# Configure shell completion for kind
kind completion bash > ~/.kindrc
echo "source ~/.kindrc" >> ~/.bashrc

# Configure shell completion for kubectl
dnf install -y bash-completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

# Install Helm
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
sh get_helm.sh
helm version

# Add the Helm stable repository
helm repo add stable https://charts.helm.sh/stable
helm repo update

# Configure shell completion for Helm
helm completion bash > ~/.helmrc
echo "source ~/.helmrc" >> ~/.bashrc

# Install kubens, used for switching namespaces
curl -L https://github.com/ahmetb/kubectx/releases/download/v0.9.1/kubens -o /usr/local/bin/kubens
chmod +x /usr/local/bin/kubens
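# Quick usage sketch: list the namespaces, then switch the current context to kube-system
kubens
kubens kube-system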

# Load the updated shell environment
source ~/.bashrc
# Built-in help
[root@Ansible ~]# kind -h
kind creates and manages local Kubernetes clusters using Docker container 'nodes'

Usage:
  kind [command]

Available Commands:
  build       Build one of [node-image]
  completion  Output shell completion code for the specified shell (bash, zsh or fish)
  create      Creates one of [cluster]
  delete      Deletes one of [cluster]
  export      Exports one of [kubeconfig, logs]
  get         Gets one of [clusters, nodes, kubeconfig]
  help        Help about any command
  load        Loads images into nodes
  version     Prints the kind CLI version

Flags:
  -h, --help              help for kind
      --loglevel string   DEPRECATED: see -v instead
  -q, --quiet             silence all stderr output
  -v, --verbosity int32   info log verbosity, higher value produces more output
      --version           version for kind

Use "kind [command] --help" for more information about a command.

# Pull the node image required for the deployment
[root@Ansible ~]# docker pull kindest/node:v1.27.3

# Use a prepared kind configuration that pulls images through the Aliyun registry mirror
[root@Kind ~]# cat kind-config.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
kubeadmConfigPatches:
- |
  apiVersion: kubeadm.k8s.io/v1beta1
  kind: ClusterConfiguration
  metadata:
    name: config
  networking:
    serviceSubnet: 10.96.0.0/12
    podSubnet: 10.244.0.0/16
    disableDefaultCNI: false
  imageRepository: registry.aliyuncs.com/google_containers
  nodeRegistration:
    kubeletExtraArgs:
      pod-infra-container-image: registry.aliyuncs.com/google_containers/pause:3.9
nodes:
- role: control-plane
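# Create the cluster from the config above; the cluster name ansible is assumed here
# (it matches the node name ansible-control-plane shown below) and --image reuses the image pulled earlier
kind create cluster --name ansible --image kindest/node:v1.27.3 --config kind-config.yaml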

[root@Ansible ~]# kubectl get nodes
NAME                    STATUS   ROLES           AGE     VERSION
ansible-control-plane   Ready    control-plane   4m32s   v1.27.3

[root@Kind ~]# kubectl get ns
NAME                 STATUS   AGE
default              Active   3m56s
kube-node-lease      Active   3m56s
kube-public          Active   3m56s
kube-system          Active   3m56s
local-path-storage   Active   3m49s
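# Optional check that the kube-system pods are running (and were pulled from the mirror configured above)
kubectl get pods -n kube-system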

Deployment test

# Deploy an nginx pod
[root@Kind ~]# kubectl run pod1 --image=nginx --image-pull-policy=IfNotPresent
pod/pod1 created
# Check the log output
[root@Kind ~]# kubectl logs pod1
/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
/docker-entrypoint.sh: Sourcing /docker-entrypoint.d/15-local-resolvers.envsh
/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
/docker-entrypoint.sh: Configuration complete; ready for start up
2023/10/17 04:59:39 [notice] 1#1: using the "epoll" event method
2023/10/17 04:59:39 [notice] 1#1: nginx/1.25.2
2023/10/17 04:59:39 [notice] 1#1: built by gcc 12.2.0 (Debian 12.2.0-14)
2023/10/17 04:59:39 [notice] 1#1: OS: Linux 5.14.0-284.30.1.el9_2.x86_64
2023/10/17 04:59:39 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1073741816:1073741816
2023/10/17 04:59:39 [notice] 1#1: start worker processes
2023/10/17 04:59:39 [notice] 1#1: start worker process 35
2023/10/17 04:59:39 [notice] 1#1: start worker process 36
2023/10/17 04:59:39 [notice] 1#1: start worker process 37
2023/10/17 04:59:39 [notice] 1#1: start worker process 38
# Create a Deployment with 3 replicas
[root@Kind ~]# kubectl create deployment web1 --image=nginx --replicas=3
# Check the result
[root@Kind ~]# kubectl get pods
NAME                   READY   STATUS    RESTARTS   AGE
pod1                   1/1     Running   0          7m26s
web1-c5d84d68d-5x9ng   1/1     Running   0          2m5s
web1-c5d84d68d-pgzp4   1/1     Running   0          2m5s
web1-c5d84d68d-xz78k   1/1     Running   0          2m21s
[root@Kind ~]# kubectl get deployments.apps
NAME   READY   UP-TO-DATE   AVAILABLE   AGE
web1   3/3     3            3           2m30s
# Expose the Deployment on port 80 as a NodePort service
[root@Kind ~]# kubectl expose deployment web1 --name=svc1 --port=80 --type=NodePort
service/svc1 exposed
# Port 30403 on the node forwards to port 80 of the service
[root@Kind ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        18m
svc1         NodePort    10.96.235.184   <none>        80:30403/TCP   3m11s
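# Because the kind "node" is itself a Docker container, the NodePort above is only reachable at the
# node container's address, not on the host's own interfaces. A quick test sketch
# (the container name ansible-control-plane comes from the node list above):
NODE_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ansible-control-plane)
curl http://${NODE_IP}:30403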

# Forward the service port to the local host
[root@Kind ~]# kubectl port-forward services/svc1 8080:80
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
Handling connection for 8080
# By default this only binds to 127.0.0.1, and a local port above 3000 should be used
[root@Kind ~]# curl 127.0.0.1:8080
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
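# If the forwarded port must be reachable from other machines, kubectl port-forward can bind to other
# addresses via its --address flag; a sketch (8080 is just an example local port):
kubectl port-forward --address 0.0.0.0 services/svc1 8080:80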

Other notes

  1. Kind really does make deploying k8s quick and convenient; in essence it runs a Docker container with Kubernetes installed inside it on top of Docker, which also means Docker or Podman is required as the host runtime.
  2. Kind is aimed at local deployment and testing, so many features are incomplete and have to be filled in as needed, which makes things less convenient later on.
  3. Network configuration is still fairly cumbersome: to serve traffic externally you typically need an extra Nginx layer proxying to 127.0.0.1, or a host port mapping in the kind config (see the sketch after this list).
  4. For purely local single-machine use it works fine; using it as a single-node k8s cluster serving other hosts is not recommended.
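As an alternative to an extra Nginx layer, kind can map a host port to a node-container port at cluster-creation time through extraPortMappings in its config. A minimal sketch, assuming the NodePort 30403 from above and an example host port 8080 (the cluster name hostport-demo is made up):

cat <<EOF > kind-hostport.yaml
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 30403   # NodePort inside the node container
    hostPort: 8080         # port exposed on the host machine
    protocol: TCP
EOF
kind create cluster --name hostport-demo --config kind-hostport.yaml

Traffic hitting port 8080 on the host then reaches port 30403 on the node container, where a NodePort service can listen.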

References

  1. kind: Kubernetes in Docker, the best way to run a Kubernetes cluster on a single machine
  2. Creating a K8s cluster with Kind in an offline environment