Rocky 9 Kubernetes 1.29 Deployment


# kubectl bash completion (run after kubectl is installed in section 3.2)
yum install -y bash-completion
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc

# Alternative mirrors for the "registry-mirrors" key in /etc/docker/daemon.json
"registry-mirrors": [
  "http://hub-mirror.c.163.com/",
  "http://mirrors.ustc.edu.cn/",
  "https://cr.console.aliyun.com/",
  "https://quay.io/repository/",
  "http://mirror.azure.cn/",
  "https://mirror.baidubce.com/",
  "https://ccr.ccs.tencentyun.com/"
]

# Service startup order
systemd > docker > cri-docker > kubelet > pods
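Once everything below is installed, a quick way to confirm this chain is enabled and running (a minimal sketch using the unit names from this guide):

for svc in docker cri-docker kubelet; do
  # print enablement and runtime state for each unit in the chain
  printf '%-12s enabled=%s active=%s\n' "$svc" \
    "$(systemctl is-enabled "$svc" 2>/dev/null)" \
    "$(systemctl is-active  "$svc" 2>/dev/null)"
done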

master
10.0.17.100

node1
10.0.17.101

node2
10.0.17.102

1. Environment initialization

1.1 Configure the master NIC

vi /etc/NetworkManager/system-connections/ens160.nmconnection
[ipv4]
address1=10.0.17.100/24,10.0.17.2
dns=223.5.5.5;114.114.114.114;
method=manual


systemctl restart NetworkManager
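To confirm the static address took effect after the restart (simple check; assumes the connection profile is named ens160):

nmcli connection show ens160 | grep -E 'ipv4.method|ipv4.addresses|ipv4.dns'
ip addr show ens160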

1.2 Switch the Rocky system package repositories to a mirror

sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
    -i.bak \
    /etc/yum.repos.d/[Rr]ocky*.repo

dnf makecache

1.3 Replace firewalld with iptables

systemctl stop firewalld
systemctl disable firewalld

yum -y install iptables-services
systemctl start iptables
iptables -F
systemctl enable iptables
service iptables save

1.4 Disable SELinux

setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
grubby --update-kernel ALL --args selinux=0
# Check whether it is disabled: grubby --info DEFAULT
# Roll back the kernel-level change: grubby --update-kernel ALL --remove-args selinux

1.5 Set the timezone and configure NTP

timedatectl set-timezone Asia/Shanghai

dnf install -y chrony
vi /etc/chrony.conf
server ntp1.aliyun.com iburst
server ntp2.aliyun.com iburst
systemctl restart chronyd && systemctl enable chronyd
chronyc sources -n

1.6 Disable the swap partition

swapoff -a
sed -i 's:/dev/mapper/rl-swap:#/dev/mapper/rl-swap:g' /etc/fstab
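Verify that swap is really off (no output from swapon means no active swap):

swapon --show
free -h | grep -i swap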

1.7 Install ipvs

yum install -y ipvsadm
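kube-proxy's ipvs mode (enabled later in 5.4.5) also needs the ipvs kernel modules loaded; a hedged sketch for loading them now and at boot (module names assumed for a Rocky 9 / el9 kernel):

cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do modprobe $m; done
lsmod | grep -e ip_vs -e nf_conntrack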

1.8 Enable IP forwarding

echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.conf
sysctl -p

1.9 Load the bridge module

yum install -y epel-release
yum install -y bridge-utils

modprobe br_netfilter
echo 'br_netfilter' >> /etc/modules-load.d/bridge.conf
echo 'net.bridge.bridge-nf-call-iptables=1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables=1' >> /etc/sysctl.conf
sysctl -p
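Confirm the module and the sysctl values are in effect (simple check):

lsmod | grep br_netfilter
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables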

2. Install Docker

2.1 Add the docker-ce yum repository (Aliyun)

sudo dnf config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
cd /etc/yum.repos.d

2.2 Point the repo at the Aliyun mirror

sed -i 's|download.docker.com|mirrors.aliyun.com/docker-ce|g' docker-ce.repo

2.3 Install docker-ce

yum -y install docker-ce

2.4 Configure the Docker daemon (daemon.json: registry mirror, cgroup driver, log options)

cat > /etc/docker/daemon.json <<EOF
{
  "data-root": "/data/docker",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "100"
  },
  "insecure-registries": ["harbor.xinxainghf.com"],
  "registry-mirrors": ["https://kfp63jaj.mirror.aliyuncs.com"]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d

2.5 Restart the Docker service and enable it at boot

systemctl daemon-reload && systemctl restart docker && systemctl enable docker
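A quick sanity check that the daemon picked up the settings above (field names may vary slightly between Docker versions):

docker info | grep -E -A1 'Cgroup Driver|Docker Root Dir|Registry Mirrors'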

reboot

2.6 Install cri-dockerd

The kubelet talks to container runtimes through the CRI standard,
so cri-dockerd has to be installed when Docker is the runtime.

cri-dockerd is an implementation of the Container Runtime Interface (CRI) that lets Kubernetes interact with the Docker container engine. It allows Kubernetes to manage and schedule Docker containers: as a CRI implementation that runs alongside Docker Engine, it integrates Docker Engine into a Kubernetes cluster as the container runtime.

cri-dockerd works by implementing the CRI interface: one side speaks CRI to the kubelet, the other side speaks the Docker API to the Docker daemon, which indirectly lets Kubernetes use Docker as its container runtime. This keeps Docker as the container engine while remaining CRI-compliant.

# Download, unpack, and install cri-dockerd
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9.amd64.tgz
tar -xf cri-dockerd-0.3.9.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd
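Quick check that the binary runs and reports the expected release:

cri-dockerd --version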

2.7 Configure the cri-docker service

cat <<"EOF" > /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF


# ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8
# Breakdown of the start command:
# ExecStart=/usr/bin/cri-dockerd                 the executable
# --network-plugin=cni                           use CNI (Container Network Interface) as the network plugin
# --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8   registry address and tag of the pod infra (pause) image

2.8 Add the cri-docker socket

cat <<"EOF" > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF

2.9 Start the cri-docker service

systemctl daemon-reload
systemctl enable cri-docker
systemctl start cri-docker
systemctl is-active cri-docker
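Optionally point crictl (installed later with the Kubernetes packages in section 3) at the new socket to verify the runtime answers CRI calls; a hedged sketch:

cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/cri-dockerd.sock
image-endpoint: unix:///run/cri-dockerd.sock
EOF
crictl info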

3. Install Kubernetes

3.1 Add the Kubernetes yum repository

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

# Download the required packages and put them in a single directory
  conntrack-tools-1.4.7-2.el9.x86_64                                     
  cri-tools-1.29.0-150500.1.1.x86_64                                     
  kubeadm-1.29.0-150500.1.1.x86_64                                       
  kubectl-1.29.0-150500.1.1.x86_64                                       
  kubelet-1.29.0-150500.1.1.x86_64                                       
  kubernetes-cni-1.3.0-150500.1.1.x86_64                                 
  libnetfilter_cthelper-1.0.0-22.el9.x86_64                              
  libnetfilter_cttimeout-1.0.0-19.el9.x86_64                             
  libnetfilter_queue-1.0.5-1.el9.x86_64                                  
  socat-1.7.4.1-5.el9_4.2.x86_64

yum -y install ./*.rpm   # run inside the directory containing the downloaded RPMs

# If pkgs.k8s.io is unreachable, use the Tsinghua mirror instead
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
EOF

3.2 Install kubeadm 1.29

yum install -y kubelet-1.29.0 kubectl-1.29.0 kubeadm-1.29.0 --disableexcludes=kubernetes   # --disableexcludes is needed if the repo file above sets exclude=
systemctl enable kubelet.service
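Confirm the installed versions match before initializing the cluster:

kubeadm version -o short
kubelet --version
kubectl version --client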

4. Clone the VMs in VMware and change their NICs; run kubeadm init on the master and join the worker nodes

master
10.0.17.100

node1
10.0.17.101

node2
10.0.17.102

4.1 Passwordless SSH login

ssh-keygen -f /root/.ssh/id_rsa -N ''
for i in node1 node2; do ssh-copy-id $i; done;
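The loop above addresses the nodes by hostname, so each host needs name resolution for master/node1/node2; a minimal /etc/hosts sketch, assuming the addresses planned above:

cat >> /etc/hosts <<EOF
10.0.17.100 master
10.0.17.101 node1
10.0.17.102 node2
EOF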

4.2 Initialize the master node

kubeadm init --apiserver-advertise-address=10.0.17.100 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version 1.29.0 --service-cidr=10.10.0.0/12 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock


[init] Using Kubernetes version: v1.29.0
[preflight] Running pre-flight checks
	[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
W0820 11:27:36.889288    1764 checks.go:835] detected that the sandbox image "registry.aliyuncs.com/google_containers/pause:3.8" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.aliyuncs.com/google_containers/pause:3.9" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master] and IPs [10.0.0.1 192.168.10.100]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master] and IPs [192.168.10.100 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master] and IPs [192.168.10.100 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 11.018553 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: v9bqbb.wwuk9s8ytn0xaefn
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.10.100:6443 --token v9bqbb.wwuk9s8ytn0xaefn \
	--discovery-token-ca-cert-hash sha256:496eeb8ea938e1919782e25b5f9a74f71f1460900a1c7ae3e0b62d4ffb68271a 
    

4.3 Join the worker nodes

[root@node1 ~]# kubeadm join 192.168.10.100:6443 --token v9bqbb.wwuk9s8ytn0xaefn \
        --discovery-token-ca-cert-hash sha256:496eeb8ea938e1919782e25b5f9a74f71f1460900a1c7ae3e0b62d4ffb68271a 
Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock
To see the stack trace of this error execute with --v=5 or higher

# Error explanation

This error means there are multiple Container Runtime Interface (CRI) endpoints on the host, so kubeadm cannot tell which one to use. The fix is to state the CRI endpoint explicitly, either in the kubeadm configuration file or with the --cri-socket flag.

Solution:
[root@node1 ~]# find / -name cri-dockerd.sock
/run/cri-dockerd.sock
Add --cri-socket unix:///run/cri-dockerd.sock to the join command:


kubeadm join 192.168.10.100:6443 --token v9bqbb.wwuk9s8ytn0xaefn \
	--discovery-token-ca-cert-hash sha256:496eeb8ea938e1919782e25b5f9a74f71f1460900a1c7ae3e0b62d4ffb68271a \
    --cri-socket unix:///run/cri-dockerd.sock
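The bootstrap token above expires after 24 hours by default; to join a node later, print a fresh join command on the master:

kubeadm token create --print-join-command
# remember to append --cri-socket unix:///run/cri-dockerd.sock when running it on the node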

Check the nodes from the master

[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE    VERSION
master   NotReady   control-plane   7m5s   v1.29.0
node1    NotReady   <none>          42s    v1.29.0
node2    NotReady   <none>          28s    v1.29.0

5. Deploy the Calico network plugin

5.1 Download Calico 3.26.3

Documentation:
https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico-with-kubernetes-api-datastore-more-than-50-nodes
Download:
https://github.com/projectcalico/calico/releases?page=1

curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.3/manifests/calico-typha.yaml -o calico.yaml


# Edit CALICO_IPV4POOL_CIDR in the downloaded manifest
# kubeadm init --apiserver-advertise-address=192.168.10.100 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version 1.29.0 --service-cidr=10.10.0.0/12 --pod-network-cidr=10.244.0.0/16 --ignore-preflight-errors=all --cri-socket unix:///var/run/cri-dockerd.sock
# It must match the pod CIDR given to kubeadm init (--pod-network-cidr=10.244.0.0/16), i.e. 10.244.0.0/16

5.2 Load the Calico images into Docker on every node


tar -zxvf calico-images.tar.gz
cd calico-images
docker load -i calico-cni-v3.26.3.tar
docker load -i calico-node-v3.26.3.tar
docker load -i calico-kube-controllers-v3.26.3.tar
docker load -i calico-typha-v3.26.3.tar


scp -r calico-images root@node1:~
scp -r calico-images root@node2:~

ssh node1
cd calico-images/
docker load -i calico-cni-v3.26.3.tar
docker load -i calico-node-v3.26.3.tar
docker load -i calico-kube-controllers-v3.26.3.tar
docker load -i calico-typha-v3.26.3.tar


ssh node2
cd calico-images/
docker load -i calico-cni-v3.26.3.tar
docker load -i calico-node-v3.26.3.tar
docker load -i calico-kube-controllers-v3.26.3.tar
docker load -i calico-typha-v3.26.3.tar
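To confirm the images landed on both workers without logging in again (a small sketch, assuming the passwordless SSH set up in 4.1):

for h in node1 node2; do
  echo "== $h =="
  ssh $h "docker images | grep calico"
done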

5.3 Edit calico-typha.yaml

vim calico-typha.yaml
# Match the pod CIDR passed to kubeadm init on the master (--pod-network-cidr=10.244.0.0/16)
# Set CALICO_IPV4POOL_CIDR to the same value
            - name: CALICO_IPV4POOL_CIDR
              value: "10.244.0.0/16"

# Switch to BGP mode
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
  value: "Always"  # change to "Off"

kubectl apply -f calico-typha.yaml


[root@master calico]# kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   48m   v1.29.0
node1    Ready    <none>          41m   v1.29.0
node2    Ready    <none>          41m   v1.29.0



[root@master calico]# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-558d465845-nkgfd   1/1     Running   0          55s
kube-system   calico-node-bn82m                          1/1     Running   0          55s
kube-system   calico-node-v4v2g                          1/1     Running   0          55s
kube-system   calico-node-x4s64                          1/1     Running   0          55s
kube-system   calico-typha-5b56944f9b-nztct              1/1     Running   0          55s
kube-system   coredns-857d9ff4c9-664p4                   1/1     Running   0          48m
kube-system   coredns-857d9ff4c9-66qg5                   1/1     Running   0          48m
kube-system   etcd-master                                1/1     Running   0          48m
kube-system   kube-apiserver-master                      1/1     Running   0          48m
kube-system   kube-controller-manager-master             1/1     Running   0          48m
kube-system   kube-proxy-8s274                           1/1     Running   0          42m
kube-system   kube-proxy-kntdl                           1/1     Running   0          42m
kube-system   kube-proxy-nqhhx                           1/1     Running   0          48m
kube-system   kube-scheduler-master                      1/1     Running   0          48m

5.4 Pin the auto-detected interface (optional) in calico-typha.yaml

5.4.1 Reachable target IP or domain
# choose the interface that can reach the given IP or domain
       - name: calico-node
         image: registry.geoway.com/calico/node:v3.19.1
         env:
           # Auto-detect the BGP IP address.
           - name: IP
             value: "autodetect"
           - name: IP_AUTODETECTION_METHOD
             value: "can-reach=www.google.com"
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=can-reach=www.google.com
5.4.2 Match a target interface
# choose the interface whose name matches the pattern
- name: calico-node
  image: registry.geoway.com/calico/node:v3.19.1
  env:
    # Auto-detect the BGP IP address.
    - name: IP
      value: "autodetect"
    - name: IP_AUTODETECTION_METHOD
      value: "interface=eth.*"
5.4.3 Exclude matching interfaces
# skip interfaces whose names match the pattern
- name: calico-node
  image: registry.geoway.com/calico/node:v3.19.1
  env:
    # Auto-detect the BGP IP address.
    - name: IP
      value: "autodetect"
    - name: IP_AUTODETECTION_METHOD
      value: "skip-interface=eth.*"
5.4.4 CIDR
# choose the interface whose address falls inside the given CIDR
- name: calico-node
  image: registry.geoway.com/calico/node:v3.19.1
  env:
    # Auto-detect the BGP IP address.
    - name: IP
      value: "autodetect"
    - name: IP_AUTODETECTION_METHOD
      value: "cidr=192.168.200.0/24,172.15.0.0/24"
5.4.5 Change the kube-proxy mode to ipvs
# kubectl edit configmap kube-proxy -n kube-system
mode: ipvs

kubectl delete pod -n kube-system -l k8s-app=kube-proxy
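After the kube-proxy pods are recreated, verify that ipvs is actually in use (assumes the ipvs kernel modules from 1.7 are loaded):

ipvsadm -Ln | head
kubectl -n kube-system logs -l k8s-app=kube-proxy --tail=100 | grep -i ipvs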