Installing Kubernetes 1.26 with kubeadm on CentOS 7.9

1. Deploying Kubernetes 1.26 with kubeadm

This deployment uses a test environment with one master and two worker nodes:

# All nodes run a minimal CentOS 7.9 install
10.0.0.190 master1.k8s.local
10.0.0.191 node01-k8s.local
10.0.0.192 node02-k8s.local

1. Infrastructure preparation

1.1 Set the hostnames

[root@localhost ~]# hostnamectl set-hostname master1.k8s.local
[root@localhost ~]# hostnamectl set-hostname node01-k8s.local
[root@localhost ~]# hostnamectl set-hostname node02-k8s.local

1.2 Configure the yum mirror

Aliyun mirror site: CentOS mirror, download address, and installation guide - Alibaba Open Source Mirror Site

# Update the repo on every node
[root@master1 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

1.3 Install required tools

# Install on every node
[root@master1 ~]# yum install vim wget lrzsz net-tools -y

1.4 Configure hosts resolution

# Run on every node
[root@master1 ~]# cat >> /etc/hosts << EOF
> 10.0.0.190 master1.k8s.local
> 10.0.0.191 node01-k8s.local
> 10.0.0.192 node02-k8s.local
> EOF
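
A quick resolution check can save debugging later; a minimal sketch, assuming getent and ping are available on the minimal install:

# Verify that the names resolve on each node
getent hosts node01-k8s.local
ping -c 1 node02-k8s.local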

1.5 Configure time synchronization

# Install on every node
[root@master1 ~]# yum -y install ntpdate
# Sync the time on every node
[root@master1 ~]# ntpdate ntp1.aliyun.com
 6 Apr 20:41:05 ntpdate[1890]: adjust time server 120.25.115.20 offset -0.008733 sec
[root@node01-k8s ~]# ntpdate ntp1.aliyun.com
 6 Apr 20:41:08 ntpdate[4797]: adjust time server 120.25.115.20 offset -0.001972 sec
[root@node02-k8s yum.repos.d]# ntpdate ntp1.aliyun.com
 6 Apr 20:41:10 ntpdate[9934]: adjust time server 120.25.115.20 offset 0.003773 sec
# Enable at boot
[root@master1 ~]#  systemctl enable --now ntpdate
Created symlink from /etc/systemd/system/multi-user.target.wants/ntpdate.service to /usr/lib/systemd/system/ntpdate.service.
# Check the status
[root@master1 ~]# systemctl status ntpdate
● ntpdate.service - Set time via NTP
   Loaded: loaded (/usr/lib/systemd/system/ntpdate.service; disabled; vendor preset: disabled)
   Active: active (exited) since Thu 2023-04-06 20:42:02 CST; 5s ago
  Process: 1898 ExecStart=/usr/libexec/ntpdate-wrapper (code=exited, status=0/SUCCESS)
 Main PID: 1898 (code=exited, status=0/SUCCESS)
Apr 06 20:41:58 master1.k8s.local systemd[1]: Starting Set time via NTP...
Apr 06 20:42:02 master1.k8s.local systemd[1]: Started Set time via NTP.
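
ntpdate performs a one-shot sync, so clocks will drift between runs. One common approach is a cron entry on every node; a minimal sketch, where the 30-minute interval is just an example:

# Re-sync against the Aliyun NTP server every 30 minutes
(crontab -l 2>/dev/null; echo '*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1') | crontab -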

1.6 Disable the firewall

# Run on every node
[root@master1 ~]# systemctl stop firewalld
[root@master1 ~]# systemctl disable firewalld

1.7 Disable SELinux

# Run on every node: disable permanently in the config, then turn enforcement off for the running system
[root@master1 ~]# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
[root@node01-k8s ~]# setenforce 0
setenforce: SELinux is disabled

1.8 Disable swap

# Run on every node: comment out the swap entry in fstab, then turn swap off immediately
[root@master1 ~]# sed -ri 's@(.*swap.*)@#\1@g' /etc/fstab
[root@master1 ~]# swapoff -a
[root@node01-k8s ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:            972         100         780           7          91         752
Swap:             0           0           0

1.9 System tuning (sysctl)

# Run on every node
[root@master1 sysctl.d]# cat <<EOF >  /etc/sysctl.d/k8s.conf 
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
vm.swappiness=0
EOF

[root@master1 sysctl.d]# sysctl --system
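
The net.bridge.* keys above only exist once the br_netfilter kernel module is loaded, so sysctl --system may warn about unknown keys without it. A typical companion step on every node (module list as in the kubeadm documentation; re-apply the sysctl settings afterwards):

# Load the modules now and on every boot
cat <<EOF > /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
sysctl --system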

2.2 Cluster-level preparation

2.2.1 Install containerd (via docker-ce)

Aliyun mirror site: docker-ce mirror, download address, and installation guide - Alibaba Open Source Mirror Site

# Step 1: install required system tools
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repo definition
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: point the repo at the Aliyun mirror
sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Step 4: refresh the cache and install Docker CE
sudo yum makecache fast
sudo yum -y install docker-ce
# Step 5: start the Docker service
sudo service docker start

# Note:
# The official repo only enables the latest stable packages by default. Other channels
# (for example the test builds) can be enabled by editing the repo file:
# vim /etc/yum.repos.d/docker-ce.repo
#   change enabled=0 to enabled=1 under [docker-ce-test]

# List the Docker versions available for install
[root@master1 yum.repos.d]# yum list docker-ce.x86_64 --showduplicates | sort -r

# Install docker-ce on every node
[root@node01-k8s ~]# yum install docker-ce -y
[root@node01-k8s ~]# docker --version
Docker version 23.0.3, build 3e7cbfd
# Enable and start docker on every node
[root@node01-k8s ~]# systemctl enable --now docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

# docker-ce pulls in containerd alongside it; check the Docker service status first
[root@node02-k8s ~]# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2023-04-06 21:19:55 CST; 2min 6s ago
     Docs: https://docs.docker.com
 Main PID: 9573 (dockerd)
    Tasks: 12
   Memory: 32.0M
   CGroup: /system.slice/docker.service
           └─9573 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

2.2.2 Configure containerd

# Run on every node
[root@master1 docker]# cp /etc/containerd/config.toml /etc/containerd/config.toml.bak
[root@master1 docker]# containerd config default > $HOME/config.toml
[root@master1 docker]# cp $HOME/config.toml /etc/containerd/config.toml

# Replace the default pause image registry (registry.k8s.io) with the Aliyun mirror
[root@master1 ~]# sed -i "s#registry.k8s.io/pause#registry.cn-hangzhou.aliyuncs.com/google_containers/pause#g" /etc/containerd/config.toml

# Switch the cgroup driver to systemd
[root@master1 ~]# sed -i "s#SystemdCgroup = false#SystemdCgroup = true#g" /etc/containerd/config.toml

# After editing, restart containerd and Docker, then enable them at boot
[root@node02-k8s ~]# systemctl stop containerd
[root@node02-k8s ~]# systemctl start docker
[root@node02-k8s ~]# systemctl start containerd
[root@node02-k8s ~]# systemctl status docker
● docker.service - Docker Application Container Engine
   Loaded: loaded (/usr/lib/systemd/system/docker.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2023-04-06 21:36:42 CST; 10s ago
     Docs: https://docs.docker.com
 Main PID: 9822 (dockerd)
    Tasks: 11
   Memory: 32.8M
   CGroup: /system.slice/docker.service
           └─9822 /usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

Apr 06 21:36:41 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:41.900505121+08:00" level=info msg="[core] [Channel #4] Channel Connectivity change to READY" module=grpc
Apr 06 21:36:41 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:41.919623148+08:00" level=info msg="[graphdriver] using prior storage driver: overlay2"
Apr 06 21:36:41 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:41.921002107+08:00" level=info msg="Loading containers: start."
Apr 06 21:36:41 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:41.997106808+08:00" level=info msg="Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daem... IP address"
Apr 06 21:36:42 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:42.029384939+08:00" level=info msg="Loading containers: done."
Apr 06 21:36:42 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:42.043768432+08:00" level=info msg="Docker daemon" commit=59118bf graphdriver=overlay2 version=23.0.3
Apr 06 21:36:42 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:42.043818314+08:00" level=info msg="Daemon has completed initialization"
Apr 06 21:36:42 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:42.054672362+08:00" level=info msg="[core] [Server #7] Server created" module=grpc
Apr 06 21:36:42 node02-k8s.local systemd[1]: Started Docker Application Container Engine.
Apr 06 21:36:42 node02-k8s.local dockerd[9822]: time="2023-04-06T21:36:42.058500647+08:00" level=info msg="API listen on /run/docker.sock"
Hint: Some lines were ellipsized, use -l to show in full.
[root@node02-k8s ~]# systemctl enable --now containerd
Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /usr/lib/systemd/system/containerd.service.


# Check containerd status
[root@master1 ~]# systemctl status containerd
● containerd.service - containerd container runtime
   Loaded: loaded (/usr/lib/systemd/system/containerd.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2023-04-06 21:44:14 CST; 2min 58s ago
     Docs: https://containerd.io
 Main PID: 1856 (containerd)
   CGroup: /system.slice/containerd.service
           └─1856 /usr/bin/containerd

Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.636131568+08:00" level=error msg="failed to load cni during init, please check CRI plugin status before ... cni config"
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.636319460+08:00" level=info msg=serving... address=/run/containerd/containerd.sock.ttrpc
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.636345703+08:00" level=info msg=serving... address=/run/containerd/containerd.sock
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.636379755+08:00" level=info msg="containerd successfully booted in 0.028274s"
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.663131076+08:00" level=info msg="Start subscribing containerd event"
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.663218820+08:00" level=info msg="Start recovering state"
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.663284650+08:00" level=info msg="Start event monitor"
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.663298264+08:00" level=info msg="Start snapshots syncer"
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.663308237+08:00" level=info msg="Start cni network conf syncer for default"
Apr 06 21:44:14 master1.k8s.local containerd[1856]: time="2023-04-06T21:44:14.663314510+08:00" level=info msg="Start streaming server"

2.2.3 Configure a Docker registry mirror

# Run on every node
[root@master1 ~]# mkdir -p /etc/docker && tee /etc/docker/daemon.json <<-'EOF'
> {
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "registry-mirrors":[
>    "https://b4xxxxxz.mirror.aliyuncs.com"
> ]
> }
> EOF
[root@master1 ~]# systemctl daemon-reload && systemctl restart docker
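
docker info reports whether the mirror and cgroup driver were picked up after the restart:

[root@master1 ~]# docker info | grep -A 2 -E "Cgroup Driver|Registry Mirrors"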

2.2.4 Install kubeadm, kubelet, and kubectl
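
The install below assumes a Kubernetes yum repo is already in place on every node. A commonly used Aliyun repo definition (GPG checks relaxed to match the --nogpgcheck install below) looks like this:

# Run on every node
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF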

# Run on all nodes (kubectl is optional on worker nodes)
[root@master1 sysctl.d]# yum install -y kubelet-1.26.1-0 kubeadm-1.26.1-0 kubectl-1.26.1-0 --disableexcludes=kubernetes --nogpgcheck

[root@node01-k8s ~]#  yum install -y kubelet-1.26.1-0 kubeadm-1.26.1-0 kubectl-1.26.1-0 --disableexcludes=kubernetes --nogpgcheck

# Enable kubelet at boot. Until the cluster is initialized, kubelet keeps failing and restarting; the errors disappear once the cluster is up.
[root@master1 yum.repos.d]# systemctl daemon-reload 
[root@master1 yum.repos.d]# systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

2.2.5 Initialize the cluster
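
Optionally, the control-plane images can be pre-pulled first, which mirrors the hint in the preflight output below. A sketch; the explicit version pin is an assumption, and without it kubeadm targets the latest 1.26 patch release:

kubeadm config images pull \
  --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
  --kubernetes-version=v1.26.1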

# --image-repository: the default registry.k8s.io may be unreachable; switch to the Aliyun mirror
# --apiserver-advertise-address: the address the API server advertises for cluster communication (10.0.0.190)
# Other options (Service CIDR, token TTL, and so on) can also be set here; they are not all listed.
[root@master1 ~]# kubeadm init --image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
> --apiserver-advertise-address=10.0.0.190 \
> --pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.26.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master1.k8s.local] and IPs [10.96.0.1 10.0.0.190]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master1.k8s.local] and IPs [10.0.0.190 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master1.k8s.local] and IPs [10.0.0.190 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 6.502066 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master1.k8s.local as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master1.k8s.local as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: p2z4wj.bjgrywq1r1jkhofq
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
# The cluster now reports a successful deployment; the essential add-ons CoreDNS and kube-proxy were applied above.
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:
# Create the kubeconfig as follows.
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:
# Join the worker nodes to the cluster with the following command.
kubeadm join 10.0.0.190:6443 --token p2z4wj.bjgrywq1r1jkhofq \
	--discovery-token-ca-cert-hash sha256:962da7d86085f7fd413488e1d6aaafb90768eb9a3cfbf0ac20440ac40f09fb2b 
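
The bootstrap token above is valid for 24 hours by default. If it expires before a worker joins, a fresh join command can be printed on the master with:

[root@master1 ~]# kubeadm token create --print-join-command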

2.2.6 Check the nodes

# Nodes show NotReady because the network add-on is not installed yet
[root@master1 ~]# kubectl get nodes
NAME                STATUS     ROLES           AGE     VERSION
master1.k8s.local   NotReady   control-plane   5m31s   v1.26.1

2.2.7 Join the worker nodes to the cluster

[root@node01-k8s ~]# kubeadm join 10.0.0.190:6443 --token p2z4wj.bjgrywq1r1jkhofq --discovery-token-ca-cert-hash sha256:962da7d86085f7fd413488e1d6aaafb90768eb9a3cfbf0ac20440ac40f09fb2b 
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

2.2.8 Verify that all worker nodes joined the cluster

[root@master1 ~]# kubectl get nodes
NAME                STATUS     ROLES           AGE     VERSION
master1.k8s.local   NotReady   control-plane   10m     v1.26.1
node01-k8s.local    NotReady   <none>          2m14s   v1.26.1
node02-k8s.local    NotReady   <none>          7s      v1.26.1

2.2.9 Install the flannel network plugin

flannel official site: GitHub - flannel-io/flannel: flannel is a network fabric for containers, designed for Kubernetes

[root@master1 ~]# wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
[root@master1 ~]# kubectl apply -f kube-flannel.yml 
namespace/kube-flannel created
serviceaccount/flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
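
kube-flannel.yml ships with Network set to 10.244.0.0/16 in its net-conf.json, which matches the --pod-network-cidr passed to kubeadm init above; with a different pod CIDR, the ConfigMap in the manifest would need editing first. The relevant fragment looks roughly like:

  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }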

# Check the flannel pods
[root@master1 flannel]# kubectl get pods -n kube-flannel
NAME                    READY   STATUS    RESTARTS        AGE
kube-flannel-ds-75gdd   1/1     Running   1 (2m51s ago)   7m27s
kube-flannel-ds-7qmpf   1/1     Running   1 (2m28s ago)   7m27s
kube-flannel-ds-954fp   1/1     Running   1 (92s ago)     7m27s

2.2.10 Check the cluster status again

# All nodes now report Ready
[root@master1 ~]# kubectl get nodes
NAME                STATUS   ROLES           AGE     VERSION
master1.k8s.local   Ready    control-plane   18m     v1.26.1
node01-k8s.local    Ready    <none>          10m     v1.26.1
node02-k8s.local    Ready    <none>          8m19s   v1.26.1
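
As a further sanity check, the add-on pods in kube-system (CoreDNS, kube-proxy, and the control-plane components) should all be Running:

[root@master1 ~]# kubectl get pods -n kube-system -o wide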

2.2.11 Configure a Mac to use the Kubernetes cluster

2.2.11.1 Install kubectl on the Mac

# Download the darwin/amd64 kubectl binary, verify its checksum, and install it
sh-3.2# curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
sh-3.2# curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl.sha256"
sh-3.2# echo "$(cat kubectl.sha256)  kubectl" | shasum -a 256 --check
sh-3.2# chmod +x ./kubectl
sh-3.2# sudo mv ./kubectl /usr/local/bin/kubectl
sh-3.2# sudo chown root: /usr/local/bin/kubectl
sh-3.2# kubectl version --client
sh-3.2# rm kubectl.sha256

2.2.11.2 Prepare the kubeconfig on the Mac

# The kubeconfig is the same admin.conf file used on the cluster itself.
sh-3.2# mkdir -p $HOME/.kube
# Copy the config file over from the master node (scp shown here; any transfer method works)
sh-3.2# scp root@10.0.0.190:/etc/kubernetes/admin.conf $HOME/.kube/config
sh-3.2# sudo chown $(id -u):$(id -g) $HOME/.kube/config

2.2.11.3 Verify access

sh-3.2# kubectl get nodes
NAME                STATUS   ROLES           AGE   VERSION
master1.k8s.local   Ready    control-plane   31m   v1.26.1
node01-k8s.local    Ready    <none>          30m   v1.26.1
node02-k8s.local    Ready    <none>          30m   v1.26.1

2.2.11.4 Test from the Mac by creating a namespace

sh-3.2# cat hello-ns.yaml 
kind: Namespace
apiVersion: v1
metadata:
  name: hello
sh-3.2# kubectl apply -f hello-ns.yaml 
namespace/hello created

sh-3.2# kubectl get ns
NAME              STATUS   AGE
default           Active   49m
hello             Active   6s
kube-flannel      Active   46m
kube-node-lease   Active   49m
kube-public       Active   49m
kube-system       Active   49m


sh-3.2# kubectl get nodes -o wide
NAME                STATUS   ROLES           AGE   VERSION   INTERNAL-IP   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION           CONTAINER-RUNTIME
master1.k8s.local   Ready    control-plane   51m   v1.26.1   10.0.0.190    <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   containerd://1.6.20
node01-k8s.local    Ready    <none>          50m   v1.26.1   10.0.0.191    <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   containerd://1.6.20
node02-k8s.local    Ready    <none>          50m   v1.26.1   10.0.0.192    <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   containerd://1.6.20