systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld
3. Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
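The sed command only changes the configuration that is read at boot, while setenforce 0 turns enforcement off for the running session. As an optional check (not part of the original steps), something like the following should confirm both:

getenforce                            # expect Permissive now, Disabled after a reboot
grep '^SELINUX=' /etc/selinux/config  # expect SELINUX=disabled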
4. Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
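swapoff -a disables swap immediately, and the sed line comments out the swap entry in /etc/fstab so it stays off after a reboot (kubelet refuses to start with swap enabled by default). An optional verification, assuming standard util-linux tools, might look like:

swapon --show            # should print nothing
free -h | grep -i swap   # should report 0B of swap in use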
5. Download and install sealos
[root@master ~]# wget https://github.com/labring/sealos/releases/download/v4.1.3/sealos_4.1.3_linux_amd64.tar.gz && tar -zxvf sealos_4.1.3_linux_amd64.tar.gz sealos && chmod +x sealos && mv sealos /usr/bin
--2022-10-07 08:34:52--  https://github.com/labring/sealos/releases/download/v4.1.3/sealos_4.1.3_linux_amd64.tar.gz
Resolving github.com (github.com)... 20.205.243.166
Connecting to github.com (github.com)|20.205.243.166|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://objects.githubusercontent.com/github-production-release-asset-2e65be/144849757/2540d204-fdbc-4a77-82bb-43be13336aaa?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20221006%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20221006T163454Z&X-Amz-Expires=300&X-Amz-Signature=998546a102e7c3841682ad0decfec2a1eacdbc7dc45324da558e33ac35f4da1f&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=144849757&response-content-disposition=attachment%3B%20filename%3Dsealos_4.1.3_linux_amd64.tar.gz&response-content-type=application%2Foctet-stream [following]
--2022-10-07 08:34:53--  https://objects.githubusercontent.com/github-production-release-asset-2e65be/144849757/2540d204-fdbc-4a77-82bb-43be13336aaa?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20221006%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20221006T163454Z&X-Amz-Expires=300&X-Amz-Signature=998546a102e7c3841682ad0decfec2a1eacdbc7dc45324da558e33ac35f4da1f&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=144849757&response-content-disposition=attachment%3B%20filename%3Dsealos_4.1.3_linux_amd64.tar.gz&response-content-type=application%2Foctet-stream
Resolving objects.githubusercontent.com (objects.githubusercontent.com)... 185.199.110.133, 185.199.111.133, 185.199.108.133
Connecting to objects.githubusercontent.com (objects.githubusercontent.com)|185.199.110.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 43393801 (41M) [application/octet-stream]
Saving to: ‘sealos_4.1.3_linux_amd64.tar.gz’

100%[==========================================================================================================================>] 43,393,801  775KB/s  in 57s

2022-10-07 08:35:51 (740 KB/s) - ‘sealos_4.1.3_linux_amd64.tar.gz’ saved [43393801/43393801]
sealos
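Before creating the cluster it is worth confirming the binary is on the PATH; assuming the v4.1.3 release installed above, a quick optional check could be:

sealos version   # should report the 4.1.3 release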
6. Create the cluster
[root@master ~]# sealos run labring/kubernetes:v1.25.0 labring/helm:v3.8.2 labring/calico:v3.24.1 \
> --masters 192.168.1.200 \
> --nodes 192.168.1.201,192.168.1.202 -p 123456.com
2022-10-07T08:14:28 info Start to create a new cluster: master [192.168.1.200], worker [192.168.1.201 192.168.1.202]
2022-10-07T08:14:29 info Executing pipeline Check in CreateProcessor.
2022-10-07T08:14:29 info checker:hostname [192.168.1.200:22 192.168.1.201:22 192.168.1.202:22]
2022-10-07T08:14:29 info checker:timeSync [192.168.1.200:22 192.168.1.201:22 192.168.1.202:22]
2022-10-07T08:14:29 info Executing pipeline PreProcess in CreateProcessor.
Resolving "labring/kubernetes" using unqualified-search registries (/etc/containers/registries.conf)
Trying to pull docker.io/labring/kubernetes:v1.25.0...
Getting image source signatures
Copying blob 6d115e2e0121 done
Copying config 95b46cc017 done
Writing manifest to image destination
Storing signatures
95b46cc0171b273167730149f344874a9d57948f1d4b468c446d3dfea1d4c0c0
Resolving "labring/helm" using unqualified-search registries(/etc/containers/registries.conf)
Trying to pull docker.io/labring/helm:v3.8.2...
Getting image source signatures
Copying blob 53a6eade9e7e done
Copying config 1123e8b4b4 done
Writing manifest to image destination
Storing signatures
1123e8b4b455ed291f3ec7273af62e49458fe3dd141f5e7cb2a4243d6284deec
Resolving "labring/calico" using unqualified-search registries(/etc/containers/registries.conf)
Trying to pull docker.io/labring/calico:v3.24.1...
Getting image source signatures
Copying blob f9de59270f64 done
Copying config e2122fc58f done
Writing manifest to image destination
Storing signatures
e2122fc58fd32f1c93ac75da5c473aed746f1ad9b31a73d1f81a0579b96e775b
default-cuyoyi15
default-ia34qoi7
default-mbdsp8yo
2022-10-07T08:19:18 info Executing pipeline RunConfig in CreateProcessor.
2022-10-07T08:19:18 info Executing pipeline MountRootfs in CreateProcessor.
[1/1] copying files to 192.168.1.201:22
which: no docker in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin)
INFO[2022-10-07 08:19:48] >> check root,port,cri success
[1/1] copying files to 192.168.1.202:22
cp: cannot stat ‘../cri/nerdctl’: No such file or directory
Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /etc/systemd/system/containerd.service.
INFO[2022-10-07 08:20:11] >> Health check containerd!
INFO[2022-10-07 08:20:11] >> containerd is running
INFO[2022-10-07 08:20:11] >> init containerd success
Created symlink from /etc/systemd/system/multi-user.target.wants/image-cri-shim.service to /etc/systemd/system/image-cri-shim.service.
INFO[2022-10-07 08:20:11] >> Health check image-cri-shim!
INFO[2022-10-07 08:20:11] >> image-cri-shim is running
INFO[2022-10-07 08:20:11] >> init shim success
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.conf.all.rp_filter = 0
* Applying /etc/sysctl.conf ...
net.ipv4.ip_forward = 1
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
INFO[2022-10-07 08:20:13] >> init kube success
INFO[2022-10-07 08:20:13] >> init containerd rootfs success
192.168.1.201:22: which: no docker in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin)
192.168.1.201:22: INFO[2022-10-07 08:20:23] >> check root,port,cri success
192.168.1.201:22: cp: cannot stat ‘../cri/nerdctl’: No such file or directory
192.168.1.202:22: which: no docker in (/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin)
192.168.1.202:22: INFO[2022-10-07 08:20:31] >> check root,port,cri success
192.168.1.201:22: Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /etc/systemd/system/containerd.service.
192.168.1.201:22: INFO[2022-10-07 08:20:33] >> Health check containerd!
192.168.1.201:22: INFO[2022-10-07 08:20:33] >> containerd is running
192.168.1.201:22: INFO[2022-10-07 08:20:33] >> init containerd success
192.168.1.201:22: Created symlink from /etc/systemd/system/multi-user.target.wants/image-cri-shim.service to /etc/systemd/system/image-cri-shim.service.
192.168.1.201:22: INFO[2022-10-07 08:20:33] >> Health check image-cri-shim!
192.168.1.201:22: INFO[2022-10-07 08:20:33] >> image-cri-shim is running
192.168.1.201:22: INFO[2022-10-07 08:20:33] >> init shim success
192.168.1.201:22: * Applying /usr/lib/sysctl.d/00-system.conf ...
192.168.1.201:22: net.bridge.bridge-nf-call-ip6tables = 0
192.168.1.201:22: net.bridge.bridge-nf-call-iptables = 0
192.168.1.201:22: net.bridge.bridge-nf-call-arptables = 0
192.168.1.201:22: * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
192.168.1.201:22: kernel.yama.ptrace_scope = 0
192.168.1.201:22: * Applying /usr/lib/sysctl.d/50-default.conf ...
192.168.1.201:22: kernel.sysrq = 16
192.168.1.201:22: kernel.core_uses_pid = 1
192.168.1.201:22: kernel.kptr_restrict = 1
192.168.1.201:22: net.ipv4.conf.default.rp_filter = 1
192.168.1.201:22: net.ipv4.conf.all.rp_filter = 1
192.168.1.201:22: net.ipv4.conf.default.accept_source_route = 0
192.168.1.201:22: net.ipv4.conf.all.accept_source_route = 0
192.168.1.201:22: net.ipv4.conf.default.promote_secondaries = 1
192.168.1.201:22: net.ipv4.conf.all.promote_secondaries = 1
192.168.1.201:22: fs.protected_hardlinks = 1
192.168.1.201:22: fs.protected_symlinks = 1
192.168.1.201:22: * Applying /etc/sysctl.d/99-sysctl.conf ...
192.168.1.201:22: * Applying /etc/sysctl.d/k8s.conf ...
192.168.1.201:22: net.bridge.bridge-nf-call-ip6tables = 1
192.168.1.201:22: net.bridge.bridge-nf-call-iptables = 1
192.168.1.201:22: net.ipv4.conf.all.rp_filter = 0
192.168.1.201:22: * Applying /etc/sysctl.conf ...
192.168.1.201:22: net.ipv4.ip_forward = 1
192.168.1.201:22: Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
192.168.1.201:22: INFO[2022-10-07 08:20:35] >> init kube success
192.168.1.201:22: INFO[2022-10-07 08:20:35] >> init containerd rootfs success
192.168.1.202:22: cp: cannot stat ‘../cri/nerdctl’: No such file or directory
192.168.1.202:22: Created symlink from /etc/systemd/system/multi-user.target.wants/containerd.service to /etc/systemd/system/containerd.service.
192.168.1.202:22: INFO[2022-10-07 08:20:59] >> Health check containerd!
192.168.1.202:22: INFO[2022-10-07 08:20:59] >> containerd is running
192.168.1.202:22: INFO[2022-10-07 08:20:59] >> init containerd success
192.168.1.202:22: Created symlink from /etc/systemd/system/multi-user.target.wants/image-cri-shim.service to /etc/systemd/system/image-cri-shim.service.
192.168.1.202:22: INFO[2022-10-07 08:20:59] >> Health check image-cri-shim!
192.168.1.202:22: INFO[2022-10-07 08:20:59] >> image-cri-shim is running
192.168.1.202:22: INFO[2022-10-07 08:20:59] >> init shim success
192.168.1.202:22: * Applying /usr/lib/sysctl.d/00-system.conf ...
192.168.1.202:22: net.bridge.bridge-nf-call-ip6tables = 0
192.168.1.202:22: net.bridge.bridge-nf-call-iptables = 0
192.168.1.202:22: net.bridge.bridge-nf-call-arptables = 0
192.168.1.202:22: * Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
192.168.1.202:22: kernel.yama.ptrace_scope = 0
192.168.1.202:22: * Applying /usr/lib/sysctl.d/50-default.conf ...
192.168.1.202:22: kernel.sysrq = 16
192.168.1.202:22: kernel.core_uses_pid = 1
192.168.1.202:22: kernel.kptr_restrict = 1
192.168.1.202:22: net.ipv4.conf.default.rp_filter = 1
192.168.1.202:22: net.ipv4.conf.all.rp_filter = 1
192.168.1.202:22: net.ipv4.conf.default.accept_source_route = 0
192.168.1.202:22: net.ipv4.conf.all.accept_source_route = 0
192.168.1.202:22: net.ipv4.conf.default.promote_secondaries = 1
192.168.1.202:22: net.ipv4.conf.all.promote_secondaries = 1
192.168.1.202:22: fs.protected_hardlinks = 1
192.168.1.202:22: fs.protected_symlinks = 1
192.168.1.202:22: * Applying /etc/sysctl.d/99-sysctl.conf ...
192.168.1.202:22: * Applying /etc/sysctl.d/k8s.conf ...
192.168.1.202:22: net.bridge.bridge-nf-call-ip6tables = 1
192.168.1.202:22: net.bridge.bridge-nf-call-iptables = 1
192.168.1.202:22: net.ipv4.conf.all.rp_filter = 0
192.168.1.202:22: * Applying /etc/sysctl.conf ...
192.168.1.202:22: net.ipv4.ip_forward = 1
192.168.1.202:22: Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /etc/systemd/system/kubelet.service.
192.168.1.202:22: INFO[2022-10-07 08:21:07] >> init kube success
192.168.1.202:22: INFO[2022-10-07 08:21:07] >> init containerd rootfs success
2022-10-07T08:21:20 info Executing pipeline Init in CreateProcessor.
2022-10-07T08:21:20 info start to copy kubeadm config to master0
2022-10-07T08:21:23 info start to generate cert and kubeConfig...
2022-10-07T08:21:23 info start to generator cert and copy to masters...
2022-10-07T08:21:23 info apiserver altNames : {map[apiserver.cluster.local:apiserver.cluster.local kubernetes:kubernetes kubernetes.default:kubernetes.default kubernetes.default.svc:kubernetes.default.svc kubernetes.default.svc.cluster.local:kubernetes.default.svc.cluster.local localhost:localhost master:master] map[10.103.97.2:10.103.97.2 10.96.0.1:10.96.0.1 127.0.0.1:127.0.0.1 192.168.1.200:192.168.1.200]}
2022-10-07T08:21:23 info Etcd altnames : {map[localhost:localhost master:master] map[127.0.0.1:127.0.0.1 192.168.1.200:192.168.1.200 ::1:::1]}, commonName: master
2022-10-07T08:21:25 info start to copy etc pki files to masters
2022-10-07T08:21:25 info start to create kubeconfig...
2022-10-07T08:21:26 info start to copy kubeconfig files to masters
2022-10-07T08:21:26 info start to copy static files to masters
2022-10-07T08:21:26 info start to apply registry
Created symlink from /etc/systemd/system/multi-user.target.wants/registry.service to /etc/systemd/system/registry.service.
INFO[2022-10-07 08:21:26] >> Health check registry!
INFO[2022-10-07 08:21:26] >> registry is running
INFO[2022-10-07 08:21:26] >> init registry success
2022-10-07T08:21:26 info start to init master0...
2022-10-07T08:21:26 info registry auth in node 192.168.1.200:22
2022-10-07T08:21:27 info domain sealos.hub:192.168.1.200 append success
2022-10-07T08:21:27 info domain apiserver.cluster.local:192.168.1.200 append success
W1007 08:21:27.410627    1991 initconfiguration.go:119] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
[init] Using Kubernetes version: v1.25.0
[preflight] Running pre-flight checks
[WARNING FileExisting-socat]: socat not found in system path
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Using existing ca certificate authority
[certs] Using existing apiserver certificate and key on disk
[certs] Using existing apiserver-kubelet-client certificate and key on disk
[certs] Using existing front-proxy-ca certificate authority
[certs] Using existing front-proxy-client certificate and key on disk
[certs] Using existing etcd/ca certificate authority
[certs] Using existing etcd/server certificate and key on disk
[certs] Using existing etcd/peer certificate and key on disk
[certs] Using existing etcd/healthcheck-client certificate and key on disk
[certs] Using existing apiserver-etcd-client certificate and key on disk
[certs] Using the existing "sa" key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/admin.conf"
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/kubelet.conf"
W1007 08:21:57.356703    1991 kubeconfig.go:249] a kubeconfig file "/etc/kubernetes/controller-manager.conf" exists already but has an unexpected API Server URL: expected: https://192.168.1.200:6443, got: https://apiserver.cluster.local:6443
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/controller-manager.conf"
W1007 08:21:57.427236    1991 kubeconfig.go:249] a kubeconfig file "/etc/kubernetes/scheduler.conf" exists already but has an unexpected API Server URL: expected: https://192.168.1.200:6443, got: https://apiserver.cluster.local:6443
[kubeconfig] Using existing kubeconfig file: "/etc/kubernetes/scheduler.conf"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 14.503524 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml"with one of the options listed at:https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join apiserver.cluster.local:6443 --token <value withheld> \
--discovery-token-ca-cert-hash sha256:b136e609ece7de06c6258fd5e1ba6f8fea44c0922230547352abd03529eec27a \
--control-plane --certificate-key <value withheld>
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join apiserver.cluster.local:6443 --token <value withheld> \
--discovery-token-ca-cert-hash sha256:b136e609ece7de06c6258fd5e1ba6f8fea44c0922230547352abd03529eec27a
2022-10-07T08:22:13 info Executing pipeline Join in CreateProcessor.
2022-10-07T08:22:13 info [192.168.1.201:22 192.168.1.202:22] will be added as worker
2022-10-07T08:22:13 info start to get kubernetes token...
2022-10-07T08:22:15 info start to join 192.168.1.202:22 as worker
2022-10-07T08:22:15 info start to join 192.168.1.201:22 as worker
2022-10-07T08:22:15 info start to copy kubeadm join config to node: 192.168.1.202:22
2022-10-07T08:22:15 info start to copy kubeadm join config to node: 192.168.1.201:22
192.168.1.201:22: 2022-10-07T08:22:17 info domain apiserver.cluster.local:10.103.97.2 append success
192.168.1.201:22: 2022-10-07T08:22:18 info domain lvscare.node.ip:192.168.1.201 append success
2022-10-07T08:22:17 info registry auth in node 192.168.1.201:22
192.168.1.202:22: 2022-10-07T08:22:18 info domain apiserver.cluster.local:10.103.97.2 append success
192.168.1.201:22: 2022-10-07T08:22:18 info domain sealos.hub:192.168.1.200 append success
192.168.1.202:22: 2022-10-07T08:22:18 info domain lvscare.node.ip:192.168.1.202 append success
2022-10-07T08:22:18 info registry auth in node 192.168.1.202:22
2022-10-07T08:22:18 info run ipvs once module: 192.168.1.201:22
192.168.1.202:22: 2022-10-07T08:22:18 info domain sealos.hub:192.168.1.200 append success
192.168.1.201:22: 2022-10-07T08:22:19 info Trying to add route
192.168.1.201:22: 2022-10-07T08:22:19 info success to set route.(host:10.103.97.2, gateway:192.168.1.201)
2022-10-07T08:22:18 info start join node: 192.168.1.201:22
2022-10-07T08:22:18 info run ipvs once module: 192.168.1.202:22
192.168.1.201:22: W1007 08:22:19.228666    3367 initconfiguration.go:119] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
192.168.1.201:22: [preflight] Running pre-flight checks
192.168.1.201:22: [WARNING FileExisting-socat]: socat not found in system path
192.168.1.202:22: 2022-10-07T08:22:19 info Trying to add route
192.168.1.202:22: 2022-10-07T08:22:19 info success to set route.(host:10.103.97.2, gateway:192.168.1.202)
2022-10-07T08:22:19 info start join node: 192.168.1.202:22
192.168.1.202:22: W1007 08:22:19.602826    3702 initconfiguration.go:119] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/run/containerd/containerd.sock". Please update your configuration!
192.168.1.202:22: [preflight] Running pre-flight checks
192.168.1.202:22: [WARNING FileExisting-socat]: socat not found in system path
192.168.1.201:22: [preflight] Reading configuration from the cluster...
192.168.1.201:22: [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
192.168.1.201:22: [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
192.168.1.201:22: [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
192.168.1.201:22: [kubelet-start] Starting the kubelet
192.168.1.201:22: [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
192.168.1.202:22: [preflight] Reading configuration from the cluster...
192.168.1.202:22: [preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
192.168.1.202:22: [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
192.168.1.202:22: [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
192.168.1.202:22: [kubelet-start] Starting the kubelet
192.168.1.202:22: [kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
192.168.1.201:22:
192.168.1.201:22: This node has joined the cluster:
192.168.1.201:22: * Certificate signing request was sent to apiserver and a response was received.
192.168.1.201:22: * The Kubelet was informed of the new secure connection details.
192.168.1.201:22:
192.168.1.201:22: Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
192.168.1.201:22: 2022-10-07T08:22:45 info succeeded in joining 192.168.1.201:22 as worker
192.168.1.202:22:
192.168.1.202:22: This node has joined the cluster:
192.168.1.202:22: * Certificate signing request was sent to apiserver and a response was received.
192.168.1.202:22: * The Kubelet was informed of the new secure connection details.
192.168.1.202:22:
192.168.1.202:22: Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
192.168.1.202:22: 2022-10-07T08:22:45 info succeeded in joining 192.168.1.202:22 as worker
2022-10-07T08:22:46 info start to sync lvscare static pod to node: 192.168.1.202:22 master: [192.168.1.200:6443]
2022-10-07T08:22:46 info start to sync lvscare static pod to node: 192.168.1.201:22 master: [192.168.1.200:6443]
192.168.1.201:22: 2022-10-07T08:22:47 info generator lvscare static pod is success
192.168.1.202:22: 2022-10-07T08:22:47 info generator lvscare static pod is success
2022-10-07T08:22:47 info Executing pipeline RunGuest in CreateProcessor.
2022-10-07T08:22:47 info guest cmd is cp opt/helm /usr/bin/
2022-10-07T08:22:47 info guest cmd is kubectl create namespace tigera-operator
namespace/tigera-operator created
2022-10-07T08:22:47 info guest cmd is helm install calico charts/calico --namespace tigera-operator
NAME: calico
LAST DEPLOYED: Fri Oct  7 08:22:52 2022
NAMESPACE: tigera-operator
STATUS: deployed
REVISION: 1
TEST SUITE: None
2022-10-07T08:22:56 info succeeded in creating a new cluster, enjoy it!
2022-10-07T08:22:56 info
[sealos ASCII art banner]

Website: https://www.sealos.io/
Address: github.com/labring/sealos
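Once sealos reports success, the cluster can be inspected from the master node with standard kubectl commands; a minimal sanity check (not shown in the original output) would be:

kubectl get nodes -o wide   # all three nodes should reach Ready once the Calico pods are up
kubectl get pods -A         # kube-system and tigera-operator/calico pods should become Running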