k8s集群部署
1. 安装要求
在开始之前,部署Kubernetes集群机器需要满足以下几个条件:
- 至少3台机器,操作系统 CentOS7+
- 硬件配置:2GB或更多RAM,2个CPU或更多CPU,硬盘20GB或更多
- 集群中所有机器之间网络互通
- 可以访问外网,需要拉取镜像
- 禁止swap分区
2. 学习目标
- 在所有节点上安装Docker和kubeadm
- 部署Kubernetes Master
- 部署容器网络插件
- 部署 Kubernetes Node,将节点加入Kubernetes集群中
- 部署Dashboard Web页面,可视化查看Kubernetes资源
3. 准备环境
角色 | IP |
---|---|
master | 192.168.240.30 |
node1 | 192.168.240.50 |
node2 | 192.168.240.60 |
#更改主机名
master
[root@localhost ~]# hostnamectl set-hostname master.example.com
[root@localhost ~]# bash
node1
[root@localhost ~]# hostnamectl set-hostname node1.example.com
[root@localhost ~]# bash
node2
[root@localhost ~]# hostnamectl set-hostname node2.example.com
[root@localhost ~]# bash
#关闭防火墙与selinux并重启
[root@master ~]# systemctl disable --now firewalld.service
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@master ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
[root@master ~]# reboot
[root@node1 ~]# systemctl disable --now firewalld.service
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@node1 ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
[root@node1 ~]# reboot
[root@node2 ~]# systemctl disable --now firewalld.service
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@node2 ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
[root@node2 ~]# reboot
#同时关闭三台主机的swap分区(先临时关闭,再注释fstab中的swap条目,防止重启后重新启用)
[root@master ~]# swapoff -a
[root@master ~]# vim /etc/fstab
/dev/mapper/cs-home /home xfs defaults 0 0
#/dev/mapper/cs-swap none swap defaults 0 0 ##注释掉
#在master主机上添加hosts
[root@master ~]# vim /etc/hosts
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.240.30 master master.example.com
192.168.240.50 node1 node1.example.com
192.168.240.60 node2 node2.example.com
将桥接的IPv4流量传递到iptables的链:
[root@master ~]# cat > /etc/sysctl.d/k8s.conf << EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@master ~]# sysctl --system
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-coredump.conf ...
kernel.core_pattern = |/usr/lib/systemd/systemd-coredump %P %u %g %s %t %c %h %e
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.all.promote_secondaries = 1
net.core.default_qdisc = fq_codel
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /usr/lib/sysctl.d/50-libkcapi-optmem_max.conf ...
net.core.optmem_max = 81920
* Applying /usr/lib/sysctl.d/50-pid-max.conf ...
kernel.pid_max = 4194304
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
#三台主机配置时间同步
[root@master ~]# yum -y install chrony
[root@master ~]# systemctl enable --now chronyd
#免密认证
[root@master ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:WueRYbcMN5jdT8lLwwc/+wQ6HO3aeYL4wdnrLwBI7go root@master.example.com
The key's randomart image is:
+---[RSA 3072]----+
| . |
| . + ooo.|
| o .* * +B+|
| o..O *.+*|
| .S +.* .oo|
| E o.o +.B o.|
| ... o *.= o|
| . . ..+ |
| ..oo.|
+----[SHA256]-----+
[root@master ~]# ssh-copy-id master
[root@master ~]# ssh-copy-id node1
[root@master ~]# ssh-copy-id node2
所有节点安装Docker/kubeadm/kubelet
Kubernetes默认CRI(容器运行时)为Docker,因此先安装Docker
[root@master ~]# cd /etc/yum.repos.d/
[root@master yum.repos.d]# curl -o docker-ce.repo https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 1919 100 1919 0 0 2104 0 --:--:-- --:--:-- --:--:-- 2101
[root@master yum.repos.d]# sed -i 's@https://download.docker.com@https://mirrors.tuna.tsinghua.edu.cn/docker-ce@g' docker-ce.repo
[root@master ~]# systemctl enable --now docker.service
[root@master ~]# cat > /etc/docker/daemon.json << EOF
> {
> "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
> "exec-opts": ["native.cgroupdriver=systemd"],
> "log-driver": "json-file",
> "log-opts": {
> "max-size": "100m"
> },
> "storage-driver": "overlay2"
> }
> EOF
[root@master ~]# systemctl restart docker.service
添加kubernetes阿里云YUM软件源
[root@master ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
安装kubeadm,kubelet和kubectl
[root@master ~]# yum -y install kubelet kubeadm kubectl
[root@master ~]# systemctl enable kubelet.service
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
部署Kubernetes Master
[root@master ~]# kubeadm init --apiserver-advertise-address 192.168.240.30 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.23.1 --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.23.1
[preflight] Running pre-flight checks
[WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
[WARNING Swap]: swap is enabled; production deployments should disable swap unless testing the NodeSwap feature gate of the kubelet
[WARNING FileExisting-tc]: tc not found in system path
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
·····省略部分·····
### 将其写入一个文件中,后面会用到
kubeadm join 192.168.240.30:6443 --token hf52ar.ge052l3ovjtrjtax \
--discovery-token-ca-cert-hash sha256:676706ba814531ea5aaa6687874c8a928f13a3d0ca022991c117aebaa9ce0366
[root@master ~]# vim init
[root@master ~]# cat init
kubeadm join 192.168.240.30:6443 --token hf52ar.ge052l3ovjtrjtax \
--discovery-token-ca-cert-hash sha256:676706ba814531ea5aaa6687874c8a928f13a3d0ca022991c117aebaa9ce0366
# 设置环境变量使用kubectl工具(写入profile.d后需source使其在当前会话生效)
[root@master ~]# echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' > /etc/profile.d/k8s.sh
[root@master ~]# source /etc/profile.d/k8s.sh
#查看镜像
[root@master ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.aliyuncs.com/google_containers/kube-apiserver v1.23.1 b6d7abedde39 2 days ago 135MB
registry.aliyuncs.com/google_containers/kube-proxy v1.23.1 b46c42588d51 2 days ago 112MB
registry.aliyuncs.com/google_containers/kube-scheduler v1.23.1 71d575efe628 2 days ago 53.5MB
registry.aliyuncs.com/google_containers/kube-controller-manager v1.23.1 f51846a4fd28 2 days ago 125MB
registry.aliyuncs.com/google_containers/etcd 3.5.1-0 25f8c7f3da61 6 weeks ago 293MB
registry.aliyuncs.com/google_containers/coredns v1.8.6 a4ca41631cc7 2 months ago 46.8MB
registry.aliyuncs.com/google_containers/pause 3.6 6270bb605e12 3 months ago 683kB
#查看容器
[root@master ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
a5b3a44e7f96 b46c42588d51 "/usr/local/bin/kube…" 3 minutes ago Up 3 minutes k8s_kube-proxy_kube-proxy-b2cs4_kube-system_54c3c128-15a7-4b9c-8d57-49e89c392cb9_0
f3febbea48ca registry.aliyuncs.com/google_containers/pause:3.6 "/pause" 3 minutes ago Up 3 minutes k8s_POD_kube-proxy-b2cs4_kube-system_54c3c128-15a7-4b9c-8d57-49e89c392cb9_0
16ff0daae31a 71d575efe628 "kube-scheduler --au…" 3 minutes ago Up 3 minutes k8s_kube-scheduler_kube-scheduler-master.example.com_kube-system_78d116366c5c52e663d3704a9b950ba6_0
6e2cb315af8f b6d7abedde39 "kube-apiserver --ad…" 3 minutes ago Up 3 minutes k8s_kube-apiserver_kube-apiserver-master.example.com_kube-system_7df12009584af36af164846cb9c19e64_0
24b91d5807d3 25f8c7f3da61 "etcd --advertise-cl…" 3 minutes ago Up 3 minutes k8s_etcd_etcd-master.example.com_kube-system_c194313026c64e53d0b2debf90376296_0
8e611b486e83 f51846a4fd28 "kube-controller-man…" 3 minutes ago Up 3 minutes k8s_kube-controller-manager_kube-controller-manager-master.example.com_kube-system_e3c7337cbdf9f732e45b211a57aa7a54_0
d3af69a71c83 registry.aliyuncs.com/google_containers/pause:3.6 "/pause" 3 minutes ago Up 3 minutes k8s_POD_kube-scheduler-master.example.com_kube-system_78d116366c5c52e663d3704a9b950ba6_0
84d0175f51a2 registry.aliyuncs.com/google_containers/pause:3.6 "/pause" 3 minutes ago Up 3 minutes k8s_POD_kube-controller-manager-master.example.com_kube-system_e3c7337cbdf9f732e45b211a57aa7a54_0
c5182ba5b66b registry.aliyuncs.com/google_containers/pause:3.6 "/pause" 3 minutes ago Up 3 minutes k8s_POD_kube-apiserver-master.example.com_kube-system_7df12009584af36af164846cb9c19e64_0
8675b909f2a2 registry.aliyuncs.com/google_containers/pause:3.6 "/pause" 3 minutes ago Up 3 minutes k8s_POD_etcd-master.example.com_kube-system_c194313026c64e53d0b2debf90376296_0
#查看端口号
[root@master ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 127.0.0.1:2381 0.0.0.0:*
LISTEN 0 128 127.0.0.1:10257 0.0.0.0:*
LISTEN 0 128 127.0.0.1:10259 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 127.0.0.1:41207 0.0.0.0:*
LISTEN 0 128 127.0.0.1:10248 0.0.0.0:*
LISTEN 0 128 127.0.0.1:10249 0.0.0.0:*
LISTEN 0 128 192.168.240.30:2379 0.0.0.0:*
LISTEN 0 128 127.0.0.1:2379 0.0.0.0:*
LISTEN 0 128 192.168.240.30:2380 0.0.0.0:*
LISTEN 0 128 *:10256 *:*
LISTEN 0 128 [::]:22 [::]:*
LISTEN 0 128 *:10250 *:*
LISTEN 0 128 *:6443 *:*
查看节点
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master.example.com NotReady control-plane,master 6m59s v1.23.1
master上安装Pod网络插件(CNI)
Flannel可以添加到任何现有的Kubernetes集群中;不过最简单的做法是在任何使用Pod网络的Pod启动之前就部署它。
[root@master ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
加入Kubernetes Node
将 node1 和 node2 加入到集群中,使用之前创建的文件内容init
# 未加入之前master上查看
[root@master flannel]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master.example.com Ready control-plane,master 69m v1.23.1
[root@master ~]# cat init # 向集群添加新节点,执行在kubeadm init输出的以下内容(注意地址是master的IP 192.168.240.30)
kubeadm join 192.168.240.30:6443 --token hf52ar.ge052l3ovjtrjtax \
	--discovery-token-ca-cert-hash sha256:676706ba814531ea5aaa6687874c8a928f13a3d0ca022991c117aebaa9ce0366
# 在node1上执行join命令,将node1加入集群(join的目标必须是master的API server地址 192.168.240.30:6443)
[root@node1 ~]# kubeadm join 192.168.240.30:6443 --token hf52ar.ge052l3ovjtrjtax \
> --discovery-token-ca-cert-hash sha256:676706ba814531ea5aaa6687874c8a928f13a3d0ca022991c117aebaa9ce0366
[preflight] Running pre-flight checks
[WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
[WARNING FileExisting-tc]: tc not found in system path
[WARNING Hostname]: hostname "node1.example.com" could not be reached
[WARNING Hostname]: hostname "node1.example.com": lookup node1.example.com on 114.114.114.114:53: no such host
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# 在node2上执行join命令,将node2加入集群(join的目标必须是master的API server地址 192.168.240.30:6443)
[root@node2 ~]# kubeadm join 192.168.240.30:6443 --token hf52ar.ge052l3ovjtrjtax \
> --discovery-token-ca-cert-hash sha256:676706ba814531ea5aaa6687874c8a928f13a3d0ca022991c117aebaa9ce0366
[preflight] Running pre-flight checks
[WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
[WARNING FileExisting-tc]: tc not found in system path
[WARNING Hostname]: hostname "node2.example.com" could not be reached
[WARNING Hostname]: hostname "node2.example.com": lookup node2.example.com on 114.114.114.114:53: no such host
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
将node1和node2加入集群之后master上查看
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master.example.com Ready control-plane,master 80m v1.23.1
node1.example.com Ready <none> 2m25s v1.23.1
node2.example.com Ready <none> 2m21s v1.23.1
测试kubernetes集群
在Kubernetes集群中创建一个pod,验证是否正常运行:
# 创建一个pod,是deployment类型的nginx,使用nginx镜像,没有指定在哪个节点运行
[root@master ~]# kubectl create deployment nginx --image=nginx
deployment.apps/nginx created
# 暴露pod是deployment类型的nginx端口80,暴露在节点上
[root@master ~]# kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed
# 查看
[root@master ~]# kubectl get pod,svc
NAME READY STATUS RESTARTS AGE
pod/nginx-85b98978db-xd6wz 1/1 Running 0 68s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 30m
service/nginx NodePort 10.99.129.159 <none> 80:31343/TCP 48s
# 查看pod在哪个节点上运行(从NODE列可知运行在node2上)
[root@master ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-85b98978db-xd6wz 1/1 Running 0 87s 10.244.2.2 node2.example.com <none> <none>
# 访问service的ClusterIP
[root@master ~]# curl http://10.99.129.159
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
..............
# node2上查看映射的随机端口
[root@node2 ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 127.0.0.1:37919 0.0.0.0:*
LISTEN 0 128 127.0.0.1:10248 0.0.0.0:*
LISTEN 0 128 127.0.0.1:10249 0.0.0.0:*
LISTEN 0 128 0.0.0.0:31343 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 *:10250 *:*
LISTEN 0 128 *:10256 *:*
LISTEN 0 128 [::]:22 [::]:*
访问node2IP:映射的随机端口(31343)