1. Prerequisites:
3 machines running CentOS 7.9
k8s-master 192.168.10.128
k8s-node1 192.168.10.129
k8s-node2 192.168.10.130
Kubernetes v1.20.6
2. Basic environment setup (all nodes)
1) Set the hostname (run the matching command on the corresponding node):
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
2) Disable the firewall and SELinux
systemctl stop firewalld
systemctl disable firewalld
sed -i '/SELINUX=enforcing/ s/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
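The sed above only changes the persistent configuration and takes effect after a reboot. A common companion step (my addition, not in the original notes) is to also switch SELinux off for the running system:
setenforce 0    # set SELinux to permissive mode immediately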
3) Edit the hosts file
vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.128 k8s-master
192.168.10.129 k8s-node1
192.168.10.130 k8s-node2
4) Configure passwordless SSH between the nodes
ssh-keygen
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-master
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-node1
ssh-copy-id -i ~/.ssh/id_rsa.pub root@k8s-node2
5) Time synchronization
yum install chrony -y
systemctl start chronyd
systemctl enable chronyd
chronyc sources
6) Disable swap
swapoff -a    # temporary, for the current session
sed -ri 's/.*swap.*/#&/' /etc/fstab    # permanent, comments out the swap entry in fstab
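To confirm swap is really off, a quick check you can run (my addition):
free -m    # the Swap line should report 0 total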
7) Let iptables see bridged traffic
modprobe br_netfilter
lsmod | grep br_netfilter    # check whether the br_netfilter module is already loaded
Make sure net.bridge.bridge-nf-call-iptables is set to 1 in the sysctl configuration (configured below).
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
# Set the required sysctl parameters; they persist across reboots
cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply
sysctl --system
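To verify that the parameters took effect, as required above, you can read them back (verification step added here):
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward    # all three should print 1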
3. Kubernetes deployment (all nodes)
1) Configure the yum repositories
cd /etc/yum.repos.d
# Install the plugin that automatically picks the fastest mirror
yum install -y yum-plugin-fastestmirror
# Open the yum configuration
vim /etc/yum.conf
# Add this line
fastestmirror=true
# Install the yum-config-manager tool
yum -y install yum-utils
# Set up the Docker yum repository
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/centos/docker-ce.repo --no-check-certificate
# Install required packages
yum install -y yum-utils device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack ntpdate telnet
2) Install Docker
# Install docker-ce
yum install -y docker-ce-20.10.6
# Enable on boot and start now
systemctl enable --now docker
# Check the version
docker --version
# Configure registry mirrors and the systemd cgroup driver
cat >/etc/docker/daemon.json<<EOF
{
"registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
# Reload and restart
systemctl daemon-reload && systemctl restart docker
# Check the service status
systemctl status docker
- [Note] dockerd actually calls the containerd API under the hood; containerd is the intermediary between dockerd and runC, so starting the docker service also starts the containerd service.
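If you want to see this for yourself (a quick check of my own; containerd is installed automatically as a docker-ce dependency):
systemctl status containerd    # should be active while docker is running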
3) Configure the Kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
4. Install kubeadm, kubelet, and kubectl (all nodes)
yum install -y kubelet-1.20.6 kubeadm-1.20.6 kubectl-1.20.6
# Enable on boot and start immediately (--now starts the service right away). kubelet will keep restarting until kubeadm init runs; that is expected.
systemctl enable --now kubelet
Check the versions
kubectl version
yum info kubeadm
Pre-download the required images.
Here I simply import an image archive that was downloaded in advance.
If you do not have the archive, the images can be pulled with docker pull instead (see the example after the image list below).
docker load -i k8simage-1-20-6.tar.gz
# The archive contains the following images
[root@k8s-node1 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.aliyuncs.com/google_containers/kube-proxy v1.20.6 9a1ebfd8124d 2 years ago 118MB
registry.aliyuncs.com/google_containers/kube-scheduler v1.20.6 b93ab2ec4475 2 years ago 47.3MB
registry.aliyuncs.com/google_containers/kube-apiserver v1.20.6 b05d611c1af9 2 years ago 122MB
registry.aliyuncs.com/google_containers/kube-controller-manager v1.20.6 560dd11d4550 2 years ago 116MB
calico/pod2daemon-flexvol v3.18.0 2a22066e9588 2 years ago 21.7MB
calico/node v3.18.0 5a7c4970fbc2 2 years ago 172MB
calico/cni v3.18.0 727de170e4ce 2 years ago 131MB
calico/kube-controllers v3.18.0 9a154323fbf7 2 years ago 53.4MB
registry.aliyuncs.com/google_containers/etcd 3.4.13-0 0369cf4303ff 2 years ago 253MB
registry.aliyuncs.com/google_containers/coredns 1.7.0 bfe3a36ebd25 3 years ago 45.2MB
registry.aliyuncs.com/google_containers/pause 3.2 80d28bedfe5d 3 years ago 683kB
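If you are pulling online instead of loading the archive, the same images can be fetched with docker pull on every node; the repositories and tags below are copied from the docker images listing above:
docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.20.6
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.20.6
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.20.6
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.20.6
docker pull registry.aliyuncs.com/google_containers/etcd:3.4.13-0
docker pull registry.aliyuncs.com/google_containers/coredns:1.7.0
docker pull registry.aliyuncs.com/google_containers/pause:3.2
docker pull calico/node:v3.18.0
docker pull calico/cni:v3.18.0
docker pull calico/pod2daemon-flexvol:v3.18.0
docker pull calico/kube-controllers:v3.18.0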
5. Generate and edit kubeadm.yaml for initialization (master node)
[root@master ~]# kubeadm config print init-defaults > kubeadm.yaml
[root@master ~]# vim kubeadm.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.10.128   # IP of the control-plane node
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master                   # hostname of the control-plane node
  taints:                            # change taints to the two lines below so regular pods are not scheduled on the master (optional)
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS                      # the DNS type can be specified here; leaving it out also works
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.6
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16           # pod network CIDR; this line must be added
scheduler: {}
# Append the following lines
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
Initialize the cluster:
kubeadm init --config=kubeadm.yaml --ignore-preflight-errors=SystemVerification
Then run the following commands on the master:
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
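Alternatively, since these steps are run as root, the kubeconfig can be exported directly instead (the standard alternative suggested by kubeadm itself):
export KUBECONFIG=/etc/kubernetes/admin.conf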
6. Join the worker nodes to the cluster
On each node, run the kubeadm join command that kubeadm init printed at the end of its output, for example:
kubeadm join 192.168.10.128:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:0fea6db9974ad5fb456428de91864636a52b94880a4a3ac93e9b01abc79cd7b5
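Your actual token and ca-cert-hash will differ from the example above. If the join command was lost or the token (valid for 24h) has expired, it can be regenerated on the master (standard kubeadm helper, added here for convenience):
kubeadm token create --print-join-command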
Verify on the master:
kubectl get node
Label the worker nodes:
kubectl label node k8s-node1 node-role.kubernetes.io/worker=worker
kubectl label node k8s-node2 node-role.kubernetes.io/worker=worker
[root@k8s-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready control-plane,master 13h v1.20.6
k8s-node1 NotReady worker 13h v1.20.6
k8s-node2 NotReady worker 13h v1.20.6
7. Install the Calico network plugin
The worker nodes show NotReady above because no CNI network plugin is installed yet; installing Calico brings them to Ready. Run on the master:
kubectl apply -f calico.yaml
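calico.yaml is assumed to already be on the master and to match the calico v3.18.0 images loaded earlier. If you still need the manifest, it can be downloaded from the Calico documentation site; the URL below is my assumption for the v3.18 archive, so double-check it for your version:
curl -O https://docs.projectcalico.org/archive/v3.18/manifests/calico.yaml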
At this point, the Kubernetes cluster setup is complete.
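As a final check (my own verification step), confirm that every node is Ready and that the kube-system pods, including the calico pods, are Running:
kubectl get nodes
kubectl get pods -n kube-system -o wide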