1 Planning
Node name | IP | Role |
---|---|---|
k8s-master | 192.168.0.156 | master |
k8s-node01 | 192.168.0.70 | node1 |
k8s-node02 | 192.168.0.167 | node2 |
2 Environment Initialization
#Set the hostname (run the matching command on its own node)
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
#Disable firewalld and SELinux
systemctl stop firewalld
systemctl disable firewalld
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
#Disable swap (required: kubelet will not run with swap enabled)
swapoff -a # temporary, effective until reboot
vim /etc/fstab # comment out the swap line for a permanent disable
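A non-interactive alternative is a sed one-liner (a sketch: it comments out every /etc/fstab line containing the word "swap", so review the file afterwards):
sed -ri 's/.*swap.*/#&/' /etc/fstab
free -h # the Swap line should now read 0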
#Map hostnames to IPs (run on all three hosts)
cat >> /etc/hosts << EOF
192.168.0.156 k8s-master
192.168.0.70 k8s-node01
192.168.0.167 k8s-node02
EOF
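Optionally verify that the names resolve from each host, for example:
ping -c 1 k8s-node01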
#Pass bridged IPv4 traffic to the iptables chains (run on all three hosts):
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
# net.ipv4.tcp_tw_recycle is omitted: it was removed in kernel 4.12 and no longer exists on the 5.4 kernel installed below
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
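Note that the bridge-nf-call sysctls only exist while the br_netfilter module is loaded; if sysctl --system complains about them, load the module and persist it across reboots:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf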
#Upgrade the Linux kernel
#Check the current kernel version
uname -a
#Install the ELRepo repository (provides newer kernels)
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum install -y https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm
#List the kernel versions available from elrepo-kernel
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
#Install the latest long-term (lt) kernel
yum --disablerepo='*' --enablerepo=elrepo-kernel install kernel-lt -y
#List the kernel entries in the GRUB boot menu; here entry 0, the 5.4.252 lt kernel, is the one we just installed
awk -F\' '$1=="menuentry " {print i++ " : " $2}' /etc/grub2.cfg
#Set the newly installed kernel (entry 0) as the default boot kernel
grub2-set-default 0
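Optionally confirm which entry is now the saved default:
grub2-editenv list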
#Remove the old kernel packages (yum refuses to remove the kernel that is currently running, so rerun this after the reboot if old kernels remain)
yum remove kernel -y
#Reboot so the machine starts on the new kernel
reboot
#After the reboot, verify the kernel version
uname -r
3 Installing K8S
3.1 Install Docker
#Install prerequisites:
yum install -y yum-utils device-mapper-persistent-data lvm2
#Add the Docker yum repository
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
#Install Docker
yum install -y docker-ce-18.09.0 docker-ce-cli-18.09.0 containerd.io
#Start Docker and enable it at boot
systemctl start docker
systemctl enable docker
#Adjust the Docker configuration: set the cgroup driver to systemd (matching the kubelet configuration later on)
#Edit the file so it reads as follows
vim /etc/docker/daemon.json
{
  "graph": "/data/docker",
  "registry-mirrors": ["https://01xxgaft.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
#Restart Docker
systemctl restart docker
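To confirm the daemon picked up the new settings, the output below should report "Cgroup Driver: systemd" and "Storage Driver: overlay2":
docker info | grep -i -E 'cgroup|storage driver'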
3.2 Install kubeadm, kubelet, and kubectl
#Add the Aliyun yum repository for Kubernetes
#Run on all machines
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#Install kubeadm, kubelet, and kubectl
#Run on all machines
setenforce 0
yum install -y kubelet-1.16.2 kubeadm-1.16.2 kubectl-1.16.2
systemctl start kubelet
systemctl enable kubelet
#(kubelet will keep restarting until kubeadm init/join runs on the node; that is expected)
#Deploy the k8s master and worker nodes
#Deploy the master node; run on 192.168.0.156
#Initialize the master node (the version matches the 1.16.2 packages installed above)
kubeadm init --apiserver-advertise-address=192.168.0.156 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.16.2 --service-cidr=10.140.0.0/16 --pod-network-cidr=10.240.0.0/16
#CIDR note: the service CIDR and pod CIDR (both /16 here) must not overlap with each other or with the host network
#On success, kubeadm prints a join command for the worker nodes; record it, then run the following on the master
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
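At this point kubectl should reach the cluster; the master will show NotReady until the CNI plugin from section 3.3 is installed:
kubectl get nodes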
#Deploy the worker nodes; run on 192.168.0.70 and 192.168.0.167
#Paste the join command generated by your own init output; the one below is only an example
kubeadm join 192.168.0.156:6443 --token 8ubdw9.63z7bsd97rfblb6z \
--discovery-token-ca-cert-hash sha256:c9abd2d72b082d39464546c246b72078c3d15f14e9972527c87514fba20670e6
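If the join command was lost, a fresh one can be generated on the master at any time:
kubeadm token create --print-join-command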
3.3 Install a Network Plugin (CNI)
#Choose one of the two common CNI plugins and run these steps on the master; on cloud servers flannel is recommended, since calico may conflict with the cloud network environment
#Install the flannel plugin (lightweight and quick to set up; recommended for beginners)
#Create a manifest file named kube-flannel.yml
vi kube-flannel.yml
#Set the network under net-conf.json to the pod-network-cidr passed to kubeadm init above (this must match, or cluster networking will break)
  net-conf.json: |
    {
      "Network": "10.240.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
#The full manifest is as follows
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.240.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.0.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        image: quay.io/coreos/flannel:v0.15.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.15.1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
After editing the manifest, install the plugin:
kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system
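Once the flannel pods are Running, every node should report Ready:
kubectl get nodes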
4 Testing the K8S Cluster
#Create a pod in the Kubernetes cluster and verify it runs correctly
#Create a deployment and expose it via a NodePort service; a random node port will be assigned
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
#Check the pod status: it must be Running with READY 1/1; also note which node port the nginx service's port 80 was mapped to
kubectl get pod,svc
#Visit the mapped port on any node to confirm nginx is serving
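For example, if kubectl get svc showed 80:30080/TCP (30080 is a hypothetical node port; substitute the one from your own output), the check would be:
curl http://192.168.0.70:30080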
5 Replacing iptables with ipvs
#Kubernetes Services support two proxy modes, one based on iptables and one based on ipvs. Comparing the two, ipvs performs noticeably better, but using it requires manually loading the ipvs kernel modules
# 1. Install ipset and ipvsadm
yum install ipset ipvsadm -y
# 2. Write the modules to be loaded into a script file
cat <<EOF> /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# 3. Make the script executable
chmod +x /etc/sysconfig/modules/ipvs.modules
# 4. Run the script
/bin/bash /etc/sysconfig/modules/ipvs.modules
# 5. Verify that the modules loaded
lsmod | grep -e ip_vs -e nf_conntrack
# 6. Configure the kubelet cgroup driver and proxy mode
#Edit /etc/sysconfig/kubelet
vim /etc/sysconfig/kubelet
#Add the following configuration
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
# 7. Restart kubelet and enable it at boot
systemctl restart kubelet
systemctl enable kubelet
#Inspect and modify the kube-proxy configMap
#Check the proxy's current mode (the default is iptables)
kubectl get pod -n kube-system
kubectl logs kube-proxy-xxxx -n kube-system
#Modify the configMap
kubectl edit configmap kube-proxy -n kube-system
#Type /mode and press Enter to locate the mode field, press i to edit, and change mode: "" to mode: "ipvs"
#Delete the existing kube-proxy pods; kubernetes recreates them automatically with the new configMap settings
kubectl get pod -n kube-system | grep proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
#Verify that the proxy is now running in ipvs mode
kubectl get pod -n kube-system | grep proxy
kubectl logs kube-proxy-xxxx -n kube-system
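A successful switch shows "Using ipvs Proxier" in the kube-proxy logs; using the k8s-app=kube-proxy label set by kubeadm avoids copying pod names by hand:
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs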
View the list of currently configured ipvs virtual servers
ipvsadm -Ln