Cluster layout

| Role | IP | Components |
|---|---|---|
| k8s-master01 | 10.255.32.21 | etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kube-proxy |
| k8s-master02 | 10.255.32.22 | etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kube-proxy |
| k8s-node01 | 10.255.32.23 | etcd, kubelet, kube-proxy |
| k8s-node02 | 10.255.32.24 | kubelet, kube-proxy |
| VIP | 10.255.32.55 | keepalived |
OS initialization

# 1. Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# 2. Disable SELinux
sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent
setenforce 0 # temporary
# 3. Disable swap
swapoff -a # temporary
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent
# 4. Add hosts entries on the masters
cat >> /etc/hosts << EOF
10.255.32.21 k8s-master01
10.255.32.22 k8s-master02
10.255.32.23 k8s-node01
10.255.32.24 k8s-node02
EOF
# 5. Pass bridged IPv4 traffic to the iptables chains
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system # apply
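# (optional) if sysctl reports "No such file or directory" for the bridge keys,
# the br_netfilter module is likely not loaded; loading it is an assumption
# about your kernel setup:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf # persist across reboots
sysctl --system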
# 6. Time synchronization
yum install ntpdate -y
ntpdate time.windows.com
# 7. Install docker
yum install -y docker
sed -i s#systemd#cgroupfs#g /usr/lib/systemd/system/docker.service
systemctl daemon-reload
systemctl restart docker
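Optional sanity check: kubelet is configured later with cgroupDriver: cgroupfs, so docker should report the same driver after the sed above.
docker info | grep -i "cgroup driver" # expect: Cgroup Driver: cgroupfs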
1. Deploy the etcd cluster

| Node | IP |
|---|---|
| etcd-01 | 10.255.32.21 |
| etcd-02 | 10.255.32.22 |
| etcd-03 | 10.255.32.23 |

1.1 Prepare the cfssl certificate tooling
mkdir cfssl && cd cfssl/
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
1.2 Generate the etcd certificates
1.2.1 Self-signed certificate authority (CA)
# 1. Create the working directories
mkdir -p ~/TLS/{etcd,k8s} && cd ~/TLS/etcd
# 2. Self-signed CA
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "www": {
        "expiry": "876000h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
# 3. Generate the certs: this produces ca.pem and ca-key.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
1.2.2 Issue the etcd HTTPS certificate with the self-signed CA
# Create the certificate signing request
cat > server-csr.json << EOF
{
  "CN": "etcd",
  "hosts": [
    "10.255.32.21",
    "10.255.32.22",
    "10.255.32.23"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF
# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
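Optionally inspect the issued certificate with the cfssl-certinfo tool installed earlier, and confirm all three etcd node IPs appear in its SANs before distributing it:
cfssl-certinfo -cert server.pem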
1.3 Deploy the etcd cluster
1.3.1 Download and unpack the release tarball
wget https://github.com/etcd-io/etcd/releases/download/v3.4.9/etcd-v3.4.9-linux-amd64.tar.gz
mkdir /opt/etcd/{bin,cfg,ssl} -p
tar zxvf etcd-v3.4.9-linux-amd64.tar.gz
mv etcd-v3.4.9-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/
1.3.2 Create the etcd config file
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.255.32.21:2380"
ETCD_LISTEN_CLIENT_URLS="https://10.255.32.21:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.255.32.21:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://10.255.32.21:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://10.255.32.21:2380,etcd-2=https://10.255.32.22:2380,etcd-3=https://10.255.32.23:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
Config file notes:
ETCD_NAME: node name, unique within the cluster
ETCD_DATA_DIR: data directory
ETCD_LISTEN_PEER_URLS: peer (cluster) listen address
ETCD_LISTEN_CLIENT_URLS: client listen address
ETCD_INITIAL_ADVERTISE_PEER_URLS: peer advertise address
ETCD_ADVERTISE_CLIENT_URLS: client advertise address
ETCD_INITIAL_CLUSTER: addresses of all cluster members
ETCD_INITIAL_CLUSTER_TOKEN: cluster token
ETCD_INITIAL_CLUSTER_STATE: join state; "new" for a fresh cluster, "existing" to join one that already exists
1.3.3 Manage etcd with systemd
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem \
--logger=zap
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
1.3.4 Copy the certificates
cp ~/TLS/etcd/ca*pem ~/TLS/etcd/server*pem /opt/etcd/ssl/
1.3.5 Copy the files to the other two servers
scp -r /opt/etcd/ root@10.255.32.22:/opt/
scp /usr/lib/systemd/system/etcd.service root@10.255.32.22:/usr/lib/systemd/system/
scp -r /opt/etcd/ root@10.255.32.23:/opt/
scp /usr/lib/systemd/system/etcd.service root@10.255.32.23:/usr/lib/systemd/system/
1.3.6 Edit the IPs and node name in etcd.conf (shown for 10.255.32.22; a sed sketch follows below)
vim /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-1" # change to etcd-2 / etcd-3
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://10.255.32.22:2380" # local IP
ETCD_LISTEN_CLIENT_URLS="https://10.255.32.22:2379" # local IP
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.255.32.22:2380" # local IP
ETCD_ADVERTISE_CLIENT_URLS="https://10.255.32.22:2379" # local IP
ETCD_INITIAL_CLUSTER="etcd-1=https://10.255.32.21:2380,etcd-2=https://10.255.32.22:2380,etcd-3=https://10.255.32.23:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
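If you'd rather not edit by hand, a small sed sketch (NAME/IP are placeholders to set per node) patches the node name and the four *_URLS lines while deliberately leaving ETCD_INITIAL_CLUSTER untouched:
NAME=etcd-2; IP=10.255.32.22 # use etcd-3 / 10.255.32.23 on the third node
sed -i "s#^ETCD_NAME=.*#ETCD_NAME=\"${NAME}\"#" /opt/etcd/cfg/etcd.conf
sed -i "/_URLS=\"https/s#10.255.32.21#${IP}#" /opt/etcd/cfg/etcd.conf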
1.3.7 Start etcd and enable it at boot
# start on all 3 nodes
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
1.3.8 Check cluster health
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://10.255.32.21:2379,https://10.255.32.22:2379,https://10.255.32.23:2379" endpoint health --write-out=table
+---------------------------+--------+-------------+-------+
| ENDPOINT | HEALTH | TOOK | ERROR |
+---------------------------+--------+-------------+-------+
| https://10.255.32.21:2379 | true | 23.512194ms | |
| https://10.255.32.22:2379 | true | 27.815236ms | |
| https://10.255.32.23:2379 | true | 34.938155ms | |
+---------------------------+--------+-------------+-------+
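To also see which member is currently the leader, the same certificate flags work with an endpoint status query:
ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://10.255.32.21:2379,https://10.255.32.22:2379,https://10.255.32.23:2379" endpoint status --write-out=table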
2. Deploy the k8s cluster

Single-master cluster (to be scaled out later):

| Role | IP |
|---|---|
| k8s-master01 | 10.255.32.21 |
| k8s-node01 | 10.255.32.23 |
| k8s-node02 | 10.255.32.24 |
2.1 Deploy the Master
2.1.1 kube-apiserver
Self-signed certificate authority (CA)
cd ~/TLS/k8s
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "876000h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "876000h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF
# Generate the certs: produces ca.pem and ca-key.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
Issue the kube-apiserver HTTPS certificate with the self-signed CA

Note: the hosts field below must contain every Master/LB/VIP IP; not a single one may be missing! To simplify later scale-out, reserve a few extra IPs. 10.0.0.1 is the first address of the Service CIDR (--service-cluster-ip-range) and must stay in the list.

# Create the certificate signing request
cat > server-csr.json << EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "10.255.32.21",
    "10.255.32.22",
    "10.255.32.23",
    "10.255.32.24",
    "10.255.32.55",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
# Generate the certs: produces server.pem and server-key.pem
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
Deploy kube-apiserver
1. Download the kubernetes server binary package (kubernetes-server-linux-amd64.tar.gz)

2. Unpack the binary package
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
tar zxvf kubernetes-server-linux-amd64.tar.gz
cd kubernetes/server/bin
cp kube-apiserver kube-scheduler kube-controller-manager /opt/kubernetes/bin
cp kubectl /usr/bin/
3. Create the config file
cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--etcd-servers=https://10.255.32.21:2379,https://10.255.32.22:2379,https://10.255.32.23:2379 \\
--bind-address=10.255.32.21 \\
--secure-port=6443 \\
--advertise-address=10.255.32.21 \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-32767 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--service-account-issuer=api \\
--service-account-signing-key-file=/opt/kubernetes/ssl/server-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \\
--requestheader-client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--proxy-client-cert-file=/opt/kubernetes/ssl/server.pem \\
--proxy-client-key-file=/opt/kubernetes/ssl/server-key.pem \\
--requestheader-allowed-names=kubernetes \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true \\
--audit-log-maxage=30 \\
--audit-log-maxbackup=3 \\
--audit-log-maxsize=100 \\
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
EOF
Note: of the two backslashes above, the first is an escape and the second is the line-continuation character; the escape is needed so the heredoc (EOF) keeps the continuation in the written file.
Parameter notes:
--logtostderr: logging switch (false sends logs to --log-dir instead of stderr)
--v: log verbosity
--log-dir: log directory
--etcd-servers: etcd cluster endpoints
--bind-address: listen address
--secure-port: HTTPS port
--advertise-address: address advertised to the cluster
--allow-privileged: allow privileged containers
--service-cluster-ip-range: Service virtual IP range
--enable-admission-plugins: admission control plugins
--authorization-mode: authorization modes; enables RBAC and Node self-management
--enable-bootstrap-token-auth: enable the TLS bootstrap mechanism
--token-auth-file: bootstrap token file
--service-node-port-range: default port range for NodePort Services
--kubelet-client-xxx: client certs the apiserver uses to reach kubelet
--tls-xxx-file: apiserver HTTPS certs
--service-account-issuer, --service-account-signing-key-file: mandatory since v1.20
--etcd-xxxfile: certs for connecting to the etcd cluster
--audit-log-xxx: audit log settings
Aggregation-layer settings: --requestheader-client-ca-file, --proxy-client-cert-file, --proxy-client-key-file, --requestheader-allowed-names, --requestheader-extra-headers-prefix, --requestheader-group-headers, --requestheader-username-headers, --enable-aggregator-routing
4. Copy the generated certificates
# copy the certs generated earlier to the path referenced in the config file
cp ~/TLS/k8s/ca*pem ~/TLS/k8s/server*pem /opt/kubernetes/ssl/
5. Enable the TLS Bootstrapping mechanism
TLS Bootstrapping: once the apiserver has TLS enabled, kubelet and kube-proxy on the nodes must present valid CA-signed certificates to communicate with it. Issuing those client certificates by hand becomes a lot of work once there are many nodes, and it also complicates scaling the cluster. To streamline this, Kubernetes introduced TLS bootstrapping to issue client certificates automatically: kubelet requests a certificate from the apiserver as a low-privileged user, and the apiserver signs it dynamically. This is the strongly recommended approach on nodes; today it is used mainly for kubelet, while kube-proxy still uses a certificate we issue centrally.
6. Create the token file
# generate a random token
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
ffdc3282190b9766703a8c0e762e6824
# create the token file (format: token,user,UID,group)
cat > /opt/kubernetes/cfg/token.csv << EOF
ffdc3282190b9766703a8c0e762e6824,kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
7. Manage apiserver with systemd
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
8. Start and enable at boot
systemctl daemon-reload
systemctl start kube-apiserver
systemctl enable kube-apiserver
2.1.2 Deploy kube-controller-manager
1. Create the config file
cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect=true \\
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--bind-address=127.0.0.1 \\
--allocate-node-cidrs=true \\
--cluster-cidr=10.244.0.0/16 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--cluster-signing-duration=87600h0m0s"
EOF
# Config notes:
--kubeconfig: kubeconfig used to connect to the apiserver
--leader-elect: enable leader election when several instances run (HA)
--cluster-signing-cert-file/--cluster-signing-key-file: CA that auto-signs kubelet certificates; must match the apiserver's
2. Generate the kubeconfig file
Generate the kube-controller-manager certificate:
# switch to the working directory
cd ~/TLS/k8s
# create the certificate signing request
cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
# generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
Generate the kubeconfig file (the following are shell commands; run them directly in a terminal):
KUBE_CONFIG="/opt/kubernetes/cfg/kube-controller-manager.kubeconfig"
KUBE_APISERVER="https://10.255.32.21:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-controller-manager \
--client-certificate=./kube-controller-manager.pem \
--client-key=./kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-controller-manager \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
3. Manage controller-manager with systemd
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
4. Start and enable at boot
systemctl daemon-reload
systemctl start kube-controller-manager
systemctl enable kube-controller-manager
2.1.3 Deploy kube-scheduler
1. Create the config file
cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--leader-elect \\
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
--bind-address=127.0.0.1"
EOF
Parameter notes:
--kubeconfig: kubeconfig used to connect to the apiserver
--leader-elect: enable leader election when several instances run (HA)
2. Generate the kubeconfig file
Generate the kube-scheduler certificate:
# switch to the working directory
cd ~/TLS/k8s
# create the certificate signing request
cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
# generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
Generate the kubeconfig file (shell commands; run them directly in a terminal):
KUBE_CONFIG="/opt/kubernetes/cfg/kube-scheduler.kubeconfig"
KUBE_APISERVER="https://10.255.32.21:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-scheduler \
--client-certificate=./kube-scheduler.pem \
--client-key=./kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-scheduler \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
3. Manage scheduler with systemd
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
4. Start and enable at boot
systemctl daemon-reload
systemctl start kube-scheduler
systemctl enable kube-scheduler
5. Check cluster status
Generate the certificate kubectl uses to connect to the cluster:
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
Generate the kubeconfig file:
mkdir /root/.kube
KUBE_CONFIG="/root/.kube/config"
KUBE_APISERVER="https://10.255.32.21:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials cluster-admin \
--client-certificate=./admin.pem \
--client-key=./admin-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=cluster-admin \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
Check the component status with kubectl:
kubectl get cs
# output like the following means the Master components are running normally
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
6. Authorize the kubelet-bootstrap user to request certificates
kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap
7. Enable kubectl command completion
yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash >/etc/bash_completion.d/kubectl
2.2 Node deployment
Since machines are limited, the master also serves as a node; keep working on the master.
2.2.1 Create the working directories and copy the binaries
# create the working dirs on every worker node (already present on the master; needed on newly added nodes)
mkdir -p /opt/kubernetes/{bin,cfg,ssl,logs}
# copy files from the unpacked k8s server tarball
cd /root/kubernetes/server/bin
cp kubelet kube-proxy /opt/kubernetes/bin
2.2.2 Deploy kubelet
1. Create the config file
cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--hostname-override=k8s-master01 \\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet-config.yml \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=rancher/pause-amd64:3.0"
EOF
Parameter notes:
--hostname-override: display name, unique within the cluster
--network-plugin: enable CNI
--kubeconfig: empty path; generated automatically and later used to connect to the apiserver
--bootstrap-kubeconfig: used on first start to request a certificate from the apiserver
--config: configuration file
--cert-dir: directory for generated kubelet certificates
--pod-infra-container-image: image of the container that manages the Pod network
2. Configuration file
cat > /opt/kubernetes/cfg/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
3. Generate the bootstrap kubeconfig kubelet uses to join the cluster for the first time
KUBE_CONFIG="/opt/kubernetes/cfg/bootstrap.kubeconfig"
KUBE_APISERVER="https://10.255.32.21:6443" # apiserver IP:PORT
TOKEN="ffdc3282190b9766703a8c0e762e6824" # must match token.csv
# generate the kubelet bootstrap kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials "kubelet-bootstrap" \
--token=${TOKEN} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user="kubelet-bootstrap" \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
# the kubelet-bootstrap clusterrolebinding was already created in step 6 of 2.1.3; skip this if it exists
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
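A quick sanity check (assuming the TOKEN variable from above is still set): the token baked into bootstrap.kubeconfig must be one the apiserver knows about, i.e. present in token.csv:
grep "${TOKEN}" /opt/kubernetes/cfg/token.csv # should print the kubelet-bootstrap line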
4. Manage kubelet with systemd
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
5. Start and enable at boot
systemctl daemon-reload
systemctl start kubelet
systemctl enable kubelet
6. Approve the kubelet CSR so the node joins the cluster
# view certificate requests
[root@k8s-master01 ~]# kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-CAu0vQBFLE-5xD8tDcHaI3sVIcZfoBAgsXwAngmWhA0 26m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
# approve the request
[root@k8s-master01 ~]# kubectl certificate approve node-csr-CAu0vQBFLE-5xD8tDcHaI3sVIcZfoBAgsXwAngmWhA0
# view nodes (NotReady because the network plugin is not deployed yet)
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady <none> 20s v1.20.9
2.2.3 Deploy kube-proxy
1. Create the config file
cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--logtostderr=false \\
--v=2 \\
--log-dir=/opt/kubernetes/logs \\
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
EOF
2. Configuration file
cat > /opt/kubernetes/cfg/kube-proxy-config.yml << EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-master01
clusterCIDR: 10.244.0.0/16 # pod network CIDR, matching --cluster-cidr on kube-controller-manager
EOF
3. Generate the kube-proxy.kubeconfig file
Generate the kube-proxy certificate:
# switch to the working directory
cd ~/TLS/k8s
# create the certificate signing request
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
# generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
Generate the kubeconfig file:
KUBE_CONFIG="/opt/kubernetes/cfg/kube-proxy.kubeconfig"
KUBE_APISERVER="https://10.255.32.21:6443"
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-credentials kube-proxy \
--client-certificate=./kube-proxy.pem \
--client-key=./kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=${KUBE_CONFIG}
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=${KUBE_CONFIG}
kubectl config use-context default --kubeconfig=${KUBE_CONFIG}
4. Manage kube-proxy with systemd
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
5. Start and enable at boot
systemctl daemon-reload
systemctl start kube-proxy
systemctl enable kube-proxy
2.2.4 Deploy the network component (Flannel/Calico)
1. Flannel
1.1 Fetch and unpack the CNI plugins
mkdir -p /opt/cni/bin && cd /opt/cni
wget https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz
tar xf cni-plugins-linux-amd64-v0.8.6.tgz -C /opt/cni/bin
1.2 Deploy the CNI network
Note: if kube-flannel.yml cannot be fetched, a copy can be pasted from my Youdao note:
https://note.youdao.com/s/NQwpevUg
# upstream source: https://github.com/flannel-io/flannel/blob/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 20m v1.20.9
2. Calico
# download from
https://docs.projectcalico.org/
# then simply apply it
kubectl apply -f calico.yaml
2.2.5 Authorize the apiserver to access kubelet
Use case: commands such as kubectl logs
cd /opt/cni/
cat > apiserver-to-kubelet-rbac.yaml << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF
# create it
kubectl apply -f apiserver-to-kubelet-rbac.yaml
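To confirm the authorization took effect, kubectl logs against a kube-system pod should now work; for example against a flannel pod (assuming kube-flannel.yml labels its pods app=flannel, as the upstream manifest does):
kubectl logs -n kube-system -l app=flannel --tail=5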
2.2.6 Add new nodes
1. Copy the deployed Node files to the new node
# on the Master, copy the Worker Node files to the new nodes 10.255.32.23/24
scp -r /opt/kubernetes root@10.255.32.23:/opt/
scp -r /opt/cni root@10.255.32.23:/opt/
scp -r /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@10.255.32.23:/usr/lib/systemd/system
scp /opt/kubernetes/ssl/ca.pem root@10.255.32.23:/opt/kubernetes/ssl
2. Delete the kubelet certificate and kubeconfig on the new node
# these files were generated when the source kubelet registered and must be regenerated per node
rm -f /opt/kubernetes/cfg/kubelet.kubeconfig
rm -f /opt/kubernetes/ssl/kubelet*
3. Change the hostname in the config files
vim /opt/kubernetes/cfg/kubelet.conf
--hostname-override=k8s-node01
vim /opt/kubernetes/cfg/kube-proxy-config.yml
hostnameOverride: k8s-node01
4. Start and enable at boot
systemctl daemon-reload
systemctl start kubelet kube-proxy
systemctl enable kubelet kube-proxy
5. Approve the new Node's kubelet CSR on the Master
# view certificate requests
[root@k8s-master01 ~]# kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-BBiZR8U5DYj0I7SyceOHQpV2VUNpyCbIGMzGG21bwsE 86m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Approved,Issued
node-csr-NT_Ffl2L2vmvzbBkwIi5509LZA9qHFnlqGoKKoDHpQ0 3m32s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
node-csr-ShbH7nW5VGNq7n75jfg3htx40-PjU2TYCQJKeXQBvaw 3m24s kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
# approve the pending requests (repeat for the other pending CSR)
kubectl certificate approve node-csr-NT_Ffl2L2vmvzbBkwIi5509LZA9qHFnlqGoKKoDHpQ0
6. Check Node status
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 55m v1.20.9
k8s-node01 Ready <none> 55s v1.20.9
k8s-node02 Ready <none> 44s v1.20.9
3. Deploy CoreDNS
CoreDNS provides Service name resolution inside the cluster.
coredns.yaml
[root@k8s-master01 cni]# kubectl create -f coredns.yaml
[root@k8s-master01 cni]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-6cc56c94bd-kj9v9 1/1 Running 0 2m11s
kube-system kube-flannel-ds-5v24c 1/1 Running 0 19m
kube-system kube-flannel-ds-c6kc5 1/1 Running 0 12m
kube-system kube-flannel-ds-klk5h 1/1 Running 0 12m
DNS resolution test:
[root@k8s-master01 cni]# kubectl run -it --rm dns-test --image=busybox:1.28.4 sh
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server: 10.0.0.2
Address 1: 10.0.0.2 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local
4. Scale out to multiple masters (high-availability architecture)

| Role | IP |
|---|---|
| k8s-master01 | 10.255.32.21 |
| k8s-master02 | 10.255.32.22 (newly added master) |
| k8s-node01 | 10.255.32.23 |
| k8s-node02 | 10.255.32.24 |
| VIP (on both masters) | 10.255.32.55 |
4.1 Deploy the Master02 node
Copy all the K8s files over from Master1, then adjust the server IP and hostname and start the services.
1. Copy files from master01 to master02
# Note: etcd is also deployed on master-02 here, so the etcd certs need no copying; if a new master is not part of the etcd cluster, copy the etcd cert files too (directory: /opt/etcd/ssl)
scp -r /opt/kubernetes root@10.255.32.22:/opt
scp -r /opt/cni root@10.255.32.22:/opt/
scp /usr/lib/systemd/system/kube* root@10.255.32.22:/usr/lib/systemd/system
scp /usr/bin/kubectl root@10.255.32.22:/usr/bin
scp -r ~/.kube root@10.255.32.22:~
2. Delete certificate files
# delete the kubelet certificate and kubeconfig
rm -f /opt/kubernetes/cfg/kubelet.kubeconfig
rm -f /opt/kubernetes/ssl/kubelet*
3. Change the IP and hostname in the config files
# point the apiserver, kubelet, and kube-proxy configs at the local IP
vim /opt/kubernetes/cfg/kube-apiserver.conf
...
--bind-address=10.255.32.22 \
--advertise-address=10.255.32.22 \
...
vim /opt/kubernetes/cfg/kubelet.conf
--hostname-override=k8s-master02
vim /opt/kubernetes/cfg/kube-proxy-config.yml
hostnameOverride: k8s-master02
4. Start and enable at boot
systemctl daemon-reload
systemctl start kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
systemctl enable kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy
5. Check cluster status
# point the kubeconfig at the local master's IP
vim ~/.kube/config
...
server: https://10.255.32.22:6443
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
6. Approve the kubelet CSR
# view certificate requests
kubectl get csr
NAME AGE SIGNERNAME REQUESTOR CONDITION
node-csr-L_AJOkQRqDzdvAVZ24zQXTEjeZeryRWXU4DRS2kwVw0 14m kubernetes.io/kube-apiserver-client-kubelet kubelet-bootstrap Pending
# approve the request
kubectl certificate approve node-csr-L_AJOkQRqDzdvAVZ24zQXTEjeZeryRWXU4DRS2kwVw0
# view nodes
kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 34h v1.20.9
k8s-master02 Ready <none> 2m v1.20.9
k8s-node01 Ready <none> 33h v1.20.9
k8s-node02 Ready <none> 33h v1.20.9
4.2 Deploy the Nginx+Keepalived HA load balancer (haproxy works too)
Note: to save machines this is co-located with the K8s Master nodes; it can also be deployed outside the k8s cluster, as long as nginx can reach the apiservers.
1. Install the packages (master and backup)
yum install epel-release -y
yum install nginx keepalived -y
2. Nginx configuration file (identical on master and backup). Note the quoted 'EOF' below: it keeps the $ variables in log_format from being expanded while the file is written.
cat > /etc/nginx/nginx.conf << 'EOF'
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# Layer-4 load balancing for the two Master apiservers
stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;

    upstream k8s-apiserver {
        server 10.255.32.21:6443; # Master1 APISERVER IP:PORT
        server 10.255.32.22:6443; # Master2 APISERVER IP:PORT
    }

    server {
        listen 16443; # nginx shares the Master nodes, so this port must not be 6443 or it clashes with the apiserver
        proxy_pass k8s-apiserver;
    }
}

http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    server {
        listen 80 default_server;
        server_name _;

        location / {
        }
    }
}
EOF
# start and enable at boot
systemctl daemon-reload
systemctl start nginx
systemctl enable nginx
# if nginx fails with the error below, the stream module needs installing
nginx: [emerg] unknown directive "stream" in /etc/nginx/nginx.conf:13
yum install nginx-mod-stream -y
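After installing the module, validate the config before relying on it:
nginx -t # expect: syntax is ok / test is successful
systemctl restart nginx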
3. keepalived configuration file
# Only the following differ between master and backup; everything else is identical:
router_id NGINX_MASTER # NGINX_BACKUP on the backup server
state MASTER # BACKUP on the backup server
priority 100 # 90 on the backup server
cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id NGINX_MASTER # NGINX_BACKUP on the backup server
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER # BACKUP on the backup server
    interface eth0 # change to your actual NIC name
    virtual_router_id 51 # VRRP router ID; unique per VRRP instance
    priority 100 # priority; set to 90 on the backup server
    advert_int 1 # VRRP heartbeat advertisement interval, default 1s
    authentication {
        auth_type PASS
        auth_pass test
    }
    # virtual IP
    virtual_ipaddress {
        10.255.32.55/24
    }
    track_script {
        check_nginx
    }
}
EOF
Create the nginx status-check script referenced above (on both master and backup). Note the quoted 'EOF': it prevents $(…), $$ and $count from being expanded while the file is written.
Note: keepalived decides failover from the script's exit code (0 = healthy, non-zero = failed)
cat > /etc/keepalived/check_nginx.sh << 'EOF'
#!/bin/bash
count=$(ss -antp | grep 16443 | egrep -cv "grep|$$")

if [ "$count" -eq 0 ]; then
    exit 1
else
    exit 0
fi
EOF
chmod +x /etc/keepalived/check_nginx.sh
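The script can be exercised by hand before handing it to keepalived; with nginx listening on 16443 it should exit 0:
bash /etc/keepalived/check_nginx.sh; echo $? # 0 = healthy, 1 = nginx down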
4. Start and enable at boot
systemctl daemon-reload
systemctl start keepalived
systemctl enable keepalived
5. Check keepalived status
# on the master, ip addr should show the VIP 10.255.32.55 bound to the NIC
ip addr
6. Nginx+Keepalived failover test
Stop nginx on the master node and check that the VIP floats to the backup: run systemctl stop nginx on the Nginx Master, then confirm with ip addr on the Nginx Backup that the VIP is now bound, as sketched below.
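A minimal version of that test, assuming the eth0 interface from keepalived.conf:
# on the Nginx master:
systemctl stop nginx
# on the Nginx backup, the VIP should appear within a few seconds:
ip addr show eth0 | grep 10.255.32.55
# restore the master afterwards:
systemctl start nginx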
7. Test the load balancer
From any node in the K8s cluster, curl the K8s version through the VIP:
[root@k8s-node02 ~]# curl -k https://10.255.32.55:16443/version
{
"major": "1",
"minor": "20",
"gitVersion": "v1.20.9",
"gitCommit": "7a576bc3935a6b555e33346fd73ad77c925e9e4a",
"gitTreeState": "clean",
"buildDate": "2021-07-15T20:56:38Z",
"goVersion": "go1.15.14",
"compiler": "gc",
"platform": "linux/amd64"
}
The K8s version info comes back correctly, so the load balancer is working. The request flows: curl -> vip(nginx) -> apiserver
8. Point every node at the VIP
Run on all nodes (masters included):
sed -i 's#10.255.32.21:6443#10.255.32.55:16443#' /opt/kubernetes/cfg/*
systemctl restart kubelet kube-proxy
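Optionally confirm nothing still points at the old apiserver address before trusting the switch:
grep -rn "10.255.32.21:6443" /opt/kubernetes/cfg/ || echo "all configs now use the VIP"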
Check node status: kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready <none> 3h21m v1.20.9
k8s-master02 Ready <none> 26m v1.20.9
k8s-node01 Ready <none> 172m v1.20.9
k8s-node02 Ready <none> 167m v1.20.9