Binary Deployment of Kubernetes 1.25.5 (Part 2)

4. Installing the etcd cluster

4.1 etcd01

tar xf etcd-v3.5.6-linux-amd64.tar.gz 
mv etcd-v3.5.6-linux-amd64/etcd* /usr/local/bin/
​
# Create the etcd configuration file
mkdir -p /etc/etcd/
​
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.10.10.30:2380'
listen-client-urls: 'https://10.10.10.30:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.10.10.30:2380'
advertise-client-urls: 'https://10.10.10.30:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd01=https://10.10.10.30:2380,etcd02=https://10.10.10.31:2380,etcd03=https://10.10.10.32:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

4.2 etcd02

tar xf etcd-v3.5.6-linux-amd64.tar.gz 
cp etcd-v3.5.6-linux-amd64/etcd* /usr/local/bin/
​
# Create the etcd configuration file
mkdir -p /etc/etcd/
​
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd02'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.10.10.31:2380'
listen-client-urls: 'https://10.10.10.31:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.10.10.31:2380'
advertise-client-urls: 'https://10.10.10.31:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd01=https://10.10.10.30:2380,etcd02=https://10.10.10.31:2380,etcd03=https://10.10.10.32:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

4.3 etcd03

tar xf etcd-v3.5.6-linux-amd64.tar.gz 
cp etcd-v3.5.6-linux-amd64/etcd* /usr/local/bin/
​
# Create the etcd configuration file
mkdir -p /etc/etcd/
​
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd03'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.10.10.32:2380'
listen-client-urls: 'https://10.10.10.32:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.10.10.32:2380'
advertise-client-urls: 'https://10.10.10.32:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd01=https://10.10.10.30:2380,etcd02=https://10.10.10.31:2380,etcd03=https://10.10.10.32:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/etcd/ssl/etcd-server.pem'
  key-file: '/etc/etcd/ssl/etcd-server-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/etcd/ssl/ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF

4.4 Create the service unit and start the service

cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
​
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
​
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
​
# Start the service
systemctl daemon-reload
systemctl enable --now etcd
​
​
## Configure etcdctl to use the v3 API
cat > /etc/profile.d/etcdctl.sh <<EOF
#!/bin/bash
export ETCDCTL_API=3
export ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
export ETCDCTL_CACERT=/etc/etcd/ssl/ca.pem
export ETCDCTL_CERT=/etc/etcd/ssl/etcd-client.pem
export ETCDCTL_KEY=/etc/etcd/ssl/etcd-client-key.pem
EOF
# Load it into the current shell
source /etc/profile

# Verify cluster membership
etcdctl member list --write-out='table'
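Beyond listing members, it helps to confirm that every endpoint is healthy and that a leader has been elected. A minimal check, assuming the etcdctl environment variables from the profile script above are loaded (the explicit --endpoints simply queries all three members at once):

# Health of each member
etcdctl endpoint health \
  --endpoints=https://10.10.10.30:2379,https://10.10.10.31:2379,https://10.10.10.32:2379 \
  --write-out=table

# Status of each member; the IS LEADER column shows which node currently leads
etcdctl endpoint status \
  --endpoints=https://10.10.10.30:2379,https://10.10.10.31:2379,https://10.10.10.32:2379 \
  --write-out=table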

5. Installing containerd

5.1 Install the containerd container runtime

Configure the runtime to pull images from the private registry.

# Unpack the containerd release
tar xf cri-containerd-cni-1.6.13-linux-amd64.tar.gz -C /

# Upgrade libseccomp to fix the following runc error:
# runc: symbol lookup error: runc: undefined symbol: seccomp_notify_respond
# Cause: the libseccomp shipped with CentOS 7 (2.3.x) is too old for the runc bundled with
# recent containerd releases; version 2.4 or newer is required.
wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/libseccomp-2.5.2-1.el8.x86_64.rpm
rpm -Uvh libseccomp-2.5.2-1.el8.x86_64.rpm
​
​
# Create the default configuration file
mkdir /etc/containerd
containerd config default > /etc/containerd/config.toml

# Change the sandbox image to the private registry copy: 10.10.10.2/k8s/pause:3.6
# Pull it from the Aliyun mirror first, then retag and push it to the private registry
docker pull registry.aliyuncs.com/k8sxio/pause:3.6
docker tag registry.aliyuncs.com/k8sxio/pause:3.6 10.10.10.2/k8s/pause:3.6
docker push 10.10.10.2/k8s/pause:3.6
# In config.toml, replace sandbox_image under [plugins."io.containerd.grpc.v1.cri"]:
[plugins."io.containerd.grpc.v1.cri"]
  sandbox_image = "10.10.10.2/k8s/pause:3.6"
# Also configure the private registry address in config.toml
# (see the Harbor HTTPS registry setup for the certificates)
​
# Start containerd
systemctl enable --now containerd.service

# Quick test
crictl info

# Distribute the config to the remaining nodes
all="node30 node31 node32 node20 node21"
for i in $all; do
  scp /etc/containerd/config.toml $i:/etc/containerd/config.toml
done
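After distributing config.toml, it is worth confirming on each node that containerd picked up the new config and can pull the sandbox image from the private registry. A short sketch, assuming the Harbor registry certificate is trusted by the node:

# Restart containerd so the copied config takes effect, then pull the pause image through CRI
systemctl restart containerd
crictl pull 10.10.10.2/k8s/pause:3.6
crictl images | grep pause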

6. Installing the Kubernetes cluster

6.1 Install kube-apiserver

# Create the ServiceAccount signing key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub


# Distribute the keys to the master nodes
master="node30 node31 node32"
for i in $master; do
  scp /etc/kubernetes/pki/{sa.pub,sa.key} $i:/etc/kubernetes/pki/
done
# Create the service unit
a=`ifconfig ens33 | awk 'NR==2{print $2}'`  # local IP address
​
cat > /etc/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
      --v=2  \\
      --logtostderr=true  \\
      --allow-privileged=true  \\
      --bind-address=$a  \\
      --secure-port=6443  \\
      --advertise-address=$a \\
      --service-cluster-ip-range=10.200.0.0/16  \\
      --service-node-port-range=30000-42767  \\
      --etcd-servers=https://10.10.10.30:2379,https://10.10.10.31:2379,https://10.10.10.32:2379 \\
      --etcd-cafile=/etc/etcd/ssl/ca.pem  \\
      --etcd-certfile=/etc/etcd/ssl/etcd-client.pem  \\
      --etcd-keyfile=/etc/etcd/ssl/etcd-client-key.pem  \\
      --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
      --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem  \\
      --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem  \\
      --kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem  \\
      --kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem  \\
      --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
      --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
      --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
      --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
      --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
      --authorization-mode=Node,RBAC  \\
      --enable-bootstrap-token-auth=true  \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/proxy-ca.pem  \\
      --proxy-client-cert-file=/etc/kubernetes/pki/proxy.pem  \\
      --proxy-client-key-file=/etc/kubernetes/pki/proxy-key.pem  \\
      --requestheader-allowed-names=aggregator  \\
      --requestheader-group-headers=X-Remote-Group  \\
      --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
      --requestheader-username-headers=X-Remote-User  
​
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
​
[Install]
WantedBy=multi-user.target
EOF
​
​
# Start the service
systemctl enable --now kube-apiserver.service
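A quick way to confirm each apiserver is up is to hit its health endpoint. On a default RBAC setup, /healthz, /livez, /readyz and /version are readable without client credentials (via the system:public-info-viewer binding), so a simple check like the following should print "ok":

# -k skips server certificate verification; use --cacert /etc/kubernetes/pki/ca.pem for a strict check
curl -k https://127.0.0.1:6443/healthz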

Note: in the heredoc above, each line break is written as \\ because the first backslash escapes the second; this leaves a single literal \ (the line-continuation character) in the generated unit file, even though the shell would otherwise strip it from an unquoted EOF heredoc.

  • --logtostderr: log to stderr

  • --v: log verbosity level

  • --log-dir: log directory

  • --etcd-servers: etcd cluster endpoints

  • --bind-address: listen address

  • --secure-port: HTTPS secure port

  • --advertise-address: address advertised to the rest of the cluster

  • --allow-privileged: allow privileged containers

  • --service-cluster-ip-range: Service virtual IP range

  • --enable-admission-plugins: admission control plugins

  • --authorization-mode: authorization modes; enables RBAC and Node authorization

  • --enable-bootstrap-token-auth: enable the TLS bootstrap mechanism

  • --token-auth-file: bootstrap token file

  • --service-node-port-range: default port range for NodePort Services

  • --kubelet-client-xxx: client certificate the apiserver uses to talk to kubelets

  • --tls-xxx-file: apiserver HTTPS certificates

  • Required since 1.20: --service-account-issuer, --service-account-signing-key-file

  • --etcd-xxxfile: certificates for connecting to the etcd cluster

  • --audit-log-xxx: audit log settings

  • Aggregation layer settings: --requestheader-client-ca-file, --proxy-client-cert-file, --proxy-client-key-file, --requestheader-allowed-names, --requestheader-extra-headers-prefix, --requestheader-group-headers, --requestheader-username-headers, --enable-aggregator-routing

6.2 API server high availability

# Install keepalived and haproxy
yum install -y keepalived haproxy

# keepalived config on ha01
vim /etc/keepalived/keepalived.conf
​
global_defs {
   .....
   router_id node22
#  vrrp_strict
   .....
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
​
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51  # set a matching router id on both nodes
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       10.10.10.100 dev ens33 label ens33:0  # the VIP
    }
    track_script {
      chk_apiserver
    }
}
​
​
# keepalived config on ha02
vim /etc/keepalived/keepalived.conf
​
global_defs {
   .....
   router_id node23
#  vrrp_strict
   .....
}
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}
​
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51  # set a matching router id on both nodes
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
       10.10.10.100 dev ens33 label ens33:0  # the VIP
    }
    track_script {
      chk_apiserver
    }
}
​
​
​
# haproxy configuration file (identical on ha01 and ha02)
cat > /etc/haproxy/haproxy.cfg <<EOF
global
   maxconn 2000
   ulimit-n 16384
   log 127.0.0.1 local0 err
   stats timeout 30s
​
defaults
   log global
   mode http
   option httplog
   timeout connect 5000
   timeout client 50000
   timeout server 50000
   timeout http-request 15s
   timeout http-keep-alive 15s
​
​
frontend monitor-in
   bind *:33305
   mode http
   option httplog
   monitor-uri /monitor
​
frontend k8s-master
   bind 0.0.0.0:8443
   bind 127.0.0.1:8443
   mode tcp
   option tcplog
   tcp-request inspect-delay 5s
   default_backend k8s-master
​
​
backend k8s-master
   mode tcp
   option tcplog
   option tcp-check
   balance roundrobin
   default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
   server k8s-master01 10.10.10.30:6443 check
   server k8s-master02 10.10.10.31:6443 check
   server k8s-master03 10.10.10.32:6443 check
EOF
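Before starting haproxy it is cheap to let it validate the generated configuration; this is a standard haproxy option, not specific to this setup:

# Syntax-check the config; exits non-zero and prints the offending line on errors
haproxy -c -f /etc/haproxy/haproxy.cfg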
​
## Health-check script for the haproxy process
vim /etc/keepalived/check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 3)
do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "haproxy is down, stopping keepalived"
    systemctl stop keepalived
    exit 1
else
    exit 0
fi


# Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
# Start the services
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
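Once both services are running, the MASTER node should hold the VIP and haproxy should forward port 8443 to the three apiservers. A rough verification, assuming ha01 currently owns the VIP:

# The VIP should appear as ens33:0 on the current MASTER
ip addr show ens33 | grep 10.10.10.100

# haproxy monitor page, then the apiserver health endpoint through the VIP
curl http://10.10.10.100:33305/monitor
curl -k https://10.10.10.100:8443/healthz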

6.3 Install kube-controller-manager

# Generate the service unit
cat > /etc/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
      --v=2 \\
      --logtostderr=true \\
      --root-ca-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
      --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
      --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
      --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
      --leader-elect=true \\
      --use-service-account-credentials=true \\
      --node-monitor-grace-period=40s \\
      --node-monitor-period=5s \\
      --pod-eviction-timeout=2m0s \\
      --controllers=*,bootstrapsigner,tokencleaner \\
      --allocate-node-cidrs=true \\
      --cluster-cidr=10.100.0.0/16 \\
      --requestheader-client-ca-file=/etc/kubernetes/pki/proxy-ca.pem \\
      --node-cidr-mask-size=24
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
​
​
# Start the service
systemctl enable --now kube-controller-manager.service
• --kubeconfig: kubeconfig used to connect to the apiserver
• --leader-elect: automatic leader election when multiple replicas run (HA)
• --cluster-signing-cert-file / --cluster-signing-key-file: the CA used to automatically issue kubelet certificates; must match the apiserver's CA
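With --leader-elect=true the three controller-manager instances coordinate through a Lease object in the kube-system namespace. Once kubectl is configured on master01 (see 6.4), checking that Lease is a simple way to confirm one instance has become leader:

# The HOLDER column shows which controller-manager instance currently leads
kubectl -n kube-system get lease kube-controller-manager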

6.4 Install kube-scheduler

# Generate the service unit
cat > /etc/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
      --v=2 \\
      --logtostderr=true \\
      --leader-elect=true \\
      --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
​
# Start the service
systemctl enable --now kube-scheduler.service

# Configure the kubectl client on master01:
# copy admin.kubeconfig to ~/.kube/config
mkdir /root/.kube/ -p
cp /etc/kubernetes/admin.kubeconfig /root/.kube/config

# Verify cluster status; if all master components are healthy the output looks like the sample below
kubectl get cs
  • --kubeconfig: kubeconfig used to connect to the apiserver

  • --leader-elect: automatic leader election when multiple replicas run (HA)
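For reference, a healthy control plane prints something along these lines (illustrative only; exact messages vary by version, and `kubectl get cs` is deprecated but still functional in 1.25):

NAME                 STATUS    MESSAGE                         ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true","reason":""}
etcd-1               Healthy   {"health":"true","reason":""}
etcd-2               Healthy   {"health":"true","reason":""}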

6.5 Install kubelet on all nodes

# Create the working directory
mkdir /opt/pki/kubernetes/kubelet -p
cd /opt/pki/kubernetes/kubelet

# Generate a random bootstrap token (6-character id + 16-character secret)
a=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c6`
b=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c16`
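The bootstrap token the apiserver accepts must match the pattern [a-z0-9]{6}.[a-z0-9]{16}. The commands above emit lowercase hex, so they satisfy it, but a quick sanity check is cheap:

# Prints "token format OK" if $a.$b is a valid bootstrap token
echo "$a.$b" | grep -Eq '^[a-z0-9]{6}\.[a-z0-9]{16}$' && echo "token format OK"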
​
# Generate the bootstrap secret and RBAC bindings
cat > bootstrap.secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-$a
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: $a
  token-secret: $b
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      -""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      -"*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF
​
​
​
# Generate the bootstrap kubeconfig
kubectl config set-cluster kubernetes  \
--certificate-authority=../ca/ca.pem   \
--embed-certs=true   \
--server=https://10.10.10.100:8443   \
--kubeconfig=bootstrap-kubelet.kubeconfig
​
kubectl config set-credentials tls-bootstrap-token-user  \
--token=$a.$b \
--kubeconfig=bootstrap-kubelet.kubeconfig
​
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes   \
--user=tls-bootstrap-token-user  \
--kubeconfig=bootstrap-kubelet.kubeconfig
​
kubectl config use-context tls-bootstrap-token-user@kubernetes  \
--kubeconfig=bootstrap-kubelet.kubeconfig
​
kubectl apply -f bootstrap.secret.yaml
​
​
# Copy the kubeconfig to the master and worker nodes
all="node30 node31 node32 node20 node21"
for i in $all; do
    scp /opt/pki/kubernetes/kubelet/bootstrap-kubelet.kubeconfig $i:/etc/kubernetes;
done
​
​
# Generate the kubelet options file (run on every node)
name=`ifconfig ens33 | awk 'NR==2{print $2}'`
hostname=`hostname`
kubernetes_ssl_dir="/etc/kubernetes/pki"
​
cat > /etc/kubernetes/kubelet.conf << EOF
KUBELET_OPTS="--hostname-override=${hostname} \\
--container-runtime=remote \\
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet-config.yml \\
--cert-dir=${kubernetes_ssl_dir}"
EOF
​
​
## Generate kubelet-config.yml
cat > /etc/kubernetes/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${name}
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 10.200.0.2   # adjust to match the service CIDR (the .0.2 address of service-cluster-ip-range)
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: ${kubernetes_ssl_dir}/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
​
## Generate the kubelet.service unit
cat > /usr/lib/systemd/system/kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
After=containerd.service
​
[Service]
EnvironmentFile=/etc/kubernetes/kubelet.conf
ExecStart=/usr/local/bin/kubelet $KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
​
[Install]
WantedBy=multi-user.target
EOF
​
​
# Copy the unit file to the master and worker nodes
all="node30 node31 node32 node20 node21"
for i in $all; do
    scp /usr/lib/systemd/system/kubelet.service $i:/usr/lib/systemd/system/;
done
## Reload systemd and enable the service (on every node)
systemctl daemon-reload
systemctl enable --now kubelet
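With the auto-approval ClusterRoleBindings created earlier, each kubelet's bootstrap CSR should be approved automatically and the node should register itself. A quick check from master01 (your output will differ):

# CSRs should show Approved,Issued; nodes stay NotReady until the CNI plugin is installed in section 7
kubectl get csr
kubectl get nodes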

6.6 Install kube-proxy on all nodes

# Run on a master node
# Create the working directory
mkdir /opt/pki/kubernetes/kube-proxy/ -p
cd /opt/pki/kubernetes/kube-proxy/


# Generate the kube-proxy service account, RBAC binding and token secret
kubectl -n kube-system create serviceaccount kube-proxy
kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
cat > kube-proxy-secret.yml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "kube-proxy"
type: kubernetes.io/service-account-token
EOF
kubectl apply -f kube-proxy-secret.yml
​
JWT_TOKEN=$(kubectl -n kube-system get secret/kube-proxy \
--output=jsonpath='{.data.token}' | base64 -d)
​
​
kubectl config set-cluster kubernetes   \
--certificate-authority=/etc/kubernetes/pki/ca.pem    \
--embed-certs=true    \
--server=https://10.10.10.100:8443    \
--kubeconfig=kube-proxy.kubeconfig
​
kubectl config set-credentials kubernetes    \
--token=${JWT_TOKEN}   \
--kubeconfig=kube-proxy.kubeconfig
​
kubectl config set-context kubernetes    \
--cluster=kubernetes   \
--user=kubernetes   \
--kubeconfig=kube-proxy.kubeconfig
​
kubectl config use-context kubernetes   \
--kubeconfig=kube-proxy.kubeconfig
​
# Copy the kubeconfig to all nodes
all="node30 node31 node32 node20 node21"
for i in $all; do
  scp  /opt/pki/kubernetes/kube-proxy/kube-proxy.kubeconfig $i:/etc/kubernetes
done
​
# Generate the service unit on every node
cat > /etc/systemd/system/kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
​
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --config=/etc/kubernetes/kube-proxy.conf \\
  --v=2
​
Restart=always
RestartSec=10s
​
[Install]
WantedBy=multi-user.target
EOF
​
​
# Generate the configuration file on every node
a=`ifconfig ens33 | awk 'NR==2{print $2}'`
cat > /etc/kubernetes/kube-proxy.conf <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: $a
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 10.100.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "$a"
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
​
# Start the service
systemctl daemon-reload
systemctl enable --now kube-proxy.service
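kube-proxy reports its effective proxy mode on the metrics port configured above (127.0.0.1:10249), which makes it easy to confirm that IPVS is actually in use; ipvsadm, if installed, shows the generated virtual servers:

# Should print "ipvs"
curl -s 127.0.0.1:10249/proxyMode

# List the IPVS virtual servers created for Services (requires the ipvsadm package)
ipvsadm -Ln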
​
​
[root@node30 ~]# kubectl get nodes 
NAME     STATUS   ROLES    AGE     VERSION
node20   Ready    <none>   6m58s   v1.25.5
node21   Ready    <none>   6m48s   v1.25.5
node30   Ready    <none>   16m     v1.25.5
node31   Ready    <none>   8m50s   v1.25.5
node32   Ready    <none>   8m59s   v1.25.5
​
# Label the nodes
kubectl label nodes node30  node-role.kubernetes.io/master=master01
kubectl label nodes node31  node-role.kubernetes.io/master=master02
kubectl label nodes node32  node-role.kubernetes.io/master=master03
kubectl label nodes node20  node-role.kubernetes.io/node=monion01
kubectl label nodes node21  node-role.kubernetes.io/node=monion02

7. Installing the Calico network plugin

# Download calico.yaml from:
# https://github.com/projectcalico/calico/tree/v3.24.5/manifests
vim calico.yaml
# Set the pod CIDR to match the cluster CIDR configured earlier
            - name: CALICO_IPV4POOL_CIDR
              value: "10.100.0.0/16"
​
kubectl apply -f calico.yaml
​
# Check that the pods are running
[root@node30 ~]# kubectl get pods -n kube-system -o wide 
​
NAME                                     READY   STATUS    RESTARTS   AGE     IP               NODE     NOMINATED NODE   READINESS GATES
calico-kube-controllers-d8fb77c4-2zz75   1/1     Running   0          2m50s   10.100.238.129   node31   <none>           <none>
calico-node-7jk2d                        1/1     Running   0          2m50s   10.10.10.30      node30   <none>           <none>
calico-node-rf9fw                        1/1     Running   0          2m50s   10.10.10.32      node32   <none>           <none>
calico-node-tknjl                        1/1     Running   0          2m50s   10.10.10.21      node21   <none>           <none>
calico-node-vmpbw                        1/1     Running   0          2m50s   10.10.10.31      node31   <none>           <none>
calico-node-wwh96                        1/1     Running   0          2m50s   10.10.10.20      node20   <none>           <none>
​
# Install the calicoctl client
chmod +x calicoctl-linux-amd64 && mv calicoctl-linux-amd64 /usr/local/bin/calicoctl
# Create its configuration file
​
mkdir /etc/calico -p
cat >/etc/calico/calicoctl.cfg <<EOF
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "kubernetes"
  kubeconfig: "/root/.kube/config"
EOF
​
​
# Verify
[root@node30 ~]# calicoctl node status
​
Calico process is running.
​
IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
+--------------+-------------------+-------+----------+-------------+
| 10.10.10.20  | node-to-node mesh | up    | 07:43:31 | Established |
| 10.10.10.21  | node-to-node mesh | up    | 07:43:31 | Established |
| 10.10.10.31  | node-to-node mesh | up    | 07:43:30 | Established |
| 10.10.10.32  | node-to-node mesh | up    | 07:43:30 | Established |
+--------------+-------------------+-------+----------+-------------+
​
IPv6 BGP status
No IPv6 peers found.
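calicoctl can also confirm that the IP pool picked up the CIDR set in calico.yaml; a minimal check using the config file created above:

# The CIDR column should show 10.100.0.0/16
calicoctl get ippool -o wide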

8. Installing CoreDNS

# Download: https://github.com/coredns/deployment/blob/master/kubernetes/coredns.yaml.sed

vim coredns.yaml
# Edit the following settings
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {  # set the cluster domain here
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . 114.114.114.114 {   # upstream DNS server
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
  clusterIP: 10.200.0.2  # must match the clusterDNS address in kubelet-config.yml
​
# Deploy
kubectl apply -f coredns.yaml 
​
[root@node30 ~]# kubectl get pod -A |grep core
​
​
kube-system   coredns-54b8c69d54-2rnms                   1/1     Running   0          3h8m
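To confirm that cluster DNS actually resolves Service names, a throwaway pod can run nslookup against the kubernetes Service (busybox:1.28 is assumed to be pullable by your nodes; any image with nslookup works):

# Should resolve to the cluster IP of the kubernetes Service (10.200.0.1 in this setup)
kubectl run dns-test --image=busybox:1.28 --restart=Never --rm -it -- nslookup kubernetes.default.svc.cluster.local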

9. Installing metrics-server

# Download:
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.1/components.yaml

# Edit the configuration
vim components.yaml
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        - --kubelet-insecure-tls
        - --requestheader-client-ca-file=/etc/kubernetes/pki/proxy-ca.pem
        - --requestheader-username-headers=X-Remote-User
        - --requestheader-group-headers=X-Remote-Group
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
        image: 10.10.10.2/metrics-server/metrics-server:v0.6.2
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
        - mountPath: /etc/kubernetes/pki
          name: ca-ssl
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
      - name: ca-ssl
        hostPath:
          path: /etc/kubernetes/pki
# Deploy
kubectl apply -f components.yaml
# Verify:
[root@node30 ~]# kubectl get pod -n kube-system metrics-server-5476c98f46-rhjt2
NAME                              READY   STATUS    RESTARTS   AGE
metrics-server-5476c98f46-rhjt2   1/1     Running   0          66s
​
[root@node30 ~]# kubectl top nodes 
NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
node20   219m         2%     1605Mi          41%       
node21   172m         2%     1564Mi          40%       
node30   650m         8%     2241Mi          58%       
node31   696m         8%     2632Mi          68%       
node32   604m         7%     2647Mi          69%

10. Installing the Dashboard

Dashboard is a web-based Kubernetes user interface. You can use Dashboard to deploy containerized applications to the cluster, troubleshoot them, and manage cluster resources. It gives you an overview of the applications running in the cluster and lets you create or modify Kubernetes resources (Deployments, Jobs, DaemonSets, and so on). For example, you can scale a Deployment, initiate a rolling update, restart a Pod, or deploy a new application using a wizard.

Dashboard also shows the state of the Kubernetes resources in the cluster and any errors that have occurred.

10.1 Deploy the Dashboard UI

Dashboard is not deployed by default. It can be deployed as follows:

# Download recommended.yaml from GitHub and prepend the following to the top of the file
vim recommended.yaml

# Create an admin-user ServiceAccount for logging in
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
  
  
kubectl apply -f recommended.yaml
​
​
# Create a login token for admin-user
kubectl -n kubernetes-dashboard create token admin-user

# Save the token string that is printed

# Change the Service type so the Dashboard is reachable from outside the cluster
kubectl edit services kubernetes-dashboard -n kubernetes-dashboard
​
Change: type: ClusterIP
To:     type: NodePort

# After saving, query the Service again; a random NodePort will have been assigned
kubectl get services kubernetes-dashboard -n kubernetes-dashboard
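If you prefer to script it, the assigned NodePort can be read directly with jsonpath:

# Prints only the NodePort number
kubectl -n kubernetes-dashboard get service kubernetes-dashboard -o jsonpath='{.spec.ports[0].nodePort}'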
​
# Browse to https://10.10.10.30:port

# Sign in with the token recorded above