Kubernetes Cluster Binary Deployment

1. The Three Official Deployment Methods

  • minikube

    Minikube is a tool that quickly runs a single-node Kubernetes locally; it is intended only for trying out Kubernetes or for day-to-day development. Deployment guide: https://kubernetes.io/docs/setup/minikube/

  • kubeadm

    Kubeadm is also a tool; it provides kubeadm init and kubeadm join for quickly deploying a Kubernetes cluster. Deployment guide: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm

  • Binary packages

    Recommended: download the release binaries from the official site and deploy each component by hand to assemble a Kubernetes cluster. Download: https://github.com/kubernetes/kubernetes/releases

2. Etcd Cluster Deployment

  • Binary package download

    https://github.com/etcd-io/etcd/releases

  • Check cluster status

    /opt/etcd/bin/etcdctl \
    --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem \
    --endpoints="https://192.168.0.x:2379,https://192.168.0.x:2379,https://192.168.0.x:2379" \
    cluster-health


Master:192.168.100.23/24 kube-apiserver kube-controller-manager kube-scheduler etcd

Node01:192.168.100.25/24 kubelet kube-proxy docker flannel etcd

Node02:192.168.100.26/24 kubelet kube-proxy docker flannel etcd
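The walkthrough below assumes CentOS 7 hosts with the firewall, SELinux, and swap disabled on every machine; a minimal prep sketch (not part of the original steps):

systemctl stop firewalld && systemctl disable firewalld
setenforce 0		#also set SELINUX=disabled in /etc/selinux/config
swapoff -a		#also comment out the swap entry in /etc/fstab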

  • Operations on master
//Upload the etcd-cert.sh script to this directory
[root@master01 ~]# mkdir k8s
[root@master01 ~]# cd k8s/
[root@master01 k8s]# mkdir etcd-cert
[root@master01 k8s]# cd etcd-cert/
[root@master01 etcd-cert]# ls
etcd-cert.sh

//Download the certificate generation tools (the downloads are slow; keep the files once you have them)
[root@master01 k8s]# vi cfssl.sh
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo

[root@master01 k8s]# bash cfssl.sh
[root@master01 k8s]# ls /usr/local/bin/
cfssl  cfssl-certinfo  cfssljson
//Note: cfssl generates certificates, cfssljson produces certificate files from cfssl's JSON output, and cfssl-certinfo displays certificate information

//Start creating certificates
//Define the CA configuration
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"     
        ]  
      } 
    }         
  }
}
EOF 

//Create the CA certificate signing request
cat > ca-csr.json <<EOF 
{   
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

//Generate the CA certificate, producing ca-key.pem and ca.pem
[root@master01 etcd-cert]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
2021/01/19 21:11:22 [INFO] generating a new CA key and certificate from CSR
2021/01/19 21:11:22 [INFO] generate received request
2021/01/19 21:11:22 [INFO] received CSR
2021/01/19 21:11:22 [INFO] generating key: rsa-2048
2021/01/19 21:11:22 [INFO] encoded CSR
2021/01/19 21:11:22 [INFO] signed certificate with serial number 176275894424461504938665135521258544312699640135
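//To sanity-check the new CA, cfssl-certinfo (installed above) can dump its fields; verify the CN is "etcd CA":
cfssl-certinfo -cert ca.pem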

//List the three etcd node IPs so communication between the nodes can be verified
cat > server-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
    "192.168.100.23",
    "192.168.100.25",
    "192.168.100.26"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
EOF

//Generate the etcd server certificate, producing server-key.pem and server.pem
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
2021/01/19 21:21:35 [INFO] generate received request
2021/01/19 21:21:35 [INFO] received CSR
2021/01/19 21:21:35 [INFO] generating key: rsa-2048
2021/01/19 21:21:36 [INFO] encoded CSR
2021/01/19 21:21:36 [INFO] signed certificate with serial number 389073837428609116588941804122733225394346663709
2021/01/19 21:21:36 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").

[root@master01 k8s]# ls			//upload the etcd release tarball here first
etcd-cert  etcd-v3.3.10-linux-amd64.tar.gz
[root@master01 k8s]# tar zxf etcd-v3.3.10-linux-amd64.tar.gz 
[root@master01 k8s]# mkdir /opt/etcd/{cfg,bin,ssl} -p	//create the working directory
[root@master01 k8s]# mv etcd-v3.3.10-linux-amd64/etcd etcd-v3.3.10-linux-amd64/etcdctl /opt/etcd/bin/
[root@master01 k8s]# cp etcd-cert/*.pem /opt/etcd/ssl/
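The etcd.sh script invoked below is not listed in this article. In outline it takes a node name, an IP, and the peer list, renders /opt/etcd/cfg/etcd plus a systemd unit, and starts the service; a minimal sketch of the idea (variable names are illustrative):

#!/bin/bash
# Usage: etcd.sh <name> <ip> <peer-list>  -- sketch only
ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3
cat > /opt/etcd/cfg/etcd <<EOF
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="${ETCD_NAME}=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
# ...then write /usr/lib/systemd/system/etcd.service, which loads this file
# and passes the TLS flags (--cert-file, --key-file, --peer-cert-file, etc.)
# pointing at /opt/etcd/ssl, and finally:
# systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd

The generated config matches what is shown for node01 and node02 further down.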

//This blocks, waiting for the other nodes to join
[root@master01 k8s]# bash etcd.sh etcd01 192.168.100.23 etcd02=https://192.168.100.25:2380,etcd03=https://192.168.100.26:2380

//Open another session; you will see the etcd process is already running
[root@master01 k8s]# ps -ef | grep etcd


//Copy the certificates to the other nodes
[root@master01 k8s]# scp -r /opt/etcd/ root@192.168.100.25:/opt/
[root@master01 k8s]# scp -r /opt/etcd/ root@192.168.100.26:/opt/

//Copy the startup unit file to the other nodes
[root@master01 k8s]# scp /usr/lib/systemd/system/etcd.service root@192.168.100.25:/usr/lib/systemd/system/
[root@master01 k8s]# scp /usr/lib/systemd/system/etcd.service root@192.168.100.26:/usr/lib/systemd/system/

//Modify on node01
[root@node01 ~]# cat /opt/etcd/cfg/etcd 
#[Member]
ETCD_NAME="etcd02"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.100.25:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.25:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.25:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.25:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.100.23:2380,etcd02=https://192.168.100.25:2380,etcd03=https://192.168.100.26:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"


//Modify on node02
[root@node02 ~]# cat /opt/etcd/cfg/etcd 
#[Member]
ETCD_NAME="etcd03"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.100.26:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.100.26:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.100.26:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.100.26:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.100.23:2380,etcd02=https://192.168.100.25:2380,etcd03=https://192.168.100.26:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"

//On master (the script blocks again until the peers join), then start etcd on node01 and node02
[root@master01 k8s]# bash etcd.sh etcd01 192.168.100.23 etcd02=https://192.168.100.25:2380,etcd03=https://192.168.100.26:2380

[root@node01 ~]# systemctl start etcd
[root@node02 ~]# systemctl start etcd

//Check cluster health
[root@master01 etcd-cert]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379" cluster-health
member 1b6a7f863cc5a9f1 is healthy: got healthy result from https://192.168.100.25:2379
member 41d6e8f99d4cbe53 is healthy: got healthy result from https://192.168.100.23:2379
member b687505a248ebdd0 is healthy: got healthy result from https://192.168.100.26:2379
cluster is healthy
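etcd v3.3 also ships the v3 API; an equivalent health check, assuming the same certificate paths (note the v3 flag names differ from the v2 command above):

ETCDCTL_API=3 /opt/etcd/bin/etcdctl \
--cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379" \
endpoint health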

3. Docker Engine Deployment

  • Deploy the Docker engine on all node machines
1. Install dependency packages
yum -y install yum-utils device-mapper-persistent-data lvm2

2. Configure the Aliyun mirror repository
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

3. Install docker-ce
#install the community edition
yum install -y docker-ce
#start docker and enable it at boot
systemctl start docker
systemctl enable docker

4. Registry mirror acceleration
cd /etc/docker/
tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://13iz3smq.mirror.aliyuncs.com"]
}
EOF

//Get your own accelerator address from the Aliyun console
systemctl daemon-reload
systemctl restart docker
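//Confirm the mirror took effect (the accelerator address above is account-specific; substitute your own):
docker info | grep -A 1 'Registry Mirrors'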

5. Network tuning
vim /etc/sysctl.conf 

net.ipv4.ip_forward=1

sysctl -p

systemctl restart docker
systemctl restart network

4. Flannel Network Configuration

  • Write the allocated subnet into etcd for flannel to use

//On master

[root@master01 ~]# cd /opt/etcd/ssl/
[root@master01 ssl]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'

//View the written configuration
[root@master01 ssl]# /opt/etcd/bin/etcdctl --ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem --endpoints="https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379" get /coreos.com/network/config
{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}

//Copy to all node machines (flannel only needs to be deployed on the nodes)

[root@node02 ~]# ls
anaconda-ks.cfg  flannel-v0.10.0-linux-amd64.tar.gz
[root@node02 ~]# tar zxvf flannel-v0.10.0-linux-amd64.tar.gz 
flanneld
mk-docker-opts.sh
README.md
[root@node02 ~]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
[root@node02 ~]# mv mk-docker-opts.sh flanneld /opt/kubernetes/bin/
[root@node02 ~]# bash flannel.sh https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379
[root@node02 ~]# vim /usr/lib/systemd/system/docker.service
Add EnvironmentFile=/run/flannel/subnet.env and the $DOCKER_NETWORK_OPTIONS variable on the ExecStart line below it:


[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity


[root@node02 ~]# systemctl daemon-reload
[root@node02 ~]# systemctl restart docker
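//After the restart, docker0 should sit inside the flannel subnet assigned to this node; a quick check (addresses differ per node):
cat /run/flannel/subnet.env
ip addr show flannel.1
ip addr show docker0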

[root@node02 ~]# docker run -it centos:7 bash
[root@8ed8fce70ed3 /]# yum -y install net-tools
[root@8ed8fce70ed3 /]# ifconfig 
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.8.2  netmask 255.255.255.0  broadcast 172.17.8.255
        ether 02:42:ac:11:08:02  txqueuelen 0  (Ethernet)
        RX packets 16689  bytes 12688265 (12.1 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 8305  bytes 451815 (441.2 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        loop  txqueuelen 1  (Local Loopback)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@8ed8fce70ed3 /]# ping 172.17.44.2			//container address on node01
PING 172.17.44.2 (172.17.44.2) 56(84) bytes of data.
64 bytes from 172.17.44.2: icmp_seq=1 ttl=62 time=0.403 ms
64 bytes from 172.17.44.2: icmp_seq=2 ttl=62 time=1.14 ms
64 bytes from 172.17.44.2: icmp_seq=3 ttl=62 time=1.18 ms
64 bytes from 172.17.44.2: icmp_seq=4 ttl=62 time=1.18 ms
64 bytes from 172.17.44.2: icmp_seq=5 ttl=62 time=1.14 ms

The containers on the two nodes can ping each other, confirming cross-node connectivity through flannel.

5. Deploying the Master Components

  • On master, generate the certificates for the api-server
[root@master01 k8s]# unzip master.zip
[root@master01 k8s]# mkdir /opt/kubernetes/{cfg,bin,ssl} -p
[root@master01 k8s]# mkdir k8s-cert
[root@master01 k8s]# cd k8s-cert/
//Upload k8s-cert.sh into this directory; its contents are as follows

cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
      	    "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

#-----------------------

# The hosts list must cover every address the apiserver may be reached at:
# 192.168.100.23 = master1, 192.168.100.24 = master2, 192.168.100.100 = VIP,
# 192.168.100.21 = lb (master), 192.168.100.22 = lb (backup).
# Inline //-style comments are not valid JSON and must not appear in the file itself.
cat > server-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
      "10.0.0.1",
      "127.0.0.1",
      "192.168.100.23",
      "192.168.100.24",
      "192.168.100.100",
      "192.168.100.21",
      "192.168.100.22",
      "kubernetes",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

#-----------------------

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

#-----------------------

cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy


//Generate the k8s certificates
[root@master01 k8s-cert]# bash k8s-cert.sh 
[root@master01 k8s-cert]# ls *pem
admin-key.pem  ca-key.pem  kube-proxy-key.pem  server-key.pem
admin.pem      ca.pem      kube-proxy.pem      server.pem

[root@master01 k8s-cert]# cp ca*pem server*pem /opt/kubernetes/ssl/
[root@master01 k8s-cert]# cd ..


//Extract the kubernetes server tarball
[root@master01 k8s]# tar zxf kubernetes-server-linux-amd64.tar.gz
[root@master01 k8s]# cd kubernetes/server/bin/
[root@master01 bin]# ls
apiextensions-apiserver              kube-apiserver                      kubectl                kube-scheduler.docker_tag
cloud-controller-manager             kube-apiserver.docker_tag           kubelet                kube-scheduler.tar
cloud-controller-manager.docker_tag  kube-apiserver.tar                  kube-proxy             mounter
cloud-controller-manager.tar         kube-controller-manager             kube-proxy.docker_tag
hyperkube                            kube-controller-manager.docker_tag  kube-proxy.tar
kubeadm                              kube-controller-manager.tar         kube-scheduler

[root@master01 bin]# cp kube-apiserver kubectl kube-scheduler kube-controller-manager /opt/kubernetes/bin/
[root@master01 bin]# cd /root/k8s/
[root@master01 k8s]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
f2cba80e422d7960cfa3ed628275d29a
[root@master01 k8s]# vi /opt/kubernetes/cfg/token.csv
f2cba80e422d7960cfa3ed628275d29a,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
//Format: token,user,uid,"group"; the user must be kubelet-bootstrap to match the RBAC binding created later

//With the binaries, token, and certificates in place, start the apiserver
[root@master01 k8s]# bash apiserver.sh 192.168.100.23 https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379
[root@master01 k8s]# ps aux | grep kube

//View the configuration file
[root@master01 k8s]# cat /opt/kubernetes/cfg/kube-apiserver 

KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379 \
--bind-address=192.168.100.23 \
--secure-port=6443 \
--advertise-address=192.168.100.23 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"

//The HTTPS port being listened on
[root@master01 k8s]# netstat -anpt | grep 6443
tcp        0      0 192.168.100.23:6443     0.0.0.0:*               LISTEN      19811/kube-apiserve 
tcp        0      0 192.168.100.23:52510    192.168.100.23:6443     ESTABLISHED 19811/kube-apiserve 
tcp        0      0 192.168.100.23:6443     192.168.100.23:52510    ESTABLISHED 19811/kube-apiserve 
[root@master01 k8s]# netstat -anpt | grep 8080
tcp        0      0 127.0.0.1:8080          0.0.0.0:*               LISTEN      19811/kube-apiserve 


//Start the scheduler service
[root@master01 k8s]# sh scheduler.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@master01 k8s]# ps aux | grep kube
root      19811  3.5 16.6 399212 311260 ?       Ssl  17:29   0:13 /opt/kubernetes/bin/kube-apiserver --logtostderr=true --v=4 --etcd-servers=https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379 --bind-address=192.168.100.23 --secure-port=6443 --advertise-address=192.168.100.23 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --kubelet-https=true --enable-bootstrap-token-auth --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem
root      19875  1.0  1.0  44556 19532 ?        Ssl  17:35   0:00 /opt/kubernetes/bin/kube-scheduler --logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect
root      19885  0.0  0.0 112676   984 pts/1    S+   17:35   0:00 grep --color=auto kube


[root@master01 k8s]# sh controller-manager.sh 127.0.0.1
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.


[root@master01 k8s]# /opt/kubernetes/bin/kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
etcd-1               Healthy   {"health":"true"}   
etcd-2               Healthy   {"health":"true"}   
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   
[root@master01 k8s]# cat /opt/kubernetes/cfg/token.csv 
f2cba80e422d7960cfa3ed628275d29a,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

6. Deploying the Node Components

//On master
//Copy kubelet and kube-proxy to the node machines
[root@master01 bin]# scp kubelet kube-proxy root@192.168.100.25:/opt/kubernetes/bin/
[root@master01 bin]# scp kubelet kube-proxy root@192.168.100.26:/opt/kubernetes/bin/

//On node01 (copy node.zip to /root, then extract it)
[root@node01 ~]# unzip node.zip

//On master
[root@master01 k8s]# mkdir kubeconfig
[root@master01 k8s]# cd kubeconfig/
//Copy in the kubeconfig.sh file and rename it
[root@master01 kubeconfig]# mv kubeconfig.sh kubeconfig
[root@master01 kubeconfig]# vim kubeconfig 
----------------delete the following section from the script----------------------------------------------------------------------
# Create the TLS Bootstrapping Token
#BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
BOOTSTRAP_TOKEN=f2cba80e422d7960cfa3ed628275d29a

cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF


//Get the token (the first field below)
[root@master01 ~]# cat /opt/kubernetes/cfg/token.csv 
f2cba80e422d7960cfa3ed628275d29a,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
//In the kubeconfig script, set the token to this ID
# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
  --token=f2cba80e422d7960cfa3ed628275d29a \
  --kubeconfig=bootstrap.kubeconfig
//Set the PATH variable (it can also be written into /etc/profile)
[root@master01 kubeconfig]# export PATH=$PATH:/opt/kubernetes/bin/
[root@master01 kubeconfig]# kubectl get cs

//Generate the kubeconfig files
[root@master01 kubeconfig]# bash kubeconfig 192.168.100.23 /root/k8s/k8s-cert/
Cluster "kubernetes" set.
User "kubelet-bootstrap" set.
Context "default" created.
Switched to context "default".
Cluster "kubernetes" set.
User "kube-proxy" set.
Context "default" created.
Switched to context "default".
[root@master01 kubeconfig]# ls
bootstrap.kubeconfig  kubeconfig  kube-proxy.kubeconfig

//Copy the kubeconfig files to the node machines
[root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.100.25:/opt/kubernetes/cfg/
[root@master01 kubeconfig]# scp bootstrap.kubeconfig kube-proxy.kubeconfig root@192.168.100.26:/opt/kubernetes/cfg/

//Create the bootstrap role binding that authorizes certificate-signing requests to the apiserver (critical)
[root@master01 kubeconfig]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created

//On node01
[root@node01 ~]# bash kubelet.sh 192.168.100.25
//Check that the kubelet service has started
[root@node01 ~]# ps aux | grep kube

//On master
//Check for node01's CSR request
[root@master01 kubeconfig]# kubectl get csr
[root@master01 kubeconfig]# kubectl certificate approve node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A
//Check the certificate status again
[root@master01 kubeconfig]# kubectl get csr
NAME                                                   AGE     REQUESTOR           CONDITION
node-csr-NOI-9vufTLIqJgMWq4fHPNPHKbjCXlDGHptj7FqTa8A   8m56s   kubelet-bootstrap   Approved,Issued		//already approved to join the cluster
//View the cluster nodes; node01 has joined successfully
[root@master01 kubeconfig]# kubectl get node
NAME              STATUS   ROLES    AGE    VERSION
192.168.100.25    Ready    <none>   118s   v1.12.3
//On node01, start the proxy service
[root@node01 ~]# bash proxy.sh 192.168.100.25
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
[root@node01 ~]# systemctl status kube-proxy.service 

Deploying node02

//On node01
//Copy the working /opt/kubernetes directory to the other node, then modify it there
[root@node01 ~]# scp -r /opt/kubernetes/ root@192.168.100.26:/opt/
//Copy the kubelet and kube-proxy service files to node02 as well
[root@node01 ~]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service root@192.168.100.26:/usr/lib/systemd/system/
//On node02, make the modifications
//First delete the copied certificates; node02 will request its own shortly
[root@node02 ~]# cd /opt/kubernetes/ssl/
[root@node02 ssl]# rm -rf *
//Modify the three configuration files: kubelet, kubelet.config, and kube-proxy

[root@node02 ssl]# cd ../cfg/
[root@node02 cfg]# vim kubelet

KUBELET_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.100.26 \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet.config \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"

[root@node02 cfg]# vim kubelet.config 
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.100.26
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
[root@node02 cfg]# vim kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \
--v=4 \
--hostname-override=192.168.100.26 \
--cluster-cidr=10.0.0.0/24 \
--proxy-mode=ipvs \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
//Start the services
[root@node02 cfg]# systemctl start kubelet.service 
[root@node02 cfg]# systemctl enable kubelet.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@node02 cfg]# systemctl start kube-proxy.service 
[root@node02 cfg]# systemctl enable kube-proxy.service 
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-proxy.service to /usr/lib/systemd/system/kube-proxy.service.
//On master, check the pending request

[root@master01 k8s]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr-OaH9HpIKh6AKlfdjEKm4C6aJ0UT_1YxNaa70yEAxnsU   15s   kubelet-bootstrap   Pending
//Approve the request to join the cluster
[root@master01 k8s]# kubectl certificate approve node-csr-OaH9HpIKh6AKlfdjEKm4C6aJ0UT_1YxNaa70yEAxnsU
certificatesigningrequest.certificates.k8s.io/node-csr-OaH9HpIKh6AKlfdjEKm4C6aJ0UT_1YxNaa70yEAxnsU approved
//View the nodes in the cluster
[root@master01 k8s]# kubectl get node
NAME              STATUS   ROLES    AGE   VERSION
192.168.100.25    Ready    <none>   21h   v1.12.3
192.168.100.26    Ready    <none>   37s   v1.12.3
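The ROLES column shows <none> because a binary deployment does not label nodes automatically; if you want roles displayed, an optional labeling step:

kubectl label node 192.168.100.25 node-role.kubernetes.io/node=
kubectl label node 192.168.100.26 node-role.kubernetes.io/node=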

7. Deploying the master02 Components

Start from a working single-master deployment.

  • master02
//First stop the firewall and SELinux on master02
//On master01
//Copy the kubernetes directory to master02
[root@master01 k8s]# scp -r /opt/kubernetes/ root@192.168.100.24:/opt

//Copy the three component unit files from master: kube-apiserver.service, kube-controller-manager.service, kube-scheduler.service
[root@master01 k8s]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@192.168.100.24:/usr/lib/systemd/system/


//On master02
//Modify the IPs in the kube-apiserver configuration file
[root@master02 ~]# cd /opt/kubernetes/cfg/
[root@master02 cfg]# vim kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.100.23:2379,https://192.168.100.25:2379,https://192.168.100.26:2379 \
--bind-address=192.168.100.24 \
--secure-port=6443 \
--advertise-address=192.168.100.24 \
--allow-privileged=true \
...		//the remaining options are unchanged


//Important: master02 must have the etcd certificates
//Copy the existing etcd certificates from master01 for master02 to use
[root@master01 k8s]# scp -r /opt/etcd/ root@192.168.100.24:/opt/

//Start the three component services on master02
[root@master02 cfg]# systemctl start kube-apiserver.service 
[root@master02 cfg]# systemctl start kube-controller-manager.service 
[root@master02 cfg]# systemctl start kube-scheduler.service 
//Add the environment variable
[root@master02 cfg]# vim /etc/profile
#append at the end
export PATH=$PATH:/opt/kubernetes/bin/
[root@master02 cfg]# source /etc/profile
[root@master02 cfg]# kubectl get node
NAME              STATUS   ROLES    AGE     VERSION
192.168.100.25    Ready    <none>   2d12h   v1.12.3
192.168.100.26    Ready    <none>   38h     v1.12.3

8. Load Balancing

  • nginx01, nginx02
//Install the nginx service; first copy the nginx.sh and keepalived.conf files to the home directory
[root@nginx01 ~]# systemctl stop firewalld.service 
[root@nginx01 ~]# setenforce 0
[root@nginx01 ~]# vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
[root@nginx01 ~]# yum install nginx -y
//Add layer-4 (stream) forwarding
[root@nginx01 ~]# vim /etc/nginx/nginx.conf 

events {
    worker_connections  1024;
}

stream {
    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /var/log/nginx/k8s-access.log  main;

    upstream k8s-apiserver {
        server 192.168.100.23:6443;
        server 192.168.100.24:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}

http {
[root@nginx01 ~]# systemctl start nginx
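//Before relying on the proxy, validate the config and confirm the listener (a suggested check, not in the original):
nginx -t
netstat -anpt | grep nginx		#expect a LISTEN entry on 0.0.0.0:6443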

//Deploy the keepalived service
[root@nginx01 ~]# yum install keepalived -y
//Modify the configuration file
[root@nginx01 ~]# cp keepalived.conf /etc/keepalived/keepalived.conf 
cp: overwrite '/etc/keepalived/keepalived.conf'? yes
//Note: lb01 is the MASTER; its configuration is as follows:

! Configuration File for keepalived 
 
global_defs { 
   # Recipient email addresses 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   # Sender address for notifications 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER 
    interface ens33
    virtual_router_id 51 # VRRP router ID; must be unique per instance 
    priority 100    # Priority; the backup server uses 90 
    advert_int 1    # VRRP advertisement interval (default 1 s) 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.100.100/24 
    } 
    track_script {
        check_nginx
    } 
}

//Note: lb02 is the BACKUP; its configuration is as follows:
! Configuration File for keepalived 
 
global_defs { 
   # Recipient email addresses 
   notification_email { 
     acassen@firewall.loc 
     failover@firewall.loc 
     sysadmin@firewall.loc 
   } 
   # Sender address for notifications 
   notification_email_from Alexandre.Cassen@firewall.loc  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state BACKUP 
    interface ens33
    virtual_router_id 51 # VRRP router ID; must be unique per instance 
    priority 90    # Priority; set to 90 on the backup server 
    advert_int 1    # VRRP advertisement interval (default 1 s) 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.100.100/24 
    } 
    track_script {
        check_nginx
    } 
}

[root@nginx01 ~]# vim /etc/nginx/check_nginx.sh
#!/bin/bash
# Count running nginx processes, excluding the grep itself and this script's own PID ($$)
count=$(ps -ef |grep nginx |egrep -cv "grep|$$")

# If nginx is down, stop keepalived so the VIP fails over to the backup
if [ "$count" -eq 0 ];then
    systemctl stop keepalived
fi
[root@nginx01 ~]# chmod +x /etc/nginx/check_nginx.sh
[root@nginx01 ~]# systemctl start keepalived
[root@nginx01 ~]# systemctl start keepalived

//View lb01's address info
[root@nginx01 ~]# ip a
//Verify VIP failover (run pkill nginx on lb01, then ip a on lb02)
//Recovery (on lb01, start the nginx service first, then keepalived)
//nginx web root: /usr/share/nginx/html

//Now point the node configuration files at the unified VIP (bootstrap.kubeconfig, kubelet.kubeconfig, kube-proxy.kubeconfig)
[root@node01 cfg]# vim /opt/kubernetes/cfg/bootstrap.kubeconfig
[root@node01 cfg]# vim /opt/kubernetes/cfg/kubelet.kubeconfig
[root@node01 cfg]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
//Change the server line in each file to the VIP
server: https://192.168.100.100:6443
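A sed one-liner can make the same change in all three files at once (a sketch, assuming the old server lines point at the master01 address 192.168.100.23):

cd /opt/kubernetes/cfg
sed -i 's#server: https://192.168.100.23:6443#server: https://192.168.100.100:6443#' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig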
[root@node01 cfg]# systemctl restart kubelet.service 
[root@node01 cfg]# systemctl restart kube-proxy.service 
//Self-check once the replacement is done
[root@node01 cfg]# grep 100 *
bootstrap.kubeconfig:    server: https://192.168.100.100:6443
kubelet.kubeconfig:    server: https://192.168.100.100:6443
kube-proxy.kubeconfig:    server: https://192.168.100.100:6443
//On lb01, view nginx's k8s access log
[root@nginx01 ~]# tail /var/log/nginx/k8s-access.log 

//On master01
//Test creating a pod
[root@master01 ~]# kubectl run nginx --image=nginx
//Check the status
[root@master01 ~]# kubectl get pods

//Note the log-access issue: kubectl logs is refused until anonymous access is authorized
[root@master01 ~]# kubectl logs nginx-dbddb74b8-nf9sk
[root@master01 ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created

//View the pod network
[root@master01 ~]# kubectl get pods -o wide
//The pod can be curled directly from a node on the matching subnet
[root@node01 cfg]# curl 172.17.31.3
//The access generates a log entry
//Back on master01
[root@master01 ~]# kubectl logs nginx-dbddb74b8-nf9sk

9. Dashboard

//On master01
//Create the dashboard working directory
[root@master01 k8s]# mkdir dashboard
[root@master01 k8s]# cd dashboard
//Upload the official manifests to this directory
[root@master01 dashboard]# ls
dashboard-configmap.yaml   dashboard-rbac.yaml    dashboard-service.yaml
dashboard-controller.yaml  dashboard-secret.yaml  k8s-admin.yaml

[root@master01 dashboard]# kubectl create -f dashboard-rbac.yaml 
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
[root@master01 dashboard]# kubectl create -f dashboard-secret.yaml 
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-key-holder created
[root@master01 dashboard]# kubectl create -f dashboard-configmap.yaml 
configmap/kubernetes-dashboard-settings created
[root@master01 dashboard]# kubectl create -f dashboard-controller.yaml 
serviceaccount/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
[root@master01 dashboard]# kubectl create -f dashboard-service.yaml 
service/kubernetes-dashboard created

//After creation, check the resources in the kube-system namespace
[root@master01 ~]# kubectl get pods -n kube-system
NAME                                    READY   STATUS    RESTARTS   AGE
kubernetes-dashboard-65f974f565-t6dqn   1/1     Running   0          2d11h
//Find out how to access it
[root@master01 ~]# kubectl get pods,svc -n kube-system
NAME                                        READY   STATUS    RESTARTS   AGE
pod/kubernetes-dashboard-65f974f565-t6dqn   1/1     Running   0          2d11h

NAME                           TYPE       CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
service/kubernetes-dashboard   NodePort   10.0.0.223   <none>        443:30001/TCP   2d11h
//Access any node IP at the NodePort (Firefox can open it directly)
https://192.168.100.25:30001/


//Generate the login token
[root@master01 dashboard]# kubectl create -f k8s-admin.yaml 
//List the secrets and note the token name
[root@master01 dashboard]# kubectl get secret -n kube-system
//View the token
[root@master01 dashboard]# kubectl describe secret dashboard-admin-token-qctfr -n kube-system

The problem of other browsers being unable to access the dashboard

[root@master01 dashboard]# vim dashboard-cert.sh
cat > dashboard-csr.json <<EOF
{
   "CN": "Dashboard",
   "hosts": [],
   "key": {
       "algo": "rsa",
       "size": 2048
   },
   "names": [
       {
           "C": "CN",
           "L": "BeiJing",
           "ST": "BeiJing"
       }
   ]
}
EOF

K8S_CA=$1
cfssl gencert -ca=$K8S_CA/ca.pem -ca-key=$K8S_CA/ca-key.pem -config=$K8S_CA/ca-config.json -profile=kubernetes dashboard-csr.json | cfssljson -bare dashboard
kubectl delete secret kubernetes-dashboard-certs -n kube-system
kubectl create secret generic kubernetes-dashboard-certs --from-file=./ -n kube-system

[root@master01 dashboard]# bash dashboard-cert.sh /root/k8s/k8s-cert/
[root@master01 dashboard]# vim dashboard-controller.yaml
args:
          # PLATFORM-SPECIFIC ARGS HERE
          - --auto-generate-certificates
          - --tls-key-file=dashboard-key.pem
          - --tls-cert-file=dashboard.pem
//Redeploy (note: if apply does not take effect, delete the resources with kubectl delete first, then apply again)
[root@master01 dashboard]# kubectl apply -f dashboard-controller.yaml 
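//A suggested wait before retrying the browser:
kubectl -n kube-system rollout status deployment/kubernetes-dashboard
kubectl get pods -n kube-system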