kubernetes 主备集群部署

kubernetes 安装
1 安装环境
# 操作系统版本
# cat /etc/redhat-release
CentOS Linux release 7.8.2003 (Core)
# 内核版本
# uname -a
Linux master.k8s.com 3.10.0-1127.el7.x86_64 #1 SMP Tue Mar 31 23:36:51 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
主机名						ip地址
master.k8s.com			10.0.0.7
node1.k8s.com			10.0.0.17
node2.k8s.com			10.0.0.27
2 修改 master 和 node 节点hosts 文件
# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
10.0.0.7 master.k8s.com
10.0.0.17 node1.k8s.com
10.0.0.27 node2.k8s.com
3 安装 chrony 实现所有主机的时间同步
# yum -y install chrony
# systemctl start chronyd
# sed -i -e '/^server/s/^/#/' -e '1a server ntp.aliyun.com iburst' /etc/chrony.conf
# systemctl restart chronyd
# timedatectl set-timezone Asia/Shanghai
[root@master ~]# timedatectl
      Local time: Thu 2020-12-10 21:07:34 CST
  Universal time: Thu 2020-12-10 13:07:34 UTC
        RTC time: Thu 2020-12-10 13:07:34
       Time zone: Asia/Shanghai (CST, +0800)
     NTP enabled: yes
NTP synchronized: yes
 RTC in local TZ: no
      DST active: n/a
4 关闭 master 和 node 节点 selinux 和 firewalld
# systemctl stop firewalld && systemctl disable firewalld
# sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# getenforce

关闭 swap
# swapoff -a
永久关闭 swap,注释掉 swap 那行
# vi /etc/fstab 
#UUID=7ca8f821-027c-44d6-8eac-a4a83b409087 swap                    swap    defaults        0 0
5 配置系统内核参数和调优
配置 sysctl 内核参数
# cat > /etc/sysctl.conf <<EOF
vm.max_map_count=262144
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
生效文件
# sysctl -p
调整 ulimit 大小
# cat /etc/security/limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 655360
* hard nproc 655360
* soft  memlock  unlimited
* hard memlock  unlimited
6 master 和 node 安装docker
# 安装依赖包
# yum install -y yum-utils device-mapper-persistent-data lvm2
# 添加docker软件包的yum源
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# 关闭测试版本list(只显示稳定版)
# yum-config-manager --disable docker-ce-edge
# yum-config-manager --disable docker-ce-test
# 更新yum包索引
# yum makecache fast

# 安装docker
# 直接安装Docker CE
# yum install docker-ce

# 安装指定版本的Docker CE
# yum list docker-ce --showduplicates|sort -r  #找到需要安装的    
# yum -y install docker-ce-19.03.9   
# 启动docker
# systemctl start docker && systemctl enable docker

# 配置 docker 使用阿里云加速
# vi /etc/docker/daemon.json 
{  
 "registry-mirrors": ["https://v16stybc.mirror.aliyuncs.com"]
}
# systemctl daemon-reload && systemctl restart docker
7 master 和 node 节点安装 k8s 工具(kubelet、kubeadm、kubectl)
# 更换为阿里云官方源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# 安装 k8s 工具
# 如果不指定版本,默认安装最新版本
yum install -y kubelet kubeadm kubectl
# 指定版本安装
yum install -y kubelet-1.19.4 kubeadm-1.19.4 kubectl-1.19.4  --disableexcludes=kubernetes
# 启动服务
systemctl enable kubelet && systemctl start kubelet
# 查看版本
kubeadm version
8 master 节点初始化操作
[root@master ~]# kubeadm init --image-repository registry.aliyuncs.com/google_containers \
> --control-plane-endpoint master.k8s.com --kubernetes-version=v1.19.4 \
> --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --token-ttl=0

--image-repository registry.aliyuncs.com/google_containers
--control-plane-endpoint master.k8s.com
--kubernetes-version=v1.19.4
--pod-network-cidr=10.244.0.0/16(注意:必须与 flannel 默认网段 10.244.0.0/16 一致,否则 pod 网络不通)
--service-cidr=10.96.0.0/12
--token-ttl=0

范例:

[root@master ~]# kubeadm init --image-repository registry.aliyuncs.com/google_containers \
> --control-plane-endpoint master.k8s.com --kubernetes-version=v1.19.4 \
> --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --token-ttl=0
W1211 15:56:37.395986    1486 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.19.4
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master.k8s.com] and IPs [10.96.0.1 10.0.0.7]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master.k8s.com] and IPs [10.0.0.7 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master.k8s.com] and IPs [10.0.0.7 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 6.502050 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.19" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node master.k8s.com as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master.k8s.com as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: hviz69.351bibhx5hp91hdt
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join master.k8s.com:6443 --token hviz69.351bibhx5hp91hdt \
    --discovery-token-ca-cert-hash sha256:15c61ad635e97151d8cbccae5bdd26775c7bd5d919e859de4dd097823745a5bf \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join master.k8s.com:6443 --token hviz69.351bibhx5hp91hdt \
    --discovery-token-ca-cert-hash sha256:15c61ad635e97151d8cbccae5bdd26775c7bd5d919e859de4dd097823745a5bf
# 执行输出
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 集群主节点安装成功,这里要记得保存这条命令,以便之后各个节点加入集群:
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join master.k8s.com:6443 --token hviz69.351bibhx5hp91hdt \
    --discovery-token-ca-cert-hash sha256:15c61ad635e97151d8cbccae5bdd26775c7bd5d919e859de4dd097823745a5bf
#查看一下集群pod,确认个组件都处于Running 状态
#注意由于master节点上存在污点,所以coredns 暂时还无法正常启动。
[root@master ~]# kubectl  get pod -n kube-system
NAME                                     READY   STATUS    RESTARTS   AGE
coredns-f9fd979d6-24cl7                  0/1     Pending   0          3m34s
coredns-f9fd979d6-fp8gh                  0/1     Pending   0          3m34s
etcd-master.k8s.com                      1/1     Running   0          3m44s
kube-apiserver-master.k8s.com            1/1     Running   0          3m44s
kube-controller-manager-master.k8s.com   1/1     Running   0          3m44s
kube-proxy-gxqxq                         1/1     Running   0          3m34s
kube-scheduler-master.k8s.com            1/1     Running   0          3m44s
9 集群部署 flannel 网络
[root@master ~]# mkdir /root/k8s
[root@master ~]# cd /root/k8s/
[root@master k8s]# kubectl apply -f flannel.yml
[root@master ~]# cat /root/k8s/flannel.yml

注意:由于各种原因会导致无法下载,这里建议手动复制

---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
# 注意:以上 flannel.yml 内容不完整(到此处被截断),完整清单请参考 flannel 官方仓库的 kube-flannel.yml
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值