Kubernetes Notes

Kubernetes

Deploying a Highly Available Cluster with kubeadm

Host Planning

VIP: 192.168.5.100

Hostname        IP              Role
kubemaster001   192.168.5.9     master, keepalived, haproxy, dns
kubemaster002   192.168.5.10    master, keepalived, haproxy
kubeworker001   192.168.5.11    worker, etcd
kubeworker002   192.168.5.12    worker, etcd
kubeworker003   192.168.5.13    worker, etcd
registry        192.168.5.128   harbor

Host Initialization

systemctl stop firewalld 
systemctl disable firewalld 

sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
 
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

 
cat >> /etc/hosts << EOF 
192.168.5.9  kubemaster001
192.168.5.10 kubemaster002
192.168.5.11 kubeworker001
192.168.5.12 kubeworker002
192.168.5.13 kubeworker003 
EOF
 
cat > /etc/sysctl.d/k8s.conf << EOF 
net.bridge.bridge-nf-call-ip6tables = 1 
net.bridge.bridge-nf-call-iptables = 1 
EOF 
sysctl --system  
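Note: the bridge sysctls above only take effect once the br_netfilter kernel module is loaded, and kubeadm's preflight checks also expect IP forwarding to be enabled. A minimal sketch (these lines are an addition, not part of the original steps):

modprobe br_netfilter
echo "br_netfilter" > /etc/modules-load.d/k8s.conf
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.d/k8s.conf
sysctl --system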
 
yum install ntpdate nfs-utils wget vim net-tools -y 
ntpdate time.windows.com

wget -O /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum clean all
yum makecache
yum -y install docker-ce
systemctl enable docker --now
mkdir -p /data/app_data/docker
cat >/etc/docker/daemon.json<<EOF
{
  "insecure-registries": ["registry.fine.com"],
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
  "data-root": "/data/app_data/docker"
}
EOF
systemctl restart docker

wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.5/cri-dockerd-0.2.5-3.el7.x86_64.rpm
rpm -ivh cri-dockerd-0.2.5-3.el7.x86_64.rpm 
sed "s#ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd://#ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7#" /usr/lib/systemd/system/cri-docker.service -i
systemctl daemon-reload 
systemctl enable cri-docker && systemctl start cri-docker

reboot

Deploying etcd (run on kubeworker001)

  • Install the cfssl tools
mkdir -p /data/k8s-work
cd /data/k8s-work
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64

chmod +x cfssl*
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
  • Create a self-signed certificate authority (CA)
cat > ca-csr.json <<"EOF"
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "JiangSu",
      "L": "shiyan",
      "O": "k8s",
      "OU": "system"
    }
  ],
  "ca": {
          "expiry": "87600h"
  }
}
EOF

# Configure the CA signing policy

cat > ca-config.json <<"EOF"
{
  "signing": {
      "default": {
          "expiry": "87600h"
        },
      "profiles": {
          "kubernetes": {
              "usages": [
                  "signing",
                  "key encipherment",
                  "server auth",
                  "client auth"
              ],
              "expiry": "87600h"
          }
      }
  }
}
EOF

# Generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
  • Issue the etcd HTTPS certificate with the self-signed CA
# Extra IPs can be added here to make scaling etcd out easier later
cat > etcd-csr.json <<"EOF"
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.5.11",
    "192.168.5.12",
    "192.168.5.13",
    "192.168.5.14",
    "192.168.5.15".
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "JiangSu",
    "L": "shiyan",
    "O": "k8s",
    "OU": "system"
  }]
}
EOF

# Generate the certificate
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson  -bare etcd
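Optionally, inspect the generated certificate before distributing it to confirm the SANs cover every etcd IP (cfssl-certinfo was installed above):

cfssl-certinfo -cert etcd.pem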
  • Download and distribute etcd
wget https://github.com/etcd-io/etcd/releases/download/v3.5.0/etcd-v3.5.0-linux-amd64.tar.gz
tar -xvf etcd-v3.5.0-linux-amd64.tar.gz
cp -p etcd-v3.5.0-linux-amd64/etcd* /usr/local/bin/
for i in kubeworker002 kubeworker003;do scp etcd-v3.5.0-linux-amd64/etcd* ${i}:/usr/local/bin/;done
  • Create the etcd configuration file
# On the other etcd nodes:
# ETCD_NAME must be unique
# change the IP addresses to that server's own IP
cat >  etcd.conf <<"EOF"
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.5.11:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.5.11:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.5.11:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.5.11:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.5.11:2380,etcd-2=https://192.168.5.12:2380,etcd-3=https://192.168.5.13:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
  • Manage etcd with systemd
cat > etcd.service <<"EOF"
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/etc/etcd/etcd.conf
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/local/bin/etcd \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-client-cert-auth \
  --client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
  • Create the etcd directories on every node
mkdir -p /etc/etcd
mkdir -p /etc/etcd/ssl
mkdir -p /var/lib/etcd/default.etcd
  • Sync to all nodes
# The master nodes get copies too, because kube-apiserver needs these files to reach etcd
cp ca*.pem /etc/etcd/ssl/
cp etcd*.pem /etc/etcd/ssl/
cp etcd.conf /etc/etcd/
cp etcd.service /usr/lib/systemd/system/
for i in kubemaster001 kubemaster002 kubeworker002 kubeworker003;do scp  etcd.conf $i:/etc/etcd/;done
for i in kubemaster001 kubemaster002 kubeworker002 kubeworker003;do scp  etcd*.pem ca*.pem $i:/etc/etcd/ssl/;done
for i in kubemaster001 kubemaster002 kubeworker002 kubeworker003;do scp  etcd.service $i:/usr/lib/systemd/system/;done
  • Start etcd and enable it at boot
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
systemctl status etcd
  • Check the cluster status
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.5.11:2379,https://192.168.5.12:2379,https://192.168.5.13:2379 endpoint health
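The same flags also work with endpoint status and member list, which confirm that all three members joined one cluster and show which one is the leader:

ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.5.11:2379,https://192.168.5.12:2379,https://192.168.5.13:2379 endpoint status
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/etcd.pem --key=/etc/etcd/ssl/etcd-key.pem --endpoints=https://192.168.5.11:2379 member list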

Deploying the etcd cluster with Ansible is a quicker and more convenient alternative.

Deploying the High-Availability Layer

yum -y install epel-release keepalived haproxy

# kubemaster001
cat>/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL1
}
vrrp_script check_kube_api {
  script "/etc/keepalived/kube_api_check.sh" # shell script that checks whether kube-apiserver is alive
  interval 2
  weight 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    192.168.5.100
    }
    track_script {
    check_kube_api
    }
}
EOF

# kubemaster002
cat>/etc/keepalived/keepalived.conf<<EOF
! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL2
}
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    192.168.5.100
    }
}
EOF
systemctl enable keepalived --now

# kubemaster001 kubemaster002
cat > /etc/haproxy/haproxy.cfg<<EOF
global
    log /dev/log  local0 warning
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
   stats socket /var/lib/haproxy/stats
defaults
  log global
  option  httplog
  option  dontlognull
        timeout connect 5000
        timeout client 50000
        timeout server 50000
frontend kube-apiserver
  bind *:9443
  mode tcp
  option tcplog
  default_backend kube-apiserver
backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver1 192.168.5.9:6443 check # Replace the IP address with your own.
    server kube-apiserver2 192.168.5.10:6443 check # Replace the IP address with your own.
EOF

systemctl enable haproxy --now
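A quick sanity check that the VIP and the HAProxy frontend are up; the 6443 backends will only answer once kube-apiserver is running, and eth0 is assumed from the keepalived config:

ip addr show eth0 | grep 192.168.5.100
ss -lntp | grep 9443
curl -k https://192.168.5.100:9443/healthz   # expect "ok" once the control plane is up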

Deploying Kubernetes

  • Installation
# all nodes
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2
systemctl enable kubelet

# kubemaster001
kubeadm config print init-defaults>kube-init.yaml
kubeadm config images pull --image-repository=registry.fine.com/library --kubernetes-version=1.28.2 --cri-socket=unix:///var/run/cri-dockerd.sock

# Run after editing the init file (see below); join commands for the remaining nodes are sketched after the init file
kubeadm init --config kube-init.yaml --upload-certs
  • The init file (kube-init.yaml)
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.5.9
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  name: kubemaster001
  taints: null
---
controlPlaneEndpoint: "192.168.5.100:9443"
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  external:
    endpoints:
    - https://192.168.5.11:2379
    - https://192.168.5.12:2379
    - https://192.168.5.13:2379
    caFile: /etc/etcd/ssl/ca.pem
    certFile: /etc/etcd/ssl/etcd.pem
    keyFile: /etc/etcd/ssl/etcd-key.pem
imageRepository: registry.fine.com/library
kind: ClusterConfiguration
kubernetesVersion: 1.28.2
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.10.0.0/16
  podSubnet: 12.12.0.0/16
scheduler: {}
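  • Join the remaining nodes
    kubeadm init prints the exact join commands on success. The token, CA hash, and certificate key below are placeholders standing in for values from that output; the --cri-socket flag is added by hand because cri-dockerd is the runtime here. A sketch:

# second control-plane node (kubemaster002)
kubeadm join 192.168.5.100:9443 --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --control-plane --certificate-key <certificate-key> \
  --cri-socket unix:///var/run/cri-dockerd.sock

# worker nodes
kubeadm join 192.168.5.100:9443 --token <token> \
  --discovery-token-ca-cert-hash sha256:<hash> \
  --cri-socket unix:///var/run/cri-dockerd.sock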
  • keepalived health-check script (an installation sketch follows the script)
#!/bin/bash
# If the local kube-apiserver stops responding, kill keepalived so the VIP fails over to the backup.
url=https://192.168.5.9:6443
curl -k -s ${url}/healthz &>/dev/null
if [ $? -eq 0 ];then
        exit 0
else
        pkill keepalived
fi
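The script is assumed to be saved as /etc/keepalived/kube_api_check.sh, the path referenced in keepalived.conf, and it must be executable; a minimal sketch:

chmod +x /etc/keepalived/kube_api_check.sh
systemctl restart keepalived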

Add-ons Deployment

  • Calico deployment
    To be continued…
  • Ingress deployment
    To be continued…
  • NFS dynamic storage deployment
    To be continued…
  • Resource monitoring add-on deployment
    To be continued…
  • Ceph distributed storage deployment
    To be continued…

DNS Service Deployment

  • Install and configure
yum -y install bind bind-utils
# Mind the file ownership and permissions
ll /etc/named.conf
-rw-r----- 1 root named 500 Mar  9 08:23 /etc/named.conf
# Configuration file (back up the original first)
cat /etc/named.conf

//
// named.conf
//
// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS
// server as a caching only nameserver (as a localhost DNS resolver only).
//
// See /usr/share/doc/bind*/sample/ for example named configuration files.
//
// See the BIND Administrator's Reference Manual (ARM) for details about the
// configuration located in /usr/share/doc/bind-{version}/Bv9ARM.html

options {
        directory       "/var/named";
};

zone "fine.com" IN {
        type master;
        file "fine.com.zone";
};
  • fine.com.zone configuration
cd /var/named/
# cp -p preserves ownership so named can still read the file
cp -p named.localhost fine.com.zone

# cat fine.com.zone
$TTL 1D
@       IN SOA  @ rname.invalid. (
                                        0       ; serial
                                        1D      ; refresh
                                        1H      ; retry
                                        1W      ; expire
                                        3H )    ; minimum
fine.com.       NS      kubemaster001
kubemaster001   A       192.168.5.9
kubemaster002   A       192.168.5.10
kubeworker001   A       192.168.5.11
kubeworker002   A       192.168.5.12
kubeworker003   A       192.168.5.13
registry        A       192.168.5.128
vip             A       192.168.5.100
  • Start
    systemctl enable named --now

  • Add the DNS server IP

  echo "nameserver 192.168.5.9"  >> /etc/resolv.conf
  • Verify
For example: ping registry.fine.com
It resolves to 192.168.5.128 (a dig check is sketched below).
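A dig query against the new server is a more direct check than ping; dig comes from the bind-utils package installed above:

dig @192.168.5.9 registry.fine.com +short
# expected output: 192.168.5.128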

Harbor Deployment

  • Certificate generation
#!/bin/bash
# Docker also needs these certs to talk to Harbor over HTTPS
docker_cert=/etc/docker/certs.d/registry.fine.com
openssl genrsa -out ca.key 4096
openssl req -x509 -new -nodes -sha512 -days 3650  -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=registry.fine.com"  -key ca.key -out ca.crt
openssl genrsa -out registry.fine.com.key 4096
openssl req -sha512 -new     -subj "/C=CN/ST=Beijing/L=Beijing/O=example/OU=Personal/CN=registry.fine.com"     -key registry.fine.com.key -out registry.fine.com.csr
cat > v3.ext <<-EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1=registry.fine.com
EOF
openssl x509 -req -sha512 -days 3650     -extfile v3.ext     -CA ca.crt -CAkey ca.key -CAcreateserial     -in registry.fine.com.csr -out registry.fine.com.crt
openssl x509 -inform PEM -in registry.fine.com.crt -out registry.fine.com.cert
[ ! -d ${docker_cert} ] && mkdir -p ${docker_cert}
cp registry.fine.com.crt registry.fine.com.key registry.fine.com.cert  ${docker_cert}
# Directory layout after extracting the Harbor offline installer:
harbor
├── common
├── common.sh
├── docker-compose.yml
├── harbor.v2.10.0.tar.gz
├── harbor.yml.tmpl
├── install.sh
├── LICENSE
└── prepare
  • Configuration
cp harbor.yml.tmpl harbor.yml
  • HTTPS configuration section
    (screenshot not reproduced; the relevant harbor.yml fields are sketched below)

  • Data persistence configuration section
    (screenshot not reproduced; see the same sketch below)
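The fields those screenshots covered in harbor.yml look roughly like the sketch below; the certificate paths and the data directory are assumptions and should point at the certs generated above and at local storage:

hostname: registry.fine.com

https:
  port: 443
  certificate: /data/software/harbor/cert/registry.fine.com.crt   # assumed path
  private_key: /data/software/harbor/cert/registry.fine.com.key   # assumed path

data_volume: /data/app_data/harbor   # assumed path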

  • Start (docker-compose must be installed first)

./prepare
./install.sh
  • Fix Harbor not starting automatically at boot
cat >/usr/lib/systemd/system/harbor.service<<EOF
[Unit]
Description=Harbor
After=docker.service systemd-networkd.service systemd-resolved.service
Requires=docker.service
Documentation=http://github.com/vmware/harbor

[Service]
Type=simple
Restart=on-failure
RestartSec=5
ExecStart=/usr/bin/docker-compose -f /data/software/harbor/docker-compose.yml up
ExecStop=/usr/bin/docker-compose -f /data/software/harbor/docker-compose.yml down

[Install]
WantedBy=multi-user.target
EOF
  • Reload systemd and enable the service
systemctl daemon-reload
systemctl enable harbor

Note: Docker also needs the following configuration before it can log in to Harbor (a login check is sketched below).

cat /etc/docker/daemon.json
{
  "insecure-registries": ["registry.fine.com"],
  "data-root": "/data/app_data/docker"
}
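After restarting Docker, a login confirms that the registry settings and certificates are picked up (admin/Harbor12345 is the Harbor account used again in the EFK section):

systemctl restart docker
docker login registry.fine.com -u admin -p Harbor12345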

EFK Preparation

kubectl create secret docker-registry registry-auth \
--docker-username=admin \
--docker-password=Harbor12345 \
--docker-server=192.168.5.128

docker run --name elastic-charts-certs -i -w /usr/share/elasticsearch/data registry.fine.com/library/elasticsearch:7.16.3 /bin/sh -c  \
  "elasticsearch-certutil ca --out /usr/share/elasticsearch/data/elastic-stack-ca.p12 --pass '' && \
    elasticsearch-certutil cert --name elasticsearch-master-headless --dns \
    elasticsearch-master-headless --ca /usr/share/elasticsearch/data/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /usr/share/elasticsearch/data/elastic-certificates.p12"

docker cp  elastic-charts-certs:/usr/share/elasticsearch/data/elastic-certificates.p12 .
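The elastic-certificate.pem referenced below is not produced by the steps above; it can be extracted from the .p12 bundle, roughly as follows (an assumption based on the usual openssl pkcs12 workflow):

openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem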


kubectl create secret -n elk generic elastic-certificates --from-file=elastic-certificates.p12
kubectl -n elk create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem

kubectl create secret -n elk generic elastic-credentials \
  --from-literal=username=elastic --from-literal=password=13579@mf