(高可用)八、k8s(二)

五、pod管理

概念

1. pod

  • 是创建和管理k8s计算的最小可部署单元,一个pod代表集群中一个运行的进程,每个pod都有唯一的ip
  • 一个pod可包含多个容器,容器间共享IPC、net、ns(共享网络和数据卷,数据卷的生命周期仅与pod关联与容器无关)
  • 每个pod中都有一个名为pause的基础容器,负责为pod内其他容器创建并维持共享的网络等命名空间
  • 检查harbor仓库状态
[root@k8s1 ~]# cd /data/harbor/harbor/
[root@k8s1 harbor]# ls
common  common.sh  docker-compose.yml  harbor.v2.5.0.tar.gz  harbor.yml  harbor.yml.tmpl  install.sh  LICENSE  prepare
[root@k8s1 harbor]# docker-compose ps
NAME                COMMAND                  SERVICE             STATUS              PORTS
chartmuseum         "./docker-entrypoint…"   chartmuseum         running (healthy)
harbor-core         "/harbor/entrypoint.…"   core                running (healthy)
harbor-db           "/docker-entrypoint.…"   postgresql          running (healthy)
harbor-exporter     "/harbor/entrypoint.…"   exporter            running
harbor-jobservice   "/harbor/entrypoint.…"   jobservice          running (healthy)
harbor-log          "/bin/sh -c /usr/loc…"   log                 running (healthy)   127.0.0.1:1514->10514/tcp
harbor-portal       "nginx -g 'daemon of…"   portal              running (healthy)
nginx               "nginx -g 'daemon of…"   proxy               running (healthy)   0.0.0.0:80->8080/tcp, 0.0.0.0:443->8443/tcp, 0.0.0.0:9090->9090/tcp, :::80->8080/tcp, :::443->8443/tcp, :::9090->9090/tcp
redis               "redis-server /etc/r…"   redis               running (healthy)
registry            "/home/harbor/entryp…"   registry            running (healthy)
registryctl         "/home/harbor/start.…"   registryctl         running (healthy)
trivy-adapter       "/home/scanner/entry…"   trivy-adapter       running (healthy)
  • 查看集群状态
[root@k8s2 ~]# kubectl get node
NAME   STATUS   ROLES                  AGE   VERSION
k8s2   Ready    control-plane,master   19h   v1.23.16
k8s3   Ready    <none>                 18h   v1.23.15
k8s4   Ready    <none>                 18h   v1.23.15

- 配置介绍
[root@k8s2 ~]# kubectl describe pod myapp-79fdcd5ff-khsgr
Name:         myapp-79fdcd5ff-khsgr
Namespace:    default
Priority:     0
Node:         k8s3/192.168.147.102  # 部署在哪个节点
Start Time:   Sat, 21 Jan 2023 02:52:20 -0800
Labels:       app=myapp
              pod-template-hash=79fdcd5ff
Annotations:  <none>
Status:       Running
IP:           10.244.1.12
IPs:
  IP:           10.244.1.12
Controlled By:  ReplicaSet/myapp-79fdcd5ff
Containers:
  myapp:
    Container ID:   docker://7d240bbd752e420578297afa5b405594edc36a801c1469b96fda90f218a724e4
    Image:          alexw.com/library/myapp:v1
    Image ID:       docker-pullable://alexw.com/library/myapp@sha256:9eeca44ba2d410e54fccc54cbe9c021802aa8b9836a0bcf3d3229354e4c8870e
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Sat, 21 Jan 2023 02:52:20 -0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-24bfj (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  kube-api-access-24bfj:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort  # BestEffort表示在服务器资源不够时,此pod首先被回收
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:                      <none>






[root@k8s2 ~]# kubectl get pod myapp-79fdcd5ff-khsgr -o yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2023-01-21T10:52:20Z"
  generateName: myapp-79fdcd5ff-
  labels:
    app: myapp
    pod-template-hash: 79fdcd5ff
  name: myapp-79fdcd5ff-khsgr
  namespace: default
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: myapp-79fdcd5ff
    uid: 724198e1-da85-4d46-a2ef-810ae48f775e
  resourceVersion: "9987"
  uid: bf2dbdc6-85fe-40cd-beb9-b9eca5dba179
spec:
  containers:
  - image: alexw.com/library/myapp:v1
    imagePullPolicy: IfNotPresent
    name: myapp
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-24bfj
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: k8s3
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: kube-api-access-24bfj
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2023-01-21T10:52:20Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2023-01-21T10:52:22Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2023-01-21T10:52:22Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2023-01-21T10:52:20Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://7d240bbd752e420578297afa5b405594edc36a801c1469b96fda90f218a724e4
    image: alexw.com/library/myapp:v1
    imageID: docker-pullable://alexw.com/library/myapp@sha256:9eeca44ba2d410e54fccc54cbe9c021802aa8b9836a0bcf3d3229354e4c8870e
    lastState: {}
    name: myapp
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2023-01-21T10:52:20Z"
  hostIP: 192.168.147.102
  phase: Running
  podIP: 10.244.1.12
  podIPs:
  - ip: 10.244.1.12
  qosClass: BestEffort
  startTime: "2023-01-21T10:52:20Z"

2. service

  • 是一个抽象概念,定义了一个服务的多个pod逻辑合集和访问pod的策略,一般称为微服务
  • 自动注册
  • 用于向外暴露pod内的服务
[root@k8s2 ~]# kubectl get svc -o wide
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE   SELECTOR
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        20h   <none>
myapp        NodePort    10.107.222.140   <none>        80:31532/TCP   18h   app=myapp
[root@k8s2 ~]# kubectl describe svc myapp
Name:                     myapp
Namespace:                default
Labels:                   app=myapp
Annotations:              <none>
Selector:                 app=myapp
Type:                     NodePort
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.107.222.140
IPs:                      10.107.222.140
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  31532/TCP
Endpoints:                10.244.1.12:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:                   <none>

3. 资源清单

  • 总体结构如下
[root@k8s2 ~]# kubectl get pod myapp-79fdcd5ff-khsgr -o yaml
apiVersion: v1  # kubectl api-versions查询
kind: Pod  # 标记创建的资源类型,k8s主要支持资源类型:Pod、ReplicaSet、Deployment、StatefulSet、DaemonSet、Job、Cronjob
metadata:
        name:
        namespace:
        labels:
  
spec:  # 定义目标资源的期望状态
  
status:  # 定义目标资源的实际状态
  
  • 查询帮助文档
[root@k8s2 ~]# kubectl explain pod
KIND:     Pod
VERSION:  v1

DESCRIPTION:
     Pod is a collection of containers that can run on a host. This resource is
     created by clients and scheduled onto hosts.

FIELDS:
   apiVersion   <string>
     APIVersion defines the versioned schema of this representation of an
     object. Servers should convert recognized schemas to the latest internal
     value, and may reject unrecognized values. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

   kind <string>
     Kind is a string value representing the REST resource this object
     represents. Servers may infer this from the endpoint the client submits
     requests to. Cannot be updated. In CamelCase. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

   metadata     <Object>
     Standard object's metadata. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

   spec <Object>
     Specification of the desired behavior of the pod. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

   status       <Object>
     Most recently observed status of the pod. This data may not be up to date.
     Populated by the system. Read-only. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

  • 编辑资源清单创建pod
[root@k8s2 ~]# kubectl run web1 --image alexw.com/library/nginx --dry-run=client -o yaml > pod.yaml
[root@k8s2 ~]# vim pod.yaml

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: web1
  name: web1
spec:
  containers:
  - image: alexw.com/library/nginx
    name: web1
    imagePullPolicy: IfNotPresent
    resources: {}
    ports:
    - name: http
      containerPort: 80   # 通过hostPort暴露的服务与通过svc(NodePort)暴露的服务不同:hostPort仅在pod实际部署的那个节点做了端口映射,而NodePort类型的svc在所有节点都做了端口映射,因此svc方式暴露的服务在任意节点都能访问到
      hostPort: 80
  restartPolicy: Always


[root@k8s2 ~]# kubectl create -f pod.yaml
pod/web1 created
[root@k8s2 ~]# kubectl get pod -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP            NODE   NOMINATED NODE   READINESS GATES
demo1                   1/1     Running   0          19h   10.244.1.4    k8s3   <none>           <none>
myapp-79fdcd5ff-khsgr   1/1     Running   0          19h   10.244.1.12   k8s3   <none>           <none>
web1                    1/1     Running   0          12s   10.244.2.33   k8s4   <none>           <none>
[root@k8s2 ~]# vim pod.yaml
[root@k8s2 ~]# curl 192.168.147.103
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>



# 使用配置文件删除相关资源
[root@k8s2 ~]# kubectl delete -f pod.yaml
pod "web1" deleted
[root@k8s2 ~]# kubectl get pod
NAME                    READY   STATUS    RESTARTS   AGE
demo1                   1/1     Running   0          19h
myapp-79fdcd5ff-khsgr   1/1     Running   0          18h
  • 指定资源
[root@k8s2 ~]# vim pod.yaml

    resources:
      limits:
        memory: 100Mi
        cpu: 500m
      requests:
        memory: 100Mi
        cpu: 500m


[root@k8s2 ~]# kubectl describe pod web1

QoS Class:                   Guaranteed  # 定义为敏感性业务,优先级最高


    resources:
      limits:
        memory: 500Mi
        cpu: 1000m
      requests:
        memory: 100Mi
        cpu: 500m

QoS Class:                   Burstable  # 定义为次敏感性业务,优先级其次

不设置resources
QoS Class:                   BestEffort # 优先级最低
  • 同一pod部署多个容器
[root@k8s2 ~]# vim pod.yaml

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: web1
  name: web1
spec:
  containers:
  - image: alexw.com/library/nginx
    name: web1
    imagePullPolicy: IfNotPresent
    resources:
      limits:
        memory: 500Mi
        cpu: 1000m
      requests:
        memory: 100Mi
        cpu: 500m
    ports:
    - name: http
      containerPort: 80
      hostPort: 80
  - name: myshell
    image: alexw.com/library/busybox
    imagePullPolicy: IfNotPresent
    tty: true
    stdin: true
  restartPolicy: Always

[root@k8s2 ~]# kubectl exec -it web1 -c myshell -- sh
/ #
/ # ls
bin    dev    etc    home   lib    lib64  proc   root   sys    tmp    usr    var
  • 调度pod到指定node
[root@k8s2 ~]# kubectl get pod --show-labels
NAME                    READY   STATUS    RESTARTS   AGE    LABELS
demo1                   1/1     Running   0          20h    run=demo1
myapp-79fdcd5ff-khsgr   1/1     Running   0          19h    app=myapp,pod-template-hash=79fdcd5ff
web1                    2/2     Running   0          6m6s   run=web1


[root@k8s2 ~]# kubectl get nodes --show-labels
NAME   STATUS   ROLES                  AGE   VERSION    LABELS
k8s2   Ready    control-plane,master   21h   v1.23.16   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s2,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
k8s3   Ready    <none>                 20h   v1.23.15   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s3,kubernetes.io/os=linux
k8s4   Ready    <none>                 20h   v1.23.15   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s4,kubernetes.io/os=linux

# 给k8s3这个node添加标签
[root@k8s2 ~]# kubectl label nodes k8s3 run=web1
node/k8s3 labeled
[root@k8s2 ~]# kubectl get nodes --show-labels
NAME   STATUS   ROLES                  AGE   VERSION    LABELS
k8s2   Ready    control-plane,master   21h   v1.23.16   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s2,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
k8s3   Ready    <none>                 20h   v1.23.15   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s3,kubernetes.io/os=linux,run=web1
k8s4   Ready    <none>                 20h   v1.23.15   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=k8s4,kubernetes.io/os=linux

# 编辑yaml配置
[root@k8s2 ~]# vim pod.yaml

  restartPolicy: Always
  nodeSelector:
    run: web1
  • 直接使用宿主机网络
[root@k8s2 ~]# vim pod.yaml

  nodeSelector:
    run: web1
  hostNetwork: true

pod生命周期

1. init容器

    • 容器有启动顺序,运行完退出(失败会自动重启)
    • 主容器由探针(readiness【容器是否准备好提供服务,默认为成功】、liveness【容器是否正常运行,默认为成功】、startup【容器内应用是否已启动完成,启用此探针后,在其成功之前会禁用其他探针,默认为成功】)监控
    • 可以包含应用镜像中不能包含的实用工具或个性化代码,安全且轻量
    • 由initContainers关键字指定
[root@k8s2 ~]# vim init_pod.yaml

apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app.kubernetes.io/name: MyApp
spec:
  containers:
  - name: myapp-container
    image: busybox:1.28
    command: ['sh', '-c', 'echo The app is running! && sleep 3600']
  initContainers:
  - name: init-myservice
    image: busybox:1.28
    command: ['sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"]  # 表示如果地址解析失败,则会一直阻塞重试,不会创建容器
  - name: init-mydb
    image: busybox:1.28
    command: ['sh', '-c', "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done"]


[root@k8s2 ~]# kubectl create -f init_pod.yaml
pod/myapp-pod created
[root@k8s2 ~]# kubectl get pod
NAME                    READY   STATUS     RESTARTS   AGE
demo1                   1/1     Running    0          20h
myapp-79fdcd5ff-khsgr   1/1     Running    0          20h
myapp-pod               0/1     Init:0/2   0          9s

# 查看日志,查看第一个 Init 容器,问题是nslookup无法解析
[root@k8s2 ~]# kubectl logs myapp-pod -c init-myservice

nslookup: can't resolve 'myservice.default.svc.cluster.local'
waiting for myservice
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

waiting for myservice
nslookup: can't resolve 'myservice.default.svc.cluster.local'


# 需要创建对应的service
[root@k8s2 ~]# vim init_pod.yaml

······

---
apiVersion: v1
kind: Service
metadata:
  name: myservice
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9376
---
apiVersion: v1
kind: Service
metadata:
  name: mydb
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 9377


# 更新pod配置
[root@k8s2 ~]# kubectl apply -f init_pod.yaml
pod/myapp-pod unchanged
service/myservice created
service/mydb created
[root@k8s2 ~]# kubectl get pod
NAME                    READY   STATUS    RESTARTS   AGE
demo1                   1/1     Running   0          20h
myapp-79fdcd5ff-khsgr   1/1     Running   0          20h
myapp-pod               1/1     Running   0          5s

# 容器正常启动
[root@k8s2 ~]# kubectl logs myapp-pod
The app is running!


# 查看service在容器内被分配的地址
[root@k8s2 ~]# kubectl exec -it myapp-pod -c myapp-container -- sh
/ # ls
bin   dev   etc   home  proc  root  sys   tmp   usr   var

/ # nslookup myservice
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      myservice
Address 1: 10.98.196.110 myservice.default.svc.cluster.local


/ # nslookup mydb
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      mydb
Address 1: 10.96.238.173 mydb.default.svc.cluster.local

2.探针

    • 探针:探针是k8s对容器执行的定期诊断
      • ExecAction:在容器内执行指定命令,返回值为0表示成功执行
      • TCPSocketAction:对指定端口上的容器的IP进行TCP检查,判断端口开放
      • HTTPGetAction:对指定的端口和路径上的容器的IP地址执行HTTP GET请求,响应状态码>=200且<400则认为正常
# 创建探针
[root@k8s2 ~]# vim http_liveness.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
  - name: liveness
    image: alexw.com/library/nginx
    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 3
      periodSeconds: 3


# 查看pod状态,发现一直在重启
[root@k8s2 ~]# kubectl apply -f http_liveness.yaml
pod/liveness-http created
[root@k8s2 ~]# kubectl get pod -w
NAME                    READY   STATUS    RESTARTS   AGE
demo1                   1/1     Running   0          21h
liveness-http           1/1     Running   0          8s
myapp-79fdcd5ff-khsgr   1/1     Running   0          20h
myapp-pod               1/1     Running   0          22m
liveness-http           1/1     Running   1 (1s ago)   13s
liveness-http           1/1     Running   2 (2s ago)   26s
liveness-http           1/1     Running   3 (1s ago)   37s


# 查看事件,端口连接失败,所以自动重启
[root@k8s2 ~]# kubectl describe pod liveness-http

Events:
  Type     Reason     Age                From               Message
  ----     ------     ----               ----               -------
  Normal   Scheduled  51s                default-scheduler  Successfully assigned default/liveness-http to k8s4
  Normal   Pulled     51s                kubelet            Successfully pulled image "alexw.com/library/nginx" in 86.703886ms
  Normal   Pulled     40s                kubelet            Successfully pulled image "alexw.com/library/nginx" in 45.132516ms
  Normal   Pulled     28s                kubelet            Successfully pulled image "alexw.com/library/nginx" in 110.992848ms
  Normal   Created    27s (x3 over 51s)  kubelet            Created container liveness
  Normal   Started    27s (x3 over 51s)  kubelet            Started container liveness
  Normal   Pulling    16s (x4 over 51s)  kubelet            Pulling image "alexw.com/library/nginx"
  Warning  Unhealthy  16s (x9 over 46s)  kubelet            Liveness probe failed: dial tcp 10.244.2.39:8080: connect: connection refused
  Normal   Killing    16s (x3 over 40s)  kubelet            Container liveness failed liveness probe, will be restarted
## 添加一个探针
[root@k8s2 ~]# vim http_liveness.yaml

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
  - name: liveness
    image: alexw.com/library/nginx
    livenessProbe:
      tcpSocket:
        port: 80
      initialDelaySeconds: 3
      periodSeconds: 3
    readinessProbe:
      httpGet:
        path: /index.html
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 5


# 查看探针状态
[root@k8s2 ~]# kubectl describe pod liveness-http | grep Readiness
    Readiness:      http-get http://:80/index.html delay=5s timeout=1s period=5s #success=1 #failure=3
[root@k8s2 ~]# kubectl describe pod liveness-http | grep Liveness
    Liveness:       tcp-socket :80 delay=3s timeout=1s period=3s #success=1 #failure=3

六、控制器

1、pod分类

  • 自主式pod:pod退出后不会被创建
  • 控制器管理的pod:在控制器的生命周期里,始终要维护pod的副本数量

2、控制器类型

  • Replication Controller和ReplicaSet(控制应用副本)
  • Deployment(控制版本迭代,一个rs相当于Deployment的一个版本,过渡时期默认每次更新25%的pod)
  • DaemonSet(确保每个node都运行一个pod的副本,新增node会自动运行一个pod副本,删除node自动回收pod副本,删除DaemonSet会删除其创建的所有副本;场景:分布式存储、日志收集、监控)
  • StatefulSet(用来管理有状态的应用,为pod提供唯一性标识)
  • Job(离线任务)
  • CronJob(周期性拉起Job)
  • HPA(Horizontal Pod Autoscaler)(根据资源利用情况自动调整pod)

3、示例

rs示例

# rs控制器通过标签(label)匹配pod
[root@k8s2 ~]# vim rs_demo.yaml

apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: frontend
  labels:
    app: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      app: frontend
  template:
    metadata:
      labels:
        app: frontend
    spec:
      containers:
      - name: web1
        image: alexw.com/library/nginx


[root@k8s2 ~]# kubectl create -f rs_demo.yaml
replicaset.apps/frontend created

[root@k8s2 ~]# kubectl get pod --show-labels
NAME                    READY   STATUS    RESTARTS   AGE     LABELS
frontend-2ssd7          1/1     Running   0          46s     app=frontend
frontend-9snbf          1/1     Running   0          46s     app=frontend
frontend-tjvrb          1/1     Running   0          46s     app=frontend

deployment示例

[root@k8s2 ~]# vim deployment_demo.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo
  labels:
    app: demo
spec:
  replicas: 3
  selector:
    matchLabels:
      app: demo
  template:
    metadata:
      labels:
        app: demo
    spec:
      containers:
      - name: web1
        image: alexw.com/library/myapp:v1  # 从这里变更版本号


# 然后执行,版本迭代后,旧pod会被回收
[root@k8s2 ~]# kubectl apply -f deployment_demo.yaml

# 同样也可以通过命令实现版本迭代
[root@k8s2 ~]# kubectl set image deployment demo web1=alexw.com/library/myapp:v2
deployment.apps/demo image updated


# 暂停资源上线,可以在暂停期间进行更多配置,一次性配置完成再上线
kubectl rollout pause deployment/nginx-deployment
# 恢复
kubectl rollout resume deployment/nginx-deployment
# 查看更新状态
kubectl rollout status deployment/nginx-deployment

daemonset示例

[root@k8s2 controllers]# vim daemonset_demo.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonsetdemo
  labels:
    app: zabbix-agent
spec:
  selector:
    matchLabels:
      app: zabbix-agent
  template:
    metadata:
      labels:
        app: zabbix-agent
    spec:
      containers:
      - name: zabbix-agent
        image: alexw.com/zabbix/zabbix-agent

[root@k8s2 controllers]# kubectl apply -f daemonset_demo.yaml
daemonset.apps/daemonsetdemo created

[root@k8s2 controllers]# kubectl get pod -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP            NODE   NOMINATED NODE   READINESS GATES
daemonsetdemo-67ht8     1/1     Running   0          16s   10.244.2.69   k8s4   <none>           <none>
daemonsetdemo-jz5k6     1/1     Running   0          16s   10.244.1.45   k8s3   <none>           <none>

# 默认在k8s的master上不调度
[root@k8s2 controllers]# kubectl describe nodes k8s2 | grep Tain
Taints:             node-role.kubernetes.io/master:NoSchedule


# 修改配置,在控制节点也参与调度
[root@k8s2 controllers]# vim daemonset_demo.yaml

    spec:
      tolerations:
      - operator: Exists
        effect: NoSchedule
      containers:
      - name: zabbix-agent
        image: alexw.com/zabbix/zabbix-agent


[root@k8s2 controllers]# kubectl apply -f daemonset_demo.yaml
daemonset.apps/daemonsetdemo configured

[root@k8s2 controllers]# kubectl get pod -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP            NODE   NOMINATED NODE   READINESS GATES
daemonsetdemo-2ptxs     1/1     Running   0          8s    10.244.2.70   k8s4   <none>           <none>
daemonsetdemo-95zx2     1/1     Running   0          12s   10.244.0.18   k8s2   <none>           <none>
daemonsetdemo-crwvd     1/1     Running   0          5s    10.244.1.46   k8s3   <none>           <none>

Job示例

[root@k8s2 controllers]# vim job_demo.yaml

apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    spec:
      containers:
      - name: pi
        image: alexw.com/library/perl:5.34.0
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
  backoffLimit: 4   # 表示重试4次仍然失败,则不再重试

# 拉取镜像
[root@k8s1 ~]# docker pull perl:5.34.0


[root@k8s2 controllers]# kubectl get pod
NAME                    READY   STATUS      RESTARTS   AGE
pi-9rzzj                0/1     Completed   0          2m25s
web1-6fc5554c45-hv695   1/1     Running     0          166m
web1-6fc5554c45-np5jl   1/1     Running     0          166m
web1-6fc5554c45-zsp7t   1/1     Running     0          166m
web2-f988c7556-2z4b4    1/1     Running     0          166m
web2-f988c7556-mvx9s    1/1     Running     0          166m
web2-f988c7556-wpphb    1/1     Running     0          166m
[root@k8s2 controllers]# kubectl logs pi-9rzzj
3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117067982148086513282306647093844609550582231725359408128481117450284102701938521105559644622948954930381964428810975665933446128475648233786783165271201909145648566923460348610454326648213393607260249141273724587006606315588174881520920962829254091715364367892590360011330530548820466521384146951941511609433057270365759591953092186117381932611793105118548074462379962749567351885752724891227938183011949129833673362440656643086021394946395224737190702179860943702770539217176293176752384674818467669405132000568127145263560827785771342757789609173637178721468440901224953430146549585371050792279689258923542019956112129021960864034418159813629774771309960518707211349999998372978049951059731732816096318595024459455346908302642522308253344685035261931188171010003137838752886587533208381420617177669147303598253490428755468731159562863882353787593751957781857780532171226806613001927876611195909216420198938095257201065485863278865936153381827968230301952035301852968995773622599413891249721775283479131515574857242454150695950829533116861727855889075098381754637464939319255060400927701671139009848824012858361603563707660104710181942955596198946767837449448255379774726847104047534646208046684259069491293313677028989152104752162056966024058038150193511253382430035587640247496473263914199272604269922796782354781636009341721641219924586315030286182974555706749838505494588586926995690927210797509302955321165344987202755960236480665499119881834797753566369807426542527862551818417574672890977772793800081647060016145249192173217214772350141441973568548161361157352552133475741849468438523323907394143334547762416862518983569485562099219222184272550254256887671790494601653466804988627232791786085784383827967976681454100953883786360950680064225125205117392984896084128488626945604241965285022210661186306744278622039194945047123713786960956364371917287467764657573962413890865832645995813390478027590
1

CronJob示例

  • cronjob周期性创建一个job,再由job去创建pod
  • 创建数量有上限
[root@k8s2 ingress]# vim cronjob_demo.yaml

apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: alexw.com/library/busybox
            imagePullPolicy: IfNotPresent
            command:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure

# 等待一段时间后
[root@k8s2 controllers]# kubectl logs hello-27907672-gsns6
Mon Jan 23 07:52:01 UTC 2023
Hello from the Kubernetes cluster

StatefulSet示例

  • StatefulSet将应用状态抽象成两种:拓扑状态(应用实例必须按顺序启动,新pod和旧pod网络标识一样)、存储状态(应用实例绑定了不同存储数据)
  • pod被重建后,网络标识不会改变,即pod对应的DNS记录
# 创建一个headless服务
[root@k8s2 controllers]# vim headless_service.yaml
apiVersion: v1
kind: Service
metadata:
  name: headless-service
spec:
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  selector:
      app: nginx
  clusterIP: None


# 创建StatefulSet控制器
[root@k8s2 controllers]# vim statefulset_demo.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
  labels:
    app: nginx
spec:
  serviceName: "nginx-svc"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: web1
        image: alexw.com/library/nginx

# StatefulSet成功拉起pod
[root@k8s2 controllers]# kubectl get svc -o wide
NAME               TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE    SELECTOR
headless-service   ClusterIP   None         <none>        80/TCP    15m    app=nginx
kubernetes         ClusterIP   10.96.0.1    <none>        443/TCP   3d5h   <none>
[root@k8s2 controllers]# kubectl get pod -o wide --show-labels
NAME    READY   STATUS    RESTARTS   AGE     IP               NODE   NOMINATED NODE   READINESS GATES   LABELS
web-0   1/1     Running   0          12m     10.244.106.136   k8s4   <none>           <none>            app=nginx,controller-revision-hash=web-77698cbbf7,statefulset.kubernetes.io/pod-name=web-0
web-1   1/1     Running   0          12m     10.244.219.21    k8s3   <none>           <none>            app=nginx,controller-revision-hash=web-77698cbbf7,statefulset.kubernetes.io/pod-name=web-1



## 查看DNS解析
10: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default
    link/ether 56:a6:a9:63:9f:c6 brd ff:ff:ff:ff:ff:ff
    inet 10.96.0.10/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.104.73.158/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.96.0.1/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.105.141.99/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 192.168.147.50/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
    inet 10.105.16.79/32 scope global kube-ipvs0
       valid_lft forever preferred_lft forever
[root@k8s2 controllers]# dig -t A web-0.nginx-svc.default.svc.cluster.local. @10.96.0.10      #######  解析kube-ipvs0这张网卡

; <<>> DiG 9.9.4-RedHat-9.9.4-72.el7 <<>> -t A web-0.nginx-svc.default.svc.cluster.local. @10.96.0.10
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 1606
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; WARNING: recursion requested but not available

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;web-0.nginx-svc.default.svc.cluster.local. IN A

;; ANSWER SECTION:
web-0.nginx-svc.default.svc.cluster.local. 30 IN A 10.244.219.27

;; Query time: 20 msec
;; SERVER: 10.96.0.10#53(10.96.0.10)
;; WHEN: Tue Jan 24 07:05:47 PST 2023
;; MSG SIZE  rcvd: 127



# 结合动态存储挂载
[root@k8s2 controllers]# vim statefulset_demo.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
  labels:
    app: nginx
spec:
  serviceName: "nginx-svc"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: web1
        image: alexw.com/library/nginx
        volumeMounts:
          - name: www
            mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
    - metadata:
        name: www
      spec:
        storageClassName: nfs-client
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi


···

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值