Hubei Provincial Skills Competition, Cloud Computing Track: Solution Walkthrough [Module B: Container Cloud Platform Deployment and Operations (30 points)]

Module B: Container Cloud Platform Deployment and Operations (30 points)

Node (virtualization enabled)    IP address          Specs
master                           192.168.200.10      6-core CPU, 16 GB RAM, 150 GB disk
node                             192.168.200.20      6-core CPU, 16 GB RAM, 150 GB disk

Task 1: Container Cloud Service Setup (2 points)

1. Deploy the container cloud platform (2 points)

Set the root password on both the master and node nodes to 000000, deploy a Kubernetes cluster, and deploy the Istio service mesh, KubeVirt virtualization, and the Harbor image registry (on the master node run k8s_harbor_install.sh, k8s_image_push.sh, k8s_master_install.sh and k8s_project_install.sh in that order; on the node node run k8s_node_install.sh).
Submit the output of kubectl cluster-info&&kubectl -n istio-system get all&&kubectl -n kubevirt get deployment to the answer box.


Set up passwordless SSH to make the rest of the tasks easier

[root@master ~]# ssh-keygen -t rsa
[root@node ~]# ssh-keygen -t rsa

Mount the ISOs and copy their contents locally (note: all files from the Kubernetes ISO must be copied to /opt/ because the scripts hard-code that path; otherwise they fail with path errors)

[root@master ~]# mkdir /mnt/k8s
[root@master ~]# mount /opt/kubernetes_v2.1.iso /mnt/k8s/

[root@master ~]# mkdir /mnt/centos
[root@master ~]# mount /opt/linux_CentOS-7-x86_64-DVD-2009.iso /mnt/centos/
[root@master ~]# cp -rvf /mnt/centos/ /opt/
[root@master ~]# cp -rvf /mnt/k8s/* /opt/

[root@master ~]# rm -rf /etc/yum.repos.d/*
[root@master ~]# vi /etc/yum.repos.d/local.repo
[centos]
baseurl=file:///opt/centos/
name=centos
enabled=1
gpgcheck=0

[k8s]
baseurl=file:///opt/kubernetes-repo/
name=k8s
enabled=1
gpgcheck=0

Install packages

[root@master ~]# yum install vim bash-com* lsof net-tools vsftpd -y

Disable SELinux

[root@master ~]# vim /etc/selinux/config
SELINUX=disabled
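
Optionally, SELinux can also be switched to permissive mode for the current boot so the change takes effect even before the reboot (a convenience step, not required by the task):

[root@master ~]# setenforce 0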

Disable firewalld autostart, reboot, and configure the FTP share

[root@master ~]# systemctl disable firewalld.service
[root@master ~]# reboot
[root@master ~]# vim /etc/vsftpd/vsftpd.conf
anon_root=/opt/

[root@master ~]# systemctl enable --now vsftpd
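
A quick sanity check that the FTP share exposes /opt (optional; assumes anonymous FTP is enabled, which is the vsftpd default on CentOS, and the listing will vary with what has been copied so far):

[root@master ~]# curl -s ftp://127.0.0.1/ | head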

Configure hosts

[root@master ~]# vim /etc/hosts
192.168.200.10 master
192.168.200.20 node

Distribute configuration files to the node

[root@master ~]# ssh-copy-id node
[root@master ~]# ssh-copy-id master
[root@master ~]# scp /etc/selinux/config node:/etc/selinux/config
[root@master ~]# scp /etc/hosts node:/etc/hosts
[root@master ~]# scp /etc/yum.repos.d/local.repo node:/etc/yum.repos.d/remote.repo

Configure yum on the node

[root@node ~]# rm -rf /etc/yum.repos.d/CentOS-*
[root@node ~]# vi /etc/yum.repos.d/remote.repo
[centos]
baseurl=ftp://master/centos/
name=centos
enabled=1
gpgcheck=0

[k8s]
baseurl=ftp://master/kubernetes-repo/
name=k8s
enabled=1
gpgcheck=0
[root@node ~]# yum install vim bash-com* lsof net-tools -y

Install Harbor (the scripts and related files must be in the /opt directory)

[root@master ~]# cd /opt/
[root@master opt]# ./k8s_harbor_install.sh
...

[root@master opt]# ./k8s_image_push.sh
Enter the registry address (without http/https): 192.168.200.10
Enter the registry username: admin
Enter the registry password: Harbor12345
The registry address you set is: 192.168.200.10, username: admin, password: xxx
Confirm (Y/N): Y

Install the master node

[root@master opt]# ./k8s_master_install.sh
[root@master opt]# ./k8s_project_install.sh

Install the node node

[root@master opt]# scp /opt/k8s_node_install.sh node:/root/
[root@node ~]# ./k8s_node_install.sh

Run kubectl -n kubernetes-dashboard create token admin-user to obtain the token for logging in to the Kubernetes dashboard.
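
For reference, the command is run as follows and prints a bearer token to stdout:

[root@master ~]# kubectl -n kubernetes-dashboard create token admin-user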


Answer:

Kubernetes control plane is running at https://192.168.200.10:6443
CoreDNS is running at https://192.168.200.10:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
Warning: kubevirt.io/v1 VirtualMachineInstancePresets is now deprecated and will be removed in v2.
NAME                                       READY   STATUS    RESTARTS   AGE
pod/grafana-56bdf8bf85-4mz2m               1/1     Running   0          3m33s
pod/istio-egressgateway-85649899f8-tn7j9   1/1     Running   0          4m53s
pod/istio-ingressgateway-f56888458-tbv6j   1/1     Running   0          4m53s
pod/istiod-64848b6c78-xm77j                1/1     Running   0          4m55s
pod/jaeger-76cd7c7566-4x5nn                1/1     Running   0          3m33s
pod/kiali-646db7568f-zcnp2                 1/1     Running   0          3m33s
pod/prometheus-85949fddb-hd69q             2/2     Running   0          3m33s

NAME                           TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                                      AGE
service/grafana                ClusterIP      10.99.229.56     <none>        3000/TCP                                                                     3m33s
service/istio-egressgateway    ClusterIP      10.107.63.162    <none>        80/TCP,443/TCP                                                               4m53s
service/istio-ingressgateway   LoadBalancer   10.96.43.106     <pending>     15021:30934/TCP,80:30691/TCP,443:30075/TCP,31400:30825/TCP,15443:30382/TCP   4m53s
service/istiod                 ClusterIP      10.101.91.95     <none>        15010/TCP,15012/TCP,443/TCP,15014/TCP                                        4m55s
service/jaeger-collector       ClusterIP      10.97.104.76     <none>        14268/TCP,14250/TCP,9411/TCP                                                 3m33s
service/kiali                  ClusterIP      10.102.1.13      <none>        20001/TCP,9090/TCP                                                           3m33s
service/prometheus             ClusterIP      10.109.215.71    <none>        9090/TCP                                                                     3m33s
service/tracing                ClusterIP      10.104.202.190   <none>        80/TCP,16685/TCP                                                             3m33s
service/zipkin                 ClusterIP      10.98.127.164    <none>        9411/TCP                                                                     3m33s

NAME                                   READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/grafana                1/1     1            1           3m33s
deployment.apps/istio-egressgateway    1/1     1            1           4m53s
deployment.apps/istio-ingressgateway   1/1     1            1           4m53s
deployment.apps/istiod                 1/1     1            1           4m55s
deployment.apps/jaeger                 1/1     1            1           3m33s
deployment.apps/kiali                  1/1     1            1           3m33s
deployment.apps/prometheus             1/1     1            1           3m33s

NAME                                             DESIRED   CURRENT   READY   AGE
replicaset.apps/grafana-56bdf8bf85               1         1         1       3m33s
replicaset.apps/istio-egressgateway-85649899f8   1         1         1       4m53s
replicaset.apps/istio-ingressgateway-f56888458   1         1         1       4m53s
replicaset.apps/istiod-64848b6c78                1         1         1       4m55s
replicaset.apps/jaeger-76cd7c7566                1         1         1       3m33s
replicaset.apps/kiali-646db7568f                 1         1         1       3m33s
replicaset.apps/prometheus-85949fddb             1         1         1       3m33s
NAME              READY   UP-TO-DATE   AVAILABLE   AGE
virt-api          2/2     2            2           2m29s
virt-controller   2/2     2            2           119s
virt-operator     2/2     2            2           2m54s

Task 2: Container Cloud Service Operations (15.5 points)

1. Containerized deployment of Node-Exporter (0.5 points)

Write a Dockerfile to build the exporter image: based on CentOS, install and configure the Node-Exporter service and start it automatically. (The required packages are in Monitor.tar.gz on Technology_packageV1.0.iso.)

(1) Base image: centos:centos7.9.2009;

(2) Install the node-exporter service from the binary package node_exporter-0.18.1.linux-amd64.tar.gz;

(3) Expose port 9100;

(4) Start the service automatically when the container starts.

Use docker build to build the image monitor-exporter:v1.0 and docker run to run the container.

Submit the output of docker run -d --name exporter-test monitor-exporter:v1.0 && sleep 5 && docker exec exporter-test ps -aux && docker rm -f exporter-test to the answer box.


[root@master ~]# tar -zxvf Monitor.tar.gz
[root@master ~]# cd Monitor
[root@master Monitor]# vim Dockerfile-exporter
FROM centos:centos7.9.2009
ADD node_exporter-0.18.1.linux-amd64.tar.gz /root/
RUN mv /root/node_exporter-0.18.1.linux-amd64/node_exporter /usr/local/bin
EXPOSE 9100
ENTRYPOINT ["node_exporter"]
[root@master Monitor]# docker load -i CentOS_7.9.2009.tar
[root@master Monitor]# docker build -t monitor-exporter:v1.0 -f Dockerfile-exporter .
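
An optional local smoke test before running the graded command (assumes port 9100 is free on the host):

[root@master Monitor]# docker run -d --name exporter-smoke -p 9100:9100 monitor-exporter:v1.0
[root@master Monitor]# curl -s http://127.0.0.1:9100/metrics | head -n 3
[root@master Monitor]# docker rm -f exporter-smoke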

Answer

d00689a1de0634e0cb3f4d8e9fbf1a5850b0549040ad2a1021737d6b87a076d4
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root          1  0.4  0.0 113932  5424 ?        Ssl  15:33   0:00 node_exporter
root         13  0.0  0.0  51732  1700 ?        Rs   15:33   0:00 ps -aux
exporter-test

2. Containerized deployment of Alertmanager (0.5 points)

Write a Dockerfile to build the alert image: based on CentOS, install and configure the Alertmanager service and start it automatically. (The required packages are in Monitor.tar.gz on Technology_packageV1.0.iso.)

(1) Base image: centos:centos7.9.2009

(2) Install the Alertmanager service from the provided binary package alertmanager-0.19.0.linux-amd64.tar.gz;

(3) Expose ports 9093 and 9094;

(4) Start the service automatically when the container starts.

Use docker build to build the image monitor-alert:v1.0 and docker run to run the container.

Submit the output of docker run -d --name alert-test monitor-alert:v1.0 && sleep 5 && docker exec alert-test ps -aux && docker rm -f alert-test to the answer box.


[root@master Monitor]# vim Dockerfile-alert
FROM centos:centos7.9.2009
ADD alertmanager-0.19.0.linux-amd64.tar.gz /usr/local/bin
WORKDIR /usr/local/bin/alertmanager-0.19.0.linux-amd64
EXPOSE 9093 9094
ENTRYPOINT ["./alertmanager"]
[root@master Monitor]# docker build -t monitor-alert:v1.0 -f Dockerfile-alert .

Answer

bf316dc0042579e50095f3a0386292d1e9d96e38b79a4347179b2d4d19eb84b2
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root          1  2.2  0.1 123920 16512 ?        Ssl  15:39   0:00 ./alertmanager
root         22  0.0  0.0  51732  1700 ?        Rs   15:39   0:00 ps -aux
alert-test

3. Containerized deployment of Grafana (0.5 points)

Write a Dockerfile to build the grafana image: based on CentOS, install and configure the Grafana service and start it automatically. (The required packages are in Monitor.tar.gz on Technology_packageV1.0.iso.)

(1) Base image: centos:centos7.9.2009;

(2) Install the Grafana service from the provided binary package grafana-6.4.1.linux-amd64.tar.gz;

(3) Expose port 3000;

(4) Start the Grafana service automatically when the container starts.

Use docker build to build the image monitor-grafana:v1.0 and docker run to run the container.

Submit the output of docker run -d --name grafana-test monitor-grafana:v1.0 && sleep 5 && docker exec grafana-test ps -aux && docker rm -f grafana-test to the answer box.


[root@master Monitor]# vim Dockerfile-grafana
FROM centos:centos7.9.2009
ADD grafana-6.4.1.linux-amd64.tar.gz /usr/local/bin
EXPOSE 3000
WORKDIR /usr/local/bin/grafana-6.4.1/bin
ENTRYPOINT ["./grafana-server"]
[root@master Monitor]# docker build -t monitor-grafana:v1.0 -f Dockerfile-grafana .

Answer

f8d3fc0348c498d60680a897972a56842e4d7df85707ac68eba4e05c7be89a64
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root          1  5.8  0.1 801180 28036 ?        Ssl  15:44   0:00 ./grafana-server
root         21  0.0  0.0  51732  1700 ?        Rs   15:44   0:00 ps -aux
grafana-test

4. Containerized deployment of Prometheus (0.5 points)

Write a Dockerfile to build the prometheus image: based on CentOS, install and configure the Prometheus service and start it automatically. (The required packages are in Monitor.tar.gz on Technology_packageV1.0.iso.)

(1) Base image: centos:centos7.9.2009;

(2) Install the Prometheus service from the provided binary package prometheus-2.13.0.linux-amd64.tar.gz;

(3) Write a prometheus.yml file defining three scrape jobs: prometheus, node-exporter and alertmanager, and copy the file into /data/prometheus/;

(4) Expose port 9090;

(5) Start the service automatically when the container starts.

Use docker build to build the image monitor-prometheus:v1.0 and docker run to run the container.

Submit the output of docker run -d --name prometheus-test monitor-prometheus:v1.0 && sleep 5 && docker exec prometheus-test ps -aux && docker rm -f prometheus-test to the answer box.


[root@master Monitor]# vim Dockerfile-prometheus
FROM centos:centos7.9.2009
ADD prometheus-2.13.0.linux-amd64.tar.gz /usr/local/bin
WORKDIR /usr/local/bin/prometheus-2.13.0.linux-amd64
RUN mkdir -p /data/prometheus/
COPY prometheus.yml /usr/local/bin/prometheus-2.13.0.linux-amd64
COPY prometheus.yml /data/prometheus/
EXPOSE 9090
CMD ["./prometheus","--config.file=/data/prometheus/prometheus.yml"]
[root@master Monitor]# vim prometheus.yml
# sample prometheus.yml configuration
# my global config
global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  - job_name: 'prometheus'
    static_configs:
    - targets: ['192.168.200.10:9090']
  - job_name: 'node-exporter'
    static_configs:
    - targets: ['192.168.200.10:9100']
  - job_name: 'alertmanager'
    static_configs:
    - targets: ['192.168.200.10:9093']
[root@master Monitor]# docker build -t monitor-prometheus:v1.0 -f Dockerfile-prometheus .
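
Optionally, the configuration can be validated with promtool, which ships in the same Prometheus tarball (a sanity check only, not part of the task):

[root@master Monitor]# tar -zxf prometheus-2.13.0.linux-amd64.tar.gz prometheus-2.13.0.linux-amd64/promtool
[root@master Monitor]# ./prometheus-2.13.0.linux-amd64/promtool check config prometheus.yml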

Answer

eb72b439a4907a23cd8fc8d233e148c2ebf76543115979424def788d2d8f8b3c
USER        PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
root          1  2.3  0.1 161540 26940 ?        Ssl  18:27   0:00 ./prometheus --config.file=/data/prometheus/prometheus.yml
root         22  0.0  0.0  51732  1700 ?        Rs   18:27   0:00 ps -aux
prometheus-test

5. Orchestrate the monitoring stack (1 point)

Write a docker-compose.yaml file that deploys the monitoring system using the exporter, alert, grafana and prometheus images. (The required packages are in Monitor.tar.gz on Technology_packageV1.0.iso.)

(1) Container monitor-node; image: monitor-exporter:v1.0; port mapping: 9100:9100;

(2) Container monitor-alertmanager; image: monitor-alert:v1.0; port mappings: 9093:9093, 9094:9094;

(3) Container monitor-grafana; image: monitor-grafana:v1.0; port mapping: 3000:3000;

(4) Container monitor-prometheus; image: monitor-prometheus:v1.0; port mapping: 9090:9090.

1. Submit the output of docker-compose ps to the answer box.

2. Submit the output of curl -L http://$(hostname -i):9090/targets | grep up to the answer box.


[root@master Monitor]# vim docker-compose.yaml
version: '3'
services:
  monitor-node:
    image: monitor-exporter:v1.0
    container_name: monitor-node
    ports:
    - "9100:9100"
  monitor-alertmanager:
    image: monitor-alert:v1.0
    container_name: monitor-alertmanager
    ports:
    - "9093:9093"

  monitor-grafana:
    image: monitor-grafana:v1.0
    container_name: monitor-grafana
    ports:
    - "3000:3000"
    depends_on:
    - monitor-prometheus

  monitor-prometheus:
    image: monitor-prometheus:v1.0
    container_name: monitor-prometheus
    ports:
    - "9090:9090"
[root@master Monitor]# docker-compose up -d
[+] Running 5/5
 ⠿ Network monitor_default         Created                                                       0.0s
 ⠿ Container monitor-alertmanager  Started                                                       0.5s
 ⠿ Container monitor-node          Started                                                       0.5s
 ⠿ Container monitor-prometheus    Started                                                       0.4s
 ⠿ Container monitor-grafana       Started                                                       0.7s

Answer

NAME                   COMMAND                  SERVICE                STATUS              PORTS
monitor-alertmanager   "./alertmanager"         monitor-alertmanager   running             0.0.0.0:9093->9093/tcp, :::9093->9093/tcp, 0.0.0.0:9094->9094/tcp, :::9094->9094/tcp
monitor-grafana        "./grafana-server"       monitor-grafana        running             0.0.0.0:3000->3000/tcp, :::3000->3000/tcp
monitor-node           "node_exporter"          monitor-node           running             0.0.0.0:9100->9100/tcp, :::9100->9100/tcp
monitor-prometheus     "./prometheus --conf…"   monitor-prometheus     running             0.0.0.0:9090->9090/tcp, :::9090->9090/tcp
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  9178    0  9178    0     0  5241k      0 --:--:-- --:--:-- --:--:-- 8962k
                            <a href="#" class="nav-link dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">Status <span class="caret"></span></a>
    <div id="showTargets" class="btn-group btn-group-toggle" data-toggle="buttons">
        <a id="job-alertmanager" href="#job-alertmanager">alertmanager (1/1 up)</a>
              <span class="alert alert-success state_indicator text-uppercase">up</span>
        <a id="job-node-exporter" href="#job-node-exporter">node-exporter (1/1 up)</a>
              <span class="alert alert-success state_indicator text-uppercase">up</span>
        <a id="job-prometheus" href="#job-prometheus">prometheus (1/1 up)</a>
              <span class="alert alert-success state_indicator text-uppercase">up</span>

6. Import the Jenkins image (0.5 points)

Build continuous integration on Kubernetes; the master, harbor and cicd nodes all use the master node's IP. The required package is CICD_Offline.tar, found on Technology_packageV1.0.iso. Move CICD_Offline.tar to the /opt directory and extract it, then import the images from the jenkins.tar file.

Submit the output of docker images | grep jenkins to the answer box.


[root@master Monitor]# mv /root/CICD_Offline.tar /opt/
[root@master Monitor]# cd /opt/
[root@master opt]# tar -zxvf CICD_Offline.tar
[root@master opt]# docker load -i jenkins.tar

Answer

jenkins/jenkins                                                   2.262-centos     f04839b3e211   4 years ago     638MB

7. Install Jenkins (1 point)

Write a Jenkins compose file, then start and configure Jenkins.

(1) Write docker-compose.yaml to start Jenkins.

(2) Create a user springcloud with password 000000.

(3) Change the Resource root URL in the system configuration.

Submit the output of docker-compose ps to the answer box.


[root@master opt]# mkdir jenkins
[root@master opt]# cd jenkins/
[root@master jenkins]# vim docker-compose.yaml
version: '3.1'
services:
  jenkins:
    image: 'jenkins/jenkins:2.262-centos'
    volumes:
      - /home/jenkins_home:/var/jenkins_home
      - /var/run/docker.sock:/var/run/docker.sock
      - /usr/bin/docker:/usr/bin/docker
      - /usr/bin/kubectl:/usr/local/bin/kubectl
      - /root/.kube:/root/.kube
    ports:
      - "8080:8080"
    expose:
      - "8080"
      - "50000"
    privileged: true
    user: root
    restart: always
    container_name: jenkins
[root@master jenkins]# docker-compose -f docker-compose.yaml up -d
[root@master jenkins]# cp -rfv /opt/plugins/* /home/jenkins_home/plugins/
[root@master jenkins]# docker restart jenkins

Open the web UI (http://IP:8080), obtain the initial admin password, and log in.

[root@master jenkins]# docker exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword
9fed58a96bec43c091a95e692672e0f6

Click: System Management -> Manage Users -> Create User -> enter the username (springcloud) and password (000000) -> Create User

Log out, log back in as springcloud, then click: System Management -> Configure System

Enter http://IP:8080/ as the Resource root URL


Answer

NAME                COMMAND                  SERVICE             STATUS              PORTS
jenkins             "/sbin/tini -- /usr/…"   jenkins             running             0.0.0.0:8080->8080/tcp, :::8080->8080/tcp, 50000/tcp

8. Deploy GitLab (1 point)

Write a GitLab compose file and start GitLab.

(1) Write docker-compose.yaml to start GitLab.

(2) Log in to GitLab as the root user.

(3) Create a public project springcloud in the Harbor registry.

Submit the output of docker-compose ps to the answer box.


[root@master jenkins]# mkdir /root/gitlab
[root@master jenkins]# cd /root/gitlab/
[root@master gitlab]# vim docker-compose.yaml
version: '3'
services:
  gitlab:
    image: 'gitlab/gitlab-ce:12.9.2-ce.0'
    container_name: gitlab
    restart: always
    hostname: '192.168.200.10'
    privileged: true
    environment:
      TZ: 'Asia/Shanghai'
    ports:
      - '81:80'
      - '443:443'
      - '1022:22'
    volumes:
      - /srv/gitlab/config:/etc/gitlab
      - /srv/gitlab/gitlab/logs:/var/log/gitlab
      - /srv/gitlab/gitlab/data:/var/opt/gitlab
[root@master gitlab]# docker-compose up -d

Once the status shown by docker-compose ps has changed from starting to healthy, open the web UI at http://192.168.200.10:81

Set a password for the root user on the first page, then log in.

Select Create a project -> enter the project name under Project name -> change Visibility Level to Public -> click Create project to create the project


Answer

NAME                COMMAND             SERVICE             STATUS              PORTS
gitlab              "/assets/wrapper"   gitlab              running (healthy)   0.0.0.0:443->443/tcp, :::443->443/tcp, 0.0.0.0:1022->22/tcp, :::1022->22/tcp, 0.0.0.0:81->80/tcp, :::81->80/tcp

9. Push the source code (1 point)

Push the source code to the springcloud project in GitLab and complete the related configuration.

Submit the output of a successful git push -u origin master to the answer box.


[root@master gitlab]# cd /opt/springcloud/
[root@master springcloud]# yum install -y git
[root@master springcloud]# git remote add origin http://192.168.200.10:81/root/springcloud.git
[root@master springcloud]# git config --global user.name "Administrator"
[root@master springcloud]# git config --global user.email "admin@example.com"
[root@master springcloud]# git push -u origin --all

Answer

Username for 'http://192.168.200.10:81': root
Password for 'http://root@192.168.200.10:81':
Counting objects: 3192, done.
Delta compression using up to 6 threads.
Compressing objects: 100% (1428/1428), done.
Writing objects: 100% (3192/3192), 1.40 MiB | 0 bytes/s, done.
Total 3192 (delta 1233), reused 3010 (delta 1207)
remote: Resolving deltas: 100% (1233/1233), done.
To http://192.168.200.10:81/root/springcloud.git
 * [new branch]      master -> master
Branch 'master' set up to track remote branch 'master' from 'origin'.

10. Connect Jenkins to Maven (1 point)

Configure Jenkins to connect to GitLab, install Maven, and complete the related configuration.

Submit the output of docker exec jenkins bash -c "source /etc/profile && mvn -v" to the answer box.


  1. Log in to the GitLab page -> click the wrench at the top to open the Admin Area -> on the left, click Network under Settings -> on the right, expand Outbound requests -> tick Allow requests to the local network from web hooks and services -> click Save changes

  2. Click the user icon in the top-right corner -> click Settings -> on the right, click Access Tokens -> under Add a personal access token fill in Name and Expires at -> select all Scopes -> click Create personal access token -> copy the token shown under Your New Personal Access Token: REsA_zJ8FVcJS4itNsez

  3. Log in to Jenkins -> System Management -> Configure System -> find the Gitlab section -> untick Enable authentication for '/project' end-point -> fill in Connection name -> enter the GitLab address (http://IP:81) into Gitlab host URL -> under Credentials click Add -> Jenkins -> choose type GitLab API token -> paste the token from the previous step into API token -> click Add, then select the new GitLab API token -> click Test Connection until it shows Success -> click Save at the bottom


[root@master springcloud]# cp -rf /opt/apache-maven-3.6.3-bin.tar.gz /home/jenkins_home/
[root@master springcloud]# docker exec -it jenkins bash
[root@8965dacea4c5 /]# tar -zxvf /var/jenkins_home/apache-maven-3.6.3-bin.tar.gz -C .
[root@8965dacea4c5 /]# mv apache-maven-3.6.3/ /usr/local/maven
[root@8965dacea4c5 /]# vi /etc/profile
export M2_HOME=/usr/local/maven
export PATH=$PATH:$M2_HOME/bin # append at the end of the file

[root@8965dacea4c5 /]# vi /root/.bashrc
source /etc/profile  # append at the end of the file

[root@8965dacea4c5 /]# exit
[root@master springcloud]# docker exec -it jenkins bash
[root@8965dacea4c5 /]# mvn -v
Apache Maven 3.6.3 (cecedd343002696d0abb50b32b541b8a6ba2883f)
Maven home: /usr/local/maven
Java version: 1.8.0_265, vendor: Oracle Corporation, runtime: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.265.b01-0.el8_2.x86_64/jre
Default locale: en_US, platform encoding: ANSI_X3.4-1968
OS name: "linux", version: "3.10.0-1160.el7.x86_64", arch: "amd64", family: "unix"

Log in to Jenkins -> System Management -> Global Tool Configuration -> find Maven installations -> click Add Maven -> untick Install automatically -> enter the Name (MAVEN_HOME) and MAVEN_HOME (/usr/local/maven) -> click Save at the bottom


Answer

Apache Maven 3.6.3 (cecedd343002696d0abb50b32b541b8a6ba2883f)
Maven home: /usr/local/maven
Java version: 1.8.0_265, vendor: Oracle Corporation, runtime: /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.265.b01-0.el8_2.x86_64/jre
Default locale: en_US, platform encoding: ANSI_X3.4-1968
OS name: "linux", version: "3.10.0-1160.el7.x86_64", arch: "amd64", family: "unix"

11. Configure and trigger CI/CD (1 point)

Write a pipeline script to configure CI/CD, create a springcloud project in the Harbor registry, and push code to trigger an automatic build.

After a successful build, submit the output of curl `kubectl get endpoints -n springcloud gateway |grep -v AGE| awk '{print $2}'` to the answer box.

The command may need | grep Pig appended, since the output is otherwise very long; for now it is written as above.


Log in to Jenkins -> click New Item on the right -> choose Pipeline -> click OK

Under Build Triggers, tick "Build when a change is pushed to GitLab. GitLab webhook URL: http://192.168.200.10:8080/project/springcloud" and note down this URL; it is needed later for the GitLab webhook.

Click Pipeline Syntax -> under Sample Step choose git: Git -> set the Repository URL to the GitLab project address http://IP:81/root/springcloud.git -> under Credentials click Add -> Jenkins -> enter the GitLab username and password -> click Add and select the new credential -> click Generate Pipeline Script

git credentialsId: '18919ffa-6c3e-4e92-bb36-79d7460a2647', url: 'http://IP:81/root/springcloud.git'

Enter the full pipeline script on the page and click Apply

node{

    stage('git clone'){
        //check CODE
git credentialsId: '18919ffa-6c3e-4e92-bb36-79d7460a2647', url: 'http://192.168.200.10:81/root/springcloud.git'
    }
    stage('maven build'){
        sh '''/usr/local/maven/bin/mvn package -DskipTests -f /var/jenkins_home/workspace/springcloud'''
    }
    stage('image build'){
        sh '''
              echo $BUILD_ID
              docker build -t 192.168.200.10/springcloud/gateway:$BUILD_ID -f /var/jenkins_home/workspace/springcloud/gateway/Dockerfile  /var/jenkins_home/workspace/springcloud/gateway
              docker build -t 192.168.200.10/springcloud/config:$BUILD_ID -f /var/jenkins_home/workspace/springcloud/config/Dockerfile  /var/jenkins_home/workspace/springcloud/config'''
    }
    stage('test'){
        sh '''docker run -itd --name gateway 192.168.200.10/springcloud/gateway:$BUILD_ID
        docker ps -a|grep springcloud|grep Up
        if [[ $? -eq 0 ]];then
            echo "Success!"
            docker rm -f gateway
        else
            docker rm -f gateway
            exit 1
            fi
       
        '''
    }
    stage('upload registry'){
        sh '''docker login 192.168.200.10 -u=admin -p=Harbor12345
            docker push 192.168.200.10/springcloud/gateway:$BUILD_ID
            docker push 192.168.200.10/springcloud/config:$BUILD_ID'''
    }
    stage('deploy Rancher'){
        //run the deployment steps
       sh 'sed -i "s/sqshq\\/piggymetrics-gateway/192.168.200.10\\/springcloud\\/gateway:$BUILD_ID/g" /var/jenkins_home/workspace/springcloud/yaml/deployment/gateway-deployment.yaml'
       sh 'sed -i "s/sqshq\\/piggymetrics-config/192.168.200.10\\/springcloud\\/config:$BUILD_ID/g" /var/jenkins_home/workspace/springcloud/yaml/deployment/config-deployment.yaml'
       sh 'kubectl create ns springcloud'
       sh 'kubectl apply -f /var/jenkins_home/workspace/springcloud/yaml/deployment/gateway-deployment.yaml --kubeconfig=/root/.kube/config'
       sh 'kubectl apply -f /var/jenkins_home/workspace/springcloud/yaml/deployment/config-deployment.yaml --kubeconfig=/root/.kube/config'
       sh 'kubectl apply -f /var/jenkins_home/workspace/springcloud/yaml/svc/gateway-svc.yaml --kubeconfig=/root/.kube/config'
       sh 'kubectl apply -f /var/jenkins_home/workspace/springcloud/yaml/svc/config-svc.yaml --kubeconfig=/root/.kube/config'
       
    }
}
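
One caveat in the script above: kubectl create ns springcloud fails on any build after the first, because the namespace already exists. A minimal adjustment (a sketch, not required by the task) is to make that step tolerant of an existing namespace, for example:

       sh 'kubectl get ns springcloud || kubectl create ns springcloud'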


From the Jenkins home page -> System Management -> Configure Global Security -> find Authorization -> grant anonymous users read access -> click Apply at the bottom


Log in to GitLab -> open the springcloud project -> in the left navigation find Webhooks under Settings -> enter the webhook address http://IP:8080/project/springcloud into URL -> untick Enable SSL verification -> click Add webhook

Scroll down to Project Hooks and find the newly added webhook -> click Test and select Push events -> a banner reading Hook executed successfully: HTTP 200 at the top means the test succeeded


Log in to Harbor -> create a new project springcloud -> set its access level to public


Test the webhook

[root@master springcloud]# docker exec jenkins mkdir -p /root/.m2/
[root@master ~]# docker cp /opt/repository/ jenkins:/root/.m2/
[root@master ~]# docker restart jenkins
[root@master springcloud]# cd /opt/springcloud/
[root@master springcloud]# touch testfile
[root@master springcloud]# git add .
[root@master springcloud]# git commit -m "Initial commit"
[master c943d8b] Initial commit
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 testfile
 
[root@master springcloud]# git push -u origin master
Username for 'http://192.168.200.10:81': root
Password for 'http://root@192.168.200.10:81':
Counting objects: 3, done.
Delta compression using up to 6 threads.
Compressing objects: 100% (2/2), done.
Writing objects: 100% (2/2), 294 bytes | 0 bytes/s, done.
Total 2 (delta 1), reused 0 (delta 0)
To http://192.168.200.10:81/root/springcloud.git
   d83b53a..c943d8b  master -> master
Branch 'master' set up to track remote branch 'master' from 'origin'.

On the Jenkins home page, springcloud appears in the build queue; click it to view the build details.

Wait for the build to finish. The pods can then be seen running in Kubernetes; after a few minutes, visiting http://IP:30010/ shows that the service has started successfully.

Note: while the workload is running, use kubectl get pods -n springcloud -o wide to check whether the two pods landed on the same node. If they did, access the service through that node's IP; if they did not, adjust the scheduling:

Run kubectl drain node to mark the node node unschedulable, then kubectl rollout restart -n springcloud deployment/config to redeploy; both pods should now end up on the same node. Finally run kubectl uncordon node to make the node node schedulable again.
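
A minimal command sequence for that workaround (the --ignore-daemonsets flag is usually needed because daemonset pods such as kube-proxy run on every node):

[root@master ~]# kubectl drain node --ignore-daemonsets
[root@master ~]# kubectl rollout restart -n springcloud deployment/config
[root@master ~]# kubectl uncordon node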


Answer

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 32164  100 32164    0     0  7696k      0 --:--:-- --:--:-- --:--:-- 10.2M
    <title>Piggy Metrics</title>
                        <span class="frominfo" id="infosubtitle">Piggy Metrics is the new simple way to deal with personal finances</span>
                        <a id="infofooter" href="https://github.com/sqshq/PiggyMetrics">&copy; 2016 sqshq.com</a>

12. Service mesh: create an Ingress Gateway (1 point)

Using project/istio/istio-1.17.2/services/bookinfo.yaml from the provided Kubernetes image, deploy the Bookinfo application into the default namespace. An Istio Gateway makes the application reachable from outside the cluster: create a gateway named bookinfo-gateway for the Bookinfo application, let all HTTP traffic enter the mesh through port 80, and bind the gateway to the bookinfo virtual service.

Submit the output of curl -L http://$(hostname -i):$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')/productpage | grep brand to the answer box.


[root@master ~]# kubectl apply -f /opt/project/istio/istio-1.17.2/services/bookinfo.yaml
[root@master ~]# vim bookinfo-gateway.yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: bookinfo-gateway
spec:
  selector:
    istio: ingressgateway # use istio default controller
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: bookinfo
spec:
  hosts:
  - "*"
  gateways:
  - bookinfo-gateway
  http:
  - match:
    - uri:
        exact: /productpage
    - uri:
        prefix: /static
    - uri:
        exact: /login
    - uri:
        exact: /logout
    - uri:
        prefix: /api/v1/products
    route:
    - destination:
        host: productpage
        port:
          number: 9080
[root@master ~]# kubectl apply -f bookinfo-gateway.yaml
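
A quick check that both resources exist (it should list bookinfo-gateway and the bookinfo virtual service):

[root@master ~]# kubectl get gateways.networking.istio.io,virtualservices.networking.istio.io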

Answer:

  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  4174  100  4174    0     0    178      0  0:00:23  0:00:23 --:--:--   948
      <a class="navbar-brand" href="#">BookInfo Sample</a>
      <a class="navbar-brand" href="#">Microservices Fabric BookInfo Demo</a>

13. Service mesh: create a VirtualService (1 point)

In the deployed Bookinfo application, three different versions of the reviews microservice are deployed and running at the same time. Open /productpage of the Bookinfo application in a browser and refresh a few times: sometimes the book reviews include star ratings and sometimes they do not. This is because no explicit default service version is configured, so Istio routes requests to all available versions in round-robin fashion.

(1) Delete all pods in the default namespace, enable automatic Sidecar injection, and let the pods be rescheduled.

(2) Create DestinationRule rules for the Bookinfo application, then create VirtualService resources that route all traffic to the v1 version of each microservice.

After this, visiting the Bookinfo page again shows that traffic only reaches the v1 version.

1. Submit the output of kubectl get namespace default --show-labels to the answer box.

2. Submit the output of kubectl describe vs reviews to the answer box.


[root@master ~]# kubectl label namespace default istio-injection=enabled
[root@master ~]# kubectl delete pod --all
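
Once the pods are recreated, each Bookinfo pod should report 2/2 ready containers (the application container plus the injected istio-proxy sidecar), which can be checked with:

[root@master ~]# kubectl get pods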
[root@master ~]# vim dr.yaml
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: productpage
spec:
  host: productpage
  subsets:
  - name: v1
    labels:
      version: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: reviews
spec:
  host: reviews
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
  - name: v3
    labels:
      version: v3
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: ratings
spec:
  host: ratings
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
  - name: v2-mysql
    labels:
      version: v2-mysql
  - name: v2-mysql-vm
    labels:
      version: v2-mysql-vm
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: details
spec:
  host: details
  subsets:
  - name: v1
    labels:
      version: v1
  - name: v2
    labels:
      version: v2
[root@master ~]# vim vs.yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: productpage
spec:
  hosts:
  - productpage
  http:
  - route:
    - destination:
        host: productpage
        subset: v1

---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: reviews
spec:
  hosts:
  - reviews
  http:
  - route:
    - destination:
        host: reviews
        subset: v1

---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: ratings
spec:
  hosts:
  - ratings
  http:
  - route:
    - destination:
        host: ratings
        subset: v1

---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: details
spec:
  hosts:
  - details
  http:
  - route:
    - destination:
        host: details
        subset: v1
[root@master ~]# kubectl apply -f dr.yaml
[root@master ~]# kubectl apply -f vs.yaml

Answer

NAME      STATUS   AGE    LABELS
default   Active   2d1h   istio-injection=enabled,kubernetes.io/metadata.name=default
Name:         reviews
Namespace:    default
Labels:       <none>
Annotations:  <none>
API Version:  networking.istio.io/v1beta1
Kind:         VirtualService
Metadata:
  Creation Timestamp:  2025-03-20T16:02:17Z
  Generation:          1
  Managed Fields:
    API Version:  networking.istio.io/v1alpha3
    Fields Type:  FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .:
          f:kubectl.kubernetes.io/last-applied-configuration:
      f:spec:
        .:
        f:hosts:
        f:http:
    Manager:         kubectl-client-side-apply
    Operation:       Update
    Time:            2025-03-20T16:02:17Z
  Resource Version:  302959
  UID:               42a46314-f3a1-4ecd-8d73-15d0974f8a1c
Spec:
  Hosts:
    reviews
  Http:
    Route:
      Destination:
        Host:    reviews
        Subset:  v1
Events:          <none>

14. KubeVirt operations: create a VM (1 point)

Using the provided image (images/fedora-virt_v1.0.tar), create a VM named test-vm in the default namespace, with 1Gi of memory, the virtio disk bus, and run strategy Manual.

(1) If the VM fails to schedule, modify the KubeVirt configuration to enable hardware emulation. Submit the output of kubectl edit kubevirts.kubevirt.io -n kubevirt to the answer box.

(2) Submit the output of kubectl describe vm test-vm to the answer box.


[root@master ~]# docker load -i fedora-virt_v1.0.tar
[root@master ~]# docker image save fedora-virt:v1.0 -o fedora-virt_v1.0.docker.tar
[root@master ~]# ctr -n k8s.io i import fedora-virt_v1.0.docker.tar

[root@master ~]# kubectl edit kubevirts.kubevirt.io -n kubevirt
spec:
  certificateRotateStrategy: {}
  configuration:
    developerConfiguration:      # remove the trailing {} from this line
      useEmulation: true         # add this line
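
The same change can also be made non-interactively with kubectl patch (a sketch; kubectl edit as shown above works just as well):

[root@master ~]# kubectl -n kubevirt patch kubevirt kubevirt --type merge -p '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'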
[root@master ~]# vim vm.yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: test-vm
spec:
  runStrategy: Manual
  template:
    spec:
      domain:
        resources:
          requests:
            memory: 1Gi
        devices:
          disks:
          - name: containerdisk
            disk:
              bus: virtio
      volumes:
        - name: containerdisk
          containerDisk:
            image: fedora-virt:v1.0
            imagePullPolicy: IfNotPresent
[root@master ~]# kubectl apply -f vm.yaml
[root@master ~]# virtctl start test-vm
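
To confirm the VM actually booted, the instance can be inspected and its serial console opened (optional):

[root@master ~]# kubectl get vmi test-vm
[root@master ~]# virtctl console test-vm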

If the VM does not start properly, check whether the firewall on the node node has been disabled.


Answer

 configuration:
    developerConfiguration:
      useEmulation: true
Name:         test-vm
Namespace:    default
Labels:       <none>
Annotations:  kubevirt.io/latest-observed-api-version: v1
              kubevirt.io/storage-observed-api-version: v1alpha3
API Version:  kubevirt.io/v1
Kind:         VirtualMachine
Metadata:
  Creation Timestamp:  2025-03-21T19:39:20Z
  Finalizers:
    kubevirt.io/virtualMachineControllerFinalize
  Generation:  1
  Managed Fields:
    API Version:  kubevirt.io/v1alpha3
    Fields Type:  FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:kubevirt.io/latest-observed-api-version:
          f:kubevirt.io/storage-observed-api-version:
        f:finalizers:
          .:
          v:"kubevirt.io/virtualMachineControllerFinalize":
    Manager:      Go-http-client
    Operation:    Update
    Time:         2025-03-21T19:39:20Z
    API Version:  kubevirt.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .:
          f:kubectl.kubernetes.io/last-applied-configuration:
      f:spec:
        .:
        f:runStrategy:
        f:template:
          .:
          f:spec:
            .:
            f:domain:
              .:
              f:devices:
                .:
                f:disks:
              f:resources:
                .:
                f:requests:
                  .:
                  f:memory:
            f:volumes:
    Manager:      kubectl-client-side-apply
    Operation:    Update
    Time:         2025-03-21T19:39:20Z
    API Version:  kubevirt.io/v1alpha3
    Fields Type:  FieldsV1
    fieldsV1:
      f:status:
        .:
        f:conditions:
        f:created:
        f:desiredGeneration:
        f:observedGeneration:
        f:printableStatus:
        f:volumeSnapshotStatuses:
    Manager:         Go-http-client
    Operation:       Update
    Subresource:     status
    Time:            2025-03-23T12:37:49Z
  Resource Version:  607842
  UID:               dce207ec-e22f-43db-957a-cfaf22580afc
Spec:
  Run Strategy:  Manual
  Template:
    Metadata:
      Creation Timestamp:  <nil>
    Spec:
      Domain:
        Devices:
          Disks:
            Disk:
              Bus:  virtio
            Name:   containerdisk
        Machine:
          Type:  q35
        Resources:
          Requests:
            Memory:  1Gi
      Volumes:
        Container Disk:
          Image:              fedora-virt:v1.0
          Image Pull Policy:  IfNotPresent
        Name:                 containerdisk
Status:
  Conditions:
    Last Probe Time:       <nil>
    Last Transition Time:  2025-03-23T12:30:18Z
    Message:               containers with unready status: [istio-proxy]
    Reason:                ContainersNotReady
    Status:                False
    Type:                  Ready
    Last Probe Time:       <nil>
    Last Transition Time:  <nil>
    Message:               cannot migrate VMI which does not use masquerade to connect to the pod network or bridge with kubevirt.io/allow-pod-bridge-network-live-migration VM annotation
    Reason:                InterfaceNotLiveMigratable
    Status:                False
    Type:                  LiveMigratable
  Created:                 true
  Desired Generation:      1
  Observed Generation:     1
  Printable Status:        Running
  Volume Snapshot Statuses:
    Enabled:  false
    Name:     containerdisk
    Reason:   Snapshot is not supported for this volumeSource type [containerdisk]
Events:       <none>

15. KubeVirt operations: enable feature gates (1 point)

KubeVirt has many optional features. To make better use of it, change the KubeVirt configuration to enable the following:

(1) Snapshot/restore support

(2) Hotplug volumes

(3) Live migration

(4) Sidecar

(5) Host disk

After making the change, submit the output of kubectl describe kubevirt kubevirt -n kubevirt to the answer box.


[root@master ~]# kubectl edit kubevirt kubevirt -n kubevirt
spec:
  certificateRotateStrategy: {}
  configuration:
    developerConfiguration:
      useEmulation: true
      ## add the following lines
      featureGates:
      - Snapshot
      - HotplugVolumes
      - LiveMigration
      - Sidecar
      - HostDisk
      ## end of added lines
  customizeComponents: {}
  imagePullPolicy: IfNotPresent
  workloadUpdateStrategy: {}
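
A quick way to confirm the feature gates were recorded (optional):

[root@master ~]# kubectl -n kubevirt get kubevirt kubevirt -o jsonpath='{.spec.configuration.developerConfiguration.featureGates}'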

Answer

Name:         kubevirt
Namespace:    kubevirt
Labels:       <none>
Annotations:  kubevirt.io/latest-observed-api-version: v1
              kubevirt.io/storage-observed-api-version: v1alpha3
API Version:  kubevirt.io/v1
Kind:         KubeVirt
Metadata:
  Creation Timestamp:  2025-03-18T14:58:13Z
  Finalizers:
    foregroundDeleteKubeVirt
  Generation:  4
  Managed Fields:
    API Version:  kubevirt.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .:
          f:kubectl.kubernetes.io/last-applied-configuration:
      f:spec:
        .:
        f:certificateRotateStrategy:
        f:configuration:
          .:
          f:developerConfiguration:
        f:customizeComponents:
        f:imagePullPolicy:
        f:workloadUpdateStrategy:
    Manager:      kubectl-client-side-apply
    Operation:    Update
    Time:         2025-03-18T14:58:13Z
    API Version:  kubevirt.io/v1alpha3
    Fields Type:  FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          f:kubevirt.io/latest-observed-api-version:
          f:kubevirt.io/storage-observed-api-version:
        f:finalizers:
          .:
          v:"foregroundDeleteKubeVirt":
    Manager:      Go-http-client
    Operation:    Update
    Time:         2025-03-18T14:59:06Z
    API Version:  kubevirt.io/v1
    Fields Type:  FieldsV1
    fieldsV1:
      f:spec:
        f:configuration:
          f:developerConfiguration:
            f:featureGates:
            f:useEmulation:
    Manager:      kubectl-edit
    Operation:    Update
    Time:         2025-03-23T17:11:37Z
    API Version:  kubevirt.io/v1alpha3
    Fields Type:  FieldsV1
    fieldsV1:
      f:status:
        .:
        f:conditions:
        f:generations:
        f:observedDeploymentConfig:
        f:observedDeploymentID:
        f:observedGeneration:
        f:observedKubeVirtRegistry:
        f:observedKubeVirtVersion:
        f:operatorVersion:
        f:outdatedVirtualMachineInstanceWorkloads:
        f:phase:
        f:targetDeploymentConfig:
        f:targetDeploymentID:
        f:targetKubeVirtRegistry:
        f:targetKubeVirtVersion:
    Manager:         Go-http-client
    Operation:       Update
    Subresource:     status
    Time:            2025-03-23T17:11:51Z
  Resource Version:  660712
  UID:               6b36da6a-ddd6-45f5-ab28-e5a11aab5ec0
Spec:
  Certificate Rotate Strategy:
  Configuration:
    Developer Configuration:
      Feature Gates:
        Snapshot
        HotplugVolumes
        LiveMigration
        Sidecar
        HostDisk
      Use Emulation:  true
  Customize Components:
  Image Pull Policy:  IfNotPresent
  Workload Update Strategy:
Status:
  Conditions:
    Last Probe Time:       2025-03-23T17:11:51Z
    Last Transition Time:  2025-03-23T17:11:51Z
    Message:               All components are ready.
    Reason:                AllComponentsReady
    Status:                True
    Type:                  Available
    Last Probe Time:       2025-03-23T17:11:51Z
    Last Transition Time:  2025-03-23T17:11:51Z
    Message:               All components are ready.
    Reason:                AllComponentsReady
    Status:                False
    Type:                  Progressing
    Last Probe Time:       2025-03-23T17:11:51Z
    Last Transition Time:  2025-03-23T17:11:51Z
    Message:               All components are ready.
    Reason:                AllComponentsReady
    Status:                False
    Type:                  Degraded
    Last Probe Time:       2025-03-18T14:59:32Z
    Last Transition Time:  <nil>
    Message:               All resources were created.
    Reason:                AllResourcesCreated
    Status:                True
    Type:                  Created
  Generations:
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineinstances.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineinstancepresets.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineinstancereplicasets.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachines.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineinstancemigrations.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachinesnapshots.snapshot.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachinesnapshotcontents.snapshot.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachinerestores.snapshot.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineinstancetypes.instancetype.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineclusterinstancetypes.instancetype.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachinepools.pool.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       migrationpolicies.migrations.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachinepreferences.instancetype.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineclusterpreferences.instancetype.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineexports.export.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      apiextensions.k8s.io/v1
    Last Generation:                            1
    Name:                                       virtualmachineclones.clone.kubevirt.io
    Resource:                                   customresourcedefinitions
    Group:                                      admissionregistration.k8s.io
    Last Generation:                            4
    Name:                                       virt-operator-validator
    Resource:                                   validatingwebhookconfigurations
    Group:                                      admissionregistration.k8s.io
    Last Generation:                            4
    Name:                                       virt-api-validator
    Resource:                                   validatingwebhookconfigurations
    Group:                                      admissionregistration.k8s.io
    Last Generation:                            4
    Name:                                       virt-api-mutator
    Resource:                                   mutatingwebhookconfigurations
    Group:                                      apps
    Last Generation:                            3
    Name:                                       virt-api
    Namespace:                                  kubevirt
    Resource:                                   deployments
    Group:                                      apps
    Last Generation:                            1
    Name:                                       virt-api-pdb
    Namespace:                                  kubevirt
    Resource:                                   poddisruptionbudgets
    Group:                                      apps
    Last Generation:                            3
    Name:                                       virt-controller
    Namespace:                                  kubevirt
    Resource:                                   deployments
    Group:                                      apps
    Last Generation:                            1
    Name:                                       virt-controller-pdb
    Namespace:                                  kubevirt
    Resource:                                   poddisruptionbudgets
    Group:                                      apps
    Last Generation:                            1
    Name:                                       virt-handler
    Namespace:                                  kubevirt
    Resource:                                   daemonsets
  Observed Deployment Config:                   {"id":"39b954499819e4a31e4de6b06d9a2a85fc3c2d36","namespace":"kubevirt","registry":"quay.io/kubevirt","kubeVirtVersion":"v1.0.0-alpha.0","virtOperatorImage":"quay.io/kubevirt/virt-operator:v1.0.0-alpha.0","additionalProperties":{"CertificateRotationStrategy":"\u003cv1.KubeVirtCertificateRotateStrategy Value\u003e","Configuration":"\u003cv1.KubeVirtConfiguration Value\u003e","CustomizeComponents":"\u003cv1.CustomizeComponents Value\u003e","ImagePullPolicy":"IfNotPresent","ImagePullSecrets":"null","Infra":"\u003c*v1.ComponentConfig Value\u003e","MonitorAccount":"","MonitorNamespace":"","ProductComponent":"","ProductName":"","ProductVersion":"","ServiceMonitorNamespace":"","UninstallStrategy":"","WorkloadUpdateStrategy":"\u003cv1.KubeVirtWorkloadUpdateStrategy Value\u003e","Workloads":"\u003c*v1.ComponentConfig Value\u003e"}}
  Observed Deployment ID:                       39b954499819e4a31e4de6b06d9a2a85fc3c2d36
  Observed Generation:                          4
  Observed Kube Virt Registry:                  quay.io/kubevirt
  Observed Kube Virt Version:                   v1.0.0-alpha.0
  Operator Version:                             v0.0.0-master+$Format:%h$
  Outdated Virtual Machine Instance Workloads:  0
  Phase:                                        Deployed
  Target Deployment Config:                     {"id":"39b954499819e4a31e4de6b06d9a2a85fc3c2d36","namespace":"kubevirt","registry":"quay.io/kubevirt","kubeVirtVersion":"v1.0.0-alpha.0","virtOperatorImage":"quay.io/kubevirt/virt-operator:v1.0.0-alpha.0","additionalProperties":{"CertificateRotationStrategy":"\u003cv1.KubeVirtCertificateRotateStrategy Value\u003e","Configuration":"\u003cv1.KubeVirtConfiguration Value\u003e","CustomizeComponents":"\u003cv1.CustomizeComponents Value\u003e","ImagePullPolicy":"IfNotPresent","ImagePullSecrets":"null","Infra":"\u003c*v1.ComponentConfig Value\u003e","MonitorAccount":"","MonitorNamespace":"","ProductComponent":"","ProductName":"","ProductVersion":"","ServiceMonitorNamespace":"","UninstallStrategy":"","WorkloadUpdateStrategy":"\u003cv1.KubeVirtWorkloadUpdateStrategy Value\u003e","Workloads":"\u003c*v1.ComponentConfig Value\u003e"}}
  Target Deployment ID:                         39b954499819e4a31e4de6b06d9a2a85fc3c2d36
  Target Kube Virt Registry:                    quay.io/kubevirt
  Target Kube Virt Version:                     v1.0.0-alpha.0
Events:                                         <none>

16. Deployment management: create a deployment (1 point)

Label the master node with "tty=master", then write deployment.yaml to create a deployment with the following requirements.

(1) The deployment is named test-deployment

(2) It uses the nginx image

(3) Pods may only be scheduled onto nodes labelled "tty=master"

After creating the deployment, submit the output of cat deployment.yaml && kubectl describe deployment test-deployment to the answer box.


[root@master ~]# kubectl label nodes master tty=master
node/master labeled
[root@master ~]# vim deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-deployment
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 192.168.200.10/library/nginx:latest
        imagePullPolicy: IfNotPresent
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: tty
                operator: In
                values:
                - master
[root@master ~]# kubectl apply -f deployment.yaml
deployment.apps/test-deployment created
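
A quick check that the pod really landed on the master node (the NODE column should read master):

[root@master ~]# kubectl get pods -o wide | grep test-deployment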

Answer

apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-deployment
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: 192.168.200.10/library/nginx:latest
        imagePullPolicy: IfNotPresent
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: tty
                operator: In
                values:
                - master

Name:                   test-deployment
Namespace:              default
CreationTimestamp:      Mon, 24 Mar 2025 01:19:05 +0800
Labels:                 <none>
Annotations:            deployment.kubernetes.io/revision: 1
Selector:               app=nginx
Replicas:               1 desired | 1 updated | 1 total | 1 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=nginx
  Containers:
   nginx:
    Image:        192.168.200.10/library/nginx:latest
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   test-deployment-6b49cc498f (1/1 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  44s   deployment-controller  Scaled up replica set test-deployment-6b49cc498f to 1

17. PV management: create a PV (1 point)

Create a PV named app-config, 2Gi in size, with access mode ReadWriteMany. The volume type is hostPath and the path is /srv/app-config.

When done, submit the output of kubectl describe pv to the answer box.


[root@master ~]# mkdir -pv  /srv/app-config
mkdir: created directory '/srv/app-config'
[root@master ~]# vim pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: app-config
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  hostPath:
    path: "/srv/app-config"
[root@master ~]# kubectl apply -f pv.yaml
persistentvolume/app-config created

Answer

Name:            app-config
Labels:          <none>
Annotations:     <none>
Finalizers:      [kubernetes.io/pv-protection]
StorageClass:
Status:          Available
Claim:
Reclaim Policy:  Retain
Access Modes:    RWX
VolumeMode:      Filesystem
Capacity:        2Gi
Node Affinity:   <none>
Message:
Source:
    Type:          HostPath (bare host directory volume)
    Path:          /srv/app-config
    HostPathType:
Events:            <none>

18. Ingress management: create an Ingress (1 point)

Create a new nginx Ingress resource:

(1) Name: pong

(2) Namespace: ing-internal

(3) Expose the service hello on path /hello using service port 5678

Submit the output of kubectl describe ingress -n ing-internal to the answer box.


[root@master ~]# kubectl create ns ing-internal
[root@master ~]# vim ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: pong                   
  namespace: ing-internal      
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
  - http:
      paths:
      - path: /hello           
        pathType: Prefix
        backend:
          service:
            name: hello
            port:
              number: 5678
[root@master ~]# kubectl apply -f ingress.yaml
ingress.networking.k8s.io/pong created

Answer

[root@master ~]# kubectl describe ingress -n ing-internal
Name:             pong
Labels:           <none>
Namespace:        ing-internal
Address:
Ingress Class:    nginx
Default backend:  <default>
Rules:
  Host        Path  Backends
  ----        ----  --------
  *
              /hello   hello:5678 (<error: endpoints "hello" not found>)
Annotations:  nginx.ingress.kubernetes.io/rewrite-target: /
Events:       <none>

Task 3 Deploy the OwnCloud network disk service (4 points)

ownCloud is a free, open-source, professional private-cloud storage project. It lets you quickly set up a dedicated private-cloud file-sync drive on a personal computer or server, providing cross-platform file synchronization, sharing, version control, team collaboration, and more, much like Baidu Netdisk.

1. Create a PV and PVC (1 point)

Write a YAML file (filename of your choice) to create a PV and a PVC that provide persistent storage for the files and data of the ownCloud service.

Requirements: PV (named owncloud-pv; read-write access mode, mountable by a single node only; capacity 5Gi; type hostPath with a path of your choice); PVC (named owncloud-pvc; read-write access mode, mountable by a single node only; requested storage 5Gi).

Submit the output of the kubectl get pv,pvc command to the answer box.


[root@master ~]# vim owncloud-pvc.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: owncloud-pv
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 5Gi
  hostPath:
    path: /data/owncloud
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: owncloud-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
[root@master ~]# kubectl apply -f owncloud-pvc.yaml
persistentvolume/owncloud-pv created
persistentvolumeclaim/owncloud-pvc created

Answer

NAME                           CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                  STORAGECLASS   REASON   AGE
persistentvolume/owncloud-pv   5Gi        RWO            Retain           Bound    default/owncloud-pvc                           18s

NAME                                 STATUS   VOLUME        CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/owncloud-pvc   Bound    owncloud-pv   5Gi        RWO                           18s

2. Configure a ConfigMap (0.5 points)

Write a YAML file (filename of your choice) to create a ConfigMap object named owncloud-config that defines OwnCloud's environment variables: OWNCLOUD_ADMIN_USERNAME for the login account and OWNCLOUD_ADMIN_PASSWORD for the password (values of your choice).

Submit the output of the kubectl get ConfigMap command to the answer box.


[root@master ~]# vim owncloud-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: owncloud-config
data:
  OWNCLOUD_ADMIN_USERNAME: "admin"
  OWNCLOUD_ADMIN_PASSWORD: "123456"
[root@master ~]# kubectl apply -f owncloud-configmap.yaml
configmap/owncloud-config created
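
If you want to double-check the values before wiring them into the Deployment, an optional read-back of the ConfigMap shows the two keys defined above:

[root@master ~]# kubectl describe configmap owncloud-config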

Answer

NAME                 DATA   AGE
istio-ca-root-cert   1      4d21h
kube-root-ca.crt     1      4d21h
owncloud-config      2      13s

3. Create a Secret (0.5 points)

Write a YAML file (filename of your choice) to create a Secret object named owncloud-db-password to hold the OwnCloud database password. Encode the plaintext password in base64.

Submit the output of the kubectl get Secret command to the answer box.


[root@master ~]# echo -n 123456 | base64
MTIzNDU2
[root@master ~]# vim owncloud-secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: owncloud-db-password
type: Opaque
data:
  password: MTIzNDU2
[root@master ~]# kubectl apply -f owncloud-secret.yaml
secret/owncloud-db-password created
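
As an optional sanity check (not required by the task), the stored value can be decoded to confirm it round-trips to the original password:

[root@master ~]# kubectl get secret owncloud-db-password -o jsonpath='{.data.password}' | base64 -d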

Answer

NAME                   TYPE     DATA   AGE
owncloud-db-password   Opaque   1      38s

4. Deploy the OwnCloud Deployment (1 point)

Write a YAML file (filename of your choice) to create a Deployment object that specifies the OwnCloud container and its environment variables. (Name the Deployment resource owncloud-deployment, use the owncloud:latest image from the Harbor registry, mount the storage at /var/www/html, and configure the rest as appropriate.)

Submit the output of the kubectl describe pod $(kubectl get pod | grep owncloud-deployment | awk -F \ '{print $1}') command to the answer box.


[root@master ~]# vim owncloud-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: owncloud-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: owncloud
  template:
    metadata:
      labels:
        app: owncloud
    spec:
      containers:
      - name: owncloud
        image: 192.168.200.10/library/owncloud:latest
        imagePullPolicy: IfNotPresent
        envFrom:
        - configMapRef:
            name: owncloud-config
        env:
        - name: OWNCLOUD_DB_PASSWORD
          valueFrom:
            secretKeyRef:
              name: owncloud-db-password
              key: password
        ports:
        - containerPort: 80
        volumeMounts:
        - name: owncloud-pv
          mountPath: /var/www/html
      volumes:
      - name: owncloud-pv
        persistentVolumeClaim:
          claimName: owncloud-pvc
[root@master ~]# kubectl apply -f owncloud-deploy.yaml
deployment.apps/owncloud-deployment created
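
Before capturing the describe output below, it can help to wait for the rollout to complete and confirm where the pod landed; two optional checks:

[root@master ~]# kubectl rollout status deployment/owncloud-deployment
[root@master ~]# kubectl get pod -l app=owncloud -o wide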

Answer

Name:             owncloud-deployment-6fcc6c5c6-rpnwh
Namespace:        default
Priority:         0
Service Account:  default
Node:             master/192.168.200.10
Start Time:       Sun, 23 Mar 2025 20:37:47 +0800
Labels:           app=owncloud
                  pod-template-hash=6fcc6c5c6
                  security.istio.io/tlsMode=istio
                  service.istio.io/canonical-name=owncloud
                  service.istio.io/canonical-revision=latest
Annotations:      k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "cbr0",
                        "interface": "eth0",
                        "ips": [
                            "10.244.0.45"
                        ],
                        "mac": "fe:55:da:f1:82:67",
                        "default": true,
                        "dns": {}
                    }]
                  k8s.v1.cni.cncf.io/networks-status:
                    [{
                        "name": "cbr0",
                        "interface": "eth0",
                        "ips": [
                            "10.244.0.45"
                        ],
                        "mac": "fe:55:da:f1:82:67",
                        "default": true,
                        "dns": {}
                    }]
                  kubectl.kubernetes.io/default-container: owncloud
                  kubectl.kubernetes.io/default-logs-container: owncloud
                  prometheus.io/path: /stats/prometheus
                  prometheus.io/port: 15020
                  prometheus.io/scrape: true
                  sidecar.istio.io/status:
                    {"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-socket","credential-socket","workload-certs","istio-env...
Status:           Running
IP:               10.244.0.45
IPs:
  IP:           10.244.0.45
Controlled By:  ReplicaSet/owncloud-deployment-6fcc6c5c6
Init Containers:
  istio-init:
    Container ID:  containerd://3b1481fdb705027ca062cd697a0b8253af9b38c43726124c2809bd8af1a4b211
    Image:         docker.io/istio/proxyv2:1.17.2
    Image ID:      sha256:3944a6baf515cabc77e05986b76d498f77b5c47a65aa736cb6c7086f368b8339
    Port:          <none>
    Host Port:     <none>
    Args:
      istio-iptables
      -p
      15001
      -z
      15006
      -u
      1337
      -m
      REDIRECT
      -i
      *
      -x

      -b
      *
      -d
      15090,15021,15020
      --log_output_level=default:info
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sun, 23 Mar 2025 20:37:48 +0800
      Finished:     Sun, 23 Mar 2025 20:37:48 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:        10m
      memory:     40Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-b2rnj (ro)
Containers:
  owncloud:
    Container ID:   containerd://cf5730bd3df92785eb5dd1c29743f1ecf495c07e97e432b2f965b5a53ff5fb8a
    Image:          192.168.200.10/library/owncloud:latest
    Image ID:       192.168.200.10/library/owncloud@sha256:5c77bfdf8cfaf99ec94309be2687032629f4f985d6bd388354dfd85475aa5f21
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Sun, 23 Mar 2025 20:37:55 +0800
    Ready:          True
    Restart Count:  0
    Environment Variables from:
      owncloud-config  ConfigMap  Optional: false
    Environment:
      OWNCLOUD_DB_PASSWORD:  <set to the key 'password' in secret 'owncloud-db-password'>  Optional: false
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-b2rnj (ro)
      /var/www/html from owncloud-pv (rw)
  istio-proxy:
    Container ID:  containerd://a3c14c3c8afb6d576005a52e18ae5aa426df82a105bcc2c9e4c0bc38438a4aed
    Image:         docker.io/istio/proxyv2:1.17.2
    Image ID:      sha256:3944a6baf515cabc77e05986b76d498f77b5c47a65aa736cb6c7086f368b8339
    Port:          15090/TCP
    Host Port:     0/TCP
    Args:
      proxy
      sidecar
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --proxyLogLevel=warning
      --proxyComponentLogLevel=misc:error
      --log_output_level=default:info
      --concurrency
      2
    State:          Running
      Started:      Sun, 23 Mar 2025 20:37:55 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:      10m
      memory:   40Mi
    Readiness:  http-get http://:15021/healthz/ready delay=1s timeout=3s period=2s #success=1 #failure=30
    Environment:
      JWT_POLICY:                    third-party-jwt
      PILOT_CERT_PROVIDER:           istiod
      CA_ADDR:                       istiod.istio-system.svc:15012
      POD_NAME:                      owncloud-deployment-6fcc6c5c6-rpnwh (v1:metadata.name)
      POD_NAMESPACE:                 default (v1:metadata.namespace)
      INSTANCE_IP:                    (v1:status.podIP)
      SERVICE_ACCOUNT:                (v1:spec.serviceAccountName)
      HOST_IP:                        (v1:status.hostIP)
      PROXY_CONFIG:                  {}

      ISTIO_META_POD_PORTS:          [
                                         {"containerPort":80,"protocol":"TCP"}
                                     ]
      ISTIO_META_APP_CONTAINERS:     owncloud
      ISTIO_META_CLUSTER_ID:         Kubernetes
      ISTIO_META_NODE_NAME:           (v1:spec.nodeName)
      ISTIO_META_INTERCEPTION_MODE:  REDIRECT
      ISTIO_META_WORKLOAD_NAME:      owncloud-deployment
      ISTIO_META_OWNER:              kubernetes://apis/apps/v1/namespaces/default/deployments/owncloud-deployment
      ISTIO_META_MESH_ID:            cluster.local
      TRUST_DOMAIN:                  cluster.local
    Mounts:
      /etc/istio/pod from istio-podinfo (rw)
      /etc/istio/proxy from istio-envoy (rw)
      /var/lib/istio/data from istio-data (rw)
      /var/run/secrets/credential-uds from credential-socket (rw)
      /var/run/secrets/istio from istiod-ca-cert (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-b2rnj (ro)
      /var/run/secrets/tokens from istio-token (rw)
      /var/run/secrets/workload-spiffe-credentials from workload-certs (rw)
      /var/run/secrets/workload-spiffe-uds from workload-socket (rw)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  workload-socket:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  credential-socket:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  workload-certs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  istio-envoy:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  <unset>
  istio-data:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  istio-podinfo:
    Type:  DownwardAPI (a volume populated by information about the pod)
    Items:
      metadata.labels -> labels
      metadata.annotations -> annotations
  istio-token:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  43200
  istiod-ca-cert:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-ca-root-cert
    Optional:  false
  owncloud-pv:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  owncloud-pvc
    ReadOnly:   false
  kube-api-access-b2rnj:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason          Age    From               Message
  ----    ------          ----   ----               -------
  Normal  Scheduled       2m59s  default-scheduler  Successfully assigned default/owncloud-deployment-6fcc6c5c6-rpnwh to master
  Normal  AddedInterface  3m     multus             Add eth0 [10.244.0.45/24] from cbr0
  Normal  Pulled          3m     kubelet            Container image "docker.io/istio/proxyv2:1.17.2" already present on machine
  Normal  Created         2m59s  kubelet            Created container istio-init
  Normal  Started         2m59s  kubelet            Started container istio-init
  Normal  Pulling         2m58s  kubelet            Pulling image "192.168.200.10/library/owncloud:latest"
  Normal  Pulled          2m52s  kubelet            Successfully pulled image "192.168.200.10/library/owncloud:latest" in 6.25418039s
  Normal  Created         2m52s  kubelet            Created container owncloud
  Normal  Started         2m52s  kubelet            Started container owncloud
  Normal  Pulled          2m52s  kubelet            Container image "docker.io/istio/proxyv2:1.17.2" already present on machine
  Normal  Created         2m52s  kubelet            Created container istio-proxy
  Normal  Started         2m52s  kubelet            Started container istio-proxy

5. Create a Service (1 point)

Write a YAML file (filename of your choice) to create a Service object named owncloud-service that exposes OwnCloud outside the cluster via NodePort, so that OwnCloud can be viewed at http://IP:port.

Submit the output of the kubectl get svc -A command to the answer box.


[root@master ~]# vim owncloud-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: owncloud-service
spec:
  selector:
    app: owncloud
  ports:
    - name: http
      port: 80
  type: NodePort
[root@master ~]# kubectl apply -f owncloud-svc.yaml
service/owncloud-service created

Answer

NAMESPACE              NAME                          TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                                                                      AGE
default                details                       ClusterIP      10.102.112.13    <none>        9080/TCP                                                                     3d18h
default                kubernetes                    ClusterIP      10.96.0.1        <none>        443/TCP                                                                      4d22h
default                owncloud-service              NodePort       10.111.240.204   <none>        80:32675/TCP                                                                 2m58s
default                productpage                   ClusterIP      10.108.135.237   <none>        9080/TCP                                                                     3d18h
default                ratings                       ClusterIP      10.102.77.205    <none>        9080/TCP                                                                     3d18h
default                reviews                       ClusterIP      10.103.141.115   <none>        9080/TCP                                                                     3d18h
istio-system           grafana                       ClusterIP      10.99.229.56     <none>        3000/TCP                                                                     4d21h
istio-system           istio-egressgateway           ClusterIP      10.107.63.162    <none>        80/TCP,443/TCP                                                               4d21h
istio-system           istio-ingressgateway          LoadBalancer   10.96.43.106     <pending>     15021:30934/TCP,80:30691/TCP,443:30075/TCP,31400:30825/TCP,15443:30382/TCP   4d21h
istio-system           istiod                        ClusterIP      10.101.91.95     <none>        15010/TCP,15012/TCP,443/TCP,15014/TCP                                        4d21h
istio-system           jaeger-collector              ClusterIP      10.97.104.76     <none>        14268/TCP,14250/TCP,9411/TCP                                                 4d21h
istio-system           kiali                         ClusterIP      10.102.1.13      <none>        20001/TCP,9090/TCP                                                           4d21h
istio-system           prometheus                    ClusterIP      10.109.215.71    <none>        9090/TCP                                                                     4d21h
istio-system           tracing                       ClusterIP      10.104.202.190   <none>        80/TCP,16685/TCP                                                             4d21h
istio-system           zipkin                        ClusterIP      10.98.127.164    <none>        9411/TCP                                                                     4d21h
kube-system            kube-dns                      ClusterIP      10.96.0.10       <none>        53/UDP,53/TCP,9153/TCP                                                       4d22h
kubernetes-dashboard   dashboard-metrics-scraper     ClusterIP      10.105.147.37    <none>        8000/TCP                                                                     4d22h
kubernetes-dashboard   kubernetes-dashboard          NodePort       10.96.210.77     <none>        443:30001/TCP                                                                4d22h
kubevirt               kubevirt-operator-webhook     ClusterIP      10.107.91.91     <none>        443/TCP                                                                      4d21h
kubevirt               kubevirt-prometheus-metrics   ClusterIP      None             <none>        443/TCP                                                                      4d21h
kubevirt               virt-api                      ClusterIP      10.97.255.142    <none>        443/TCP                                                                      4d21h
kubevirt               virt-exportproxy              ClusterIP      10.104.188.8     <none>        443/TCP                                                                      4d21h
springcloud            config                        NodePort       10.98.108.191    <none>        8888:30015/TCP                                                               3d19h
springcloud            gateway                       NodePort       10.111.13.225    <none>        4000:30010/TCP                                                               3d19h
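
With the NodePort known (32675 in the output above; the port is allocated dynamically, so substitute whatever kubectl get svc reports in your environment), OwnCloud can be reached from outside the cluster, for example:

[root@master ~]# curl -I http://192.168.200.10:32675/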

Task 4 Deploy a Nacos cluster (8.5 points)

Nacos (short for Dynamic Naming and Configuration Service) is a platform for dynamic service discovery, configuration management, and service management that makes it easier to build cloud-native applications. Open-sourced by Alibaba, Nacos helps users discover, configure, and manage microservices.

Nacos provides a simple, easy-to-use feature set for dynamic service discovery, service configuration, service metadata, and traffic management. It is the service infrastructure for building modern, service-centric application architectures such as microservices and cloud-native paradigms.

1. Import the images (0.5 points)

Extract the Nacos-Ingress.tar.gz archive into the root home directory, use /root/nacos-ingress as the working directory for the following steps, and load nacos-ingress.tar into docker.

On the master node, submit the output of the docker images | grep ingress && nerdctl images | grep nacos command to the answer box.


[root@master ~]# tar -zxvf Nacos-ingress.tar.gz
[root@master ~]# cd nacos-ingress
[root@master nacos-ingress]# docker load -i nacos-ingress.tar
[root@master nacos-ingress]# nerdctl load -i nacos-ingress.tar
[root@master nacos-ingress]# scp nacos-ingress.tar node:/root/
[root@master nacos-ingress]# ssh node docker load -i /root/nacos-ingress.tar
[root@master nacos-ingress]# ssh node nerdctl load -i /root/nacos-ingress.tar
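
One environment-specific caveat: containerd serves Kubernetes images from the k8s.io namespace, while nerdctl load without a namespace flag imports into its default namespace. If the nacos pods later sit in ErrImagePull/ImagePullBackOff, re-import the archive into the k8s.io namespace as a fallback (treat this as an assumption about how this environment is wired up):

[root@master nacos-ingress]# nerdctl -n k8s.io load -i nacos-ingress.tar
[root@master nacos-ingress]# ssh node nerdctl -n k8s.io load -i /root/nacos-ingress.tar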

Answer

registry.k8s.io/ingress-nginx/controller                          v1.8.0           61b98eba16a3   2 years ago    446MB
registry.k8s.io/ingress-nginx/kube-webhook-certgen                latest           c41e9fcadf5a   3 years ago    47.7MB
nacos/nacos-mysql                                     5.7               eb139beb9692    52 seconds ago    linux/amd64    447.4 MiB    427.3 MiB
nacos/nacos-peer-finder-plugin                        v1.1              409c241064be    52 seconds ago    linux/amd64    216.9 MiB    207.3 MiB
nacos/nacos-server                                    latest            43dee4ce13ed    52 seconds ago    linux/amd64    278.4 MiB    274.8 MiB

2. Configure the NFS server (0.5 points)

On the master node, install the NFS service as the server. Create /root/data/nacos as the nacos shared directory and /root/data/mysql as the mysql shared directory, grant both directories 777 permissions, edit the NFS configuration to expose the two directories to any user with read-write access, then restart the NFS service and enable it at boot.

On the master node, submit the output of the systemctl status nfs && showmount -e command to the answer box.


[root@master nacos-ingress]# yum install -y nfs-utils
[root@master nacos-ingress]# mkdir -p /root/data/{nacos,mysql}
[root@master nacos-ingress]# chmod 777 /root/data/*
[root@master nacos-ingress]# vim /etc/exports
/root/data/nacos *(insecure,rw,async,no_root_squash)
/root/data/mysql *(insecure,rw,async,no_root_squash)
[root@master nacos-ingress]# systemctl restart nfs && systemctl enable nfs
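
An optional cross-check from the client side confirms the exports are reachable over the network (the nfs-utils client package must be present on node):

[root@node ~]# yum install -y nfs-utils
[root@node ~]# showmount -e master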

Answer

● nfs-server.service - NFS server and services
   Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; enabled; vendor preset: disabled)
  Drop-In: /run/systemd/generator/nfs-server.service.d
           └─order-with-mounts.conf
   Active: active (exited) since 日 2025-03-23 21:16:25 CST; 47s ago
 Main PID: 92307 (code=exited, status=0/SUCCESS)
   CGroup: /system.slice/nfs-server.service

3月 23 21:16:25 master systemd[1]: Starting NFS server and services...
3月 23 21:16:25 master systemd[1]: Started NFS server and services.
Export list for master:
/root/data/mysql *
/root/data/nacos *

3. Create a ServiceAccount and configure RBAC (1 point)

On the master node, write /root/nacos-ingress/rbac.yaml to accomplish the following:

1) Create a ServiceAccount named nfs-client-provisioner.

2) Create a Role named leader-locking-nfs-client-provisioner and a RoleBinding named leader-locking-nfs-client-provisioner, where the Role must:

① allow get, list, watch, create, update, and patch on endpoints;

② bind leader-locking-nfs-client-provisioner to the ServiceAccount nfs-client-provisioner, referencing the Role leader-locking-nfs-client-provisioner.

3) Create a ClusterRole named nfs-client-provisioner-runner and a ClusterRoleBinding named run-nfs-client-provisioner, where the ClusterRole must:

① allow get, list, watch, create, and delete on persistentvolumes;

② allow get, list, watch, and update on persistentvolumeclaims;

③ allow get, list, watch, create, update, and patch on endpoints;

④ allow get, list, and watch on storageclasses;

⑤ allow create, update, and patch on events;

⑥ bind run-nfs-client-provisioner to the ServiceAccount nfs-client-provisioner, referencing the ClusterRole nfs-client-provisioner-runner.

After creating the resources on the master node, submit the output of the cat /root/nacos-ingress/rbac.yaml && kubectl describe role.rbac.authorization.k8s.io command to the answer box.


[root@master nacos-ingress]# vim /root/nacos-ingress/rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
[root@master nacos-ingress]# kubectl apply -f /root/nacos-ingress/rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
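
Beyond the describe output below, kubectl auth can-i gives a quick optional spot-check that the bindings grant what the task requires, by impersonating the ServiceAccount:

[root@master nacos-ingress]# kubectl auth can-i create persistentvolumes --as=system:serviceaccount:default:nfs-client-provisioner
[root@master nacos-ingress]# kubectl auth can-i update endpoints --as=system:serviceaccount:default:nfs-client-provisioner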

Answer

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

Name:         leader-locking-nfs-client-provisioner
Labels:       <none>
Annotations:  <none>
PolicyRule:
  Resources  Non-Resource URLs  Resource Names  Verbs
  ---------  -----------------  --------------  -----
  endpoints  []                 []              [get list watch create update patch]

4. Create a PV and PVC for nacos (1 point)

In the /root/nacos-ingress/ directory, write nacos-pv.yaml to create a PV named nacos-pv and nacos-pvc.yaml to create a PVC named nacos-data; they must mount the nacos directory exported by NFS and be 2Gi in size.

On the master node, submit the output of the cat /root/nacos-ingress/nacos-pv* && kubectl get pv,pvc && kubectl describe pv/nacos-pv pvc/nacos-data command to the answer box.


[root@master nacos-ingress]# vim /root/nacos-ingress/nacos-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nacos-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /root/data/nacos
    server: 192.168.200.10
[root@master nacos-ingress]# vim /root/nacos-ingress/nacos-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nacos-data
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
[root@master nacos-ingress]# kubectl apply -f nacos-pv.yaml
persistentvolume/nacos-pv created
[root@master nacos-ingress]# kubectl apply -f nacos-pvc.yaml
persistentvolumeclaim/nacos-data created

Answer

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nacos-data
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi

apiVersion: v1
kind: PersistentVolume
metadata:
  name: nacos-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /root/data/nacos
    server: 192.168.200.10

NAME                           CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                  STORAGECLASS   REASON   AGE
persistentvolume/nacos-pv      2Gi        RWX            Retain           Bound    default/nacos-data                             81s
persistentvolume/owncloud-pv   5Gi        RWO            Retain           Bound    default/owncloud-pvc                           121m

NAME                                 STATUS   VOLUME        CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/nacos-data     Bound    nacos-pv      2Gi        RWX                           78s
persistentvolumeclaim/owncloud-pvc   Bound    owncloud-pv   5Gi        RWO                           121m
Name:            nacos-pv
Labels:          <none>
Annotations:     pv.kubernetes.io/bound-by-controller: yes
Finalizers:      [kubernetes.io/pv-protection]
StorageClass:
Status:          Bound
Claim:           default/nacos-data
Reclaim Policy:  Retain
Access Modes:    RWX
VolumeMode:      Filesystem
Capacity:        2Gi
Node Affinity:   <none>
Message:
Source:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    192.168.200.10
    Path:      /root/data/nacos
    ReadOnly:  false
Events:        <none>


Name:          nacos-data
Namespace:     default
StorageClass:
Status:        Bound
Volume:        nacos-pv
Labels:        <none>
Annotations:   pv.kubernetes.io/bind-completed: yes
               pv.kubernetes.io/bound-by-controller: yes
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:      2Gi
Access Modes:  RWX
VolumeMode:    Filesystem
Used By:       <none>
Events:        <none>

5. Deploy the NFS Provisioner (1 point)

Write nfs-deployment.yaml to create a Deployment named nfs-client-provisioner based on the nfs-subdir-external-provisioner image, with 2 replicas, PROVISIONER_NAME set to nacos.brics.com, and bound to the nacos shared directory on the NFS server.

After creation on the master node, submit the output of cat /root/nacos-ingress/nfs-deployment.yaml && kubectl get pod,deploy && kubectl describe pod $(kubectl get pod | grep nfs-client-provisioner | awk -F\ '{print $1}') to the answer box.


[root@master nacos-ingress]# vim /root/nacos-ingress/nfs-deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccount: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: easzlab/nfs-subdir-external-provisioner:v4.0.1
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-data
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nacos.brics.com
            - name: NFS_SERVER
              value: 192.168.200.10
            - name: NFS_PATH
              value: /root/data/nacos
      volumes:
        - name: nfs-data
          nfs:
            server: 192.168.200.10
            path: /root/data/nacos
[root@master nacos-ingress]# kubectl apply -f nfs-deployment.yaml
deployment.apps/nfs-client-provisioner created
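
A quick optional look at the provisioner log verifies that it started and registered the nacos.brics.com provisioner before continuing (the -c flag targets the application container, since an istio sidecar is also injected):

[root@master nacos-ingress]# kubectl logs deployment/nfs-client-provisioner -c nfs-client-provisioner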

Answer

kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccount: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: easzlab/nfs-subdir-external-provisioner:v4.0.1
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-data
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nacos.brics.com
            - name: NFS_SERVER
              value: 192.168.200.10
            - name: NFS_PATH
              value: /root/data/nacos
      volumes:
        - name: nfs-data
          nfs:
            server: 192.168.200.10
            path: /root/data/nacos
NAME                                          READY   STATUS    RESTARTS        AGE
pod/details-v1-5597657cfd-hsqkr               2/2     Running   0               149m
pod/nfs-client-provisioner-64b8bfcfbb-zfq2b   2/2     Running   1 (7h42m ago)   37s
pod/owncloud-deployment-6fcc6c5c6-rpnwh       2/2     Running   0               144m
pod/productpage-v1-7fc5c466b4-mmwk2           2/2     Running   0               149m
pod/ratings-v1-56968c967b-s4p2k               2/2     Running   0               149m
pod/reviews-v1-64d7bd94f5-ptq74               2/2     Running   0               149m
pod/reviews-v2-79d87db899-ldp82               2/2     Running   0               149m
pod/reviews-v3-787c88b85d-zwtk7               2/2     Running   0               149m
pod/virt-launcher-test-vm-56wtz               2/3     Running   0               151m

NAME                                     READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/details-v1               1/1     1            1           3d20h
deployment.apps/nfs-client-provisioner   1/1     1            1           65m
deployment.apps/owncloud-deployment      1/1     1            1           168m
deployment.apps/productpage-v1           1/1     1            1           3d20h
deployment.apps/ratings-v1               1/1     1            1           3d20h
deployment.apps/reviews-v1               1/1     1            1           3d20h
deployment.apps/reviews-v2               1/1     1            1           3d20h
deployment.apps/reviews-v3               1/1     1            1           3d20h
Name:             nfs-client-provisioner-64b8bfcfbb-zfq2b
Namespace:        default
Priority:         0
Service Account:  nfs-client-provisioner
Node:             node/192.168.200.20
Start Time:       Sun, 23 Mar 2025 15:19:46 +0800
Labels:           app=nfs-client-provisioner
                  pod-template-hash=64b8bfcfbb
                  security.istio.io/tlsMode=istio
                  service.istio.io/canonical-name=nfs-client-provisioner
                  service.istio.io/canonical-revision=latest
Annotations:      k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "cbr0",
                        "interface": "eth0",
                        "ips": [
                            "10.244.1.80"
                        ],
                        "mac": "d2:96:5b:2b:fb:1f",
                        "default": true,
                        "dns": {}
                    }]
                  k8s.v1.cni.cncf.io/networks-status:
                    [{
                        "name": "cbr0",
                        "interface": "eth0",
                        "ips": [
                            "10.244.1.80"
                        ],
                        "mac": "d2:96:5b:2b:fb:1f",
                        "default": true,
                        "dns": {}
                    }]
                  kubectl.kubernetes.io/default-container: nfs-client-provisioner
                  kubectl.kubernetes.io/default-logs-container: nfs-client-provisioner
                  kubectl.kubernetes.io/restartedAt: 2025-03-23T23:01:21+08:00
                  prometheus.io/path: /stats/prometheus
                  prometheus.io/port: 15020
                  prometheus.io/scrape: true
                  sidecar.istio.io/status:
                    {"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-socket","credential-socket","workload-certs","istio-env...
Status:           Running
IP:               10.244.1.80
IPs:
  IP:           10.244.1.80
Controlled By:  ReplicaSet/nfs-client-provisioner-64b8bfcfbb
Init Containers:
  istio-init:
    Container ID:  containerd://316d84d78e5d2b6536013b4f16b51db54552de384d0ec6350c19a7830509763c
    Image:         docker.io/istio/proxyv2:1.17.2
    Image ID:      sha256:3944a6baf515cabc77e05986b76d498f77b5c47a65aa736cb6c7086f368b8339
    Port:          <none>
    Host Port:     <none>
    Args:
      istio-iptables
      -p
      15001
      -z
      15006
      -u
      1337
      -m
      REDIRECT
      -i
      *
      -x

      -b
      *
      -d
      15090,15021,15020
      --log_output_level=default:info
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sun, 23 Mar 2025 15:19:47 +0800
      Finished:     Sun, 23 Mar 2025 15:19:47 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:        10m
      memory:     40Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mgkmn (ro)
Containers:
  nfs-client-provisioner:
    Container ID:   containerd://e891b02d1d03dfece6db2c51edd956083dad08d2d176f8a58a99264073bf3f9b
    Image:          easzlab/nfs-subdir-external-provisioner:v4.0.1
    Image ID:       sha256:686d3731280a72025a09c057d89f208d39d757be89c9291075a36f91a5abd550
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Sun, 23 Mar 2025 15:19:48 +0800
    Last State:     Terminated
      Reason:       Error
      Exit Code:    255
      Started:      Sun, 23 Mar 2025 15:19:47 +0800
      Finished:     Sun, 23 Mar 2025 15:19:47 +0800
    Ready:          True
    Restart Count:  1
    Environment:
      PROVISIONER_NAME:  nacos.brics.com
      NFS_SERVER:        192.168.200.10
      NFS_PATH:          /root/data/nacos
    Mounts:
      /persistentvolumes from nfs-data (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mgkmn (ro)
  istio-proxy:
    Container ID:  containerd://f0f6965dfba3665fcc76e95909512b2f7a6d3c284d5504afb7bc5cc8de4dcd49
    Image:         docker.io/istio/proxyv2:1.17.2
    Image ID:      sha256:3944a6baf515cabc77e05986b76d498f77b5c47a65aa736cb6c7086f368b8339
    Port:          15090/TCP
    Host Port:     0/TCP
    Args:
      proxy
      sidecar
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --proxyLogLevel=warning
      --proxyComponentLogLevel=misc:error
      --log_output_level=default:info
      --concurrency
      2
    State:          Running
      Started:      Sun, 23 Mar 2025 15:19:47 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:      10m
      memory:   40Mi
    Readiness:  http-get http://:15021/healthz/ready delay=1s timeout=3s period=2s #success=1 #failure=30
    Environment:
      JWT_POLICY:                    third-party-jwt
      PILOT_CERT_PROVIDER:           istiod
      CA_ADDR:                       istiod.istio-system.svc:15012
      POD_NAME:                      nfs-client-provisioner-64b8bfcfbb-zfq2b (v1:metadata.name)
      POD_NAMESPACE:                 default (v1:metadata.namespace)
      INSTANCE_IP:                    (v1:status.podIP)
      SERVICE_ACCOUNT:                (v1:spec.serviceAccountName)
      HOST_IP:                        (v1:status.hostIP)
      PROXY_CONFIG:                  {}

      ISTIO_META_POD_PORTS:          [
                                     ]
      ISTIO_META_APP_CONTAINERS:     nfs-client-provisioner
      ISTIO_META_CLUSTER_ID:         Kubernetes
      ISTIO_META_NODE_NAME:           (v1:spec.nodeName)
      ISTIO_META_INTERCEPTION_MODE:  REDIRECT
      ISTIO_META_WORKLOAD_NAME:      nfs-client-provisioner
      ISTIO_META_OWNER:              kubernetes://apis/apps/v1/namespaces/default/deployments/nfs-client-provisioner
      ISTIO_META_MESH_ID:            cluster.local
      TRUST_DOMAIN:                  cluster.local
    Mounts:
      /etc/istio/pod from istio-podinfo (rw)
      /etc/istio/proxy from istio-envoy (rw)
      /var/lib/istio/data from istio-data (rw)
      /var/run/secrets/credential-uds from credential-socket (rw)
      /var/run/secrets/istio from istiod-ca-cert (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-mgkmn (ro)
      /var/run/secrets/tokens from istio-token (rw)
      /var/run/secrets/workload-spiffe-credentials from workload-certs (rw)
      /var/run/secrets/workload-spiffe-uds from workload-socket (rw)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  workload-socket:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  credential-socket:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  workload-certs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  istio-envoy:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  <unset>
  istio-data:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  istio-podinfo:
    Type:  DownwardAPI (a volume populated by information about the pod)
    Items:
      metadata.labels -> labels
      metadata.annotations -> annotations
  istio-token:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  43200
  istiod-ca-cert:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-ca-root-cert
    Optional:  false
  nfs-data:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    192.168.200.10
    Path:      /root/data/nacos
    ReadOnly:  false
  kube-api-access-mgkmn:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason          Age                    From               Message
  ----    ------          ----                   ----               -------
  Normal  Scheduled       36s                    default-scheduler  Successfully assigned default/nfs-client-provisioner-64b8bfcfbb-zfq2b to node
  Normal  AddedInterface  7h42m                  multus             Add eth0 [10.244.1.80/24] from cbr0
  Normal  Pulled          7h42m                  kubelet            Container image "docker.io/istio/proxyv2:1.17.2" already present on machine
  Normal  Created         7h42m                  kubelet            Created container istio-init
  Normal  Started         7h42m                  kubelet            Started container istio-init
  Normal  Pulled          7h42m                  kubelet            Container image "docker.io/istio/proxyv2:1.17.2" already present on machine
  Normal  Created         7h42m                  kubelet            Created container istio-proxy
  Normal  Started         7h42m                  kubelet            Started container istio-proxy
  Normal  Pulled          7h42m (x2 over 7h42m)  kubelet            Container image "easzlab/nfs-subdir-external-provisioner:v4.0.1" already present on machine
  Normal  Created         7h42m (x2 over 7h42m)  kubelet            Created container nfs-client-provisioner
  Normal  Started         7h42m (x2 over 7h42m)  kubelet            Started container nfs-client-provisioner

6. StorageClass dynamic binding (0.5 points)

Write storageclass.yaml to create a StorageClass named managed-nfs-storage that dynamically binds to the nfs-provisioner, then inspect the nfs-provisioner's storageclasses object.

After creation on the master node, submit the output of the cat /root/nacos-ingress/storageclass.yaml && kubectl describe storageclasses managed-nfs-storage command to the answer box.


[root@master nacos-ingress]# vim /root/nacos-ingress/storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: nacos.brics.com
parameters:
  archiveOnDelete: "false"
[root@master nacos-ingress]# kubectl apply -f storageclass.yaml
storageclass.storage.k8s.io/managed-nfs-storage created
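
As an optional end-to-end test of dynamic provisioning (not part of the graded answer), a throwaway PVC that references the new StorageClass should be bound automatically by the provisioner; test-claim is just an illustrative name and the claim can be deleted afterwards:

[root@master nacos-ingress]# cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 100Mi
EOF
[root@master nacos-ingress]# kubectl get pvc test-claim
[root@master nacos-ingress]# kubectl delete pvc test-claim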

Answer

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: nacos.brics.com
parameters:
  archiveOnDelete: "false"
Name:            managed-nfs-storage
IsDefaultClass:  No
Annotations:     kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"storage.k8s.io/v1","kind":"StorageClass","metadata":{"annotations":{},"name":"managed-nfs-storage"},"parameters":{"archiveOnDelete":"false"},"provisioner":"nacos.brics.com"}

Provisioner:           nacos.brics.com
Parameters:            archiveOnDelete=false
AllowVolumeExpansion:  <unset>
MountOptions:          <none>
ReclaimPolicy:         Delete
VolumeBindingMode:     Immediate
Events:                <none>

7. Create a PV and PVC for mysql (1 point)

In the /root/nacos-ingress/ directory, write mysql-pv.yaml to create a PV named nacos-mysql-pv and mysql-pvc.yaml to create a PVC named mysql-data; they must mount the mysql directory exported by NFS and be 2Gi in size.

After creation on the master node, submit the output of the cat /root/nacos-ingress/mysql-pv* && kubectl get pv,pvc && kubectl describe pv/nacos-mysql-pv pvc/mysql-data command to the answer box.


[root@master nacos-ingress]# vim /root/nacos-ingress/mysql-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nacos-mysql-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /root/data/mysql
    server: 192.168.200.10
[root@master nacos-ingress]# vim /root/nacos-ingress/mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
[root@master nacos-ingress]# kubectl apply -f mysql-pv.yaml
persistentvolume/nacos-mysql-pv created
[root@master nacos-ingress]# kubectl apply -f mysql-pvc.yaml
persistentvolumeclaim/mysql-data created

Answer

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi

apiVersion: v1
kind: PersistentVolume
metadata:
  name: nacos-mysql-pv
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /root/data/mysql
    server: 192.168.200.10
NAME                              CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                  STORAGECLASS   REASON   AGE
persistentvolume/nacos-mysql-pv   2Gi        RWX            Retain           Bound    default/mysql-data                             84s
persistentvolume/nacos-pv         2Gi        RWX            Retain           Bound    default/nacos-data                             77m
persistentvolume/owncloud-pv      5Gi        RWO            Retain           Bound    default/owncloud-pvc                           3h17m

NAME                                 STATUS   VOLUME           CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/mysql-data     Bound    nacos-mysql-pv   2Gi        RWX                           81s
persistentvolumeclaim/nacos-data     Bound    nacos-pv         2Gi        RWX                           77m
persistentvolumeclaim/owncloud-pvc   Bound    owncloud-pv      5Gi        RWO                           3h17m
Name:            nacos-mysql-pv
Labels:          <none>
Annotations:     pv.kubernetes.io/bound-by-controller: yes
Finalizers:      [kubernetes.io/pv-protection]
StorageClass:
Status:          Bound
Claim:           default/mysql-data
Reclaim Policy:  Retain
Access Modes:    RWX
VolumeMode:      Filesystem
Capacity:        2Gi
Node Affinity:   <none>
Message:
Source:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    192.168.200.10
    Path:      /root/data/mysql
    ReadOnly:  false
Events:        <none>


Name:          mysql-data
Namespace:     default
StorageClass:
Status:        Bound
Volume:        nacos-mysql-pv
Labels:        <none>
Annotations:   pv.kubernetes.io/bind-completed: yes
               pv.kubernetes.io/bound-by-controller: yes
Finalizers:    [kubernetes.io/pvc-protection]
Capacity:      2Gi
Access Modes:  RWX
VolumeMode:    Filesystem
Used By:       <none>
Events:        <none>

8. Deploy mysql (1 point)

Write mysql-nfs.yaml to build the mysql pod through a ReplicationController named mysql with 1 replica, using the storage provided by the StorageClass above and the nacos-mysql:5.7 image. Declare container port 3306, bind the mysql shared directory on the NFS server, set the mysql root password to 123123, create the database nacos_devtest, and create a user nacos with password 123123. Also create a Service named mysql that declares port 3306.

After creation on the master node, submit the output of the cat /root/nacos-ingress/mysql-nfs.yaml && kubectl get pod | grep mysql && kubectl describe replicationcontroller/mysql command to the answer box.


[root@master nacos-ingress]# vim /root/nacos-ingress/mysql-nfs.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
  labels:
    name: mysql
spec:
  replicas: 1
  selector:
    name: mysql
  template:
    metadata:
      labels:
        name: mysql
    spec:
      containers:
      - name: mysql
        image: nacos/nacos-mysql:5.7
        ports:
        - containerPort: 3306
        volumeMounts:
        - name: mysql-data
          mountPath: /var/lib/mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123123"
        - name: MYSQL_DATABASE
          value: "nacos_devtest"
        - name: MYSQL_USER
          value: "nacos"
        - name: MYSQL_PASSWORD
          value: "123123"
      volumes:
      - name: mysql-data
        nfs:
          server: 192.168.200.10
          path: /root/data/mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    name: mysql
spec:
  ports:
  - port: 3306
    targetPort: 3306
  selector:
    name: mysql
[root@master nacos-ingress]# kubectl apply -f mysql-nfs.yaml
replicationcontroller/mysql created
service/mysql created
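
Optionally, once the pod is Running, the database and account required by the task can be verified from inside the mysql container (the pod name is looked up by label; -c mysql targets the application container because an istio sidecar is also present):

[root@master nacos-ingress]# MYSQL_POD=$(kubectl get pod -l name=mysql -o jsonpath='{.items[0].metadata.name}')
[root@master nacos-ingress]# kubectl exec $MYSQL_POD -c mysql -- mysql -unacos -p123123 -e 'show databases;'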

Answer

apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
  labels:
    name: mysql
spec:
  replicas: 1
  selector:
    name: mysql
  template:
    metadata:
      labels:
        name: mysql
    spec:
      containers:
      - name: mysql
        image: nacos/nacos-mysql:5.7
        ports:
        - containerPort: 3306
        volumeMounts:
        - name: mysql-data
          mountPath: /var/lib/mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: "123123"
        - name: MYSQL_DATABASE
          value: "nacos_devtest"
        - name: MYSQL_USER
          value: "nacos"
        - name: MYSQL_PASSWORD
          value: "123123"
      volumes:
      - name: mysql-data
        nfs:
          server: 192.168.200.10
          path: /root/data/mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    name: mysql
spec:
  ports:
  - port: 3306
    targetPort: 3306
  selector:
    name: mysql

mysql-zb447                               2/2     Running   0               34s
Name:         mysql
Namespace:    default
Selector:     name=mysql
Labels:       name=mysql
Annotations:  <none>
Replicas:     1 current / 1 desired
Pods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed
Pod Template:
  Labels:  name=mysql
  Containers:
   mysql:
    Image:      nacos/nacos-mysql:5.7
    Port:       3306/TCP
    Host Port:  0/TCP
    Environment:
      MYSQL_ROOT_PASSWORD:  123123
      MYSQL_DATABASE:       nacos_devtest
      MYSQL_USER:           nacos
      MYSQL_PASSWORD:       123123
    Mounts:
      /var/lib/mysql from mysql-data (rw)
  Volumes:
   mysql-data:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    192.168.200.10
    Path:      /root/data/mysql
    ReadOnly:  false
Events:
  Type    Reason            Age   From                    Message
  ----    ------            ----  ----                    -------
  Normal  SuccessfulCreate  34s   replication-controller  Created pod: mysql-zb447

9. Deploy the Nacos cluster (1 point)

Using the provided nacos-nfs.yaml file, fill in the blank parameters (ports and so on) based on the surrounding context, then create the StatefulSet and Service from that file.

After creation on the master node, submit the output of the cat /root/nacos-ingress/nacos-nfs.yaml && kubectl describe pod nacos-0 && kubectl logs --tail=40 nacos-0 | grep -v ^$ command to the answer box.


[root@master nacos-ingress]# vim /root/nacos-ingress/nacos-nfs.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: nacos-headless
  labels:
    app: nacos
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - port: 8848
      name: server
      targetPort: 8848
    - port: 9848
      name: client-rpc
      targetPort: 9848
    - port: 9849
      name: raft-rpc
      targetPort: 9849
    - port: 7848
      name: old-raft-rpc
      targetPort: 7848
  clusterIP: None
  selector:
    app: nacos
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nacos-cm
data:
  mysql.db.name: "nacos_devtest"
  mysql.port: "3306"
  mysql.user: "root"
  mysql.password: "123123"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nacos
spec:
  serviceName: nacos-headless
  template:
    metadata:
      labels:
        app: nacos
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - nacos
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: nfs-client-provisioner
      initContainers:
        - name: peer-finder-plugin-install
          image: nacos/nacos-peer-finder-plugin:v1.1
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /home/nacos/plugins/peer-finder
              name: nacos-data
              subPath: peer-finder
      containers:
        - name: nacos
          imagePullPolicy: IfNotPresent
          image: nacos/nacos-server:latest
          resources:
            requests:
              memory: "2Gi"
              cpu: "500m"
          ports:
            - containerPort: 8848
              name: client-port
            - containerPort: 9848
              name: client-rpc
            - containerPort: 9849
              name: raft-rpc
            - containerPort: 7848
              name: old-raft-rpc
          env:
            - name: NACOS_REPLICAS
              value: "2"
            - name: SERVICE_NAME
              value: "nacos-headless"
            - name: DOMAIN_NAME
              value: "cluster.local"
            - name: SPRING_DATASOURCE_PLATFORM
              value: "mysql"
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.password
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: NACOS_APPLICATION_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
          volumeMounts:
            - name: nacos-data
              mountPath: /home/nacos/plugins/peer-finder
              subPath: peer-finder
            - name: nacos-data
              mountPath: /home/nacos/data
              subPath: data
            - name: nacos-data
              mountPath: /home/nacos/logs
              subPath: logs
  volumeClaimTemplates:
    - metadata:
        name: nacos-data
        annotations:
          volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
      spec:
        accessModes: [ "ReadWriteMany" ]
        resources:
          requests:
            storage: 20Gi
  selector:
    matchLabels:
      app: nacos
[root@master nacos-ingress]# kubectl apply -f nacos-nfs.yaml
service/nacos-headless created
configmap/nacos-cm created
statefulset.apps/nacos created
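
Before capturing the answer output below, two optional checks confirm that the StatefulSet pods are coming up and that peer discovery produced a cluster membership file (/home/nacos/conf/cluster.conf is the conventional location inside the nacos-server image, so treat that path as an assumption for this environment):

[root@master nacos-ingress]# kubectl get statefulset nacos && kubectl get pod -l app=nacos -o wide
[root@master nacos-ingress]# kubectl exec nacos-0 -c nacos -- cat /home/nacos/conf/cluster.conf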

Answer:

---
apiVersion: v1
kind: Service
metadata:
  name: nacos-headless
  labels:
    app: nacos
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - port: 8848
      name: server
      targetPort: 8848
    - port: 9848
      name: client-rpc
      targetPort: 9848
    - port: 9849
      name: raft-rpc
      targetPort: 9849
    - port: 7848
      name: old-raft-rpc
      targetPort: 7848
  clusterIP: None
  selector:
    app: nacos
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nacos-cm
data:
  mysql.db.name: "nacos_devtest"
  mysql.port: "3306"
  mysql.user: "root"
  mysql.password: "123123"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nacos
spec:
  serviceName: nacos-headless
  template:
    metadata:
      labels:
        app: nacos
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - nacos
              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: nfs-client-provisioner
      initContainers:
        - name: peer-finder-plugin-install
          image: nacos/nacos-peer-finder-plugin:v1.1
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /home/nacos/plugins/peer-finder
              name: nacos-data
              subPath: peer-finder
      containers:
        - name: nacos
          imagePullPolicy: IfNotPresent
          image: nacos/nacos-server:latest
          resources:
            requests:
              memory: "2Gi"
              cpu: "500m"
          ports:
            - containerPort: 8848
              name: client-port
            - containerPort: 9848
              name: client-rpc
            - containerPort: 9849
              name: raft-rpc
            - containerPort: 7848
              name: old-raft-rpc
          env:
            - name: NACOS_REPLICAS
              value: "2"
            - name: SERVICE_NAME
              value: "nacos-headless"
            - name: DOMAIN_NAME
              value: "cluster.local"
            - name: SPRING_DATASOURCE_PLATFORM
              value: "mysql"
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.password
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: NACOS_APPLICATION_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
          volumeMounts:
            - name: nacos-data
              mountPath: /home/nacos/plugins/peer-finder
              subPath: peer-finder
            - name: nacos-data
              mountPath: /home/nacos/data
              subPath: data
            - name: nacos-data
              mountPath: /home/nacos/logs
              subPath: logs
  volumeClaimTemplates:
    - metadata:
        name: nacos-data
        annotations:
          volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
      spec:
        accessModes: [ "ReadWriteMany" ]
        resources:
          requests:
            storage: 20Gi
  selector:
    matchLabels:
      app: nacos

Name:             nacos-0
Namespace:        default
Priority:         0
Service Account:  nfs-client-provisioner
Node:             master/192.168.200.10
Start Time:       Sun, 23 Mar 2025 23:31:53 +0800
Labels:           app=nacos
                  controller-revision-hash=nacos-6457dc6f96
                  security.istio.io/tlsMode=istio
                  service.istio.io/canonical-name=nacos
                  service.istio.io/canonical-revision=latest
                  statefulset.kubernetes.io/pod-name=nacos-0
Annotations:      k8s.v1.cni.cncf.io/network-status:
                    [{
                        "name": "cbr0",
                        "interface": "eth0",
                        "ips": [
                            "10.244.0.47"
                        ],
                        "mac": "12:b2:1a:93:e9:bb",
                        "default": true,
                        "dns": {}
                    }]
                  k8s.v1.cni.cncf.io/networks-status:
                    [{
                        "name": "cbr0",
                        "interface": "eth0",
                        "ips": [
                            "10.244.0.47"
                        ],
                        "mac": "12:b2:1a:93:e9:bb",
                        "default": true,
                        "dns": {}
                    }]
                  kubectl.kubernetes.io/default-container: nacos
                  kubectl.kubernetes.io/default-logs-container: nacos
                  pod.alpha.kubernetes.io/initialized: true
                  prometheus.io/path: /stats/prometheus
                  prometheus.io/port: 15020
                  prometheus.io/scrape: true
                  sidecar.istio.io/status:
                    {"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-socket","credential-socket","workload-certs","istio-env...
Status:           Running
IP:               10.244.0.47
IPs:
  IP:           10.244.0.47
Controlled By:  StatefulSet/nacos
Init Containers:
  peer-finder-plugin-install:
    Container ID:   containerd://d38c1f886766c1dcd53359f5f7fe3b778d83f9d8927677b3bc799cc5a19639db
    Image:          nacos/nacos-peer-finder-plugin:v1.1
    Image ID:       sha256:311427258b69f739c084c3861593091821a7168e67a77fd60136933634f3c28e
    Port:           <none>
    Host Port:      <none>
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sun, 23 Mar 2025 23:31:54 +0800
      Finished:     Sun, 23 Mar 2025 23:31:54 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /home/nacos/plugins/peer-finder from nacos-data (rw,path="peer-finder")
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-58r6s (ro)
  istio-init:
    Container ID:  containerd://958c0cfa9ab1affc72ed89a47ae52f1b10eb24ae3be30c38b0143352b95333bd
    Image:         docker.io/istio/proxyv2:1.17.2
    Image ID:      sha256:3944a6baf515cabc77e05986b76d498f77b5c47a65aa736cb6c7086f368b8339
    Port:          <none>
    Host Port:     <none>
    Args:
      istio-iptables
      -p
      15001
      -z
      15006
      -u
      1337
      -m
      REDIRECT
      -i
      *
      -x

      -b
      *
      -d
      15090,15021,15020
      --log_output_level=default:info
    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sun, 23 Mar 2025 23:31:55 +0800
      Finished:     Sun, 23 Mar 2025 23:31:55 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:        10m
      memory:     40Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-58r6s (ro)
Containers:
  nacos:
    Container ID:   containerd://efa93fb9f9bda679c68ca78880307a97df3c3e2f87775842584af855518cf950
    Image:          nacos/nacos-server:latest
    Image ID:       sha256:fc86d4833afb7abc64006f04c715946303f6b15d694c2f05c2895a27d967ed88
    Ports:          8848/TCP, 9848/TCP, 9849/TCP, 7848/TCP
    Host Ports:     0/TCP, 0/TCP, 0/TCP, 0/TCP
    State:          Running
      Started:      Sun, 23 Mar 2025 23:31:56 +0800
    Ready:          True
    Restart Count:  0
    Requests:
      cpu:     500m
      memory:  2Gi
    Environment:
      NACOS_REPLICAS:              2
      SERVICE_NAME:                nacos-headless
      DOMAIN_NAME:                 cluster.local
      SPRING_DATASOURCE_PLATFORM:  mysql
      POD_NAMESPACE:               default (v1:metadata.namespace)
      MYSQL_SERVICE_DB_NAME:       <set to the key 'mysql.db.name' of config map 'nacos-cm'>   Optional: false
      MYSQL_SERVICE_PORT:          <set to the key 'mysql.port' of config map 'nacos-cm'>      Optional: false
      MYSQL_SERVICE_USER:          <set to the key 'mysql.user' of config map 'nacos-cm'>      Optional: false
      MYSQL_SERVICE_PASSWORD:      <set to the key 'mysql.password' of config map 'nacos-cm'>  Optional: false
      NACOS_SERVER_PORT:           8848
      NACOS_APPLICATION_PORT:      8848
      PREFER_HOST_MODE:            hostname
    Mounts:
      /home/nacos/data from nacos-data (rw,path="data")
      /home/nacos/logs from nacos-data (rw,path="logs")
      /home/nacos/plugins/peer-finder from nacos-data (rw,path="peer-finder")
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-58r6s (ro)
  istio-proxy:
    Container ID:  containerd://9095dd9c3fb088c1ec5dc5748de2fb6dc89f0cc04fdab2cf3f50eaffb84b40cd
    Image:         docker.io/istio/proxyv2:1.17.2
    Image ID:      sha256:3944a6baf515cabc77e05986b76d498f77b5c47a65aa736cb6c7086f368b8339
    Port:          15090/TCP
    Host Port:     0/TCP
    Args:
      proxy
      sidecar
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --proxyLogLevel=warning
      --proxyComponentLogLevel=misc:error
      --log_output_level=default:info
      --concurrency
      2
    State:          Running
      Started:      Sun, 23 Mar 2025 23:31:56 +0800
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:      10m
      memory:   40Mi
    Readiness:  http-get http://:15021/healthz/ready delay=1s timeout=3s period=2s #success=1 #failure=30
    Environment:
      JWT_POLICY:                    third-party-jwt
      PILOT_CERT_PROVIDER:           istiod
      CA_ADDR:                       istiod.istio-system.svc:15012
      POD_NAME:                      nacos-0 (v1:metadata.name)
      POD_NAMESPACE:                 default (v1:metadata.namespace)
      INSTANCE_IP:                    (v1:status.podIP)
      SERVICE_ACCOUNT:                (v1:spec.serviceAccountName)
      HOST_IP:                        (v1:status.hostIP)
      PROXY_CONFIG:                  {}

      ISTIO_META_POD_PORTS:          [
                                         {"name":"client-port","containerPort":8848,"protocol":"TCP"}
                                         ,{"name":"client-rpc","containerPort":9848,"protocol":"TCP"}
                                         ,{"name":"raft-rpc","containerPort":9849,"protocol":"TCP"}
                                         ,{"name":"old-raft-rpc","containerPort":7848,"protocol":"TCP"}
                                     ]
      ISTIO_META_APP_CONTAINERS:     nacos
      ISTIO_META_CLUSTER_ID:         Kubernetes
      ISTIO_META_NODE_NAME:           (v1:spec.nodeName)
      ISTIO_META_INTERCEPTION_MODE:  REDIRECT
      ISTIO_META_WORKLOAD_NAME:      nacos
      ISTIO_META_OWNER:              kubernetes://apis/apps/v1/namespaces/default/statefulsets/nacos
      ISTIO_META_MESH_ID:            cluster.local
      TRUST_DOMAIN:                  cluster.local
    Mounts:
      /etc/istio/pod from istio-podinfo (rw)
      /etc/istio/proxy from istio-envoy (rw)
      /var/lib/istio/data from istio-data (rw)
      /var/run/secrets/credential-uds from credential-socket (rw)
      /var/run/secrets/istio from istiod-ca-cert (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-58r6s (ro)
      /var/run/secrets/tokens from istio-token (rw)
      /var/run/secrets/workload-spiffe-credentials from workload-certs (rw)
      /var/run/secrets/workload-spiffe-uds from workload-socket (rw)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  workload-socket:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  credential-socket:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  workload-certs:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  istio-envoy:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:     Memory
    SizeLimit:  <unset>
  istio-data:
    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
    SizeLimit:  <unset>
  istio-podinfo:
    Type:  DownwardAPI (a volume populated by information about the pod)
    Items:
      metadata.labels -> labels
      metadata.annotations -> annotations
  istio-token:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  43200
  istiod-ca-cert:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-ca-root-cert
    Optional:  false
  nacos-data:
    Type:       PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
    ClaimName:  nacos-data-nacos-0
    ReadOnly:   false
  kube-api-access-58r6s:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   Burstable
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason          Age   From               Message
  ----    ------          ----  ----               -------
  Normal  Scheduled       50s   default-scheduler  Successfully assigned default/nacos-0 to master
  Normal  AddedInterface  50s   multus             Add eth0 [10.244.0.47/24] from cbr0
  Normal  Pulled          50s   kubelet            Container image "nacos/nacos-peer-finder-plugin:v1.1" already present on machine
  Normal  Created         50s   kubelet            Created container peer-finder-plugin-install
  Normal  Started         50s   kubelet            Started container peer-finder-plugin-install
  Normal  Pulled          49s   kubelet            Container image "docker.io/istio/proxyv2:1.17.2" already present on machine
  Normal  Created         49s   kubelet            Created container istio-init
  Normal  Started         49s   kubelet            Started container istio-init
  Normal  Pulled          48s   kubelet            Container image "nacos/nacos-server:latest" already present on machine
  Normal  Created         48s   kubelet            Created container nacos
  Normal  Started         48s   kubelet            Started container nacos
  Normal  Pulled          48s   kubelet            Container image "docker.io/istio/proxyv2:1.17.2" already present on machine
  Normal  Created         48s   kubelet            Created container istio-proxy
  Normal  Started         48s   kubelet            Started container istio-proxy
+ echo 'Nacos is starting, you can docker logs your container'
+ exec /usr/lib/jvm/java-1.8-openjdk/bin/java -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection '-XX:CMSInitiatingOccupancyFraction=70' -XX:+CMSParallelRemarkEnabled '-XX:SoftRefLRUPolicyMSPerMB=0' -XX:+CMSClassUnloadingEnabled '-XX:SurvivorRatio=8' -server -Xms1g -Xmx1g -Xmn512m '-XX:MetaspaceSize=128m' '-XX:MaxMetaspaceSize=320m' -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError '-XX:HeapDumpPath=/home/nacos/logs/java_heapdump.hprof' -XX:-UseLargePages '-Dnacos.preferHostnameOverIp=true' '-Dnacos.member.list=' -Xloggc:/home/nacos/logs/nacos_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation '-XX:NumberOfGCLogFiles=10' '-XX:GCLogFileSize=100M' '-Dloader.path=/home/nacos/plugins,/home/nacos/plugins/health,/home/nacos/plugins/cmdb,/home/nacos/plugins/selector' '-Dnacos.home=/home/nacos' -jar /home/nacos/target/nacos-server.jar '--spring.config.additional-location=file:/home/nacos/conf/' '--spring.config.name=application' '--logging.config=/home/nacos/conf/nacos-logback.xml' '--server.max-http-header-size=524288'
Nacos is starting, you can docker logs your container
OpenJDK 64-Bit Server VM warning: UseCMSCompactAtFullCollection is deprecated and will likely be removed in a future release.
         ,--.
       ,--.'|
   ,--,:  : |                                           Nacos 2.4.3
,`--.'`|  ' :                       ,---.               Running in cluster mode, All function modules
|   :  :  | |                      '   ,'\   .--.--.    Port: 8848
:   |   \ | :  ,--.--.     ,---.  /   /   | /  /    '   Pid: 1
|   : '  '; | /       \   /     \.   ; ,. :|  :  /`./   Console: http://nacos-0.nacos-headless.default.svc.cluster.local:8848/nacos/index.html
'   ' ;.    ;.--.  .-. | /    / ''   | |: :|  :  ;_
|   | | \   | \__\/: . ..    ' / '   | .; : \  \    `.      https://nacos.io
'   : |  ; .' ," .--.; |'   ; :__|   :    |  `----.   \
|   | '`--'  /  /  ,.  |'   | '.'|\   \  /  /  /`--'  /
'   : |     ;  :   .'   \   :    : `----'  '--'.     /
;   |.'     |  ,     .-./\   \  /            `--'---'
'---'        `--`---'     `----'
2025-03-23 23:32:28,507 INFO The server IP list of Nacos is [nacos-0.nacos-headless.default.svc.cluster.local:8848]
2025-03-23 23:32:29,508 INFO Nacos is starting...
2025-03-23 23:32:30,509 INFO Nacos is starting...
2025-03-23 23:32:31,511 INFO Nacos is starting...
2025-03-23 23:32:32,512 INFO Nacos is starting...
2025-03-23 23:32:33,513 INFO Nacos is starting...
2025-03-23 23:32:34,513 INFO Nacos is starting...
2025-03-23 23:32:35,515 INFO Nacos is starting...
2025-03-23 23:32:36,515 INFO Nacos is starting...
2025-03-23 23:32:36,585 INFO Nacos started successfully in cluster mode. use external storage

10. Deploy an ingress to access the Nacos cluster (1 point)

Use the provided ingress-nginx.yaml to deploy ingress-nginx, then write a nacos-ingress.yaml that creates an Ingress named nacos-ingress-http with Prefix path matching, so the Nacos cluster can be accessed via the domain nacos.brics.com. After writing it, update the host mapping on the master node so that nacos.brics.com resolves to the local IP.

Once it has been created on the master node, submit the output of cat /root/nacos-ingress/nacos-ingress.yaml && kubectl describe ingress nacos-ingress-http && kubectl get svc -n ingress-nginx && curl http://nacos.brics.com:31200/nacos/v1/core/cluster/nodes?withInstances=false | sed s/\"//g to the answer box.


The provided ingress-nginx.yaml was missing from the environment; a reference manifest is available at https://blog.youkuaiyun.com/weixin_58410911/article/details/143915207

[root@master nacos-ingress]# kubectl apply -f ingress-nginx.yaml
[root@master nacos-ingress]# vim nacos-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nacos-ingress-http
  labels:
    nacos: ingress-http
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
  - host: nacos.brics.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nacos-headless
            port:
              number: 8848
[root@master nacos-ingress]# kubectl apply -f nacos-ingress.yaml
ingress.networking.k8s.io/nacos-ingress-http created
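
The task also requires mapping nacos.brics.com to the master's own IP before the domain will resolve for the curl check. A minimal sketch, assuming the master IP 192.168.200.10 from the environment table and the 80:31200 NodePort exposed by the ingress-nginx-controller service shown below:

# point the ingress host name at the master node itself
echo "192.168.200.10 nacos.brics.com" >> /etc/hosts

# reach Nacos through the ingress-nginx controller's HTTP NodePort
curl http://nacos.brics.com:31200/nacos/v1/core/cluster/nodes?withInstances=false | sed s/\"//g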

Answer:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nacos-ingress-http
  labels:
    nacos: ingress-http
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
  - host: nacos.brics.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nacos-headless
            port:
              number: 8848

Name:             nacos-ingress-http
Labels:           nacos=ingress-http
Namespace:        default
Address:
Ingress Class:    <none>
Default backend:  <default>
Rules:
  Host             Path  Backends
  ----             ----  --------
  nacos.brics.com
                   /   nacos-headless:8848 (10.244.0.72:8848)
Annotations:       kubernetes.io/ingress.class: nginx
Events:
  Type    Reason  Age    From                      Message
  ----    ------  ----   ----                      -------
  Normal  Sync    2m43s  nginx-ingress-controller  Scheduled for sync
NAME                                 TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller             LoadBalancer   10.97.170.205   <pending>     80:31200/TCP,443:31300/TCP   58m
ingress-nginx-controller-admission   ClusterIP      10.96.98.200    <none>        443/TCP                      58m
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100  1259    0  1259    0     0   132k      0 --:--:-- --:--:-- --:--:--  136k
{code:200,message:null,data:[{ip:nacos-0.nacos-headless.default.svc.cluster.local,port:8848,state:UP,extendInfo:{lastRefreshTime:1742898710335,raftMetaData:{metaDataMap:{naming_instance_metadata:{leader:nacos-0.nacos-headless.default.svc.cluster.local:7848,raftGroupMember:[nacos-0.nacos-headless.default.svc.cluster.local:7848],term:3},naming_persistent_service:{leader:nacos-0.nacos-headless.default.svc.cluster.local:7848,raftGroupMember:[nacos-0.nacos-headless.default.svc.cluster.local:7848],term:3},naming_persistent_service_v2:{leader:nacos-0.nacos-headless.default.svc.cluster.local:7848,raftGroupMember:[nacos-0.nacos-headless.default.svc.cluster.local:7848],term:3},naming_service_metadata:{leader:nacos-0.nacos-headless.default.svc.cluster.local:7848,raftGroupMember:[nacos-0.nacos-headless.default.svc.cluster.local:7848],term:3}}},raftPort:7848,readyToUpgrade:true,version:2.4.3},address:nacos-0.nacos-headless.default.svc.cluster.local:8848,failAccessCnt:0,abilities:{remoteAbility:{supportRemoteConnection:true,grpcReportEnabled:true},configAbility:{supportRemoteMetrics:false},namingAbility:{supportJraft:true}},grpcReportEnabled:true}]}