Canary金丝雀发布
什么是金丝雀发布
金丝雀
发布也称为灰度发布
,是一种软件
发布策略
- 主要
目的
是在将新版本
的软件
全面推广到生产环境之前
,先在一小部分用户
或服务器上
进行测试
和验证
,以降低
因新版本
引入重大问题
而对整个系统
造成的影响
- 是一种
Pod
的发布方式
。金丝雀
发布采取先添加
、再删除
的方式,保证Pod
的总量
不低于期望值
。并且在更新部分Pod
后,暂停更新
,当确认新Pod版本
运行正常后再进行其他版本
的Pod的更新
Canary发布方式
三种发布方式
:
- 优先级:
header
大于cookie
大于weight
- 其中
header
和weight
用的最多
基于header(http包头)灰度发布
我们可以看到如果包头
中有stage=gray
的键值对
,就访问新版本
,包头
中没有那个键值对
,就访问旧版本
- 通过
Annotation
扩展 - 创建灰度
ingress
,配置灰度头部key
以及value
灰度
流量验证完毕后,切换正式ingress
到新版本
- 之前我们在
做升级
时可以通过控制器
做滚动更新
,默认25%
利用header
可以使升级
更为平滑
,通过key
和value
测试新的业务体系
是否有问题
创建2
个deployment
控制器
#发现没有运行的pod
[root@k8s-master service]# kubectl get pods
No resources found in default namespace.
#创建一个deployment控制器,控制器中运行一个pod
[root@k8s-master service]# kubectl create deployment deployment --image myapp:v1 --dry-run=client -o yaml > deployment-v1.yml
[root@k8s-master service]# vim deployment-v1.yml
[root@k8s-master service]# cat deployment-v1.yml
apiVersion: apps/v1
kind: Deployment #指明这是一个deployment控制器
metadata: #控制器的元数据
labels: #控制器的标签
app: deployment1 #控制器的标签
name: deployment1 #控制器的名字
spec: #控制器的规格
replicas: 1 #pod数量
selector: #pod选择器
matchLabels: #声明的要管理的pod
app: myappv1 #标签为app=myappv1的pod会被管理
template: #pod的模板
metadata: #pod的元数据
labels: #pod的标签
app: myappv1
spec: #pod的规格
containers:
- image: myapp:v1
name: myappv1
#在克隆一份
[root@k8s-master service]# cp deployment-v1.yml deployment-v2.yml
[root@k8s-master service]# vim deployment-v2.yml
[root@k8s-master services]# cat deployment-v2.yml
apiVersion: apps/v1
kind: Deployment #指明这是一个deployment控制器
metadata: #控制器的元数据
labels:
app: deployment2 #控制器的标签
name: deployment2 #控制器的名字
spec: #控制器的规格
replicas: 1 #pod数量
selector: #pod选择器
matchLabels:
app: myappv2 #声明需要管理的标签
template: #pod的模板
metadata: #pod的元数据
labels:
app: myappv2 #pod的标签
spec: #pod的规格
containers:
- image: myapp:v2
name: myappv2
[root@k8s-master service]# kubectl apply -f deployment-v1.yml
deployment.apps/deployment1 created
[root@k8s-master service]# kubectl apply -f deployment-v2.yml
deployment.apps/deployment2 created
[root@k8s-master service]# kubectl get pods -o wide --show-labels
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES LABELS
deployment1-5c47495d84-ds4cl 1/1 Running 0 3m20s 10.244.2.15 k8s-node2.org <none> <none> app=myappv1,pod-template-hash=5c47495d84
deployment2-67cc8c4845-bfnvx 1/1 Running 0 6m49s 10.244.2.14 k8s-node2.org <none> <none> app=myappv2,pod-template-hash=67cc8c4845
创建2
个service
微服务
[root@k8s-master service]# kubectl expose deployment deployment1 --port 8080 --target-port 80 --dry-run=client -o yaml >> deployment-v1.yml
[root@k8s-master service]# kubectl expose deployment deployment2 --port 8080 --target-port 80 --dry-run=client -o yaml >> deployment-v2.yml
[root@k8s-master service]# vim deployment-v1.yml
[root@k8s-master service]# cat deployment-v1.yml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: deployment1
name: deployment1
spec:
replicas: 1
selector:
matchLabels:
app: myappv1
template:
metadata:
labels:
app: myappv1
spec:
containers:
- image: myapp:v1
name: myappv1
---
apiVersion: v1
kind: Service #指明这是一个service微服务
metadata: #微服务元数据
labels:
app: deployment1 #微服务标签
name: deployment1 #微服务的名字
spec: #微服务的规格
ports: #是一个端口列表,用于描述service应该监听的端口以及如何将流量转发给pod
- port: 8080 #service微服务监听的端口号
protocol: TCP #使用的协议,这里是TCP
targetPort: 80 #pod上应用程序监听的端口
selector: #标签选择器,用于确定哪些pods应该被这个service管理
app: myappv1 #pod标签为app=myappv1的被该service管理
[root@k8s-master service]# vim deployment-v2.yml
[root@k8s-master service]# cat deployment-v2.yml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: deployment2
name: deployment2
spec:
replicas: 1
selector:
matchLabels:
app: myappv2
template:
metadata:
labels:
app: myappv2
spec:
containers:
- image: myapp:v2
name: myappv2
---
apiVersion: v1
kind: Service
metadata:
labels:
app: deployment2
name: deployment2
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 80
selector:
app: myappv2
[root@k8s-master service]# kubectl apply -f deployment-v1.yml
deployment.apps/deployment1 unchanged
service/deployment1 created
[root@k8s-master service]# kubectl apply -f deployment-v2.yml
deployment.apps/deployment2 unchanged
service/deployment2 created
[root@k8s-master service]# kubectl get pods -o wide --show-labels
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES LABELS
deployment1-5c47495d84-ds4cl 1/1 Running 0 15m 10.244.2.15 k8s-node2.org <none> <none> app=myappv1,pod-template-hash=5c47495d84
deployment2-67cc8c4845-bfnvx 1/1 Running 0 18m 10.244.2.14 k8s-node2.org <none> <none> app=myappv2,pod-template-hash=67cc8c4845
[root@k8s-master service]# kubectl get svc -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
deployment1 ClusterIP 10.108.37.167 <none> 8080/TCP 3m8s app=myappv1
deployment2 ClusterIP 10.96.202.197 <none> 8080/TCP 3m2s app=myappv2
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 27d <none>
[root@k8s-master service]# kubectl describe svc deployment1
Name: deployment1
Namespace: default
Labels: app=deployment1
Annotations: <none>
Selector: app=myappv1
Type: ClusterIP
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.108.37.167
IPs: 10.108.37.167
Port: <unset> 8080/TCP
TargetPort: 80/TCP
Endpoints: 10.244.2.15:80
Session Affinity: None
Events: <none>
[root@k8s-master service]# kubectl describe svc deployment2
Name: deployment2
Namespace: default
Labels: app=deployment2
Annotations: <none>
Selector: app=myappv2
Type: ClusterIP
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.96.202.197
IPs: 10.96.202.197
Port: <unset> 8080/TCP
TargetPort: 80/TCP
Endpoints: 10.244.2.14:80
Session Affinity: None
Events: <none>
- 创建
ingress1.yml
[root@k8s-master service]# kubectl create ingress ingress1 --class nginx --rule='/=deployment1:8080' --dry-run=client -o yaml > ingress1.yml
[root@k8s-master service]# vim ingress1.yml
[root@k8s-master service]# cat ingress1.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress1
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: deployment1
port:
number: 8080
path: /
pathType: Prefix
- 创建
ingress2.yml
[root@k8s-master service]# kubectl create ingress ingress2 --class nginx --rule='/=deployment2:8080' --dry-run=client -o yaml > ingress2.yml
[root@k8s-master service]# vim ingress2.yml
[root@k8s-master service]# cat ingress2.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/canary: "true"
nginx.ingress.kubernetes.io/canary-by-header: "name" #键
nginx.ingress.kubernetes.io/canary-by-header-value: "huazi" #值
name: ingress2
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: deployment2
port:
number: 8080
path: /
pathType: Prefix
[root@k8s-master service]# kubectl apply -f ingress1.yml
ingress.networking.k8s.io/ingress1 created
[root@k8s-master service]# kubectl apply -f ingress2.yml
ingress.networking.k8s.io/ingress2 created
[root@k8s-master service]# kubectl describe ingress ingress1
Name: ingress1
Labels: <none>
Namespace: default
Address: 172.25.254.10
Ingress Class: nginx
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
*
/ deployment1:8080 (10.244.2.15:80)
Annotations: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 12m (x2 over 13m) nginx-ingress-controller Scheduled for sync
[root@k8s-master service]# kubectl describe ingress ingress2
Name: ingress2
Labels: <none>
Namespace: default
Address: 172.25.254.10
Ingress Class: nginx
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
*
/ deployment2:8080 (10.244.2.14:80)
Annotations: nginx.ingress.kubernetes.io/canary: true
nginx.ingress.kubernetes.io/canary-by-header: name
nginx.ingress.kubernetes.io/canary-by-header-value: huazi
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 2m21s (x2 over 3m3s) nginx-ingress-controller Scheduled for sync
基于权重的金丝雀发布
- 通过
Annotation
拓展 - 创建
灰度ingress
,配置灰度权重
以及总权重
灰度流量
验证完毕后,切换正式ingress
到新版本
#删掉基于header的ingress
[root@k8s-master service]# kubectl delete -f ingress2.yml
ingress.networking.k8s.io "ingress2" deleted
[root@k8s-master service]# vim ingress2.yml
[root@k8s-master service]# cat ingress2.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/canary: "true"
nginx.ingress.kubernetes.io/canary-weight: "10" #10/100=10%,将有10%的流量打到新版本上
nginx.ingress.kubernetes.io/canary-weight-total: "100"
name: ingress2
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: deployment2
port:
number: 8080
path: /
pathType: Prefix
[root@k8s-master service]# kubectl apply -f ingress2.yml
ingress.networking.k8s.io/ingress2 created
[root@k8s-master service]# kubectl describe ingress ingress1
Name: ingress1
Labels: <none>
Namespace: default
Address: 172.25.254.10
Ingress Class: nginx
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
*
/ deployment1:8080 (10.244.2.15:80)
Annotations: <none>
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 27m (x2 over 27m) nginx-ingress-controller Scheduled for sync
[root@k8s-master service]# kubectl describe ingress ingress2
Name: ingress2
Labels: <none>
Namespace: default
Address: 172.25.254.10
Ingress Class: nginx
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
*
/ deployment2:8080 (10.244.2.14:80)
Annotations: nginx.ingress.kubernetes.io/canary: true
nginx.ingress.kubernetes.io/canary-weight: 10
nginx.ingress.kubernetes.io/canary-weight-total: 100
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Sync 34s (x2 over 44s) nginx-ingress-controller Scheduled for sync
- 写检测脚本
[root@harbor ~]# vim check.sh
#!/bin/bash
# Probe the ingress VIP 100 times and count how many responses were
# served by the v1 backend vs the v2 (canary) backend, to verify the
# configured canary traffic split.
set -u

v1=0
v2=0
for ((i = 0; i < 100; i++)); do
  # grep -c prints 1 when the response body contains "v1", else 0.
  # NOTE(review): assumes 172.25.254.50 is the ingress entry point —
  # confirm against the environment before reuse.
  response=$(curl -s 172.25.254.50 | grep -c v1)
  v1=$((v1 + response))
  v2=$((v2 + 1 - response))
done
echo "v1:$v1 v2:$v2"
[root@harbor ~]# bash check.sh
v1:89 v2:11
[root@harbor ~]# bash check.sh
v1:87 v2:13
#我们发现比例接近于9:1
当我们增加权重
后
[root@k8s-master service]# vim ingress2.yml
[root@k8s-master service]# cat ingress2.yml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
nginx.ingress.kubernetes.io/canary: "true"
nginx.ingress.kubernetes.io/canary-weight: "20"
nginx.ingress.kubernetes.io/canary-weight-total: "100"
name: ingress2
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: deployment2
port:
number: 8080
path: /
pathType: Prefix
[root@k8s-master service]# kubectl apply -f ingress2.yml
ingress.networking.k8s.io/ingress2 configured
[root@harbor ~]# bash check.sh
v1:76 v2:24
[root@harbor ~]# bash check.sh
v1:79 v2:21
#我们发现比例接近于8:2