Just change ipipMode to CrossSubnet in the IPPool. In CrossSubnet mode, Calico uses IPIP encapsulation only for traffic between nodes on different subnets; nodes on the same subnet route pod traffic natively.
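One way to apply the change is to patch the CRD in place (a sketch; calicoctl is the tool Calico recommends for editing its resources, but this cluster queries the crd.projectcalico.org objects directly, so kubectl works too):

kubectl patch ippool default-ipv4-ippool --type merge -p '{"spec":{"ipipMode":"CrossSubnet"}}'

The pool then looks like this: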
[root@shen-master150 calico]# kubectl get ippool -o yaml
apiVersion: v1
items:
- apiVersion: crd.projectcalico.org/v1
  kind: IPPool
  metadata:
    annotations:
      projectcalico.org/metadata: '{"uid":"5150aaef-11c2-4928-a852-6e9bdacbf51a","creationTimestamp":"2020-07-22T02:32:12Z"}'
    creationTimestamp: "2020-07-22T02:32:12Z"
    generation: 2
    name: default-ipv4-ippool
    resourceVersion: "1685668"
    selfLink: /apis/crd.projectcalico.org/v1/ippools/default-ipv4-ippool
    uid: 2b0cc4b2-5035-43a0-81ba-fc4ee58cc794
  spec:
    blockSize: 26
    cidr: 172.56.0.0/16
    ipipMode: CrossSubnet
    natOutgoing: true
    nodeSelector: all()
    vxlanMode: Never
kind: List
metadata:
  resourceVersion: ""
  selfLink: ""
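The effect shows up in the node routing table: routes to pod blocks on same-subnet nodes go straight out the physical NIC, while routes to blocks on nodes in other subnets go via the tunl0 IPIP device. A sketch (the interface name and peer IPs are placeholders, not taken from this cluster):

ip route | grep bird
# 172.56.x.0/26 via <same-subnet node IP> dev eth0 proto bird           <- no encapsulation
# 172.56.y.0/26 via <cross-subnet node IP> dev tunl0 proto bird onlink  <- IPIP only across subnets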
Pitfall encountered:
If the host has multiple NICs, kube-apiserver binds to (and advertises) the NIC that holds the default route at startup. Nodes on a different subnet may then be unable to reach the kubernetes svc, so their calico-node containers fail to come up.
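A quick way to see which NIC that is (a sketch; when --advertise-address is unset, the apiserver falls back to the host's default-route interface):

ip route show default
# default via <gateway> dev <NIC> ...   <- this NIC's address is what gets advertised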
Troubleshooting process (the output below was captured after the fix):
Check the svc and its endpoints (the apiserver publishes its --advertise-address into the kubernetes endpoints object, so that address must be reachable from every node):
[root@shen-master150 manifests]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 172.56.0.1 <none> 443/TCP 67m
[root@shen-master150 manifests]# kubectl get ep kubernetes
NAME ENDPOINTS AGE
kubernetes 10.19.161.150:6443 67m
Check the IPVS state (note the third entry, apparently established before the fix: it still points at 192.168.122.150:6443, the address on the default-route NIC, instead of 10.19.161.150:6443):
[root@shen-master150 manifests]# ipvsadm -lnc
IPVS connection entries
pro expire state source virtual destination
TCP 14:59 ESTABLISHED 172.56.0.1:52640 172.56.0.1:443 10.19.161.150:6443
TCP 14:59 ESTABLISHED 172.56.0.1:52642 172.56.0.1:443 10.19.161.150:6443
TCP 14:59 ESTABLISHED 172.56.196.2:43842 172.56.0.1:443 192.168.122.150:6443
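The virtual-server-to-real-server mapping can be double-checked as well (a sketch; the expected output assumes the fix is already in place):

ipvsadm -Ln -t 172.56.0.1:443
# TCP  172.56.0.1:443 rr
#   -> 10.19.161.150:6443    Masq    1   ...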
Solution:
Modify the apiserver startup parameters. The changed line:
--advertise-address=10.19.161.150
[root@shen-master150 calico]# cd /etc/kubernetes/manifests/
[root@shen-master150 manifests]# cat kube-apiserver.yaml
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: kube-apiserver
    tier: control-plane
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
  - command:
    - kube-apiserver
    - --advertise-address=10.19.161.150
    - --allow-privileged=true
    - --authorization-mode=Node,RBAC
    - --client-ca-file=/etc/kubernetes/pki/ca.crt
    - --enable-admission-plugins=NodeRestriction
    - --enable-bootstrap-token-auth=true
    - --etcd-servers=http://10.19.161.150:2379,http://10.19.161.151:2379,http://10.19.161.152:2379
    - --insecure-port=0
    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
    - --requestheader-allowed-names=front-proxy-client
    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --secure-port=6443
    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
    - --service-cluster-ip-range=172.56.0.0/13
    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.16.5
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 10.19.161.150
        path: /healthz
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: kube-apiserver
    resources:
      requests:
        cpu: 250m
    volumeMounts:
    - mountPath: /etc/ssl/certs
      name: ca-certs
      readOnly: true
    - mountPath: /etc/pki
      name: etc-pki
      readOnly: true
    - mountPath: /etc/kubernetes/pki
      name: k8s-certs
      readOnly: true
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/ssl/certs
      type: DirectoryOrCreate
    name: ca-certs
  - hostPath:
      path: /etc/pki
      type: DirectoryOrCreate
    name: etc-pki
  - hostPath:
      path: /etc/kubernetes/pki
      type: DirectoryOrCreate
    name: k8s-certs
status: {}
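Since this is a static pod, kubelet (which watches /etc/kubernetes/manifests) recreates the apiserver on its own once the file is saved; no manual restart is needed. A quick verification (the pod name is assumed to follow the usual <component>-<node> convention):

kubectl -n kube-system get pod kube-apiserver-shen-master150   # recreated by kubelet
kubectl get ep kubernetes                                      # should now show 10.19.161.150:6443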