The previous post covered deploying Nacos backed by MySQL; in this one we walk through deploying Nacos backed by PostgreSQL.
1. Kubernetes cluster layout
master: 11.0.1.3
node: 11.0.1.4, 11.0.1.5 (NFS)
nfs: 11.0.1.5
2. Install NFS
Install nfs-utils and rpcbind
The nfs-utils package must be installed on both the NFS server and the NFS clients:
yum install nfs-utils rpcbind -y
Create the shared directory
mkdir -p /nfsdata
chmod 777 /nfsdata
Edit /etc/exports and add the following line:
vi /etc/exports
/nfsdata *(rw,sync,no_root_squash)
Start the services
systemctl enable rpcbind.service --now
systemctl enable nfs.service --now
The start order must be rpcbind first, then nfs; otherwise errors may occur.
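As a quick sanity check (not part of the original steps), verify that the export is visible from any client node; showmount ships with nfs-utils:
showmount -e 11.0.1.5
# The export list should include /nfsdata.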
3. Deploy the dynamic PV provisioner
First clone the nacos-k8s repository from GitHub:
git clone https://github.com/nacos-group/nacos-k8s.git
The directory layout is as follows:
[root@master1 ~]# tree nacos-k8s/deploy
nacos-k8s/deploy
├── ceph
│   ├── pvc.yaml
│   └── sc.yaml
├── ingress
│   ├── ingress-nginx.yaml
│   └── service-nodeport.yaml
├── mysql
│   ├── mysql-ceph.yaml
│   ├── mysql-local.yaml
│   ├── mysql-nfs.yaml
│   └── mysql.yaml
├── nacos
│   ├── nacos-ingress.yaml
│   ├── nacos-no-pvc-ingress.yaml
│   ├── nacos-pvc-ceph.yaml
│   ├── nacos-pvc-nfs.yaml
│   ├── nacos-quick-start.yaml
│   └── nacos-tmp.yaml
└── nfs
    ├── class.yaml
    ├── deployment.yaml
    └── rbac.yaml
Because a StorageClass enables dynamic provisioning, we first need to deploy the provisioner for our storage backend (here, NFS). The provisioner must be granted enough permissions to access the Kubernetes API (much like the dashboard, which can only manage resources because it has access to the various APIs), so we set up RBAC for it.
Create the RBAC objects:
[root@master1 ~]# cat nacos-k8s/deploy/nfs/rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: devops
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: devops
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: devops
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
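The manifests above use the devops namespace. If it does not exist yet, create it first, then apply the RBAC file; the commands below sketch the usual workflow:
kubectl create namespace devops
kubectl apply -f nacos-k8s/deploy/nfs/rbac.yaml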
Create the StorageClass:
[root@master1 ~]# cat nacos-k8s/deploy/nfs/class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs
parameters:
  archiveOnDelete: "false"
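Apply the StorageClass and confirm that it is registered (a routine check, not in the original text):
kubectl apply -f nacos-k8s/deploy/nfs/class.yaml
kubectl get storageclass
# managed-nfs-storage should be listed with provisioner fuseim.pri/ifs.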
Create the provisioner Deployment (the NFS client provisioner):
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  namespace: devops
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              # the IP of your NFS server
              value: 11.0.1.5
            - name: NFS_PATH
              # the export path on your NFS server (see above)
              value: /nfsdata
      volumes:
        - name: nfs-client-root
          nfs:
            # the IP of your NFS server
            server: 11.0.1.5
            # the export path on your NFS server (see above)
            path: /nfsdata
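Apply the Deployment and make sure the provisioner pod comes up. The PersistentVolumeClaim below is a hypothetical test claim (name and size are arbitrary) used only to confirm that dynamic provisioning works; it should become Bound and produce a PV automatically:
kubectl apply -f nacos-k8s/deploy/nfs/deployment.yaml
kubectl get pods -n devops

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim          # hypothetical name, delete after the check
  namespace: devops
spec:
  storageClassName: managed-nfs-storage
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc -n devops
kubectl delete pvc test-claim -n devops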
4. Build the nacos-pg image
(1) Build the image manually
Download the source: https://download.youkuaiyun.com/download/weixin_43334786/87792995
Once downloaded, build the image from the package; a sketch is given below.
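A minimal build-and-push sketch, assuming the downloaded package contains a Dockerfile at its root; the directory name, image tag, and registry are placeholders to replace with your own:
cd nacos-pg                                # directory unpacked from the download (name may differ)
docker build -t <your-registry>/nacos-pg:v1.0 .
docker push <your-registry>/nacos-pg:v1.0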
5. Deploy the PostgreSQL database
The YAML manifests used for this deployment are included in the source package above; adjust the namespace, the parameters in the ConfigMap, and anything else as needed.
After deployment, check that the three Nacos pods are running, then open the console in a browser and confirm that all three nodes show as online.
The username and password are both nacos.
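A few commands for the final check, assuming the Nacos pods run in the devops namespace and the console is exposed via a NodePort service or Ingress defined in your manifests (adjust to your setup):
kubectl get pods -n devops
kubectl get svc,ingress -n devops
# Then open http://<node-ip>:<node-port>/nacos (or the Ingress host) and log in with nacos/nacos.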