k8s & Ceph RBD (dynamic PVC)

Official reference documentation:
ceph/ceph-csi v3.9.0 on GitHub: https://github.com/ceph/ceph-csi/tree/v3.9.0

Tested versions

Ceph Version   Ceph CSI Version   Container Orchestrator Name   Version Tested
v17.2.7        v3.9.0             Kubernetes                    v1.25.6

Installing Ceph-CSI

Step 1 Download ceph-csi v3.9.0 from GitHub

root@sd-k8s-master-1:~# wget https://github.com/ceph/ceph-csi/archive/refs/tags/v3.9.0.zip
root@sd-k8s-master-1:~# unzip v3.9.0.zip

Step 2 Create the CSIDriver object

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat csidriver.yaml 
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: "rbd.csi.ceph.com"
spec:
  attachRequired: true
  podInfoOnMount: false
  seLinuxMount: true
  fsGroupPolicy: File
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f csidriver.yaml
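
The CSIDriver object is cluster-scoped. As a quick sanity check (not part of the original transcript), the registered driver can be listed with:

kubectl get csidriver rbd.csi.ceph.com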

Step 3 Deploy RBAC for the sidecar containers and the node plugin:

root@sd-k8s-master-1:~# cd /root/ceph-csi-3.9.0/deploy/rbd/kubernetes
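
Note: all of the manifests below have been rewritten to use the cephfs namespace (the upstream files ship with the default namespace). If that namespace does not yet exist in your cluster, create it first, for example:

kubectl create namespace cephfs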

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat csi-provisioner-rbac.yaml 
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-provisioner
  # replace with non-cephfs namespace name
  namespace: cephfs

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots/status"]
    verbs: ["get", "list", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update", "patch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts/token"]
    verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    # replace with non-cephfs namespace name
    namespace: cephfs
roleRef:
  kind: ClusterRole
  name: rbd-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-cephfs namespace name
  namespace: cephfs
  name: rbd-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch", "create", "update", "delete"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-provisioner-role-cfg
  # replace with non-cephfs namespace name
  namespace: cephfs
subjects:
  - kind: ServiceAccount
    name: rbd-csi-provisioner
    # replace with non-cephfs namespace name
    namespace: cephfs
roleRef:
  kind: Role
  name: rbd-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat csi-nodeplugin-rbac.yaml 
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rbd-csi-nodeplugin
  # replace with non-cephfs namespace name
  namespace: cephfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
  # allow to read Vault Token and connection options from the Tenants namespace
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["list", "get"]
  - apiGroups: [""]
    resources: ["serviceaccounts/token"]
    verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: rbd-csi-nodeplugin
subjects:
  - kind: ServiceAccount
    name: rbd-csi-nodeplugin
    # replace with non-cephfs namespace name
    namespace: cephfs
roleRef:
  kind: ClusterRole
  name: rbd-csi-nodeplugin
  apiGroup: rbac.authorization.k8s.io

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f csi-provisioner-rbac.yaml

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f csi-nodeplugin-rbac.yaml
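
Optionally confirm that the RBAC objects were created (these commands assume the cephfs namespace used above):

kubectl -n cephfs get serviceaccount rbd-csi-provisioner rbd-csi-nodeplugin
kubectl get clusterrole rbd-external-provisioner-runner rbd-csi-nodeplugin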

Step 4 Edit the ConfigMaps
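
The clusterID below must be the Ceph cluster fsid and the monitors list must match your MON addresses. On the Ceph side these can be looked up with, for example:

ceph fsid
ceph mon dump    # or: ceph -s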

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat csi-config-map.yaml 
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "92ab6c78-7edc-11ee-aec4-5e807f521aec",
        "monitors": [
          "10.220.9.13:6789",
          "10.220.9.14:6789",
          "10.220.9.15:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
  namespace: cephfs

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat ceph-config-map.yaml 
---
apiVersion: v1
kind: ConfigMap
data:
  ceph.conf: |
    [global]
    fsid = 92ab6c78-7edc-11ee-aec4-5e807f521aec
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
  # keyring is a required key and its value should be empty
  keyring: |
metadata:
  name: ceph-config
  namespace: cephfs

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f csi-config-map.yaml

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f ceph-config-map.yaml

Step 5 Deploy the CSI sidecar containers:

# Note: the ceph-csi-encryption-kms-config volume and the oidc-token volume have been commented out (removed) in the manifests below, since KMS encryption is not used here.

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# sed -i 's/registry.k8s.io/k8s.dockerproxy.com/g' csi-rbdplugin-provisioner.yaml

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# sed -i 's/registry.k8s.io/k8s.dockerproxy.com/g' csi-rbdplugin.yaml

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat csi-rbdplugin-provisioner.yaml 
---
kind: Service
apiVersion: v1
metadata:
  name: csi-rbdplugin-provisioner
  # replace with non-cephfs namespace name
  namespace: cephfs
  labels:
    app: csi-metrics
spec:
  selector:
    app: csi-rbdplugin-provisioner
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8680

---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin-provisioner
  # replace with non-cephfs namespace name
  namespace: cephfs
spec:
  replicas: 3
  selector:
    matchLabels:
      app: csi-rbdplugin-provisioner
  template:
    metadata:
      labels:
        app: csi-rbdplugin-provisioner
    spec:
      serviceAccountName: rbd-csi-provisioner
      priorityClassName: system-cluster-critical
      containers:
        - name: csi-provisioner
          image: k8s.dockerproxy.com/sig-storage/csi-provisioner:v3.5.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=1"
            - "--timeout=150s"
            - "--retry-interval-start=500ms"
            - "--leader-election=true"
            #  set it to true to use topology based provisioning
            - "--feature-gates=Topology=false"
            - "--feature-gates=HonorPVReclaimPolicy=true"
            - "--prevent-volume-mode-conversion=true"
            # if fstype is not specified in storageclass, ext4 is cephfs
            - "--default-fstype=ext4"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: k8s.dockerproxy.com/sig-storage/csi-snapshotter:v6.2.2
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=1"
            - "--timeout=150s"
            - "--leader-election=true"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-attacher
          image: k8s.dockerproxy.com/sig-storage/csi-attacher:v4.3.0
          args:
            - "--v=1"
            - "--csi-address=$(ADDRESS)"
            - "--leader-election=true"
            - "--retry-interval-start=500ms"
            - "--default-fstype=ext4"
          env:
            - name: ADDRESS
              value: /csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: k8s.dockerproxy.com/sig-storage/csi-resizer:v1.8.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=1"
            - "--timeout=150s"
            - "--leader-election"
            - "--retry-interval-start=500ms"
            - "--handle-volume-inuse-error=false"
            - "--feature-gates=RecoverVolumeExpansionFailure=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-rbdplugin
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=rbd"
            - "--controllerserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--pidlimit=-1"
            - "--rbdhardmaxclonedepth=8"
            - "--rbdsoftmaxclonedepth=4"
            - "--enableprofiling=false"
            - "--setmetadata=true"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # - name: KMS_CONFIGMAP_NAME
            #   value: encryptionConfig
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: CSI_ADDONS_ENDPOINT
              value: unix:///csi/csi-addons.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            - name: ceph-config
              mountPath: /etc/ceph/
        - name: csi-rbdplugin-controller
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--type=controller"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--drivernamespace=$(DRIVER_NAMESPACE)"
            - "--setmetadata=true"
          env:
            - name: DRIVER_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            - name: ceph-config
              mountPath: /etc/ceph/
        - name: liveness-prometheus
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8680"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: socket-dir
          emptyDir: {
            medium: "Memory"
          }
        - name: ceph-config
          configMap:
            name: ceph-config
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat csi-rbdplugin.yaml 
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin
  # replace with non-cephfs namespace name
  namespace: cephfs
spec:
  selector:
    matchLabels:
      app: csi-rbdplugin
  template:
    metadata:
      labels:
        app: csi-rbdplugin
    spec:
      serviceAccountName: rbd-csi-nodeplugin
      hostNetwork: true
      hostPID: true
      priorityClassName: system-node-critical
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          # This is necessary only for systems with SELinux, where
          # non-privileged sidecar containers cannot access unix domain socket
          # created by privileged CSI driver container.
          securityContext:
            privileged: true
            allowPrivilegeEscalation: true
          image: k8s.dockerproxy.com/sig-storage/csi-node-driver-registrar:v2.8.0
          args:
            - "--v=1"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/rbd.csi.ceph.com/csi.sock"
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-rbdplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--pluginpath=/var/lib/kubelet/plugins"
            - "--stagingpath=/var/lib/kubelet/plugins/kubernetes.io/csi/"
            - "--type=rbd"
            - "--nodeserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--csi-addons-endpoint=$(CSI_ADDONS_ENDPOINT)"
            - "--v=5"
            - "--drivername=rbd.csi.ceph.com"
            - "--enableprofiling=false"
            # If topology based provisioning is desired, configure required
            # node labels representing the nodes topology domain
            # and pass the label names below, for CSI to consume and advertise
            # its equivalent topology domain
            # - "--domainlabels=failure-domain/region,failure-domain/zone"
            #
            # Options to enable read affinity.
            # If enabled Ceph CSI will fetch labels from kubernetes node and
            # pass `read_from_replica=localize,crush_location=type:value` during
            # rbd map command. refer:
            # https://docs.ceph.com/en/latest/man/8/rbd/#kernel-rbd-krbd-options
            # for more details.
            # - "--enable-read-affinity=true"
            # - "--crush-location-labels=topology.io/zone,topology.io/rack"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            # - name: KMS_CONFIGMAP_NAME
            #   value: encryptionConfig
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: CSI_ADDONS_ENDPOINT
              value: unix:///csi/csi-addons.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - mountPath: /dev
              name: host-dev
            - mountPath: /sys
              name: host-sys
            - mountPath: /run/mount
              name: host-mount
            - mountPath: /etc/selinux
              name: etc-selinux
              readOnly: true
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: "Bidirectional"
            - name: mountpoint-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: "Bidirectional"
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            - name: ceph-logdir
              mountPath: /var/log/ceph
            - name: ceph-config
              mountPath: /etc/ceph/
        - name: liveness-prometheus
          securityContext:
            privileged: true
            allowPrivilegeEscalation: true
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8682"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/rbd.csi.ceph.com
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
        - name: mountpoint-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
        - name: ceph-logdir
          hostPath:
            path: /var/log/ceph
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-sys
          hostPath:
            path: /sys
        - name: etc-selinux
          hostPath:
            path: /etc/selinux
        - name: host-mount
          hostPath:
            path: /run/mount
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: ceph-config
          configMap:
            name: ceph-config
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
---
# This is a service to expose the liveness metrics
apiVersion: v1
kind: Service
metadata:
  name: csi-metrics-rbdplugin
  # replace with non-cephfs namespace name
  namespace: cephfs
  labels:
    app: csi-metrics
spec:
  ports:
    - name: http-metrics
      port: 8682
      protocol: TCP
      targetPort: 8682
  selector:
    app: csi-rbdplugin

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f csi-rbdplugin-provisioner.yaml

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f csi-rbdplugin.yaml
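
Before continuing, it is worth checking that the provisioner Deployment and the node-plugin DaemonSet are running, for example:

kubectl -n cephfs get pods -l app=csi-rbdplugin-provisioner
kubectl -n cephfs get pods -l app=csi-rbdplugin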

Step 6 Create the RBD pool on the Ceph cluster

[root@ceph01 ~]# ceph osd pool create kubernetes

[root@ceph01 ~]# ceph auth get-or-create client.kubernetes mon 'allow r' osd 'allow rwx pool=kubernetes, profile rbd' mds 'allow rw' mgr 'allow rw'
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

[root@ceph01 ~]# ceph osd pool application enable kubernetes rbd
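
A newly created RBD pool should also be initialized before first use (a standard step from the Ceph documentation):

rbd pool init kubernetes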

Step 7 Create the Ceph secret

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat ceph-sc-rbd.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephrbd-secret
  namespace: cephfs
stringData:
  userID: kubernetes
  userKey: AQDJy5tlv8SMCBAAvEV6nr66zGXizw/3O7Z+kw== 

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f ceph-sc-rbd.yaml
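
The userKey value is the key of the client.kubernetes user created in Step 6; it can be retrieved on a Ceph node with:

ceph auth get-key client.kubernetes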

Step 8 Create the StorageClass

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat ceph-storage-rbd-class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephrbd-sc
  namespace: cephfs
provisioner: rbd.csi.ceph.com 
parameters:
  clusterID: 92ab6c78-7edc-11ee-aec4-5e807f521aec 
  pool: kubernetes          # RBD pool created in Step 6 (the RBD driver uses "pool", not "fsName")
  imageFeatures: layering   # RBD image features
  #  mounter: rbd-nbd       # optional mount method; defaults to krbd
  csi.storage.k8s.io/provisioner-secret-name: csi-cephrbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: cephfs
  csi.storage.k8s.io/controller-expand-secret-name: csi-cephrbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: cephfs
  csi.storage.k8s.io/node-stage-secret-name: csi-cephrbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: cephfs
reclaimPolicy: Delete
allowVolumeExpansion: true

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f ceph-storage-rbd-class.yaml
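
Verify that the StorageClass was registered:

kubectl get sc csi-cephrbd-sc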

Step 9 Create a PVC to test provisioning

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat test-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephrbd-test-pvc
  namespace: cephfs
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephrbd-sc 

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f test-pvc.yaml

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl get pvc -n cephfs
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS     AGE
cephrbd-test-pvc    Bound    pvc-54481cce-a790-4237-ada5-06989567d1aa   1Gi        RWO            csi-cephrbd-sc   15h
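
Once the PVC is Bound, a matching PV and a backing RBD image should exist. As an optional cross-check (the image name is generated by ceph-csi as csi-vol-<uuid>):

kubectl get pv pvc-54481cce-a790-4237-ada5-06989567d1aa
rbd ls -p kubernetes        # run on a Ceph node; lists the csi-vol-* images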

Step 10 Create a test Pod

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# cat test-pod.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
  namespace: cephfs
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - image: harbor.zetyun.cn/gcp/nginx:1.25.3
    name: test-container
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  volumes:
  - name: cache-volume
    persistentVolumeClaim:
      claimName: cephrbd-test-pvc  # name of the PVC created above

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl create -f test-pod.yaml 

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/rbd/kubernetes# kubectl get po test-pd -n cephfs
NAME      READY   STATUS    RESTARTS   AGE
test-pd   1/1     Running   0          6m49s
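
To confirm the RBD volume is actually mounted inside the container, for example:

kubectl -n cephfs exec test-pd -- df -h /cache
kubectl -n cephfs exec test-pd -- sh -c 'echo ok > /cache/test.txt && cat /cache/test.txt'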
