k8s & CephFS (dynamic PVC)


Official reference:
GitHub - ceph/ceph-csi at v3.9.0

Tested versions

Ceph Version | Ceph CSI Version | Container Orchestrator Name | Version Tested
v17.2.7      | v3.9.0           | Kubernetes                  | v1.25.6

Install Ceph-CSI

Step 1 Download GitHub - ceph/ceph-csi at v3.9.0

root@sd-k8s-master-1:~# wget https://github.com/ceph/ceph-csi/archive/refs/tags/v3.9.0.zip
root@sd-k8s-master-1:~# unzip v3.9.0.zip

Step 2 Create the CSIDriver object

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat csidriver.yaml 
#
# /!\ DO NOT MODIFY THIS FILE
#
# This file has been automatically generated by Ceph-CSI yamlgen.
# The source for the contents can be found in the api/deploy directory, make
# your modifications there.
#
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: "cephfs.csi.ceph.com"
  namespace: cephfs
spec:
  attachRequired: false
  podInfoOnMount: false
  fsGroupPolicy: File
  seLinuxMount: true
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f csidriver.yaml
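
Optional sanity check (not part of the original session): confirm the driver object was registered with the attach/mount settings from the YAML above.

kubectl get csidriver cephfs.csi.ceph.com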

Step 3 Deploy RBAC for the sidecar containers and the node plugin:

root@sd-k8s-master-1:~# cd /root/ceph-csi-3.9.0/deploy/cephfs/kubernetes
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat csi-provisioner-rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-csi-provisioner
  namespace: cephfs

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["update", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots/status"]
    verbs: ["get", "list", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list", "watch", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update", "patch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts/token"]
    verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: cephfs-csi-provisioner
    namespace: cephfs
roleRef:
  kind: ClusterRole
  name: cephfs-external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-cephfs namespace name
  namespace: cephfs
  name: cephfs-external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-provisioner-role-cfg
  # replace with non-cephfs namespace name
  namespace: cephfs
subjects:
  - kind: ServiceAccount
    name: cephfs-csi-provisioner
    # replace with non-cephfs namespace name
    namespace: cephfs
roleRef:
  kind: Role
  name: cephfs-external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat csi-nodeplugin-rbac.yaml 
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cephfs-csi-nodeplugin
  namespace: cephfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-nodeplugin
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["serviceaccounts/token"]
    verbs: ["create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cephfs-csi-nodeplugin
subjects:
  - kind: ServiceAccount
    name: cephfs-csi-nodeplugin
    # replace with non-cephfs namespace name
    namespace: cephfs
roleRef:
  kind: ClusterRole
  name: cephfs-csi-nodeplugin
  apiGroup: rbac.authorization.k8s.io
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f csi-provisioner-rbac.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f csi-nodeplugin-rbac.yaml
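
Optionally verify the RBAC objects landed where expected before continuing:

kubectl get serviceaccount cephfs-csi-provisioner cephfs-csi-nodeplugin -n cephfs
kubectl get clusterrole cephfs-external-provisioner-runner cephfs-csi-nodeplugin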

Step 4 Edit the ConfigMap files

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat csi-config-map.yaml
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
          {
        "clusterID": "92ab6c78-7edc-11ee-aec4-5e807f521aec",
        "monitors": [
          "10.220.9.13:6789",
          "10.220.9.14:6789",
          "10.220.9.15:6789"
         ]
       }
    ]
metadata:
  name: ceph-csi-config
  namespace: cephfs
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat ceph-config.yaml 
apiVersion: v1
kind: ConfigMap
data:
  ceph.conf: |
    [global]
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx

    # Workaround for http://tracker.ceph.com/issues/23446
    fuse_set_user_groups = false

    # ceph-fuse which uses libfuse2 by default has write buffer size of 2KiB
    # adding 'fuse_big_writes = true' option by default to override this limit
    # see https://github.com/ceph/ceph-csi/issues/1928
    fuse_big_writes = true
  # keyring is a required key and its value should be empty
  keyring: |
metadata:
  name: ceph-config
  namespace: cephfs
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f csi-config-map.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f ceph-config.yaml
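
The clusterID and monitors values in csi-config-map.yaml must describe your own Ceph cluster. Assuming access to a Ceph admin node, both can be read directly:

[root@ceph01 ~]# ceph fsid        # use this value as clusterID
[root@ceph01 ~]# ceph mon dump    # mon v1 addresses (port 6789) go into the monitors list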

Step 5 Deploy the CSI sidecar containers

Note: the ceph-csi-encryption-kms-config volume (and its volumeMount) is commented out in both manifests below, since KMS-based encryption is not used in this setup. The image registry is also rewritten to a mirror (k8s.dockerproxy.com) because registry.k8s.io may be unreachable from the cluster:

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# sed -i 's/registry.k8s.io/k8s.dockerproxy.com/g' csi-cephfsplugin-provisioner.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# sed -i 's/registry.k8s.io/k8s.dockerproxy.com/g' csi-cephfsplugin.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat csi-cephfsplugin-provisioner.yaml
---
kind: Service
apiVersion: v1
metadata:
  name: csi-cephfsplugin-provisioner
  namespace: cephfs
  labels:
    app: csi-metrics
spec:
  selector:
    app: csi-cephfsplugin-provisioner
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8681

---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: csi-cephfsplugin-provisioner
  namespace: cephfs
spec:
  selector:
    matchLabels:
      app: csi-cephfsplugin-provisioner
  replicas: 3
  template:
    metadata:
      labels:
        app: csi-cephfsplugin-provisioner
    spec:
      #      affinity:
      #        podAntiAffinity:
      #          requiredDuringSchedulingIgnoredDuringExecution:
      #            - labelSelector:
      #                matchExpressions:
      #                  - key: app
      #                    operator: In
      #                    values:
      #                      - csi-cephfsplugin-provisioner
      #              topologyKey: "kubernetes.io/hostname"
      serviceAccountName: cephfs-csi-provisioner
      priorityClassName: system-cluster-critical
      containers:
        - name: csi-provisioner
          image: k8s.dockerproxy.com/sig-storage/csi-provisioner:v3.5.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=1"
            - "--timeout=150s"
            - "--leader-election=true"
            - "--retry-interval-start=500ms"
            - "--feature-gates=Topology=false"
            - "--feature-gates=HonorPVReclaimPolicy=true"
            - "--prevent-volume-mode-conversion=true"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-resizer
          image: k8s.dockerproxy.com/sig-storage/csi-resizer:v1.8.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=1"
            - "--timeout=150s"
            - "--leader-election"
            - "--retry-interval-start=500ms"
            - "--handle-volume-inuse-error=false"
            - "--feature-gates=RecoverVolumeExpansionFailure=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-snapshotter
          image: k8s.dockerproxy.com/sig-storage/csi-snapshotter:v6.2.2
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=1"
            - "--timeout=150s"
            - "--leader-election=true"
            - "--extra-create-metadata=true"
          env:
            - name: ADDRESS
              value: unix:///csi/csi-provisioner.sock
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
        - name: csi-cephfsplugin
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=cephfs"
            - "--controllerserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=cephfs.csi.ceph.com"
            - "--pidlimit=-1"
            - "--enableprofiling=false"
            - "--setmetadata=true"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          # - name: KMS_CONFIGMAP_NAME
          #   value: encryptionConfig
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: host-sys
              mountPath: /sys
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            - name: host-dev
              mountPath: /dev
            - name: ceph-config
              mountPath: /etc/ceph/
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
          # - name: ceph-csi-encryption-kms-config
          #   mountPath: /etc/ceph-csi-encryption-kms-config/
        - name: liveness-prometheus
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8681"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi-provisioner.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: socket-dir
          emptyDir: {
            medium: "Memory"
          }
        - name: host-sys
          hostPath:
            path: /sys
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: host-dev
          hostPath:
            path: /dev
        - name: ceph-config
          configMap:
            name: ceph-config
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
        # - name: ceph-csi-encryption-kms-config
        #   configMap:
        #     name: ceph-csi-encryption-kms-config
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat csi-cephfsplugin.yaml
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-cephfsplugin
  namespace: cephfs
spec:
  selector:
    matchLabels:
      app: csi-cephfsplugin
  template:
    metadata:
      labels:
        app: csi-cephfsplugin
    spec:
      serviceAccountName: cephfs-csi-nodeplugin
      priorityClassName: system-node-critical
      hostNetwork: true
      hostPID: true
      # to use e.g. Rook orchestrated cluster, and mons' FQDN is
      # resolved through k8s service, set dns policy to cluster first
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: driver-registrar
          # This is necessary only for systems with SELinux, where
          # non-privileged sidecar containers cannot access unix domain socket
          # created by privileged CSI driver container.
          securityContext:
            privileged: true
            allowPrivilegeEscalation: true
          image: k8s.dockerproxy.com/sig-storage/csi-node-driver-registrar:v2.8.0
          args:
            - "--v=1"
            - "--csi-address=/csi/csi.sock"
            - "--kubelet-registration-path=/var/lib/kubelet/plugins/cephfs.csi.ceph.com/csi.sock"
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
        - name: csi-cephfsplugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--nodeid=$(NODE_ID)"
            - "--type=cephfs"
            - "--nodeserver=true"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--v=5"
            - "--drivername=cephfs.csi.ceph.com"
            - "--enableprofiling=false"
            # If topology based provisioning is desired, configure required
            # node labels representing the nodes topology domain
            # and pass the label names below, for CSI to consume and advertise
            # its equivalent topology domain
            # - "--domainlabels=failure-domain/region,failure-domain/zone"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: NODE_ID
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          # - name: KMS_CONFIGMAP_NAME
          #   value: encryptionConfig
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: mountpoint-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: "Bidirectional"
            - name: host-sys
              mountPath: /sys
            - name: etc-selinux
              mountPath: /etc/selinux
              readOnly: true
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            - name: host-dev
              mountPath: /dev
            - name: host-mount
              mountPath: /run/mount
            - name: ceph-config
              mountPath: /etc/ceph/
            - name: ceph-csi-config
              mountPath: /etc/ceph-csi-config/
            - name: keys-tmp-dir
              mountPath: /tmp/csi/keys
            - name: ceph-csi-mountinfo
              mountPath: /csi/mountinfo
          # - name: ceph-csi-encryption-kms-config
          #   mountPath: /etc/ceph-csi-encryption-kms-config/
        - name: liveness-prometheus
          securityContext:
            privileged: true
            allowPrivilegeEscalation: true
          image: quay.io/cephcsi/cephcsi:v3.9.0
          args:
            - "--type=liveness"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--metricsport=8681"
            - "--metricspath=/metrics"
            - "--polltime=60s"
            - "--timeout=3s"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
          imagePullPolicy: "IfNotPresent"
      volumes:
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: mountpoint-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
        - name: host-sys
          hostPath:
            path: /sys
        - name: etc-selinux
          hostPath:
            path: /etc/selinux
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: host-dev
          hostPath:
            path: /dev
        - name: host-mount
          hostPath:
            path: /run/mount
        - name: ceph-config
          configMap:
            name: ceph-config
        - name: ceph-csi-config
          configMap:
            name: ceph-csi-config
        - name: keys-tmp-dir
          emptyDir: {
            medium: "Memory"
          }
        - name: ceph-csi-mountinfo
          hostPath:
            path: /var/lib/kubelet/plugins/cephfs.csi.ceph.com/mountinfo
            type: DirectoryOrCreate
        # - name: ceph-csi-encryption-kms-config
        #   configMap:
        #     name: ceph-csi-encryption-kms-config
---
# This is a service to expose the liveness metrics
apiVersion: v1
kind: Service
metadata:
  name: csi-metrics-cephfsplugin
  namespace: cephfs
  labels:
    app: csi-metrics
spec:
  ports:
    - name: http-metrics
      port: 8080
      protocol: TCP
      targetPort: 8681
  selector:
    app: csi-cephfsplugin
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f csi-cephfsplugin-provisioner.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f csi-cephfsplugin.yaml
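
Before moving on, check that the CSI pods come up. With the manifests above you should see three csi-cephfsplugin-provisioner replicas plus one csi-cephfsplugin DaemonSet pod per node, all Running:

kubectl get pods -n cephfs -o wide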

Step 6 Create the CephFS file system on Ceph

If the file system does not exist yet, create its pools and the file system first (a minimal example; the names must match fsName and pool in the StorageClass of Step 8):

[root@ceph01 ~]# ceph osd pool create cephfs_metadata
[root@ceph01 ~]# ceph osd pool create cephfs_data
[root@ceph01 ~]# ceph fs new cephfs cephfs_metadata cephfs_data

Then retrieve the admin key, which is used for the Secret in Step 7:

[root@ceph01 ~]# ceph auth get-key client.admin
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

Step 7 Create the Ceph Secret

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat ceph-sc.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: cephfs
stringData:
  userID: admin
  userKey: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
  adminID: admin
  adminKey: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f ceph-sc.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl get secrets -n cephfs
NAME                TYPE     DATA   AGE
csi-cephfs-secret   Opaque   4      49m

Step 8 Create the StorageClass

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat ceph-storage-class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs-sc
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: 92ab6c78-7edc-11ee-aec4-5e807f521aec 
  fsName: cephfs              # CephFS file system name, created in Step 6
  pool: cephfs_data           # CephFS data pool name
  # mounter: fuse             # mount method: kernel client by default, uncomment for ceph-fuse
  csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/provisioner-secret-namespace: cephfs
  csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: cephfs
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: cephfs
reclaimPolicy: Delete
allowVolumeExpansion: true
# mountOptions:
#   - discard
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f ceph-storage-class.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl get storageclass
NAME                   PROVISIONER                                     RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
csi-cephfs-sc          cephfs.csi.ceph.com                             Delete          Immediate           true                   138m
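
Optionally, if this should act as the cluster's default StorageClass, it can be annotated as such (an extra step, not required for the tests below):

kubectl patch storageclass csi-cephfs-sc -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'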

Step 9 Create a test PVC

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat test-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-test-pvc
  namespace: cephfs
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephfs-sc
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f test-pvc.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl get pvc -n cephfs
NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
cephfs-test-pvc   Bound    pvc-94236c61-a5b7-494d-89ad-e1e18eaad175   1Gi        RWX            csi-cephfs-sc   49m
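
If the PVC stays Pending instead of Bound, its events usually name the cause (missing Secret, wrong clusterID, unreachable monitors). Two useful places to look:

kubectl describe pvc cephfs-test-pvc -n cephfs
kubectl logs -n cephfs deploy/csi-cephfsplugin-provisioner -c csi-provisioner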

Step 10 Create a test Pod

root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# cat test-pod.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: test-pd
spec:
  terminationGracePeriodSeconds: 0
  containers:
  - image: harbor.zetyun.cn/gcp/nginx:1.25.3
    name: test-container
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  volumes:
  - name: cache-volume
    persistentVolumeClaim:
      claimName: cephfs-test-pvc  # name of the PVC created in Step 9
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl create -f test-pod.yaml
root@sd-k8s-master-1:~/ceph-csi-3.9.0/deploy/cephfs/kubernetes# kubectl get po test-pd 
NAME      READY   STATUS    RESTARTS   AGE
test-pd   1/1     Running   0          6m49s
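
As a final check, write and read a file through the mount (/cache matches the mountPath in test-pod.yaml):

kubectl exec test-pd -- sh -c 'echo hello-cephfs > /cache/test.txt && cat /cache/test.txt'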

