[Kubernetes] Kubernetes 1.20.4 + CephFS (Octopus) Create / Resize (Expand)

왕영주 2021. 3. 19. 16:25

Prerequisite


Check Ceph information


root@yjwang0-ceph-01:~# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid 454a3b6e-9d06-4811-82dd-ae216b7a3fa2
last_changed 2021-03-18T16:08:07.455129+0900
created 2021-03-18T16:08:07.455129+0900
min_mon_release 17 (quincy)
election_strategy: 1
0: [v2:10.99.99.30:3300/0,v1:10.99.99.30:6789/0] mon.yjwang0-ceph-01
1: [v2:10.99.99.31:3300/0,v1:10.99.99.31:6789/0] mon.yjwang0-ceph-02
2: [v2:10.99.99.32:3300/0,v1:10.99.99.32:6789/0] mon.yjwang0-ceph-03
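The fsid and monitor addresses above are reused in the CSI ConfigMap and StorageClass below. As an optional sanity check, the fsid alone can also be printed with:

ceph fsid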

Configure Ceph


Create a Ceph fs volume

root@yjwang0-ceph-01:~# ceph fs volume create kubernetes
Volume created successfully (no MDS daemons created)
root@yjwang0-ceph-01:~# ceph fs ls
name: kubernetes, metadata pool: cephfs.kubernetes.meta, data pools: [cephfs.kubernetes.data ]

Create ceph fs auth

root@yjwang0-ceph-01:~# ceph auth get-or-create client.cephfs mon 'allow r' osd 'allow rwx pool=kubernetes'
[client.cephfs]
    key = AQBgJ1Rg9DMzEhAA/y3+g2tDGHOujxPVjFHS6A==
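If the key needs to be looked up again later (for example when filling in the Secret below), it can be printed without the keyring wrapper. This is only a convenience, not part of the original steps:

ceph auth get-key client.cephfs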

ceph mds stat

root@yjwang0-ceph-01:~# ceph mds stat
kubernetes:1 cephfs:1 {cephfs:0=yjwang0-ceph-01=up:active,kubernetes:0=yjwang0-ceph-02=up:active} 1 up:standby
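For a more detailed view of the MDS ranks and the pools backing the new file system, an optional check such as the following can be used:

ceph fs status kubernetes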

Configure Kubernetes


Configure the ConfigMap
Use the fsid above as the clusterID.

root@node1:~/cephfs# cat csi-config-map.yaml 
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "454a3b6e-9d06-4811-82dd-ae216b7a3fa2",
        "monitors": [
          "10.99.99.30:6789",
          "10.99.99.31:6789",
          "10.99.99.32:6789"
        ],
        "cephFS": {
          "subvolumeGroup": "kubernetes"
        }
      }  
    ]
metadata:
  name: ceph-csi-config

admin auth information

root@yjwang0-ceph-01:~# ceph auth get client.admin
exported keyring for client.admin
[client.admin]
    key = AQBs/FJgC6z2OhAAoPp0WFfPvhp0DiLEKk01xw==
    caps mds = "allow *"
    caps mgr = "allow *"
    caps mon = "allow *"
    caps osd = "allow *"

Deploy
Apply the ConfigMap, then use the admin information above and the client.cephfs user created earlier to build the Secret below.

root@node1:~/cephfs# kubectl apply -f csi-config-map.yaml 
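Optionally confirm that the ConfigMap landed with the expected clusterID and monitors:

kubectl get configmap ceph-csi-config -o yaml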

Configure the Secret

root@node1:~/cephfs# cat csi-fs-secret.yaml 
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-fs-secret
  namespace: default
stringData:
  userID: cephfs
  userKey: AQBgJ1Rg9DMzEhAA/y3+g2tDGHOujxPVjFHS6A==
  adminID: admin
  adminKey: AQBs/FJgC6z2OhAAoPp0WFfPvhp0DiLEKk01xw==

Deploy

root@node1:~/cephfs# kubectl apply -f csi-fs-secret.yaml 
secret/csi-fs-secret created
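To double-check the Secret without printing the keys in clear text, an optional describe is enough:

kubectl describe secret csi-fs-secret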

Configure the StorageClass

root@node1:~/cephfs# cat csi-fs-sc.yaml 
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
   name: csi-fs-sc
allowVolumeExpansion: true
provisioner: cephfs.csi.ceph.com
parameters:
   clusterID: 454a3b6e-9d06-4811-82dd-ae216b7a3fa2
   fsName: kubernetes
   csi.storage.k8s.io/controller-expand-secret-name: csi-fs-secret
   csi.storage.k8s.io/controller-expand-secret-namespace: default
   csi.storage.k8s.io/provisioner-secret-name: csi-fs-secret
   csi.storage.k8s.io/provisioner-secret-namespace: default
   csi.storage.k8s.io/node-stage-secret-name: csi-fs-secret
   csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
mountOptions:
   - debug

Deploy

root@node1:~/cephfs# kubectl apply -f csi-fs-sc.yaml 
storageclass.storage.k8s.io/csi-fs-sc created
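As an optional check, confirm the StorageClass is registered with the cephfs.csi.ceph.com provisioner and allows volume expansion:

kubectl get sc csi-fs-sc
kubectl describe sc csi-fs-sc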

Deploy RBAC

root@node1:~/cephfs# kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/cephfs/kubernetes/csi-provisioner-rbac.yaml
serviceaccount/cephfs-csi-provisioner created
clusterrole.rbac.authorization.k8s.io/cephfs-external-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/cephfs-csi-provisioner-role created
role.rbac.authorization.k8s.io/cephfs-external-provisioner-cfg created
rolebinding.rbac.authorization.k8s.io/cephfs-csi-provisioner-role-cfg created

root@node1:~/cephfs# kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/cephfs/kubernetes/csi-nodeplugin-rbac.yaml
serviceaccount/cephfs-csi-nodeplugin created
clusterrole.rbac.authorization.k8s.io/cephfs-csi-nodeplugin created
clusterrolebinding.rbac.authorization.k8s.io/cephfs-csi-nodeplugin created

Deploy the provisioner

root@node1:~/cephfs# kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/cephfs/kubernetes/csi-cephfsplugin-provisioner.yaml
service/csi-cephfsplugin-provisioner created
deployment.apps/csi-cephfsplugin-provisioner created

Deploy the driver

root@node1:~/cephfs# kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/cephfs/kubernetes/csi-cephfsplugin.yaml
daemonset.apps/csi-cephfsplugin created
service/csi-metrics-cephfsplugin created

Check deployment status

root@node1:~/cephfs# kubectl get pod
NAME                                           READY   STATUS    RESTARTS   AGE
csi-cephfsplugin-mqwfn                         3/3     Running   0          19s
csi-cephfsplugin-provisioner-69c94cc48-gdj74   6/6     Running   0          40s
csi-cephfsplugin-provisioner-69c94cc48-nvvpr   6/6     Running   0          40s
csi-cephfsplugin-tzrjn                         3/3     Running   0          19s
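The same state can also be confirmed at the controller level; the object names below match what the manifests above created:

kubectl get deployment csi-cephfsplugin-provisioner
kubectl get daemonset csi-cephfsplugin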

Create a PVC

root@node1:~/cephfs# cat fs-pvc.yaml 
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-fs-sc

Deploy the PVC

root@node1:~/cephfs# kubectl apply -f fs-pvc.yaml 
persistentvolumeclaim/csi-cephfs-pvc created

Verify

root@node1:~/cephfs# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
csi-cephfs-pvc   Bound    pvc-dcc50cf4-5d58-4b04-8f90-b6942755b1a2   1Gi        RWX            csi-fs-sc      3s
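To actually consume the volume, a Pod can mount the PVC. This is a minimal sketch; the Pod name cephfs-test-pod, the busybox image, and the mount path are only examples and not part of the original setup:

---
apiVersion: v1
kind: Pod
metadata:
  name: cephfs-test-pod
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeMounts:
        - name: data
          mountPath: /mnt/cephfs
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: csi-cephfs-pvc

Because the PVC is ReadWriteMany, multiple Pods can mount the same volume at once.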

Verify from Ceph

root@yjwang0-ceph-01:/mnt/volumes/kubernetes# ceph fs subvolume ls kubernetes kubernetes
[
    {
        "name": "csi-vol-8d13f788-8882-11eb-8e8b-22b3dcc00ebf"
    },
    {
        "name": "csi-vol-f80fe59d-8882-11eb-8e8b-22b3dcc00ebf"
    }
]

Expand FS


Expand with kubectl

root@node1:~/cephfs# kubectl edit pvc csi-cephfs-pvc
...
  resources:
    requests:
      storage: 5Gi
  storageClassName: csi-fs-sc
  volumeMode: Filesystem
  volumeName: pvc-dcc50cf4-5d58-4b04-8f90-b6942755b1a2
...
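Instead of editing interactively, the same change can be made with a one-line patch (equivalent to the edit above):

kubectl patch pvc csi-cephfs-pvc -p '{"spec":{"resources":{"requests":{"storage":"5Gi"}}}}'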

Verify

root@node1:~/cephfs# kubectl get pvc
NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
csi-cephfs-pvc    Bound    pvc-dcc50cf4-5d58-4b04-8f90-b6942755b1a2   5Gi        RWX            csi-fs-sc      9m51s

Verify from Ceph

root@yjwang0-ceph-01:/mnt/volumes/kubernetes# ceph fs subvolume info kubernetes csi-vol-8d13f788-8882-11eb-8e8b-22b3dcc00ebf kubernetes
{
    "atime": "2021-03-19 16:13:06",
    "bytes_pcent": "0.00",
    "bytes_quota": 5368709120,
    "bytes_used": 0,
    "created_at": "2021-03-19 16:13:06",
    "ctime": "2021-03-19 16:22:32",
    "data_pool": "cephfs.kubernetes.data",
    "features": [
        "snapshot-clone",
        "snapshot-autoprotect",
        "snapshot-retention"
    ],
    "gid": 0,
    "mode": 16895,
    "mon_addrs": [
        "10.99.99.30:6789",
        "10.99.99.31:6789",
        "10.99.99.32:6789"
    ],
    "mtime": "2021-03-19 16:13:06",
    "path": "/volumes/kubernetes/csi-vol-8d13f788-8882-11eb-8e8b-22b3dcc00ebf/cd5310e4-df75-4302-b53e-b7223d311e0e",
    "pool_namespace": "",
    "state": "complete",
    "type": "subvolume",
    "uid": 0
}
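If a Pod like the earlier example is mounting the PVC, the new quota should also show up as the filesystem size from inside the container. This assumes the hypothetical cephfs-test-pod and mount path from the sketch above:

kubectl exec cephfs-test-pod -- df -h /mnt/cephfs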