[Kubernetes] Kubernetes 1.20.4 + Ceph RBD (Octopus) creation / resize (Expand)
YJWANG · 2021. 3. 18. 18:01
Prerequisites
- Ceph Cluster
- Kubernetes Cluster
- Ceph and Kubernetes must be able to reach each other over the network.
Using the Ceph RBD provisioner
Run on the Ceph node
Create a Ceph RBD pool
root@yjwang0-ceph-01:~# ceph osd pool create kubernetes
pool 'kubernetes' created
root@yjwang0-ceph-01:~# rbd pool init kubernetes
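As an optional check (not part of the original steps), you can confirm the pool exists and is tagged for RBD:
ceph osd pool ls detail | grep kubernetes
ceph osd pool application get kubernetes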
Create a Ceph auth client
root@yjwang0-ceph-01:~# ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes' mgr 'profile rbd pool=kubernetes'
[client.kubernetes]
key = AQATC1NglDFYJhAAv/N8AkW1DQ5bMS3R3VMTtQ==
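If the key is needed again later, it can be printed on its own with:
ceph auth get-key client.kubernetes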
Record the output of the following command; it will be needed for the ConfigMap below.
root@yjwang0-ceph-01:~# ceph mon dump |grep -E 'fsid|mon'
dumped monmap epoch 1
fsid 454a3b6e-9d06-4811-82dd-ae216b7a3fa2
min_mon_release 17 (quincy)
0: [v2:10.99.99.30:3300/0,v1:10.99.99.30:6789/0] mon.yjwang0-ceph-01
1: [v2:10.99.99.31:3300/0,v1:10.99.99.31:6789/0] mon.yjwang0-ceph-02
2: [v2:10.99.99.32:3300/0,v1:10.99.99.32:6789/0] mon.yjwang0-ceph-03
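The fsid alone can also be printed directly, which is handy for copying into the ConfigMap below:
ceph fsid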
Run on the k8s node
We will use the ceph-csi provisioner.
Create a ConfigMap from the information gathered above; clusterID takes the value of fsid.
root@node1:~# cat csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "454a3b6e-9d06-4811-82dd-ae216b7a3fa2",
        "monitors": [
          "10.99.99.30:6789",
          "10.99.99.31:6789",
          "10.99.99.32:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
Deploy
root@node1:~# kubectl apply -f csi-config-map.yaml
configmap/ceph-csi-config created
Create a Secret from the auth key generated above.
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  userID: kubernetes
  userKey: AQATC1NglDFYJhAAv/N8AkW1DQ5bMS3R3VMTtQ==
Deploy
root@node1:~# kubectl apply -f csi-rbd-secret.yaml
secret/csi-rbd-secret created
Configure the ceph-csi plugin
root@node1:~# kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml
serviceaccount/rbd-csi-provisioner created
clusterrole.rbac.authorization.k8s.io/rbd-external-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role created
role.rbac.authorization.k8s.io/rbd-external-provisioner-cfg created
rolebinding.rbac.authorization.k8s.io/rbd-csi-provisioner-role-cfg created
root@node1:~# kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml
serviceaccount/rbd-csi-nodeplugin created
clusterrole.rbac.authorization.k8s.io/rbd-csi-nodeplugin created
clusterrolebinding.rbac.authorization.k8s.io/rbd-csi-nodeplugin created
Create an additional related ConfigMap (KMS configuration)
root@node1:~# cat kms-config.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    {
      "vault-test": {
        "encryptionKMSType": "vault",
        "vaultAddress": "http://vault.default.svc.cluster.local:8200",
        "vaultAuthPath": "/v1/auth/kubernetes/login",
        "vaultRole": "csi-kubernetes",
        "vaultPassphraseRoot": "/v1/secret",
        "vaultPassphrasePath": "ceph-csi/",
        "vaultCAVerify": "false"
      }
    }
metadata:
  name: ceph-csi-encryption-kms-config
Deploy
root@node1:~# kubectl apply -f kms-config.yaml
configmap/ceph-csi-encryption-kms-config created
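Note: if volume encryption will not be used, the ceph-csi example manifests indicate this ConfigMap only needs to exist; a minimal sketch with an empty JSON object should be enough in that case:
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    {}
metadata:
  name: ceph-csi-encryption-kms-config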
Set up the provisioner and node plugin services
root@node1:~# kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
service/csi-rbdplugin-provisioner created
deployment.apps/csi-rbdplugin-provisioner created
root@node1:~# kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin.yaml
daemonset.apps/csi-rbdplugin created
service/csi-metrics-rbdplugin created
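It is worth waiting for the provisioner Deployment and node plugin DaemonSet pods to become Running before continuing (pod names are generated, so a simple grep is used here):
kubectl get pods | grep csi-rbdplugin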
Set up the StorageClass
root@node1:~# cat csi-rbd-sc.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
allowVolumeExpansion: true
parameters:
  clusterID: 454a3b6e-9d06-4811-82dd-ae216b7a3fa2
  pool: kubernetes
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
mountOptions:
  - discard
Deploy
root@node1:~# kubectl apply -f csi-rbd-sc.yaml
storageclass.storage.k8s.io/csi-rbd-sc created
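Optionally confirm the StorageClass was registered with the expected provisioner:
kubectl get storageclass csi-rbd-sc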
Create a PVC
root@node1:~# cat rbd-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-rbd
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
Deploy
root@node1:~# kubectl apply -f rbd-pvc.yaml
persistentvolumeclaim/test-rbd created
Check the created PVC
root@node1:~# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test-rbd Bound pvc-86d1aff3-c732-4557-aebb-b0cd48f3da55 1Gi RWO csi-rbd-sc 3s
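Since the PVC uses volumeMode: Block, a pod consumes it through volumeDevices rather than volumeMounts. A minimal sketch (the pod name, image, and devicePath are illustrative, not from the original post):
---
apiVersion: v1
kind: Pod
metadata:
  name: test-rbd-pod
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeDevices:
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: test-rbd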
Check the created volume on the Ceph side
root@yjwang0-ceph-01:~# rbd ls kubernetes
csi-vol-0271c4ae-87c8-11eb-8f13-fedf14d6325e
root@yjwang0-ceph-01:~# rbd info kubernetes/csi-vol-0271c4ae-87c8-11eb-8f13-fedf14d6325e
rbd image 'csi-vol-0271c4ae-87c8-11eb-8f13-fedf14d6325e':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 12f03b43b5bc
block_name_prefix: rbd_data.12f03b43b5bc
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Thu Mar 18 17:57:47 2021
access_timestamp: Thu Mar 18 17:57:47 2021
modify_timestamp: Thu Mar 18 17:57:47 2021
Expand PVC
For expansion to work, the StorageClass must include the controller-expand-secret-* parameters shown below and allowVolumeExpansion must be set to true.
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
allowVolumeExpansion: true
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 454a3b6e-9d06-4811-82dd-ae216b7a3fa2
  pool: kubernetes
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
mountOptions:
  - discard
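If the StorageClass was originally created without expansion enabled, allowVolumeExpansion is mutable and can be switched on in place (parameters, by contrast, cannot be changed after creation):
kubectl patch storageclass csi-rbd-sc -p '{"allowVolumeExpansion": true}'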
Now expand the PVC below to 10Gi.
root@node1:~# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test-rbd Bound pvc-6cad4f5e-0868-41e3-aac7-e86c7d887b73 5Gi RWO csi-rbd-sc 4m54s
root@yjwang0-ceph-01:~# rbd info kubernetes/csi-vol-dfb3b58c-8860-11eb-8f13-fedf14d6325e
rbd image 'csi-vol-dfb3b58c-8860-11eb-8f13-fedf14d6325e':
size 5 GiB in 1280 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 12f0d1988b66
block_name_prefix: rbd_data.12f0d1988b66
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Fri Mar 19 12:12:02 2021
access_timestamp: Fri Mar 19 12:12:02 2021
modify_timestamp: Fri Mar 19 12:12:02 2021
Edit the PVC
root@node1:~# kubectl edit pvc test-rbd
persistentvolumeclaim/test-rbd edited
...
  resources:
    requests:
      storage: 10Gi
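Instead of editing interactively, the same change can be applied with a patch:
kubectl patch pvc test-rbd -p '{"spec":{"resources":{"requests":{"storage":"10Gi"}}}}'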
Verify
root@node1:~# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
test-rbd Bound pvc-6cad4f5e-0868-41e3-aac7-e86c7d887b73 10Gi RWO csi-rbd-sc 6m45s
root@yjwang0-ceph-01:~# rbd info kubernetes/csi-vol-dfb3b58c-8860-11eb-8f13-fedf14d6325e
rbd image 'csi-vol-dfb3b58c-8860-11eb-8f13-fedf14d6325e':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 12f0d1988b66
block_name_prefix: rbd_data.12f0d1988b66
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Fri Mar 19 12:12:02 2021
access_timestamp: Fri Mar 19 12:12:02 2021
modify_timestamp: Fri Mar 19 12:12:02 2021
Check the logs
You can confirm that the external-resizer running inside ceph-csi performed the resize.
root@node1:~# kubectl describe pvc test-rbd
...
Normal Resizing 37s (x2 over 6m44s) external-resizer rbd.csi.ceph.com External resizer is resizing volume pvc-6cad4f5e-0868-41e3-aac7-e86c7d887b73
Normal VolumeResizeSuccessful 36s (x2 over 6m43s) external-resizer rbd.csi.ceph.com Resize volume succeeded
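The resize can also be followed in the external-resizer sidecar of the provisioner Deployment (the container name here assumes the ceph-csi manifests used above and may differ between versions):
kubectl logs deploy/csi-rbdplugin-provisioner -c csi-resizer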