kubespray (CentOS 8)
Deploy
[root@kf01_deploy_0 ~]# yum -y install python3-pip wget git vim epel-release
[root@kf01_deploy_0 ~]# yum -y install sshpass
-
[root@kf01_deploy_0 ~]# git clone https://github.com/kubernetes-sigs/kubespray.git
[root@kf01_deploy_0 ~]# cd kubespray/
-
[root@kf01_deploy_0 kubespray]# pip3 install -r requirements.txt
[root@kf01_deploy_0 kubespray]# cp -rfp inventory/sample inventory/mycluster
Edit the inventory file. You can use SSH keys, but here we proceed with password authentication.
If you do want to distribute keys, the command below prints a ready-made ssh-copy-id line per host, which makes it a bit easier (a piped variant is sketched after the output).
[root@ha02_master_0 kubespray]# grep 'ansible_host=' inventory/mycluster/inventory.ini |grep -v '#' |awk '{print $2}' |sed 's/ansible_host=/ssh-copy-id /'
ssh-copy-id 10.96.90.10
ssh-copy-id 10.96.90.11
ssh-copy-id 10.96.90.12
ssh-copy-id 10.96.90.20
ssh-copy-id 10.96.90.21
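As a minimal sketch (assuming the same inventory path and that each node accepts root password logins), the generated commands can be piped straight to the shell instead of being copied by hand:
# hypothetical one-liner: generate the ssh-copy-id commands and execute them immediately
[root@ha02_master_0 kubespray]# grep 'ansible_host=' inventory/mycluster/inventory.ini |grep -v '#' |awk '{print $2}' |sed 's/ansible_host=/ssh-copy-id /' | bash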
The name at the start of each inventory entry becomes that node's hostname.
# adjust to your environment
[root@kf01_deploy_0 kubespray]# vim inventory/mycluster/inventory.ini
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# ## We should set etcd_member_name for etcd cluster. The node that is not a etcd member do not need to set the value, or can set the empty string value.
[all]
master01 ansible_host=10.13.90.10 ip=10.13.62.10 etcd_member_name=etcd1
master02 ansible_host=10.13.90.11 ip=10.13.62.11
worker01 ansible_host=10.13.90.20 ip=10.13.62.20
worker02 ansible_host=10.13.90.21 ip=10.13.62.21
# ## configure a bastion host if your nodes are not directly reachable
# bastion ansible_host=x.x.x.x ansible_user=some_user
[kube-master]
master01
master02
[etcd]
master01
[kube-node]
worker01
worker02
[calico-rr]
[k8s-cluster:children]
kube-master
kube-node
calico-rr
[all:vars]
ansible_connection=ssh
ansible_user=root
ansible_ssh_pass=testtest
[root@kf01_deploy_0 kubespray]# ansible -m ping -i inventory/mycluster/inventory.ini all
worker02 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/libexec/platform-python"
    },
    "changed": false,
    "ping": "pong"
}
master01 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/libexec/platform-python"
    },
    "changed": false,
    "ping": "pong"
}
master02 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/libexec/platform-python"
    },
    "changed": false,
    "ping": "pong"
}
worker01 | SUCCESS => {
    "ansible_facts": {
        "discovered_interpreter_python": "/usr/libexec/platform-python"
    },
    "changed": false,
    "ping": "pong"
}
-
# adjust to your environment
[root@kf01_deploy_0 kubespray]# vim inventory/mycluster/group_vars/all/all.yml
[root@kf01_deploy_0 kubespray]# vim inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
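A few variables commonly checked in these files are sketched below; the values are illustrative (only cluster_name matches the kubeconfig shown later), not necessarily what this deployment used:
# inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml (illustrative values)
kube_version: v1.19.2              # Kubernetes release kubespray will install
kube_network_plugin: calico        # CNI plugin; calico is the kubespray default
kube_service_addresses: 10.233.0.0/18
kube_pods_subnet: 10.233.64.0/18
cluster_name: k8s.yjwang           # appears as the cluster name in the generated kubeconfig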
Deploy the cluster
[root@kf01_deploy_0 kubespray]# ansible-playbook -i inventory/mycluster/inventory.ini cluster.yml
...
PLAY RECAP *********************************************************************************************************************************
localhost   : ok=1     changed=0     unreachable=0    failed=0    skipped=0       rescued=0    ignored=0
master01    : ok=557   changed=121   unreachable=0    failed=0    skipped=1066    rescued=0    ignored=0
master02    : ok=413   changed=93    unreachable=0    failed=0    skipped=874     rescued=0    ignored=0
worker01    : ok=349   changed=75    unreachable=0    failed=0    skipped=569     rescued=0    ignored=0
worker02    : ok=349   changed=75    unreachable=0    failed=0    skipped=568     rescued=0    ignored=0
...
Verify the cluster deployment (run on a master node)
[root@master01 ~]# kubectl get all -A
NAMESPACE     NAME                                          READY   STATUS    RESTARTS   AGE
kube-system   pod/calico-kube-controllers-8b5ff5d58-dnxff   1/1     Running   0          2m10s
kube-system   pod/calico-node-7ngwv                         1/1     Running   0          2m55s
kube-system   pod/calico-node-8jd9z                         1/1     Running   0          2m55s
kube-system   pod/calico-node-9zhwr                         1/1     Running   0          2m55s
...
[root@master01 ~]# kubectl cluster-info
Kubernetes master is running at https://10.13.62.10:6443
[root@master01 ~]# kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://10.13.62.10:6443
  name: k8s.yjwang
contexts:
- context:
    cluster: k8s.yjwang
    user: kubernetes-admin
  name: kubernetes-admin@k8s.yjwang
current-context: kubernetes-admin@k8s.yjwang
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
Apply bash completion
[root@master01 ~]# yum -y install bash-completion
[root@master01 ~]# kubectl completion bash > /etc/bash_completion.d/kubectl
[root@master01 ~]# exit
# log in again
[root@master01 ~]# kubectl get [tab]
apiservices.apiregistration.k8s.io mutatingwebhookconfigurations.admissionregistration.k8s.io
certificatesigningrequests.certificates.k8s.io namespaces
clusterrolebindings.rbac.authorization.k8s.io networkpolicies.networking.k8s.io
...
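Optionally (not from the original post), the same completion can be attached to a short alias; the alias name `k` here is just an illustration:
# hypothetical convenience alias reusing kubectl's completion function
[root@master01 ~]# echo 'alias k=kubectl' >> ~/.bashrc
[root@master01 ~]# echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
[root@master01 ~]# source ~/.bashrc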
[Option] Enable addons and MetalLB
[root@addon01_deploy_0 ~]# grep -v ^# kubespray/inventory/addon/group_vars/k8s-cluster/addons.yml
---
dashboard_enabled: true
helm_enabled: true
...
metallb_enabled: true
metallb_ip_range:
  - "10.18.62.50-10.18.62.99"
metallb_version: v0.9.3
metallb_protocol: "layer2"
metallb_port: "7472"
-
[root@addon01_deploy_0 ~]# grep strict kubespray/inventory/addon/group_vars/k8s-cluster/k8s-cluster.yml
kube_proxy_strict_arp: true
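To apply the addon settings, the cluster playbook is run again against the same inventory, and MetalLB can then be exercised with a LoadBalancer Service. The commands below are a sketch under those assumptions; the inventory/addon/inventory.ini path and the nginx test deployment are illustrative, not from the original post:
[root@addon01_deploy_0 ~]# cd kubespray
[root@addon01_deploy_0 kubespray]# ansible-playbook -i inventory/addon/inventory.ini cluster.yml
# on a master node, check the MetalLB pods and expose a test service
[root@master01 ~]# kubectl -n metallb-system get pods
[root@master01 ~]# kubectl create deployment nginx --image=nginx
[root@master01 ~]# kubectl expose deployment nginx --port=80 --type=LoadBalancer
[root@master01 ~]# kubectl get svc nginx   # EXTERNAL-IP should come from 10.18.62.50-99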