diff --git a/TuringPi/.kubernetes.md.kate-swp b/TuringPi/.kubernetes.md.kate-swp deleted file mode 100644 index 85d80bb..0000000 Binary files a/TuringPi/.kubernetes.md.kate-swp and /dev/null differ diff --git a/TuringPi/kubernetes.md b/TuringPi/kubernetes.md index 6e1cda7..0470b47 100644 --- a/TuringPi/kubernetes.md +++ b/TuringPi/kubernetes.md @@ -98,7 +98,7 @@ PasswordAuthentication no sudo systemctl restart sshd ``` -#### On shell1 : +#### On sandy : ```bash ssh-keygen -t ed25519 @@ -124,14 +124,14 @@ python3 -m pip install --user ansible [master] sandy ansible_connection=local -[node] +[workers] gary ansible_connection=ssh sheldon ansible_connection=ssh pearl ansible_connection=ssh [k3s_cluster:children] master -node +workers ``` ## UFW @@ -221,20 +221,20 @@ git clone https://github.com/k3s-io/k3s-ansible.git cp -R k3s-ansible/inventory-sample.yml k3s-ansible/inventory/bikiniBottom.yaml ``` -`vim www/k3s-ansible/inventory/sandy/hosts.ini` +`vim www/k3s-ansible/inventory/bikiniBottom.yaml` ```ini [master] -192.168.1.14 ansible_connection=ssh var_hostname=sandy var_disk=sda1 var_uuid=6fab06af-f38f-493a-87ab-512f52a6616c +192.168.1.14 ansible_connection=ssh var_hostname=sandy var_disk=nvme0n1 var_uuid=e2e3a295-9731-4aa2-996f-c72c2b81f40f -[node] -192.168.1.13 ansible_connection=ssh var_hostname=gary var_disk=sda1 var_uuid=7f348b05-b44e-4b85-8445-657dc95e72df -192.168.1.16 ansible_connection=ssh var_hostname=shedon var_disk=sda1 var_uuid=a612717e-ca95-44a3-9b10-10e6be26112f -192.168.1.19 ansible_connection=ssh var_hostname=pearl var_disk=sda1 var_uuid=75755b0b-e727-41df-a8ae-7f9fe59c36e0 +[workers] +192.168.1.13 ansible_connection=ssh var_hostname=gary var_disk=nvme0n1 var_uuid=124046b7-f656-4691-933b-4673e5abbb17 +192.168.1.16 ansible_connection=ssh var_hostname=sheldon var_disk=nvme0n1 var_uuid=fd0d6603-c6f1-43fe-a993-39f79b4e3eb8 +192.168.1.19 ansible_connection=ssh var_hostname=pearl var_disk=nvme0n1 var_uuid=6d255d62-eafe-4333-87b0-9e0c6bf6a44d 
[k3s_cluster:children] master -node +workers ``` `vim www/k3s-ansible/inventory/sandy/group_vars/all.yml` @@ -244,9 +244,9 @@ node #### Install playbook ```bash -ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping -ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m apt -a "name=iptables state=present" --become -ansible-playbook ~/www/k3s-ansible/site.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini +ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -m ping +ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -m apt -a "name=iptables state=present" --become +ansible-playbook ~/delmar.bzh/ansible/k3s-ansible/playbooks/site.yml -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml ``` #### On master: @@ -273,7 +273,7 @@ sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl kubectl version --client -scp pleb@shell1:~/.kube/config ~/.kube/config +scp pleb@sandy:~/.kube/config ~/.kube/config export KUBECONFIG=~/.kube/config @@ -290,7 +290,7 @@ sudo k3s kubectl label nodes gary kubernetes.io/role=worker sudo k3s kubectl label nodes sheldon kubernetes.io/role=worker sudo k3s kubectl label nodes pearl kubernetes.io/role=worker -# sudo k3s kubectl label nodes shell1 node-type=worker +# sudo k3s kubectl label nodes sandy node-type=worker sudo k3s kubectl label nodes gary node-type=worker sudo k3s kubectl label nodes sheldon node-type=worker sudo k3s kubectl label nodes pearl node-type=worker @@ -298,17 +298,17 @@ sudo k3s kubectl label nodes pearl node-type=worker #### ! Ping ! -`ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping` +`ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -m ping` #### ! Reset ! 
```bash -ansible-playbook ~/www/k3s-ansible/reset.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini +ansible-playbook ~/delmar.bzh/ansible/k3s-ansible/playbooks/reset.yml -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml ``` #### ! Restart ! -`ansible all -i ~/www/k3s-ansible/inventory/sandy/hosts.ini -a "shutdown -r now" -b` +`ansible all -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml -a "shutdown -r now" -b` ## 3. Helm @@ -338,7 +338,7 @@ metadata: namespace: metallb-system spec: addresses: - - 192.168.1.30-192.168.1.49 + - 192.168.1.21-192.168.1.49 --- apiVersion: metallb.io/v1beta1 kind: L2Advertisement @@ -357,20 +357,20 @@ EOF sudo systemctl enable iscsid.service sudo systemctl start iscsid.service -ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=nfs-common state=present" -ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=open-iscsi state=present" -ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=util-linux state=present" +ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m apt -a "name=nfs-common state=present" +ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m apt -a "name=open-iscsi state=present" +ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m apt -a "name=util-linux state=present" ``` ```bash -ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "lsblk -f" -ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "blkid -s UUID -o value /dev/..." +ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m shell -a "lsblk -f" +ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m shell -a "blkid -s UUID -o value /dev/..." 
``` #### Ansible mount: ```bash -ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ansible.posix.mount -a "path=/mnt/data src=UUID={{ var_uuid }} fstype=ext4 state=mounted" -b +ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -m ansible.posix.mount -a "path=/mnt/data src=UUID={{ var_uuid }} fstype=ext4 state=mounted" -b ``` ### Longhorn