Kubernetes

1. Compute Modules

Install OS / headless installation

  1. Flash the Plebian image to an SD card / eMMC (a dd flashing sketch follows the sudoers setup below).
  2. Boot.
  3. SSH in as "pleb" (password "pleb") and complete the setup below.

Allow passwordless sudo:

sudo vim /etc/sudoers.d/pleb

pleb ALL=(ALL:ALL) NOPASSWD: ALL
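
Flashing sketch, assuming the image was downloaded as plebian.img.xz (placeholder name) and that the target SD card / eMMC shows up as /dev/sdX (check with lsblk first; dd overwrites the whole device):

lsblk
xz -dc plebian.img.xz | sudo dd of=/dev/sdX bs=4M status=progress conv=fsync
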
sudo apt update && sudo apt install -y curl 

curl -O https://overviewer.org/~pillow/up/75bea78e59/devicetrees-plebian-quartz64-20230601130309-arm64.deb

sudo dpkg -i devicetrees-plebian-quartz64-20230601130309-arm64.deb

# sudo sysctl -w net.core.rmem_max=2500000

sudo apt update && sudo apt -y upgrade

sudo hostnamectl set-hostname shell1

sudo apt install -y git wget screenfetch net-tools open-iscsi python3 python3-pip build-essential libssl-dev libffi-dev python3-dev

sudo apt install -y docker docker-compose

Reboot

sudo blkid -s UUID -o value /dev/sda1

shell1 | CHANGED | rc=0 >>  
UUID=a4e3d8f1-11cc-482b-9596-57c44b122e48  
shell2 | CHANGED | rc=0 >>  
UUID=e264ad26-0767-4e6c-9131-0671f4ecfad4  
shell3 | CHANGED | rc=0 >>  
UUID=2056f43e-f41c-4772-9bf9-8f5a09dc911c

sudo mkfs.ext4 /dev/sda1

sudo mkdir -p /mnt/data

echo "UUID=... /mnt/data ext4 defaults 0 0" | sudo tee -a /etc/fstab

cat /etc/fstab

sudo mount -a
sudo systemctl daemon-reload
df -h /mnt/data

sudo vim /etc/hosts

# ----------------------------------------
# Host addresses
127.0.0.1  localhost shell1
# Nodes
192.168.1.109 shell1 shell1.local 
192.168.1.163 shell2 shell2.local
#192.168.1. shell3 shell3.local
#192.168.1. shell4 shell4.local
# ----------------------------------------

On remote (PC):

ssh-keygen -t ed25519
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell1
ssh pleb@shell1

sudo vim /etc/ssh/sshd_config

# ----------------------------------------
PasswordAuthentication no
# ----------------------------------------

Restart the SSH service:

sudo systemctl restart sshd

On shell1:

ssh-keygen -t ed25519

ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell2
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell3
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell4

ssh pleb@shell2
ssh pleb@shell3
ssh pleb@shell4

curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python3 get-pip.py --user

python3 -m pip install --user ansible
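
pip --user installs into ~/.local/bin, which may not be on PATH yet; a quick check:

export PATH="$HOME/.local/bin:$PATH"
ansible --version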

vim hosts.ini

[master]
shell1  ansible_connection=local

[node]
shell2  ansible_connection=ssh
shell3  ansible_connection=ssh
shell4  ansible_connection=ssh

[k3s_cluster:children]
master
node
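
A quick connectivity check against this inventory (assuming it was saved as hosts.ini in the current directory):

ansible -i hosts.ini k3s_cluster -m ping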

UFW

sudo apt install -y ufw

sudo ufw allow "OpenSSH"
sudo ufw enable

sudo ufw allow 6443/tcp
sudo ufw allow 2379:2380/tcp
sudo ufw allow 10250/tcp
sudo ufw allow 10259/tcp
sudo ufw allow 10257/tcp

sudo ufw allow 179/tcp
sudo ufw allow 4789/udp
sudo ufw allow 4789/tcp
sudo ufw allow 2379/tcp

sudo ufw allow 30000:32767/tcp

sudo ufw status

Docker

for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done

sudo apt-get update

sudo apt-get install ca-certificates curl gnupg

sudo install -m 0755 -d /etc/apt/keyrings

curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

sudo chmod a+r /etc/apt/keyrings/docker.gpg

echo \
  "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
  "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
  sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

sudo apt-get update

sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

sudo service docker start

sudo docker run hello-world

sudo chmod 666 /var/run/docker.sock

sudo groupadd docker

sudo usermod -aG docker $USER

sudo systemctl enable docker.service
sudo systemctl enable containerd.service
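
To use docker without sudo in the current session (newgrp opens a subshell with the new group applied; alternatively log out and back in):

newgrp docker
docker run hello-world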

Portainer

docker run -d -p 8000:8000 -p 9443:9443 --name portainer --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer-ee:latest

2. Install Kubernetes (via Ansible)

k3s-ansible

Install Ansible

On remote (PC):

sudo apt install -y ansible

Clone the Git repo: https://github.com/k3s-io/k3s-ansible

cd www

git clone https://github.com/k3s-io/k3s-ansible.git

cp -R k3s-ansible/inventory/sample k3s-ansible/inventory/sandy

vim www/k3s-ansible/inventory/sandy/hosts.ini

[master]
192.168.1.209  ansible_connection=ssh  var_hostname=shell1  var_disk=sda1  var_uuid=e13c29b3-5263-4ae7-82df-8ccdcc78e0b2

[node]
192.168.1.224  ansible_connection=ssh  var_hostname=shell2  var_disk=sda1  var_uuid=85efb43b-5386-4a2d-9128-9a89b10538fa
192.168.1.123  ansible_connection=ssh  var_hostname=shell3  var_disk=sda1  var_uuid=08af04f0-a35c-447d-9651-c46675a27142
192.168.1.233  ansible_connection=ssh  var_hostname=shell4

[k3s_cluster:children]
master
node

vim www/k3s-ansible/inventory/sandy/group_vars/all.yml

Change ansible_user to pleb.
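
The relevant line in all.yml (other values can stay at the sample defaults):

ansible_user: pleb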

Run the install playbook:

ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m apt -a "name=iptables state=present" --become
ansible-playbook ~/www/k3s-ansible/site.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini

On master:

sudo chmod 644 /etc/rancher/k3s/k3s.yaml

mkdir ~/.kube
sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
sudo chown $USER: ~/.kube/config
export KUBECONFIG=~/.kube/config

On PC:

curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"

echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check

sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

kubectl version --client 

scp pleb@shell1:~/.kube/config ~/.kube/config

export KUBECONFIG=~/.kube/config
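
The k3s.yaml generated on the node points at https://127.0.0.1:6443, so after copying it to the PC the server address has to be changed to the master; a sketch, assuming shell1 is reachable at 192.168.1.109 as in the /etc/hosts example above:

sed -i 's/127.0.0.1/192.168.1.109/' ~/.kube/config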

kubectl get nodes
    
kubectl cluster-info

Setting labels:

# sudo k3s kubectl label nodes shell1 kubernetes.io/role=worker
sudo k3s kubectl label nodes shell2 kubernetes.io/role=worker
sudo k3s kubectl label nodes shell3 kubernetes.io/role=worker
sudo k3s kubectl label nodes shell4 kubernetes.io/role=worker

# sudo k3s kubectl label nodes shell1 node-type=worker
sudo k3s kubectl label nodes shell2 node-type=worker
sudo k3s kubectl label nodes shell3 node-type=worker
sudo k3s kubectl label nodes shell4 node-type=worker
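
Check that the labels were applied:

sudo k3s kubectl get nodes --show-labels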

! Ping !

ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping

! Reset !

ansible-playbook ~/www/k3s-ansible/reset.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini

! Restart !

ansible all -i ~/www/k3s-ansible/inventory/sandy/hosts.ini -a "shutdown -r now" -b

3. Helm

On master:

curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
helm version

4. MetalLB

helm repo add metallb https://metallb.github.io/metallb
helm search repo metallb
helm upgrade --install metallb metallb/metallb --create-namespace --namespace metallb-system --wait
cat << 'EOF' | kubectl apply -f -
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.30-192.168.1.49
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default-pool
EOF
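
A quick smoke test of the address pool (lb-test is a throwaway name; the service should receive an EXTERNAL-IP from the 192.168.1.30-49 range):

kubectl create deployment lb-test --image=nginx
kubectl expose deployment lb-test --type=LoadBalancer --port=80
kubectl get svc lb-test
kubectl delete service,deployment lb-test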

5. Local Storage Provider (Longhorn)

sudo systemctl enable iscsid.service
sudo systemctl start iscsid.service

ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=nfs-common state=present"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=open-iscsi state=present"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=util-linux state=present"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "lsblk -f"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "blkid -s UUID -o value /dev/sda1"

Ansible mount:

ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ansible.posix.mount -a "path=/mnt/data src=UUID={{ var_uuid }} fstype=ext4 state=mounted" -b

Longhorn

helm repo add longhorn https://charts.longhorn.io

helm repo update

helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.4.2 --set defaultSettings.defaultDataPath="/mnt/data" --set service.ui.loadBalancerIP="192.168.1.31" --set service.ui.type="LoadBalancer"

sudo k3s kubectl -n longhorn-system get pod

sudo k3s kubectl get storageclass

NAME                  PROVISIONER            RECLAIMPOLICY  VOLUMEBINDINGMODE     ALLOWVOLUMEEXPANSION  AGE
local-path (default)  rancher.io/local-path  Delete         WaitForFirstConsumer  false                 111m
longhorn (default)    driver.longhorn.io     Delete         Immediate             true                  65m

Mark local-path as "non-default":

sudo k3s kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'

Mark longhorn as "default":

kubectl patch storageclass longhorn -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

UI

USER=admin; PASSWORD=transatlantique; echo "${USER}:$(openssl passwd -stdin -apr1 <<< ${PASSWORD})" >> auth
sudo k3s kubectl -n longhorn-system create secret generic basic-auth --from-file=auth

vim longhorn-ingress.yml

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
  annotations:
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/ssl-redirect: 'false'
    nginx.ingress.kubernetes.io/auth-secret: basic-auth
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required '
    nginx.ingress.kubernetes.io/proxy-body-size: 10000m
spec:
  rules:
  - http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: longhorn-frontend
            port:
              number: 80
---
# Alternative: expose the UI on a hostname with a cert-manager TLS certificate
# (this variant still uses the deprecated extensions/v1beta1 Ingress API)
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
  annotations:
    # add an annotation indicating the issuer to use
    cert-manager.io/cluster-issuer: letsencrypt-staging
spec:
  rules:
  - host: dashboard.delmar.bzh
    http:
      paths:
      - path: /
        backend:
          serviceName: longhorn-frontend
          servicePort: 8090
  tls:
  - # cert-manager will store the certificate and key in this secret
    secretName: dashboard-delmar-bzh-cert 
    hosts:
    - dashboard.delmar.bzh

sudo k3s kubectl -n longhorn-system apply -f longhorn-ingress.yml
sudo k3s kubectl -n longhorn-system get ingress
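
Once an ingress controller is serving it, the UI can be checked with the basic-auth credentials created above (replace <ingress-ip> with the controller's address):

curl -u admin:transatlantique http://<ingress-ip>/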

6. HAProxy

helm repo add haproxytech https://haproxytech.github.io/helm-charts

helm install haproxy haproxytech/kubernetes-ingress -n kube-system \
  --set controller.service.nodePorts.http=30000 \
  --set controller.service.nodePorts.https=30001 \
  --set controller.service.nodePorts.stat=30002 \
  --set controller.service.type=LoadBalancer
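
To see which address MetalLB assigned to the controller (the chart names the service <release>-kubernetes-ingress, so haproxy-kubernetes-ingress here; adjust if your release name differs):

kubectl get svc -n kube-system haproxy-kubernetes-ingress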

7. Services

...

Uninstall

On master:

/usr/local/bin/k3s-uninstall.sh

On workers:

/usr/local/bin/k3s-agent-uninstall.sh