# Kubernetes
|
|
|
|
## 1. Compute Modules
|
|
|
|
### Install OS / headless installation
|
|
|
|
1. Flash the Plebian image to an SD card / eMMC.
|
|
2. Boot.
|
|
3. SSH in as user "pleb" (default password "pleb") and complete the setup:
|
|
|
|
```bash
|
|
sudo useradd pleb
|
|
sudo groupadd sudo
|
|
sudo usermod -aG sudo pleb
|
|
|
|
echo "pleb ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/pleb
|
|
```
|
|
|
|
```bash
|
|
sudo apt update && sudo apt install -y curl
|
|
|
|
# curl -O https://overviewer.org/~pillow/up/75bea78e59/devicetrees-plebian-quartz64-20230601130309-arm64.deb
|
|
# sudo dpkg -i devicetrees-plebian-quartz64-20230601130309-arm64.deb
|
|
# sudo sysctl -w net.core.rmem_max=2500000
|
|
|
|
sudo apt update && sudo apt -y upgrade
|
|
|
|
sudo hostnamectl set-hostname <hostname>
|
|
|
|
sudo apt install -y git wget screenfetch net-tools open-iscsi python3 python3-pip build-essential libssl-dev libffi-dev python3-dev
|
|
|
|
sudo apt install -y docker docker-compose
|
|
```
|
|
|
|
#### Reboot
|
|
|
|
```bash
|
|
sudo mkfs.ext4 /dev/nvme0n1
|
|
|
|
sudo mkdir -p /mnt/ssd
|
|
|
|
sudo blkid -s UUID -o value /dev/nvme0n1
|
|
|
|
> sandy UUID=e2e3a295-9731-4aa2-996f-c72c2b81f40f
|
|
|
|
> gary UUID=124046b7-f656-4691-933b-4673e5abbb17
|
|
|
|
> sheldon UUID=fd0d6603-c6f1-43fe-a993-39f79b4e3eb8
|
|
|
|
> pearl UUID=
|
|
|
|
echo "UUID=... /mnt/ssd ext4 defaults 0 0" | sudo tee -a /etc/fstab
|
|
|
|
cat /etc/fstab
|
|
|
|
sudo mount -a
|
|
sudo systemctl daemon-reload
|
|
df -h /mnt/ssd
|
|
```
|
|
|
|
`sudo vim /etc/hosts`
|
|
|
|
```
|
|
# ----------------------------------------
|
|
# Host addresses
|
|
127.0.0.1 localhost
|
|
# Nodes
|
|
192.168.1.13 gary gary.local
|
|
192.168.1.14 sandy sandy.local
|
|
192.168.1.16 sheldon sheldon.local
|
|
192.168.1.19 pearl pearl.local
|
|
# ----------------------------------------
|
|
```
|
|
|
|
#### On remote (PC) :
|
|
|
|
```bash
|
|
ssh-keygen -t ed25519
|
|
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@gary
|
|
ssh pleb@gary
|
|
|
|
vim /etc/ssh/sshd_config
|
|
```
|
|
|
|
```
|
|
# ----------------------------------------
|
|
PasswordAuthentication no
|
|
# ----------------------------------------
|
|
```
|
|
|
|
#### Restart service
|
|
|
|
```bash
|
|
sudo systemctl restart sshd
|
|
```
|
|
|
|
#### On shell1 :
|
|
|
|
```bash
|
|
ssh-keygen -t ed25519
|
|
|
|
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@sandy
|
|
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@sheldon
|
|
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@pearl
|
|
|
|
ssh pleb@sandy
|
|
ssh pleb@sheldon
|
|
ssh pleb@pearl
|
|
|
|
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
|
|
python3 get-pip.py --user
|
|
|
|
python3 -m pip install --user ansible
|
|
```
|
|
|
|
`vim hosts.ini`
|
|
|
|
```ini
|
|
[master]
|
|
sandy ansible_connection=local
|
|
|
|
[node]
|
|
gary ansible_connection=ssh
|
|
sheldon ansible_connection=ssh
|
|
pearl ansible_connection=ssh
|
|
|
|
[k3s_cluster:children]
|
|
master
|
|
node
|
|
```
|
|
|
|
## UFW
|
|
|
|
```bash
|
|
sudo apt install -y ufw
|
|
|
|
sudo ufw allow "OpenSSH"
|
|
sudo ufw enable
|
|
|
|
sudo ufw allow 6443/tcp
|
|
sudo ufw allow 2379:2380/tcp
|
|
sudo ufw allow 10250/tcp
|
|
sudo ufw allow 10259/tcp
|
|
sudo ufw allow 10257/tcp
|
|
|
|
sudo ufw allow 179/tcp
|
|
sudo ufw allow 4789/udp
|
|
sudo ufw allow 4789/tcp
|
|
sudo ufw allow 2379/tcp
|
|
|
|
sudo ufw allow 30000:32767/tcp
|
|
|
|
sudo ufw status
|
|
```
|
|
|
|
```bash
|
|
for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done
|
|
|
|
sudo apt-get update
|
|
|
|
sudo apt-get install ca-certificates curl gnupg
|
|
|
|
sudo install -m 0755 -d /etc/apt/keyrings
|
|
|
|
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
|
|
|
|
sudo chmod a+r /etc/apt/keyrings/docker.gpg
|
|
|
|
echo \
|
|
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
|
|
"$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
|
|
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
|
|
|
|
sudo apt-get update
|
|
|
|
sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
|
|
|
|
sudo service docker start
|
|
|
|
sudo docker run hello-world
|
|
|
|
sudo chmod 666 /var/run/docker.sock
|
|
|
|
sudo groupadd docker
|
|
|
|
sudo usermod -aG docker $USER
|
|
|
|
sudo systemctl enable docker.service
|
|
sudo systemctl enable containerd.service
|
|
```
|
|
|
|
```bash
|
|
docker run -d -p 8000:8000 -p 9443:9443 --name portainer --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer-ee:latest
|
|
```
|
|
|
|
## 2. Install Kubernetes (via Ansible)
|
|
|
|
### k3s-ansible
|
|
|
|
#### Install Ansible
|
|
|
|
- [https://wiki.archlinux.org/title/Ansible#Installation](https://wiki.archlinux.org/title/Ansible#Installation "https://wiki.archlinux.org/title/Ansible#Installation")
|
|
- [https://docs.ansible.com/ansible/latest/installation\_guide/index.html](https://docs.ansible.com/ansible/latest/installation_guide/index.html "https://docs.ansible.com/ansible/latest/installation_guide/index.html")
|
|
|
|
#### On remote (PC):
|
|
|
|
`apt install -y ansible`
|
|
|
|
#### Clone Git repo : [https://github.com/k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible "https://github.com/k3s-io/k3s-ansible")
|
|
|
|
```bash
|
|
cd www
|
|
|
|
git clone https://github.com/k3s-io/k3s-ansible.git
|
|
|
|
cp -R k3s-ansible/inventory/sample k3s-ansible/inventory/sandy
|
|
```
|
|
|
|
`vim www/k3s-ansible/inventory/sandy/hosts.ini`
|
|
|
|
```ini
|
|
[master]
|
|
192.168.1.14 ansible_connection=ssh var_hostname=sandy var_disk=sda1 var_uuid=6fab06af-f38f-493a-87ab-512f52a6616c
|
|
|
|
[node]
|
|
192.168.1.13 ansible_connection=ssh var_hostname=gary var_disk=sda1 var_uuid=7f348b05-b44e-4b85-8445-657dc95e72df
|
|
192.168.1.16 ansible_connection=ssh var_hostname=sheldon var_disk=sda1 var_uuid=a612717e-ca95-44a3-9b10-10e6be26112f
|
|
192.168.1.19 ansible_connection=ssh var_hostname=pearl
|
|
|
|
[k3s_cluster:children]
|
|
master
|
|
node
|
|
```
|
|
|
|
`vim www/k3s-ansible/inventory/sandy/group_vars/all.yml`
|
|
|
|
> Change `ansible_user` to `pleb`.
|
|
|
|
#### Install playbook
|
|
|
|
```bash
|
|
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping
|
|
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m apt -a "name=iptables state=present" --become
|
|
ansible-playbook ~/www/k3s-ansible/site.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini
|
|
```
|
|
|
|
#### On master:
|
|
|
|
```bash
|
|
sudo chmod 644 /etc/rancher/k3s/k3s.yaml
|
|
|
|
mkdir ~/.kube
|
|
sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
|
|
sudo chown $USER: ~/.kube/config
|
|
export KUBECONFIG=~/.kube/config
|
|
```
|
|
|
|
#### On PC :
|
|
|
|
```bash
|
|
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
|
|
|
|
curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
|
|
|
|
echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
|
|
|
|
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
|
|
|
|
kubectl version --client
|
|
|
|
scp pleb@shell1:~/.kube/config ~/.kube/config
|
|
|
|
export KUBECONFIG=~/.kube/config
|
|
|
|
kubectl get nodes
|
|
|
|
kubectl cluster-info
|
|
```
|
|
|
|
#### Setting labels:
|
|
|
|
```bash
|
|
# sudo k3s kubectl label nodes xxx kubernetes.io/role=worker
|
|
sudo k3s kubectl label nodes gary kubernetes.io/role=worker
|
|
sudo k3s kubectl label nodes sheldon kubernetes.io/role=worker
|
|
sudo k3s kubectl label nodes pearl kubernetes.io/role=worker
|
|
|
|
# sudo k3s kubectl label nodes shell1 node-type=worker
|
|
sudo k3s kubectl label nodes gary node-type=worker
|
|
sudo k3s kubectl label nodes sheldon node-type=worker
|
|
sudo k3s kubectl label nodes pearl node-type=worker
|
|
```
|
|
|
|
#### ! Ping !
|
|
|
|
`ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping`
|
|
|
|
#### ! Reset !
|
|
|
|
```bash
|
|
ansible-playbook ~/www/k3s-ansible/reset.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini
|
|
```
|
|
|
|
#### ! Restart !
|
|
|
|
`ansible all -i ~/www/k3s-ansible/inventory/sandy/hosts.ini -a "shutdown -r now" -b`
|
|
|
|
## 3. Helm
|
|
|
|
#### On master
|
|
|
|
```bash
|
|
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
|
|
chmod 700 get_helm.sh
|
|
./get_helm.sh
|
|
helm version
|
|
```
|
|
|
|
## 4. MetalLb
|
|
|
|
```bash
|
|
helm repo add metallb https://metallb.github.io/metallb
|
|
helm search repo metallb
|
|
helm upgrade --install metallb metallb/metallb --create-namespace --namespace metallb-system --wait
|
|
```
|
|
|
|
```bash
|
|
cat << 'EOF' | kubectl apply -f -
|
|
apiVersion: metallb.io/v1beta1
|
|
kind: IPAddressPool
|
|
metadata:
|
|
name: default-pool
|
|
namespace: metallb-system
|
|
spec:
|
|
addresses:
|
|
- 192.168.1.30-192.168.1.49
|
|
---
|
|
apiVersion: metallb.io/v1beta1
|
|
kind: L2Advertisement
|
|
metadata:
|
|
name: default
|
|
namespace: metallb-system
|
|
spec:
|
|
ipAddressPools:
|
|
- default-pool
|
|
EOF
|
|
```
|
|
|
|
## 5. Local Storage Provider (Longhorn)
|
|
|
|
```bash
|
|
sudo systemctl enable iscsid.service
|
|
sudo systemctl start iscsid.service
|
|
|
|
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=nfs-common state=present"
|
|
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=open-iscsi state=present"
|
|
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=util-linux state=present"
|
|
```
|
|
|
|
```bash
|
|
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "lsblk -f"
|
|
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "blkid -s UUID -o value /dev/..."
|
|
```
|
|
|
|
#### Ansible mount:
|
|
|
|
```bash
|
|
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ansible.posix.mount -a "path=/mnt/data src=UUID={{ var_uuid }} fstype=ext4 state=mounted" -b
|
|
```
|
|
|
|
### Longhorn
|
|
|
|
```bash
|
|
helm repo add longhorn https://charts.longhorn.io
|
|
|
|
helm repo update
|
|
|
|
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.4.2 --set defaultSettings.defaultDataPath="/mnt/data" --set service.ui.loadBalancerIP="192.168.1.31" --set service.ui.type="LoadBalancer"
|
|
|
|
sudo k3s kubectl -n longhorn-system get pod
|
|
|
|
sudo k3s kubectl get storageclass
|
|
```
|
|
|
|
- local-path (default) rancher.io/local-path Delete WaitForFirstConsumer false 111m
|
|
- longhorn (default) driver.longhorn.io Delete Immediate true 65m

Mark as "non-default"
|
|
|
|
```bash
|
|
sudo k3s kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
|
|
```
|
|
|
|
Mark as "default"
|
|
|
|
```bash
|
|
kubectl patch storageclass longhorn -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
|
|
```
|
|
|
|
#### UI
|
|
|
|
```bash
|
|
USER=admin; PASSWORD=transatlantique; echo "${USER}:$(openssl passwd -stdin -apr1 <<< ${PASSWORD})" >> auth
|
|
sudo k3s kubectl -n longhorn-system create secret generic basic-auth --from-file=auth
|
|
```
|
|
|
|
`vim longhorn-ingress.yml`
|
|
|
|
```yaml
|
|
apiVersion: networking.k8s.io/v1
|
|
kind: Ingress
|
|
metadata:
|
|
name: longhorn-ingress
|
|
namespace: longhorn-system
|
|
annotations:
|
|
nginx.ingress.kubernetes.io/auth-type: basic
|
|
nginx.ingress.kubernetes.io/ssl-redirect: 'false'
|
|
nginx.ingress.kubernetes.io/auth-secret: basic-auth
|
|
nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required '
|
|
nginx.ingress.kubernetes.io/proxy-body-size: 10000m
|
|
spec:
|
|
rules:
|
|
- http:
|
|
paths:
|
|
- pathType: Prefix
|
|
path: "/"
|
|
backend:
|
|
service:
|
|
name: longhorn-frontend
|
|
port:
|
|
number: 80
|
|
```
|
|
|
|
```yaml
|
|
apiVersion: extensions/v1beta1
|
|
kind: Ingress
|
|
metadata:
|
|
name: longhorn-ingress
|
|
namespace: longhorn-system
|
|
annotations:
|
|
# add an annotation indicating the issuer to use
|
|
cert-manager.io/cluster-issuer: letsencrypt-staging
|
|
spec:
|
|
rules:
|
|
- host: dashboard.delmar.bzh
|
|
http:
|
|
paths:
|
|
- path: /
|
|
backend:
|
|
serviceName: longhorn-frontend
|
|
servicePort: 8090
|
|
tls:
|
|
- # cert-manager will store the certificate and key in this secret
|
|
secretName: dashboard-delmar-bzh-cert
|
|
hosts:
|
|
- dashboard.delmar.bzh
|
|
```
|
|
|
|
```bash
|
|
sudo k3s kubectl -n longhorn-system apply -f longhorn-ingress.yml
|
|
sudo k3s kubectl -n longhorn-system get ingress
|
|
```
|
|
|
|
## 6. HAProxy
|
|
|
|
```bash
|
|
helm repo add haproxytech https://haproxytech.github.io/helm-charts
|
|
|
|
helm install haproxy haproxytech/kubernetes-ingress -n kube-system \
|
|
--set controller.service.nodePorts.http=30000 \
|
|
--set controller.service.nodePorts.https=30001 \
|
|
--set controller.service.nodePorts.stat=30002 \
|
|
--set controller.service.type=LoadBalancer
|
|
```
|
|
|
|
## 7. Services
|
|
|
|
...
|
|
|
|
## Uninstall
|
|
|
|
> On master:
|
|
|
|
`/usr/local/bin/k3s-uninstall.sh`
|
|
|
|
> On workers:
|
|
|
|
`/usr/local/bin/k3s-agent-uninstall.sh`
|