Files
bikinibottom/TuringPi/k3sup.md
2026-02-18 17:46:23 +01:00

7.4 KiB

Installation de kubectl

# Install kubectl from the official Kubernetes apt repository (v1.33 channel)
sudo apt-get update && sudo apt-get install -y apt-transport-https ca-certificates curl gnupg
# The keyring directory must exist and be world-readable
sudo mkdir -p -m 755 /etc/apt/keyrings
# Fetch and dearmor the repository signing key
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.33/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
sudo chmod 644 /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# Register the apt source, pinned to the keyring above
echo 'deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.33/deb/ /' | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo chmod 644 /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl

Installation de k3sup :

# With curl (the installer script detects the CPU architecture automatically)
curl -sLS https://get.k3sup.dev | sh
sudo install k3sup /usr/local/bin/

# Or direct download from GitHub — pick the binary matching the Turing Pi's
# CPU: plain "k3sup" is x86_64 and will NOT run on the ARM nodes; use
# k3sup-arm64 (64-bit ARM) and rename it on install.
wget https://github.com/alexellis/k3sup/releases/download/0.13.11/k3sup-arm64
chmod +x k3sup-arm64
sudo mv k3sup-arm64 /usr/local/bin/k3sup

Configuration sudo sans mot de passe :

# Add the user to the sudo group
sudo usermod -aG sudo pleb

# Allow passwordless sudo for k3sup's SSH sessions.
# sudoers.d fragments should be root-owned and mode 0440 (tee leaves 0644).
echo "pleb ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/pleb
sudo chmod 440 /etc/sudoers.d/pleb
# Validate the fragment — a syntax error in sudoers can lock you out of sudo
sudo visudo -cf /etc/sudoers.d/pleb

Premier déploiement :

# Install the master (control-plane) node — k3sup connects over SSH and
# writes a ready-to-use kubeconfig into the current directory
k3sup install --ip 192.168.1.14 --user pleb --ssh-key $HOME/.ssh/bikiniBottom

# Join the worker nodes against the control-plane at .14
k3sup join --ip 192.168.1.13 --server-ip 192.168.1.14 --user pleb --ssh-key $HOME/.ssh/bikiniBottom
k3sup join --ip 192.168.1.16 --server-ip 192.168.1.14 --user pleb --ssh-key $HOME/.ssh/bikiniBottom
# Fourth node kept commented out — presumably not provisioned yet; TODO confirm
# k3sup join --ip 192.168.1.19 --server-ip 192.168.1.14 --user pleb --ssh-key $HOME/.ssh/bikiniBottom

Vérification du cluster :

# Point kubectl at the kubeconfig that k3sup wrote locally
# (replace the placeholder with the real path, e.g. $PWD/kubeconfig)
export KUBECONFIG=/local/path/to/kubeconfig

# All nodes should report STATUS=Ready
kubectl get nodes -o wide

# System pods should all reach Running
kubectl get pods -n kube-system

Validation :

kubectl get nodes

NAME      STATUS   ROLES           AGE   VERSION
gary      Ready    <none>          77s   v1.34.4+k3s1
sandy     Ready    control-plane   39m   v1.34.4+k3s1
sheldon   Ready    <none>          48s   v1.34.4+k3s1

Helm

On sandy

# Install Helm 3 via the official installer script, then sanity-check it
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
helm version

Headlamp

# Add the Headlamp chart repository to the local helm repos
helm repo add headlamp https://kubernetes-sigs.github.io/headlamp/

# Install Headlamp into kube-system as release "bb-headlamp"
helm install bb-headlamp headlamp/headlamp --namespace kube-system
NAME: bb-headlamp
LAST DEPLOYED: Tue Feb 17 17:17:03 2026
NAMESPACE: kube-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
  1. Get the application URL by running these commands:
export POD_NAME=$(kubectl get pods --namespace kube-system -l "app.kubernetes.io/name=headlamp,app.kubernetes.io/instance=bb-headlamp" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace kube-system $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace kube-system port-forward $POD_NAME 8080:$CONTAINER_PORT
  2. Get the token using
kubectl create token bb-headlamp --namespace kube-system

longhorn

# Generate an htpasswd-style "user:apr1-hash" line for Traefik basic auth.
# The previous one-liner hardcoded the password in these notes (and left it
# in shell history); read it interactively instead and write with > (not >>,
# which duplicated entries on every re-run).
#
# Usage:
#   read -r -s PASSWORD && make_basic_auth_entry admin "$PASSWORD" > auth
# The resulting line is what gets base64-encoded into the "users" key of the
# basic-auth Secrets below.
make_basic_auth_entry() {
  local user=$1 password=$2
  printf '%s:%s\n' "$user" "$(openssl passwd -stdin -apr1 <<< "$password")"
}

sudo k3s kubectl -n longhorn-system create secret generic basic-auth --from-file=auth

vim longhorn-ingress.yml
---
apiVersion: v1
kind: Secret
metadata:
  name: longhorn-basic-auth-secret
  namespace: longhorn-system
data:
  # htpasswd-format "user:apr1-hash" line, base64-encoded
  users: |2
    YWRtaW46JGFwcjEkMmp5TzMwYmskRE5IV0VEQW1VQXFVajVGOHNvdXNVMAoK

---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: longhorn-basic-auth-middleware
  namespace: longhorn-system
spec:
  basicAuth:
    secret: longhorn-basic-auth-secret
    realm: "Longhorn Dashboard"

---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
  annotations:
    # middleware reference format: <namespace>-<name>@kubernetescrd
    traefik.ingress.kubernetes.io/router.middlewares: longhorn-system-longhorn-basic-auth-middleware@kubernetescrd
spec:
  # ingressClassName is a spec field — it was previously written as the
  # annotation "spec.ingressClassName", which Kubernetes silently ignores
  ingressClassName: traefik
  rules:
  - http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: longhorn-frontend
            port:
              number: 80
sudo k3s kubectl -n longhorn-system apply -f longhorn-ingress.yml

metallb

# Add MetalLB repository to Helm
helm repo add metallb https://metallb.github.io/metallb

# Check the added repository
helm search repo metallb

# Idempotent install/upgrade into its own namespace; --wait blocks until
# the release's resources are ready
helm upgrade --install metallb metallb/metallb --create-namespace \
--namespace metallb-system --wait
Release "metallb" does not exist. Installing it now.
NAME: metallb
LAST DEPLOYED: Wed Feb 18 11:46:03 2026
NAMESPACE: metallb-system
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
MetalLB is now running in the cluster.

Now you can configure it via its CRs. Please refer to the metallb official docs
on how to use the CRs.
vim metallb-config.yaml

---
# Pool of LAN addresses MetalLB may hand out to LoadBalancer Services
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.21-192.168.1.40

---
# Announce the pool on the local L2 segment (ARP/NDP)
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default-pool

# The file must actually be applied — this step was missing:
sudo k3s kubectl apply -f metallb-config.yaml

traefik

vim traefik-ingress.yml
---
apiVersion: v1
kind: Secret
metadata:
  name: traefik-basic-auth-secret
  namespace: kube-system
data:
  # htpasswd-format "user:apr1-hash" line, base64-encoded
  users: |2
    YWRtaW46JGFwcjEkMmp5TzMwYmskRE5IV0VEQW1VQXFVajVGOHNvdXNVMAoK

---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: traefik-basic-auth-middleware
  namespace: kube-system
spec:
  basicAuth:
    secret: traefik-basic-auth-secret
    realm: "Traefik Dashboard"

---
# NOTE(review): the previous version of this Ingress had Pod/Deployment
# fields pasted into its spec (serviceAccountName, env, args, volumeMounts,
# ports, volumes, plus the fused line "claimName: traefik-certsrules:") —
# none of those are valid on an Ingress, so the apply would be rejected.
# It also embedded OVH API credentials in plaintext; those belong in a
# Kubernetes Secret consumed by the Traefik Deployment (secretKeyRef /
# envFrom), never in an Ingress and never committed to notes. Revoke and
# rotate the keys that were exposed here.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: traefik-ingress
  namespace: kube-system
  annotations:
    # middleware reference format: <namespace>-<name>@kubernetescrd
    traefik.ingress.kubernetes.io/router.middlewares: kube-system-traefik-basic-auth-middleware@kubernetescrd
spec:
  # ingressClassName is a spec field, not an annotation
  ingressClassName: traefik
  rules:
  - http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: traefik-dashboard
            port:
              number: 8080

---
apiVersion: v1
kind: Service
metadata:
  name: traefik-dashboard
  namespace: kube-system
spec:
  ports:
  - name: http
    port: 8080
    targetPort: 8080
  # NOTE(review): verify this selector matches the Traefik pod labels —
  # k3s's bundled Traefik chart labels pods app.kubernetes.io/name=traefik,
  # so "app: traefik-dashboard" may select nothing. TODO confirm.
  selector:
    app: traefik-dashboard
  type: LoadBalancer

sudo k3s kubectl -n kube-system apply -f traefik-ingress.yml