Compare commits

...

30 Commits

Author SHA1 Message Date
julien 2f77994a66 "Updates" 2026-05-07 00:05:37 +02:00
julien 515717aedf "Updates" 2026-05-06 20:03:54 +02:00
julien 05d115eeab "Updates" 2026-05-06 19:53:57 +02:00
julien 578f904576 "Updates" 2026-04-24 10:39:53 +02:00
julien de18a5085d "Updates" 2026-04-22 10:35:02 +02:00
julien 9d13b1261e "Updates" 2026-04-18 11:30:48 +02:00
julien 73552caca4 "Updates" 2026-04-17 19:20:29 +02:00
julien 08acbee85c "Updates" 2026-04-17 19:19:16 +02:00
julien eb3a47d6db "Updates" 2026-04-17 19:11:27 +02:00
julien 2892b98009 "Updates" 2026-04-17 18:51:07 +02:00
julien 32007276bb "Updates" 2026-04-17 18:48:57 +02:00
julien 274f41fe51 "Updates" 2026-04-17 18:39:57 +02:00
julien 56c534ff86 "Updates" 2026-04-17 18:18:01 +02:00
julien a93fb30b4a "Updates" 2026-04-17 18:14:07 +02:00
julien 8254859841 "Updates" 2026-04-15 08:45:36 +02:00
julien d3081d44be "Updates" 2026-04-15 00:56:04 +02:00
julien 0278914449 "Updates" 2026-04-15 00:12:19 +02:00
julien 9c4207ae20 "Updates" 2026-04-14 19:07:13 +02:00
julien cb1b75a09d "Updates" 2026-04-14 19:03:46 +02:00
julien cd049065fb "Updates" 2026-04-14 18:59:22 +02:00
julien 03422c035e "Updates" 2026-04-14 18:52:39 +02:00
julien a6c0792132 "Updates" 2026-04-14 18:47:11 +02:00
julien e40d2356fa "Updates" 2026-04-14 18:07:05 +02:00
julien facab9046e "Updates" 2026-04-14 18:04:46 +02:00
julien 4ec695443d "Updates" 2026-04-14 16:08:45 +02:00
julien 0968035f9d "Updates" 2026-04-09 18:07:55 +02:00
julien 653e5f15d0 "Updates" 2026-04-08 13:09:12 +02:00
julien 8edb7bc02a "Updates" 2026-04-08 12:53:55 +02:00
julien f859c7a661 "Updates" 2026-04-08 12:27:44 +02:00
julien 14a54e3733 "Updates" 2026-04-08 12:20:48 +02:00
11 changed files with 346 additions and 2497 deletions
+3 -2
View File
@@ -1,5 +1,5 @@
$TTL 3600
@ IN SOA dns106.ovh.net. tech.ovh.net. (2026032801 86400 3600 3600000 300)
@ IN SOA dns106.ovh.net. tech.ovh.net. (2076846298 86400 3600 3600000 300)
IN NS dns106.ovh.net.
IN NS ns106.ovh.net.
IN MX 100 mx3.mail.ovh.net.
@@ -21,11 +21,12 @@ auth IN A 176.188.240.123
autoconfig IN CNAME autodiscover.mail.ovh.net.
autodiscover IN CNAME mailconfig.ovh.net.
books IN A 176.188.240.123
borg IN A 176.188.240.123
cap IN A 176.188.240.123
cfy IN A 176.188.240.123
cloud IN A 176.188.240.123
cnvrt IN A 176.188.240.123
crbn IN A 176.188.240.123
crm IN A 176.188.240.123
cs IN A 176.188.240.123
dev IN A 176.188.240.123
dia IN A 176.188.240.123
+142 -2291
View File
File diff suppressed because it is too large Load Diff
+16 -4
View File
@@ -27,10 +27,22 @@ echo "UUID=... /mnt/<folder> ext4 defaults 0 0" | sudo tee -a /
#### bob (...)
```
# emmc
UUID=5855d1bc-99fd-41f2-9d11-7c00a1c0ad00 / ext4 defaults,x-systemd.growfs 0 1
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# systemd generates mount units based on this file, see systemd.mount(5).
# Please run 'systemctl daemon-reload' after making changes here.
#
# <file system> <mount point> <type> <options> <dump> <pass>
# / was on /dev/mmcblk0p1 during installation
UUID=15c5c8b9-af8d-4d35-bf0b-47443d554971 / ext4 errors=remount-ro 0 1
# swap was on /dev/mmcblk0p5 during installation
UUID=1c99fbca-6863-4f8a-b8da-4bbea3fbe8c9 none swap sw 0 0
# nvme
UUID=529deeff-8612-4855-bc07-e07eb2cf55de /mnt/ssd ext4 defaults 0 0
UUID=b656c935-5ef2-45a6-894e-c7062a75a862 /mnt/ssd ext4 defaults 0 0
```
#### carlo (nvme0n1 / sda / sdb) (nfs server --> backups)
@@ -75,7 +87,7 @@ UUID=fd0d6603-c6f1-43fe-a993-39f79b4e3eb8 /mnt/ssd ext4 defaults 0
# <file system> <mount point> <type> <options> <dump> <fsck>
UUID=1cf633ab-4f5f-42da-b347-31282732a446 / ext4 defaults,x-systemd.growfs 0 1
# nvme
UUID=75755b0b-e727-41df-a8ae-7f9fe59c36e0 /mnt/ssd ext4 defaults 0 0
UUID=6d255d62-eafe-4333-87b0-9e0c6bf6a44d /mnt/ssd ext4 defaults 0 0
```
#### krabs (sda / sdb) (nfs server)
+26 -104
View File
@@ -41,7 +41,7 @@ pleb ALL=(ALL:ALL) NOPASSWD: ALL
Name=end0
[Network]
Address=192.168.1.21/24
Address=192.168.1.xxx/24
Gateway=192.168.1.254
# OpenDNS
DNS=208.67.222.222
@@ -55,12 +55,12 @@ DNS=208.67.220.220
```
# Host addresses
127.0.0.1 localhost
127.0.1.1 shell1
127.0.1.1 sandy
# Nodes
192.168.1.186 shell1 shell1.local
192.168.1.243 shell2 shell2.local
192.168.1.194 shell3 shell3.local
192.168.1.222 shell4 shell3.local
192.168.1.14 sandy sandy.local
192.168.1.13 gary gary.local
192.168.1.16 sheldon sheldon.local
192.168.1.19 pearl pearl.local
```
#### Reboot
@@ -69,8 +69,8 @@ DNS=208.67.220.220
```
ssh-keygen -t ed25519
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell2
ssh pleb@shell2
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@<host>
ssh pleb@<host>
```
#### Edit
@@ -126,7 +126,7 @@ sudo chmod 666 /var/run/docker.sock
#### On master
```
docker swarm init --advertise-addr 192.168.1.186
docker swarm init --advertise-addr 192.168.1.14
```
> *Swarm initialized: current node (3kdxixaa86m8pvag6jn0b70ut) is now a manager*.
@@ -134,7 +134,7 @@ docker swarm init --advertise-addr 192.168.1.186
#### On nodes
```
docker swarm join --token SWMTKN-1-2px1bindhl41x9h6l4ve7x15iwjryr0uf3ekmu7hz4bezjewwh-ae9vv4657zcki160s71vjn75z 192.168.1.186:2377
docker swarm join --token SWMTKN-1-2px1bindhl41x9h6l4ve7x15iwjryr0uf3ekmu7hz4bezjewwh-ae9vv4657zcki160s71vjn75z 192.168.1.14:2377
```
> *To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions*.
@@ -165,7 +165,7 @@ docker network ls
sudo apt-get -y install keepalived
```
### On master node (shell1)
### On master node (sandy)
#### Create/Edit
@@ -176,7 +176,7 @@ sudo apt-get -y install keepalived
global_defs {
notification_email {
jme69@pm.me
admin@delmar.bzh
}
notification_email_from admin@delmar.bzh
smtp_server pro1.mail.ovh.net
@@ -199,12 +199,12 @@ vrrp_instance VI_1 {
auth_pass transatlantique
}
virtual_ipaddress {
192.168.1.30
192.168.1.21
}
}
```
### On Node2
### On Nodes
#### Create/Edit
@@ -215,7 +215,7 @@ vrrp_instance VI_1 {
global_defs {
notification_email {
jme69@pm.me
admin@delmar.bzh
}
notification_email_from admin@delmar.bzh
smtp_server pro1.mail.ovh.net
@@ -238,85 +238,7 @@ vrrp_instance VI_1 {
auth_pass transatlantique
}
virtual_ipaddress {
192.168.1.30
}
}
```
### On Node3
#### Create/Edit
> `sudo vim /etc/keepalived/keepalived.conf`
```
! Configuration File for keepalived
global_defs {
notification_email {
jme69@pm.me
}
notification_email_from admin@delmar.bzh
smtp_server pro1.mail.ovh.net
smtp_connect_timeout 30
router_id docker_ingress
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state BACKUP
interface end0
virtual_router_id 51
priority 80
advert_int 1
authentication {
auth_type PASS
auth_pass transatlantique
}
virtual_ipaddress {
192.168.1.30
}
}
```
### On Node4
#### Create/Edit
> `sudo vim /etc/keepalived/keepalived.conf`
```
! Configuration File for keepalived
global_defs {
notification_email {
jme69@pm.me
}
notification_email_from admin@delmar.bzh
smtp_server pro1.mail.ovh.net
smtp_connect_timeout 30
router_id docker_ingress
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state BACKUP
interface end0
virtual_router_id 51
priority 70
advert_int 1
authentication {
auth_type PASS
auth_pass transatlantique
}
virtual_ipaddress {
192.168.1.30
192.168.1.21
}
}
```
@@ -347,7 +269,7 @@ sudo systemctl start glusterd
```
# Format your disk (If you decided to use your internal storage, just ignore the mount and format steps)
sudo mkfs.xfs -f /dev/sda
sudo mkfs.xfs -f /dev/nvme0n1
# Create brick folder location
sudo mkdir -p /data/glusterfs/volume/brick
@@ -363,9 +285,9 @@ sudo mount -a
sudo systemctl daemon-reload
sudo gluster peer probe shell2
sudo gluster peer probe shell3
sudo gluster peer probe shell4
sudo gluster peer probe gary
sudo gluster peer probe sheldon
sudo gluster peer probe pearl
```
> Check
@@ -378,16 +300,16 @@ sudo gluster pool list
sudo gluster peer status
```
> For 2 nodes:
> For 2 nodes: sandy and sheldon for example
```
sudo gluster volume create docker-volume replica 2 transport tcp shell1:/mnt/datav shell3:/mnt/data force
sudo gluster volume create docker-volume replica 2 transport tcp sandy:/mnt/ssd sheldon:/mnt/data force
```
For 3 nodes:
> For 3 nodes: sandy, sheldon and pearl for example
```
sudo gluster volume create dockervolume disperse 3 redundancy 1 shell1:/data/glusterfs/volume/brick shell2:/data/glusterfs/volume/brick shell3:/data/glusterfs/volume/brick force
sudo gluster volume create dockervolume disperse 3 redundancy 1 sandy:/data/glusterfs/volume/brick sheldon:/data/glusterfs/volume/brick pearl:/data/glusterfs/volume/brick force
```
#### Start Gluster Volume
@@ -417,7 +339,7 @@ sudo systemctl daemon-reload
```
df -h /mnt/data
sudo gluster volume set dockervolume auth.allow 127.0.0.1,192.168.1.186,192.168.1.243,192.168.1.194,192.168.1.222
sudo gluster volume set dockervolume auth.allow 127.0.0.1,192.168.1.14,192.168.1.13,192.168.1.16,192.168.1.19
```
> When adding a new brick:
@@ -429,7 +351,7 @@ sudo gluster volume add-brick **volume** replica X **server:brick**
> Option
```
docker plugin install --alias glusterfs mikebarkmin/glusterfs SERVERS=shell1,shell2,shell3,shell4 VOLNAME=dockervolume
docker plugin install --alias glusterfs mikebarkmin/glusterfs SERVERS=sandy,gary,sheldon,pearl VOLNAME=dockervolume
```
### 5. Portainer
+36 -11
View File
@@ -29,6 +29,7 @@ ff02::3 ip6-allhosts
192.168.1.17 bernie
192.168.1.18 patrick
192.168.1.19 pearl
192.168.1.20 karen
#
192.168.1.53 retropie
@@ -57,8 +58,9 @@ ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@<hostname>
| ├── sandy (192.168.1.14)
| ├── sheldon (192.168.1.16)
| └── pearl (192.168.1.19)
├── krabs (192.168.1.15) # nfs server
├── patrick (192.168.1.18)
├── karen (192.168.1.20) # jetson
├── krabs (192.168.1.15) # nfs server
├── bernie (192.168.1.17) # octoprint
| ------------------------------
└── retropie (192.168.1.53)
@@ -69,16 +71,16 @@ ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@<hostname>
### bob
```bash
_,met$$$$$gg. pleb@bob
,g$$$$$$$$$$$$$$$P. OS: Debian
,g$$P"" """Y$$.". Kernel: aarch64 Linux 6.1.0-44-arm64
,$$P' `$$$. Uptime: 11m
',$$P ,ggs. `$$b: Packages: 419
`d$$' ,$P"' . $$$ Shell: bash 5.2.15
$$P d$' , $$P Disk: 2,2G / 31G (8%)
$$: $$. - ,d$$' CPU: ARM Cortex-A55 @ 4x 1.8GHz
$$\; Y$b._ _,d$P' RAM: 326MiB / 3737MiB
Y$$. `.`"Y$$$$P"'
_,met$$$$$gg. pleb@bob
,g$$$$$$$$$$$$$$$P. OS: Debian 13 trixie
,g$$P"" """Y$$.". Kernel: x86_64 Linux 6.12.85+deb13-amd64
,$$P' `$$$. Uptime: 13m
',$$P ,ggs. `$$b: Packages: 412
`d$$' ,$P"' . $$$ Shell: bash 5.2.37
$$P d$' , $$P Disk: 1,5G / 35G (5%)
$$: $$. - ,d$$' CPU: Intel Atom E3950 @ 4x 2GHz [38.0°C]
$$\; Y$b._ _,d$P' GPU: HD Graphics 505
Y$$. `.`"Y$$$$P"' RAM: 517MiB / 15843MiB
`$$b "-.__
`Y$$
`Y$$.
@@ -269,3 +271,26 @@ ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@<hostname>
`"Y$b._
`""""
```
### karen
```bash
./+o+- pleb@karen
yyyyy- -yyyyyy+ OS: Ubuntu 22.04 jammy
://+//////-yyyyyyo Kernel: aarch64 Linux 5.15.185-tegra
.++ .:/++++++/-.+sss/` Uptime: 5m
.:++o: /++++++++/:--:/- Packages: 2136
o:+o+:++.`..```.-/oo+++++/ Shell: bash 5.1.16
.:+o:+o/. `+sssoo+/ Disk: 11G / 937G (2%)
.++/+:+oo+o:` /sssooo. CPU: ARMv8 rev 1 (v8l) @ 6x 1,344GHz
/+++//+:`oo+o /::--:. GPU: Orin (nvgpu)
\+/+o+++`o++o ++////. RAM: 918MiB / 7607MiB
.++.o+++oo+:` /dddhhh.
.+.o+oo:. `oddhhhh+
\+.++o+o``-````.:ohdhhhhh+
`:o+++ `ohhhhhhhhyo++os:
.o:`.syhhhhhhh/.oo++o`
/osyyyyyyo++ooo+++/
````` +oo+++o\:
`oo++.
```
Binary file not shown.

After

Width:  |  Height:  |  Size: 184 KiB

+58 -32
View File
@@ -51,7 +51,7 @@ sudo blkid -s UUID -o value /dev/nvme0n1
> sheldon UUID=fd0d6603-c6f1-43fe-a993-39f79b4e3eb8
> pearl UUID=75755b0b-e727-41df-a8ae-7f9fe59c36e0
> pearl UUID=6d255d62-eafe-4333-87b0-9e0c6bf6a44d
echo "UUID=... /mnt/ssd ext4 defaults 0 0" | sudo tee -a /etc/fstab
@@ -98,7 +98,7 @@ PasswordAuthentication no
sudo systemctl restart sshd
```
#### On shell1 :
#### On sandy :
```bash
ssh-keygen -t ed25519
@@ -124,14 +124,14 @@ python3 -m pip install --user ansible
[master]
sandy ansible_connection=local
[node]
[workers]
gary ansible_connection=ssh
sheldon ansible_connection=ssh
pearl ansible_connection=ssh
[k3s_cluster:children]
master
node
workers
```
## UFW
@@ -218,23 +218,23 @@ cd www
git clone https://github.com/k3s-io/k3s-ansible.git
cp -R k3s-ansible/inventory/sample k3s-ansible/inventory/sandy
cp -R k3s-ansible/inventory-sample.yml k3s-ansible/inventory/bikiniBottom.yaml
```
`vim www/k3s-ansible/inventory/sandy/hosts.ini`
`vim www/k3s-ansible/inventory/bikiniBottom.yaml`
```ini
[master]
192.168.1.14 ansible_connection=ssh var_hostname=sandy var_disk=sda1 var_uuid=6fab06af-f38f-493a-87ab-512f52a6616c
192.168.1.14 ansible_connection=ssh var_hostname=sandy var_disk=nvme0n1 var_uuid=e2e3a295-9731-4aa2-996f-c72c2b81f40f
[node]
192.168.1.13 ansible_connection=ssh var_hostname=gary var_disk=sda1 var_uuid=7f348b05-b44e-4b85-8445-657dc95e72df
192.168.1.16 ansible_connection=ssh var_hostname=sheldon var_disk=sda1 var_uuid=a612717e-ca95-44a3-9b10-10e6be26112f
192.168.1.19 ansible_connection=ssh var_hostname=pearl var_disk=sda1 var_uuid=75755b0b-e727-41df-a8ae-7f9fe59c36e0
[workers]
192.168.1.13 ansible_connection=ssh var_hostname=gary var_disk=nvme0n1 var_uuid=124046b7-f656-4691-933b-4673e5abbb17
192.168.1.16 ansible_connection=ssh var_hostname=sheldon var_disk=nvme0n1 var_uuid=fd0d6603-c6f1-43fe-a993-39f79b4e3eb8
192.168.1.19 ansible_connection=ssh var_hostname=pearl var_disk=nvme0n1 var_uuid=6d255d62-eafe-4333-87b0-9e0c6bf6a44d
[k3s_cluster:children]
master
node
workers
```
`vim www/k3s-ansible/inventory/sandy/group_vars/all.yml`
@@ -244,9 +244,9 @@ node
#### Install playbook
```bash
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m apt -a "name=iptables state=present" --become
ansible-playbook ~/www/k3s-ansible/site.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini
ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -m ping
ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -m apt -a "name=iptables state=present" --become
ansible-playbook ~/delmar.bzh/ansible/k3s-ansible/playbooks/site.yml -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml
```
#### On master:
@@ -273,7 +273,7 @@ sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
kubectl version --client
scp pleb@shell1:~/.kube/config ~/.kube/config
scp pleb@sandy:~/.kube/config ~/.kube/config
export KUBECONFIG=~/.kube/config
@@ -290,7 +290,7 @@ sudo k3s kubectl label nodes gary kubernetes.io/role=worker
sudo k3s kubectl label nodes sheldon kubernetes.io/role=worker
sudo k3s kubectl label nodes pearl kubernetes.io/role=worker
# sudo k3s kubectl label nodes shell1 node-type=worker
# sudo k3s kubectl label nodes sandy node-type=worker
sudo k3s kubectl label nodes gary node-type=worker
sudo k3s kubectl label nodes sheldon node-type=worker
sudo k3s kubectl label nodes pearl node-type=worker
@@ -298,17 +298,17 @@ sudo k3s kubectl label nodes pearl node-type=worker
#### ! Ping !
`ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping`
`ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -m ping`
#### ! Reset !
```bash
ansible-playbook ~/www/k3s-ansible/reset.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini
ansible-playbook ~/delmar.bzh/ansible/k3s-ansible/reset.yml -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml
```
#### ! Restart !
`ansible all -i ~/www/k3s-ansible/inventory/sandy/hosts.ini -a "shutdown -r now" -b`
`ansible all -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml -a "shutdown -r now" -b`
## 3. Helm
@@ -338,7 +338,7 @@ metadata:
namespace: metallb-system
spec:
addresses:
- 192.168.1.30-192.168.1.49
- 192.168.1.21-192.168.1.49
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
@@ -357,20 +357,20 @@ EOF
sudo systemctl enable iscsid.service
sudo systemctl start iscsid.service
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=nfs-common state=present"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=open-iscsi state=present"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=util-linux state=present"
ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m apt -a "name=nfs-common state=present"
ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m apt -a "name=open-iscsi state=present"
ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m apt -a "name=util-linux state=present"
```
```bash
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "lsblk -f"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "blkid -s UUID -o value /dev/..."
ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m shell -a "lsblk -f"
ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -b -m shell -a "blkid -s UUID -o value /dev/nvme0n1"
```
#### Ansible mount:
```bash
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ansible.posix.mount -a "path=/mnt/data src=UUID={{ var_uuid }} fstype=ext4 state=mounted" -b
ansible -i ~/delmar.bzh/ansible/k3s-ansible/inventory/bikiniBottom.yaml k3s_cluster -m ansible.posix.mount -a "path=/mnt/ssd src=UUID={{ var_uuid }} fstype=ext4 state=mounted" -b
```
### Longhorn
@@ -380,7 +380,7 @@ helm repo add longhorn https://charts.longhorn.io
helm repo update
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.4.2 --set defaultSettings.defaultDataPath="/mnt/data" --set service.ui.loadBalancerIP="192.168.1.31" --set service.ui.type="LoadBalancer"
helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.4.2 --set defaultSettings.defaultDataPath="/mnt/ssd" --set service.ui.loadBalancerIP="192.168.1.23" --set service.ui.type="LoadBalancer"
sudo k3s kubectl -n longhorn-system get pod
@@ -402,8 +402,10 @@ kubectl patch storageclass longhorn -p '{"metadata": {"annotations":{"storagecla
#### UI
##### Option 1 : ingress ?
```bash
USER=admin; PASSWORD=transatlantique; echo "${USER}:$(openssl passwd -stdin -apr1 <<< ${PASSWORD})" >> auth
USER=admin; PASSWORD=v5bB4OQRDfY5tFJ1; echo "${USER}:$(openssl passwd -stdin -apr1 <<< ${PASSWORD})" >> auth
sudo k3s kubectl -n longhorn-system create secret generic basic-auth --from-file=auth
```
@@ -445,7 +447,7 @@ metadata:
cert-manager.io/cluster-issuer: letsencrypt-staging
spec:
rules:
- host: dashboard.delmar.bzh
- host: lghn.delmar.bzh
http:
paths:
- path: /
@@ -454,9 +456,9 @@ spec:
servicePort: 8090
tls:
- # cert-manager will store the certificate and key in this secret
secretName: dashboard-delmar-bzh-cert
secretName: lghn-delmar-bzh-cert
hosts:
- dashboard.delmar.bzh
- lghn.delmar.bzh
```
```bash
@@ -464,6 +466,30 @@ sudo k3s kubectl -n longhorn-system apply -f longhorn-ingress.yml
sudo k3s kubectl -n longhorn-system get ingress
```
##### Option 2 : caddy
```bash
sudo caddy hash-password
```
```bash
sudo vim /etc/caddy/Caddyfile
```
```
lghn.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy 192.168.1.23
basic_auth / {
admin $2a$14$RsUJ13kC9DcRMoQa8hlmNOljcXGMSem.1XwYFeV1El8Drw7DIJbVu
}
}
```
## 6. HAProxy
```bash
+1
View File
@@ -32,6 +32,7 @@ ff02::3 ip6-allhosts
192.168.1.17 bernie
192.168.1.18 patrick
192.168.1.19 pearl
192.168.1.20 karen
#
192.168.1.53 recalbox
-1
View File
@@ -1,7 +1,6 @@
```bash
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBLk24u7FT8PhAdM8EVUFGlOi0hle4CW8L284E1foUhS julien@julien-pc
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE6wr+DUbcfVTltoWT6gbPRY3geUYNhgN7/CLcMaMu0B eliot@toile-win
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKmttayKqj6Z290hMCc97v4dMZTSUz4lYgXR0NtcRr8U delmar@thinkpad
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC/ZRSzTgEBh+NbLKKxjW5F0Gj/j7GJylnMnGlf96Wpy pleb@bob
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIbDwpvEZ+pvVSBQryt6tGNQ25+z1P2UJO45cPHmDkj0 pleb@carlo
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKL7A0xvHSfmWo+LUHdWWb03a5NXN1IlbLS5iSHxs3zw pleb@sandy
+53 -41
View File
@@ -86,6 +86,15 @@ books.delmar.bzh {
reverse_proxy patrick:10801
}
borg.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy carlo:8080
}
cap.delmar.bzh {
encode {
zstd
@@ -95,6 +104,15 @@ cap.delmar.bzh {
reverse_proxy patrick:11338
}
cfy.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy karen:8188
}
cloud.delmar.bzh {
encode {
zstd
@@ -146,15 +164,6 @@ crbn.delmar.bzh {
reverse_proxy patrick:4000
}
crm.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy patrick:15069
}
cs.delmar.bzh {
encode {
zstd
@@ -208,7 +217,7 @@ gen.delmar.bzh {
gzip
minimum_length 1024
}
reverse_proxy patrick:15578
reverse_proxy patrick:32787
}
git.delmar.bzh {
@@ -229,15 +238,6 @@ gotify.delmar.bzh {
reverse_proxy patrick:41901
}
hdlp.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy 192.168.1.22
}
homepage.delmar.bzh {
encode {
zstd
@@ -262,7 +262,7 @@ imgs.delmar.bzh {
gzip
minimum_length 1024
}
reverse_proxy patrick:32774
reverse_proxy karen:32774
}
inv.delmar.bzh {
@@ -271,7 +271,7 @@ inv.delmar.bzh {
gzip
minimum_length 1024
}
reverse_proxy patrick:8035 {
reverse_proxy patrick:7777 {
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
header_up X-Forwarded-Proto {scheme}
@@ -323,15 +323,6 @@ kontadenn.delmar.bzh {
}
}
lghn.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy 192.168.1.23
}
lud.delmar.bzh {
encode {
zstd
@@ -455,7 +446,7 @@ pip.delmar.bzh {
gzip
minimum_length 1024
}
root * /mnt/ssd/www/picpitch-collage
root * /mnt/ssd/www/pip
file_server
}
@@ -546,7 +537,7 @@ stream.delmar.bzh {
gzip
minimum_length 1024
}
reverse_proxy bob:1984
reverse_proxy patrick:1984
basic_auth / {
admin $2a$14$Z5n.aZ3DVnl.DjE9h2WP4OcR21tROpN1dyv8hFxGU3AEbdcOssUKi
}
@@ -570,15 +561,6 @@ tpml.delmar.bzh {
}
}
trfk.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy 192.168.1.21
}
trmx.delmar.bzh {
encode {
zstd
@@ -673,3 +655,33 @@ zik.delmar.bzh {
}
reverse_proxy patrick:4533
}
trfk.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy 192.168.1.21
}
hdlp.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy 192.168.1.22
}
lghn.delmar.bzh {
encode {
zstd
gzip
minimum_length 1024
}
reverse_proxy 192.168.1.23
basic_auth / {
admin $2a$14$RsUJ13kC9DcRMoQa8hlmNOljcXGMSem.1XwYFeV1El8Drw7DIJbVu
}
}
+10 -10
View File
@@ -16,7 +16,7 @@ borg init -e none /mnt/data/backup/<repo_name>
borg init -e none /mnt/data/backup/<repo_name>
# distant
borg init -e none ssh://pleb@krabs/mnt/data/backup/<repo_name>
borg init -e none ssh://pleb@carlo/mnt/data/backup/<repo_name>
```
#### Encrypted repo
@@ -25,7 +25,7 @@ borg init -e none ssh://pleb@krabs/mnt/data/backup/<repo_name>
borg init -e repokey /mnt/data/backup/<repo_name>
# distant
borg init -e repokey ssh://pleb@krabs/mnt/data/backup/<repo_name>
borg init -e repokey ssh://pleb@carlo/mnt/data/backup/<repo_name>
```
> Enter a passphrase to encrypt the folder.
```txt
@@ -45,7 +45,7 @@ cd /path/to/folder
borg create --progress --stats -C zstd,10 /mnt/data/backup/<repo_name>::20251018 .
# distant
borg create --progress --stats -C zstd,10 ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018 .
borg create --progress --stats -C zstd,10 ssh://pleb@carlo/mnt/data/backup/<repo_name>::20251018 .
```
### List a backup :
@@ -54,7 +54,7 @@ borg create --progress --stats -C zstd,10 ssh://pleb@krabs/mnt/data/backup/<repo
borg list /mnt/data/backup/<repo_name>::20251018
# distant
borg list ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018
borg list ssh://pleb@carlo/mnt/data/backup/<repo_name>::20251018
```
### Verify the backup folder using the command info:
@@ -63,7 +63,7 @@ borg list ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018
borg info /mnt/data/backup/<repo_name>::20251018
# distant
borg info ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018
borg info ssh://pleb@carlo/mnt/data/backup/<repo_name>::20251018
```
### Restore a backup to a folder :
@@ -72,7 +72,7 @@ borg info ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018
borg extract --progress /mnt/data/backup/<repo_name>::20251018 /path/to/folder
# distant
borg extract --progress ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018 /path/to/folder
borg extract --progress ssh://pleb@carlo/mnt/data/backup/<repo_name>::20251018 /path/to/folder
```
### Mount a backup :
@@ -83,7 +83,7 @@ mkdir /var/tmp/<folder_name>
borg mount /mnt/data/backup/<repo_name>::20251018 /var/tmp/<folder_name>
# distant
borg mount ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018 /var/tmp/<folder_name>
borg mount ssh://pleb@carlo/mnt/data/backup/<repo_name>::20251018 /var/tmp/<folder_name>
# unmount
borg umount /var/tmp/<folder_name>
@@ -95,7 +95,7 @@ borg unmount var/tmp/<folder_name>
borg delete /mnt/data/backup/<repo_name>::20251018
# distant
borg delete ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018
borg delete ssh://pleb@carlo/mnt/data/backup/<repo_name>::20251018
```
### Freeup space
@@ -105,7 +105,7 @@ borg delete ssh://pleb@krabs/mnt/data/backup/<repo_name>::20251018
borg compact /mnt/data/backup
# distant
borg compact ssh://pleb@krabs/mnt/data/backup
borg compact ssh://pleb@carlo/mnt/data/backup
```
#### Prune and keep latest 2 backups
@@ -114,5 +114,5 @@ borg compact ssh://pleb@krabs/mnt/data/backup
borg prune --keep-last 2 /mnt/data/backup
# distant
borg prune --keep-last 2 ssh://pleb@krabs/mnt/data/backup
borg prune --keep-last 2 ssh://pleb@carlo/mnt/data/backup
```