"Updates"

This commit is contained in:
2026-04-15 08:45:36 +02:00
parent d3081d44be
commit 8254859841
+26 -104
View File
@@ -55,12 +55,12 @@ DNS=208.67.220.220
```
# Host addresses
127.0.0.1 localhost
127.0.1.1 shell1
127.0.1.1 sandy
# Nodes
192.168.1.186 shell1 shell1.local
192.168.1.243 shell2 shell2.local
192.168.1.194 shell3 shell3.local
192.168.1.222 shell4 shell4.local
192.168.1.14 sandy sandy.local
192.168.1.13 gary gary.local
192.168.1.16 sheldon sheldon.local
192.168.1.19 pearl pearl.local
```
#### Reboot
@@ -69,8 +69,8 @@ DNS=208.67.220.220
```
ssh-keygen -t ed25519
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell2
ssh pleb@shell2
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@<host>
ssh pleb@<host>
```
#### Edit
@@ -126,7 +126,7 @@ sudo chmod 666 /var/run/docker.sock
#### On master
```
docker swarm init --advertise-addr 192.168.1.186
docker swarm init --advertise-addr 192.168.1.14
```
> *Swarm initialized: current node (3kdxixaa86m8pvag6jn0b70ut) is now a manager*.
@@ -134,7 +134,7 @@ docker swarm init --advertise-addr 192.168.1.186
#### On nodes
```
docker swarm join --token SWMTKN-1-2px1bindhl41x9h6l4ve7x15iwjryr0uf3ekmu7hz4bezjewwh-ae9vv4657zcki160s71vjn75z 192.168.1.186:2377
docker swarm join --token SWMTKN-1-2px1bindhl41x9h6l4ve7x15iwjryr0uf3ekmu7hz4bezjewwh-ae9vv4657zcki160s71vjn75z 192.168.1.14:2377
```
> *To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions*.
@@ -165,7 +165,7 @@ docker network ls
sudo apt-get -y install keepalived
```
### On master node (shell1)
### On master node (sandy)
#### Create/Edit
@@ -176,7 +176,7 @@ sudo apt-get -y install keepalived
global_defs {
notification_email {
jme69@pm.me
admin@delmar.bzh
}
notification_email_from admin@delmar.bzh
smtp_server pro1.mail.ovh.net
@@ -199,12 +199,12 @@ vrrp_instance VI_1 {
auth_pass transatlantique
}
virtual_ipaddress {
192.168.1.30
192.168.1.21
}
}
```
### On Node2
### On Nodes
#### Create/Edit
@@ -215,7 +215,7 @@ vrrp_instance VI_1 {
global_defs {
notification_email {
jme69@pm.me
admin@delmar.bzh
}
notification_email_from admin@delmar.bzh
smtp_server pro1.mail.ovh.net
@@ -238,85 +238,7 @@ vrrp_instance VI_1 {
auth_pass transatlantique
}
virtual_ipaddress {
192.168.1.30
}
}
```
### On Node3
#### Create/Edit
> sudo vim /etc/keepalived/keepalived.conf
```
! Configuration File for keepalived
global_defs {
notification_email {
jme69@pm.me
}
notification_email_from admin@delmar.bzh
smtp_server pro1.mail.ovh.net
smtp_connect_timeout 30
router_id docker_ingress
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state BACKUP
interface end0
virtual_router_id 51
priority 80
advert_int 1
authentication {
auth_type PASS
auth_pass transatlantique
}
virtual_ipaddress {
192.168.1.30
}
}
```
### On Node4
#### Create/Edit
> sudo vim /etc/keepalived/keepalived.conf
```
! Configuration File for keepalived
global_defs {
notification_email {
jme69@pm.me
}
notification_email_from admin@delmar.bzh
smtp_server pro1.mail.ovh.net
smtp_connect_timeout 30
router_id docker_ingress
vrrp_skip_check_adv_addr
vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_instance VI_1 {
state BACKUP
interface end0
virtual_router_id 51
priority 70
advert_int 1
authentication {
auth_type PASS
auth_pass transatlantique
}
virtual_ipaddress {
192.168.1.30
192.168.1.21
}
}
```
@@ -347,7 +269,7 @@ sudo systemctl start glusterd
```
# Format your disk (If you decided to use your internal storage, just ignore the mount and format steps)
sudo mkfs.xfs -f /dev/sda
sudo mkfs.xfs -f /dev/nvme0n1
# Create brick folder location
sudo mkdir -p /data/glusterfs/volume/brick
@@ -363,9 +285,9 @@ sudo mount -a
sudo systemctl daemon-reload
sudo gluster peer probe shell2
sudo gluster peer probe shell3
sudo gluster peer probe shell4
sudo gluster peer probe gary
sudo gluster peer probe sheldon
sudo gluster peer probe pearl
```
> Check
@@ -378,16 +300,16 @@ sudo gluster pool list
sudo gluster peer status
```
> For 2 nodes:
> For 2 nodes: sandy and sheldon for example
```
sudo gluster volume create docker-volume replica 2 transport tcp shell1:/mnt/data shell3:/mnt/data force
sudo gluster volume create docker-volume replica 2 transport tcp sandy:/mnt/ssd sheldon:/mnt/data force
```
> For 3 nodes:
> For 3 nodes: sandy, sheldon and pearl for example
```
sudo gluster volume create dockervolume disperse 3 redundancy 1 shell1:/data/glusterfs/volume/brick shell2:/data/glusterfs/volume/brick shell3:/data/glusterfs/volume/brick force
sudo gluster volume create dockervolume disperse 3 redundancy 1 sandy:/data/glusterfs/volume/brick sheldon:/data/glusterfs/volume/brick pearl:/data/glusterfs/volume/brick force
```
#### Start Gluster Volume
@@ -417,7 +339,7 @@ sudo systemctl daemon-reload
```
df -h /mnt/data
sudo gluster volume set dockervolume auth.allow 127.0.0.1,192.168.1.186,192.168.1.243,192.168.1.194,192.168.1.222
sudo gluster volume set dockervolume auth.allow 127.0.0.1,192.168.1.14,192.168.1.13,192.168.1.16,192.168.1.19
```
> When adding a new brick:
@@ -429,7 +351,7 @@ sudo gluster volume add-brick **volume** replica X **server:brick**
> Option
```
docker plugin install --alias glusterfs mikebarkmin/glusterfs SERVERS=shell1,shell2,shell3,shell4 VOLNAME=dockervolume
docker plugin install --alias glusterfs mikebarkmin/glusterfs SERVERS=sandy,gary,sheldon,pearl VOLNAME=dockervolume
```
### 5. Portainer
@@ -484,4 +406,4 @@ docker service update --image portainer/portainer-ce:latest --publish-add 9443:9
docker pull portainer/agent:latest
docker service update --image portainer/agent:latest --force portainer_agent
```
```