Turing Pi
# Baseboard Management Controller (BMC)

### bikiniBottom

```
Login    : root
Password : hardiness-mystify-pretext
```

[http://192.168.1.69](http://192.168.1.69 "http://192.168.1.69")
```bash
# tpi -h
Usage: tpi [host] <options...>
Options:
  -p, --power (on off status)      Power management
  -u, --usb (host device status)   USB mode, must be used with the node option
  -n, --node (1 2 3 4)             USB selected node
  -r, --resetsw                    reset switch
  -U, --uart                       uart opt get or set
  -C, --cmd                        uart set cmd
  -F, --upgrade                    upgrade fw
  -f, --flash                      todo
  -h, --help                       usage
example:
  $ tpi -p on                             // power on
  $ tpi -p off                            // power off
  $ tpi -u host -n 1                      // USB uses host mode to connect to Node1
  $ tpi --uart=get -n 1                   // get node1 uart info
  $ tpi --uart=set -n 1 --cmd=ls          // set node1 uart cmd
  $ tpi --upgrade=/mnt/sdcard/xxxx.swu    // upgrade fw
  $ tpi -r                                // reset switch
```
`vi /etc/network/interfaces`

```
# interface file auto-generated by buildroot

auto lo
iface lo inet loopback

auto eth0
iface eth0 inet static
    hwaddress ether 02:e4:fd:09:22:fb
    address 192.168.1.10
    netmask 255.255.255.0
    gateway 192.168.1.254
    pre-up /etc/network/nfs_check
    wait-delay 15
    hostname $(hostname)
```
`vi /etc/resolv.conf`

```
search lan                 # eth0
nameserver 192.168.1.254   # eth0
```
```bash
/etc/init.d/S40network restart

date -s @"$(curl -s "http://worldtimeapi.org/api/timezone/Europe/Paris" | sed -n 's/.*"unixtime":\([0-9]*\).*/\1/p')"

hwclock --systohc
```
- MAC Address : 02:e4:fd:09:22:fb
- Hosts password : transatlantique
#### Update

Check the root partition in use: run `df -h` to see whether /mnt/sdcard is mounted and note which root partition is in use, ubi0_5 or ubi0_6.
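For example, a minimal check (plain `df`, nothing BMC-specific):

```bash
# Is /mnt/sdcard mounted, and which ubi volume backs the root filesystem?
df -h
```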
```bash
scp path\to\the\swu\file root@192.168.1.10:/tmp
```
Create and execute the command: use the firmware path and the partition information to build the command. If the partition in use is ubi0_5, flash ubi0_6, and vice versa.
```bash
swupdate -i /tmp/turing_pi_____.swu -e stable,upgrade_ubi6
```
#### Powering On All Nodes at Startup

By default, the Turing Pi 2 doesn't power on any nodes at startup. While this can be useful in some scenarios, it is not ideal when, for example, recovering from a power outage. To change this default behavior, add a small startup script: create the file S99zallnodeson.sh in the /etc/init.d directory with the following content.
```bash
ssh root@192.168.1.10
vi /etc/init.d/S99zallnodeson.sh
```
```sh
#!/bin/sh
# Wait until the BMC web server is listening on port 80 before touching the nodes
while ! netstat -tuln | grep LISTEN | grep ':80 '; do sleep 1; done
# Then power on all nodes (see below)
tpi -p on
```
##### Turn on all nodes

`tpi -p on`

The file name must sort alphabetically after S99hello.sh, because the BMC web server is started from S99hello.sh.
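Init scripts under /etc/init.d generally have to be executable to run at boot, so presumably:

```bash
chmod +x /etc/init.d/S99zallnodeson.sh
```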
# Docker Swarm

## 1. Compute Modules

### Install OS / headless installation

1. Flash the image to an SD card / eMMC.
2. Go to the boot partition.
3. Change the DTB listed in /boot/extlinux/extlinux.conf to -model-a.dtb instead of -cm4.dtb.
4. Put the SD card / eMMC into the preferred base board and boot.
5. The first boot will take time as the user partition resizes to use the whole storage.

> ssh as "root" and complete setup
```
sudo apt update && sudo apt upgrade

for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done

sudo apt install -y docker-compose docker git vim screenfetch net-tools wget curl

sudo apt autoremove
```
#### Edit

> sudo vim /etc/sudoers.d/pleb

```
pleb ALL=(ALL:ALL) NOPASSWD: ALL
```
### Set Static IP

#### Create/Edit

> sudo vim /etc/systemd/network/end0.network

```
[Match]
Name=end0

[Network]
Address=192.168.1.21/24
Gateway=192.168.1.254
# OpenDNS
DNS=208.67.222.222
DNS=208.67.220.220
```
#### Edit

> sudo vim /etc/hosts

```
# Host addresses
127.0.0.1       localhost
127.0.1.1       shell1
# Nodes
192.168.1.186   shell1 shell1.local
192.168.1.243   shell2 shell2.local
192.168.1.194   shell3 shell3.local
192.168.1.222   shell4 shell4.local
```
#### Reboot

> On remote :

```
ssh-keygen -t ed25519
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell2
ssh pleb@shell2
```
#### Edit

> sudo vim /etc/ssh/sshd_config

```
PasswordAuthentication no
```
#### Restart service

```
sudo systemctl restart sshd
```
---

```
sudo systemctl enable iptables

sudo systemctl start iptables

sudo iptables -I INPUT -p tcp -m tcp --dport 2376 -j ACCEPT
sudo iptables -I INPUT -p tcp -m tcp --dport 2377 -j ACCEPT
sudo iptables -I INPUT -p tcp -m tcp --dport 7946 -j ACCEPT
sudo iptables -I INPUT -p udp -m udp --dport 7946 -j ACCEPT
sudo iptables -I INPUT -p udp -m udp --dport 4789 -j ACCEPT
sudo iptables -I INPUT -p 50 -j ACCEPT # allows ipsec when secure overlay is enabled
```
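A quick way to confirm the rules took effect (plain iptables listing, nothing Swarm-specific):

```
sudo iptables -L INPUT -n --line-numbers | grep -E '2376|2377|7946|4789'
```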
#### Restart...

---

## 2. Docker Swarm

### Install docker...
```
sudo apt install -y docker docker-compose git

sudo usermod -aG docker pleb

sudo systemctl enable docker.service
sudo systemctl start docker.service

sudo chmod 666 /var/run/docker.sock
```
---

#### On master

```
docker swarm init --advertise-addr 192.168.1.186
```

> *Swarm initialized: current node (3kdxixaa86m8pvag6jn0b70ut) is now a manager*.
#### On nodes

```
docker swarm join --token SWMTKN-1-2px1bindhl41x9h6l4ve7x15iwjryr0uf3ekmu7hz4bezjewwh-ae9vv4657zcki160s71vjn75z 192.168.1.186:2377
```

> *To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions*.
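If the worker join command gets lost, it can be printed again on the master at any time:

```
docker swarm join-token worker
```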
> Change the IP range if necessary ?!

```
docker network rm ingress
# Create in a different range
docker network create --driver overlay --ingress --subnet 192.168.1.0/16 --gateway 192.168.1.254 ingress
```
---

```
docker node ls

docker network ls
```
---

## 3. Keepalived

> *Load Balancer : install on all nodes*

```
sudo apt-get -y install keepalived
```
### On master node (shell1)

#### Create/Edit

> sudo vim /etc/keepalived/keepalived.conf

```
! Configuration File for keepalived

global_defs {
   notification_email {
     jme69@pm.me
   }
   notification_email_from admin@delmar.bzh
   smtp_server pro1.mail.ovh.net
   smtp_connect_timeout 30
   router_id docker_ingress
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER
    interface end0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass transatlantique
    }
    virtual_ipaddress {
        192.168.1.30
    }
}
```
### On Node2

#### Create/Edit

> sudo vim /etc/keepalived/keepalived.conf

```
! Configuration File for keepalived

global_defs {
   notification_email {
     jme69@pm.me
   }
   notification_email_from admin@delmar.bzh
   smtp_server pro1.mail.ovh.net
   smtp_connect_timeout 30
   router_id docker_ingress
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP
    interface end0
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass transatlantique
    }
    virtual_ipaddress {
        192.168.1.30
    }
}
```
### On Node3

#### Create/Edit

> sudo vim /etc/keepalived/keepalived.conf

```
! Configuration File for keepalived

global_defs {
   notification_email {
     jme69@pm.me
   }
   notification_email_from admin@delmar.bzh
   smtp_server pro1.mail.ovh.net
   smtp_connect_timeout 30
   router_id docker_ingress
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP
    interface end0
    virtual_router_id 51
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass transatlantique
    }
    virtual_ipaddress {
        192.168.1.30
    }
}
```
### On Node4

#### Create/Edit

> sudo vim /etc/keepalived/keepalived.conf

```
! Configuration File for keepalived

global_defs {
   notification_email {
     jme69@pm.me
   }
   notification_email_from admin@delmar.bzh
   smtp_server pro1.mail.ovh.net
   smtp_connect_timeout 30
   router_id docker_ingress
   vrrp_skip_check_adv_addr
   vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state BACKUP
    interface end0
    virtual_router_id 51
    priority 70
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass transatlantique
    }
    virtual_ipaddress {
        192.168.1.30
    }
}
```
#### Start/Enable Service

```
sudo systemctl enable keepalived
sudo systemctl start keepalived

ip a show end0
```
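To sanity-check the failover, stop keepalived on the master and watch the virtual IP move to the highest-priority backup, then start it again:

```
# on the master
sudo systemctl stop keepalived

# on a backup node, 192.168.1.30 should now appear on end0
ip a show end0

# on the master, restore it
sudo systemctl start keepalived
```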
---

## 4. Glusterfs

> *Distributed Storage : install on all nodes*

```
sudo apt-get -y install glusterfs-server

sudo systemctl enable glusterd
sudo systemctl start glusterd
```
> sudo fdisk -l

```
# Format your disk (if you decided to use the internal storage, just skip the format and mount steps)
sudo mkfs.xfs -f /dev/sda1

# Create brick folder location
sudo mkdir -p /data/glusterfs/volume/brick

# Add a line to /etc/fstab to auto-mount this disk on boot
echo "/dev/sda1 /data/glusterfs/volume/brick xfs defaults 0 0" | sudo tee -a /etc/fstab

# Create brick folder
# sudo mkdir -p /data/glusterfs/volume/brick/brick

# Mount
sudo mount -a

sudo systemctl daemon-reload

sudo gluster peer probe shell2
sudo gluster peer probe shell3
sudo gluster peer probe shell4
```
> Check

```
df -h /data/glusterfs/volume/brick

sudo gluster pool list

sudo gluster peer status
```
> For 2 nodes:

```
sudo gluster volume create docker-volume replica 2 transport tcp shell1:/mnt/data shell3:/mnt/data force
```
> For 3 nodes:

```
sudo gluster volume create dockervolume disperse 3 redundancy 1 shell1:/data/glusterfs/volume/brick shell2:/data/glusterfs/volume/brick shell3:/data/glusterfs/volume/brick force
```
#### Start Gluster Volume

```
sudo gluster volume info dockervolume

sudo gluster volume start dockervolume
```
> On all nodes :

```
sudo mkdir /mnt/data

echo "localhost:/dockervolume /mnt/data glusterfs defaults,_netdev 0 0" | sudo tee -a /etc/fstab

sudo mount -a

sudo systemctl daemon-reload
```
> Check
>
> Persistent shared storage will be "/mnt/data"

```
df -h /mnt/data

sudo gluster volume set dockervolume auth.allow 127.0.0.1,192.168.1.186,192.168.1.243,192.168.1.194,192.168.1.222
```
> When adding a new brick:

```
sudo gluster volume add-brick <volume> replica X <server>:<brick>
```
> Option

```
docker plugin install --alias glusterfs mikebarkmin/glusterfs SERVERS=shell1,shell2,shell3,shell4 VOLNAME=dockervolume
```
## 5. Portainer

Create the container directories:

```
sudo mkdir -p /var/lib/docker/volumes

sudo mkdir -p /mnt/data/portainer
```
> Edit

```
sudo vim portainer-agent-stack.yml
```
> Down at the bottom, remove :

```
volumes:
  portainer_data:
```
> And in section services -> portainer -> volumes, change it to :

```
volumes:
  - type: bind
    source: /mnt/data/portainer/portainer_data
    target: /data
```
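Since the bind source must exist before the stack is deployed, the directory presumably has to be created first:

```
sudo mkdir -p /mnt/data/portainer/portainer_data
```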
> Deploy

```
docker stack deploy -c portainer-agent-stack.yml portainer
```
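To check that the tasks were scheduled across the swarm:

```
docker stack ps portainer
```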
#### Upgrade

```
docker service ls

docker pull portainer/portainer-ce:latest

docker service update --image portainer/portainer-ce:latest --publish-add 9443:9443 --force portainer_portainer

docker pull portainer/agent:latest

docker service update --image portainer/agent:latest --force portainer_agent
```
# Kubernetes

## 1. Compute Modules

### Install OS / headless installation

1. Flash the Plebian image to an SD card / eMMC.
2. Boot.
3. ssh as "pleb/pleb" and complete setup `sudo vim /etc/sudoers.d/pleb`

```
pleb ALL=(ALL:ALL) NOPASSWD: ALL
```
```bash
sudo apt update && sudo apt install -y curl

curl -O https://overviewer.org/~pillow/up/75bea78e59/devicetrees-plebian-quartz64-20230601130309-arm64.deb

sudo dpkg -i devicetrees-plebian-quartz64-20230601130309-arm64.deb

# sudo sysctl -w net.core.rmem_max=2500000

sudo apt update && sudo apt -y upgrade

sudo hostnamectl set-hostname shell1

sudo apt install -y git wget screenfetch net-tools open-iscsi python3 python3-pip build-essential libssl-dev libffi-dev python3-dev

sudo apt install -y docker docker-compose
```
#### Reboot

`sudo blkid -s UUID -o value /dev/sda1`

```
shell1 | CHANGED | rc=0 >>
UUID=a4e3d8f1-11cc-482b-9596-57c44b122e48
shell2 | CHANGED | rc=0 >>
UUID=e264ad26-0767-4e6c-9131-0671f4ecfad4
shell3 | CHANGED | rc=0 >>
UUID=2056f43e-f41c-4772-9bf9-8f5a09dc911c
```
```bash
sudo mkfs.ext4 /dev/sda1

sudo mkdir -p /mnt/data

echo "UUID=... /mnt/data ext4 defaults 0 0" | sudo tee -a /etc/fstab

cat /etc/fstab

sudo mount -a
sudo systemctl daemon-reload
df -h /mnt/data
```
`sudo vim /etc/hosts`

```
# ----------------------------------------
# Host addresses
127.0.0.1      localhost shell1
# Nodes
192.168.1.109  shell1 shell1.local
192.168.1.163  shell2 shell2.local
#192.168.1.    shell3 shell3.local
#192.168.1.    shell4 shell4.local
# ----------------------------------------
```
#### On remote (PC) :

```bash
ssh-keygen -t ed25519
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell1
ssh pleb@shell1
```
`vim /etc/ssh/sshd_config`

```
# ----------------------------------------
PasswordAuthentication no
# ----------------------------------------
```
#### Restart service

`sudo systemctl restart sshd`
#### On shell1 :

```bash
ssh-keygen -t ed25519

ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell2
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell3
ssh-copy-id -f -i ~/.ssh/id_ed25519.pub pleb@shell4

ssh pleb@shell2
ssh pleb@shell3
ssh pleb@shell4

curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python3 get-pip.py --user

python3 -m pip install --user ansible
```
`vim hosts.ini`

```ini
[master]
shell1 ansible_connection=local

[node]
shell2 ansible_connection=ssh
shell3 ansible_connection=ssh
shell4 ansible_connection=ssh

[k3s_cluster:children]
master
node
```
## UFW

```bash
sudo apt install -y ufw

sudo ufw allow "OpenSSH"
sudo ufw enable

sudo ufw allow 6443/tcp
sudo ufw allow 2379:2380/tcp
sudo ufw allow 10250/tcp
sudo ufw allow 10259/tcp
sudo ufw allow 10257/tcp

sudo ufw allow 179/tcp
sudo ufw allow 4789/udp
sudo ufw allow 4789/tcp
sudo ufw allow 2379/tcp

sudo ufw allow 30000:32767/tcp

sudo ufw status
```
```bash
for pkg in docker.io docker-doc docker-compose podman-docker containerd runc; do sudo apt-get remove $pkg; done

sudo apt-get update

sudo apt-get install ca-certificates curl gnupg

sudo install -m 0755 -d /etc/apt/keyrings

curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

sudo chmod a+r /etc/apt/keyrings/docker.gpg

echo \
  "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
  "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
  sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

sudo apt-get update

sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin

sudo service docker start

sudo docker run hello-world

sudo chmod 666 /var/run/docker.sock

sudo groupadd docker

sudo usermod -aG docker $USER

sudo systemctl enable docker.service
sudo systemctl enable containerd.service
```
```bash
docker run -d -p 8000:8000 -p 9443:9443 --name portainer --restart=always -v /var/run/docker.sock:/var/run/docker.sock -v portainer_data:/data portainer/portainer-ee:latest
```
## 2. Install Kubernetes (via Ansible)

### k3s-ansible

#### Install Ansible

- [https://wiki.archlinux.org/title/Ansible#Installation](https://wiki.archlinux.org/title/Ansible#Installation "https://wiki.archlinux.org/title/Ansible#Installation")
- [https://docs.ansible.com/ansible/latest/installation_guide/index.html](https://docs.ansible.com/ansible/latest/installation_guide/index.html "https://docs.ansible.com/ansible/latest/installation_guide/index.html")

#### On remote (PC):

`apt install -y ansible`

#### Clone Git repo : [https://github.com/k3s-io/k3s-ansible](https://github.com/k3s-io/k3s-ansible "https://github.com/k3s-io/k3s-ansible")
```bash
cd www

git clone https://github.com/k3s-io/k3s-ansible.git

cp -R k3s-ansible/inventory/sample k3s-ansible/inventory/sandy
```
`vim www/k3s-ansible/inventory/sandy/hosts.ini`

```ini
[master]
192.168.1.209 ansible_connection=ssh var_hostname=shell1 var_disk=sda1 var_uuid=e13c29b3-5263-4ae7-82df-8ccdcc78e0b2

[node]
192.168.1.224 ansible_connection=ssh var_hostname=shell2 var_disk=sda1 var_uuid=85efb43b-5386-4a2d-9128-9a89b10538fa
192.168.1.123 ansible_connection=ssh var_hostname=shell3 var_disk=sda1 var_uuid=08af04f0-a35c-447d-9651-c46675a27142
192.168.1.233 ansible_connection=ssh var_hostname=shell4

[k3s_cluster:children]
master
node
```
`vim www/k3s-ansible/inventory/sandy/group_vars/all.yml`

> Change ansible_user to pleb
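In all.yml the relevant line then looks roughly like this (only ansible_user changes; the other sample values stay as they are):

```yaml
ansible_user: pleb
```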
#### Install playbook

```bash
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m apt -a "name=iptables state=present" --become
ansible-playbook ~/www/k3s-ansible/site.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini
```
#### On master:

```bash
sudo chmod 644 /etc/rancher/k3s/k3s.yaml

mkdir ~/.kube
sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
sudo chown $USER: ~/.kube/config
export KUBECONFIG=~/.kube/config
```
#### On PC :

```bash
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"

curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"

echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check

sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl

kubectl version --client

scp pleb@shell1:~/.kube/config ~/.kube/config

export KUBECONFIG=~/.kube/config

kubectl get nodes

kubectl cluster-info
```
#### Setting labels:

```bash
# sudo k3s kubectl label nodes shell1 kubernetes.io/role=worker
sudo k3s kubectl label nodes shell2 kubernetes.io/role=worker
sudo k3s kubectl label nodes shell3 kubernetes.io/role=worker
sudo k3s kubectl label nodes shell4 kubernetes.io/role=worker

# sudo k3s kubectl label nodes shell1 node-type=worker
sudo k3s kubectl label nodes shell2 node-type=worker
sudo k3s kubectl label nodes shell3 node-type=worker
sudo k3s kubectl label nodes shell4 node-type=worker
```
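The labels can then be verified from the master:

```bash
sudo k3s kubectl get nodes --show-labels
```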
#### ! Ping !

`ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ping`
#### ! Reset !

```bash
ansible-playbook ~/www/k3s-ansible/reset.yml -i ~/www/k3s-ansible/inventory/sandy/hosts.ini
```
#### ! Restart !

`ansible all -i ~/www/k3s-ansible/inventory/sandy/hosts.ini -a "shutdown -r now" -b`
## 3. Helm

#### On master

```bash
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
helm version
```
## 4. MetalLb

```bash
helm repo add metallb https://metallb.github.io/metallb
helm search repo metallb
helm upgrade --install metallb metallb/metallb --create-namespace --namespace metallb-system --wait
```
```bash
cat << 'EOF' | kubectl apply -f -
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default-pool
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.30-192.168.1.49
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default-pool
EOF
```
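A quick check that MetalLB and the address pool defined above are in place (resource names as created by the manifest):

```bash
kubectl -n metallb-system get pods
kubectl -n metallb-system get ipaddresspools,l2advertisements
```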
## 5. Local Storage Provider (Longhorn)

```bash
sudo systemctl enable iscsid.service
sudo systemctl start iscsid.service

ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=nfs-common state=present"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=open-iscsi state=present"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m apt -a "name=util-linux state=present"
```
```bash
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "lsblk -f"
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -b -m shell -a "blkid -s UUID -o value /dev/sda1"
```
#### Ansible mount:

```bash
ansible -i ~/www/k3s-ansible/inventory/sandy/hosts.ini k3s_cluster -m ansible.posix.mount -a "path=/mnt/data src=UUID={{ var_uuid }} fstype=ext4 state=mounted" -b
```
### Longhorn

```bash
helm repo add longhorn https://charts.longhorn.io

helm repo update

helm install longhorn longhorn/longhorn --namespace longhorn-system --create-namespace --version 1.4.2 --set defaultSettings.defaultDataPath="/mnt/data" --set service.ui.loadBalancerIP="192.168.1.31" --set service.ui.type="LoadBalancer"

sudo k3s kubectl -n longhorn-system get pod

sudo k3s kubectl get storageclass
```

- local-path (default)   rancher.io/local-path   Delete   WaitForFirstConsumer   false   111m
- longhorn (default)     driver.longhorn.io      Delete   Immediate              true    65m

Mark local-path as "non-default"
```bash
sudo k3s kubectl patch storageclass local-path -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
```
Mark longhorn as "default"

```bash
kubectl patch storageclass longhorn -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```
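Re-check which class is now flagged as default:

```bash
kubectl get storageclass
```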
#### UI

```bash
USER=admin; PASSWORD=transatlantique; echo "${USER}:$(openssl passwd -stdin -apr1 <<< ${PASSWORD})" >> auth
sudo k3s kubectl -n longhorn-system create secret generic basic-auth --from-file=auth
```
`vim longhorn-ingress.yml`

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
  annotations:
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/ssl-redirect: 'false'
    nginx.ingress.kubernetes.io/auth-secret: basic-auth
    nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required'
    nginx.ingress.kubernetes.io/proxy-body-size: 10000m
spec:
  rules:
  - http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: longhorn-frontend
            port:
              number: 80
```
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
  annotations:
    # add an annotation indicating the issuer to use
    cert-manager.io/cluster-issuer: letsencrypt-staging
spec:
  rules:
  - host: dashboard.delmar.bzh
    http:
      paths:
      - path: /
        backend:
          serviceName: longhorn-frontend
          servicePort: 8090
  tls:
  # cert-manager will store the certificate and key in this secret
  - secretName: dashboard-delmar-bzh-cert
    hosts:
    - dashboard.delmar.bzh
```
```bash
sudo k3s kubectl -n longhorn-system apply -f longhorn-ingress.yml
sudo k3s kubectl -n longhorn-system get ingress
```
## 6. HAProxy

```bash
helm repo add haproxytech https://haproxytech.github.io/helm-charts

helm install haproxy haproxytech/kubernetes-ingress -n kube-system \
  --set controller.service.nodePorts.http=30000 \
  --set controller.service.nodePorts.https=30001 \
  --set controller.service.nodePorts.stat=30002 \
  --set controller.service.type=LoadBalancer
```
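Because the controller service type is LoadBalancer, MetalLB should hand it an address from the pool defined above; this can be checked with:

```bash
kubectl -n kube-system get svc | grep haproxy
```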
## 7. Services

...

## Uninstall

> On master:

`/usr/local/bin/k3s-uninstall.sh`

> On workers:

`/usr/local/bin/k3s-agent-uninstall.sh`
# Networking

#### Hosts

```bash
sudo vim /etc/hosts
```
```bash
127.0.0.1       localhost
127.0.1.1       <hostname>.local <hostname>

# The following lines are desirable for IPv6 capable hosts
::1     localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts

#
192.168.1.254   mabbox.bytel.fr

# local
192.168.1.10    bikinibottom
192.168.1.11    bob
192.168.1.12    carlo
192.168.1.13    gary
192.168.1.14    sandy
192.168.1.15    krabs
192.168.1.16    sheldon
192.168.1.17    bernie

#
192.168.1.53    recalbox

# Optional
# Added by Docker Desktop
# To allow the same kube context to work on the host and the container:
127.0.0.1       kubernetes.docker.internal
# End of section
```
#### Debian

```bash
sudo vim /etc/network/interfaces
```
```bash
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).

source /etc/network/interfaces.d/*
```
```bash
sudo vim /etc/network/interfaces.d/local
```
```bash
# The loopback network interface
auto lo
iface lo inet loopback

# The primary network interface
# allow-hotplug end0
auto end0
iface end0 inet static
    address 192.168.1.12
    netmask 255.255.255.0
    gateway 192.168.1.254
    dns-nameservers 1.1.1.1 1.0.0.1
# This is an autoconfigured IPv6 interface
# iface end0 inet6 auto
```
```bash
sudo systemctl restart networking.service
```
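Check that the static address and default route are active:

```bash
ip a show end0
ip route
```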
#### Ubuntu

```bash
sudo vim /etc/netplan/50-cloud-init.yaml
```
```yaml
network:
  version: 2
  renderer: networkd
  ethernets:
    end0:
      dhcp4: no
      addresses:
        - 192.168.1.13/24
      routes:
        - to: default
          via: 192.168.1.254
      nameservers:
        addresses: [1.1.1.1, 1.0.0.1]
```
```bash
sudo netplan apply
```
# NFS Server

### Structure

| NAME          | MAJ:MIN | RM | SIZE | RO | TYPE | MOUNTPOINTS |
|---------------|---------|----|------|----|------|-------------|
| sdb           | 8:16    | 0  | 3.6T | 0  | disk |             |
| └─ hdds-datas | 253:0   | 0  | 7.3T | 0  | lvm  | /export/media |
|               |         |    |      |    |      | /srv/dev-disk-by-uuid-a67ae390-f36c-4b7c-98a0-4a3b5601c107 |
| sdc           | 8:32    | 0  | 3.6T | 0  | disk |             |
| └─ hdds-datas | 253:0   | 0  | 7.3T | 0  | lvm  | /export/media |
|               |         |    |      |    |      | /srv/dev-disk-by-uuid-a67ae390-f36c-4b7c-98a0-4a3b5601c107 |

```bash
apt install -y lvm2
```
### Disks

```bash
fdisk -l
```
```
Disk /dev/sdb: 3.64 TiB, 4000787030016 bytes, 7814037168 sectors
Disk model: ST4000VN006-3CW1
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes


Disk /dev/sdc: 3.64 TiB, 4000787030016 bytes, 7814037168 sectors
Disk model: ST4000VN006-3CW1
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes


Disk /dev/mapper/hdds-datas: 7.28 TiB, 8001566015488 bytes, 15628058624 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 4096 bytes
I/O size (minimum/optimal): 4096 bytes / 4096 bytes
```
### LVM

#### Physical Volumes (partitions)

```
pvcreate /dev/sdb
pvcreate /dev/sdc
```
#### Volume Group

```
vgcreate hdds /dev/sdb /dev/sdc
```
#### Logical Volume(s)

```
lvcreate -l 100%FREE -n datas hdds
```
```
pvdisplay
```
```
--- Physical volume ---
PV Name               /dev/sdb
VG Name               hdds
PV Size               <3.64 TiB / not usable <3.84 MiB
Allocatable           yes (but full)
PE Size               4.00 MiB
Total PE              953861
Free PE               0
Allocated PE          953861
PV UUID               zHiPKx-t2BO-15r3-xPfQ-DPlB-7K40-YxIdM1

--- Physical volume ---
PV Name               /dev/sdc
VG Name               hdds
PV Size               <3.64 TiB / not usable <3.84 MiB
Allocatable           yes (but full)
PE Size               4.00 MiB
Total PE              953861
Free PE               0
Allocated PE          953861
PV UUID               bGAdMs-JsRy-6r3M-0zWt-CYQJ-GPsB-KCnCo6
```
```bash
vgdisplay
```
```
--- Volume group ---
VG Name               hdds
System ID
Format                lvm2
Metadata Areas        2
Metadata Sequence No  6
VG Access             read/write
VG Status             resizable
MAX LV                0
Cur LV                1
Open LV               1
Max PV                0
Cur PV                2
Act PV                2
VG Size               <7.28 TiB
PE Size               4.00 MiB
Total PE              1907722
Alloc PE / Size       1907722 / <7.28 TiB
Free  PE / Size       0 / 0
VG UUID               DPZPo2-OwJt-R1wG-wJRB-HD38-JeVl-5ZFfnI
```
```
lvdisplay
```
```
--- Logical volume ---
LV Path                /dev/hdds/datas
LV Name                data
VG Name                hdds
LV UUID                qgdjwr-9Aau-p4mj-rksl-IeBB-SjD1-z6hede
LV Write Access        read/write
LV Creation host, time polito, 2023-08-01 10:10:14 +0200
LV Status              available
# open                 0
LV Size                <7.28 TiB
Current LE             1907722
Segments               2
Allocation             inherit
Read ahead sectors     auto
- currently set to     256
Block device           254:0
```
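The fstab entry further down mounts this volume as ext4, so the fresh logical volume presumably still needs a filesystem at this point (skip this if the volume already holds data):

```bash
sudo mkfs.ext4 /dev/hdds/datas
```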
```
# mkdir -p /mnt/data/cloud
# mkdir -p /mnt/data/gitea
# mkdir -p /mnt/data/jellystack
# mkdir -p /mnt/data/media

sudo vim /etc/exports
```
```
# /etc/exports: the access control list for filesystems which may be exported to NFS clients. See exports(5).
#
# Example for NFSv2 and NFSv3:
# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
#
# Example for NFSv4:
# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
#
# /export 192.168.1.0/24(rw,no_root_squash,no_subtree_check)
/export/cloud      192.168.1.0/24(rw,sync,no_root_squash,subtree_check,insecure)
/export/gitea      192.168.1.0/24(rw,sync,no_root_squash,subtree_check,insecure)
/export/jellystack 192.168.1.0/24(rw,sync,no_root_squash,subtree_check,insecure)
/export/media      192.168.1.0/24(rw,sync,no_root_squash,subtree_check,insecure)
```
```bash
sudo systemctl restart nfs-server
```
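The exports can also be re-read and listed without a full service restart:

```bash
sudo exportfs -ra   # re-export everything declared in /etc/exports
sudo exportfs -v    # list active exports with their options
```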
##### Get LVM's UUID

```bash
sudo blkid -s UUID -o value /dev/mapper/hdds-datas
```
##### Mount LVM

```
sudo vim /etc/fstab
```
```
# LVM
UUID=a67ae390-f36c-4b7c-98a0-4a3b5601c107 /mnt/data ext4 defaults,nofail,user_xattr,usrjquota=aquota.user,grpjquota=aquota.group,jqfmt=vfsv0,acl 0 2
# NFS
/mnt/data/cloud      /export/cloud      none bind,nofail 0 0
/mnt/data/gitea      /export/gitea      none bind,nofail 0 0
/mnt/data/jellystack /export/jellystack none bind,nofail 0 0
/mnt/data/media      /export/media      none bind,nofail 0 0
```
**PS : Check the content of /var/lib/nfs/etab to see the list of NFS shares and all the applied permission details**

```bash
cat /var/lib/nfs/etab
```
```shell
/export/media       192.168.1.0/24(rw,sync,wdelay,hide,nocrossmnt,insecure,no_root_squash,no_all_squash,subtree_check,secure_locks,acl,no_pnfs,anonuid=65534,anongid=65534,sec=sys,rw,insecure,no_root_squash,no_all_squash)
/export/jellystack  192.168.1.0/24(rw,sync,wdelay,hide,nocrossmnt,insecure,no_root_squash,no_all_squash,subtree_check,secure_locks,acl,no_pnfs,anonuid=65534,anongid=65534,sec=sys,rw,insecure,no_root_squash,no_all_squash)
/export/gitea       192.168.1.0/24(rw,sync,wdelay,hide,nocrossmnt,insecure,no_root_squash,no_all_squash,subtree_check,secure_locks,acl,no_pnfs,anonuid=65534,anongid=65534,sec=sys,rw,insecure,no_root_squash,no_all_squash)
/export/cloud       192.168.1.0/24(rw,sync,wdelay,hide,nocrossmnt,insecure,no_root_squash,no_all_squash,subtree_check,secure_locks,acl,no_pnfs,anonuid=65534,anongid=65534,sec=sys,rw,insecure,no_root_squash,no_all_squash)
```
#### Client(s)

```bash
sudo apt install nfs-common -y

sudo mkdir -p /nfs/jellystack
sudo mkdir -p /nfs/cloud
sudo mkdir -p /nfs/gitea
sudo mkdir -p /nfs/media

sudo vim /etc/fstab
```
```shell
krabs:/export/cloud      /nfs/cloud      nfs _netdev,x-systemd.automount,noauto,nofail,noatime,nolock,tcp,bg,actimeo=1800 0 0
krabs:/export/gitea      /nfs/gitea      nfs _netdev,x-systemd.automount,noauto,nofail,noatime,nolock,tcp,bg,actimeo=1800 0 0
krabs:/export/jellystack /nfs/jellystack nfs _netdev,x-systemd.automount,noauto,nofail,noatime,nolock,tcp,bg,actimeo=1800 0 0
krabs:/export/media      /nfs/media      nfs _netdev,x-systemd.automount,noauto,nofail,noatime,nolock,tcp,bg,actimeo=1800 0 0
```
```bash
sudo mount /nfs/cloud
sudo mount /nfs/gitea
sudo mount /nfs/jellystack
sudo mount /nfs/media

sudo systemctl daemon-reload
```