Add Caddy Manager and SSM compose stacks; update Authelia and Authentik configs; add tryton submodule

Signed-off-by: julien <jme69@delmar.bzh>
This commit is contained in:
2025-10-27 17:20:33 +01:00
parent 49d539d5a1
commit d194e3c380
9 changed files with 307 additions and 80 deletions

23
_caddymanager/compose.env Normal file
View File

@@ -0,0 +1,23 @@
# Backend
API_BASE_URL=http://localhost:3000/api/v1
APP_NAME=Caddy Manager
DARK_MODE=true
# Frontend
PORT=3000
# Database Engine Configuration
DB_ENGINE=sqlite # Options: 'sqlite' or 'mongodb'
# SQLite Configuration (used when DB_ENGINE=sqlite)
SQLITE_DB_PATH=./caddymanager.sqlite
# MongoDB Configuration (used when DB_ENGINE=mongodb)
MONGO_USERNAME=mongoadmin
MONGO_PASSWORD=QaG33feoWfL2W7F9AuRYTS2N4Bm94hEA
MONGODB_URI=mongodb://mongoadmin:QaG33feoWfL2W7F9AuRYTS2N4Bm94hEA@localhost:27017/caddymanager?authSource=admin
CORS_ORIGIN=http://localhost:5173
LOG_LEVEL=debug
CADDY_SANDBOX_URL=http://localhost:2019
PING_INTERVAL=30000
PING_TIMEOUT=2000
AUDIT_LOG_MAX_SIZE_MB=100
AUDIT_LOG_RETENTION_DAYS=90
JWT_SECRET=YPKCVW8qEEshVN6BHPb6tq4YdhQpdQrR
JWT_EXPIRATION=24h

View File

@@ -0,0 +1,84 @@
# bob
# https://github.com/caddymanager
---
name: caddymanager
networks:
caddymanager:
driver: bridge
volumes:
mongodb_data: # Only used when MongoDB profile is active
sqlite_data: # SQLite database storage
services:
# MongoDB database for persistent storage (optional - SQLite is used by default)
mongodb:
image: mongo:8.0
container_name: caddymanager-mongodb
restart: unless-stopped
environment:
- MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME:-mongoadmin}
- MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD:-someSecretPassword} # Change for production!
ports:
- "27017:27017" # Expose for local dev, remove for production
volumes:
- mongodb_data:/data/db
networks:
- caddymanager
profiles:
- mongodb # Use 'docker-compose --profile mongodb up' to include MongoDB
# Backend API server
backend:
image: caddymanager/caddymanager-backend:latest
container_name: caddymanager-backend
restart: unless-stopped
environment:
- PORT=3000
# Database Engine Configuration (defaults to SQLite)
- DB_ENGINE=sqlite # Options: 'sqlite' or 'mongodb'
# SQLite Configuration (used when DB_ENGINE=sqlite)
- SQLITE_DB_PATH=/app/data/caddymanager.sqlite
# MongoDB Configuration (used when DB_ENGINE=mongodb)
- MONGODB_URI=mongodb://$${MONGO_USERNAME:-mongoadmin}:$${MONGO_PASSWORD:-someSecretPassword}@mongodb:27017/caddymanager?authSource=admin
# NOTE(review): frontend is published on host port 20125 below — confirm whether this should be http://localhost:20125
- CORS_ORIGIN=http://localhost:80
- LOG_LEVEL=debug
- CADDY_SANDBOX_URL=http://localhost:2019
- PING_INTERVAL=30000
- PING_TIMEOUT=2000
- AUDIT_LOG_MAX_SIZE_MB=100
- AUDIT_LOG_RETENTION_DAYS=90
- METRICS_HISTORY_MAX=1000 # Optional: max number of in-memory metric history snapshots to keep
- JWT_SECRET=YPKCVW8qEEshVN6BHPb6tq4YdhQpdQrR
- JWT_EXPIRATION=24h
# Backend is now only accessible through frontend proxy
volumes:
- sqlite_data:/app/data # SQLite database storage
networks:
- caddymanager
# Frontend web UI
frontend:
image: caddymanager/caddymanager-frontend:latest
container_name: caddymanager-frontend
restart: unless-stopped
depends_on:
- backend
environment:
- BACKEND_HOST=backend:3000
- APP_NAME=Caddy Manager
- DARK_MODE=true
ports:
# - "80:80" # Expose web UI
- "20125:80"  # quoted to avoid YAML implicit-typing surprises; matches quoting style used elsewhere in this file
networks:
- caddymanager
# Notes:
# - SQLite is the default database engine - no additional setup required!
# - To use MongoDB instead, set DB_ENGINE=mongodb and start with: docker-compose --profile mongodb up
# - For production, use strong passwords and consider secrets management.
# - The backend uses SQLite by default, storing data in a persistent volume.
# - The frontend proxies all /api/* requests to the backend service.
# - Backend is not directly exposed - all API access goes through the frontend proxy.

View File

@@ -0,0 +1,18 @@
# SECRETS
SECRET=AN9r5OOcid00yyW1AcYL0GIr6YS9o01p
SALT=1AvwZUDNL0fAFkEg
VAULT_PWD=KVfvVh052dt7uzxQwyOL6cFHelS8uyO6
# MONGO
DB_HOST=mongo
DB_NAME=ssm
DB_PORT=27017
# REDIS
REDIS_HOST=redis
REDIS_PORT=6379
#SSM CONFIG
#SSM_INSTALL_PATH=/opt/squirrelserversmanager
#SSM_DATA_PATH=/data
TELEMETRY_ENABLED=true
# PROMETHEUS
PROMETHEUS_USERNAME=2wjvpf0zFJpvdCRq
PROMETHEUS_PASSWORD=kODab3yU9njlHM6qwkyeaPs4JQYk9Mkc

View File

@@ -0,0 +1,92 @@
---
name: ssm
volumes:
prometheus:
mongo:
valkey:
server:
services:
proxy:
restart: unless-stopped
image: "ghcr.io/squirrelcorporation/squirrelserversmanager-proxy:latest"
container_name: ssm-proxy
ports:
- "32520:8000"
depends_on:
- client
- mongo
- server
- redis
- prometheus
labels:
wud.display.name: "SSM - Proxy"
wud.watch.digest: false
prometheus:
image: "ghcr.io/squirrelcorporation/squirrelserversmanager-prometheus:latest"
container_name: ssm-prometheus
restart: unless-stopped
env_file: .env
volumes:
- prometheus:/prometheus
labels:
wud.display.name: "SSM - Prometheus"
mongo:
container_name: ssm-mongo
image: mongo
restart: unless-stopped
volumes:
- mongo:/data/db
command: --quiet
labels:
wud.display.name: "SSM - MongoDB"
redis:
container_name: ssm-cache
image: valkey/valkey
restart: unless-stopped
volumes:
- valkey:/data
command: --save 60 1
labels:
wud.display.name: "SSM - Redis"
server:
image: "ghcr.io/squirrelcorporation/squirrelserversmanager-server:latest"
container_name: ssm-server
restart: unless-stopped
healthcheck:
test: curl --fail http://localhost:3000/ping || exit 1
interval: 40s
timeout: 30s
retries: 3
start_period: 60s
external_links:
- mongo
- redis
- prometheus
depends_on:
- mongo
- redis
- prometheus
env_file: .env
environment:
NODE_ENV: production
volumes:
- server:/data
labels:
wud.display.name: "SSM - Server"
wud.watch.digest: false
client:
image: "ghcr.io/squirrelcorporation/squirrelserversmanager-client:latest"
container_name: ssm-client
restart: unless-stopped
depends_on:
- server
labels:
wud.display.name: "SSM - Client"
wud.watch.digest: false

View File

@@ -18,7 +18,7 @@ identity_validation:
authentication_backend:
file:
path: '/config/users_database.yml'
path: '/config/users_database.yaml'
access_control:
default_policy: 'deny'
@@ -39,7 +39,7 @@ session:
authelia_url: 'https://auth.delmar.bzh'
expiration: '1 hour' # 1 hour
inactivity: '5 minutes' # 5 minutes
default_redirection_url: 'https://public.example.com'
default_redirection_url: 'https://www.delmar.bzh'
regulation:
max_retries: 3

View File

@@ -1,3 +1,7 @@
# carlo (auth.delmar.bzh)
---
name: authelia
networks:
net:
driver: bridge
@@ -15,6 +19,6 @@ services:
ports:
- "9091:9091"
environment:
TZ: ${TZ}
TZ: ${TZ:-Europe/Paris}
volumes:
- config:/config

View File

@@ -1,10 +1,5 @@
COMPOSE_PORT_HTTP=62751
COMPOSE_PORT_HTTPS=62752
PG_PASS=7n7mXBhgI2wOUm1CAEErgbnWX7wK9fNn6wXH2eVnZSfadUEt
AUTHENTIK_SECRET_KEY=3R85z4JsPEKLKSFi+oxaoKypRlFkGctjElmNSiLzyafKlWfcRr+Y62yHfTBfedY19pv2fswhGL2F8TbU
AUTHENTIK_BOOTSTRAP_EMAIL=admin@delmar.bzh
AUTHENTIK_BOOTSTRAP_PASSWORD=X5r53JMPVg97EKfL
AUTHENTIK_BOOTSTRAP_TOKEN=rzDMUWQOxOWQIYXPcVY2XKlwgTau4vOSfaSfVYsLWQQSUUITAVDY3lqwvoTbUMVD
PG_PASS=yDX0DtPbaw7wLvwpdIUslEialMOiDnmAcUFoUVs1lZRGQlOc
AUTHENTIK_SECRET_KEY=uDRNjk2C/6FWDr1SFFFFyl9bKYIPRyvC/6VuhTwLDWDxN/AzmRhIGdLD78n10RFyb1ebIK01TeCUQ2DY
AUTHENTIK_ERROR_REPORTING__ENABLED=true
AUTHENTIK_EMAIL__HOST=pro1.mail.ovh.net
AUTHENTIK_EMAIL__PORT=587
@@ -13,4 +8,4 @@ AUTHENTIK_EMAIL__PASSWORD=sxS4GA8rBfmFkCFL
AUTHENTIK_EMAIL__USE_TLS=true
AUTHENTIK_EMAIL__USE_SSL=false
AUTHENTIK_EMAIL__TIMEOUT=10
AUTHENTIK_EMAIL__FROM=admin@delmar.bzh
AUTHENTIK_EMAIL__FROM=noreply@delmar.bzh

View File

@@ -1,95 +1,105 @@
volumes:
database:
redis:
# carlo (auth.delmar.bzh)
---
name: authentik
services:
postgresql:
container_name: authentik_postgres
image: docker.io/library/postgres:16-alpine
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
volumes:
- database:/var/lib/postgresql/data
container_name: authentik-db
env_file:
- .env
environment:
POSTGRES_DB: ${PG_DB:-authentik}
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
POSTGRES_USER: ${PG_USER:-authentik}
POSTGRES_DB: ${PG_DB:-authentik}
# env_file:
# - compose.env
redis:
container_name: authentik_redis
image: docker.io/library/redis:alpine
command: --save 60 1 --loglevel warning
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
start_period: 20s
test:
- CMD-SHELL
- pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}
timeout: 5s
image: docker.io/library/postgres:16-alpine
restart: unless-stopped
volumes:
- redis:/data
- database:/var/lib/postgresql/data
redis:
container_name: authentik-redis
command: --save 60 1 --loglevel warning
healthcheck:
interval: 30s
retries: 5
start_period: 20s
test:
- CMD-SHELL
- redis-cli ping | grep PONG
timeout: 3s
image: docker.io/library/redis:alpine
restart: unless-stopped
volumes:
- redis:/data
server:
container_name: authentik_server
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.4.0}
restart: unless-stopped
container_name: authentik-server
command: server
environment:
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
volumes:
- ./media:/media
- ./custom-templates:/templates
# env_file:
# - compose.env
ports:
- "${COMPOSE_PORT_HTTP:-9000}:9000"
- "${COMPOSE_PORT_HTTPS:-9443}:9443"
depends_on:
postgresql:
condition: service_healthy
redis:
condition: service_healthy
env_file:
- .env
environment:
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.8.4}
ports:
- "${COMPOSE_PORT_HTTP:-9000}:9000"
- "${COMPOSE_PORT_HTTPS:-9443}:9443"
restart: unless-stopped
volumes:
- media:/media
- custom-templates:/templates
worker:
container_name: authentik_worker
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.4.0}
restart: unless-stopped
container_name: authentik-worker
command: worker
environment:
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
# `user: root` and the docker socket volume are optional.
# See more for the docker socket integration here:
# https://goauthentik.io/docs/outposts/integrations/docker
# Removing `user: root` also prevents the worker from fixing the permissions
# on the mounted folders, so when removing this make sure the folders have the correct UID/GID
# (1000:1000 by default)
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./media:/media
- ./certs:/certs
- ./custom-templates:/templates
# env_file:
# - compose.env
depends_on:
postgresql:
condition: service_healthy
redis:
condition: service_healthy
env_file:
- .env
environment:
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY:?secret key required}
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2025.8.4}
restart: unless-stopped
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- media:/media
- certs:/certs
- custom-templates:/templates
volumes:
media:
driver: local
custom-templates:
driver: local
database:
driver: local
redis:
driver: local
certs:
driver: local

1
tryton Submodule

Submodule tryton added at dbd2b6bb49