initial commit

This commit is contained in:
2025-11-25 12:27:53 +03:30
commit f9d16ab078
102 changed files with 11156 additions and 0 deletions

View File

@@ -0,0 +1,2 @@
# Run backup management daily at 02:15 — actually 02:00 below; use '* * * * *' to run every minute for testing
0 2 * * * /manage-backups.sh 2>&1 | tee -a /var/log/backup.log

197
scripts/backup/manage-backups.sh Executable file
View File

@@ -0,0 +1,197 @@
#!/bin/sh
set -eu
# Minimal PATH for cron-like environments
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# Timestamped (UTC, ISO-8601) logging helpers; err() reuses log() but
# routes the message to stderr.
log() {
printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$*"
}
err() {
log "ERROR: $*" >&2
}
log "manage-backups.sh starting"
# Root directory for all backup output (dated run dirs plus databases/).
BACKUP_BASE=/backups
# Print the current time using format $1 (default %F_%H-%M), rendered in the
# best timezone this function can determine (see priority list below).
date_formatted() {
local format="${1:-%F_%H-%M}"
# Determine timezone to use. Priority:
# 1. $TIMEZONE environment variable
# 2. /etc/timezone file (common in many images)
# 3. /etc/localtime symlink target (if present and pointing into zoneinfo)
# 4. fall back to UTC
local tz="${TIMEZONE:-}"
if [ -z "$tz" ] && [ -f /etc/timezone ]; then
tz=$(cat /etc/timezone 2>/dev/null || true)
fi
if [ -z "$tz" ] && [ -L /etc/localtime ]; then
# readlink output might be like /usr/share/zoneinfo/Region/City
local target
target=$(readlink /etc/localtime 2>/dev/null || true)
case "$target" in
*/usr/share/zoneinfo/*) tz=${target#*/usr/share/zoneinfo/} ;;
*) tz="" ;;
esac
fi
# If still empty, try reading the container PID 1 environment (docker-compose env vars live there)
if [ -z "$tz" ] && [ -r /proc/1/environ ]; then
tz=$(tr '\0' '\n' < /proc/1/environ 2>/dev/null | sed -n 's/^TIMEZONE=//p' | head -n1 || true)
fi
if [ -n "$tz" ]; then
# Prefer the system's zoneinfo if available. We consider TZ working
# if the timezone offset (%%z) differs from UTC's offset.
# NOTE(review): a zone whose current offset equals UTC (e.g. Europe/London
# in winter) fails this check and takes the offset-map fallback below.
local utc_z tz_z
utc_z=$(date -u +%z)
tz_z=$(TZ="$tz" date +%z 2>/dev/null || true)
if [ -n "$tz_z" ] && [ "$tz_z" != "$utc_z" ]; then
TZ="$tz" date +"$format"
return
fi
# Fallback: some minimal images (alpine without tzdata) don't have
# zoneinfo. As a pragmatic fallback map a few common timezones to
# their current standard offsets (in seconds). This is best-effort
# and does not handle historical DST transitions.
local offset_secs=0
case "$tz" in
Asia/Tehran) offset_secs=12600 ;; # +03:30
Asia/Kolkata) offset_secs=19800 ;; # +05:30
Europe/London) offset_secs=0 ;; # UTC (note: ignores BST)
Europe/Paris) offset_secs=3600 ;; # +01:00 (ignores CEST)
America/New_York) offset_secs=-18000 ;; # -05:00 (ignores EDT)
America/Los_Angeles) offset_secs=-28800 ;; # -08:00 (ignores PDT)
UTC|Etc/UTC) offset_secs=0 ;;
*) offset_secs=0 ;;
esac
# Compute local epoch by adding offset to UTC epoch and format via UTC
local epoch_utc epoch_local
epoch_utc=$(date -u +%s)
epoch_local=$((epoch_utc + offset_secs))
# Most busybox/git images support -d "@SECONDS" with -u
date -u -d "@${epoch_local}" +"$format"
else
# No timezone could be determined: use the system default timezone.
date +"$format"
fi
}
wait_for_database_backups() {
	# Block until pg-dump.sh publishes a completion marker whose epoch is at
	# least our own start time, or give up after DB_BACKUP_TIMEOUT seconds
	# (default 600). Returns 0 on success, 1 on timeout.
	local wait_secs="${DB_BACKUP_TIMEOUT:-600}"
	local began marker
	began=$(date -u +%s)
	marker="$BACKUP_BASE/databases/.last_backup_complete"
	log "Waiting for database backup completion marker at $marker (timeout ${wait_secs}s)"
	while :; do
		local stamp now
		if [ -f "$marker" ]; then
			stamp=$(head -n1 "$marker" 2>/dev/null || echo 0)
			# Anything empty or non-numeric means "no valid marker yet".
			case "$stamp" in
				(""|*[!0-9]*) stamp=0 ;;
			esac
			if [ "$stamp" -ge "$began" ]; then
				log "Detected recent database backup completion at epoch $stamp"
				return 0
			fi
		fi
		now=$(date -u +%s)
		if [ $(( now - began )) -ge "$wait_secs" ]; then
			err "Timed out waiting for database backups to finish"
			return 1
		fi
		sleep 5
	done
}
# Prune old backups two ways: by age (BACKUP_RETENTION_DAYS, default 7) and,
# when BACKUP_RETENTION_COUNT > 0, by keeping only that many newest dated
# directories. The databases/ staging dir is pruned by age only and is never
# counted toward (or removed by) the count-based retention.
cleanup_old_backups() {
local retention_days="${BACKUP_RETENTION_DAYS:-7}"
local retention_count="${BACKUP_RETENTION_COUNT:-0}"
# Sanitize: fall back to defaults when the env values are not plain integers.
case "$retention_days" in
''|*[!0-9]*) retention_days=7 ;;
esac
case "$retention_count" in
''|*[!0-9]*) retention_count=0 ;;
esac
log "Cleaning up old backups older than ${retention_days} days (keep ${retention_count} newest)"
# After sanitization retention_days is always a non-negative integer, so this
# guard always holds; kept as defensive belt-and-braces.
if [ "$retention_days" -ge 0 ] 2>/dev/null; then
find "$BACKUP_BASE" -maxdepth 1 -mindepth 1 -type d ! -name databases -mtime +"${retention_days}" -exec rm -rf {} \; 2>/dev/null || true
find "$BACKUP_BASE/databases" -type f -mtime +"${retention_days}" -delete 2>/dev/null || true
fi
if [ "$retention_count" -gt 0 ] 2>/dev/null; then
local idx=0
local entry
# Split the newest-first listing on newlines only.
# NOTE(review): this breaks on directory names containing a newline; the
# names generated by this stack never do.
local old_ifs="$IFS"
IFS='
'
set -- $(ls -1dt "$BACKUP_BASE"/*/ 2>/dev/null || echo)
IFS="$old_ifs"
for entry in "$@"; do
[ -z "$entry" ] && continue
# Never count or delete the databases/ staging directory.
case "$entry" in
"$BACKUP_BASE/databases"|"$BACKUP_BASE/databases/")
continue
;;
esac
idx=$((idx + 1))
if [ "$idx" -le "$retention_count" ]; then
continue
fi
log "Removing old backup directory $entry (exceeds retention count)"
rm -rf "$entry" 2>/dev/null || err "Failed to remove $entry"
done
fi
}
# Prepare dated backup directory
DATED_DIR="$BACKUP_BASE/$(date_formatted)"
mkdir -p "$DATED_DIR"
# Abort the whole run if the DB dumps never complete: archiving app data
# without matching dumps would produce an inconsistent backup set.
wait_for_database_backups || exit 1
# Archive application data
log "Archiving data paths..."
# Archive multiple paths if they exist. Keeps one archive per path.
archive_path() {
	# Create $DATED_DIR/<prefix>_<timestamp>.tar.gz from a mounted data path.
	# A missing source path is logged and skipped (status 0); a failed
	# archive returns 1.
	local data_dir="$1" name="$2"
	if [ ! -d "$data_dir" ]; then
		log "Source path $data_dir not found; skipping"
		return 0
	fi
	log "Archiving $data_dir"
	local out
	out="$DATED_DIR/${name}_$(date_formatted).tar.gz"
	if bsdtar --xattrs --same-owner --numeric-owner -czf "$out" -C "$data_dir" .; then
		log "$data_dir archived successfully"
		# (reverted) do not force ownership changes here
	else
		err "Failed to archive $data_dir"
		return 1
	fi
}
# Prefer canonical paths mounted into the backup-manager container
# Each step is best-effort (|| true): a missing mount must not abort the
# remaining archives or the dump collection below.
archive_path /odoo_db_data odoo_db_data || true
archive_path /odoo_config odoo_config || true
archive_path /gitea_data gitea_data || true
archive_path /opencloud_data opencloud_data || true
archive_path /opencloud_config opencloud_config || true
# Find and Move today's database dumps to dated directory
log "Moving files from database dump to dated directory..."
# list the names of the files to move
#log "Files to move:"
ls -1 "$BACKUP_BASE/databases"/* 2>/dev/null || true
log "Moving database dumps to dated directory..."
# NOTE(review): the glob matches dumps stamped with the *current* minute
# (date_formatted defaults to %F_%H-%M); dumps written in an earlier minute
# by pg-dump.sh would be left behind in databases/ — confirm both cron jobs
# always run within the same minute, or loosen this pattern.
mv "$BACKUP_BASE/databases"/*_$(date_formatted)*.sql.gz "$DATED_DIR"/ 2>/dev/null || true
cleanup_old_backups
# (reverted) do not change ownership of the dated directory
log "manage-backups.sh finished successfully"

View File

@@ -0,0 +1,2 @@
# Run database dumps daily at 02:00 (use '* * * * *' to run every minute for testing)
0 2 * * * /pg-dump.sh 2>&1 | tee -a /var/log/backup.log

69
scripts/backup/pg-dump.sh Executable file
View File

@@ -0,0 +1,69 @@
#!/bin/sh
set -eu
# Minimal PATH for cron-like environments
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# Timestamped (UTC, ISO-8601) logging helpers; err() writes to stderr.
log() {
printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$*"
}
err() {
log "ERROR: $*" >&2
}
log "pg-dump.sh starting"
# Validate required env vars
: "${POSTGRES_USER:?POSTGRES_USER not set}"
: "${POSTGRES_HOST:?POSTGRES_HOST not set}"
# Timestamp used in dump filenames. Unlike manage-backups.sh this has no
# zoneinfo fallback: TIMEZONE must be resolvable by date(1) when set.
date_formatted() {
local format="${1:-%F_%H-%M}"
# Use TIMEZONE env var if present; otherwise the system default timezone.
if [ -n "${TIMEZONE:-}" ]; then
TZ="$TIMEZONE" date +"$format"
else
date +"$format"
fi
}
BACKUP_DIR=/backups/databases
mkdir -p "$BACKUP_DIR"
log "Using backup dir: $BACKUP_DIR"
# Completion marker read by manage-backups.sh (wait_for_database_backups).
STATUS_FILE="$BACKUP_DIR/.last_backup_complete"
# Function to dump a specific database.
# Dumps $1 to $BACKUP_DIR/<db>_<timestamp>.sql.gz.
# Returns 0 on success, 1 on dump failure, 2 when the database is absent.
dump_database() {
	local db=$1
	log "Checking existence of $db database..."
	local db_exists
	db_exists=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -tAc "SELECT 1 FROM pg_database WHERE datname='$db'")
	# BUG FIX: was `[[ ... ]]`, a bashism that fails under this script's
	# #!/bin/sh shebang on dash/busybox ash; use POSIX `[ ... ]`.
	if [ "$db_exists" != "1" ]; then
		err "Database '$db' does not exist. Skipping dump."
		return 2
	fi
	log "Dumping $db database..."
	local outfile fail_flag
	outfile="$BACKUP_DIR/${db}_$(date_formatted).sql.gz"
	fail_flag=$(mktemp)
	# Plain /bin/sh has no `pipefail`, so a mid-stream pg_dump failure would
	# otherwise be masked by gzip's exit status; record it via a marker file.
	if { pg_dump -U "$POSTGRES_USER" -h "$POSTGRES_HOST" "$db" || echo 1 > "$fail_flag"; } | gzip > "$outfile" \
		&& [ ! -s "$fail_flag" ]; then
		rm -f "$fail_flag"
		log "$db database dumped and compressed: $outfile"
		return 0
	else
		rm -f "$fail_flag"
		err "Failed to dump $db database"
		return 1
	fi
}
# Dump each database
# DB_LIST is a space-separated list of database names. Fail fast with a clear
# message when it is missing; bare `set -u` would abort with a terser error.
: "${DB_LIST:?DB_LIST not set (space-separated list of databases to dump)}"
for db in $DB_LIST; do
dump_database "$db" || exit 1
done
# Publish the completion marker atomically (write a temp file, then rename)
# so manage-backups.sh never observes a half-written timestamp.
tmp_status=$(mktemp "$BACKUP_DIR/.last_backup_complete.XXXXXX")
date -u +%s > "$tmp_status"
mv "$tmp_status" "$STATUS_FILE"
log "Recorded database backup completion marker at $STATUS_FILE"
log "pg-dump.sh finished successfully"

117
scripts/backup/restore-gitea.sh Executable file
View File

@@ -0,0 +1,117 @@
#!/bin/sh
set -eu
# Print CLI help for this restore script.
usage() {
cat <<EOF
Usage: $(basename "$0") BACKUP_ID [DB_NAME] [DB_USER]
Restore the Gitea data volume and database from a backup run identified by BACKUP_ID.
BACKUP_ID should match the directory name under ./backups (e.g. 2025-11-17_12-30).
DB_NAME and DB_USER default to environment variables GITEA_DB / GITEA_DB_USER,
falling back to "gitea" / "gitea" if unset.
EOF
}
if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ]; then
usage
exit 0
fi
if [ $# -lt 1 ] || [ $# -gt 3 ]; then
usage >&2
exit 1
fi
BACKUP_ID=$1
# Repo root is two levels up from this script (scripts/backup/..).
ROOT_DIR=$(cd "$(dirname "$0")/../.." && pwd)
BACKUPS_DIR="$ROOT_DIR/backups/$BACKUP_ID"
# Source an env file with auto-export so its vars reach child processes.
load_env_file() {
local file=$1
if [ -f "$file" ]; then
# shellcheck disable=SC1090
set -a
. "$file"
set +a
fi
}
# Load shared or service-specific environment files if present
load_env_file "$ROOT_DIR/.env"
DB_NAME=${GITEA_DB:-${GITEA_DB_NAME:-gitea}}
DB_USER=${GITEA_DB_USER:-gitea}
DB_PASSWORD=${GITEA_DB_PASSWORD:-${GITEA_DB_PASS:-}}
# Positional args (if given) override the env-derived defaults.
DB_NAME=${2:-$DB_NAME}
DB_USER=${3:-$DB_USER}
if [ ! -d "$BACKUPS_DIR" ]; then
printf 'Backup directory not found: %s\n' "$BACKUPS_DIR" >&2
exit 1
fi
COMPOSE_CMD="docker compose"
cd "$ROOT_DIR"
printf 'Stopping Gitea service before restore...\n'
if ! $COMPOSE_CMD stop gitea >/dev/null 2>&1; then
printf 'Warning: could not stop Gitea service (it may already be stopped).\n' >&2
fi
# Volume restore: the archive must exist on the host; the restore helper
# container sees the same file under /backups/<BACKUP_ID>/.
ARCHIVE="gitea_data_${BACKUP_ID}.tar.gz"
HOST_FILE="$BACKUPS_DIR/$ARCHIVE"
CONTAINER_PATH="/backups/$BACKUP_ID/$ARCHIVE"
if [ ! -f "$HOST_FILE" ]; then
printf 'Archive missing: %s\n' "$HOST_FILE" >&2
exit 1
fi
printf 'Restoring gitea_data from %s\n' "$HOST_FILE"
$COMPOSE_CMD run --rm restore restore-volume gitea_data "$CONTAINER_PATH"
# Database restore: prefer the gzipped dump, fall back to a plain .sql file.
DB_DUMP_DIR="$BACKUPS_DIR"
DB_DUMP_FILE="$DB_DUMP_DIR/${DB_NAME}_${BACKUP_ID}.sql"
DB_DUMP_GZ="$DB_DUMP_FILE.gz"
if [ -f "$DB_DUMP_GZ" ]; then
DB_SOURCE="$DB_DUMP_GZ"
elif [ -f "$DB_DUMP_FILE" ]; then
DB_SOURCE="$DB_DUMP_FILE"
else
DB_SOURCE=""
fi
if [ -n "$DB_SOURCE" ]; then
if [ -z "$DB_PASSWORD" ]; then
printf 'Database dump found (%s) but GITEA_DB_PASSWORD not set; skipping DB restore.\n' "$DB_SOURCE" >&2
else
printf 'Restoring database %s from %s\n' "$DB_NAME" "$DB_SOURCE"
DROP_FLAG=${GITEA_DROP_EXISTING_DB:-${DROP_EXISTING_DB:-1}}
# Build `-e` args for docker compose run as one word-split string.
# NOTE(review): values containing whitespace (e.g. a password with a
# space) would be split apart here — confirm, or pass explicit args.
RESTORE_ENV_ARGS="-e PGPASSWORD=$DB_PASSWORD -e DROP_EXISTING_DB=$DROP_FLAG"
if [ -n "${POSTGRES_ADMIN_USER:-}" ] && [ -n "${POSTGRES_ADMIN_PASSWORD:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_ADMIN_USER=$POSTGRES_ADMIN_USER -e POSTGRES_ADMIN_PASSWORD=$POSTGRES_ADMIN_PASSWORD"
fi
if [ -n "${POSTGRES_ADMIN_DB:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_ADMIN_DB=$POSTGRES_ADMIN_DB"
fi
if [ -n "${GITEA_DB_HOST:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_HOST=$GITEA_DB_HOST"
elif [ -n "${POSTGRES_HOST:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_HOST=$POSTGRES_HOST"
fi
# shellcheck disable=SC2086
$COMPOSE_CMD run --rm $RESTORE_ENV_ARGS restore \
restore-db "/backups/$BACKUP_ID/$(basename "$DB_SOURCE")" "$DB_NAME" "$DB_USER" "$DB_PASSWORD"
fi
else
printf 'No database dump found for %s in %s\n' "$DB_NAME" "$DB_DUMP_DIR" >&2
fi
printf '\nRestore command completed. Restart Gitea when ready.\n'
printf 'Starting Gitea service...\n'
if ! $COMPOSE_CMD up -d gitea; then
printf 'Warning: failed to start Gitea service. Please start it manually.\n' >&2
fi

123
scripts/backup/restore-odoo.sh Executable file
View File

@@ -0,0 +1,123 @@
#!/bin/sh
set -eu
usage() {
cat <<EOF
Usage: $(basename "$0") BACKUP_ID [DB_NAME] [DB_USER]
Restore the standard Odoo volumes and the database from a backup run identified by BACKUP_ID.
BACKUP_ID should match the directory name under ./backups (e.g. 2025-11-17_12-30).
DB_NAME and DB_USER default to values from environment variables ODOO_DB / ODOO_DB_USER,
or fall back to "odoo" / "odoouser" if unset.
EOF
}
if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ]; then
usage
exit 0
fi
if [ $# -lt 1 ] || [ $# -gt 3 ]; then
usage >&2
exit 1
fi
BACKUP_ID=$1
ROOT_DIR=$(cd "$(dirname "$0")/../.." && pwd)
BACKUPS_DIR="$ROOT_DIR/backups/$BACKUP_ID"
load_env_file() {
local file=$1
if [ -f "$file" ]; then
# shellcheck disable=SC1090
set -a
. "$file"
set +a
fi
}
# Load credentials from .env files if present
load_env_file "$ROOT_DIR/.env"
DB_NAME=${ODOO_DB:-odoo}
DB_USER=${ODOO_DB_USER:-odoodbuser}
DB_PASSWORD=${ODOO_DB_PASSWORD:-}
DB_NAME=${2:-$DB_NAME}
DB_USER=${3:-$DB_USER}
if [ ! -d "$BACKUPS_DIR" ]; then
printf 'Backup directory not found: %s\n' "$BACKUPS_DIR" >&2
exit 1
fi
COMPOSE_CMD="docker compose"
cd "$ROOT_DIR"
printf 'Stopping Odoo service before restore...\n'
if ! $COMPOSE_CMD stop odoo >/dev/null 2>&1; then
printf 'Warning: could not stop Odoo service (it may already be stopped).\n' >&2
fi
restore_volume() {
	# Restore one named docker volume from its archive under $BACKUPS_DIR.
	# A missing archive is reported and skipped rather than treated as fatal.
	vol_name=$1
	tarball=$2
	src_on_host="$BACKUPS_DIR/$tarball"
	src_in_container="/backups/$BACKUP_ID/$tarball"
	if [ -f "$src_on_host" ]; then
		printf 'Restoring %s from %s\n' "$vol_name" "$src_on_host"
		$COMPOSE_CMD run --rm restore restore-volume "$vol_name" "$src_in_container"
	else
		printf 'Skipping %s: archive missing (%s)\n' "$vol_name" "$src_on_host" >&2
		return 0
	fi
}
# Restore the two Odoo volumes; restore_volume skips a missing archive
# instead of failing the whole run.
restore_volume odoo-config "odoo_config_${BACKUP_ID}.tar.gz"
restore_volume odoo-db-data "odoo_db_data_${BACKUP_ID}.tar.gz"
# Database restore: prefer the gzipped dump, fall back to a plain .sql file.
DB_DUMP_DIR="$BACKUPS_DIR"
DB_DUMP_FILE="$DB_DUMP_DIR/${DB_NAME}_${BACKUP_ID}.sql"
DB_DUMP_GZ="$DB_DUMP_FILE.gz"
if [ -f "$DB_DUMP_GZ" ]; then
DB_SOURCE="$DB_DUMP_GZ"
elif [ -f "$DB_DUMP_FILE" ]; then
DB_SOURCE="$DB_DUMP_FILE"
else
DB_SOURCE=""
fi
if [ -n "$DB_SOURCE" ]; then
if [ -z "$DB_PASSWORD" ]; then
printf 'Database dump found (%s) but ODOO_DB_PASSWORD not set; skipping DB restore.\n' "$DB_SOURCE" >&2
else
printf 'Restoring database %s from %s\n' "$DB_NAME" "$DB_SOURCE"
DROP_FLAG=${ODOO_DROP_EXISTING_DB:-${DROP_EXISTING_DB:-1}}
# Build `-e` args for docker compose run as one word-split string.
# NOTE(review): values containing whitespace (e.g. a password with a
# space) would be split apart here — confirm, or pass explicit args.
RESTORE_ENV_ARGS="-e PGPASSWORD=$DB_PASSWORD -e DROP_EXISTING_DB=$DROP_FLAG"
if [ -n "${POSTGRES_ADMIN_USER:-}" ] && [ -n "${POSTGRES_ADMIN_PASSWORD:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_ADMIN_USER=$POSTGRES_ADMIN_USER -e POSTGRES_ADMIN_PASSWORD=$POSTGRES_ADMIN_PASSWORD"
fi
if [ -n "${POSTGRES_ADMIN_DB:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_ADMIN_DB=$POSTGRES_ADMIN_DB"
fi
if [ -n "${ODOO_DB_HOST:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_HOST=$ODOO_DB_HOST"
elif [ -n "${POSTGRES_HOST:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_HOST=$POSTGRES_HOST"
fi
# shellcheck disable=SC2086
$COMPOSE_CMD run --rm $RESTORE_ENV_ARGS restore \
restore-db "/backups/$BACKUP_ID/$(basename "$DB_SOURCE")" "$DB_NAME" "$DB_USER" "$DB_PASSWORD"
fi
else
printf 'No database dump found for %s in %s\n' "$DB_NAME" "$DB_DUMP_DIR" >&2
fi
printf '\nRestore commands completed. Restart the dependent services when ready.\n'
printf 'Starting Odoo service...\n'
if ! $COMPOSE_CMD up -d odoo; then
printf 'Warning: failed to start Odoo service. Please start it manually.\n' >&2
fi

View File

@@ -0,0 +1,81 @@
#!/bin/sh
set -eu
# Print CLI help for this restore script.
usage() {
cat <<EOF
Usage: $(basename "$0") BACKUP_ID
Restore the OpenCloud volume from a backup run identified by BACKUP_ID.
BACKUP_ID should match the directory name under ./backups (e.g. 2025-11-17_12-30).
EOF
}
if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ]; then
usage
exit 0
fi
if [ $# -ne 1 ]; then
usage >&2
exit 1
fi
BACKUP_ID=$1
# Repo root is two levels up from this script (scripts/backup/..).
ROOT_DIR=$(cd "$(dirname "$0")/../.." && pwd)
BACKUPS_DIR="$ROOT_DIR/backups/$BACKUP_ID"
if [ ! -d "$BACKUPS_DIR" ]; then
printf 'Backup directory not found: %s\n' "$BACKUPS_DIR" >&2
exit 1
fi
# Source an env file with auto-export so its vars reach child processes.
load_env_file() {
local file=$1
if [ -f "$file" ]; then
# shellcheck disable=SC1090
set -a
. "$file"
set +a
fi
}
# Load shared/service env if present (not strictly required but keeps behavior consistent)
load_env_file "$ROOT_DIR/.env"
COMPOSE_CMD="docker compose"
cd "$ROOT_DIR"
printf 'Stopping OpenCloud service before restore...\n'
if ! $COMPOSE_CMD stop opencloud >/dev/null 2>&1; then
printf 'Warning: could not stop OpenCloud service (it may already be stopped).\n' >&2
fi
# Both archives are required; the restore container sees them under /backups/<ID>/.
DATA_ARCHIVE="opencloud_data_${BACKUP_ID}.tar.gz"
CONFIG_ARCHIVE="opencloud_config_${BACKUP_ID}.tar.gz"
DATA_HOST_FILE="$BACKUPS_DIR/$DATA_ARCHIVE"
CONFIG_HOST_FILE="$BACKUPS_DIR/$CONFIG_ARCHIVE"
DATA_CONTAINER_PATH="/backups/$BACKUP_ID/$DATA_ARCHIVE"
CONFIG_CONTAINER_PATH="/backups/$BACKUP_ID/$CONFIG_ARCHIVE"
if [ ! -f "$DATA_HOST_FILE" ]; then
printf 'Archive missing: %s\n' "$DATA_HOST_FILE" >&2
exit 1
fi
if [ ! -f "$CONFIG_HOST_FILE" ]; then
printf 'Archive missing: %s\n' "$CONFIG_HOST_FILE" >&2
exit 1
fi
printf 'Restoring opencloud-data from %s\n' "$DATA_HOST_FILE"
$COMPOSE_CMD run --rm restore restore-volume opencloud-data "$DATA_CONTAINER_PATH"
printf 'Restoring opencloud-config from %s\n' "$CONFIG_HOST_FILE"
$COMPOSE_CMD run --rm restore restore-volume opencloud-config "$CONFIG_CONTAINER_PATH"
printf '\nRestore command completed. Restart OpenCloud services when ready.\n'
printf 'Starting OpenCloud service...\n'
if ! $COMPOSE_CMD up -d opencloud; then
printf 'Warning: failed to start OpenCloud service. Please start it manually.\n' >&2
fi

219
scripts/backup/restore.sh Executable file
View File

@@ -0,0 +1,219 @@
#!/bin/sh
set -eu
# Simple restore helper for backups produced by this stack.
# Usage:
# ./restore.sh list # list backups in ./backups
# ./restore.sh restore-volume <volume-name> <backup-archive-path>
# ./restore.sh restore-db <dump-file> <db-name> <db-user> <db-password>
#
# Notes:
# - This assumes you use `docker compose` in the repo root and the postgres service
# is named `postgres` in your compose stack. Adjust POSTGRES_SERVICE if different.
# - Stop services that use the target volume/database before restoring to avoid conflicts.
# Overridable via environment; defaults suit the repo-root compose layout.
BACKUPS_DIR="${BACKUPS_DIR:-./backups}"
POSTGRES_SERVICE="${POSTGRES_SERVICE:-postgres}"
COMPOSE="docker compose"
require_bsdtar() {
	# Fail fast (exit 2) when bsdtar is unavailable.
	# BUG FIX: this function is invoked before err()/log() are defined a few
	# lines below, so the previous `err "..."` call itself failed with
	# "err: not found" and the script died with the wrong status. Print the
	# same timestamped message directly instead.
	if ! command -v bsdtar >/dev/null 2>&1; then
		printf '%s ERROR: bsdtar not found. Please install libarchive/bsdtar in the current environment.\n' \
			"$(date -u +%Y-%m-%dT%H:%M:%SZ)" >&2
		exit 2
	fi
}
# NOTE(review): this call runs before log()/err() below are defined, so
# require_bsdtar cannot rely on err() being available at this point.
require_bsdtar
# Timestamped (UTC) logging helpers used by everything below.
log() { printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$*"; }
err() { log "ERROR: $*" >&2; }
# Print CLI help ($0 and $BACKUPS_DIR are expanded at call time).
usage() {
cat <<EOF
Usage: $0 <command> [args]
Commands:
list
List dated backup directories under $BACKUPS_DIR
restore-volume <volume-name> <archive-path>
Extract a tar.gz archive into a named docker volume.
Example: $0 restore-volume odoo-db-data backups/2025-11-13/odoo_filestore_2025-11-13.tar.gz
restore-db <dump-file> <db-name> <db-user> <db-password>
Restore a SQL dump into the running Postgres service.
Example: $0 restore-db backups/2025-11-13/odoodb_2025-11-13.sql odoodb admin adminpass
EOF
}
# List the backups directory; a missing directory is not an error.
list_backups() {
ls -1 "$BACKUPS_DIR" || true
}
# Restore a tar.gz archive into a docker volume. Two modes:
# - IN_CONTAINER=1: we run inside the restore helper container, so map the
#   volume name onto one of the known mount points and extract directly.
# - otherwise: spin up a throwaway alpine container with the volume mounted
#   and extract there.
restore_volume() {
volume="$1"
archive="$2"
if [ ! -f "$archive" ]; then
err "Archive not found: $archive"
exit 2
fi
# log "*** IMPORTANT: stop services that use volume '$volume' before running this restore."
# log "Proceeding in 5 seconds; press Ctrl-C to abort..."
# sleep 5
if [ "${IN_CONTAINER:-0}" = "1" ]; then
log "Running in-container restore: mapping volume name to mounted path."
target=""
# NOTE(review): the broad *odoo* pattern on the odoo-db-data arm catches
# any other odoo-named volume that reaches that arm — confirm the arm
# order covers all intended volume names.
case "$volume" in
opencloud-config*|*opencloud-config*) [ -d "/opencloud_config" ] && target="/opencloud_config" ;;
opencloud-data*|*opencloud-data*) [ -d "/opencloud_data" ] && target="/opencloud_data" ;;
odoo-config*|*odoo-config*) [ -d "/odoo_config" ] && target="/odoo_config" ;;
odoo-db-data*|*odoo-db-data*|*odoo*) [ -d "/odoo_db_data" ] && target="/odoo_db_data" ;;
gitea*|*gitea*) [ -d "/gitea_data" ] && target="/gitea_data" ;;
esac
if [ -z "$target" ]; then
err "Could not determine mount path for volume '$volume' inside container."
exit 4
fi
log "Extracting $archive into $target"
bsdtar --xattrs --same-owner --numeric-owner -xpf "$archive" -C "$target"
log "Restore finished. You may need to adjust ownership inside the target container if required."
return 0
fi
log "Restoring archive $archive into volume $volume"
# Host-side path: mount the volume and archive into alpine and extract there.
docker run --rm -v "$volume":/data -v "$(pwd)/$archive":/backup.tar.gz alpine \
sh -c "apk add --no-cache libarchive-tools >/dev/null && bsdtar --xattrs --same-owner --numeric-owner -xpf /backup.tar.gz -C /data"
log "Restore finished. You may need to adjust ownership inside the target container if required."
}
# Restore a SQL dump (optionally gzipped) into Postgres.
# Args: dump-file, db-name, db-user, db-password.
# Honours: POSTGRES_HOST, POSTGRES_ADMIN_DB/USER/PASSWORD, IN_CONTAINER,
# DROP_EXISTING_DB (default 1 = drop an existing target DB before restore).
restore_db() {
dumpfile="$1"
dbname="$2"
dbuser="$3"
dbpass="$4"
if [ ! -f "$dumpfile" ]; then
err "Dump file not found: $dumpfile"
exit 2
fi
host="${POSTGRES_HOST:-$POSTGRES_SERVICE}"
admin_db="${POSTGRES_ADMIN_DB:-postgres}"
# Admin credentials fall back to the target user's own credentials.
admin_user="${POSTGRES_ADMIN_USER:-$dbuser}"
admin_pass="${POSTGRES_ADMIN_PASSWORD:-$dbpass}"
in_container="${IN_CONTAINER:-0}"
drop_existing="${DROP_EXISTING_DB:-1}"
# Emit the dump's SQL on stdout, transparently gunzipping *.gz files.
stream_dump() {
case "$dumpfile" in
*.gz) gunzip -c "$dumpfile" ;;
*) cat "$dumpfile" ;;
esac
}
if [ "$in_container" = "1" ]; then
cont_id=""
else
# Outside the helper container we exec psql inside the compose postgres
# service; resolve its container id up front.
cont_id="$($COMPOSE ps -q "$POSTGRES_SERVICE" || true)"
if [ -z "$cont_id" ]; then
err "Postgres service '$POSTGRES_SERVICE' not running. Start it with: $COMPOSE up -d $POSTGRES_SERVICE"
exit 3
fi
fi
# Run a single SQL string as the given user against the given database.
# NOTE(review): psql_restricted looks like a custom psql variable,
# presumably consumed by a psqlrc inside the image — confirm it is needed.
run_psql_sql() {
user="$1"
pass="$2"
database="$3"
sql="$4"
if [ "$in_container" = "1" ]; then
PGPASSWORD="$pass" psql -h "$host" -U "$user" -d "$database" -v ON_ERROR_STOP=1 -v psql_restricted=off -tAc "$sql"
else
docker exec -i "$cont_id" env PGPASSWORD="$pass" psql -U "$user" -d "$database" -v ON_ERROR_STOP=1 -v psql_restricted=off -tAc "$sql"
fi
}
# Create the target DB owned by $dbuser, using the admin credentials.
createdb_with_admin() {
if [ "$in_container" = "1" ]; then
PGPASSWORD="$admin_pass" createdb -h "$host" -U "$admin_user" -O "$dbuser" "$dbname"
else
docker exec -i "$cont_id" env PGPASSWORD="$admin_pass" createdb -U "$admin_user" -O "$dbuser" "$dbname"
fi
}
# Drop the target DB after force-terminating any sessions still attached.
dropdb_with_admin() {
terminate_sql="SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname='$dbname' AND pid <> pg_backend_pid();"
run_psql_sql "$admin_user" "$admin_pass" "$admin_db" "$terminate_sql" >/dev/null 2>&1 || true
if [ "$in_container" = "1" ]; then
PGPASSWORD="$admin_pass" dropdb -h "$host" -U "$admin_user" "$dbname"
else
docker exec -i "$cont_id" env PGPASSWORD="$admin_pass" dropdb -U "$admin_user" "$dbname"
fi
}
# Make sure $dbname exists, dropping it first when DROP_EXISTING_DB=1.
ensure_database() {
db_exists=$(run_psql_sql "$admin_user" "$admin_pass" "$admin_db" "SELECT 1 FROM pg_database WHERE datname='$dbname'" 2>/dev/null | tr -d '[:space:]' || true)
if [ "$db_exists" = "1" ]; then
if [ "$drop_existing" = "1" ]; then
log "Database '$dbname' already exists. Dropping before restore (DROP_EXISTING_DB=1)."
if ! dropdb_with_admin 2>/dev/null; then
err "Failed to drop existing database '$dbname'. Ensure the configured credentials have DROP DATABASE privileges or set DROP_EXISTING_DB=0 to skip dropping."
return 4
fi
else
log "Database '$dbname' already exists; continuing without dropping (DROP_EXISTING_DB=0)."
return 0
fi
fi
log "Creating database '$dbname' owned by '$dbuser' using user '$admin_user'."
if createdb_with_admin 2>/dev/null; then
return 0
fi
err "Failed to create database '$dbname' with user '$admin_user'. Ensure the user has CREATEDB privileges or create the database manually."
return 4
}
log "Restoring SQL dump into $dbname on host/service ${host}."
log "*** IMPORTANT: stop users/applications that use the database or run in maintenance mode."
if ! ensure_database; then
return 4
fi
# Pipe the dump into psql as the application user.
if [ "$in_container" = "1" ]; then
stream_dump | env PGPASSWORD="$dbpass" psql -h "$host" -U "$dbuser" -d "$dbname" -v ON_ERROR_STOP=1 -v psql_restricted=off >/dev/null
else
stream_dump | docker exec -i "$cont_id" env PGPASSWORD="$dbpass" psql -U "$dbuser" -d "$dbname" -v ON_ERROR_STOP=1 -v psql_restricted=off >/dev/null
fi
log "Database restore finished."
}
# Command dispatch. ${1:-} keeps `set -u` from aborting when no command is
# given; unknown or missing commands fall through to usage with exit 2.
case "${1:-}" in
list)
list_backups
;;
restore-volume)
if [ $# -ne 3 ]; then usage; exit 2; fi
restore_volume "$2" "$3"
;;
restore-db)
if [ $# -ne 5 ]; then usage; exit 2; fi
restore_db "$2" "$3" "$4" "$5"
;;
*)
usage
exit 2
;;
esac

50
scripts/odoo/README.md Normal file
View File

@@ -0,0 +1,50 @@
# odoo init helper
This folder contains helper scripts used to initialize the Odoo container on
first boot. The `odoo-init` one-shot runs a small wrapper (`init-wrapper.sh`)
that fixes filesystem ownerships for the Odoo named volumes and then runs the
actual initialization script (`init-odoo.sh`).
Why the wrapper runs as root
---------------------------
- The wrapper must run as `root` so it can chown the Docker named volumes after
they are created. Named volumes are created with root ownership by Docker on
many hosts, which causes Odoo (a non-root process inside the container) to
fail when writing the filestore.
- The wrapper drops to the `odoo` user before executing the init script so the
actual Odoo commands run with the correct, unprivileged uid/gid.
What it does
-----------
- Creates any missing directories the image expects (filestore, web, addons,
logs, config) and sets their ownership to the numeric `odoo` UID:GID.
- Writes a marker file `/var/lib/odoo/.odoo-init-done` to indicate the
one-shot ran successfully.
- Checks that `/init-odoo.sh` is present and readable. If the script is missing
  or unreadable, the wrapper exits with status `2` and logs an error.
Troubleshooting
---------------
- If you see PermissionError writing `/var/lib/odoo/filestore/*`:
- Ensure the named volumes are not mounted on the host with root-only
permissions. The wrapper attempts to chown them on container start; if that
fails check the host mount permissions.
- You can run the wrapper manually in a container to inspect output:
```sh
docker compose run --rm --entrypoint /bin/sh odoo-init -c /init-wrapper.sh
```
- If the wrapper logs `ERROR: /init-odoo.sh missing or not readable`:
- Verify `./scripts/odoo/init-odoo.sh` exists in the repo and that it is
mounted into the container in `odoo-init.yml` at `/init-odoo.sh`.
Notes
-----
- `odoo-init.yml` intentionally runs the `odoo-init` service as `root` so the
wrapper can perform chown. The wrapper drops privileges before running the
init logic.
- The marker file is a simple heuristic other tooling (or manual checks) can
use to detect that initialization already ran. Removing the named volumes
and re-creating them will require re-running the wrapper (it runs on every
start of the `odoo-init` one-shot service).

View File

@@ -0,0 +1,20 @@
#!/bin/sh
# Wrapper for odoo init: fix named volume ownership then run the original init script
set -eu
# Resolve the odoo user's numeric UID:GID, falling back to the current user
# when no 'odoo' account exists in the image.
OG="$(id -u odoo 2>/dev/null || id -u):$(id -g odoo 2>/dev/null || id -g)"
echo "Fixing Odoo volumes to UID:GID=$OG"
# BUG FIX: the config path was misspelled '/etc/odo'; Odoo's config volume is
# mounted at /etc/odoo, so the typo left the real config dir root-owned.
for d in /var/lib/odoo /etc/odoo; do
echo " -> chown $OG $d"
chown -R "$OG" "$d" || true
ls -ldn "$d" || true
done
echo "Running Odoo init script"
if command -v su >/dev/null 2>&1; then
echo "Dropping to 'odoo' user to run init script via su"
# NOTE(review): su resets the environment, so DB_* env vars do not reach
# /init-odoo.sh on this branch (the newer init-wrapper.sh passes them
# inline) — confirm which wrapper is actually mounted.
su -s /bin/sh odoo -c "/init-odoo.sh $*"
else
echo "su not available; running init script as current user"
exec /init-odoo.sh "$@"
fi

33
scripts/odoo/init-odoo.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/sh
# One-shot initializer: ensure the Odoo database has the base module
# installed, retrying while the DB server (or Odoo itself) comes up.
# Required env (passed inline by init-wrapper.sh): DB_NAME, HOST, USER,
# PASSWORD. NOTE(review): USER commonly collides with the login shell's
# $USER — confirm the wrapper always overrides it with the DB user.
echo "Checking if Odoo DB '$DB_NAME' exists and base module is installed..."
retries=0
max_retries=10
# Install the base module (a no-op when already installed); Odoo exits
# immediately afterwards thanks to --stop-after-init.
install_base_via_odoo() {
echo "Installing base module into '$DB_NAME' via Odoo CLI"
# Use --stop-after-init so the command exits after installing
if odoo -d "$DB_NAME" -i base --stop-after-init --db_host="$HOST" --db_user="$USER" --db_password="$PASSWORD"; then
echo "Base module installed successfully"
return 0
fi
echo "Failed to install base module via Odoo CLI"
return 1
}
# Retry loop: up to max_retries attempts, 2s apart, then fail the one-shot.
while true; do
# Try to install base into the DB. If the DB exists and base is already
# installed this will succeed or be a no-op. If the DB doesn't exist
# the command will fail and we'll proceed to initialize it.
if install_base_via_odoo; then
echo "Odoo DB '$DB_NAME' is ready with base module installed."
break
fi
retries=$((retries+1))
if [ "$retries" -ge "$max_retries" ]; then
echo "Initialization failed after $retries attempts. Exiting with error."
exit 1
fi
echo "Retry #$retries: waiting before next attempt..."
sleep 2
done

90
scripts/odoo/init-wrapper.sh Executable file
View File

@@ -0,0 +1,90 @@
#!/bin/sh
set -eu
# Timestamped (UTC) stdout logger used throughout this wrapper.
log() { printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$*"; }
# Determine the numeric UID:GID for the odoo user if present; otherwise
# fall back to the UID:GID of the current user (likely root when the
# wrapper runs as root inside the container).
OG_UID="$(id -u odoo 2>/dev/null || true)"
OG_GID="$(id -g odoo 2>/dev/null || true)"
if [ -n "$OG_UID" ] && [ -n "$OG_GID" ]; then
OG="$OG_UID:$OG_GID"
else
OG="$(id -u):$(id -g)"
fi
log "Fixing Odoo volumes to UID:GID=$OG"
# Paths we expect to ensure ownership for. If a path is missing we create it
# with sensible defaults so Odoo can write into it later.
# BUG FIX: the data path was misspelled '/var/lib/odo'; the Odoo data volume
# (and the marker file created below) live at /var/lib/odoo, so the typo
# created a stray directory and left the real filestore root-owned.
for d in /var/lib/odoo /etc/odoo; do
if [ -e "$d" ]; then
log " -> chown $OG $d"
if chown -R "$OG" "$d"; then
log " chown succeeded for $d"
else
log " WARNING: chown failed for $d (continuing)"
fi
ls -ldn "$d" || true
else
log " -> path $d does not exist, creating"
if mkdir -p "$d"; then
log " created $d"
chown -R "$OG" "$d" || log " WARNING: chown failed after creating $d"
ls -ldn "$d" || true
else
log " ERROR: failed to create $d"
fi
fi
done
# Touch a marker so other tooling can detect that the one-shot ran.
MARKER=/var/lib/odoo/.odoo-init-done
log "Creating marker $MARKER"
if touch "$MARKER"; then
chown "$OG" "$MARKER" || true
else
log "WARNING: could not create marker $MARKER"
fi
# Ensure the init script exists and is readable. If not, fail early with
# a clear exit code so upstream tooling can detect the problem.
if [ ! -r /init-odoo.sh ]; then
log "ERROR: /init-odoo.sh missing or not readable"
exit 2
fi
# Try to make it executable for the target user only if the file is writable.
# This avoids noisy "Read-only file system" errors when /init-odoo.sh is mounted
# read-only from the host.
if [ -w /init-odoo.sh ]; then
chmod +x /init-odoo.sh || log "WARNING: chmod /init-odoo.sh failed"
else
log "Skipping chmod: /init-odoo.sh is not writable (read-only mount)"
fi
log "Executing init script as 'odoo' user"
# Run the init script as the odoo user while passing DB env vars inline so
# they are visible even when using su which doesn't preserve the current
# environment by default.
# NOTE(review): under set -u this line aborts if DB_NAME/HOST/USER/PASSWORD
# are unset — confirm the compose service always provides all four.
exec su -s /bin/sh -c "DB_NAME=\"$DB_NAME\" HOST=\"$HOST\" USER=\"$USER\" PASSWORD=\"$PASSWORD\" /init-odoo.sh" odoo
# NOTE(review): everything from here down looks like an older copy of this
# wrapper that was accidentally appended (note the second shebang mid-file).
# It is unreachable — the `exec su ...` earlier in the file replaces the
# shell — and it still contains the '/var/lib/odo' path typo. Candidate for
# deletion.
#!/bin/sh
set -eu
OG="$(id -u odoo 2>/dev/null || id -u):$(id -g odoo 2>/dev/null || id -g)"
echo "Fixing Odoo volumes to UID:GID=$OG"
for d in /var/lib/odo /etc/odoo; do
echo " -> chown $OG $d"
chown -R "$OG" "$d" || true
ls -ldn "$d" || true
done
# Execute the original init script as the 'odoo' user (we run this wrapper as root).
# When switching user with su, the target user's environment is reset; pass the
# needed DB env vars inline so the init script sees them.
exec su -s /bin/sh -c \
"DB_NAME=\"$DB_NAME\" HOST=\"$HOST\" USER=\"$USER\" PASSWORD=\"$PASSWORD\" /init-odoo.sh" \
odoo

View File

@@ -0,0 +1,23 @@
#!/bin/sh
# Legacy one-shot backup: tar each service's data from /backup, dump
# Postgres, then keep only the 10 newest archives per service.
echo "Starting backup..."
BACKUP_DIR=/backups
DATE=$(date +%F)
# Gitea
tar -czf "$BACKUP_DIR/gitea-$DATE.tar.gz" -C /backup gitea
# Odoo
tar -czf "$BACKUP_DIR/odoo-$DATE.tar.gz" -C /backup odoo
# OpenCloud
tar -czf "$BACKUP_DIR/opencloud-$DATE.tar.gz" -C /backup opencloud
# PostgreSQL dump. NOTE(review): pg_dump dumps a SINGLE database (taken from
# $PGDATABASE or the user name when no dbname is given), not "all DBs" as
# the old comment claimed — use pg_dumpall for a full-cluster dump.
pg_dump -h postgres -U admin -F c -b -v -f "$BACKUP_DIR/postgres-$DATE.dump"
# Rotation: keep only 10 newest backups per service. The generated names
# contain no whitespace, so the ls|tail|xargs pipeline is safe here.
for svc in gitea odoo opencloud postgres; do
ls -1t "$BACKUP_DIR/${svc}-"* | tail -n +11 | xargs -r rm --
done
echo "Backup completed."

View File

@@ -0,0 +1,33 @@
#!/bin/sh
set -eu
# pipefail is not POSIX; enable it when the shell supports it, else continue.
set -o pipefail 2>/dev/null || true
# Create application roles/users (idempotent).
# Runs early to ensure roles exist before databases are created.
# Defaults below are development fallbacks; production should set these vars.
: "${GITEA_DB_USER:=gitea}"
: "${GITEA_DB_PASSWORD:=giteapass}"
: "${ODOO_DB_USER:=odoo}"
: "${ODOO_DB_PASSWORD:=odoopass}"
echo "[init] create-roles: gitea_user=${GITEA_DB_USER}, odoo_user=${ODOO_DB_USER}"
# True (0) when role $1 exists.
# FIX: connect as $POSTGRES_USER like create_role below does; without it psql
# falls back to the OS-default role, which may not exist when POSTGRES_USER
# is customised, making this check fail spuriously.
role_exists() {
psql --username "$POSTGRES_USER" -tAc "SELECT 1 FROM pg_roles WHERE rolname='$1'" | grep -q 1 || return 1
}
# Create role $1 with password $2 unless it already exists (idempotent).
create_role() {
local role="$1"; shift
local pass="$1"; shift
if role_exists "$role"; then
echo "[init] role '$role' already exists, skipping"
else
echo "[init] creating role '$role'"
# NOTE(review): role/password are interpolated into the SQL text; fine for
# trusted env values, but would break on names/passwords containing quotes.
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -c "CREATE USER \"${role}\" WITH ENCRYPTED PASSWORD '${pass}';"
fi
}
create_role "${GITEA_DB_USER}" "${GITEA_DB_PASSWORD}"
create_role "${ODOO_DB_USER}" "${ODOO_DB_PASSWORD}"
echo "[init] create-roles finished"

View File

@@ -0,0 +1,45 @@
#!/bin/sh
set -eu
# pipefail is not POSIX; enable it when the shell supports it, else continue.
set -o pipefail 2>/dev/null || true
# Create application databases and ensure ownership and grants (idempotent).
# Defaults below are development fallbacks; production should set these vars.
: "${GITEA_DB:=giteadb}"
: "${GITEA_DB_USER:=gitea}"
: "${ODOO_DB:=odoodb}"
: "${ODOO_DB_USER:=odoo}"
echo "[init] create-databases: gitea_db=${GITEA_DB}, odoo_db=${ODOO_DB}"
# True (0) when database $1 exists.
# FIX: connect as $POSTGRES_USER like the psql calls in create_or_alter_db;
# without it psql falls back to the OS-default role, which may not exist when
# POSTGRES_USER is customised, making this check fail spuriously.
db_exists() {
psql --username "$POSTGRES_USER" -tAc "SELECT 1 FROM pg_database WHERE datname='$1'" | grep -q 1 || return 1
}
# Print the owning role of database $1 (whitespace-stripped; empty if absent).
db_owner() {
psql --username "$POSTGRES_USER" -tAc "SELECT pg_catalog.pg_get_userbyid(datdba) FROM pg_database WHERE datname='$1'" | tr -d '[:space:]'
}
# Create database $1 owned by $2, or fix its owner if it already exists,
# then (re)grant all privileges. Idempotent across container restarts.
create_or_alter_db() {
local db="$1"; shift
local owner="$1"; shift
if db_exists "$db"; then
echo "[init] database '$db' already exists"
current_owner=$(db_owner "$db") || current_owner=""
if [ "$current_owner" != "$owner" ]; then
echo "[init] changing owner of '$db' from '$current_owner' to '$owner'"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -c "ALTER DATABASE \"${db}\" OWNER TO \"${owner}\";"
fi
else
echo "[init] creating database '$db' with owner '$owner'"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -c "CREATE DATABASE \"${db}\" OWNER \"${owner}\";"
fi
echo "[init] granting privileges on ${db} to ${owner}"
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" -c "GRANT ALL PRIVILEGES ON DATABASE \"${db}\" TO \"${owner}\";"
}
create_or_alter_db "${GITEA_DB}" "${GITEA_DB_USER}"
create_or_alter_db "${ODOO_DB}" "${ODOO_DB_USER}"
echo "[init] create-databases finished"