initial commit

commit f9d16ab078
2025-11-25 12:27:53 +03:30
102 changed files with 11156 additions and 0 deletions

@@ -0,0 +1,2 @@
# Run backup management daily at 02:00; alternative schedules are shown below.
0 2 * * * /manage-backups.sh 2>&1 | tee -a /var/log/backup.log
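# Other hypothetical schedules (values illustrative, not part of the shipped file):
# * * * * *  /manage-backups.sh 2>&1 | tee -a /var/log/backup.log   # every minute, for testing
# 15 2 * * * /manage-backups.sh 2>&1 | tee -a /var/log/backup.log   # daily at 02:15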

scripts/backup/manage-backups.sh Executable file

@@ -0,0 +1,197 @@
#!/bin/sh
set -eu
# Minimal PATH for cron-like environments
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
log() {
printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$*"
}
err() {
log "ERROR: $*" >&2
}
log "manage-backups.sh starting"
BACKUP_BASE=/backups
date_formatted() {
local format="${1:-%F_%H-%M}"
# Determine timezone to use. Priority:
# 1. $TIMEZONE environment variable
# 2. /etc/timezone file (common in many images)
# 3. /etc/localtime symlink target (if present and pointing into zoneinfo)
# 4. fall back to UTC
local tz="${TIMEZONE:-}"
if [ -z "$tz" ] && [ -f /etc/timezone ]; then
tz=$(cat /etc/timezone 2>/dev/null || true)
fi
if [ -z "$tz" ] && [ -L /etc/localtime ]; then
# readlink output might be like /usr/share/zoneinfo/Region/City
local target
target=$(readlink /etc/localtime 2>/dev/null || true)
case "$target" in
*/usr/share/zoneinfo/*) tz=${target#*/usr/share/zoneinfo/} ;;
*) tz="" ;;
esac
fi
# If still empty, try reading the container PID 1 environment (docker-compose env vars live there)
if [ -z "$tz" ] && [ -r /proc/1/environ ]; then
tz=$(tr '\0' '\n' < /proc/1/environ 2>/dev/null | sed -n 's/^TIMEZONE=//p' | head -n1 || true)
fi
if [ -n "$tz" ]; then
# Prefer the system's zoneinfo if available. We consider TZ working if the
# timezone offset (%z) differs from UTC's; zones whose current offset equals
# UTC (e.g. Europe/London in winter) fall through to the table below.
local utc_z tz_z
utc_z=$(date -u +%z)
tz_z=$(TZ="$tz" date +%z 2>/dev/null || true)
if [ -n "$tz_z" ] && [ "$tz_z" != "$utc_z" ]; then
TZ="$tz" date +"$format"
return
fi
# Fallback: some minimal images (alpine without tzdata) don't have
# zoneinfo. As a pragmatic fallback map a few common timezones to
# their current standard offsets (in seconds). This is best-effort
# and does not handle historical DST transitions.
local offset_secs=0
case "$tz" in
Asia/Tehran) offset_secs=12600 ;; # +03:30
Asia/Kolkata) offset_secs=19800 ;; # +05:30
Europe/London) offset_secs=0 ;; # UTC (note: ignores BST)
Europe/Paris) offset_secs=3600 ;; # +01:00 (ignores CEST)
America/New_York) offset_secs=-18000 ;; # -05:00 (ignores EDT)
America/Los_Angeles) offset_secs=-28800 ;; # -08:00 (ignores PDT)
UTC|Etc/UTC) offset_secs=0 ;;
*) offset_secs=0 ;;
esac
# Compute local epoch by adding offset to UTC epoch and format via UTC
local epoch_utc epoch_local
epoch_utc=$(date -u +%s)
epoch_local=$((epoch_utc + offset_secs))
# Most busybox- and GNU-based date implementations support -u -d "@SECONDS"
date -u -d "@${epoch_local}" +"$format"
else
date +"$format"
fi
}
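# Illustrative outputs (hypothetical moment, assuming TIMEZONE=Asia/Tehran):
#   date_formatted              -> 2025-11-25_12-27
#   date_formatted '%Y-%m-%d'   -> 2025-11-25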
wait_for_database_backups() {
local timeout="${DB_BACKUP_TIMEOUT:-600}"
local start_ts
start_ts=$(date -u +%s)
local status_file="$BACKUP_BASE/databases/.last_backup_complete"
log "Waiting for database backup completion marker at $status_file (timeout ${timeout}s)"
while :; do
if [ -f "$status_file" ]; then
local last_ts
last_ts=$(head -n1 "$status_file" 2>/dev/null || echo 0)
case "$last_ts" in
(""|*[!0-9]*) last_ts=0 ;;
esac
if [ "$last_ts" -ge "$start_ts" ]; then
log "Detected recent database backup completion at epoch $last_ts"
return 0
fi
fi
local now
now=$(date -u +%s)
if [ $(( now - start_ts )) -ge "$timeout" ]; then
err "Timed out waiting for database backups to finish"
return 1
fi
sleep 5
done
}
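# The marker is a single UTC-epoch line written atomically by pg-dump.sh;
# for manual testing one could simulate a fresh completion with:
#   date -u +%s > /backups/databases/.last_backup_complete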
cleanup_old_backups() {
local retention_days="${BACKUP_RETENTION_DAYS:-7}"
local retention_count="${BACKUP_RETENTION_COUNT:-0}"
case "$retention_days" in
''|*[!0-9]*) retention_days=7 ;;
esac
case "$retention_count" in
''|*[!0-9]*) retention_count=0 ;;
esac
log "Cleaning up old backups older than ${retention_days} days (keep ${retention_count} newest)"
if [ "$retention_days" -ge 0 ] 2>/dev/null; then
find "$BACKUP_BASE" -maxdepth 1 -mindepth 1 -type d ! -name databases -mtime +"${retention_days}" -exec rm -rf {} \; 2>/dev/null || true
find "$BACKUP_BASE/databases" -type f -mtime +"${retention_days}" -delete 2>/dev/null || true
fi
if [ "$retention_count" -gt 0 ] 2>/dev/null; then
local idx=0
local entry
local old_ifs="$IFS"
IFS='
'
set -- $(ls -1dt "$BACKUP_BASE"/*/ 2>/dev/null || echo)
IFS="$old_ifs"
for entry in "$@"; do
[ -z "$entry" ] && continue
case "$entry" in
"$BACKUP_BASE/databases"|"$BACKUP_BASE/databases/")
continue
;;
esac
idx=$((idx + 1))
if [ "$idx" -le "$retention_count" ]; then
continue
fi
log "Removing old backup directory $entry (exceeds retention count)"
rm -rf "$entry" 2>/dev/null || err "Failed to remove $entry"
done
fi
}
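# Example (hypothetical values): keep 14 days of backups but never more than
# the 10 newest dated directories:
#   BACKUP_RETENTION_DAYS=14 BACKUP_RETENTION_COUNT=10 /manage-backups.sh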
# Prepare dated backup directory
DATED_DIR="$BACKUP_BASE/$(date_formatted)"
mkdir -p "$DATED_DIR"
wait_for_database_backups || exit 1
# Archive application data
log "Archiving data paths..."
# Archive multiple paths if they exist. Keeps one archive per path.
archive_path() {
local src="$1" prefix="$2"
if [ -d "$src" ]; then
log "Archiving $src"
if bsdtar --xattrs --same-owner --numeric-owner -czf "$DATED_DIR/${prefix}_$(date_formatted).tar.gz" -C "$src" .; then
log "$src archived successfully"
# (reverted) do not force ownership changes here
else
err "Failed to archive $src"
return 1
fi
else
log "Source path $src not found; skipping"
fi
}
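# Example output of archive_path (hypothetical run at 2025-11-25 12:27):
#   /backups/2025-11-25_12-27/gitea_data_2025-11-25_12-27.tar.gz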
# Prefer canonical paths mounted into the backup-manager container
archive_path /odoo_db_data odoo_db_data || true
archive_path /odoo_config odoo_config || true
archive_path /gitea_data gitea_data || true
archive_path /opencloud_data opencloud_data || true
archive_path /opencloud_config opencloud_config || true
# Move today's database dumps into the dated directory
log "Database dumps pending move:"
ls -1 "$BACKUP_BASE/databases"/* 2>/dev/null || true
log "Moving database dumps to dated directory..."
# Note: the glob keys on the current minute's timestamp, so dumps written in
# an earlier minute may be left behind until the retention cleanup removes them.
mv "$BACKUP_BASE/databases"/*_"$(date_formatted)"*.sql.gz "$DATED_DIR"/ 2>/dev/null || true
cleanup_old_backups
# (reverted) do not change ownership of the dated directory
log "manage-backups.sh finished successfully"

@@ -0,0 +1,2 @@
# Run database dumps daily at 02:00; a testing variant is shown below.
0 2 * * * /pg-dump.sh 2>&1 | tee -a /var/log/backup.log
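# A hypothetical every-minute schedule for testing (not part of the shipped file):
# * * * * * /pg-dump.sh 2>&1 | tee -a /var/log/backup.log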

scripts/backup/pg-dump.sh Executable file

@@ -0,0 +1,69 @@
#!/bin/sh
set -eu
# Minimal PATH for cron-like environments
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
log() {
printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$*"
}
err() {
log "ERROR: $*" >&2
}
log "pg-dump.sh starting"
# Validate required env vars
: "${POSTGRES_USER:?POSTGRES_USER not set}"
: "${POSTGRES_HOST:?POSTGRES_HOST not set}"
: "${DB_LIST:?DB_LIST not set (space-separated database names)}"
date_formatted() {
local format="${1:-%F_%H-%M}"
# Use TIMEZONE env var if present; fall back to UTC
if [ -n "${TIMEZONE:-}" ]; then
TZ="$TIMEZONE" date +"$format"
else
date +"$format"
fi
}
BACKUP_DIR=/backups/databases
mkdir -p "$BACKUP_DIR"
log "Using backup dir: $BACKUP_DIR"
STATUS_FILE="$BACKUP_DIR/.last_backup_complete"
# Function to dump a specific database
dump_database() {
local db=$1
log "Checking existence of $db database..."
local db_exists
db_exists=$(psql -U "$POSTGRES_USER" -h "$POSTGRES_HOST" -tAc "SELECT 1 FROM pg_database WHERE datname='$db'")
if [[ "$db_exists" != "1" ]]; then
err "Database '$db' does not exist. Skipping dump."
return 2
fi
log "Dumping $db database..."
local outfile="$BACKUP_DIR/${db}_$(date_formatted).sql.gz"
if pg_dump -U "$POSTGRES_USER" -h "$POSTGRES_HOST" "$db" | gzip > "$outfile"; then
log "$db database dumped and compressed: $outfile"
return 0
else
err "Failed to dump $db database"
return 1
fi
}
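# DB_LIST is a space-separated list of database names; a hypothetical
# compose-level setting would be: DB_LIST="odoo gitea"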
# Dump each database
for db in $DB_LIST; do
dump_database "$db" || exit 1
done
tmp_status=$(mktemp "$BACKUP_DIR/.last_backup_complete.XXXXXX")
date -u +%s > "$tmp_status"
mv "$tmp_status" "$STATUS_FILE"
log "Recorded database backup completion marker at $STATUS_FILE"
log "pg-dump.sh finished successfully"

scripts/backup/restore-gitea.sh Executable file

@@ -0,0 +1,117 @@
#!/bin/sh
set -eu
usage() {
cat <<EOF
Usage: $(basename "$0") BACKUP_ID [DB_NAME] [DB_USER]
Restore the Gitea data volume and database from a backup run identified by BACKUP_ID.
BACKUP_ID should match the directory name under ./backups (e.g. 2025-11-17_12-30).
DB_NAME and DB_USER default to environment variables GITEA_DB / GITEA_DB_USER,
falling back to "gitea" / "gitea" if unset.
EOF
}
if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ]; then
usage
exit 0
fi
if [ $# -lt 1 ] || [ $# -gt 3 ]; then
usage >&2
exit 1
fi
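# Example invocation (hypothetical backup ID; DB name/user taken from .env):
#   ./scripts/backup/restore-gitea.sh 2025-11-17_12-30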
BACKUP_ID=$1
ROOT_DIR=$(cd "$(dirname "$0")/../.." && pwd)
BACKUPS_DIR="$ROOT_DIR/backups/$BACKUP_ID"
load_env_file() {
local file=$1
if [ -f "$file" ]; then
# shellcheck disable=SC1090
set -a
. "$file"
set +a
fi
}
# Load shared or service-specific environment files if present
load_env_file "$ROOT_DIR/.env"
DB_NAME=${GITEA_DB:-${GITEA_DB_NAME:-gitea}}
DB_USER=${GITEA_DB_USER:-gitea}
DB_PASSWORD=${GITEA_DB_PASSWORD:-${GITEA_DB_PASS:-}}
DB_NAME=${2:-$DB_NAME}
DB_USER=${3:-$DB_USER}
if [ ! -d "$BACKUPS_DIR" ]; then
printf 'Backup directory not found: %s\n' "$BACKUPS_DIR" >&2
exit 1
fi
COMPOSE_CMD="docker compose"
cd "$ROOT_DIR"
printf 'Stopping Gitea service before restore...\n'
if ! $COMPOSE_CMD stop gitea >/dev/null 2>&1; then
printf 'Warning: could not stop Gitea service (it may already be stopped).\n' >&2
fi
ARCHIVE="gitea_data_${BACKUP_ID}.tar.gz"
HOST_FILE="$BACKUPS_DIR/$ARCHIVE"
CONTAINER_PATH="/backups/$BACKUP_ID/$ARCHIVE"
if [ ! -f "$HOST_FILE" ]; then
printf 'Archive missing: %s\n' "$HOST_FILE" >&2
exit 1
fi
printf 'Restoring gitea_data from %s\n' "$HOST_FILE"
$COMPOSE_CMD run --rm restore restore-volume gitea_data "$CONTAINER_PATH"
DB_DUMP_DIR="$BACKUPS_DIR"
DB_DUMP_FILE="$DB_DUMP_DIR/${DB_NAME}_${BACKUP_ID}.sql"
DB_DUMP_GZ="$DB_DUMP_FILE.gz"
if [ -f "$DB_DUMP_GZ" ]; then
DB_SOURCE="$DB_DUMP_GZ"
elif [ -f "$DB_DUMP_FILE" ]; then
DB_SOURCE="$DB_DUMP_FILE"
else
DB_SOURCE=""
fi
if [ -n "$DB_SOURCE" ]; then
if [ -z "$DB_PASSWORD" ]; then
printf 'Database dump found (%s) but GITEA_DB_PASSWORD not set; skipping DB restore.\n' "$DB_SOURCE" >&2
else
printf 'Restoring database %s from %s\n' "$DB_NAME" "$DB_SOURCE"
DROP_FLAG=${GITEA_DROP_EXISTING_DB:-${DROP_EXISTING_DB:-1}}
RESTORE_ENV_ARGS="-e PGPASSWORD=$DB_PASSWORD -e DROP_EXISTING_DB=$DROP_FLAG"
if [ -n "${POSTGRES_ADMIN_USER:-}" ] && [ -n "${POSTGRES_ADMIN_PASSWORD:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_ADMIN_USER=$POSTGRES_ADMIN_USER -e POSTGRES_ADMIN_PASSWORD=$POSTGRES_ADMIN_PASSWORD"
fi
if [ -n "${POSTGRES_ADMIN_DB:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_ADMIN_DB=$POSTGRES_ADMIN_DB"
fi
if [ -n "${GITEA_DB_HOST:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_HOST=$GITEA_DB_HOST"
elif [ -n "${POSTGRES_HOST:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_HOST=$POSTGRES_HOST"
fi
# shellcheck disable=SC2086
$COMPOSE_CMD run --rm $RESTORE_ENV_ARGS restore \
restore-db "/backups/$BACKUP_ID/$(basename "$DB_SOURCE")" "$DB_NAME" "$DB_USER" "$DB_PASSWORD"
fi
else
printf 'No database dump found for %s in %s\n' "$DB_NAME" "$DB_DUMP_DIR" >&2
fi
printf '\nRestore command completed. Restart Gitea when ready.\n'
printf 'Starting Gitea service...\n'
if ! $COMPOSE_CMD up -d gitea; then
printf 'Warning: failed to start Gitea service. Please start it manually.\n' >&2
fi

scripts/backup/restore-odoo.sh Executable file

@@ -0,0 +1,123 @@
#!/bin/sh
set -eu
usage() {
cat <<EOF
Usage: $(basename "$0") BACKUP_ID [DB_NAME] [DB_USER]
Restore the standard Odoo volumes and the database from a backup run identified by BACKUP_ID.
BACKUP_ID should match the directory name under ./backups (e.g. 2025-11-17_12-30).
DB_NAME and DB_USER default to values from environment variables ODOO_DB / ODOO_DB_USER,
or fall back to "odoo" / "odoodbuser" if unset.
EOF
}
if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ]; then
usage
exit 0
fi
if [ $# -lt 1 ] || [ $# -gt 3 ]; then
usage >&2
exit 1
fi
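# Example invocation (hypothetical backup ID with explicit overrides):
#   ./scripts/backup/restore-odoo.sh 2025-11-17_12-30 odoo odoodbuser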
BACKUP_ID=$1
ROOT_DIR=$(cd "$(dirname "$0")/../.." && pwd)
BACKUPS_DIR="$ROOT_DIR/backups/$BACKUP_ID"
load_env_file() {
local file=$1
if [ -f "$file" ]; then
# shellcheck disable=SC1090
set -a
. "$file"
set +a
fi
}
# Load credentials from .env files if present
load_env_file "$ROOT_DIR/.env"
DB_NAME=${ODOO_DB:-odoo}
DB_USER=${ODOO_DB_USER:-odoodbuser}
DB_PASSWORD=${ODOO_DB_PASSWORD:-}
DB_NAME=${2:-$DB_NAME}
DB_USER=${3:-$DB_USER}
if [ ! -d "$BACKUPS_DIR" ]; then
printf 'Backup directory not found: %s\n' "$BACKUPS_DIR" >&2
exit 1
fi
COMPOSE_CMD="docker compose"
cd "$ROOT_DIR"
printf 'Stopping Odoo service before restore...\n'
if ! $COMPOSE_CMD stop odoo >/dev/null 2>&1; then
printf 'Warning: could not stop Odoo service (it may already be stopped).\n' >&2
fi
restore_volume() {
volume=$1
archive_name=$2
host_file="$BACKUPS_DIR/$archive_name"
container_path="/backups/$BACKUP_ID/$archive_name"
if [ ! -f "$host_file" ]; then
printf 'Skipping %s: archive missing (%s)\n' "$volume" "$host_file" >&2
return 0
fi
printf 'Restoring %s from %s\n' "$volume" "$host_file"
$COMPOSE_CMD run --rm restore restore-volume "$volume" "$container_path"
}
restore_volume odoo-config "odoo_config_${BACKUP_ID}.tar.gz"
restore_volume odoo-db-data "odoo_db_data_${BACKUP_ID}.tar.gz"
DB_DUMP_DIR="$BACKUPS_DIR"
DB_DUMP_FILE="$DB_DUMP_DIR/${DB_NAME}_${BACKUP_ID}.sql"
DB_DUMP_GZ="$DB_DUMP_FILE.gz"
if [ -f "$DB_DUMP_GZ" ]; then
DB_SOURCE="$DB_DUMP_GZ"
elif [ -f "$DB_DUMP_FILE" ]; then
DB_SOURCE="$DB_DUMP_FILE"
else
DB_SOURCE=""
fi
if [ -n "$DB_SOURCE" ]; then
if [ -z "$DB_PASSWORD" ]; then
printf 'Database dump found (%s) but ODOO_DB_PASSWORD not set; skipping DB restore.\n' "$DB_SOURCE" >&2
else
printf 'Restoring database %s from %s\n' "$DB_NAME" "$DB_SOURCE"
DROP_FLAG=${ODOO_DROP_EXISTING_DB:-${DROP_EXISTING_DB:-1}}
RESTORE_ENV_ARGS="-e PGPASSWORD=$DB_PASSWORD -e DROP_EXISTING_DB=$DROP_FLAG"
if [ -n "${POSTGRES_ADMIN_USER:-}" ] && [ -n "${POSTGRES_ADMIN_PASSWORD:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_ADMIN_USER=$POSTGRES_ADMIN_USER -e POSTGRES_ADMIN_PASSWORD=$POSTGRES_ADMIN_PASSWORD"
fi
if [ -n "${POSTGRES_ADMIN_DB:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_ADMIN_DB=$POSTGRES_ADMIN_DB"
fi
if [ -n "${ODOO_DB_HOST:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_HOST=$ODOO_DB_HOST"
elif [ -n "${POSTGRES_HOST:-}" ]; then
RESTORE_ENV_ARGS="$RESTORE_ENV_ARGS -e POSTGRES_HOST=$POSTGRES_HOST"
fi
# shellcheck disable=SC2086
$COMPOSE_CMD run --rm $RESTORE_ENV_ARGS restore \
restore-db "/backups/$BACKUP_ID/$(basename "$DB_SOURCE")" "$DB_NAME" "$DB_USER" "$DB_PASSWORD"
fi
else
printf 'No database dump found for %s in %s\n' "$DB_NAME" "$DB_DUMP_DIR" >&2
fi
printf '\nRestore commands completed. Restart the dependent services when ready.\n'
printf 'Starting Odoo service...\n'
if ! $COMPOSE_CMD up -d odoo; then
printf 'Warning: failed to start Odoo service. Please start it manually.\n' >&2
fi

@@ -0,0 +1,81 @@
#!/bin/sh
set -eu
usage() {
cat <<EOF
Usage: $(basename "$0") BACKUP_ID
Restore the OpenCloud volume from a backup run identified by BACKUP_ID.
BACKUP_ID should match the directory name under ./backups (e.g. 2025-11-17_12-30).
EOF
}
if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ]; then
usage
exit 0
fi
if [ $# -ne 1 ]; then
usage >&2
exit 1
fi
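# Example invocation (hypothetical backup ID; script path as in usage() above):
#   $(basename "$0") 2025-11-17_12-30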
BACKUP_ID=$1
ROOT_DIR=$(cd "$(dirname "$0")/../.." && pwd)
BACKUPS_DIR="$ROOT_DIR/backups/$BACKUP_ID"
if [ ! -d "$BACKUPS_DIR" ]; then
printf 'Backup directory not found: %s\n' "$BACKUPS_DIR" >&2
exit 1
fi
load_env_file() {
local file=$1
if [ -f "$file" ]; then
# shellcheck disable=SC1090
set -a
. "$file"
set +a
fi
}
# Load shared/service env if present (not strictly required but keeps behavior consistent)
load_env_file "$ROOT_DIR/.env"
COMPOSE_CMD="docker compose"
cd "$ROOT_DIR"
printf 'Stopping OpenCloud service before restore...\n'
if ! $COMPOSE_CMD stop opencloud >/dev/null 2>&1; then
printf 'Warning: could not stop OpenCloud service (it may already be stopped).\n' >&2
fi
DATA_ARCHIVE="opencloud_data_${BACKUP_ID}.tar.gz"
CONFIG_ARCHIVE="opencloud_config_${BACKUP_ID}.tar.gz"
DATA_HOST_FILE="$BACKUPS_DIR/$DATA_ARCHIVE"
CONFIG_HOST_FILE="$BACKUPS_DIR/$CONFIG_ARCHIVE"
DATA_CONTAINER_PATH="/backups/$BACKUP_ID/$DATA_ARCHIVE"
CONFIG_CONTAINER_PATH="/backups/$BACKUP_ID/$CONFIG_ARCHIVE"
if [ ! -f "$DATA_HOST_FILE" ]; then
printf 'Archive missing: %s\n' "$DATA_HOST_FILE" >&2
exit 1
fi
if [ ! -f "$CONFIG_HOST_FILE" ]; then
printf 'Archive missing: %s\n' "$CONFIG_HOST_FILE" >&2
exit 1
fi
printf 'Restoring opencloud-data from %s\n' "$DATA_HOST_FILE"
$COMPOSE_CMD run --rm restore restore-volume opencloud-data "$DATA_CONTAINER_PATH"
printf 'Restoring opencloud-config from %s\n' "$CONFIG_HOST_FILE"
$COMPOSE_CMD run --rm restore restore-volume opencloud-config "$CONFIG_CONTAINER_PATH"
printf '\nRestore command completed. Restart OpenCloud services when ready.\n'
printf 'Starting OpenCloud service...\n'
if ! $COMPOSE_CMD up -d opencloud; then
printf 'Warning: failed to start OpenCloud service. Please start it manually.\n' >&2
fi

scripts/backup/restore.sh Executable file

@@ -0,0 +1,219 @@
#!/bin/sh
set -eu
# Simple restore helper for backups produced by this stack.
# Usage:
# ./restore.sh list # list backups in ./backups
# ./restore.sh restore-volume <volume-name> <backup-archive-path>
# ./restore.sh restore-db <dump-file> <db-name> <db-user> <db-password>
#
# Notes:
# - This assumes you use `docker compose` in the repo root and the postgres service
# is named `postgres` in your compose stack. Adjust POSTGRES_SERVICE if different.
# - Stop services that use the target volume/database before restoring to avoid conflicts.
BACKUPS_DIR="${BACKUPS_DIR:-./backups}"
POSTGRES_SERVICE="${POSTGRES_SERVICE:-postgres}"
COMPOSE="docker compose"
log() { printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$*"; }
err() { log "ERROR: $*" >&2; }
require_bsdtar() {
if ! command -v bsdtar >/dev/null 2>&1; then
err "bsdtar not found. Please install libarchive/bsdtar in the current environment."
exit 2
fi
}
# bsdtar is only needed for in-container extraction; host-mode restores
# install it inside a helper alpine container instead.
if [ "${IN_CONTAINER:-0}" = "1" ]; then
require_bsdtar
fi
usage() {
cat <<EOF
Usage: $0 <command> [args]
Commands:
list
List dated backup directories under $BACKUPS_DIR
restore-volume <volume-name> <archive-path>
Extract a tar.gz archive into a named docker volume.
Example: $0 restore-volume odoo-db-data backups/2025-11-13/odoo_db_data_2025-11-13.tar.gz
restore-db <dump-file> <db-name> <db-user> <db-password>
Restore a SQL dump into the running Postgres service.
Example: $0 restore-db backups/2025-11-13/odoodb_2025-11-13.sql odoodb admin adminpass
EOF
}
list_backups() {
ls -1 "$BACKUPS_DIR" || true
}
restore_volume() {
volume="$1"
archive="$2"
if [ ! -f "$archive" ]; then
err "Archive not found: $archive"
exit 2
fi
# log "*** IMPORTANT: stop services that use volume '$volume' before running this restore."
# log "Proceeding in 5 seconds; press Ctrl-C to abort..."
# sleep 5
if [ "${IN_CONTAINER:-0}" = "1" ]; then
log "Running in-container restore: mapping volume name to mounted path."
target=""
case "$volume" in
opencloud-config*|*opencloud-config*) [ -d "/opencloud_config" ] && target="/opencloud_config" ;;
opencloud-data*|*opencloud-data*) [ -d "/opencloud_data" ] && target="/opencloud_data" ;;
odoo-config*|*odoo-config*) [ -d "/odoo_config" ] && target="/odoo_config" ;;
odoo-db-data*|*odoo-db-data*|*odoo*) [ -d "/odoo_db_data" ] && target="/odoo_db_data" ;;
gitea*|*gitea*) [ -d "/gitea_data" ] && target="/gitea_data" ;;
esac
if [ -z "$target" ]; then
err "Could not determine mount path for volume '$volume' inside container."
exit 4
fi
log "Extracting $archive into $target"
bsdtar --xattrs --same-owner --numeric-owner -xpf "$archive" -C "$target"
log "Restore finished. You may need to adjust ownership inside the target container if required."
return 0
fi
log "Restoring archive $archive into volume $volume"
docker run --rm -v "$volume":/data -v "$(pwd)/$archive":/backup.tar.gz alpine \
sh -c "apk add --no-cache libarchive-tools >/dev/null && bsdtar --xattrs --same-owner --numeric-owner -xpf /backup.tar.gz -C /data"
log "Restore finished. You may need to adjust ownership inside the target container if required."
}
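# Example (hypothetical archive produced by manage-backups.sh):
#   $0 restore-volume gitea_data backups/2025-11-17_12-30/gitea_data_2025-11-17_12-30.tar.gz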
restore_db() {
dumpfile="$1"
dbname="$2"
dbuser="$3"
dbpass="$4"
if [ ! -f "$dumpfile" ]; then
err "Dump file not found: $dumpfile"
exit 2
fi
host="${POSTGRES_HOST:-$POSTGRES_SERVICE}"
admin_db="${POSTGRES_ADMIN_DB:-postgres}"
admin_user="${POSTGRES_ADMIN_USER:-$dbuser}"
admin_pass="${POSTGRES_ADMIN_PASSWORD:-$dbpass}"
in_container="${IN_CONTAINER:-0}"
drop_existing="${DROP_EXISTING_DB:-1}"
stream_dump() {
case "$dumpfile" in
*.gz) gunzip -c "$dumpfile" ;;
*) cat "$dumpfile" ;;
esac
}
if [ "$in_container" = "1" ]; then
cont_id=""
else
cont_id="$($COMPOSE ps -q "$POSTGRES_SERVICE" || true)"
if [ -z "$cont_id" ]; then
err "Postgres service '$POSTGRES_SERVICE' not running. Start it with: $COMPOSE up -d $POSTGRES_SERVICE"
exit 3
fi
fi
run_psql_sql() {
user="$1"
pass="$2"
database="$3"
sql="$4"
if [ "$in_container" = "1" ]; then
PGPASSWORD="$pass" psql -h "$host" -U "$user" -d "$database" -v ON_ERROR_STOP=1 -v psql_restricted=off -tAc "$sql"
else
docker exec -i "$cont_id" env PGPASSWORD="$pass" psql -U "$user" -d "$database" -v ON_ERROR_STOP=1 -v psql_restricted=off -tAc "$sql"
fi
}
createdb_with_admin() {
if [ "$in_container" = "1" ]; then
PGPASSWORD="$admin_pass" createdb -h "$host" -U "$admin_user" -O "$dbuser" "$dbname"
else
docker exec -i "$cont_id" env PGPASSWORD="$admin_pass" createdb -U "$admin_user" -O "$dbuser" "$dbname"
fi
}
dropdb_with_admin() {
terminate_sql="SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname='$dbname' AND pid <> pg_backend_pid();"
run_psql_sql "$admin_user" "$admin_pass" "$admin_db" "$terminate_sql" >/dev/null 2>&1 || true
if [ "$in_container" = "1" ]; then
PGPASSWORD="$admin_pass" dropdb -h "$host" -U "$admin_user" "$dbname"
else
docker exec -i "$cont_id" env PGPASSWORD="$admin_pass" dropdb -U "$admin_user" "$dbname"
fi
}
ensure_database() {
db_exists=$(run_psql_sql "$admin_user" "$admin_pass" "$admin_db" "SELECT 1 FROM pg_database WHERE datname='$dbname'" 2>/dev/null | tr -d '[:space:]' || true)
if [ "$db_exists" = "1" ]; then
if [ "$drop_existing" = "1" ]; then
log "Database '$dbname' already exists. Dropping before restore (DROP_EXISTING_DB=1)."
if ! dropdb_with_admin 2>/dev/null; then
err "Failed to drop existing database '$dbname'. Ensure the configured credentials have DROP DATABASE privileges or set DROP_EXISTING_DB=0 to skip dropping."
return 4
fi
else
log "Database '$dbname' already exists; continuing without dropping (DROP_EXISTING_DB=0)."
return 0
fi
fi
log "Creating database '$dbname' owned by '$dbuser' using user '$admin_user'."
if createdb_with_admin 2>/dev/null; then
return 0
fi
err "Failed to create database '$dbname' with user '$admin_user'. Ensure the user has CREATEDB privileges or create the database manually."
return 4
}
log "Restoring SQL dump into $dbname on host/service ${host}."
log "*** IMPORTANT: stop users/applications that use the database or run in maintenance mode."
if ! ensure_database; then
return 4
fi
if [ "$in_container" = "1" ]; then
stream_dump | env PGPASSWORD="$dbpass" psql -h "$host" -U "$dbuser" -d "$dbname" -v ON_ERROR_STOP=1 -v psql_restricted=off >/dev/null
else
stream_dump | docker exec -i "$cont_id" env PGPASSWORD="$dbpass" psql -U "$dbuser" -d "$dbname" -v ON_ERROR_STOP=1 -v psql_restricted=off >/dev/null
fi
log "Database restore finished."
}
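# Example (hypothetical credentials; the admin vars are optional overrides):
#   POSTGRES_ADMIN_USER=postgres POSTGRES_ADMIN_PASSWORD=secret \
#   DROP_EXISTING_DB=1 $0 restore-db \
#     backups/2025-11-17_12-30/gitea_2025-11-17_12-30.sql.gz gitea gitea giteapass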
case "${1:-}" in
list)
list_backups
;;
restore-volume)
if [ $# -ne 3 ]; then usage; exit 2; fi
restore_volume "$2" "$3"
;;
restore-db)
if [ $# -ne 5 ]; then usage; exit 2; fi
restore_db "$2" "$3" "$4" "$5"
;;
*)
usage
exit 2
;;
esac