#!/bin/sh
# manage-backups.sh -- orchestrates one backup run: waits for the
# database dump job to finish, archives mounted data paths into a
# dated directory under the backup root, and prunes old backup sets
# according to the retention policy.
#
# Abort on any unhandled command failure (-e) and on expansion of an
# unset variable (-u).
set -eu

# Minimal PATH for cron-like environments
# (cron and other stripped-down launchers may start with an empty or
# unusual PATH; pin it explicitly so tool lookups are deterministic).
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# Write one log line to stdout, prefixed with the current UTC time in
# ISO-8601 form (e.g. "2024-01-31T12:00:00Z message text").
log() {
  local stamp
  stamp=$(date -u +%Y-%m-%dT%H:%M:%SZ)
  printf '%s %s\n' "$stamp" "$*"
}
# Log an error message; the timestamped line is routed to stderr so
# normal stdout logging can be redirected separately.
err() {
  { log "ERROR: $*"; } >&2
}
log "manage-backups.sh starting"

# Root of the backup tree. Generalized to allow an environment
# override (useful for testing and alternate mounts); defaults to the
# /backups volume mounted into the container, preserving the original
# behavior when the variable is unset.
BACKUP_BASE="${BACKUP_BASE:-/backups}"
# date_formatted [FORMAT]
#
# Print the current time rendered with FORMAT (default "%F_%H-%M",
# e.g. "2024-01-31_12-00") in the deployment's local timezone.
#
# Timezone resolution priority:
#   1. $TIMEZONE environment variable
#   2. /etc/timezone file (common in many images)
#   3. /etc/localtime symlink target (if present and pointing into zoneinfo)
#   4. TIMEZONE read from PID 1's environment (docker-compose env vars)
#   5. fall back to the system default / UTC
date_formatted() {
  local format="${1:-%F_%H-%M}"
  local tz="${TIMEZONE:-}"
  if [ -z "$tz" ] && [ -f /etc/timezone ]; then
    tz=$(cat /etc/timezone 2>/dev/null || true)
  fi
  if [ -z "$tz" ] && [ -L /etc/localtime ]; then
    # readlink output might be like /usr/share/zoneinfo/Region/City
    # (possibly via a relative path, hence the leading * in the match).
    local target
    target=$(readlink /etc/localtime 2>/dev/null || true)
    case "$target" in
      */usr/share/zoneinfo/*) tz=${target#*/usr/share/zoneinfo/} ;;
      *) tz="" ;;
    esac
  fi
  # If still empty, try the container PID 1 environment: docker-compose
  # `environment:` vars live there even when this script runs from cron
  # with a scrubbed environment.
  if [ -z "$tz" ] && [ -r /proc/1/environ ]; then
    tz=$(tr '\0' '\n' < /proc/1/environ 2>/dev/null | sed -n 's/^TIMEZONE=//p' | head -n1 || true)
  fi
  if [ -n "$tz" ]; then
    # Prefer the system's zoneinfo if available. We consider TZ working
    # if the timezone offset (%z) differs from UTC's offset.
    # NOTE(review): a valid zone whose current offset equals UTC (e.g.
    # Europe/London in winter) takes the fallback path below; the
    # printed time is the same, so this is harmless.
    local utc_z tz_z
    utc_z=$(date -u +%z)
    tz_z=$(TZ="$tz" date +%z 2>/dev/null || true)
    if [ -n "$tz_z" ] && [ "$tz_z" != "$utc_z" ]; then
      TZ="$tz" date +"$format"
      return
    fi

    # Fallback: some minimal images (alpine without tzdata) don't have
    # zoneinfo. As a pragmatic fallback map a few common timezones to
    # their current standard offsets (in seconds). This is best-effort
    # and does not handle historical DST transitions.
    local offset_secs=0
    case "$tz" in
      Asia/Tehran) offset_secs=12600 ;; # +03:30
      Asia/Kolkata) offset_secs=19800 ;; # +05:30
      Europe/London) offset_secs=0 ;; # UTC (note: ignores BST)
      Europe/Paris) offset_secs=3600 ;; # +01:00 (ignores CEST)
      America/New_York) offset_secs=-18000 ;; # -05:00 (ignores EDT)
      America/Los_Angeles) offset_secs=-28800 ;; # -08:00 (ignores PDT)
      UTC|Etc/UTC) offset_secs=0 ;;
      *) offset_secs=0 ;;
    esac

    # Compute local epoch by adding offset to UTC epoch and format via UTC
    local epoch_utc epoch_local
    epoch_utc=$(date -u +%s)
    epoch_local=$((epoch_utc + offset_secs))
    # Most busybox/GNU date builds support -u with -d "@SECONDS".
    date -u -d "@${epoch_local}" +"$format"
  else
    date +"$format"
  fi
}
# Block until the database backup job reports completion, or time out.
#
# The dump job writes the completion epoch (first line, digits only)
# to $BACKUP_BASE/databases/.last_backup_complete; we accept only a
# marker written at or after our own start time. Polls every 5s.
# Timeout is $DB_BACKUP_TIMEOUT seconds (default 600).
# Returns 0 on success, 1 on timeout.
wait_for_database_backups() {
  local wait_limit="${DB_BACKUP_TIMEOUT:-600}"
  local began
  began=$(date -u +%s)
  local marker="$BACKUP_BASE/databases/.last_backup_complete"
  log "Waiting for database backup completion marker at $marker (timeout ${wait_limit}s)"

  while :; do
    if [ -f "$marker" ]; then
      local stamp
      stamp=$(head -n1 "$marker" 2>/dev/null || echo 0)
      # Non-numeric or empty content is treated as "never completed".
      case "$stamp" in
        ''|*[!0-9]*) stamp=0 ;;
      esac
      if [ "$stamp" -ge "$began" ]; then
        log "Detected recent database backup completion at epoch $stamp"
        return 0
      fi
    fi

    local moment
    moment=$(date -u +%s)
    if [ $(( moment - began )) -ge "$wait_limit" ]; then
      err "Timed out waiting for database backups to finish"
      return 1
    fi
    sleep 5
  done
}
# cleanup_old_backups
#
# Enforce the retention policy under $BACKUP_BASE:
#   - BACKUP_RETENTION_DAYS  (default 7): delete dated directories and
#     database dump files older than this many days.
#   - BACKUP_RETENTION_COUNT (default 0 = unlimited): additionally keep
#     only the N newest dated directories.
# The databases/ staging directory itself is never removed.
cleanup_old_backups() {
  local retention_days="${BACKUP_RETENTION_DAYS:-7}"
  local retention_count="${BACKUP_RETENTION_COUNT:-0}"

  # Fall back to safe defaults when the env vars are not plain integers.
  case "$retention_days" in
    ''|*[!0-9]*) retention_days=7 ;;
  esac
  case "$retention_count" in
    ''|*[!0-9]*) retention_count=0 ;;
  esac

  log "Cleaning up old backups older than ${retention_days} days (keep ${retention_count} newest)"

  if [ "$retention_days" -ge 0 ] 2>/dev/null; then
    # Age-based pruning: dated dirs at the top level (databases/ is
    # excluded by name) and individual files in the dump staging area.
    # Errors (e.g. a dir vanishing mid-scan) are intentionally ignored.
    find "$BACKUP_BASE" -maxdepth 1 -mindepth 1 -type d ! -name databases -mtime +"${retention_days}" -exec rm -rf {} \; 2>/dev/null || true
    find "$BACKUP_BASE/databases" -type f -mtime +"${retention_days}" -delete 2>/dev/null || true
  fi

  if [ "$retention_count" -gt 0 ] 2>/dev/null; then
    local idx=0
    local entry
    local listing
    # ls -1dt prints directories newest-first, one per line. Capture
    # the listing while pathname expansion is still enabled so the
    # "$BACKUP_BASE"/*/ glob works.
    listing=$(ls -1dt "$BACKUP_BASE"/*/ 2>/dev/null || echo)
    local old_ifs="$IFS"
    IFS='
'
    # Fix: disable pathname expansion (set -f) around the unquoted
    # split. Previously a directory name containing glob characters
    # (*, ?, [) would be re-expanded against the filesystem during
    # "set -- $(...)", corrupting the entry list.
    set -f
    set -- $listing
    set +f
    IFS="$old_ifs"
    for entry in "$@"; do
      [ -z "$entry" ] && continue
      # Never count or delete the database dump staging directory.
      case "$entry" in
        "$BACKUP_BASE/databases"|"$BACKUP_BASE/databases/")
          continue
          ;;
      esac
      idx=$((idx + 1))
      # The first $retention_count entries are the newest -- keep them.
      if [ "$idx" -le "$retention_count" ]; then
        continue
      fi
      log "Removing old backup directory $entry (exceeds retention count)"
      rm -rf "$entry" 2>/dev/null || err "Failed to remove $entry"
    done
  fi
}
# Create the dated directory that receives this run's archives; its
# name carries the run's local timestamp.
DATED_DIR="${BACKUP_BASE}/$(date_formatted)"
mkdir -p "$DATED_DIR"

# Do not start archiving until the database dump job has finished.
if ! wait_for_database_backups; then
  exit 1
fi

# Archive application data: one gzip'd tarball per mounted data path.
log "Archiving data paths..."
# archive_path SRC PREFIX
#
# If directory SRC exists, archive its contents into the dated backup
# directory as "<PREFIX>_<timestamp>.tar.gz"; otherwise log and skip.
# Returns non-zero only when the archive command itself fails.
#
# NOTE(review): --same-owner is normally an extract-time option for
# bsdtar; on create it looks like a no-op -- confirm against the bsdtar
# build shipped in the backup container.
# NOTE(review): date_formatted is re-evaluated per archive, so if the
# minute rolls over mid-run the archive names can differ from the
# DATED_DIR name -- harmless but worth knowing when restoring.
archive_path() {
  local src="$1" prefix="$2"
  if [ -d "$src" ]; then
    log "Archiving $src"
    # -C "$src" . archives the directory's contents rather than the
    # directory itself; xattrs and numeric ids are preserved so a
    # restore reproduces ownership faithfully.
    if bsdtar --xattrs --same-owner --numeric-owner -czf "$DATED_DIR/${prefix}_$(date_formatted).tar.gz" -C "$src" .; then
      log "$src archived successfully"
      # (reverted) do not force ownership changes here
    else
      err "Failed to archive $src"
      return 1
    fi
  else
    log "Source path $src not found; skipping"
  fi
}
# Prefer canonical paths mounted into the backup-manager container.
# Each path is archived best-effort (the archive prefix is simply the
# mount name, i.e. the path without its leading slash); a missing or
# failing path must not abort the remaining archives.
for _src in /odoo_db_data /odoo_config /gitea_data /opencloud_data /opencloud_config; do
  archive_path "$_src" "${_src#/}" || true
done
# Find and move today's database dumps into the dated directory.
log "Moving files from database dump to dated directory..."
# Show what is currently in the dump staging area (best-effort, for the log).
ls -1 "$BACKUP_BASE/databases"/* 2>/dev/null || true
log "Moving database dumps to dated directory..."
# Fix: reuse the timestamp already embedded in DATED_DIR instead of
# calling date_formatted again. The clock may have rolled over to the
# next minute since the dated directory was created, in which case a
# freshly computed stamp would silently match no dump files.
db_stamp=${DATED_DIR##*/}
mv "$BACKUP_BASE/databases"/*_"${db_stamp}"*.sql.gz "$DATED_DIR"/ 2>/dev/null || true
# Prune old backups according to the retention policy, then finish.
cleanup_old_backups

# (reverted) do not change ownership of the dated directory

log "manage-backups.sh finished successfully"