better backups

This commit is contained in:
Ludwig Lehnert
2026-03-17 10:04:50 +01:00
parent 029488b80d
commit 6da6db6955
5 changed files with 564 additions and 117 deletions

View File

@@ -16,3 +16,8 @@ FSLOGIX_GROUP_SID=S-1-5-21-1111111111-2222222222-3333333333-513
# BACKUP_DESTINATION=smb://DOMAIN%5Cuser:pass@backup.example.com/Backups/samba
# BACKUP_DESTINATION=davfs://user:pass@webdav.example.com/remote.php/dav/files/backup
# BACKUP_DESTINATION=sftp://user:pass@sftp.example.com/exports/samba
# BACKUP_START_HOUR=2
# BACKUP_RETENTION_DAILY=3
# BACKUP_RETENTION_WEEKLY=2
# BACKUP_RETENTION_MONTHLY=2
# BACKUP_RETENTION_YEARLY=1

View File

@@ -27,8 +27,7 @@ This repository provides a production-oriented Samba file server container that
- once on startup
- every 5 minutes via cron
- Backup is executed:
- daily at `BACKUP_START_HOUR` (default: `2`, i.e. 02:00)
## Data Folder Lifecycle
@@ -109,6 +108,11 @@ Kerberos requires close time alignment.
- `DOMAIN_ADMINS_SID`
- optional `FSLOGIX_GROUP_SID` (defaults to `DOMAIN_USERS_SID`)
- optional `BACKUP_DESTINATION` (empty disables backup)
- optional `BACKUP_START_HOUR` (0-23, default `2`)
- optional `BACKUP_RETENTION_DAILY` (default `3`)
- optional `BACKUP_RETENTION_WEEKLY` (default `2`)
- optional `BACKUP_RETENTION_MONTHLY` (default `2`)
- optional `BACKUP_RETENTION_YEARLY` (default `1`)
Optional:
- `SAMBA_HOSTNAME` (defaults to `adsambafsrv`)
@@ -168,12 +172,25 @@ Kerberos requires close time alignment.
## Backups
- Backups are enabled only if `BACKUP_DESTINATION` is non-empty.
- Each run creates a timestamped snapshot under `snapshots/YYYYMMDDTHHMMSSZ` at the destination.
- Backup job is scheduled daily at `BACKUP_START_HOUR` in container local time.
- Sources synced to destination on each run:
- `/data/private` -> `data/private`
- `/data/groups` -> `data/groups`
- `/data/fslogix` -> `data/fslogix`
- `/state` -> `state`
- `/var/lib/samba/private` -> `samba/private`
- Retention policy env vars (defaults):
- `BACKUP_RETENTION_YEARLY=1`
- `BACKUP_RETENTION_MONTHLY=2`
- `BACKUP_RETENTION_WEEKLY=2`
- `BACKUP_RETENTION_DAILY=3`
- Retention logic (see the worked sketch after this list):
- daily: newest N snapshots
- weekly: newest N snapshots created on week start (Monday)
- monthly: newest N snapshots created on day 1
- yearly: newest N snapshots created on Jan 1
- snapshots selected by any tier are retained; all others are pruned
- Supported destination schemes:
- `rsync://user:pass@host/module/path`
- `smb://user:pass@host/share/path` (domain user example: `smb://DOMAIN%5Cuser:pass@host/share/path`)
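To make the retention rules concrete, here is a minimal standalone sketch of the tier selection (the timestamps are hypothetical and the `newest` helper stands in for the script's `select_newest`):
```python
import datetime as dt

# Hypothetical snapshot times (UTC), newest first.
snapshots = [
    dt.datetime(2026, 3, 17, 2, 0),  # Tuesday
    dt.datetime(2026, 3, 16, 2, 0),  # Monday -> weekly candidate
    dt.datetime(2026, 3, 15, 2, 0),  # Sunday
    dt.datetime(2026, 3, 14, 2, 0),  # Saturday -> pruned (not in any tier)
    dt.datetime(2026, 3, 9, 2, 0),   # Monday -> weekly candidate
    dt.datetime(2026, 3, 1, 2, 0),   # day 1 -> monthly candidate
    dt.datetime(2026, 1, 1, 2, 0),   # Jan 1 -> monthly and yearly candidate
]

def newest(pool, limit):
    """Keep the `limit` most recent timestamps from `pool`."""
    return set(sorted(pool, reverse=True)[:limit])

# Default policy: daily=3, weekly=2, monthly=2, yearly=1.
retained = (
    newest(snapshots, 3)
    | newest([t for t in snapshots if t.weekday() == 0], 2)             # Mondays
    | newest([t for t in snapshots if t.day == 1], 2)                   # day 1
    | newest([t for t in snapshots if t.month == 1 and t.day == 1], 1)  # Jan 1
)
print(sorted(set(snapshots) - retained))  # pruned: only 2026-03-14 02:00
```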
@@ -185,6 +202,11 @@ Kerberos requires close time alignment.
```env
BACKUP_DESTINATION=sftp://backupuser:StrongPassword@sftp.example.com/exports/samba
BACKUP_START_HOUR=2
BACKUP_RETENTION_DAILY=3
BACKUP_RETENTION_WEEKLY=2
BACKUP_RETENTION_MONTHLY=2
BACKUP_RETENTION_YEARLY=1
```
## Useful Commands

View File

@@ -1,16 +1,20 @@
#!/usr/bin/env python3
import datetime as dt
import fcntl
import os
import re
import subprocess
import sys
import tempfile
from dataclasses import dataclass
from typing import Dict, List, Optional, Set, Tuple
from urllib.parse import SplitResult, unquote, urlsplit
LOCK_PATH = "/state/backup.lock"
SNAPSHOT_NAME_RE = re.compile(r"^\d{8}T\d{6}Z$")
BACKUP_SOURCES: List[Tuple[str, str]] = [
("/data/private", "data/private"),
("/data/groups", "data/groups"),
@@ -30,6 +34,12 @@ RCLONE_SCHEME_MAP = {
"https": "webdav",
}
DEFAULT_BACKUP_START_HOUR = 2
DEFAULT_RETENTION_DAILY = 3
DEFAULT_RETENTION_WEEKLY = 2
DEFAULT_RETENTION_MONTHLY = 2
DEFAULT_RETENTION_YEARLY = 1
@dataclass
class Destination:
@@ -43,6 +53,20 @@ class Destination:
path: str
@dataclass
class RetentionPolicy:
daily: int
weekly: int
monthly: int
yearly: int
@dataclass(frozen=True)
class Snapshot:
name: str
timestamp: dt.datetime
def log(message: str) -> None:
print(f"[backup] {message}", flush=True)
@@ -51,9 +75,16 @@ def run_command(
command: List[str],
*,
env: Optional[Dict[str, str]] = None,
input_text: Optional[str] = None,
check: bool = True,
) -> subprocess.CompletedProcess:
result = subprocess.run(
command,
capture_output=True,
text=True,
env=env,
input=input_text,
)
if check and result.returncode != 0:
output = result.stderr.strip() or result.stdout.strip()
raise RuntimeError(f"Command failed ({command[0]}): {output}")
@@ -127,14 +158,57 @@ def join_path(prefix: str, suffix: str) -> str:
return left or right
def parse_int_env(
name: str, default: int, *, minimum: int, maximum: Optional[int]
) -> int:
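# Missing, non-integer, or out-of-range values fall back to the default (logged when invalid).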
raw_value = os.getenv(name, "").strip()
if not raw_value:
return default
try:
value = int(raw_value)
except ValueError:
log(f"Invalid {name}='{raw_value}', using default {default}")
return default
if value < minimum or (maximum is not None and value > maximum):
if maximum is None:
log(f"Invalid {name}='{raw_value}', using default {default}")
else:
log(
f"Invalid {name}='{raw_value}' (expected {minimum}-{maximum}), using default {default}"
)
return default
return value
def parse_retention_policy() -> RetentionPolicy:
return RetentionPolicy(
daily=parse_int_env(
"BACKUP_RETENTION_DAILY", DEFAULT_RETENTION_DAILY, minimum=0, maximum=None
),
weekly=parse_int_env(
"BACKUP_RETENTION_WEEKLY",
DEFAULT_RETENTION_WEEKLY,
minimum=0,
maximum=None,
),
monthly=parse_int_env(
"BACKUP_RETENTION_MONTHLY",
DEFAULT_RETENTION_MONTHLY,
minimum=0,
maximum=None,
),
yearly=parse_int_env(
"BACKUP_RETENTION_YEARLY",
DEFAULT_RETENTION_YEARLY,
minimum=0,
maximum=None,
),
)
def parse_smb_identity(username: str) -> Tuple[str, str]:
if not username:
return "", ""
@@ -147,34 +221,102 @@ def parse_smb_identity(username: str) -> Tuple[str, str]:
return "", username
def obscure_secret(secret: str) -> str:
result = run_command(["rclone", "obscure", secret])
value = result.stdout.strip()
if not value:
raise RuntimeError("rclone obscure returned an empty value")
return value
def parse_snapshot_name(name: str) -> Optional[dt.datetime]:
if not SNAPSHOT_NAME_RE.match(name):
return None
try:
parsed = dt.datetime.strptime(name, "%Y%m%dT%H%M%SZ")
except ValueError:
return None
return parsed.replace(tzinfo=dt.timezone.utc)
def choose_snapshot_name(existing_names: Set[str]) -> str:
base = dt.datetime.now(dt.timezone.utc)
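# Advance one second at a time (up to two minutes) until an unused name is found.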
for offset in range(0, 120):
candidate = (base + dt.timedelta(seconds=offset)).strftime("%Y%m%dT%H%M%SZ")
if candidate not in existing_names:
return candidate
raise RuntimeError("Unable to generate a unique snapshot name")
def is_week_start(timestamp: dt.datetime) -> bool:
return timestamp.weekday() == 0
def is_month_start(timestamp: dt.datetime) -> bool:
return timestamp.day == 1
def is_year_start(timestamp: dt.datetime) -> bool:
return timestamp.month == 1 and timestamp.day == 1
def select_newest(snapshot_pool: List[Snapshot], limit: int) -> Set[str]:
if limit <= 0:
return set()
sorted_pool = sorted(snapshot_pool, key=lambda entry: entry.timestamp, reverse=True)
return {entry.name for entry in sorted_pool[:limit]}
def compute_retained_snapshots(
snapshots: List[Snapshot], policy: RetentionPolicy
) -> Set[str]:
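# Union of per-tier selections: a snapshot kept by any tier survives pruning.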
retained: Set[str] = set()
retained.update(select_newest(snapshots, policy.daily))
retained.update(
select_newest(
[entry for entry in snapshots if is_week_start(entry.timestamp)],
policy.weekly,
)
)
retained.update(
select_newest(
[entry for entry in snapshots if is_month_start(entry.timestamp)],
policy.monthly,
)
)
retained.update(
select_newest(
[entry for entry in snapshots if is_year_start(entry.timestamp)],
policy.yearly,
)
)
return retained
class RcloneBackend:
def __init__(self, destination: Destination):
self.base_prefix = ""
options, self.base_prefix = self._build_remote(destination)
self.config_path = self._write_config(options)
def close(self) -> None:
if os.path.exists(self.config_path):
os.remove(self.config_path)
def _run(
self,
args: List[str],
*,
check: bool = True,
input_text: Optional[str] = None,
) -> subprocess.CompletedProcess:
return run_command(
["rclone", *args, "--config", self.config_path],
check=check,
input_text=input_text,
)
def _build_remote(self, destination: Destination) -> Tuple[Dict[str, str], str]:
backend = RCLONE_SCHEME_MAP.get(destination.scheme)
if backend is None:
supported = ", ".join(["rsync", *sorted(RCLONE_SCHEME_MAP.keys())])
@@ -197,7 +339,9 @@ def build_rclone_remote(destination: Destination) -> Tuple[Dict[str, str], str]:
return options, remote_prefix
if backend == "smb":
path_segments = [segment for segment in destination.path.split("/") if segment]
if not path_segments:
raise RuntimeError(
"smb destinations must include a share name in the path (example: smb://user:pass@host/share/path)"
@@ -238,8 +382,7 @@ def build_rclone_remote(destination: Destination) -> Tuple[Dict[str, str], str]:
return options, remote_prefix
def _write_config(self, options: Dict[str, str]) -> str:
with tempfile.NamedTemporaryFile("w", encoding="utf-8", delete=False) as handle:
handle.write("[backup]\n")
for key, value in options.items():
@@ -249,29 +392,221 @@ def write_rclone_config(options: Dict[str, str]) -> str:
os.chmod(config_path, 0o600)
return config_path
def _snapshots_root(self) -> str:
return join_path(self.base_prefix, "snapshots")
def _snapshot_root(self, snapshot_name: str) -> str:
return join_path(self._snapshots_root(), snapshot_name)
def sync_source(
self, snapshot_name: str, source_path: str, destination_path: str
) -> None:
remote_path = join_path(self._snapshot_root(snapshot_name), destination_path)
self._run(
[
"sync",
f"{source_path}/",
f"backup:{remote_path}",
"--create-empty-src-dirs",
]
)
def write_marker(self, snapshot_name: str) -> None:
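# Record completion; list_snapshots() only counts snapshots carrying this marker.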
marker_path = join_path(self._snapshot_root(snapshot_name), ".backup_complete")
self._run(
["rcat", f"backup:{marker_path}"],
input_text=f"{dt.datetime.now(dt.timezone.utc).isoformat()}\n",
)
def _snapshot_has_marker(self, snapshot_name: str) -> bool:
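# Probe the snapshot directory for the .backup_complete marker via rclone lsf.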
result = self._run(
[
"lsf",
f"backup:{self._snapshot_root(snapshot_name)}",
"--files-only",
"--include",
".backup_complete",
],
check=False,
)
if result.returncode != 0:
return False
return any(
line.strip() == ".backup_complete" for line in result.stdout.splitlines()
)
def list_snapshots(self) -> List[str]:
result = self._run(
["lsf", f"backup:{self._snapshots_root()}", "--dirs-only", "--format", "p"],
check=False,
)
if result.returncode != 0:
output = (result.stderr.strip() or result.stdout.strip()).lower()
if "not found" in output or "doesn't exist" in output:
return []
raise RuntimeError(result.stderr.strip() or result.stdout.strip())
names: List[str] = []
for line in result.stdout.splitlines():
candidate = line.strip().rstrip("/")
if not SNAPSHOT_NAME_RE.match(candidate):
continue
if self._snapshot_has_marker(candidate):
names.append(candidate)
return sorted(set(names))
def delete_snapshot(self, snapshot_name: str) -> None:
result = self._run(
["purge", f"backup:{self._snapshot_root(snapshot_name)}"],
check=False,
)
if result.returncode != 0:
log(
f"Failed to delete snapshot {snapshot_name}: {result.stderr.strip() or result.stdout.strip()}"
)
class RsyncBackend:
def __init__(self, destination: Destination):
module_path = destination.path.lstrip("/")
if not module_path:
raise RuntimeError(
"rsync destinations must include a module path (example: rsync://user:pass@host/module/path)"
)
host = format_host(destination.hostname)
if destination.port is not None:
host = f"{host}:{destination.port}"
user_prefix = f"{destination.username}@" if destination.username else ""
self.remote_base = f"rsync://{user_prefix}{host}/{module_path.rstrip('/')}"
self.command_env = os.environ.copy()
if destination.password:
self.command_env["RSYNC_PASSWORD"] = destination.password
def close(self) -> None:
return
def _remote_path(self, path: str) -> str:
trimmed = path.strip("/")
if not trimmed:
return self.remote_base
return f"{self.remote_base}/{trimmed}"
def sync_source(
self, snapshot_name: str, source_path: str, destination_path: str
) -> None:
target = self._remote_path(
join_path(f"snapshots/{snapshot_name}", destination_path)
)
run_command(
["rsync", "-a", "--delete", f"{source_path}/", f"{target}/"],
env=self.command_env,
)
def write_marker(self, snapshot_name: str) -> None:
marker_remote = self._remote_path(f"snapshots/{snapshot_name}/.backup_complete")
marker_file = None
try:
with tempfile.NamedTemporaryFile(
"w", encoding="utf-8", delete=False
) as handle:
marker_file = handle.name
handle.write(f"{dt.datetime.now(dt.timezone.utc).isoformat()}\n")
run_command(
["rsync", "-a", marker_file, marker_remote], env=self.command_env
)
finally:
if marker_file and os.path.exists(marker_file):
os.remove(marker_file)
def _snapshot_has_marker(self, snapshot_name: str) -> bool:
marker_remote = self._remote_path(f"snapshots/{snapshot_name}/.backup_complete")
result = run_command(
["rsync", "--list-only", marker_remote],
env=self.command_env,
check=False,
)
return result.returncode == 0
def list_snapshots(self) -> List[str]:
root = self._remote_path("snapshots")
result = run_command(
["rsync", "--list-only", f"{root}/"],
env=self.command_env,
check=False,
)
if result.returncode != 0:
output = result.stderr.strip() or result.stdout.strip()
lower = output.lower()
if (
"no such file" in lower
or "not found" in lower
or "chdir failed" in lower
):
return []
raise RuntimeError(output)
names: List[str] = []
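# rsync --list-only prints permissions, size, and date before the name; keep the final field.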
for line in result.stdout.splitlines():
line = line.strip()
if not line or line.startswith("receiving"):
continue
parts = line.split()
if not parts:
continue
candidate = parts[-1].rstrip("/")
if not SNAPSHOT_NAME_RE.match(candidate):
continue
if self._snapshot_has_marker(candidate):
names.append(candidate)
return sorted(set(names))
def delete_snapshot(self, snapshot_name: str) -> None:
snapshot_remote = self._remote_path(f"snapshots/{snapshot_name}")
empty_dir = tempfile.mkdtemp(prefix="backup-empty-")
try:
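# First pass: sync an empty directory over the snapshot to delete its contents.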
run_command(
["rsync", "-a", "--delete", f"{empty_dir}/", f"{snapshot_remote}/"],
env=self.command_env,
check=False,
)
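# Second pass: prune the now-empty snapshot directory itself from snapshots/.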
run_command(
[
"rsync",
"-a",
"--delete",
"--prune-empty-dirs",
"--include",
f"/{snapshot_name}/***",
"--exclude",
"*",
f"{empty_dir}/",
f"{self._remote_path('snapshots')}/",
],
env=self.command_env,
check=False,
)
finally:
os.rmdir(empty_dir)
def build_backend(destination: Destination):
if destination.scheme == "rsync":
return RsyncBackend(destination)
return RcloneBackend(destination)
def parse_snapshot_inventory(snapshot_names: List[str]) -> List[Snapshot]:
snapshots: List[Snapshot] = []
for name in snapshot_names:
timestamp = parse_snapshot_name(name)
if timestamp is None:
continue
snapshots.append(Snapshot(name=name, timestamp=timestamp))
snapshots.sort(key=lambda entry: entry.timestamp, reverse=True)
return snapshots
def run_backup() -> int:
@@ -280,21 +615,45 @@ def run_backup() -> int:
log("BACKUP_DESTINATION is unset, skipping backup")
return 0
policy = parse_retention_policy()
sources = available_sources()
if not sources:
log("No backup sources are available, skipping backup")
return 0
destination = parse_destination(destination_url)
backend = build_backend(destination)
try:
log(f"Starting backup to {redact_destination(destination.raw_url)}")
if destination.scheme == "rsync":
sync_with_rsync(destination, sources)
else:
sync_with_rclone(destination, sources)
existing = set(backend.list_snapshots())
snapshot_name = choose_snapshot_name(existing)
log(f"Creating snapshot {snapshot_name}")
log("Backup completed")
for source_path, destination_path in sources:
log(f"Syncing {source_path}")
backend.sync_source(snapshot_name, source_path, destination_path)
backend.write_marker(snapshot_name)
all_snapshot_names = backend.list_snapshots()
snapshots = parse_snapshot_inventory(all_snapshot_names)
retained = compute_retained_snapshots(snapshots, policy)
deleted_count = 0
for snapshot in snapshots:
if snapshot.name in retained:
continue
log(f"Pruning snapshot {snapshot.name}")
backend.delete_snapshot(snapshot.name)
deleted_count += 1
log(
f"Backup completed (snapshots total={len(snapshots)}, retained={len(retained)}, pruned={deleted_count})"
)
return 0
finally:
backend.close()
def with_lock() -> int:
@@ -311,6 +670,15 @@ def with_lock() -> int:
def main() -> int:
backup_hour = parse_int_env(
"BACKUP_START_HOUR",
DEFAULT_BACKUP_START_HOUR,
minimum=0,
maximum=23,
)
if backup_hour != DEFAULT_BACKUP_START_HOUR:
log(f"Configured backup start hour is {backup_hour}:00")
try:
return with_lock()
except Exception as exc: # pylint: disable=broad-except

View File

@@ -62,6 +62,17 @@ derive_netbios_name() {
export NETBIOS_NAME="${cleaned_name:0:15}"
}
derive_backup_start_hour() {
local raw_hour="${BACKUP_START_HOUR:-2}"
if [[ "$raw_hour" =~ ^[0-9]+$ ]] && (( raw_hour >= 0 && raw_hour <= 23 )); then
printf '%d\n' "$raw_hour"
return
fi
log "Invalid BACKUP_START_HOUR '${raw_hour}', defaulting to 2."
printf '2\n'
}
resolve_sid_to_group() {
local sid="$1"
local resolved_name=""
@@ -159,9 +170,22 @@ write_runtime_env_file() {
printf 'export DOMAIN_USERS_GROUP=%q\n' "$DOMAIN_USERS_GROUP"
printf 'export DOMAIN_ADMINS_GROUP=%q\n' "$DOMAIN_ADMINS_GROUP"
printf 'export FSLOGIX_GROUP=%q\n' "$FSLOGIX_GROUP"
printf 'export BACKUP_START_HOUR=%q\n' "$BACKUP_START_HOUR"
if [[ -n "${BACKUP_DESTINATION:-}" ]]; then
printf 'export BACKUP_DESTINATION=%q\n' "$BACKUP_DESTINATION"
fi
if [[ -n "${BACKUP_RETENTION_DAILY:-}" ]]; then
printf 'export BACKUP_RETENTION_DAILY=%q\n' "$BACKUP_RETENTION_DAILY"
fi
if [[ -n "${BACKUP_RETENTION_WEEKLY:-}" ]]; then
printf 'export BACKUP_RETENTION_WEEKLY=%q\n' "$BACKUP_RETENTION_WEEKLY"
fi
if [[ -n "${BACKUP_RETENTION_MONTHLY:-}" ]]; then
printf 'export BACKUP_RETENTION_MONTHLY=%q\n' "$BACKUP_RETENTION_MONTHLY"
fi
if [[ -n "${BACKUP_RETENTION_YEARLY:-}" ]]; then
printf 'export BACKUP_RETENTION_YEARLY=%q\n' "$BACKUP_RETENTION_YEARLY"
fi
if [[ -n "${JOIN_USER:-}" ]]; then
printf 'export JOIN_USER=%q\n' "$JOIN_USER"
fi
@@ -216,8 +240,8 @@ PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
EOF
if [[ -n "${BACKUP_DESTINATION:-}" ]]; then
cat >> /etc/cron.d/reconcile-shares <<EOF
0 ${BACKUP_START_HOUR} * * * root source /app/runtime.env && /usr/bin/python3 /app/backup_to_destination.py >> /var/log/backup.log 2>&1
EOF
fi
@@ -231,6 +255,7 @@ require_env DOMAIN_USERS_SID
require_env DOMAIN_ADMINS_SID
export REALM WORKGROUP DOMAIN
export BACKUP_START_HOUR="$(derive_backup_start_hour)"
export FSLOGIX_GROUP_SID="${FSLOGIX_GROUP_SID:-${DOMAIN_USERS_SID}}"
export DOMAIN_USERS_GROUP="${DOMAIN_USERS_SID}"
export DOMAIN_ADMINS_GROUP="${DOMAIN_ADMINS_SID}"
@@ -266,12 +291,9 @@ log 'Running startup reconciliation'
python3 /app/reconcile_shares.py
if [[ -n "${BACKUP_DESTINATION:-}" ]]; then
log "Backups enabled: daily at ${BACKUP_START_HOUR}:00 (container local time)."
else
log 'BACKUP_DESTINATION is unset; scheduled backup disabled'
fi
install_cron_job

setup
View File

@@ -85,6 +85,11 @@ write_env_file() {
local domain_admins_sid=""
local fslogix_group_sid=""
local backup_destination=""
local backup_start_hour="2"
local backup_retention_daily="3"
local backup_retention_weekly="2"
local backup_retention_monthly="2"
local backup_retention_yearly="1"
local samba_hostname="adsambafsrv"
local netbios_name="ADSAMBAFSRV"
local service_password=""
@@ -124,6 +129,16 @@ write_env_file() {
netbios_name="$sanitized_netbios_name"
read -r -p "BACKUP_DESTINATION (optional URL, press Enter to disable): " backup_destination
read -r -p "BACKUP_START_HOUR [2]: " backup_start_hour
backup_start_hour="${backup_start_hour:-2}"
read -r -p "BACKUP_RETENTION_DAILY [3]: " backup_retention_daily
backup_retention_daily="${backup_retention_daily:-3}"
read -r -p "BACKUP_RETENTION_WEEKLY [2]: " backup_retention_weekly
backup_retention_weekly="${backup_retention_weekly:-2}"
read -r -p "BACKUP_RETENTION_MONTHLY [2]: " backup_retention_monthly
backup_retention_monthly="${backup_retention_monthly:-2}"
read -r -p "BACKUP_RETENTION_YEARLY [1]: " backup_retention_yearly
backup_retention_yearly="${backup_retention_yearly:-1}"
service_account_sam="$(sanitize_sam_account_name "$SERVICE_ACCOUNT_NAME")"
if [[ "$service_account_sam" != "$SERVICE_ACCOUNT_NAME" ]]; then
@@ -162,6 +177,11 @@ DOMAIN_USERS_SID=${domain_users_sid}
DOMAIN_ADMINS_SID=${domain_admins_sid}
FSLOGIX_GROUP_SID=${fslogix_group_sid}
BACKUP_DESTINATION=${backup_destination}
BACKUP_START_HOUR=${backup_start_hour}
BACKUP_RETENTION_DAILY=${backup_retention_daily}
BACKUP_RETENTION_WEEKLY=${backup_retention_weekly}
BACKUP_RETENTION_MONTHLY=${backup_retention_monthly}
BACKUP_RETENTION_YEARLY=${backup_retention_yearly}
SAMBA_HOSTNAME=${samba_hostname}
NETBIOS_NAME=${netbios_name}
EOF
@@ -214,6 +234,11 @@ DOMAIN_USERS_SID=${domain_users_sid}
DOMAIN_ADMINS_SID=${domain_admins_sid}
FSLOGIX_GROUP_SID=${fslogix_group_sid}
BACKUP_DESTINATION=${backup_destination}
BACKUP_START_HOUR=${backup_start_hour}
BACKUP_RETENTION_DAILY=${backup_retention_daily}
BACKUP_RETENTION_WEEKLY=${backup_retention_weekly}
BACKUP_RETENTION_MONTHLY=${backup_retention_monthly}
BACKUP_RETENTION_YEARLY=${backup_retention_yearly}
SAMBA_HOSTNAME=${samba_hostname}
NETBIOS_NAME=${netbios_name}
# Optional overrides:
@@ -225,6 +250,11 @@ NETBIOS_NAME=${netbios_name}
# BACKUP_DESTINATION=smb://DOMAIN%5Cuser:pass@backup.example.com/Backups/samba
# BACKUP_DESTINATION=davfs://user:pass@webdav.example.com/remote.php/dav/files/backup
# BACKUP_DESTINATION=sftp://user:pass@sftp.example.com/exports/samba
# BACKUP_START_HOUR=2
# BACKUP_RETENTION_DAILY=3
# BACKUP_RETENTION_WEEKLY=2
# BACKUP_RETENTION_MONTHLY=2
# BACKUP_RETENTION_YEARLY=1
EOF
chmod 600 "$ENV_FILE"