first progress
This commit is contained in:
145
app/init.sh
Executable file
145
app/init.sh
Executable file
@@ -0,0 +1,145 @@
|
||||
#!/usr/bin/env bash
# Container entrypoint: configure Samba/winbind for AD membership, join the
# domain, run the initial share reconciliation, then exec smbd in foreground.
# -e: exit on error, -u: error on unset vars, -o pipefail: fail broken pipes.
set -euo pipefail
|
||||
|
||||
# log MESSAGE... — write a "[init]"-prefixed progress message to stdout.
log() {
    printf '[init] %s\n' "$*"
}
|
||||
|
||||
# require_env NAME — abort (exit 1) unless env var NAME is set and non-empty.
# Uses bash indirect expansion ${!name} to look the variable up by its name.
require_env() {
    local name="$1"
    if [[ -z "${!name:-}" ]]; then
        printf '[init] ERROR: missing required env var %s\n' "$name" >&2
        exit 1
    fi
}
|
||||
|
||||
# Make NSS consult winbind for user and group lookups.
# Idempotent: "winbind" is only appended to a database line that does not
# already contain it.
append_winbind_to_nss() {
    local db
    for db in passwd group; do
        sed -ri "/^${db}:/ { /winbind/! s/\$/ winbind/ }" /etc/nsswitch.conf
    done
}
|
||||
|
||||
# Write /etc/krb5.conf for the target realm.
# The heredoc delimiter is intentionally unquoted so ${REALM}/${DOMAIN}
# expand at render time (do NOT add comments inside the heredoc — they
# would end up in the generated file).
render_krb5_conf() {
    cat > /etc/krb5.conf <<EOF
[libdefaults]
default_realm = ${REALM}
dns_lookup_realm = false
dns_lookup_kdc = true
rdns = false
ticket_lifetime = 24h
forwardable = true

[realms]
${REALM} = {
kdc = ${DOMAIN}
admin_server = ${DOMAIN}
}

[domain_realm]
.${DOMAIN} = ${REALM}
${DOMAIN} = ${REALM}
EOF
}
|
||||
|
||||
# Render /etc/samba/smb.conf from the template, then validate it.
# NOTE(review): envsubst without an explicit variable list substitutes every
# exported variable referenced in the template — confirm the template only
# uses placeholders that are meant to expand.
render_smb_conf() {
    envsubst < /app/smb.conf.template > /etc/samba/smb.conf
    # testparm fails loudly (and, via set -e, aborts startup) on a bad config.
    testparm -s /etc/samba/smb.conf >/dev/null
}
|
||||
|
||||
# Persist the effective configuration as shell-quoted (%q) export lines so
# the cron job can "source /app/runtime.env" and see the same environment.
# Optional variables are emitted only when set and non-empty; the required
# ones are guaranteed non-empty by the require_env calls at startup.
write_runtime_env_file() {
    local name
    {
        for name in REALM WORKGROUP DOMAIN JOIN_USER JOIN_PASSWORD PUBLIC_GROUP LDAP_URI LDAP_BASE_DN; do
            if [[ -n "${!name:-}" ]]; then
                printf 'export %s=%q\n' "$name" "${!name}"
            fi
        done
    } > /app/runtime.env
    # The file may contain JOIN_PASSWORD — keep it root-only.
    chmod 600 /app/runtime.env
}
|
||||
|
||||
# Join the AD domain unless the machine account is already valid.
# The password is piped on stdin so it never appears in the process list.
join_domain_if_needed() {
    if net ads testjoin >/dev/null 2>&1; then
        log 'Domain join already present; skipping join.'
        return
    fi

    # Credentials are only mandatory when an actual join has to happen.
    require_env JOIN_USER
    require_env JOIN_PASSWORD

    log "Joining AD domain ${REALM}"
    # First try the explicitly configured DC; fall back to DNS-based
    # discovery if that server rejects or cannot be reached.
    if ! printf '%s\n' "$JOIN_PASSWORD" | net ads join -U "$JOIN_USER" -S "$DOMAIN"; then
        log 'Join using explicit server failed, retrying automatic DC discovery.'
        printf '%s\n' "$JOIN_PASSWORD" | net ads join -U "$JOIN_USER"
    fi
}
|
||||
|
||||
# Poll winbind's trust check ("wbinfo -t") until it succeeds.
# Makes up to max_tries attempts with a 2s pause between them (~60s total);
# returns 1 after the final failed attempt.
wait_for_winbind() {
    local attempt
    local max_tries=30
    for (( attempt = 1; attempt <= max_tries; attempt++ )); do
        if wbinfo -t >/dev/null 2>&1; then
            return 0
        fi
        if (( attempt == max_tries )); then
            printf '[init] ERROR: winbind trust test failed after %d attempts\n' "$max_tries" >&2
            return 1
        fi
        sleep 2
    done
    return 0
}
|
||||
|
||||
# Install a cron entry that re-runs the reconciler every 5 minutes.
# The heredoc delimiter is quoted ('EOF') so nothing expands at install
# time; the job re-sources /app/runtime.env to recover the exported config.
install_cron_job() {
    cat > /etc/cron.d/reconcile-shares <<'EOF'
SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
*/5 * * * * root source /app/runtime.env && /usr/bin/python3 /app/reconcile_shares.py >> /var/log/reconcile.log 2>&1
EOF
    # cron refuses files in /etc/cron.d that are group/other-writable.
    chmod 0644 /etc/cron.d/reconcile-shares
}
|
||||
|
||||
# --- Main --------------------------------------------------------------------

# Validate mandatory configuration before touching any system state.
require_env REALM
require_env WORKGROUP
require_env DOMAIN

export REALM WORKGROUP DOMAIN
# Group granted access to the public share (defaults to the builtin AD group).
export PUBLIC_GROUP="${PUBLIC_GROUP:-Domain Users}"
# Join credentials are optional here; they are enforced later only if an
# actual domain join turns out to be necessary.
if [[ -n "${JOIN_USER:-}" ]]; then
    export JOIN_USER
fi
if [[ -n "${JOIN_PASSWORD:-}" ]]; then
    export JOIN_PASSWORD
fi

# Data, state and log directories used by smbd and the reconciler.
mkdir -p /data/private /data/public /data/groups /state /etc/samba/generated /var/log/samba
# Pre-create the generated include and the cron log so smbd/cron never see
# a missing file on first start.
touch /etc/samba/generated/shares.conf /var/log/reconcile.log

append_winbind_to_nss
write_runtime_env_file
render_krb5_conf
render_smb_conf
join_domain_if_needed

log 'Starting winbindd'
# winbind must be running before wbinfo / NSS user+group resolution works.
winbindd -F --no-process-group &

wait_for_winbind

log 'Running startup reconciliation'
python3 /app/reconcile_shares.py

install_cron_job

log 'Starting cron daemon'
cron -f &

log 'Starting smbd in foreground'
# exec replaces this shell so smbd becomes the container's main process.
exec smbd -F --no-process-group
|
||||
538
app/reconcile_shares.py
Executable file
538
app/reconcile_shares.py
Executable file
@@ -0,0 +1,538 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import base64
|
||||
import datetime as dt
|
||||
import fcntl
|
||||
import grp
|
||||
import os
|
||||
import pwd
|
||||
import re
|
||||
import sqlite3
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import uuid
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
|
||||
# --- Filesystem layout (inside the container) --------------------------------
# SQLite registry of discovered share groups (persists across runs).
DB_PATH = "/state/shares.db"
# flock() target used to prevent overlapping reconciliation cycles.
LOCK_PATH = "/state/reconcile.lock"
# Dynamic group shares live here, one directory per AD group objectGUID.
GROUP_ROOT = "/data/groups"
# Per-user private directories and the shared public directory.
PRIVATE_ROOT = "/data/private"
PUBLIC_ROOT = "/data/public"
# Samba include file regenerated on every run with the active share stanzas.
GENERATED_CONF = "/etc/samba/generated/shares.conf"

# AD groups named FileShare_<share> drive share provisioning.
LDAP_FILTER = "(&(objectClass=group)(sAMAccountName=FileShare_*))"
GROUP_PREFIX = "FileShare_"

REQUIRED_ENV = ["REALM", "WORKGROUP", "DOMAIN"]
# LDIF attribute line: "name: value" (plain) or "name:: value" (base64).
# BUG FIX: the previous pattern `^([^:]+)(::?):\s*(.*)$` demanded an extra
# literal ":" after the delimiter group, so plain single-colon lines never
# matched at all and base64 "::" lines captured the delimiter as ":" (never
# recognised as base64). The delimiter is captured in group 2.
ATTR_RE = re.compile(r"^([^:]+)(::?)\s*(.*)$")
# Characters Samba/Windows reject in share names.
SHARE_NAME_INVALID_RE = re.compile(r"[\\/:*?\"<>|;\[\],+=]")
|
||||
|
||||
|
||||
def now_utc() -> str:
    """Return the current UTC time as an ISO-8601 string (seconds precision)."""
    current = dt.datetime.now(tz=dt.timezone.utc)
    return current.isoformat(timespec="seconds")
|
||||
|
||||
|
||||
def log(message: str) -> None:
    """Emit a tagged progress line, flushed immediately for cron log files."""
    sys.stdout.write(f"[reconcile] {message}\n")
    sys.stdout.flush()
|
||||
|
||||
|
||||
def ensure_required_env() -> None:
    """Fail fast if any mandatory configuration variable is unset or empty."""
    missing = []
    for name in REQUIRED_ENV:
        if not os.getenv(name):
            missing.append(name)
    if missing:
        raise RuntimeError(f"Missing required env vars: {', '.join(missing)}")
|
||||
|
||||
|
||||
def realm_to_base_dn(realm: str) -> str:
    """Translate a Kerberos realm (dotted) into an LDAP base DN of DC= parts.

    Raises RuntimeError when the realm contains no usable components.
    """
    components = [chunk for chunk in realm.split(".") if chunk]
    if not components:
        raise RuntimeError("REALM is invalid and cannot be converted to base DN")
    return ",".join("DC=" + chunk for chunk in components)
|
||||
|
||||
|
||||
def parse_guid(raw_value: str, is_b64: bool) -> str:
    """Normalize an objectGUID attribute value to a canonical UUID string.

    AD returns the GUID either as base64-encoded 16-byte little-endian binary
    (is_b64=True) or as text, possibly wrapped in braces (is_b64=False).
    Raises ValueError on malformed input.
    """
    if not is_b64:
        text = raw_value.strip().strip("{}")
        return str(uuid.UUID(text))

    blob = base64.b64decode(raw_value)
    if len(blob) != 16:
        raise ValueError("objectGUID has invalid binary length")
    # bytes_le: AD stores the first three fields little-endian.
    return str(uuid.UUID(bytes_le=blob))
|
||||
|
||||
|
||||
def run_command(command: List[str], check: bool = True) -> subprocess.CompletedProcess:
    """Run *command*, capturing decoded stdout/stderr.

    With check=True (default), a non-zero exit raises RuntimeError carrying
    the command line and whichever of stderr/stdout has content.
    """
    completed = subprocess.run(command, capture_output=True, text=True)
    if check and completed.returncode != 0:
        detail = completed.stderr.strip() or completed.stdout.strip()
        raise RuntimeError(f"Command failed ({' '.join(command)}): {detail}")
    return completed
|
||||
|
||||
|
||||
def parse_groups_from_ldap_output(output: str) -> List[Dict[str, str]]:
    """Parse LDIF-style output (net ads search / ldapsearch) into group dicts.

    Returns one dict per FileShare_* group with keys ``objectGUID`` (canonical
    UUID string), ``samAccountName`` and ``shareName`` (sAMAccountName with
    the FileShare_ prefix removed). Entries lacking either attribute, or not
    matching the prefix, are silently skipped. De-duplicated by objectGUID
    (last occurrence wins).
    """
    # Each entry maps attribute name -> (raw value, was_base64_encoded).
    entries: List[Dict[str, Tuple[str, bool]]] = []
    current: Dict[str, Tuple[str, bool]] = {}

    for line in output.splitlines():
        stripped = line.strip()
        if not stripped:
            # A blank line terminates the current LDIF entry.
            if current:
                entries.append(current)
            current = {}
            continue
        # Skip comments and the dn: line; only attributes matter here.
        if stripped.startswith("#") or stripped.startswith("dn:"):
            continue

        match = ATTR_RE.match(stripped)
        if not match:
            continue

        # NOTE(review): base64 detection depends on ATTR_RE capturing the
        # "::" delimiter in group 2 — verify the pattern allows that.
        key, delimiter, value = match.groups()
        current[key] = (value, delimiter == "::")

    # Flush the final entry if the output did not end with a blank line.
    if current:
        entries.append(current)

    groups: List[Dict[str, str]] = []
    for entry in entries:
        if "objectGUID" not in entry or "sAMAccountName" not in entry:
            continue

        sam_value, _ = entry["sAMAccountName"]
        sam = sam_value.strip()
        if not sam.startswith(GROUP_PREFIX):
            continue

        # The share name is everything after the FileShare_ prefix.
        share_name = sam[len(GROUP_PREFIX) :]
        if not share_name:
            continue

        guid_value, is_b64 = entry["objectGUID"]
        guid = parse_guid(guid_value.strip(), is_b64)

        groups.append(
            {
                "objectGUID": guid,
                "samAccountName": sam,
                "shareName": share_name,
            }
        )

    # Last entry wins if the directory ever returns the same GUID twice.
    deduped: Dict[str, Dict[str, str]] = {}
    for group in groups:
        deduped[group["objectGUID"]] = group

    return list(deduped.values())
|
||||
|
||||
|
||||
def fetch_groups_via_net_ads() -> List[Dict[str, str]]:
    """Query AD for FileShare_* groups using the machine account (-P).

    Raises RuntimeError with the tool's output when the search fails.
    """
    proc = run_command(
        ["net", "ads", "search", "-P", LDAP_FILTER, "objectGUID", "sAMAccountName"],
        check=False,
    )
    if proc.returncode != 0:
        detail = proc.stderr.strip() or proc.stdout.strip() or "net ads search failed"
        raise RuntimeError(detail)
    return parse_groups_from_ldap_output(proc.stdout)
|
||||
|
||||
|
||||
def fetch_groups_via_ldap_bind() -> List[Dict[str, str]]:
    """Fallback discovery: ldapsearch simple bind with the join credentials.

    Used when the machine-account based ``net ads search`` fails. Raises
    RuntimeError when JOIN_USER/JOIN_PASSWORD are unavailable or the search
    itself fails.
    """
    realm = os.environ["REALM"]
    join_user = os.getenv("JOIN_USER", "")
    join_password = os.getenv("JOIN_PASSWORD", "")
    if not join_user or not join_password:
        raise RuntimeError(
            "JOIN_USER/JOIN_PASSWORD are required for LDAP credential fallback"
        )

    # UPN-style bind DN (user@REALM) works against Active Directory.
    bind_dn = f"{join_user}@{realm}"
    ldap_uri = os.getenv("LDAP_URI", f"ldaps://{os.environ['DOMAIN']}")
    base_dn = os.getenv("LDAP_BASE_DN", realm_to_base_dn(realm))

    # The password is passed via a mode-0600 temp file (-y) so it never
    # appears on the command line; the file is always removed afterwards.
    pw_file = None
    try:
        with tempfile.NamedTemporaryFile("w", encoding="utf-8", delete=False) as handle:
            pw_file = handle.name
            # BUG FIX: ldapsearch -y treats the file contents verbatim as the
            # password, so the previously appended trailing "\n" became part
            # of the password and broke the bind.
            handle.write(join_password)
            os.chmod(pw_file, 0o600)

        result = run_command(
            [
                "ldapsearch",
                "-LLL",
                "-x",
                "-H",
                ldap_uri,
                "-D",
                bind_dn,
                "-y",
                pw_file,
                "-b",
                base_dn,
                LDAP_FILTER,
                "objectGUID",
                "sAMAccountName",
            ]
        )
        return parse_groups_from_ldap_output(result.stdout)
    finally:
        if pw_file and os.path.exists(pw_file):
            os.remove(pw_file)
|
||||
|
||||
|
||||
def fetch_fileshare_groups() -> List[Dict[str, str]]:
    """Fetch FileShare_* groups, preferring machine-account search.

    Falls back to an explicit LDAP credential bind when net ads fails.
    """
    try:
        return fetch_groups_via_net_ads()
    except Exception as net_exc:  # pylint: disable=broad-except
        log(f"net ads search failed, falling back to LDAP bind: {net_exc}")
    return fetch_groups_via_ldap_bind()
|
||||
|
||||
|
||||
def open_db() -> sqlite3.Connection:
    """Open (and lazily create) the share-registry SQLite database.

    Rows are keyed by the AD objectGUID so a renamed group (and therefore a
    renamed share) keeps pointing at the same on-disk directory.
    """
    os.makedirs(os.path.dirname(DB_PATH), exist_ok=True)
    conn = sqlite3.connect(DB_PATH)
    # Row factory enables name-based column access in the sync helpers.
    conn.row_factory = sqlite3.Row
    conn.execute(
        """
        CREATE TABLE IF NOT EXISTS shares (
            objectGUID TEXT PRIMARY KEY,
            samAccountName TEXT NOT NULL,
            shareName TEXT NOT NULL,
            path TEXT NOT NULL,
            createdAt TIMESTAMP NOT NULL,
            lastSeenAt TIMESTAMP NOT NULL,
            isActive INTEGER NOT NULL
        )
        """
    )
    conn.commit()
    return conn
|
||||
|
||||
|
||||
def ensure_group_path(path: str) -> None:
    """Create *path* if needed and force setgid group-rwx permissions (2770)."""
    if not os.path.isdir(path):
        os.makedirs(path)
    # Re-assert mode every run: setgid keeps new files in the share's group.
    os.chmod(path, 0o2770)
|
||||
|
||||
|
||||
def reconcile_db(conn: sqlite3.Connection, ad_groups: List[Dict[str, str]]) -> None:
    """Sync the shares table with the groups currently present in AD.

    New groups get a GUID-named directory under GROUP_ROOT plus an INSERT;
    known groups get their directory re-asserted and mutable fields (names,
    lastSeenAt, isActive) refreshed; groups absent from this snapshot are
    flagged isActive = 0 but never deleted, preserving their data.
    """
    timestamp = now_utc()
    seen = set()  # objectGUIDs present in this AD snapshot

    for group in ad_groups:
        guid = group["objectGUID"]
        sam = group["samAccountName"]
        share_name = group["shareName"]
        seen.add(guid)

        row = conn.execute(
            "SELECT objectGUID, path FROM shares WHERE objectGUID = ?", (guid,)
        ).fetchone()

        if row is None:
            # First sighting: the directory is named by GUID, not share name,
            # so a later AD group rename keeps pointing at the same data.
            path = os.path.join(GROUP_ROOT, guid)
            ensure_group_path(path)
            conn.execute(
                """
                INSERT INTO shares (objectGUID, samAccountName, shareName, path, createdAt, lastSeenAt, isActive)
                VALUES (?, ?, ?, ?, ?, ?, 1)
                """,
                (guid, sam, share_name, path, timestamp, timestamp),
            )
            log(f"Discovered new share group {sam} ({guid})")
            continue

        # Known group: re-assert the directory and refresh mutable fields.
        path = row["path"]
        ensure_group_path(path)
        conn.execute(
            """
            UPDATE shares
            SET samAccountName = ?,
                shareName = ?,
                lastSeenAt = ?,
                isActive = 1
            WHERE objectGUID = ?
            """,
            (sam, share_name, timestamp, guid),
        )

    # Deactivate any active row that was not part of this snapshot.
    # NOTE(review): lastSeenAt is also bumped on deactivation, so it records
    # "last reconciled" rather than "last seen in AD" — confirm intent.
    if seen:
        placeholders = ",".join("?" for _ in seen)
        conn.execute(
            f"UPDATE shares SET isActive = 0, lastSeenAt = ? WHERE isActive = 1 AND objectGUID NOT IN ({placeholders})",
            (timestamp, *seen),
        )
    else:
        conn.execute(
            "UPDATE shares SET isActive = 0, lastSeenAt = ? WHERE isActive = 1",
            (timestamp,),
        )

    conn.commit()
|
||||
|
||||
|
||||
def qualify_group(group_name: str) -> str:
    """Render a group token for smb.conf "valid users".

    Domain-qualified and quoted (@"DOM\\group") when WORKGROUP is set, since
    AD group names may contain spaces; plain @group otherwise.
    """
    domain = os.getenv("WORKGROUP", "").strip()
    if not domain:
        return f"@{group_name}"
    return f'@"{domain}\\{group_name}"'
|
||||
|
||||
|
||||
def is_valid_share_name(share_name: str) -> bool:
    """Check a candidate SMB share name.

    Rejects empty names, Samba's reserved section names (case-insensitive),
    and names containing characters Samba/Windows disallow.
    """
    if not share_name:
        return False
    if share_name.casefold() in {"global", "homes", "printers"}:
        return False
    return SHARE_NAME_INVALID_RE.search(share_name) is None
|
||||
|
||||
|
||||
def render_dynamic_shares(conn: sqlite3.Connection) -> None:
    """Regenerate GENERATED_CONF with one [share] stanza per active group.

    Duplicate (case-insensitive) and invalid share names are skipped with a
    log line. The file is written to a temp file in the same directory and
    moved into place with os.replace, so smbd never reads a partial include.
    """
    rows = conn.execute(
        """
        SELECT objectGUID, samAccountName, shareName, path
        FROM shares
        WHERE isActive = 1
        ORDER BY shareName COLLATE NOCASE
        """
    ).fetchall()

    stanzas: List[str] = [
        "# This file is generated by /app/reconcile_shares.py.",
        "# Manual changes will be overwritten.",
        "",
    ]
    used_share_names = set()  # casefolded names already emitted

    for row in rows:
        share_name = row["shareName"].strip()
        if not share_name:
            continue
        # SMB share names are case-insensitive; collide on the casefold.
        folded_name = share_name.casefold()
        if folded_name in used_share_names:
            log(
                f"Skipping duplicate share name '{share_name}' for objectGUID {row['objectGUID']}"
            )
            continue

        if not is_valid_share_name(share_name):
            log(
                f"Skipping invalid SMB share name '{share_name}' for objectGUID {row['objectGUID']}"
            )
            continue

        used_share_names.add(folded_name)
        valid_users = qualify_group(row["samAccountName"])
        stanzas.extend(
            [
                f"[{share_name}]",
                f"path = {row['path']}",
                "read only = no",
                "browseable = yes",
                "guest ok = no",
                f"valid users = {valid_users}",
                "create mask = 0660",
                "directory mask = 2770",
                "inherit permissions = yes",
                "access based share enumeration = yes",
                "",
            ]
        )

    content = "\n".join(stanzas).rstrip() + "\n"
    os.makedirs(os.path.dirname(GENERATED_CONF), exist_ok=True)
    # Temp file in the target directory keeps os.replace on one filesystem
    # (atomic rename).
    with tempfile.NamedTemporaryFile(
        "w", encoding="utf-8", dir=os.path.dirname(GENERATED_CONF), delete=False
    ) as tmp_file:
        tmp_file.write(content)
        temp_path = tmp_file.name

    os.replace(temp_path, GENERATED_CONF)
|
||||
|
||||
|
||||
def reload_samba() -> None:
    """Ask all running Samba daemons to re-read their config (best effort)."""
    outcome = run_command(["smbcontrol", "all", "reload-config"], check=False)
    if outcome.returncode != 0:
        # Non-fatal: the next cron cycle regenerates and reloads again.
        log("smbcontrol reload-config failed; will retry on next run")
|
||||
|
||||
|
||||
def resolve_user_uid(qualified_user: str) -> Optional[int]:
    """Look up the UID for a (possibly DOMAIN\\-qualified) user via NSS.

    Returns None when the account is unknown (e.g. winbind not ready).
    """
    try:
        entry = pwd.getpwnam(qualified_user)
    except KeyError:
        return None
    return entry.pw_uid
|
||||
|
||||
|
||||
def resolve_group_gid(qualified_group: str) -> Optional[int]:
    """Look up the GID for a (possibly DOMAIN\\-qualified) group via NSS.

    Returns None when the group is unknown (e.g. winbind not ready).
    """
    try:
        entry = grp.getgrnam(qualified_group)
    except KeyError:
        return None
    return entry.gr_gid
|
||||
|
||||
|
||||
def set_acl(path: str, user_uid: int, admin_gid: Optional[int]) -> None:
    """Reset ACLs on *path*, then grant rwx (access + default) to the user
    and, when resolvable, to the admin group.

    Failures are logged, not raised, so one bad path cannot stop the run.
    """
    # -b wipes any previously granted entries before re-granting.
    run_command(["setfacl", "-b", path], check=False)
    entries = [f"u:{user_uid}:rwx", f"d:u:{user_uid}:rwx"]
    if admin_gid is not None:
        entries.append(f"g:{admin_gid}:rwx")
        entries.append(f"d:g:{admin_gid}:rwx")

    outcome = run_command(["setfacl", "-m", ",".join(entries), path], check=False)
    if outcome.returncode != 0:
        log(
            f"setfacl failed for {path}: {outcome.stderr.strip() or outcome.stdout.strip()}"
        )
|
||||
|
||||
|
||||
def set_group_acl(path: str, group_gid: int) -> None:
    """Grant rwx (access + default) to *group_gid* on *path*.

    Additive only — existing ACL entries are left in place. Failures are
    logged rather than raised.
    """
    spec = ",".join([f"g:{group_gid}:rwx", f"d:g:{group_gid}:rwx"])
    outcome = run_command(["setfacl", "-m", spec, path], check=False)
    if outcome.returncode != 0:
        log(
            f"setfacl failed for {path}: {outcome.stderr.strip() or outcome.stdout.strip()}"
        )
|
||||
|
||||
|
||||
def set_group_acl_with_admin(
    path: str, group_gid: int, admin_gid: Optional[int]
) -> None:
    """Reset ACLs on *path*, then grant rwx (access + default) to the share
    group and, when resolvable, to the admin group.

    Failures are logged rather than raised.
    """
    # -b wipes any previously granted entries before re-granting.
    run_command(["setfacl", "-b", path], check=False)
    entries = [f"g:{group_gid}:rwx", f"d:g:{group_gid}:rwx"]
    if admin_gid is not None:
        entries.append(f"g:{admin_gid}:rwx")
        entries.append(f"d:g:{admin_gid}:rwx")

    outcome = run_command(["setfacl", "-m", ",".join(entries), path], check=False)
    if outcome.returncode != 0:
        log(
            f"setfacl failed for {path}: {outcome.stderr.strip() or outcome.stdout.strip()}"
        )
|
||||
|
||||
|
||||
def list_domain_users() -> List[str]:
    """Enumerate domain users via winbind (wbinfo -u).

    Strips any DOMAIN\\ prefix, drops blanks and machine accounts (trailing
    "$"), and returns a sorted, de-duplicated list. Returns [] (with a log
    line) when wbinfo fails.
    """
    proc = run_command(["wbinfo", "-u"], check=False)
    if proc.returncode != 0:
        log("wbinfo -u failed; skipping private directory sync")
        return []

    names = set()
    for raw_line in proc.stdout.splitlines():
        name = raw_line.strip()
        if "\\" in name:
            name = name.split("\\", 1)[1]
        # Machine/trust accounts end in "$" and get no private folder.
        if name and not name.endswith("$"):
            names.add(name)
    return sorted(names)
|
||||
|
||||
|
||||
def sync_public_directory() -> None:
    """Ensure PUBLIC_ROOT exists, is group-owned by PUBLIC_GROUP, and 2770."""
    workgroup = os.environ["WORKGROUP"]
    public_group = os.getenv("PUBLIC_GROUP", "Domain Users")
    qualified_group = f"{workgroup}\\{public_group}"

    os.makedirs(PUBLIC_ROOT, exist_ok=True)
    gid = resolve_group_gid(qualified_group)

    if gid is not None:
        # root:<public group> ownership, then a clean slate of ACLs.
        os.chown(PUBLIC_ROOT, 0, gid)
        run_command(["setfacl", "-b", PUBLIC_ROOT], check=False)
        set_group_acl(PUBLIC_ROOT, gid)
    else:
        # winbind may not have the group mapped yet; retried next cycle.
        log(f"Unable to resolve GID for {qualified_group}; public ACLs unchanged")

    # setgid + group rwx so new files inherit the directory's group.
    os.chmod(PUBLIC_ROOT, 0o2770)
|
||||
|
||||
|
||||
def sync_private_directories() -> None:
    """Create a 0700, user-owned folder under PRIVATE_ROOT per domain user.

    Domain Admins additionally get rwx via ACL when the group resolves.
    Users whose UID cannot be resolved yet are skipped (retried next cycle).
    """
    workgroup = os.environ["WORKGROUP"]
    admin_group = f"{workgroup}\\Domain Admins"
    admin_gid = resolve_group_gid(admin_group)

    os.makedirs(PRIVATE_ROOT, exist_ok=True)
    # Parent stays world-traversable; per-user dirs below carry protection.
    os.chmod(PRIVATE_ROOT, 0o755)

    users = list_domain_users()
    for username in users:
        qualified_user = f"{workgroup}\\{username}"
        uid = resolve_user_uid(qualified_user)
        if uid is None:
            log(f"Unable to resolve UID for {qualified_user}, skipping private folder")
            continue

        user_path = os.path.join(PRIVATE_ROOT, username)
        os.makedirs(user_path, exist_ok=True)
        # gid -1 leaves the existing group unchanged.
        os.chown(user_path, uid, -1)
        os.chmod(user_path, 0o700)
        set_acl(user_path, uid, admin_gid)
|
||||
|
||||
|
||||
def sync_dynamic_directory_permissions(conn: sqlite3.Connection) -> None:
    """Re-assert ownership, mode (2770) and ACLs on every active share dir."""
    workgroup = os.environ["WORKGROUP"]
    admin_gid = resolve_group_gid(f"{workgroup}\\Domain Admins")

    rows = conn.execute(
        "SELECT samAccountName, path FROM shares WHERE isActive = 1"
    ).fetchall()
    for row in rows:
        sam = row["samAccountName"]
        path = row["path"]
        os.makedirs(path, exist_ok=True)
        os.chmod(path, 0o2770)

        gid = resolve_group_gid(f"{workgroup}\\{sam}")
        if gid is None:
            # Group not (yet) resolvable through winbind; keep current ACLs.
            log(f"Unable to resolve GID for {workgroup}\\{sam}; leaving existing ACLs")
            continue

        # root:<share group> ownership, then group + admin ACLs.
        os.chown(path, 0, gid)
        set_group_acl_with_admin(path, gid, admin_gid)
|
||||
|
||||
|
||||
def with_lock() -> bool:
    """Run one full reconciliation cycle under an exclusive, non-blocking lock.

    Returns True when a cycle ran, False when another instance already held
    the lock. Exceptions from the cycle propagate to the caller (main logs
    them and maps them to exit code 1).
    """
    os.makedirs(os.path.dirname(LOCK_PATH), exist_ok=True)
    lock_file = open(LOCK_PATH, "w", encoding="utf-8")
    try:
        # LOCK_NB: raise instead of blocking so overlapping cron runs skip.
        fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        log("Another reconciliation instance is running; skipping this cycle")
        lock_file.close()
        return False

    try:
        ensure_required_env()
        os.makedirs(GROUP_ROOT, exist_ok=True)

        conn = open_db()
        try:
            groups = fetch_fileshare_groups()
            reconcile_db(conn, groups)
            sync_dynamic_directory_permissions(conn)
            render_dynamic_shares(conn)
        finally:
            conn.close()

        sync_public_directory()
        sync_private_directories()
        reload_samba()
        log("Reconciliation completed")
        return True
    finally:
        # Closing the descriptor releases the flock.
        lock_file.close()
|
||||
|
||||
|
||||
def main() -> int:
    """Entry point: run one reconciliation cycle.

    Returns 0 both on success and when the cycle was skipped because another
    instance held the lock (a skipped cron run is not an error); returns 1
    only on unexpected failures. The previous ``return 0 if ok else 0`` was a
    dead conditional that ignored the lock result entirely.
    """
    try:
        with_lock()
        return 0
    except Exception as exc:  # pylint: disable=broad-except
        log(f"ERROR: {exc}")
        return 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Invoked directly by init.sh at startup and by cron every 5 minutes.
    sys.exit(main())
|
||||
Reference in New Issue
Block a user