Sync workspace: config, docs, scripts, CI, operator rules, and submodule pointers.
- Update submodule pointers: dbis_core, cross-chain-pmm-lps, explorer-monorepo, metamask-integration, pr-workspace/chains
- Omit embedded publish git dirs and empty placeholders from the index

Made-with: Cursor
This commit is contained in:
@@ -129,14 +129,27 @@ def main() -> None:
|
||||
|
||||
ip_to_rows: dict[str, list[dict]] = {}
|
||||
vmid_to_ip_live: dict[str, str] = {}
|
||||
live_vmids_all: set[str] = set()
|
||||
for g in guests:
|
||||
ip = (g.get("ip") or "").strip()
|
||||
vmid = str(g.get("vmid", "")).strip()
|
||||
if vmid:
|
||||
live_vmids_all.add(vmid)
|
||||
if ip:
|
||||
ip_to_rows.setdefault(ip, []).append(g)
|
||||
if vmid and ip:
|
||||
vmid_to_ip_live[vmid] = ip
|
||||
|
||||
doc_vmids = set(vmid_to_ip_doc.keys())
|
||||
vmids_in_all_vmids_doc_not_on_cluster = sorted(
|
||||
doc_vmids - live_vmids_all, key=lambda x: int(x) if x.isdigit() else 0
|
||||
)
|
||||
only_live_not_in_doc = live_vmids_all - doc_vmids
|
||||
vmids_on_cluster_not_in_all_vmids_table_count = len(only_live_not_in_doc)
|
||||
vmids_on_cluster_not_in_all_vmids_table_sample = sorted(
|
||||
only_live_not_in_doc, key=lambda x: int(x) if x.isdigit() else 0
|
||||
)[:100]
|
||||
|
||||
ip_to_vmids: dict[str, list[str]] = {
|
||||
ip: [str(r.get("vmid", "") or "?").strip() or "?" for r in rows]
|
||||
for ip, rows in ip_to_rows.items()
|
||||
@@ -188,10 +201,17 @@ def main() -> None:
|
||||
"guest_lan_ips_not_in_declared_sources": guest_lan_not_declared,
|
||||
"declared_lan11_ips_not_on_live_guests": declared_lan11_not_on_guests,
|
||||
"vmid_ip_mismatch_live_vs_all_vmids_doc": vmid_ip_mismatch,
|
||||
"vmids_in_all_vmids_doc_not_on_cluster": vmids_in_all_vmids_doc_not_on_cluster,
|
||||
"vmids_on_cluster_not_in_all_vmids_table": {
|
||||
"count": vmids_on_cluster_not_in_all_vmids_table_count,
|
||||
"sample_vmids": vmids_on_cluster_not_in_all_vmids_table_sample,
|
||||
"note": "ALL_VMIDS_ENDPOINTS pipe tables do not list every guest; large count is normal.",
|
||||
},
|
||||
"hypervisor_and_infra_ips_excluded_from_guest_match": sorted(hyp_ips),
|
||||
"declared_sources": {
|
||||
"ip_addresses_conf_ipv4_count": len(conf_ips),
|
||||
"all_vmids_md_lan11_count": len(doc_ips),
|
||||
"all_vmids_md_row_count": len(doc_vmids),
|
||||
},
|
||||
"notes": [],
|
||||
}
|
||||
@@ -208,6 +228,9 @@ def main() -> None:
|
||||
"source": "proxmox_cluster_pvesh_plus_config",
|
||||
"guests": guests,
|
||||
}
|
||||
neigh = live.get("ip_neigh_vmbr0_sample")
|
||||
if isinstance(neigh, dict):
|
||||
inv_out["ip_neigh_vmbr0_sample"] = neigh
|
||||
|
||||
args.out_dir.mkdir(parents=True, exist_ok=True)
|
||||
(args.out_dir / "live_inventory.json").write_text(
|
||||
|
||||
@@ -30,8 +30,12 @@ PY
|
||||
if ! ping -c1 -W2 "$SEED" >/dev/null 2>&1; then
|
||||
stub_unreachable >"$TMP"
|
||||
else
|
||||
REMOTE_PY="python3 -"
|
||||
case "${IT_COLLECT_IP_NEIGH:-}" in
|
||||
1|yes|true|TRUE|Yes) REMOTE_PY="env IT_COLLECT_IP_NEIGH=1 python3 -" ;;
|
||||
esac
|
||||
if ! ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no \
|
||||
"root@${SEED}" "python3 -" <"$PY" >"$TMP" 2>/dev/null; then
|
||||
"root@${SEED}" "$REMOTE_PY" <"$PY" >"$TMP" 2>/dev/null; then
|
||||
stub_unreachable >"$TMP"
|
||||
fi
|
||||
fi
|
||||
@@ -47,5 +51,8 @@ set -e
|
||||
cp -f "$OUT_DIR/live_inventory.json" "${OUT_DIR}/live_inventory_${TS}.json" 2>/dev/null || true
|
||||
cp -f "$OUT_DIR/drift.json" "${OUT_DIR}/drift_${TS}.json" 2>/dev/null || true
|
||||
rm -f "$TMP"
|
||||
if [[ -n "${IT_BFF_SNAPSHOT_DB:-}" ]]; then
|
||||
python3 "${SCRIPT_DIR}/persist-it-snapshot-sqlite.py" "$IT_BFF_SNAPSHOT_DB" "$OUT_DIR" "${DRIFT_RC}" 2>/dev/null || true
|
||||
fi
|
||||
echo "Latest: ${OUT_DIR}/live_inventory.json , ${OUT_DIR}/drift.json"
|
||||
exit "${DRIFT_RC}"
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
@@ -18,6 +19,11 @@ def _extract_ip_from_net_line(line: str) -> str | None:
|
||||
return m.group(1) if m else None
|
||||
|
||||
|
||||
def _extract_hwaddr_from_net_line(line: str) -> str | None:
|
||||
m = re.search(r"hwaddr=([0-9A-Fa-f:]+)", line, re.IGNORECASE)
|
||||
return m.group(1) if m else None
|
||||
|
||||
|
||||
def _read_config(path: str) -> str:
|
||||
try:
|
||||
with open(path, encoding="utf-8", errors="replace") as f:
|
||||
@@ -65,12 +71,16 @@ def main() -> None:
|
||||
|
||||
body = _read_config(cfg_path)
|
||||
ip = ""
|
||||
mac = ""
|
||||
for line in body.splitlines():
|
||||
if line.startswith("net0:"):
|
||||
got = _extract_ip_from_net_line(line)
|
||||
if got:
|
||||
ip = got
|
||||
break
|
||||
h = _extract_hwaddr_from_net_line(line)
|
||||
if h:
|
||||
mac = h
|
||||
break
|
||||
if not ip and t == "qemu":
|
||||
for line in body.splitlines():
|
||||
if line.startswith("ipconfig0:"):
|
||||
@@ -84,6 +94,11 @@ def main() -> None:
|
||||
got = _extract_ip_from_net_line(line)
|
||||
if got:
|
||||
ip = got
|
||||
if not mac:
|
||||
h = _extract_hwaddr_from_net_line(line)
|
||||
if h:
|
||||
mac = h
|
||||
if ip:
|
||||
break
|
||||
|
||||
guests.append(
|
||||
@@ -94,14 +109,40 @@ def main() -> None:
|
||||
"name": name,
|
||||
"status": status,
|
||||
"ip": ip,
|
||||
"mac": mac,
|
||||
"config_path": cfg_path,
|
||||
}
|
||||
)
|
||||
|
||||
out = {
|
||||
out: dict = {
|
||||
"collected_at": collected_at,
|
||||
"guests": sorted(guests, key=lambda g: int(g["vmid"])),
|
||||
}
|
||||
|
||||
if os.environ.get("IT_COLLECT_IP_NEIGH", "").strip().lower() in (
|
||||
"1",
|
||||
"yes",
|
||||
"true",
|
||||
):
|
||||
neigh_lines: list[str] = []
|
||||
try:
|
||||
raw_neigh = subprocess.check_output(
|
||||
["ip", "-4", "neigh", "show", "dev", "vmbr0"],
|
||||
text=True,
|
||||
stderr=subprocess.DEVNULL,
|
||||
timeout=30,
|
||||
)
|
||||
neigh_lines = [
|
||||
ln.strip() for ln in raw_neigh.splitlines() if ln.strip()
|
||||
][:500]
|
||||
except (subprocess.CalledProcessError, subprocess.TimeoutExpired, FileNotFoundError):
|
||||
neigh_lines = []
|
||||
out["ip_neigh_vmbr0_sample"] = {
|
||||
"collected_at": collected_at,
|
||||
"line_count": len(neigh_lines),
|
||||
"lines": neigh_lines,
|
||||
}
|
||||
|
||||
json.dump(out, sys.stdout, indent=2)
|
||||
|
||||
|
||||
|
||||
78
scripts/it-ops/persist-it-snapshot-sqlite.py
Executable file
78
scripts/it-ops/persist-it-snapshot-sqlite.py
Executable file
@@ -0,0 +1,78 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Optional: append IT inventory export metadata to SQLite (Phase 1 BFF persistence stub)."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def main() -> None:
    """Append one IT inventory-export run's metadata to a SQLite table.

    CLI: persist-it-snapshot-sqlite.py <db_path> <reports_dir> <drift_exit_code>

    Reads ``drift.json`` and ``live_inventory.json`` from <reports_dir> (both
    optional), derives summary counts, and inserts a row into the
    ``inventory_export_run`` table, creating the table and the parent
    directory of <db_path> on first use. Exits with status 2 on missing
    arguments; a non-numeric exit code is recorded as -1 rather than failing.
    """
    if len(sys.argv) < 4:
        print(
            "usage: persist-it-snapshot-sqlite.py <db_path> <reports_dir> <drift_exit_code>",
            file=sys.stderr,
        )
        sys.exit(2)
    db_path = Path(sys.argv[1])
    reports = Path(sys.argv[2])
    try:
        rc = int(sys.argv[3])
    except ValueError:
        # Still persist the run; -1 marks "exit code was not an integer".
        rc = -1

    drift_path = reports / "drift.json"
    live_path = reports / "live_inventory.json"
    # Read drift.json exactly once and reuse the text for both parsing and the
    # raw-blob column. (Previously the file was read twice — once for
    # json.loads and once inside the INSERT tuple — so a concurrent rewrite
    # could store a blob inconsistent with the derived counts. An empty file
    # now degrades to {} instead of raising JSONDecodeError.)
    drift_text = drift_path.read_text(encoding="utf-8") if drift_path.is_file() else ""
    drift = json.loads(drift_text) if drift_text.strip() else {}
    live = json.loads(live_path.read_text(encoding="utf-8")) if live_path.is_file() else {}

    # Prefer the timestamp recorded by the exporter; fall back to "now" (UTC).
    collected = drift.get("collected_at") or live.get("collected_at")
    if not collected:
        collected = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")

    # duplicate_ips is expected to be a dict of ip -> rows; anything else
    # counts as zero buckets rather than crashing.
    dup = drift.get("duplicate_ips") or {}
    dup_count = len(dup) if isinstance(dup, dict) else 0
    guests = live.get("guests") if isinstance(live.get("guests"), list) else []
    guest_count = len(guests)

    db_path.parent.mkdir(parents=True, exist_ok=True)
    con = sqlite3.connect(str(db_path))
    try:
        con.execute(
            """
            CREATE TABLE IF NOT EXISTS inventory_export_run (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                collected_at TEXT NOT NULL,
                drift_exit_code INTEGER NOT NULL,
                guest_count INTEGER NOT NULL,
                duplicate_ip_bucket_count INTEGER NOT NULL,
                drift_json TEXT NOT NULL,
                created_at TEXT NOT NULL
            )
            """
        )
        con.execute(
            """
            INSERT INTO inventory_export_run
            (collected_at, drift_exit_code, guest_count, duplicate_ip_bucket_count, drift_json, created_at)
            VALUES (?, ?, ?, ?, ?, ?)
            """,
            (
                collected,
                rc,
                guest_count,
                dup_count,
                # Store the raw text captured above; "{}" when absent/empty.
                drift_text if drift_text.strip() else "{}",
                datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
            ),
        )
        con.commit()
    finally:
        con.close()
    print(f"SQLite snapshot row written: {db_path}", file=sys.stderr)


if __name__ == "__main__":
    main()
|
||||
54
scripts/it-ops/proxmox-guarded-write-adapter.sh
Executable file
54
scripts/it-ops/proxmox-guarded-write-adapter.sh
Executable file
@@ -0,0 +1,54 @@
|
||||
#!/usr/bin/env bash
# Phase 3 preview: guarded Proxmox guest mutations (start/stop/reboot).
# Writes require the production guard plus an explicit --apply (or
# PROXMOX_OPS_APPLY=1); without it the script only prints the SSH command.
#
# Usage:
#   ./scripts/it-ops/proxmox-guarded-write-adapter.sh --vmid 6205 --action start [--apply]
# Env:
#   PROXMOX_HOST, PROXMOX_OPS_ALLOWED_VMIDS, PROXMOX_SAFE_DEFAULTS, PROXMOX_OPS_APPLY
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/proxmox-production-guard.sh"

TARGET_VMID=""
TARGET_ACTION=""
WANT_APPLY=false
# Minimal flag parser: each option consumes its value (if any) and errors on
# anything unrecognized.
while [[ $# -gt 0 ]]; do
  if [[ "$1" == "--vmid" ]]; then
    TARGET_VMID="${2:-}"
    shift 2
  elif [[ "$1" == "--action" ]]; then
    TARGET_ACTION="${2:-}"
    shift 2
  elif [[ "$1" == "--apply" ]]; then
    WANT_APPLY=true
    shift
  else
    echo "Unknown arg: $1" >&2
    exit 2
  fi
done

if [[ -z "$TARGET_VMID" || -z "$TARGET_ACTION" ]]; then
  echo "usage: $0 --vmid <VMID> --action start|stop|reboot [--apply]" >&2
  exit 2
fi

# Only the three whitelisted lifecycle verbs are accepted.
if [[ ! "$TARGET_ACTION" =~ ^(start|stop|reboot)$ ]]; then
  echo "action must be start|stop|reboot" >&2
  exit 2
fi

# Host resolution order: explicit PROXMOX_HOST env, then the per-VMID lookup
# from the sourced guard library, then the hard-coded default node.
TARGET_HOST="${PROXMOX_HOST:-$(get_host_for_vmid "$TARGET_VMID" 2>/dev/null || true)}"
if [[ -z "$TARGET_HOST" ]]; then
  TARGET_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
fi

# Allowlist check from proxmox-production-guard.sh; it prints its own error.
pguard_vmid_allowed "$TARGET_VMID" || exit 1

# Without the apply flag, show the exact command and stop successfully.
if ! pguard_require_apply_flag "$WANT_APPLY"; then
  echo "[dry-run] Would run on host $TARGET_HOST:"
  echo "  ssh root@$TARGET_HOST -- pct $TARGET_ACTION $TARGET_VMID"
  echo "Opt-in: pass --apply or set PROXMOX_OPS_APPLY=1 (and keep PROXMOX_OPS_ALLOWED_VMIDS scoped)."
  exit 0
fi

echo "[apply] ssh root@$TARGET_HOST -- pct $TARGET_ACTION $TARGET_VMID"
exec ssh -o BatchMode=yes -o ConnectTimeout=20 "root@${TARGET_HOST}" "pct $TARGET_ACTION $TARGET_VMID"
|
||||
79
scripts/it-ops/vlan-segmentation-ordered-checklist.sh
Executable file
79
scripts/it-ops/vlan-segmentation-ordered-checklist.sh
Executable file
@@ -0,0 +1,79 @@
|
||||
#!/usr/bin/env bash
# Ordered VLAN segmentation checklist (operator log). Does NOT configure UDM/Proxmox.
# Usage: ./scripts/it-ops/vlan-segmentation-ordered-checklist.sh [--apply]
#
# Default (dry-run): prints the checklist with completion marks and rewrites
# the state file. With --apply: additionally prompts once per incomplete step
# and records a completion timestamp for steps answered y/yes.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Completion state persists as JSON under reports/status/.
STATE_DIR="${PROJECT_ROOT}/reports/status"
STATE_FILE="${STATE_DIR}/vlan_segmentation_checklist_state.json"
APPLY=false
[[ "${1:-}" == "--apply" ]] && APPLY=true

# Each entry is "id:title"; array order is the mandated segmentation order.
STEPS=(
  "1_oob_ipmi:Out-of-band / IPMI (if any)"
  "2_tenant_vlans_200plus:Tenant-facing VLANs 200+"
  "3_besu_validators_rpc:Besu validators and RPC"
  "4_sankofa_app_tier:Sankofa app tier (portal, Keycloak, NPM upstreams)"
)

mkdir -p "$STATE_DIR"
TS="$(date -u +%Y-%m-%dT%H:%M:%SZ)"

if ! command -v python3 &>/dev/null; then
  echo "python3 required" >&2
  exit 1
fi

# Quoted heredoc ('PY') — no shell expansion inside; state file path,
# timestamp, apply flag, and steps are passed as argv instead.
python3 - "$STATE_FILE" "$TS" "$APPLY" "${STEPS[@]}" <<'PY'
import json
import sys
from pathlib import Path

path = Path(sys.argv[1])
ts = sys.argv[2]
# Bash passes the literal strings "true"/"false".
apply = str(sys.argv[3]).lower() == "true"
raw_steps = sys.argv[4:]
steps = []
for s in raw_steps:
    # Entries without a ":" separator are silently ignored.
    if ":" in s:
        sid, title = s.split(":", 1)
        steps.append({"id": sid, "title": title})

# Fresh document each run; only the "completed" map carries over from the
# previous state file (corrupt JSON is treated as no prior state).
data = {"updated_at": ts, "steps": steps, "completed": {}}
if path.is_file():
    try:
        old = json.loads(path.read_text(encoding="utf-8"))
        if isinstance(old.get("completed"), dict):
            data["completed"] = old["completed"]
    except json.JSONDecodeError:
        pass

print("VLAN segmentation — ordered checklist (spec order)\n")
for st in steps:
    cid = st["id"]
    done = data["completed"].get(cid)
    mark = "✓" if done else " "
    print(f" [{mark}] {cid}: {st['title']}")
    if done:
        print(f" completed_at: {done}")

if not apply:
    # Dry-run still rewrites the state file so updated_at/steps stay current.
    print("\nDry-run only. Re-run with --apply after completing each wave (updates state file).")
    path.write_text(json.dumps(data, indent=2), encoding="utf-8")
    sys.exit(0)

print("\n--apply: mark steps complete interactively (empty line to skip).")
for st in steps:
    cid = st["id"]
    # Already-completed steps are never re-prompted.
    if data["completed"].get(cid):
        continue
    try:
        ans = input(f"Mark '{cid}' complete now? [y/N]: ").strip().lower()
    except EOFError:
        # Non-interactive stdin: stop prompting, keep answers gathered so far.
        break
    if ans in ("y", "yes"):
        data["completed"][cid] = ts

path.write_text(json.dumps(data, indent=2), encoding="utf-8")
print(f"Wrote {path}")
PY
|
||||
Reference in New Issue
Block a user