Sync workspace: config, docs, scripts, CI, operator rules, and submodule pointers.

- Update dbis_core, cross-chain-pmm-lps, explorer-monorepo, metamask-integration, pr-workspace/chains
- Omit embedded publish repositories' .git directories and empty placeholder files from the index

Made-with: Cursor
This commit is contained in:
defiQUG
2026-04-12 06:12:20 -07:00
parent 6fb6bd3993
commit dbd517b279
2935 changed files with 327972 additions and 5533 deletions

View File

@@ -0,0 +1,195 @@
#!/usr/bin/env bash
# Ensure Proxmox spare R630 nodes (r630-03, r630-04, …) have local LVM thin storage
# aligned with the cluster and optionally absorb orphan PVs into VG pve for pool data.
#
# Cluster (/etc/pve/storage.cfg, edit once on any quorum member):
# - thin1 (pve/thin1): nodes r630-01 only (ml110 has data, not thin1).
# - data + local-lvm: list each node that has VG pve + thin pool data (e.g. ml110,r630-01,r630-03,r630-04).
#
# Per-node:
# - pvesm set local-lvm --disable 0 when local-lvm includes this host.
# - Optional: vgextend pve <orphan-pv> && lvextend -l +100%FREE /dev/pve/data
#
# Community Scripts (optional interactive tune-up on fresh PVE):
# bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/pve/post-pve-install.sh)"
# # or: bash "$PROJECT_ROOT/ProxmoxVE/tools/pve/post-pve-install.sh"
# Headless optional maintenance (apt refresh, pveam): scripts/proxmox/pve-spare-host-optional-tuneup.sh
# Six SSD thin pools on r630-03: scripts/proxmox/provision-r630-03-six-ssd-thinpools.sh
#
# Usage:
# bash scripts/proxmox/ensure-r630-spare-node-storage.sh --cluster-sync --apply
# bash scripts/proxmox/ensure-r630-spare-node-storage.sh --node r630-04 --extend-second-disk --apply
# bash scripts/proxmox/ensure-r630-spare-node-storage.sh --node 192.168.11.13 --apply
# After r630-03 matches this layout (VG pve + thin pool data), add it to shared storage:
# bash scripts/proxmox/ensure-r630-spare-node-storage.sh --add-pve-node r630-03 --apply
#
set -euo pipefail

# Locate the repository root relative to this script and pull in optional
# IP overrides (missing config file is fine — defaults below apply).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Host endpoints, overridable via config/ip-addresses.conf or the environment.
CLUSTER_HOST="${PROXMOX_CLUSTER_CFG_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
R630_03_IP="${PROXMOX_HOST_R630_03:-${PROXMOX_R630_03:-192.168.11.13}}"
R630_04_IP="${PROXMOX_HOST_R630_04:-${PROXMOX_R630_04:-192.168.11.14}}"

# CLI state: everything defaults to a harmless dry run with no target.
APPLY=0
CLUSTER_SYNC=0
EXTEND_SECOND_DISK=0
ADD_PVE_NODE=""
TARGET=""

# Non-interactive SSH: never prompt; auto-accept previously unseen host keys.
SSH_OPTS=(-o BatchMode=yes -o StrictHostKeyChecking=accept-new)
# Print the usage/header comment block at the top of this file, then exit 0.
usage() {
  sed -n '2,24p' "$0"
  exit 0
}
# Parse CLI flags. Value-taking options (--node, --add-pve-node) consume a
# second argument and abort via ${2:?} when it is missing or empty.
while [[ $# -gt 0 ]]; do
  opt="$1"
  case "$opt" in
    --apply)
      APPLY=1
      ;;
    --cluster-sync)
      CLUSTER_SYNC=1
      ;;
    --extend-second-disk)
      EXTEND_SECOND_DISK=1
      ;;
    --node)
      TARGET="${2:?}"
      shift
      ;;
    --add-pve-node)
      ADD_PVE_NODE="${2:?}"
      shift
      ;;
    -h|--help)
      usage
      ;;
    *)
      echo "Unknown option: $opt" >&2
      exit 2
      ;;
  esac
  shift
done
# Prefixed logger for all script output.
log() {
  printf '%s\n' "[ensure-r630-spare-node-storage] $*"
}

# Map a friendly spare-node name (case tolerated for the two known spares)
# to its IP; anything unrecognized passes through unchanged (host or IP).
resolve_ip() {
  local node="$1"
  case "$node" in
    r630-03|R630-03) printf '%s\n' "$R630_03_IP" ;;
    r630-04|R630-04) printf '%s\n' "$R630_04_IP" ;;
    *)               printf '%s\n' "$node" ;;
  esac
}

# Run a command as root on the given host using the shared SSH options.
ssh_root() {
  local host="$1"
  shift
  ssh "${SSH_OPTS[@]}" "root@$host" "$@"
}
# Announce dry-run mode once, up front.
if [[ "$APPLY" -ne 1 ]]; then
  log "Dry run: pass --apply to modify cluster config or nodes."
fi

# --cluster-sync: normalize /etc/pve/storage.cfg on the quorum member so the
# local-lvm/data lvmthin stanzas carry the standard pve/data body, and thin1
# stays pinned to r630-01. A timestamped backup is written before any change.
if [[ "$CLUSTER_SYNC" -eq 1 ]]; then
  log "Cluster storage.cfg via root@$CLUSTER_HOST"
  if [[ "$APPLY" -ne 1 ]]; then
    log "(skipped) would run storage.cfg alignment on $CLUSTER_HOST"
  else
    ssh "${SSH_OPTS[@]}" "root@$CLUSTER_HOST" python3 <<'PY'
from pathlib import Path
import shutil
from datetime import datetime

cfg = Path("/etc/pve/storage.cfg")
before = cfg.read_text()
shutil.copy(cfg, "/etc/pve/storage.cfg.bak." + datetime.now().strftime("%Y%m%d_%H%M%S"))

content_line = "\tcontent images,rootdir\n"
nodes_line = "\tnodes ml110,r630-01,r630-03,r630-04\n"
standard_body = ["\tthinpool data\n", "\tvgname pve\n", content_line, nodes_line]

src = before.splitlines(keepends=True)
rebuilt = []
idx = 0
while idx < len(src):
    cur = src[idx]
    if cur.startswith("lvmthin: local-lvm") or cur.startswith("lvmthin: data"):
        # Keep the stanza header, drop its existing tab-indented body, and
        # substitute the standard pve/data thin-pool body.
        rebuilt.append(cur)
        idx += 1
        while idx < len(src) and src[idx].startswith("\t"):
            idx += 1
        rebuilt.extend(standard_body)
        continue
    rebuilt.append(cur)
    idx += 1
after = "".join(rebuilt)

# thin1: only r630-01 (ml110 has data pool, not thin1)
unpinned = "lvmthin: thin1\n\tthinpool thin1\n\tvgname pve\n\tcontent images,rootdir\n"
pinned = unpinned + "\tnodes r630-01\n"
if unpinned in after and pinned not in after:
    after = after.replace(unpinned, pinned, 1)

if after != before:
    cfg.write_text(after)
    print("updated")
else:
    print("unchanged")
PY
  fi
fi
# --add-pve-node NODE: append NODE to the "nodes" lists of the shared
# local-lvm and data lvmthin stanzas in /etc/pve/storage.cfg on the quorum
# member. A timestamped backup is written before any change.
#
# Fix: the previous version set `stanza` only on "lvmthin:" headers and never
# reset it, so a "nodes" option belonging to a *later* non-lvmthin stanza
# (nfs:, dir:, ...) could be wrongly rewritten. Every stanza header now
# updates/clears the tracked id.
if [[ -n "$ADD_PVE_NODE" ]]; then
  log "Add $ADD_PVE_NODE to local-lvm + data node lists on $CLUSTER_HOST"
  if [[ "$APPLY" -ne 1 ]]; then
    log "(skipped) use --apply"
  else
    ssh "${SSH_OPTS[@]}" "root@$CLUSTER_HOST" python3 - "$ADD_PVE_NODE" <<'PY'
import sys
from pathlib import Path
import shutil
from datetime import datetime

node = sys.argv[1]
p = Path("/etc/pve/storage.cfg")
text = p.read_text()
shutil.copy(p, f"/etc/pve/storage.cfg.bak.{datetime.now().strftime('%Y%m%d_%H%M%S')}")

stanza = None
out = []
for line in text.splitlines(keepends=True):
    if line and not line[0].isspace():
        # New stanza header ("<type>: <id>"). Track the id only for lvmthin
        # stanzas and reset it otherwise, so options of a following
        # non-lvmthin stanza are never touched. len(parts) guard protects
        # against a malformed bare "lvmthin:" header line.
        parts = line.split()
        if line.startswith("lvmthin:") and len(parts) > 1:
            stanza = parts[1].strip()
        else:
            stanza = None
    if stanza in ("local-lvm", "data") and line.strip().startswith("nodes "):
        _, rest = line.strip().split(None, 1)
        hosts = [h.strip() for h in rest.split(",") if h.strip()]
        if node not in hosts:
            hosts.append(node)
            hosts.sort()
            line = "\tnodes " + ",".join(hosts) + "\n"
    out.append(line)

new_text = "".join(out)
if new_text != text:
    p.write_text(new_text)
    print("updated", node)
else:
    print("unchanged")
PY
  fi
fi
# --node NAME|IP: verify SSH access, show storage status, re-enable local-lvm
# (with --apply), and optionally fold orphan PVs into VG pve to grow the
# data thin pool.
if [[ -n "$TARGET" ]]; then
  node_ip="$(resolve_ip "$TARGET")"
  log "Node $TARGET -> $node_ip"
  if ! ssh_root "$node_ip" "hostname" 2>/dev/null; then
    log "ERROR: cannot SSH to root@$node_ip"
    exit 1
  fi
  log "Hostname: $(ssh_root "$node_ip" hostname)"
  ssh_root "$node_ip" "pvesm status 2>&1 | head -18" || true

  if [[ "$APPLY" -eq 1 ]]; then
    ssh_root "$node_ip" "pvesm set local-lvm --disable 0 2>/dev/null || true"
  fi

  if [[ "$EXTEND_SECOND_DISK" -eq 1 ]]; then
    if [[ "$APPLY" -ne 1 ]]; then
      log "(skipped) --extend-second-disk requires --apply"
    else
      # Absorb orphan PVs (those with no VG) into VG pve, then grow the
      # data thin pool over all newly freed extents.
      ssh_root "$node_ip" "bash -s" <<'REMOTE'
set -euo pipefail
vgs pve &>/dev/null || { echo "No VG pve; skipping."; exit 0; }
lvs pve/data &>/dev/null || { echo "No pve/data thin pool; skipping."; exit 0; }
mapfile -t ORPH < <(pvs --noheadings -o pv_name,vg_name | awk '$2=="" {print $1}')
if [[ ${#ORPH[@]} -eq 0 ]]; then echo "No orphan PVs."; exit 0; fi
for dev in "${ORPH[@]}"; do
  [[ "$dev" =~ ^/dev/ ]] || continue
  echo "vgextend pve $dev"
  vgextend pve "$dev"
done
lvextend -l +100%FREE /dev/pve/data
lvs pve/data
REMOTE
    fi
  fi
fi
log "Done."

View File

@@ -0,0 +1,103 @@
#!/usr/bin/env bash
set -euo pipefail
# Stop-and-restore migration helper for CT 5000 (blockscout-1), which cannot
# use the normal pvesh LXC migrate path because it has a local bind mount:
#   mp1: /var/lib/vz/logs-vmid5000,mp=/var/log-remote
#
# Default mode is plan-only. Export PROXMOX_OPS_APPLY=1 or pass --apply
# to execute the steps.

# Endpoints and tunables; every value can be overridden via the environment.
SOURCE_HOST="${SOURCE_HOST:-root@192.168.11.12}"
TARGET_HOST="${TARGET_HOST:-root@192.168.11.14}"
VMID="${VMID:-5000}"
TARGET_STORAGE="${TARGET_STORAGE:-local-lvm}"
LOG_BIND_DIR="${LOG_BIND_DIR:-/var/lib/vz/logs-vmid5000}"
BACKUP_DIR="${BACKUP_DIR:-/var/lib/vz/dump}"
COMPRESS="${COMPRESS:-zstd}"

# Flag parsing: --apply executes; --dry-run (the default) only prints the plan.
APPLY=0
for arg in "$@"; do
  case "$arg" in
    --apply)   APPLY=1 ;;
    --dry-run) APPLY=0 ;;
    *)
      echo "Unknown argument: $arg" >&2
      exit 1
      ;;
  esac
done

# Environment override for automation wrappers.
# NOTE(review): this wins even over an explicit --dry-run flag — confirm intended.
if [[ "${PROXMOX_OPS_APPLY:-0}" == "1" ]]; then
  APPLY=1
fi
# Echo each planned step; execute it only in apply mode. The step is passed
# as a single string and eval'd so nested remote quoting survives intact.
run() {
  printf '+ %s\n' "$*"
  if [[ "$APPLY" == "1" ]]; then
    eval "$@"
  fi
}

# SSH wrappers for the migration source and target hypervisors.
src_ssh() {
  ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no "$SOURCE_HOST" "$@"
}

dst_ssh() {
  ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no "$TARGET_HOST" "$@"
}
# ---- Migration plan ---------------------------------------------------------
# Each `run` line below is echoed always and executed only when APPLY=1.
echo "Blockscout 5000 stop-and-restore migration"
echo "mode: $([[ "$APPLY" == "1" ]] && echo apply || echo dry-run)"
echo "source: $SOURCE_HOST"
echo "target: $TARGET_HOST"
echo "vmid: $VMID"
echo "target storage: $TARGET_STORAGE"
echo
# Step 1: sanity-check the CT config and create the bind-mount directory on
# the target before anything is stopped.
echo "1. Verify source CT config and target bind directory"
run "src_ssh 'pct config $VMID | sed -n \"1,40p\"'"
run "src_ssh 'du -sh $LOG_BIND_DIR'"
run "dst_ssh 'mkdir -p $LOG_BIND_DIR && du -sh $LOG_BIND_DIR'"
echo
# Step 2: pre-seed the log directory while the CT is still running, so the
# final sync in step 3 (inside the downtime window) is small.
echo "2. Seed log bind directory to target"
run "src_ssh 'rsync -aHAX --delete -e \"ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null\" $LOG_BIND_DIR/ ${TARGET_HOST}:$LOG_BIND_DIR/'"
echo
# Step 3: downtime starts here — graceful shutdown with a hard stop fallback,
# then a final delta sync of the bind-mounted logs.
echo "3. Stop CT and perform final log sync"
run "src_ssh 'pct shutdown $VMID --timeout 120 || pct stop $VMID'"
run "src_ssh 'rsync -aHAX --delete -e \"ssh -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null\" $LOG_BIND_DIR/ ${TARGET_HOST}:$LOG_BIND_DIR/'"
echo
# Step 4: full stopped-mode vzdump; --remove 0 keeps existing backups intact.
echo "4. Create backup on source"
run "src_ssh 'vzdump $VMID --mode stop --compress $COMPRESS --storage local --dumpdir $BACKUP_DIR --remove 0'"
run "src_ssh 'ls -t $BACKUP_DIR/vzdump-lxc-$VMID-* | head -n 1'"
echo
# Step 5: ship the newest archive source -> target (requires source->target
# SSH trust).
echo "5. Copy backup archive to target"
run "src_ssh 'backup=\$(ls -t $BACKUP_DIR/vzdump-lxc-$VMID-* | head -n 1); scp -o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=no \"\$backup\" ${TARGET_HOST}:$BACKUP_DIR/'"
echo
# Step 6: restore under the same VMID onto the target storage.
echo "6. Restore CT on target storage"
run "dst_ssh 'backup=\$(ls -t $BACKUP_DIR/vzdump-lxc-$VMID-* | head -n 1); pct restore $VMID \"\$backup\" --storage $TARGET_STORAGE'"
echo
# Step 7: the bind mount (mp1) is host-local config and must be re-applied on
# the target; also re-enable start-on-boot, then boot the CT.
echo "7. Re-apply bind mount and boot settings on target"
run "dst_ssh 'pct set $VMID -mp1 $LOG_BIND_DIR,mp=/var/log-remote -onboot 1'"
run "dst_ssh 'pct start $VMID'"
echo
# Step 8: smoke-test the guest: hostname plus mounted filesystems.
echo "8. Verify guest on target"
run "dst_ssh 'pct exec $VMID -- hostname'"
run "dst_ssh 'pct exec $VMID -- sh -lc \"df -h / /var/log-remote\"'"
echo
# Step 9: manual follow-ups only; intentionally not automated.
echo "9. Optional cleanup after successful cutover"
echo " - remove old CT from source if it still exists"
echo " - remove copied backup archives if no rollback point is needed"
echo " - keep $LOG_BIND_DIR on target as the permanent mp1 path"

View File

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Print pct migrate commands: r630-01 -> r630-03 for the standard offload batch
# (100 mail, 5010 tsunamiswap, 5702/5705 ai-inf, 7805 sankofa-studio).
# Read-only: prints commands only, never runs migrations.
# See docs/04-configuration/PROXMOX_LOAD_BALANCING_RUNBOOK.md §4b.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

SRC="${PROXMOX_HOST_R630_01:-192.168.11.11}"
DST_NODE="r630-03"
STORE="local-lvm"

# Suggested order: small disks first, 5010 (the 160G one) last.
ORDER=(100 5702 5705 7805 5010)

echo "Source host IP: $SRC (run pct migrate from this node, or use ssh below)"
echo "Target node: $DST_NODE"
echo "Target storage: $STORE (required for 100: was thin1 on r630-01 only)"
echo ""
echo "Run one line at a time; ensure backups and maintenance window for 5010 (160G)."
echo ""
for vmid in "${ORDER[@]}"; do
  printf 'ssh root@%s "pct migrate %s %s --storage %s --restart"\n' \
    "$SRC" "$vmid" "$DST_NODE" "$STORE"
done

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Print pct migrate commands for the first-wave r630-01 -> r630-04 rebalance batch.
# Read-only: prints the commands, never executes migrations.
#
# Batch chosen from the live LXC health report; excludes chain-critical and
# infra-critical workloads. Target storage is r630-04 local-lvm, which was
# active and empty in the latest checks.
#
# Usage:
#   bash scripts/proxmox/print-migrate-r630-01-to-r630-04-first-wave.sh
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

SRC="${PROXMOX_HOST_R630_01:-192.168.11.11}"
DST_NODE="r630-04"
STORE="local-lvm"

# Smallest / lowest-risk first, heavier support workloads after.
ORDER=(
  10201 # order-grafana
  10210 # order-haproxy
  7804  # gov-portals-dev
  10020 # order-redis
  10230 # order-vault
  10092 # order-mcp-legal
  8640  # vault-phoenix-1
  8642  # vault-phoenix-3
  10091 # order-portal-internal
  10090 # order-portal-public
  10070 # order-legal
  10200 # order-prometheus
)

echo "Source host IP: $SRC"
echo "Target node: $DST_NODE"
echo "Target storage: $STORE"
echo ""
echo "Recommended approach: run one command at a time, verify service health after each small batch,"
echo "and leave order-prometheus (10200, 100G) for the end of the first wave."
echo ""
for vmid in "${ORDER[@]}"; do
  printf 'ssh root@%s "pct migrate %s %s --storage %s --restart"\n' \
    "$SRC" "$vmid" "$DST_NODE" "$STORE"
done

View File

@@ -0,0 +1,60 @@
#!/usr/bin/env bash
# Provision r630-03 six ~233G SSDs (sdc-sdh) as separate LVM thin pools + Proxmox storage.
# **Destructive:** wipefs + whole-disk PV on each disk. Idempotent: skips VGs that already exist.
#
# Storage IDs: thin1-r630-03 … thin6-r630-03 (sdc→thin1 … sdh→thin6).
#
# Usage:
#   bash scripts/proxmox/provision-r630-03-six-ssd-thinpools.sh --dry-run
#   bash scripts/proxmox/provision-r630-03-six-ssd-thinpools.sh --apply
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
HOST="${PROXMOX_HOST_R630_03:-192.168.11.13}"
APPLY=0
# Mode is mandatory: an explicit --dry-run or --apply is required (no default),
# guarding against accidentally launching a destructive provisioning pass.
case "${1:-}" in
--apply) APPLY=1 ;;
--dry-run) APPLY=0 ;;
*) echo "Usage: $0 --dry-run | --apply" >&2; exit 2 ;;
esac
SSH_OPTS=(-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)
# "APPLY=$APPLY" acts as an env-assignment prefix for the remote command, so
# the heredoc below sees the locally chosen mode.
ssh "${SSH_OPTS[@]}" "root@$HOST" "APPLY=$APPLY" bash -s <<'REMOTE'
set -euo pipefail
DISKS=(sdc sdd sde sdf sdg sdh)
# Disk index i maps to pool number n = i+1 (sdc -> thin1 ... sdh -> thin6).
for i in "${!DISKS[@]}"; do
d="${DISKS[$i]}"
n=$((i+1))
dev="/dev/$d"
sid="thin${n}-r630-03"
# Idempotence: an existing VG thin<n> means this disk is already provisioned.
if vgs "thin${n}" &>/dev/null; then
echo "OK skip: VG thin${n} exists"
continue
fi
if [[ "$APPLY" -ne 1 ]]; then
echo "DRY-RUN: would wipefs+pvcreate+vgcreate+thin pool+pvesm $sid on $dev"
continue
fi
echo "=== $sid on $dev ==="
# DESTRUCTIVE from here on: wipe signatures, dedicate whole disk to a new VG,
# then consume all extents as one thin pool named like the VG.
wipefs -a "$dev" || true
pvcreate -ff "$dev"
vgcreate "thin${n}" "$dev"
lvcreate --type thin-pool -l 100%FREE -n "thin${n}" "thin${n}"
# Register the matching Proxmox storage pinned to r630-03, or re-enable it
# if a storage entry with this ID already exists.
if pvesm status "$sid" &>/dev/null; then
pvesm set "$sid" --disable 0 2>/dev/null || true
else
pvesm add lvmthin "$sid" --thinpool "thin${n}" --vgname "thin${n}" --content images,rootdir --nodes r630-03
fi
done
if [[ "$APPLY" -eq 1 ]]; then
echo "=== pvesm (this node, *-r630-03) ==="
pvesm status | grep r630-03 || true
vgs
fi
REMOTE

View File

@@ -0,0 +1,66 @@
#!/usr/bin/env bash
# Non-interactive optional maintenance for spare / expansion Proxmox nodes.
#
# Community Scripts "post-pve-install" (ProxmoxVE/tools/pve/post-pve-install.sh) is
# interactive (whiptail/read). Use it from the console when you want full apt/NIC
# wizard flows. This script only runs safe headless steps:
# - apt-get update (verify repos; expect no 401 if enterprise is disabled)
# - fstrim on / when the stack reports discard support (often no-op on thick LVM)
# - pveam update (refresh CT template index)
#
# Usage:
# source config/ip-addresses.conf 2>/dev/null || true
# bash scripts/proxmox/pve-spare-host-optional-tuneup.sh
# bash scripts/proxmox/pve-spare-host-optional-tuneup.sh 192.168.11.13 192.168.11.14
#
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Non-interactive SSH with a short timeout so unreachable spares skip quickly.
SSH_OPTS=(-o BatchMode=yes -o ConnectTimeout=12 -o StrictHostKeyChecking=accept-new)

# Targets: explicit host arguments win; otherwise the two spare nodes.
DEFAULT_HOSTS=(
  "${PROXMOX_HOST_R630_03:-192.168.11.13}"
  "${PROXMOX_HOST_R630_04:-192.168.11.14}"
)
if (( $# > 0 )); then
  HOSTS=("$@")
else
  HOSTS=("${DEFAULT_HOSTS[@]}")
fi
# Prefixed logger for all script output.
log() { printf '%s\n' "[pve-spare-host-optional-tuneup] $*"; }

# Run the headless tune-up steps on a single host. Unreachable hosts are
# skipped with a log line (returns 0) so the caller can continue.
remote_tune() {
  local host="$1"
  log "=== $host ==="
  if ! ssh "${SSH_OPTS[@]}" "root@$host" true 2>/dev/null; then
    log "SKIP unreachable: $host"
    return 0
  fi
  ssh "${SSH_OPTS[@]}" "root@$host" bash -s <<'REMOTE'
set -euo pipefail
echo "host: $(hostname)"
apt-get update -qq
echo "apt-get update: OK"
# util-linux fstrim: use -v only (-a conflicts with mountpoint on some versions)
if out=$(fstrim -v / 2>&1); then
echo "fstrim: $out"
else
echo "fstrim: skipped or unsupported ($out)"
fi
pveam update 2>&1 | tail -5 || true
echo "upgrade pending (simulate):"
apt-get -s upgrade 2>/dev/null | tail -2 || true
REMOTE
}
# Process hosts sequentially; a failing host is logged but does not stop
# the remaining nodes from being tuned.
for node in "${HOSTS[@]}"; do
  if ! remote_tune "$node"; then
    log "WARN: $node had errors"
  fi
done
log "Done."