Sync all local changes: docs, config, scripts, submodule refs, verification evidence
Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
defiQUG
2026-02-21 15:46:06 -08:00
parent fbda1b4beb
commit bea1903ac9
1596 changed files with 93380 additions and 6194 deletions

View File

@@ -0,0 +1,41 @@
# HYBX sidecars sync to Gitea
## Complete everything (run in your terminal)
**1. Finish docs-for-sidecars merge and push** (merge is prepared; README already resolved):
```bash
cd /home/intlc/projects/HYBX_Sidecars/docs-for-sidecars
bash scripts/complete-merge-and-push.sh
```
**2. Sync all nine sidecars** (pull from Gitea, then push):
```bash
cd /home/intlc/projects/proxmox
bash scripts/push-hybx-sidecars-to-gitea.sh --sync
```
## Sync one repo at a time
```bash
cd /home/intlc/projects/proxmox
# Replace REPO with one of: mifos-fineract-sidecar, mt103-hardcopy-sidecar, off-ledger-2-on-ledger-sidecar, securitization-engine-sidecar, card-networks-sidecar, securities-sidecar, flash-loan-xau-sidecar, server-funds-sidecar, docs-for-sidecars
bash scripts/push-hybx-sidecars-to-gitea.sh --sync REPO
# Or without pull: bash scripts/push-hybx-sidecars-to-gitea.sh REPO
```
Or run all nine in sequence (one-by-one):
```bash
cd /home/intlc/projects/proxmox
bash scripts/sync-hybx-sidecars-one-by-one.sh --sync
```
## Single-repo push (no pull)
```bash
bash scripts/push-hybx-sidecars-to-gitea.sh docs-for-sidecars
```
`GITEA_TOKEN` is loaded from the proxmox repo root `.env` when you run these scripts.

View File

@@ -110,9 +110,9 @@ Deploy and configure a new WETH9 bridge using the **working** CCIP router (fixes
# Dry-run (no PRIVATE_KEY): simulate deploy and config
./scripts/deploy-and-configure-weth9-bridge-chain138.sh --dry-run
# Real run
export PRIVATE_KEY=0x... # required
export CHAIN138_RPC_URL=http://192.168.11.211:8545 # admin/deployment (RPC_CORE_1)
# Real run (set in smom-dbis-138/.env or export)
# PRIVATE_KEY=0x... # required
# RPC_URL_138=http://192.168.11.211:8545 # Chain 138 Core (admin/deploy)
./scripts/deploy-and-configure-weth9-bridge-chain138.sh
# Then: export CCIPWETH9_BRIDGE_CHAIN138=<printed address>
```

View File

@@ -0,0 +1,80 @@
#!/usr/bin/env bash
# Fix all Besu nodes: deploy canonical node lists, normalize config (TOML permissions path,
# remove tx-pool-min-score, ensure genesis), then restart Besu.
# Run from project root. Usage: bash scripts/besu/fix-all-besu-nodes.sh [--dry-run] [--no-restart]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
# Best-effort: provides PROXMOX_* host IPs; hardcoded fallbacks below cover absence.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
DRY_RUN=false
NO_RESTART=false
# "$@" (not "${@:-}"): zero args must mean zero iterations, not one empty word.
# Unknown flags are deliberately ignored (kept from original behavior).
for arg in "$@"; do
  [[ "$arg" == "--dry-run" ]] && DRY_RUN=true
  [[ "$arg" == "--no-restart" ]] && NO_RESTART=true
done
# Same host/VMID as deploy-besu-node-lists-to-all.sh
declare -A HOST_BY_VMID
for v in 1000 1001 1002 1500 1501 1502 2101 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done
for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"; done
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2307 2308 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
STATIC="${PROJECT_ROOT}/config/besu-node-lists/static-nodes.json"
PERMS="${PROJECT_ROOT}/config/besu-node-lists/permissions-nodes.toml"
# Array (not a whitespace-split string) so each ssh option stays its own word.
SSH_OPTS=(-o ConnectTimeout=8 -o StrictHostKeyChecking=accept-new)
if [[ ! -f "$STATIC" ]] || [[ ! -f "$PERMS" ]]; then
  echo "ERROR: Missing $STATIC or $PERMS" >&2
  exit 1
fi
echo "=== Fix all Besu nodes ==="
echo " 1. Deploy node lists to all nodes"
echo " 2. Fix config on each node (permissions TOML path, remove tx-pool-min-score, genesis)"
echo " 3. Restart Besu on all nodes"
if $DRY_RUN; then echo " [DRY-RUN]"; fi
if $NO_RESTART; then echo " [NO-RESTART]"; fi
echo ""
# Step 1: Deploy node lists (best-effort; step 2 still normalizes paths on each node)
echo "--- Step 1: Deploy static-nodes.json and permissions-nodes.toml ---"
if ! $DRY_RUN; then
  bash "${PROJECT_ROOT}/scripts/deploy-besu-node-lists-to-all.sh" 2>/dev/null || true
fi
echo ""
# Step 2: Fix config on each running node (permissions path, remove tx-pool-min-score, genesis)
echo "--- Step 2: Fix config on each node ---"
for vmid in "${BESU_VMIDS[@]}"; do
  host="${HOST_BY_VMID[$vmid]:-}"
  [[ -z "$host" ]] && continue
  # Query CT state via the Proxmox host; empty on SSH failure → treated as not running.
  running=$(ssh "${SSH_OPTS[@]}" "root@$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
  if [[ "$running" != "running" ]]; then
    echo " VMID $vmid: skip (not running)"
    continue
  fi
  if $DRY_RUN; then
    echo " VMID $vmid: [dry-run] would fix config"
    continue
  fi
  # Inside the CT: point every TOML at the canonical permissions/static-nodes paths,
  # drop tx-pool-min-score, and seed /genesis/genesis.json from /etc/besu if absent.
  # Quoting is three layers deep (local shell → pct host → CT bash); kept verbatim.
  ssh "${SSH_OPTS[@]}" "root@$host" "pct exec $vmid -- bash -c 'for f in /etc/besu/*.toml /config/*.toml; do [ -f \"\$f\" ] || continue; sed -i \"s|permissions-nodes-config-file=.*|permissions-nodes-config-file=\\\"/etc/besu/permissions-nodes.toml\\\"|\" \"\$f\"; sed -i \"/^tx-pool-min-score=/d\" \"\$f\"; sed -i \"s|static-nodes-file=.*|static-nodes-file=\\\"/etc/besu/static-nodes.json\\\"|\" \"\$f\"; done; [ -f /etc/besu/genesis.json ] && [ ! -f /genesis/genesis.json ] && cp /etc/besu/genesis.json /genesis/genesis.json 2>/dev/null; true'" 2>/dev/null && echo " VMID $vmid: config fixed" || echo " VMID $vmid: config fix skipped/failed"
done
echo ""
# Step 3: Restart Besu (optional)
if $NO_RESTART; then
  echo "--- Step 3: skipped (--no-restart) ---"
  echo "Run: bash scripts/besu/restart-besu-reload-node-lists.sh"
  exit 0
fi
echo "--- Step 3: Restart Besu on all nodes ---"
if ! $DRY_RUN; then
  bash "${PROJECT_ROOT}/scripts/besu/restart-besu-reload-node-lists.sh" 2>/dev/null || true
fi
echo ""
echo "Done."

View File

@@ -19,17 +19,18 @@ GENESIS_SRC="${PROJECT_ROOT}/smom-dbis-138-proxmox/config/genesis.json"
STATIC_SRC="${PROJECT_ROOT}/config/besu-node-lists/static-nodes.json"
PERMS_SRC="${PROJECT_ROOT}/config/besu-node-lists/permissions-nodes.toml"
# VMIDs that may lack Besu (sentries 1505-1508 on ml110; RPC 2501-2505 on r630-01)
# VMIDs that may lack Besu (sentries 1505-1508 on ml110; RPC 2500-2505 on r630-01)
SENTRY_VMIDS=(1505 1506 1507 1508)
RPC_VMIDS=(2501 2502 2503 2504 2505)
RPC_VMIDS=(2500 2501 2502 2503 2504 2505)
declare -A HOST_BY_VMID
for v in 1505 1506 1507 1508; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-192.168.11.10}"; done
for v in 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-192.168.11.11}"; done
for v in 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-192.168.11.11}"; done
declare -A IP_BY_VMID
IP_BY_VMID[1505]=192.168.11.213
IP_BY_VMID[1506]=192.168.11.214
IP_BY_VMID[1507]=192.168.11.244
IP_BY_VMID[1508]=192.168.11.245
IP_BY_VMID[2500]=192.168.11.172
IP_BY_VMID[2501]=192.168.11.173
IP_BY_VMID[2502]=192.168.11.174
IP_BY_VMID[2503]=192.168.11.246
@@ -63,11 +64,37 @@ install_rpc() {
local vmid=$1 host=${HOST_BY_VMID[$vmid]} ip=${IP_BY_VMID[$vmid]}
echo "--- VMID $vmid (RPC @ $ip) ---"
if $DRY_RUN; then echo " [dry-run] would install Besu RPC"; return 0; fi
if ! ssh $SSH_OPTS "root@$host" "pct exec $vmid -- bash -c 'touch /tmp/.w && rm -f /tmp/.w'" 2>/dev/null; then
echo " CT $vmid /tmp not writable. Make CT writable (docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md §Read-only CT) and re-run."
return 1
fi
ssh $SSH_OPTS "root@$host" "pct exec $vmid -- rm -rf /opt/besu 2>/dev/null; true"
scp -q $SSH_OPTS "${PROJECT_ROOT}/scripts/install-besu-in-ct-standalone.sh" "root@${host}:/tmp/"
ssh $SSH_OPTS "root@$host" "pct push $vmid /tmp/install-besu-in-ct-standalone.sh /tmp/install-besu-in-ct-standalone.sh && pct exec $vmid -- env NODE_TYPE=rpc BESU_VERSION=$BESU_VERSION bash /tmp/install-besu-in-ct-standalone.sh" || { echo " install failed"; return 1; }
# 2500-style: besu.service + config.toml (not besu-rpc.service)
ssh $SSH_OPTS "root@$host" "pct exec 2500 -- cat /etc/systemd/system/besu.service" > /tmp/besu.service
# 2500-style: besu.service + config.toml (not besu-rpc.service). Use inline template so we don't depend on 2500 having Besu.
cat << 'BESUSVC' > /tmp/besu.service
[Unit]
Description=Hyperledger Besu
After=network.target
Wants=network-online.target
[Service]
Type=simple
User=besu
Group=besu
WorkingDirectory=/opt/besu
Environment="BESU_OPTS=-Xmx2g -Xms1g -Djava.io.tmpdir=/data/besu/tmp"
ExecStart=/opt/besu/bin/besu --config-file=/etc/besu/config.toml
Restart=always
RestartSec=10
LimitNOFILE=65536
StandardOutput=journal
StandardError=journal
SyslogIdentifier=besu
[Install]
WantedBy=multi-user.target
BESUSVC
scp -q $SSH_OPTS /tmp/besu.service "root@${host}:/tmp/"
ssh $SSH_OPTS "root@$host" "pct push $vmid /tmp/besu.service /etc/systemd/system/besu.service"
# config.toml with this node's p2p-host
@@ -107,7 +134,7 @@ EOF
ssh $SSH_OPTS "root@$host" "pct exec $vmid -- systemctl disable besu-rpc.service 2>/dev/null; pct exec $vmid -- systemctl daemon-reload; pct exec $vmid -- systemctl enable besu.service && pct exec $vmid -- systemctl start besu.service" && echo " besu.service enabled and started" || echo " start failed"
}
echo "Installing Besu permanently on nodes missing /opt/besu/bin/besu (1505-1508, 2501-2505)"
echo "Installing Besu permanently on nodes missing /opt/besu/bin/besu (1505-1508, 2500-2505)"
echo ""
for vmid in "${SENTRY_VMIDS[@]}"; do
@@ -132,4 +159,5 @@ done
echo ""
echo "Done. Verify: bash scripts/besu/restart-besu-reload-node-lists.sh (optional); then check block production on RPCs."
rm -f /tmp/config-sentry.toml /tmp/config-rpc-2501.toml /tmp/config-rpc-2502.toml /tmp/config-rpc-2503.toml /tmp/config-rpc-2504.toml /tmp/config-rpc-2505.toml 2>/dev/null || true
rm -f /tmp/config-sentry.toml /tmp/besu.service /tmp/config.toml 2>/dev/null || true
for v in 2500 2501 2502 2503 2504 2505; do rm -f /tmp/config-rpc-${v}.toml 2>/dev/null; done

View File

@@ -18,9 +18,9 @@ DRY_RUN=false
declare -A HOST_BY_VMID
for v in 1000 1001 1002 1500 1501 1502 2101 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done
for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"; done
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2307 2308 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
SSH_OPTS="-o ConnectTimeout=8 -o StrictHostKeyChecking=accept-new"

View File

@@ -1,11 +1,18 @@
#!/usr/bin/env bash
# Cancel pending transactions by sending replacement transactions with higher gas
# Usage: ./cancel-pending-transactions.sh
# Usage: ./cancel-pending-transactions.sh [--force]
# --force Send one replacement tx at current nonce (use when deploy fails with
# "Replacement transaction underpriced" but script reports no pending)
set -uo pipefail
FORCE_REPLACE=false
[[ "${1:-}" == "--force" ]] && FORCE_REPLACE=true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Prefer smom-dbis-138 in this repo; fallback to legacy path
SOURCE_PROJECT="${PROJECT_ROOT}/smom-dbis-138"
[ ! -f "$SOURCE_PROJECT/.env" ] && [ -d "/home/intlc/projects/smom-dbis-138" ] && SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
# Colors
RED='\033[0;31m'
@@ -27,7 +34,8 @@ else
exit 1
fi
RPC_URL="${RPC_URL_138:-http://${RPC_ALLTRA_1:-${RPC_ALLTRA_1:-192.168.11.250}}:8545}"
# Chain 138 RPC (VMID 2101); prefer RPC_URL_138 from .env
RPC_URL="${RPC_URL_138:-http://192.168.11.211:8545}"
if [ -z "${PRIVATE_KEY:-}" ]; then
log_error "PRIVATE_KEY not set in .env file"
@@ -48,14 +56,39 @@ log_info "Deployer: $DEPLOYER"
log_info "RPC URL: $RPC_URL"
log_info ""
# Get current and pending nonces
CURRENT_NONCE=$(cast nonce "$DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
PENDING_NONCE=$(cast nonce "$DEPLOYER" --rpc-url "$RPC_URL" --pending 2>/dev/null || echo "$CURRENT_NONCE")
# Get current and pending nonces (curl works regardless of cast version)
LATEST_HEX=$(curl -s -X POST -H "Content-Type: application/json" \
-d "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getTransactionCount\",\"params\":[\"$DEPLOYER\",\"latest\"],\"id\":1}" \
"$RPC_URL" 2>/dev/null | jq -r '.result // "0x0"')
PENDING_HEX=$(curl -s -X POST -H "Content-Type: application/json" \
-d "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getTransactionCount\",\"params\":[\"$DEPLOYER\",\"pending\"],\"id\":1}" \
"$RPC_URL" 2>/dev/null | jq -r '.result // "0x0"')
CURRENT_NONCE=$(printf '%d' "${LATEST_HEX:-0x0}")
PENDING_NONCE=$(printf '%d' "${PENDING_HEX:-0x0}")
log_info "Current nonce: $CURRENT_NONCE"
log_info "Pending nonce: $PENDING_NONCE"
if [ "$PENDING_NONCE" -le "$CURRENT_NONCE" ]; then
if [[ "$FORCE_REPLACE" == "true" ]]; then
log_warn "Forcing one replacement tx at nonce $CURRENT_NONCE (in case of stuck tx not reported as pending)"
TX_OUTPUT=$(cast send "$DEPLOYER" \
--value 0 \
--rpc-url "$RPC_URL" \
--private-key "$PRIVATE_KEY" \
--gas-price "${GAS_PRICE_138:-500000000000}" \
--gas-limit 21000 \
--nonce "$CURRENT_NONCE" \
--legacy \
2>&1 || echo "FAILED")
if echo "$TX_OUTPUT" | grep -qE "transactionHash|Success"; then
log_success "✓ Replacement sent; wait a few seconds then retry deploy"
else
log_error "Replace failed: $TX_OUTPUT"
exit 1
fi
exit 0
fi
log_success "✓ No pending transactions found"
log_info "All transactions have been mined"
exit 0
@@ -74,14 +107,16 @@ CANCELED=0
for ((nonce = CURRENT_NONCE; nonce < PENDING_NONCE; nonce++)); do
log_info "Canceling transaction with nonce $nonce..."
# Send a transaction to self with 0 value and high gas price
# This will replace the pending transaction
# Send a transaction to self with 0 value and high gas price (replaces stuck tx at this nonce)
# Use --legacy for Besu/Chain 138; 200 gwei so replacement is accepted
TX_OUTPUT=$(cast send "$DEPLOYER" \
--value 0 \
--rpc-url "$RPC_URL" \
--private-key "$PRIVATE_KEY" \
--gas-price 200000000000 \
--gas-limit 21000 \
--nonce "$nonce" \
--legacy \
2>&1 || echo "FAILED")
if echo "$TX_OUTPUT" | grep -qE "transactionHash|Success"; then
@@ -108,7 +143,10 @@ log_info "Waiting 10 seconds for transactions to be mined..."
sleep 10
# Check final status
FINAL_NONCE=$(cast nonce "$DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
FINAL_HEX=$(curl -s -X POST -H "Content-Type: application/json" \
-d "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getTransactionCount\",\"params\":[\"$DEPLOYER\",\"latest\"],\"id\":1}" \
"$RPC_URL" 2>/dev/null | jq -r '.result // "0x0"')
FINAL_NONCE=$(printf '%d' "${FINAL_HEX:-0x0}")
log_info "Final nonce: $FINAL_NONCE"
if [ "$FINAL_NONCE" -ge "$PENDING_NONCE" ]; then

View File

@@ -0,0 +1,78 @@
#!/usr/bin/env node
/**
 * Check internal doc links: resolve relative paths from each source file
 * and report only targets that are missing (file or directory).
 * Usage: node scripts/check-doc-links.mjs
 * Exits 1 when any broken link is found, 0 otherwise.
 */
import { readdirSync, readFileSync, existsSync } from 'fs';
import { join, resolve, dirname, normalize } from 'path';
import { fileURLToPath } from 'url';
const __dirname = dirname(fileURLToPath(import.meta.url));
const REPO_ROOT = resolve(__dirname, '..');
const DOCS = join(REPO_ROOT, 'docs');
/**
 * Recursively yield every markdown file under dir, skipping node_modules/.git.
 * Yields { path: absolute path, rel: path relative to the walk root }.
 */
function* walkMd(dir, prefix = '') {
  const entries = readdirSync(dir, { withFileTypes: true });
  for (const e of entries) {
    const rel = prefix ? `${prefix}/${e.name}` : e.name;
    if (e.isDirectory()) {
      if (e.name === 'node_modules' || e.name === '.git') continue;
      yield* walkMd(join(dir, e.name), rel);
    } else if (e.name.endsWith('.md')) {
      // Reuse the already-built rel (one source of truth for the relative path).
      yield { path: join(dir, e.name), rel };
    }
  }
}
// Captures the "(target)" part of markdown inline links and images: ](target)
const linkRe = /\]\(([^)]+)\)/g;
/**
 * Resolve a link target to an absolute path inside the repo, or null when the
 * link is external (http/https/mailto), anchor-only, home-relative, or resolves
 * outside the repo root.
 */
function resolveTarget(fromDir, href) {
  const pathOnly = href.replace(/#.*$/, '').trim();
  if (!pathOnly || pathOnly.startsWith('http://') || pathOnly.startsWith('https://') || pathOnly.startsWith('mailto:')) return null;
  if (pathOnly.startsWith('#')) return null;
  if (pathOnly.startsWith('~/')) return null; // skip home-relative
  let resolved;
  if (pathOnly.startsWith('/')) {
    // repo-root-relative: /docs/... or /reports/...
    resolved = normalize(join(REPO_ROOT, pathOnly.slice(1)));
  } else {
    resolved = normalize(join(fromDir, pathOnly));
  }
  // Containment check anchored on the path separator: a bare startsWith(REPO_ROOT)
  // would wrongly accept a sibling directory such as "<REPO_ROOT>-backup".
  return resolved === REPO_ROOT || resolved.startsWith(REPO_ROOT + '/') ? resolved : null;
}
const broken = [];
const seen = new Set();
for (const { path: filePath, rel } of walkMd(DOCS)) {
  const fromDir = dirname(filePath);
  const content = readFileSync(filePath, 'utf8');
  let m;
  linkRe.lastIndex = 0;
  while ((m = linkRe.exec(content)) !== null) {
    const href = m[1];
    const targetPath = resolveTarget(fromDir, href);
    if (!targetPath) continue;
    // De-duplicate identical (source, link) pairs so a repeated link reports once.
    const key = `${rel} -> ${href}`;
    if (seen.has(key)) continue;
    seen.add(key);
    if (!existsSync(targetPath)) {
      broken.push({ source: rel, link: href, resolved: targetPath.replace(REPO_ROOT + '/', '') });
    }
  }
}
console.log('=== Doc link check (docs/ only, relative links resolved from source file) ===\n');
if (broken.length === 0) {
  console.log('No broken internal links found.');
  process.exit(0);
}
console.log(`Found ${broken.length} broken link(s):\n`);
broken.forEach(({ source, link, resolved }) => {
  console.log(` ${source}`);
  console.log(` -> ${link}`);
  console.log(` resolved: ${resolved}`);
  console.log('');
});
process.exit(1);

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Check internal doc links in docs/ (relative paths). Report broken file links.
# Usage: ./scripts/check-doc-links.sh
# Run from repo root.
# Exits non-zero when at least one broken link is found (see FAIL below).
set -e
# Repo root = parent of the directory containing this script.
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
cd "$REPO_ROOT"
DOCS="$REPO_ROOT/docs"
# Global failure flag; set to 1 by check_file on any broken link, used as exit code.
FAIL=0
# check_file FILE — scan one markdown file for inline links "](target)" and
# report any target whose resolved path does not exist in the repo.
# Globals: REPO_ROOT (read), FAIL (set to 1 on any broken link).
check_file() {
local f="$1"
# Directory of the source file, relative to the repo root; relative links resolve from here.
local from_dir="${f#$REPO_ROOT/}"
from_dir="$(dirname "$from_dir")"
while IFS= read -r url; do
[[ -z "$url" ]] && continue
# Strip the fragment (#anchor); an empty remainder means an in-page anchor link.
path_only="${url%%#*}"
[[ -z "$path_only" ]] && continue
# External and mail links are out of scope.
[[ "$path_only" == https://* ]] && continue
[[ "$path_only" == http://* ]] && continue
[[ "$path_only" == mailto:* ]] && continue
# Resolve relative to source file dir
if [[ "$path_only" == /* ]]; then
# Leading "/" is treated as repo-root-relative here, not filesystem-absolute.
resolved="${path_only#/}"
elif [[ -z "$from_dir" || "$from_dir" == "." ]]; then
resolved="$path_only"
else
resolved="$from_dir/$path_only"
# Collapse "/./" segments, then iteratively fold one "seg/../" pair per pass
# until none remain (the two sed expressions handle mid-path and trailing "..").
resolved="$(echo "$resolved" | sed 's|/\./|/|g')"
while [[ "$resolved" == *"/../"* ]] || [[ "$resolved" == *"/.." ]]; do
resolved="$(echo "$resolved" | sed 's|/[^/]*/\.\./|/|;s|/[^/]*/\.\.$||')"
done
fi
target="$REPO_ROOT/$resolved"
# Directory targets count as valid links.
if [[ -d "$target" ]]; then continue; fi
if [[ ! -f "$target" ]]; then
echo "BROKEN: $f -> $resolved (target missing)"
FAIL=1
fi
# NOTE(review): a link with a markdown title — ](path "title") — keeps the title
# in path_only and would be reported broken; presumably docs/ avoids titled links — confirm.
done < <(grep -ohE '\]\([^)]+\)' "$f" 2>/dev/null | sed 's/^](//;s/)$//')
}
echo "=== Checking internal doc links under docs/ ==="
# Walk every markdown file under docs/ (NUL-delimited: safe for spaces in names).
while IFS= read -r -d '' md_file; do
  check_file "$md_file"
done < <(find "$DOCS" -name '*.md' -print0 2>/dev/null)
# Only print the all-clear message when nothing was flagged.
[[ "$FAIL" -ne 0 ]] || echo "No broken internal links found."
exit "$FAIL"

View File

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Check sync status of all nine HYBX sidecars vs Gitea. Run from proxmox repo root.
# Usage: bash scripts/check-hybx-sidecars-sync.sh
# Exit status: 0 when nothing is ahead/behind; 1 when a sync is needed.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Load .env when present; relax -u while sourcing since .env may reference unset
# vars. (Explicit if-block: the old one-line &&/|| chain relied on precedence.)
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +u
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -u
fi
GITEA_ORG="${GITEA_ORG:-HYBX}"
GITEA_URL="${GITEA_URL:-https://gitea.d-bis.org}"
BASE="${HYBX_SIDECARS_BASE:-/home/intlc/projects/HYBX_Sidecars}"
# Exported so the inline git credential helper below can read it.
export GITEA_TOKEN
CRED_HELPER="!f() { echo \"username=git\"; echo \"password=\$GITEA_TOKEN\"; }; f"
REPOS=(
  mifos-fineract-sidecar
  mt103-hardcopy-sidecar
  off-ledger-2-on-ledger-sidecar
  securitization-engine-sidecar
  card-networks-sidecar
  securities-sidecar
  flash-loan-xau-sidecar
  server-funds-sidecar
  docs-for-sidecars
)
echo "Checking sync: $BASE vs gitea $GITEA_URL/$GITEA_ORG"
echo ""
synced=0
behind=0
ahead=0
missing=0
other=0
for name in "${REPOS[@]}"; do
  dir="$BASE/$name"
  if [ ! -d "$dir" ]; then
    echo " $name: MISSING (dir not found)"
    ((missing++)) || true  # "|| true": (( )) returns 1 when the result is 0, which would trip set -e
    continue
  fi
  if [ ! -d "$dir/.git" ]; then
    echo " $name: NOT A REPO"
    ((other++)) || true
    continue
  fi
  branch=main
  # Fetch so we have up-to-date gitea/main
  if git -C "$dir" remote get-url gitea &>/dev/null; then
    git -C "$dir" -c "credential.helper=$CRED_HELPER" fetch gitea "$branch" 2>/dev/null || true
  fi
  local_sha=$(git -C "$dir" rev-parse "$branch" 2>/dev/null || echo "")
  remote_sha=$(git -C "$dir" rev-parse "gitea/$branch" 2>/dev/null || echo "")
  # A repo without a local "main" (e.g. default branch is "master") previously
  # fell through and was misreported as DIVERGED; report it distinctly.
  if [ -z "$local_sha" ]; then
    echo " $name: NO LOCAL BRANCH ($branch not found)"
    ((other++)) || true
    continue
  fi
  if [ -z "$remote_sha" ]; then
    echo " $name: NO REMOTE (gitea/$branch not found)"
    ((other++)) || true
    continue
  fi
  if [ "$local_sha" = "$remote_sha" ]; then
    echo " $name: SYNCED ($local_sha)"
    ((synced++)) || true
    continue
  fi
  # Ancestry decides direction: remote contained in local → ahead; local in remote → behind.
  if git -C "$dir" merge-base --is-ancestor "$remote_sha" "$local_sha" 2>/dev/null; then
    echo " $name: AHEAD (local has new commits; push to sync)"
    ((ahead++)) || true
  elif git -C "$dir" merge-base --is-ancestor "$local_sha" "$remote_sha" 2>/dev/null; then
    echo " $name: BEHIND (pull to sync)"
    ((behind++)) || true
  else
    echo " $name: DIVERGED (local and remote have different commits)"
    ((other++)) || true
  fi
done
echo ""
echo "Summary: $synced synced, $ahead ahead, $behind behind, $missing missing, $other other"
if [ "$behind" -gt 0 ] || [ "$ahead" -gt 0 ]; then
  echo "Run: bash scripts/push-hybx-sidecars-to-gitea.sh --sync to sync all"
  exit 1
fi
exit 0

View File

@@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Clone the four HYBX sidecar repos that don't have local dirs yet.
# Run from HYBX_Sidecars parent: bash scripts/clone-hybx-sidecars.sh
# Or from repo root: bash scripts/clone-hybx-sidecars.sh
set -e
BASE="${1:-/home/intlc/projects/HYBX_Sidecars}"
cd "$BASE"
# The four sidecars that do not have local working copies yet.
sidecar_repos=(card-networks-sidecar securities-sidecar flash-loan-xau-sidecar server-funds-sidecar)
for repo in "${sidecar_repos[@]}"; do
  # Guard clause: skip anything that is already a git checkout.
  if [ -d "$repo/.git" ]; then
    echo "Already exists: $repo"
    continue
  fi
  git clone "https://gitea.d-bis.org/HYBX/${repo}.git" && echo "Cloned: $repo"
done
echo "Done."

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Add Cloudflare DNS A records for Gov Portals xom-dev subdomains
# Domains: dbis/iccc/omnl/xom.xom-dev.phoenix.sankofa.nexus → 76.53.10.36
#
# Usage: bash scripts/cloudflare/add-gov-portals-xom-dev-dns.sh
# Requires: .env with CLOUDFLARE_API_TOKEN or (CLOUDFLARE_EMAIL + CLOUDFLARE_API_KEY)
# CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS for sankofa.nexus zone
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
source config/ip-addresses.conf 2>/dev/null || true
# Load .env with an explicit if-block (the old `[ -f ] && ... || true && set -u`
# one-liner relied on &&/|| precedence); relax -u while sourcing.
if [ -f .env ]; then
  set +u
  source .env 2>/dev/null || true
  set -u
fi
ZONE_ID="${CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS:-}"
PUBLIC_IP="${PUBLIC_IP:-76.53.10.36}"
# Build curl auth headers as an array: API token preferred, legacy key+email fallback.
if [ -n "${CLOUDFLARE_API_TOKEN:-}" ]; then
  AUTH_H=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
elif [ -n "${CLOUDFLARE_API_KEY:-}" ] && [ -n "${CLOUDFLARE_EMAIL:-}" ]; then
  AUTH_H=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
else
  echo "Set CLOUDFLARE_API_TOKEN or (CLOUDFLARE_EMAIL + CLOUDFLARE_API_KEY) in .env"
  exit 1
fi
[ -z "$ZONE_ID" ] && { echo "Set CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS in .env"; exit 1; }
# Create-or-update the proxied A record "<name>.sankofa.nexus" → $PUBLIC_IP.
# Arguments: $1 - record name relative to the sankofa.nexus zone (e.g. "dbis.xom-dev.phoenix")
# Globals:   ZONE_ID, PUBLIC_IP, AUTH_H (read)
# Returns:   non-zero when the Cloudflare API reports failure.
add_record() {
  local name=$1
  # All work variables are local (the original leaked EXISTING/RECORD_ID/UPD/CR as globals).
  local data existing record_id upd cr
  # Desired record body; ttl:1 means "automatic" in the Cloudflare API.
  data=$(jq -n --arg name "$name" --arg content "$PUBLIC_IP" \
    '{type:"A",name:$name,content:$content,ttl:1,proxied:true}')
  # Look up an existing A record with the exact FQDN to decide create vs update.
  existing=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records?name=${name}.sankofa.nexus&type=A" \
    "${AUTH_H[@]}" -H "Content-Type: application/json")
  record_id=$(echo "$existing" | jq -r '.result[0].id // empty')
  if [ -n "$record_id" ] && [ "$record_id" != "null" ]; then
    upd=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records/${record_id}" \
      "${AUTH_H[@]}" -H "Content-Type: application/json" -d "$data")
    if echo "$upd" | jq -e '.success == true' >/dev/null 2>&1; then
      echo " $name.sankofa.nexus: Updated A → $PUBLIC_IP"
    else
      echo " $name.sankofa.nexus: Update failed"
      return 1
    fi
  else
    cr=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/dns_records" \
      "${AUTH_H[@]}" -H "Content-Type: application/json" -d "$data")
    if echo "$cr" | jq -e '.success == true' >/dev/null 2>&1; then
      echo " $name.sankofa.nexus: Created A → $PUBLIC_IP"
    else
      echo " $name.sankofa.nexus: Create failed ($(echo "$cr" | jq -r '.errors[0].message // "unknown"' 2>/dev/null))"
      return 1
    fi
  fi
}
echo "Adding Gov Portals xom-dev DNS (sankofa.nexus zone)..."
# Full subdomain: dbis.xom-dev.phoenix.sankofa.nexus → name is "dbis.xom-dev.phoenix" in zone sankofa.nexus
# Best-effort per portal (|| true): one failed record must not stop the rest.
for portal in dbis iccc omnl xom; do
  add_record "${portal}.xom-dev.phoenix" || true
done
echo "Done."

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Complete All Contract Deployments by Priority
# Run from project root. Requires: PRIVATE_KEY, network access to RPC_CORE_1 (${RPC_CORE_1:-192.168.11.211})
# Usage: set -a && source smom-dbis-138/.env && set +a && ./scripts/complete-all-contract-deployments.sh
# Run from project root. Requires: PRIVATE_KEY, RPC_URL_138 (Chain 138 Core) or network access to RPC.
# Usage: ./scripts/complete-all-contract-deployments.sh (PRIVATE_KEY read from smom-dbis-138/.env or root .env)
set -euo pipefail
@@ -9,11 +9,13 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
# Load PRIVATE_KEY from dotenv (root .env, then smom-dbis-138/.env)
[[ -f "${SCRIPT_DIR}/lib/load-project-env.sh" ]] && source "${SCRIPT_DIR}/lib/load-project-env.sh" 2>/dev/null || true
source config/ip-addresses.conf 2>/dev/null || true
RPC="${RPC_URL_138:-http://${RPC_CORE_1:-192.168.11.211}:8545}"
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
if [[ -z "${PRIVATE_KEY:-}" ]]; then
echo "ERROR: PRIVATE_KEY required. source smom-dbis-138/.env first."
echo "ERROR: PRIVATE_KEY required. Set PRIVATE_KEY in smom-dbis-138/.env or root .env."
exit 1
fi

View File

@@ -0,0 +1,44 @@
#!/usr/bin/env bash
# Run all automatable mapper tasks, then config-ready-chains (when .env set), and print deploy commands for the rest.
# Usage: ./scripts/complete-all-mapper-operator.sh
# See: docs/07-ccip/REMAINING_OPERATOR_STEPS.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
SMOM="$PROJECT_ROOT/smom-dbis-138"
# env_has VAR — true when VAR appears as an assignment at line start in smom-dbis-138/.env.
# Anchored with ^ so commented-out lines ("# VAR=...") and suffixed names
# (e.g. OLD_PRIVATE_KEY=) no longer count as "set".
env_has() {
  grep -q "^${1}=" "$SMOM/.env" 2>/dev/null
}
echo ""
echo "=== Complete all mapper-operator tasks ==="
echo ""
# 1. Validation + on-chain check + operator checklist
./scripts/run-remaining-mapper-tasks.sh
# 2. If smom-dbis-138 has .env with Chain 138 bridges and PRIVATE_KEY, run config-ready-chains
if [[ -f "$SMOM/.env" ]]; then
  if env_has CCIPWETH9_BRIDGE_CHAIN138 && env_has CCIPWETH10_BRIDGE_CHAIN138 && env_has PRIVATE_KEY; then
    echo "--- Running config-ready-chains (add destinations) ---"
    (cd "$SMOM" && ./scripts/deployment/complete-config-ready-chains.sh 2>/dev/null) || {
      echo " (run from smom-dbis-138: ./scripts/deployment/complete-config-ready-chains.sh)"
    }
  else
    echo " Skip config-ready-chains: set in smom-dbis-138/.env: CCIPWETH9_BRIDGE_CHAIN138, CCIPWETH10_BRIDGE_CHAIN138, PRIVATE_KEY"
  fi
else
  echo " Skip config-ready-chains: no smom-dbis-138/.env"
fi
echo ""
echo "--- Deploy CCIP bridges (Gnosis/Celo/Wemix) — run from smom-dbis-138 when env set ---"
echo " Per chain, set in .env: RPC, CCIP_ROUTER_ADDRESS, LINK_TOKEN_ADDRESS, WETH9_ADDRESS, WETH10_ADDRESS, PRIVATE_KEY"
echo " Then: forge script script/deploy/bridge/DeployWETHBridges.s.sol:DeployWETHBridges --rpc-url \$<CHAIN>_RPC --private-key \$PRIVATE_KEY --broadcast --legacy -vvvv"
echo " Full steps: docs/07-ccip/CONFIG_READY_CHAINS_COMPLETION_RUNBOOK.md"
echo ""
echo " All remaining steps: docs/07-ccip/REMAINING_OPERATOR_STEPS.md"
echo ""
View File

@@ -16,7 +16,7 @@ source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VMID=5700
HOST="${PROXMOX_HOST:-${PROXMOX_R630_01:-192.168.11.11}}"
IP="${IP_DEV_VM:-192.168.11.60}"
IP="${IP_DEV_VM:-192.168.11.59}"
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
NETWORK="${NETWORK:-vmbr0}"
STORAGE="${STORAGE:-local-lvm}"

View File

@@ -2,7 +2,7 @@
# Deploy CCIPWETH9Bridge on Chain 138 with the working CCIP router and configure it.
# Fixes router mismatch: old bridge pointed to 0x80226... (no code); new bridge uses 0x8078A... (has code).
#
# Requires: PRIVATE_KEY, foundry (forge, cast). Optional: CHAIN138_RPC_URL.
# Requires: PRIVATE_KEY (read from dotenv: smom-dbis-138/.env or root .env), foundry (forge, cast). Uses RPC_URL_138 (Core) from load-project-env.
# Use --dry-run to simulate (no PRIVATE_KEY needed). After run: export CCIPWETH9_BRIDGE_CHAIN138=<printed address>.
set -euo pipefail
@@ -13,10 +13,13 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SMOM_DIR="${SMOM_DBIS_138_DIR:-$REPO_ROOT/smom-dbis-138}"
# Load PRIVATE_KEY from dotenv (root .env, then smom-dbis-138/.env)
[[ -f "${SCRIPT_DIR}/lib/load-project-env.sh" ]] && source "${SCRIPT_DIR}/lib/load-project-env.sh" 2>/dev/null || true
# Working CCIP router (has code; Mainnet + WETH9 configured). Do NOT use 0x80226fc0... (no code).
CCIP_ROUTER_WORKING="0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e"
# Admin/deployment: use RPC_CORE_1 (${RPC_CORE_1:-192.168.11.211}) per config/ip-addresses.conf
CHAIN138_RPC="${CHAIN138_RPC_URL:-${RPC_URL_138:-http://${RPC_CORE_1:-192.168.11.211}:8545}}"
# Chain 138 Core RPC (standard: RPC_URL_138) — admin/deploy, VMID 2101
RPC="${RPC_URL_138:-http://${RPC_CORE_1:-192.168.11.211}:8545}"
MAINNET_CHAIN_SELECTOR="5009297550715157269"
MAINNET_WETH9_RECEIVER="${MAINNET_WETH9_BRIDGE_ADDRESS:-0x2A0840e5117683b11682ac46f5CF5621E67269E3}"
WETH9_CHAIN138="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
@@ -25,16 +28,16 @@ MAX_UINT256="1157920892373161954235709850086879078532699846656405640394575840079
if [[ "$DRY_RUN" == "true" ]]; then
echo "=== DRY RUN: Deploy and configure WETH9 bridge (Chain 138) ==="
echo " RPC: $CHAIN138_RPC"
echo " RPC: $RPC (RPC_URL_138 = Chain 138 Core)"
echo " CCIP Router: $CCIP_ROUTER_WORKING"
echo " Would: forge script (no broadcast), addDestination, approve WETH9/LINK"
echo " To run for real: export PRIVATE_KEY=0x... && $0"
echo " To run for real: set PRIVATE_KEY and RPC_URL_138 in smom-dbis-138/.env && $0"
exit 0
fi
if [[ -z "${PRIVATE_KEY:-}" ]]; then
echo "ERROR: PRIVATE_KEY is required (deployer key for Chain 138)." >&2
echo " export PRIVATE_KEY=0x..." >&2
echo " Set PRIVATE_KEY in smom-dbis-138/.env or root .env (or export PRIVATE_KEY=0x...)." >&2
echo " Or use --dry-run to simulate." >&2
exit 1
fi
@@ -45,7 +48,7 @@ if [[ ! -d "$SMOM_DIR" ]]; then
fi
echo "=== Deploy and configure WETH9 bridge (Chain 138) ==="
echo " RPC: $CHAIN138_RPC"
echo " RPC: $RPC (RPC_URL_138 = Core)"
echo " CCIP Router: $CCIP_ROUTER_WORKING"
echo ""
@@ -58,7 +61,7 @@ trap 'rm -f "$OUTPUT_LOG"' EXIT
(
cd "$SMOM_DIR"
forge script script/DeployCCIPWETH9Bridge.s.sol \
--rpc-url "$CHAIN138_RPC" \
--rpc-url "$RPC" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--legacy \
@@ -91,7 +94,7 @@ cast send "$NEW_BRIDGE" "addDestination(uint64,address)" \
"$MAINNET_CHAIN_SELECTOR" \
"$MAINNET_WETH9_RECEIVER" \
--private-key "$PRIVATE_KEY" \
--rpc-url "$CHAIN138_RPC" \
--rpc-url "$RPC" \
--quiet 2>/dev/null || true
# Approve WETH9 for new bridge
@@ -99,7 +102,7 @@ cast send "$WETH9_CHAIN138" "approve(address,uint256)" \
"$NEW_BRIDGE" \
"$MAX_UINT256" \
--private-key "$PRIVATE_KEY" \
--rpc-url "$CHAIN138_RPC" \
--rpc-url "$RPC" \
--quiet 2>/dev/null || true
# Approve LINK for new bridge (CCIP fees)
@@ -107,7 +110,7 @@ cast send "$LINK_CHAIN138" "approve(address,uint256)" \
"$NEW_BRIDGE" \
"$MAX_UINT256" \
--private-key "$PRIVATE_KEY" \
--rpc-url "$CHAIN138_RPC" \
--rpc-url "$RPC" \
--quiet 2>/dev/null || true
echo "=== Done ==="

View File

@@ -32,9 +32,9 @@ for v in 1000 1001 1002 1500 1501 1502 2101 2500 2501 2502 2503 2504 2505; do HO
# r630-02 (192.168.11.12)
for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"; done
# ml110 (192.168.11.10)
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2307 2308 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
echo "Deploying Besu node lists from config/besu-node-lists/ to all nodes"
echo " static-nodes.json -> /etc/besu/static-nodes.json"

View File

@@ -0,0 +1,182 @@
#!/usr/bin/env bash
# Create LXC 7804 (gov-portals-dev) and deploy the four Gov Portals (DBIS, ICCC, OMNL, XOM)
# Serving at dbis/iccc/omnl/xom.xom-dev.phoenix.sankofa.nexus via NPMplus
#
# Usage:
# From proxmox repo root, with SSH to Proxmox host:
# bash scripts/deployment/deploy-gov-portals-to-7804.sh [--create-only|--deploy-only]
#
# Prerequisites:
# - SSH to Proxmox host (default: r630-01)
# - gov-portals-monorepo cloned at /home/intlc/projects/gov-portals-monorepo (or GOV_PORTALS_SOURCE)
# - Gitea token in .env for clone (or public clone)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional config/env; failures are tolerated (|| true) so the script runs without them.
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# NOTE: relies on left-assoc `&& ... || true && set -u` so `set -u` is restored
# on every path (file missing, source failed, or success).
[ -f "$PROJECT_ROOT/.env" ] && set +u && source "$PROJECT_ROOT/.env" 2>/dev/null || true && set -u
# Gov Portals dev VM (7804) - Sankofa/Phoenix range
VMID_GOV_PORTALS=7804
IP_GOV_PORTALS_DEV="${IP_GOV_PORTALS_DEV:-192.168.11.54}"
HOSTNAME_GOV_PORTALS="gov-portals-dev"
PROXMOX_NODE="${PROXMOX_NODE:-r630-01}"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.11}"
STORAGE="${STORAGE:-local-lvm}"
TEMPLATE="${TEMPLATE:-local:vztmpl/debian-12-standard_12.12-1_amd64.tar.zst}"
GATEWAY="${GATEWAY:-192.168.11.1}"
NETWORK="${NETWORK:-vmbr0}"
# Source of gov-portals-monorepo (local path to rsync, or git URL)
GOV_PORTALS_SOURCE="${GOV_PORTALS_SOURCE:-/home/intlc/projects/gov-portals-monorepo}"
# Mode flags from the (single) positional argument; both false = create + deploy.
# The bare `[[ ... ]] && VAR=true` pattern is safe under set -e (failing test
# in an && list does not abort).
CREATE_ONLY=false
DEPLOY_ONLY=false
[[ "${1:-}" == "--create-only" ]] && CREATE_ONLY=true
[[ "${1:-}" == "--deploy-only" ]] && DEPLOY_ONLY=true
# log: timestamped progress line to stdout.
log() { echo "[$(date +%H:%M:%S)] $*"; }
# run_ssh: run a command on the Proxmox host as root; each arg is forwarded as-is.
run_ssh() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new root@$PROXMOX_HOST "$@"; }
run_ct() { run_ssh "pct exec $VMID_GOV_PORTALS -- $@"; }
# Create LXC 7804
# create_container: create and start LXC $VMID_GOV_PORTALS on the Proxmox host.
# Idempotent: returns early if `pct list` already shows the VMID.
create_container() {
if run_ssh "pct list 2>/dev/null | grep -q '^$VMID_GOV_PORTALS '"; then
log "Container $VMID_GOV_PORTALS already exists"
return 0
fi
log "Creating LXC $VMID_GOV_PORTALS ($HOSTNAME_GOV_PORTALS) on $PROXMOX_NODE..."
run_ssh "pct create $VMID_GOV_PORTALS $TEMPLATE \
--hostname $HOSTNAME_GOV_PORTALS \
--memory 2048 \
--cores 2 \
--rootfs $STORAGE:20 \
--net0 name=eth0,bridge=$NETWORK,ip=$IP_GOV_PORTALS_DEV/24,gw=$GATEWAY \
--description 'Gov Portals dev - DBIS, ICCC, OMNL, XOM' \
--start 1 \
--onboot 1 \
--unprivileged 1 \
--features nesting=1,keyctl=1"
log "Waiting for container to boot..."
# Fixed grace period for the CT to finish booting before pct exec calls.
sleep 10
}
# Deploy apps inside container
# deploy_inside: install Node/pnpm in the container, sync or clone the
# gov-portals monorepo, build each portal, and install systemd units
# gov-portal-{DBIS,ICCC,OMNL,XOM} on ports 3001-3004.
# Best-effort throughout: most steps are `|| true` so a partial environment
# does not abort the whole deploy.
deploy_inside() {
log "Deploying gov-portals inside container..."
# Install Node 20, pnpm
run_ct "bash -lc 'command -v node >/dev/null 2>&1 || (curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && apt-get install -y nodejs)'" 2>/dev/null || true
run_ct "bash -lc 'command -v pnpm >/dev/null 2>&1 || npm install -g pnpm'" 2>/dev/null || true
# Prepare deploy dir
run_ct "mkdir -p /srv/gov-portals"
run_ct "rm -rf /srv/gov-portals/.git /srv/gov-portals/DBIS /srv/gov-portals/ICCC /srv/gov-portals/OMNL /srv/gov-portals/XOM /srv/gov-portals/node_modules /srv/gov-portals/*/node_modules 2>/dev/null || true"
# Rsync monorepo (from host running this script)
if [ -d "$GOV_PORTALS_SOURCE" ]; then
log "Rsyncing gov-portals-monorepo to container..."
# Two-hop copy: rsync to the Proxmox host, then pct push into the CT.
rsync -az --delete \
--exclude 'node_modules' \
--exclude '.next' \
--exclude '.git' \
"$GOV_PORTALS_SOURCE/" \
"root@$PROXMOX_HOST:/tmp/gov-portals-rsync/" 2>/dev/null || {
log "Rsync to Proxmox host failed - will try git clone inside container"
}
if run_ssh "test -d /tmp/gov-portals-rsync/packages"; then
run_ssh "pct push $VMID_GOV_PORTALS /tmp/gov-portals-rsync /srv/gov-portals --recursive" 2>/dev/null || true
fi
fi
# Ensure we have submodules - clone if rsync didn't work
run_ct "bash -c '
cd /srv/gov-portals 2>/dev/null || mkdir -p /srv/gov-portals && cd /srv/gov-portals
if [ ! -f package.json ]; then
apt-get update -qq && apt-get install -y -qq git
git clone --recurse-submodules https://gitea.d-bis.org/Gov_Web_Portals/gov-portals-monorepo.git .
fi
if [ -d .git ] && [ ! -d DBIS/.git ]; then
git submodule update --init --recursive
fi
'"
# Install deps and build each portal
# Escaped \" and \$ below survive the outer double quotes so they are
# expanded inside the container, not on this host.
run_ct "bash -c '
cd /srv/gov-portals
export PATH=\"/usr/bin:/usr/local/bin:\$PATH\"
pnpm install --frozen-lockfile 2>/dev/null || pnpm install
for portal in DBIS ICCC OMNL XOM; do
if [ -d \"\$portal\" ]; then
echo \"Building \$portal...\"
(cd \"\$portal\" && pnpm run build) 2>/dev/null || true
fi
done
'"
# Create systemd services or PM2 - use simple node/next start
log "Creating startup script and systemd service..."
# Quoted heredoc delimiter (\"SCRIPT\") keeps the generated script literal.
run_ct "bash -c '
cat > /srv/gov-portals/start-portals.sh << \"SCRIPT\"
#!/bin/bash
cd /srv/gov-portals
export NODE_ENV=production
PORT=3001 node DBIS/node_modules/next/dist/bin/next start -p 3001 &
PORT=3002 node ICCC/node_modules/next/dist/bin/next start -p 3002 &
PORT=3003 node OMNL/node_modules/next/dist/bin/next start -p 3003 &
PORT=3004 node XOM/node_modules/next/dist/bin/next start -p 3004 &
wait
SCRIPT
chmod +x /srv/gov-portals/start-portals.sh
'"
# Create systemd services for each portal
# Unquoted EOF heredoc: \$portal/\$port expand inside the container loop,
# producing one unit file per portal.
run_ct "bash -c '
for spec in DBIS:3001 ICCC:3002 OMNL:3003 XOM:3004; do
portal=\${spec%%:*}
port=\${spec##*:}
cat > /etc/systemd/system/gov-portal-\${portal}.service << EOF
[Unit]
Description=Gov Portal \$portal
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/srv/gov-portals/\$portal
Environment=NODE_ENV=production
Environment=PORT=\$port
ExecStart=/usr/bin/node /srv/gov-portals/\$portal/node_modules/next/dist/bin/next start -p \$port
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable gov-portal-\${portal}
systemctl restart gov-portal-\${portal} 2>/dev/null || systemctl start gov-portal-\${portal}
done
'" 2>/dev/null || log "Services may need manual start - run: pct exec 7804 -- systemctl start gov-portal-DBIS gov-portal-ICCC gov-portal-OMNL gov-portal-XOM"
log "Deployment complete. Portals should be starting on ports 3001-3004."
}
# Main
# Dispatch on the mode flags: deploy only, create only, or both (default).
if [ "$DEPLOY_ONLY" = true ]; then
deploy_inside
elif [ "$CREATE_ONLY" = true ]; then
create_container
else
create_container
deploy_inside
fi
echo ""
echo "Gov Portals dev (7804) at $IP_GOV_PORTALS_DEV"
echo "Next: Run add-gov-portals-xom-dev-proxy-hosts.sh to add NPMplus proxy hosts"
echo " dbis.xom-dev.phoenix.sankofa.nexus → :3001"
echo " iccc.xom-dev.phoenix.sankofa.nexus → :3002"
echo " omnl.xom-dev.phoenix.sankofa.nexus → :3003"
echo " xom.xom-dev.phoenix.sankofa.nexus → :3004"

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Deploy a single contract to Chain 138 (dry-run by default).
# Usage: ./scripts/deployment/deploy-one-contract.sh <script_path> [--broadcast]
# Example: ./scripts/deployment/deploy-one-contract.sh script/DeployMulticall.s.sol:DeployMulticall --broadcast
# Requires: smom-dbis-138/.env with PRIVATE_KEY, RPC_URL_138 (Chain 138 Core)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="${SMOM_DIR:-${PROJECT_ROOT}/smom-dbis-138}"
# Optional env loaders (shared project env, then smom .env); tolerated if absent.
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
[[ -f "${SMOM}/.env" ]] && source "${SMOM}/.env" 2>/dev/null || true
GAS_PRICE="${GAS_PRICE:-1000000000}" # 1 gwei default
RPC_URL="${RPC_URL_138:-http://192.168.11.211:8545}"
SCRIPT_SPEC="${1:-}"
if [[ -z "$SCRIPT_SPEC" ]]; then
echo "Usage: $0 <script_path> [--broadcast]"
echo "Example: $0 script/DeployMulticall.s.sol:DeployMulticall --broadcast"
exit 1
fi
# Broadcast flags as an array: the old single-string $BROADCAST relied on
# unquoted word-splitting (SC2086); an array keeps each flag — including the
# private key — as one argument. PRIVATE_KEY is only required when broadcasting.
BROADCAST_ARGS=()
if [[ "${2:-}" = "--broadcast" ]]; then
BROADCAST_ARGS=(--broadcast --private-key "${PRIVATE_KEY:?PRIVATE_KEY required for broadcast}")
fi
cd "$SMOM"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Deploy one contract: $SCRIPT_SPEC"
echo "RPC: $RPC_URL | Gas price: $GAS_PRICE"
if (( ${#BROADCAST_ARGS[@]} )); then echo "Mode: BROADCAST (on-chain)"; else echo "Mode: DRY RUN (no tx)"; fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# "${BROADCAST_ARGS[@]}" expands to nothing when empty (bash >= 4.4 under set -u).
forge script "$SCRIPT_SPEC" \
--rpc-url "$RPC_URL" \
--with-gas-price "$GAS_PRICE" \
"${BROADCAST_ARGS[@]}"
echo ""
echo "Done. If you used --broadcast, note the deployed address(es) and run verification next."

View File

@@ -0,0 +1,77 @@
#!/usr/bin/env bash
# Deploy TransactionMirror to Chain 138.
# Use this when forge script DeployTransactionMirror.s.sol fails with constructor-args decode (forge create works).
# Usage: ./scripts/deployment/deploy-transaction-mirror-chain138.sh [--dry-run]
# --dry-run Print forge create command and exit 0 (no deploy).
# Requires: smom-dbis-138/.env with PRIVATE_KEY, RPC_URL_138 or RPC_URL (Chain 138 Core). Optional: MIRROR_ADMIN.
# Also sources load-project-env (secure-secrets, config). Must run from a host that can reach RPC (e.g. LAN to 192.168.11.211:8545).
# Strict mode: unguarded failures abort the deploy.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
SMOM="${SMOM_DIR:-${PROJECT_ROOT}/smom-dbis-138}"
DRY_RUN=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true && break; done
# Optional env loaders; the smom .env is sourced under `set -a` so its
# variables are exported to child processes (forge/cast).
[[ -f "${SCRIPT_DIR}/../lib/load-project-env.sh" ]] && source "${SCRIPT_DIR}/../lib/load-project-env.sh" 2>/dev/null || true
[[ -f "${SMOM}/.env" ]] && set -a && source "${SMOM}/.env" 2>/dev/null && set +a || true
# RPC_URL_138 or RPC_URL (alias)
RPC="${RPC_URL_138:-${RPC_URL:-http://192.168.11.211:8545}}"
export RPC_URL_138="$RPC"
export ETH_RPC_URL="$RPC"
GAS_PRICE="${GAS_PRICE:-1000000000}"
# A key is mandatory for a real deploy; --dry-run may proceed without one.
if ! $DRY_RUN && [[ -z "${PRIVATE_KEY:-}" ]]; then
echo "ERROR: PRIVATE_KEY not set. Set in smom-dbis-138/.env"
exit 1
fi
# Normalize the key to a 0x-prefixed hex string, and export it so
# subshells/forge inherit it. Guarded on the key being set: the old unguarded
# ${PRIVATE_KEY#0x} tripped `set -u` when PRIVATE_KEY was unset (breaking
# --dry-run without a key) and turned an unset key into the literal "0x".
if [[ -n "${PRIVATE_KEY:-}" ]]; then
  if [[ "${PRIVATE_KEY#0x}" == "$PRIVATE_KEY" ]]; then
    PRIVATE_KEY="0x$PRIVATE_KEY"
  fi
  export PRIVATE_KEY
fi
# MIRROR_ADMIN or deployer address
# Admin for the TransactionMirror constructor: explicit MIRROR_ADMIN wins;
# otherwise derive the deployer address from the key via `cast`.
if [[ -n "${MIRROR_ADMIN:-}" ]]; then
ADMIN="$MIRROR_ADMIN"
else
# In dry-run there may be no key, so use a placeholder instead of calling cast.
if $DRY_RUN; then ADMIN="<DEPLOYER_ADDRESS>"; else ADMIN=$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null) || { echo "ERROR: cast not found or PRIVATE_KEY invalid"; exit 1; }; fi
fi
if $DRY_RUN; then
echo "[dry-run] Would run: forge create ... TransactionMirror --constructor-args $ADMIN --rpc-url $RPC --legacy --gas-price $GAS_PRICE"
echo " Add TRANSACTION_MIRROR_ADDRESS=<deployed> to smom-dbis-138/.env after deploy."
exit 0
fi
echo "Deploying TransactionMirror to Chain 138 (admin=$ADMIN)"
echo " RPC=$RPC"
cd "$SMOM"
# Use forge script (DeployTransactionMirror.s.sol) — more reliable than forge create with env
# Capture combined output so the deployed address can be grepped out below;
# on failure, replay the output and hint at the likely network cause.
out=$(env ETH_RPC_URL="$RPC" MIRROR_ADMIN="$ADMIN" forge script script/DeployTransactionMirror.s.sol:DeployTransactionMirror \
--rpc-url "$RPC" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--legacy \
--gas-price "$GAS_PRICE" 2>&1) || { echo "$out"; echo "Run from a host on LAN that can reach $RPC"; exit 1; }
echo "$out"
addr=$(echo "$out" | grep -oE 'TransactionMirror deployed at: (0x[a-fA-F0-9]{40})' | sed 's/TransactionMirror deployed at: //')
# If an address was found, persist it: update an existing
# TRANSACTION_MIRROR_ADDRESS line in smom's .env, or append one.
if [[ -n "$addr" ]]; then
echo ""
echo "Deployed TransactionMirror at: $addr"
env_file="${SMOM}/.env"
if [[ -f "$env_file" ]]; then
if grep -q "^TRANSACTION_MIRROR_ADDRESS=" "$env_file" 2>/dev/null; then
sed -i "s|^TRANSACTION_MIRROR_ADDRESS=.*|TRANSACTION_MIRROR_ADDRESS=$addr|" "$env_file"
echo " Updated TRANSACTION_MIRROR_ADDRESS in $env_file"
else
echo "TRANSACTION_MIRROR_ADDRESS=$addr" >> "$env_file"
echo " Appended TRANSACTION_MIRROR_ADDRESS to $env_file"
fi
fi
fi
echo ""
echo "Done. Add TRANSACTION_MIRROR_ADDRESS=<deployed> to smom-dbis-138/.env if not already set."

View File

@@ -0,0 +1,90 @@
#!/usr/bin/env bash
# R15: Deployment automation — check env → deploy (optional) → verify → update config.
# Usage: ./scripts/deployment/deploy-verify-chain138.sh [--dry-run] [--deploy] [--verify-only]
# --dry-run Print steps only; do not run deploy or verify (exit 0).
# --deploy Run phased deploy (01_DeployCore, 02_DeployBridges) before verify. Omit to verify only.
# --verify-only Skip deploy; run on-chain check + Blockscout verification only.
# Requires: LAN/VPN to Chain 138 RPC and Blockscout. PRIVATE_KEY and RPC_URL_138 read from dotenv (root .env, then smom-dbis-138/.env).
# See: docs/03-deployment/CONTRACT_DEPLOYMENT_RUNBOOK.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
# Load PRIVATE_KEY and RPC from dotenv
[[ -f "${SCRIPT_DIR}/../lib/load-project-env.sh" ]] && source "${SCRIPT_DIR}/../lib/load-project-env.sh" 2>/dev/null || true
# Flag parsing: flags are independent (not mutually exclusive by parsing;
# --verify-only wins over --deploy at step 3).
DRY_RUN=false
DO_DEPLOY=false
VERIFY_ONLY=false
for a in "$@"; do
[[ "$a" == "--dry-run" ]] && DRY_RUN=true
[[ "$a" == "--deploy" ]] && DO_DEPLOY=true
[[ "$a" == "--verify-only" ]] && VERIFY_ONLY=true
done
echo "=== Deploy & Verify Chain 138 (R15) ==="
echo ""
# Dry-run: print the six-step plan and exit without touching anything.
if $DRY_RUN; then
echo "1. Check env: PRIVATE_KEY and RPC_URL_138 read from dotenv (smom-dbis-138/.env or root .env)"
echo "2. Config validation: bash scripts/validation/validate-config-files.sh"
echo "3. Deploy (if --deploy): cd smom-dbis-138 && bash scripts/deployment/deploy-contracts-unified.sh --mode ordered"
echo "4. On-chain check: bash scripts/verify/check-contracts-on-chain-138.sh"
echo "5. Blockscout verify: source smom-dbis-138/.env; bash scripts/verify/run-contract-verification-with-proxy.sh"
echo "6. Reconcile .env: bash scripts/verify/reconcile-env-canonical.sh --print (update smom-dbis-138/.env from output)"
echo ""
echo "Usage: $0 [--dry-run] [--deploy] [--verify-only]"
exit 0
fi
# 1. Env check (PRIVATE_KEY already loaded from dotenv above; re-source smom .env if present)
[[ -f "smom-dbis-138/.env" ]] && source smom-dbis-138/.env 2>/dev/null || true
PRIVATE_KEY="${PRIVATE_KEY:-${CHAIN138_PRIVATE_KEY:-}}"
# RPC_URL_138 = Chain 138 Core (load-project-env sets it from .env or config)
if [[ -z "${PRIVATE_KEY:-}" ]]; then
echo "PRIVATE_KEY (or CHAIN138_PRIVATE_KEY) not set. Set in smom-dbis-138/.env or root .env."
exit 1
fi
if [[ -z "${RPC_URL_138:-}" ]]; then
echo "RPC_URL_138 not set. Set in smom-dbis-138/.env or root .env (standard for Chain ID 138 RPC)."
exit 1
fi
echo "[OK] Env present (PRIVATE_KEY and RPC from dotenv)"
# 2. Config validation
echo "2. Config validation..."
bash scripts/validation/validate-config-files.sh
echo ""
# 3. Deploy (optional)
if $DO_DEPLOY && ! $VERIFY_ONLY; then
echo "3. Deploy (ordered)..."
# Subshell so the cd does not leak into the remaining steps.
(cd smom-dbis-138 && bash scripts/deployment/deploy-contracts-unified.sh --mode ordered)
echo ""
else
echo "3. Deploy skipped (use --deploy to run)"
fi
# 4. On-chain check
echo "4. On-chain check..."
# Best-effort (|| true): a failing on-chain check does not stop verification.
SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh || true
echo ""
# 5. Blockscout verification (may require LAN)
echo "5. Blockscout verification..."
source smom-dbis-138/.env 2>/dev/null || true
if bash scripts/verify/run-contract-verification-with-proxy.sh 2>/dev/null; then
echo "[OK] Verification run completed"
else
echo "[WARN] Verification failed or Blockscout unreachable (run from LAN). See CONTRACT_DEPLOYMENT_RUNBOOK."
fi
echo ""
# 6. Reconcile
echo "6. Reconcile .env (canonical list):"
bash scripts/verify/reconcile-env-canonical.sh --print
echo ""
echo "=== Done. Update smom-dbis-138/.env from reconcile output if needed. ==="

View File

@@ -37,4 +37,4 @@ for u in $USERS; do
"pct exec $VMID -- bash -c 'mkdir -p /home/$u/.ssh && chmod 700 /home/$u/.ssh && echo \"$KEY_B64\" | base64 -d >> /home/$u/.ssh/authorized_keys && chmod 600 /home/$u/.ssh/authorized_keys && chown -R $u:$u /home/$u/.ssh'"
echo " OK: $u"
done
echo "Done. Test: ssh dev1@${IP_DEV_VM:-192.168.11.60}"
echo "Done. Test: ssh dev1@${IP_DEV_VM:-192.168.11.59}"

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Add Gitea webhook for Phoenix deploy to a repository.
# Usage: GITEA_TOKEN=xxx PHOENIX_WEBHOOK_URL=https://host:4001/webhook/gitea bash add-gitea-webhook-phoenix.sh [owner/repo]
# Example: GITEA_TOKEN=xxx PHOENIX_WEBHOOK_URL=http://192.168.11.60:4001/webhook/gitea bash add-gitea-webhook-phoenix.sh d-bis/proxmox
# Example: GITEA_TOKEN=xxx PHOENIX_WEBHOOK_URL=http://192.168.11.59:4001/webhook/gitea bash add-gitea-webhook-phoenix.sh d-bis/proxmox
set -euo pipefail
@@ -19,7 +19,7 @@ if [ -z "$GITEA_TOKEN" ]; then
exit 1
fi
if [ -z "$PHOENIX_WEBHOOK_URL" ]; then
echo "Set PHOENIX_WEBHOOK_URL (e.g. http://192.168.11.60:4001/webhook/gitea)"
echo "Set PHOENIX_WEBHOOK_URL (e.g. http://192.168.11.59:4001/webhook/gitea)"
exit 1
fi

View File

@@ -22,7 +22,7 @@ DRY_RUN=false
# Default org and repos (override with env)
GITEA_ORG="${GITEA_ORG:-d-bis}"
REPO_NAMES="${REPO_NAMES:-proxmox dbis_core explorer-monorepo virtual-banker alltra-lifi-settlement smom-dbis-138 unifi-api metamask-integration mcp-omada mcp-proxmox mcp-unifi the-order miracles_in_motion rpc-translator-138 token-lists forge-verification-proxy site-manager-api multi-chain-execution}"
REPO_NAMES="${REPO_NAMES:-proxmox dbis_core explorer-monorepo virtual-banker alltra-lifi-settlement smom-dbis-138 unifi-api metamask-integration fireblocks-integration mcp-omada mcp-proxmox mcp-unifi the-order miracles_in_motion rpc-translator-138 token-lists forge-verification-proxy site-manager-api multi-chain-execution}"
# Create token from username/password if needed
if [ -z "$GITEA_TOKEN" ] && [ -n "$GITEA_USER" ] && [ -n "$GITEA_PASSWORD" ]; then

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Rsync projects from local to Dev VM (5700). Run after SSH keys are added for dev1.
# Usage: bash scripts/dev-vm/rsync-projects-to-dev-vm.sh [--dry-run]
# Default target: dev1@192.168.11.60:/srv/projects/
# Default target: dev1@192.168.11.59:/srv/projects/
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
@@ -9,7 +9,7 @@ PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
DEV_USER="${DEV_USER:-dev1}"
DEV_HOST="${IP_DEV_VM:-192.168.11.60}"
DEV_HOST="${IP_DEV_VM:-192.168.11.59}"
# Source: parent of project root (e.g. /home/intlc/projects)
SOURCE_DIR="${SOURCE_DIR:-$(dirname "$PROJECT_ROOT")}"
DRY_RUN=""

View File

@@ -6,7 +6,7 @@
set -euo pipefail
ACT_RUNNER_VERSION="${ACT_RUNNER_VERSION:-0.2.13}"
INSTANCE="${INSTANCE:-http://192.168.11.60:3000}"
INSTANCE="${INSTANCE:-http://192.168.11.59:3000}"
WORK_DIR="${WORK_DIR:-/opt/act_runner}"
TOKEN="${GITEA_RUNNER_REGISTRATION_TOKEN:-}"

View File

@@ -0,0 +1,122 @@
#!/usr/bin/env python3
"""Extract Blitzkrieg plan from transcript .md and write consolidated BLITZKRIEG doc."""
import json
import re  # NOTE(review): imported but unused in this script — confirm before removing.
import sys
# Transcript source (repo root) and consolidated destination paths.
SRC = "2026-2-13 15-18-51-Blitzkrieg_Super_Pro_Max_Plan.md"
DEST = "docs/00-meta/BLITZKRIEG_SUPER_PRO_MAX_MASTER_PLAN.md"
# Path fix: canonical tokens from plan's reference to repo path
CANONICAL_OLD = "../../token-lists/src/canonical-tokens.ts"
CANONICAL_NEW = "../../smom-dbis-138/services/token-aggregation/src/config/canonical-tokens.ts"
def extract_json_from_line(line: str):
    """Parse the first JSON object embedded at the start of ``line``.

    Returns ``(text, kind)`` where ``kind`` is ``"content"`` when the object
    has a top-level ``content`` key, ``"replacement"`` when it carries a
    non-empty ``updates`` list (the first update's ``replacement`` is
    returned), and ``(None, None)`` when the line holds no parsable JSON
    object or neither key is present.
    """
    line = line.strip()
    if not line.startswith("{"):
        return None, None
    # raw_decode() locates the end of the first JSON value itself, so objects
    # whose string values contain "{" or "}" parse correctly (the previous
    # hand-rolled brace counter miscounted braces inside strings and rejected
    # such lines).
    try:
        obj, _end = json.JSONDecoder().raw_decode(line)
    except json.JSONDecodeError:
        return None, None
    if "content" in obj:
        return obj["content"], "content"
    if "updates" in obj and obj["updates"]:
        return obj["updates"][0].get("replacement"), "replacement"
    return None, None
def main() -> None:
    """Assemble the consolidated plan from transcript messages and write DEST."""
    with open(SRC, "r", encoding="utf-8") as f:
        lines = f.readlines()
    # Collect tagged candidates: "base" (sections 1-7), "s8_9", "s10_11".
    parts = []
    for line in lines:
        content, kind = extract_json_from_line(line)
        if content is None:
            continue
        if kind == "content":
            # First message: base is "Super Pro" not "Ultra Uber Max Pro"; skip, we want the replacement from "Ultra Uber Max Pro"
            if "Ultra Uber Max Pro" in content or "Super Pro Max" in content:
                if "Ultra Uber Max Pro" in content and "Supreme Command" not in content and "Absolute Air Superiority" not in content:
                    parts.append(("base", content))
            continue
        if kind == "replacement":
            if content.startswith("# Blitzkrieg Ultra Uber Max Pro Master Plan") and "8. Supreme Command" not in content:
                parts.append(("base", content))
            elif content.startswith("# 8. Supreme Command Tier"):
                parts.append(("s8_9", content))
            elif "Zero Drift Doctrine." in content and "# 10. Absolute Air Superiority" in content:
                parts.append(("s10_11", content))
    # Build ordered: base, then 8+9, then 10+11 (strip leading "Zero Drift Doctrine.\n\n" from 10+11 to avoid duplicate)
    base = s8_9 = s10_11 = None
    # Last match wins for each tag.
    for tag, text in parts:
        if tag == "base":
            base = text
        elif tag == "s8_9":
            s8_9 = text
        elif tag == "s10_11":
            s10_11 = text
    if not base:
        # Fallback: get from "updates" replacement that is the full Ultra Uber Max Pro (line with pattern ".*")
        for line in lines:
            if '"updates":[{"pattern":".*"' in line and '"replacement":"# Blitzkrieg Ultra Uber Max Pro' in line:
                content, _ = extract_json_from_line(line)
                if content and "8. Supreme Command" not in content:
                    base = content
                    break
    if not s8_9:
        for line in lines:
            if "# 8. Supreme Command Tier" in line:
                content, _ = extract_json_from_line(line)
                if content:
                    s8_9 = content
                    break
    if not s10_11:
        for line in lines:
            if "# 10. Absolute Air Superiority" in line:
                content, _ = extract_json_from_line(line)
                if content:
                    s10_11 = content
                    break
    # All three sections are required; report which are missing and abort.
    if not base or not s8_9 or not s10_11:
        print("Missing parts:", "base" if not base else "", "s8_9" if not s8_9 else "", "s10_11" if not s10_11 else "", file=sys.stderr)
        sys.exit(1)
    # Avoid duplicating "Zero Drift Doctrine." between section 9 and 10
    if s10_11.startswith("Zero Drift Doctrine.\n\n"):
        s10_11 = s10_11[len("Zero Drift Doctrine.\n\n") :]
    full = base.rstrip() + "\n\n" + s8_9.rstrip() + "\n\n" + s10_11
    # Path fix
    full = full.replace(CANONICAL_OLD, CANONICAL_NEW)
    # NOTE(review): "(Sections 111)" below looks like a mojibake of
    # "(Sections 1-11)" (dash lost in extraction) — confirm intended text.
    header = "<!-- Last Updated: 2026-02-13. Status: Consolidated Ultra Uber Max Pro through Absolute Air Superiority (Sections 111). -->\n\n"
    with open(DEST, "w", encoding="utf-8") as f:
        f.write(header + full)
    print("Wrote", DEST, "(%d chars)" % len(full))


if __name__ == "__main__":
    main()

64
scripts/fix-wsl-ip.sh Executable file
View File

@@ -0,0 +1,64 @@
#!/bin/bash
# Fix WSL IP address from 192.168.11.4 to 192.168.11.23 on eth0.
# Removes the old address/route and adds the correct ones. Idempotent; must run as root.
set -e

OLD_IP="192.168.11.4"
NEW_IP="192.168.11.23"
INTERFACE="eth0"

# Regex-escaped forms for anchored matching: a bare `grep -q "$OLD_IP"` would
# also match 192.168.11.40-49 (and "." matches any character), so detection
# could false-positive and skip/duplicate work.
OLD_IP_RE="${OLD_IP//./\\.}"
NEW_IP_RE="${NEW_IP//./\\.}"

echo "🔧 Fixing WSL IP Address Configuration"
echo " Changing from $OLD_IP to $NEW_IP"
echo ""
# Check if running as root
if [ "$EUID" -ne 0 ]; then
  echo "❌ This script must be run with sudo"
  echo " Usage: sudo $0"
  exit 1
fi
# Remove old IP if it exists ("inet 192.168.11.4/..." exactly)
if ip addr show "$INTERFACE" | grep -qE "inet ${OLD_IP_RE}/"; then
  echo " Removing old IP address: $OLD_IP"
  ip addr del "$OLD_IP/24" dev "$INTERFACE" 2>/dev/null || true
  echo " ✅ Old IP removed"
else
  echo " Old IP ($OLD_IP) not found, skipping removal"
fi
# Remove old route if it exists (src must be exactly the old IP)
if ip route show | grep -qE "192\.168\.11\.0/24.*src ${OLD_IP_RE}( |$)"; then
  echo " Removing old route"
  ip route del 192.168.11.0/24 dev "$INTERFACE" src "$OLD_IP" 2>/dev/null || true
  echo " ✅ Old route removed"
fi
# Add new IP if it doesn't exist
if ip addr show "$INTERFACE" | grep -qE "inet ${NEW_IP_RE}/"; then
  echo " New IP ($NEW_IP) already configured"
else
  echo " Adding new IP address: $NEW_IP"
  ip addr add "$NEW_IP/24" dev "$INTERFACE"
  echo " ✅ New IP added"
fi
# Add route if it doesn't exist
if ! ip route show | grep -qE "192\.168\.11\.0/24.*src ${NEW_IP_RE}( |$)"; then
  echo " Adding route for VLAN 11 network"
  ip route add 192.168.11.0/24 dev "$INTERFACE" src "$NEW_IP" 2>/dev/null || true
  echo " ✅ Route added"
else
  echo " Route already exists"
fi
echo ""
echo "✅ IP Configuration Updated!"
echo ""
echo "📋 Current IP Addresses on $INTERFACE:"
ip addr show "$INTERFACE" | grep "inet " | sed 's/^/ /'
echo ""
echo "💡 Next step: Update ~/.bashrc to use $NEW_IP for persistence"
echo " Run: sed -i 's/$OLD_IP/$NEW_IP/g' ~/.bashrc"
echo ""

19
scripts/git-status-all.sh Normal file
View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Report `git status -sb` for the proxmox root repo and all top-level submodules.
# Output: scripts/git-status-report.txt under the repo root. ROOT may be overridden.
set -euo pipefail
# Derive the repo root from this script's location (scripts/ -> repo root)
# instead of hard-coding a home directory, so the report works from any
# checkout; ROOT=/path overrides.
ROOT="${ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
OUT="$ROOT/scripts/git-status-report.txt"
cd "$ROOT"
repos=(. explorer-monorepo mcp-proxmox smom-dbis-138 ProxmoxVE metamask-integration dbis_core gru-docs miracles_in_motion metaverseDubai OMNIS omada-api the-order arromis-monorepo alltra-lifi-settlement "pr-workspace/app-ethereum" "pr-workspace/chains")
{
  for r in "${repos[@]}"; do
    # "." is the superproject itself; everything else is a submodule path.
    if [[ "$r" == . ]]; then name="proxmox (root)"; dir="$ROOT"; else name="$r"; dir="$ROOT/$r"; fi
    if [[ -d "$dir" ]]; then
      echo ""
      echo "=== $name ==="
      # Subshell keeps cwd; || true so a broken repo doesn't stop the report.
      (cd "$dir" && git status -sb 2>&1) || true
    fi
  done
} > "$OUT"
echo "Report written to $OUT"

View File

@@ -0,0 +1,108 @@
#!/usr/bin/env bash
# Check all NPMplus VMs (container status) and all backend VMIDs they proxy to.
# Uses SSH to Proxmox hosts. Run from project root.
# Usage: ./scripts/health/check-npmplus-and-backend-vmids.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
# Optional LAN address overrides; defaults below match the lab network.
[ -f config/ip-addresses.conf ] && source config/ip-addresses.conf 2>/dev/null || true
PROXMOX_USER="${PROXMOX_USER:-root}"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
# Intentionally a space-separated string (expanded unquoted at call sites).
SSH_OPTS="-o ConnectTimeout=5 -o StrictHostKeyChecking=no"
# ANSI colors for status lines.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# NPMplus VMID : Proxmox host
declare -A NPMPLUS_HOST
NPMPLUS_HOST[10233]="$R630_01" # Main (76.53.10.36 → .167)
NPMPLUS_HOST[10234]="$R630_02" # Secondary HA
NPMPLUS_HOST[10235]="$R630_01" # Alltra/HYBX (76.53.10.38 → .169)
NPMPLUS_HOST[10236]="$R630_01" # Fourth dev/Codespaces (76.53.10.40 → .170)
NPMPLUS_HOST[10237]="$R630_02" # Mifos
# Backend VMID : Proxmox host (for VMIDs that NPMplus proxies to)
declare -A BACKEND_HOST
BACKEND_HOST[5000]="$R630_02" # explorer
BACKEND_HOST[7810]="$R630_02" # mim4u
BACKEND_HOST[10130]="$R630_01" # dbis-frontend
BACKEND_HOST[2400]="$ML110" # thirdweb-rpc
BACKEND_HOST[2101]="$R630_01" # besu-rpc-core-1
BACKEND_HOST[2201]="$R630_02" # besu-rpc-public-1
BACKEND_HOST[10150]="$R630_01" # dbis-api
BACKEND_HOST[10151]="$R630_01" # dbis-api-2
BACKEND_HOST[5800]="$R630_02" # mifos (10237 backend)
BACKEND_HOST[5801]="$R630_02" # dapp (optional)
BACKEND_HOST[2102]="$ML110" # besu-rpc-core-2 (10235 backend)
#######################################
# Check one container's pct status over SSH.
# Arguments: $1 vmid, $2 Proxmox host, $3 optional label shown next to the VMID
# Outputs:   one colored status line on stdout
# Returns:   0 if the container is running, 1 otherwise
#######################################
check_vmid_on_host() {
  local vmid="$1" host="$2" label="${3:-}"
  local status
  # SSH_OPTS is intentionally unquoted: it holds multiple -o options.
  status=$(ssh $SSH_OPTS "${PROXMOX_USER}@${host}" "pct status $vmid 2>/dev/null" | awk '{print $2}' || echo "unknown")
  # A reachable host whose pct prints nothing yields an empty (but successful)
  # pipeline; normalize that to "unknown" so the report never shows a blank status.
  status=${status:-unknown}
  if [[ "$status" == "running" ]]; then
    echo -e " VMID $vmid $label: ${GREEN}running${NC} (host $host)"
    return 0
  else
    echo -e " VMID $vmid $label: ${RED}$status${NC} (host $host)"
    return 1
  fi
}
# Pass 1: the five NPMplus proxy containers themselves.
echo ""
echo -e "${CYAN}=== NPMplus VMs ===${NC}"
echo ""
npm_ok=0
npm_fail=0
for vmid in 10233 10234 10235 10236 10237; do
host="${NPMPLUS_HOST[$vmid]:-}"
[[ -z "$host" ]] && continue
case "$vmid" in
10233) label="(main)" ;;
10234) label="(secondary)" ;;
10235) label="(Alltra/HYBX)" ;;
10236) label="(Fourth dev)" ;;
10237) label="(Mifos)" ;;
*) label="" ;;
esac
if check_vmid_on_host "$vmid" "$host" "$label"; then npm_ok=$((npm_ok + 1)); else npm_fail=$((npm_fail + 1)); fi
done
# Pass 2: the backends each proxy points at, grouped by NPMplus instance.
echo ""
echo -e "${CYAN}=== Backend VMIDs (NPMplus proxy targets) ===${NC}"
echo ""
echo "Main NPMplus (10233) backends:"
backend_ok=0
backend_fail=0
for vmid in 5000 2201 2101 10130 10150 10151 7810 2400; do
host="${BACKEND_HOST[$vmid]:-}"
[[ -z "$host" ]] && continue
if check_vmid_on_host "$vmid" "$host" ""; then backend_ok=$((backend_ok + 1)); else backend_fail=$((backend_fail + 1)); fi
done
echo "Optional: 5801 (dapp)"
if check_vmid_on_host "5801" "${BACKEND_HOST[5801]}" ""; then backend_ok=$((backend_ok + 1)); else backend_fail=$((backend_fail + 1)); fi
echo ""
echo "NPMplus 10237 (Mifos) backend:"
if check_vmid_on_host "5800" "${BACKEND_HOST[5800]}" "(mifos)"; then backend_ok=$((backend_ok + 1)); else backend_fail=$((backend_fail + 1)); fi
echo ""
echo "NPMplus 10235 (Alltra/HYBX) backend:"
if check_vmid_on_host "2102" "${BACKEND_HOST[2102]}" "(rpc-core-2)"; then backend_ok=$((backend_ok + 1)); else backend_fail=$((backend_fail + 1)); fi
echo ""
echo -e "${CYAN}Summary${NC}"
echo -e " NPMplus VMs: ${GREEN}$npm_ok${NC} running, ${RED}$npm_fail${NC} not running (total 5)"
echo -e " Backend VMIDs: ${GREEN}$backend_ok${NC} running, ${RED}$backend_fail${NC} not running"
echo ""
# Exit status = total failures (0 when everything is running).
exit $((npm_fail + backend_fail))

View File

@@ -0,0 +1,89 @@
#!/usr/bin/env bash
# Check health of RPC node VMs only (container status + besu-rpc service + RPC block).
# Uses SSH to Proxmox hosts. Run from project root.
# Usage: ./scripts/health/check-rpc-vms-health.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
[ -f config/ip-addresses.conf ] && source config/ip-addresses.conf 2>/dev/null || true
PROXMOX_USER="${PROXMOX_USER:-root}"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
# VMID:host:service (same mapping as review-sentry-and-rpc-nodes.sh)
RPC_NODES=(
"2101:$R630_01:besu-rpc"
"2201:$R630_02:besu-rpc"
"2301:$ML110:besu-rpc"
"2303:$R630_02:besu-rpc"
"2304:$ML110:besu-rpc"
"2305:$ML110:besu-rpc"
"2306:$ML110:besu-rpc"
"2307:$ML110:besu-rpc"
"2308:$ML110:besu-rpc"
"2400:$ML110:besu-rpc"
"2401:$R630_02:besu-rpc"
"2402:$ML110:besu-rpc"
"2403:$ML110:besu-rpc"
)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'
# Intentionally a space-separated string (expanded unquoted at call sites).
SSH_OPTS="-o ConnectTimeout=5 -o StrictHostKeyChecking=no"
echo -e "${CYAN}=== RPC Node VMs Health ===${NC}"
echo ""
ok=0
fail=0
# Per node: container running -> besu-rpc service active -> eth_blockNumber probe.
for entry in "${RPC_NODES[@]}"; do
IFS=: read -r vmid host service <<< "$entry"
ssh_target="${PROXMOX_USER}@${host}"
ct_status=$(ssh $SSH_OPTS "$ssh_target" "pct status $vmid 2>/dev/null" | awk '{print $2}' || echo "unknown")
if [[ "$ct_status" != "running" ]]; then
echo -e " VMID $vmid: container ${RED}$ct_status${NC} (host $host)"
# `|| true` guards set -e: (( )) returns 1 when the pre-increment value is 0.
((fail++)) || true
continue
fi
service_status=$(ssh $SSH_OPTS "$ssh_target" "pct exec $vmid -- systemctl is-active $service 2>/dev/null" || echo "unknown")
if [[ "$service_status" != "active" ]]; then
echo -e " VMID $vmid: container running, ${YELLOW}$service $service_status${NC} (host $host)"
((fail++)) || true
continue
fi
ip=$(ssh $SSH_OPTS "$ssh_target" "pct exec $vmid -- hostname -I 2>/dev/null | awk '{print \$1}'" 2>/dev/null || echo "")
block_info=""
if [[ -n "$ip" ]]; then
resp=$(curl -s -m 3 -X POST -H "Content-Type: application/json" -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' "http://$ip:8545" 2>/dev/null || echo "")
if echo "$resp" | grep -q '"result"'; then
block_hex=$(echo "$resp" | jq -r '.result' 2>/dev/null)
# Arithmetic expansion converts the 0x-prefixed hex to decimal.
block_dec=$((block_hex))
# 2050000 is the "caught up" watermark; below it the node is flagged behind.
if [[ "$block_dec" -lt 2050000 ]]; then
block_info=" → block ${YELLOW}$block_dec (behind)${NC}"
else
block_info=" → block ${GREEN}$block_dec${NC}"
fi
else
block_info="${YELLOW}RPC no response${NC}"
fi
fi
echo -e " VMID $vmid: container running, ${GREEN}$service active${NC} ($host)$block_info"
((ok++)) || true
done
echo ""
echo -e "${CYAN}Summary: ${GREEN}$ok healthy${NC}, ${RED}$fail with issues${NC} (total ${#RPC_NODES[@]} RPC nodes)"
# Exit status = number of unhealthy nodes.
exit $fail

View File

@@ -22,15 +22,23 @@ export LC_ALL=C
export LANG=C
echo "[INFO] Installing packages..."
apt-get update -qq
apt-get install -y -qq openjdk-17-jdk wget curl jq ca-certificates || true
# Skip apt if java and wget already present (avoids hang in CTs with slow/locked apt)
if command -v java >/dev/null 2>&1 && command -v wget >/dev/null 2>&1; then
echo "[INFO] java and wget present, skipping apt."
else
# Allow update to fail (e.g. command-not-found DB I/O error in CT)
apt-get update -qq 2>/dev/null || true
apt-get install -y -qq openjdk-17-jdk wget curl jq ca-certificates 2>/dev/null || true
fi
command -v java >/dev/null 2>&1 || { echo "[ERROR] java not found; run: apt-get install -y openjdk-17-jdk"; exit 1; }
command -v wget >/dev/null 2>&1 || { echo "[ERROR] wget not found; run: apt-get install -y wget"; exit 1; }
if ! id -u "$BESU_USER" &>/dev/null; then
useradd -r -s /bin/bash -d "$BESU_HOME" -m "$BESU_USER"
fi
mkdir -p "$BESU_HOME"
BESU_TAR="/tmp/besu-${BESU_VERSION}.tar.gz"
BESU_TAR="${TMPDIR:-/tmp}/besu-${BESU_VERSION}.tar.gz"
BESU_DOWNLOAD_URL="https://hyperledger.jfrog.io/hyperledger/besu-binaries/besu/${BESU_VERSION}/besu-${BESU_VERSION}.tar.gz"
echo "[INFO] Downloading Besu..."
@@ -41,7 +49,7 @@ rm -f "$BESU_TAR"
chown -R "$BESU_USER:$BESU_GROUP" "$BESU_HOME"
chmod +x "$BESU_HOME/bin/besu"
mkdir -p "$BESU_DATA" "$BESU_CONFIG" "$BESU_LOGS"
mkdir -p "$BESU_DATA" "$BESU_CONFIG" "$BESU_LOGS" "${BESU_DATA}/tmp"
chown -R "$BESU_USER:$BESU_GROUP" "$BESU_DATA" "$BESU_CONFIG" "$BESU_LOGS"
if [[ "$NODE_TYPE" == "sentry" ]]; then
@@ -66,7 +74,7 @@ Type=simple
User=$BESU_USER
Group=$BESU_GROUP
WorkingDirectory=$BESU_HOME
Environment="BESU_OPTS=-Xmx2g -Xms1g"
Environment="BESU_OPTS=-Xmx2g -Xms1g -Djava.io.tmpdir=${BESU_DATA}/tmp"
ExecStart=$BESU_HOME/bin/besu --config-file=$BESU_CONFIG/$CONFIG_FILE
Restart=always
RestartSec=10

View File

@@ -276,6 +276,16 @@ $(ssh_node "$hostname" bash <<'ENDSSH'
echo "System Manufacturer: $(dmidecode -s system-manufacturer 2>/dev/null || echo 'Unknown')"
echo "System Product: $(dmidecode -s system-product-name 2>/dev/null || echo 'Unknown')"
fi
echo ""
echo "=== NIC MODELS (lspci) ==="
lspci -nn 2>/dev/null | grep -iE 'ethernet|network' || echo "No NICs found"
echo ""
echo "=== NIC INTERFACES (physical) ==="
for i in nic0 nic1 nic2 nic3; do
[ -d /sys/class/net/$i ] || continue
echo "--- $i ---"
ethtool $i 2>/dev/null | grep -E 'Speed|Link detected|Driver' || echo " (ethtool unavailable)"
done
ENDSSH
)
\`\`\`

View File

@@ -0,0 +1,67 @@
#!/usr/bin/env bash
# Load contract addresses from config/smart-contracts-master.json into the environment.
# Use after load-project-env.sh so .env overrides; addresses not set in .env are filled from the master JSON.
#
# Usage: source "${SCRIPT_DIR}/lib/load-contract-addresses.sh"
# Requires: jq (optional; if missing, only config/contract-addresses.conf is sourced when present)
#
# Exports: For each chain, contract names as CONTRACTS_<chain>_<Name> and env var names from envVarMap
# (e.g. CCIP_ROUTER, CCIPWETH9_BRIDGE_CHAIN138) when not already set.
# Version: 2026-02-13
if [[ -z "${PROJECT_ROOT:-}" ]]; then
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
export PROJECT_ROOT
fi
CONTRACTS_JSON="${PROJECT_ROOT}/config/smart-contracts-master.json"
# Source legacy contract-addresses.conf if present (e.g. ADDR_CCIP_SENDER for verify scripts)
[[ -f "${PROJECT_ROOT}/config/contract-addresses.conf" ]] && source "${PROJECT_ROOT}/config/contract-addresses.conf" 2>/dev/null || true
if [[ ! -f "$CONTRACTS_JSON" ]]; then
return 0 2>/dev/null || true
fi
if ! command -v jq &>/dev/null; then
return 0 2>/dev/null || true
fi
# Export NAME=VALUE only when NAME is currently unset/empty, so values from
# .env (loaded earlier) always win over the master JSON.
_set_if_unset() {
  local var_name="$1"
  local var_value="$2"
  if [[ -n "${!var_name:-}" ]]; then
    return 0
  fi
  export "${var_name}=${var_value}"
}
# For chain $1: export every contract as CONTRACTS_<chain>_<name> and every
# envVarMap entry (e.g. CCIP_ROUTER) that resolves to a known contract address.
# Existing environment values are never overwritten (see _set_if_unset).
_export_chain() {
  local chain="$1"
  local contract_rows env_rows
  # Tab-separated "key<TAB>value" rows from the master JSON; empty on jq error.
  contract_rows=$(jq -r --arg c "$chain" '.chains[$c].contracts // {} | to_entries[] | "\(.key)\t\(.value)"' "$CONTRACTS_JSON" 2>/dev/null)
  env_rows=$(jq -r --arg c "$chain" '.chains[$c].envVarMap // {} | to_entries[] | "\(.key)\t\(.value)"' "$CONTRACTS_JSON" 2>/dev/null)
  local entry_name entry_addr
  while IFS=$'\t' read -r entry_name entry_addr; do
    if [[ -n "$entry_name" && -n "$entry_addr" ]]; then
      # Sanitize the contract name into a valid shell identifier fragment.
      _set_if_unset "CONTRACTS_${chain}_${entry_name//[^a-zA-Z0-9_]/_}" "$entry_addr"
    fi
  done <<< "$contract_rows"
  local mapped_var mapped_key mapped_addr
  while IFS=$'\t' read -r mapped_var mapped_key; do
    [[ -z "$mapped_var" || -z "$mapped_key" ]] && continue
    mapped_addr=$(jq -r --arg c "$chain" --arg k "$mapped_key" '.chains[$c].contracts[$k] // empty' "$CONTRACTS_JSON" 2>/dev/null)
    if [[ -n "$mapped_addr" ]]; then
      _set_if_unset "$mapped_var" "$mapped_addr"
    fi
  done <<< "$env_rows"
}
# Export contracts for each chain present in the JSON (Chain 138 and mainnet 1).
for chain in 138 1; do
if jq -e --arg c "$chain" '.chains[$c]' "$CONTRACTS_JSON" &>/dev/null; then
_export_chain "$chain"
fi
done
# Let callers locate the master JSON themselves (e.g. for jq queries).
export CONTRACTS_MASTER_JSON="$CONTRACTS_JSON"

View File

@@ -23,12 +23,18 @@ err_exit() { echo "ERROR: $1" >&2; exit 1; }
# 2. IP/config from centralized config
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# 3. smom-dbis-138 .env (PRIVATE_KEY, bridge addrs, RPC)
# 3. smom-dbis-138 .env (PRIVATE_KEY, bridge addrs, RPC) — PRIVATE_KEY is read from this dotenv when not set
[[ -f "${PROJECT_ROOT}/smom-dbis-138/.env" ]] && set -a && source "${PROJECT_ROOT}/smom-dbis-138/.env" 2>/dev/null && set +a
# 3b. Secure secrets (PRIVATE_KEY) — when not set, try ~/.secure-secrets/private-keys.env
[[ -z "${PRIVATE_KEY:-}" ]] && [[ -f "${HOME}/.secure-secrets/private-keys.env" ]] && set -a && source "${HOME}/.secure-secrets/private-keys.env" 2>/dev/null && set +a
# 4. dbis_core config if present
[[ -f "${PROJECT_ROOT}/dbis_core/config/dbis-core-proxmox.conf" ]] && source "${PROJECT_ROOT}/dbis_core/config/dbis-core-proxmox.conf" 2>/dev/null || true
# 5. Contract addresses from master JSON (config/smart-contracts-master.json) when not set by .env
[[ -f "${PROJECT_ROOT}/scripts/lib/load-contract-addresses.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-contract-addresses.sh" 2>/dev/null || true
# Ensure hosts have fallbacks (from config or defaults)
PROXMOX_HOST_R630_01="${PROXMOX_HOST_R630_01:-${PROXMOX_R630_01:-192.168.11.11}}"
PROXMOX_HOST_R630_02="${PROXMOX_HOST_R630_02:-${PROXMOX_R630_02:-192.168.11.12}}"
@@ -36,8 +42,15 @@ PROXMOX_HOST_ML110="${PROXMOX_HOST_ML110:-${PROXMOX_ML110:-192.168.11.10}}"
# Derived vars (from config; fallbacks for missing config)
export RPC_CORE_1="${RPC_CORE_1:-192.168.11.211}"
export RPC_URL_138="${RPC_URL_138:-http://${RPC_CORE_1}:8545}"
export CHAIN138_RPC="${CHAIN138_RPC_URL:-$RPC_URL_138}"
export RPC_PUBLIC_1="${RPC_PUBLIC_1:-192.168.11.221}"
# Chain 138 two standards: (1) Core RPC_URL_138 admin/deploy VMID 2101; (2) Public RPC_URL_138_PUBLIC bridge/frontend VMID 2201. Alias: RPC_URL -> RPC_URL_138.
export RPC_URL_138="${RPC_URL_138:-${CHAIN138_RPC_URL:-${RPC_URL:-http://${RPC_CORE_1}:8545}}}"
export CHAIN138_RPC_URL="$RPC_URL_138"
export CHAIN138_RPC="$RPC_URL_138"
# Foundry uses ETH_RPC_URL; set so forge create/script use Chain 138 when --rpc-url not passed
export ETH_RPC_URL="${ETH_RPC_URL:-$RPC_URL_138}"
export RPC_URL_138_PUBLIC="${RPC_URL_138_PUBLIC:-http://${RPC_PUBLIC_1}:8545}"
export WS_URL_138_PUBLIC="${WS_URL_138_PUBLIC:-ws://${RPC_PUBLIC_1}:8546}"
export SMOM_DIR="${SMOM_DBIS_138_DIR:-${PROJECT_ROOT}/smom-dbis-138}"
export DBIS_CORE_DIR="${DBIS_CORE_DIR:-${PROJECT_ROOT}/dbis_core}"
@@ -48,7 +61,7 @@ get_host_for_vmid() {
case "$vmid" in
10130|10150|10151|106|107|108|10000|10001|10020|10100|10101|10120|10233|10235) echo "${PROXMOX_HOST_R630_01}";;
2101) echo "${PROXMOX_HOST_R630_01}";;
5000|7810|2201|2303|2401|6200|6201|10234|10237|5800) echo "${PROXMOX_HOST_R630_02}";;
5000|7810|2201|2303|2401|6200|6201|10234|10237|5800|5801) echo "${PROXMOX_HOST_R630_02}";;
2301|2400|1504|2503|2504|2505) echo "${PROXMOX_HOST_ML110}";;
5400|5401|5402|5403|5410|5411|5412|5413|5414|5415|5416|5417|5418|5419|5420|5421|5422|5423|5424|5425|5440|5441|5442|5443|5444|5445|5446|5447|5448|5449|5450|5451|5452|5453|5454|5455|5470|5471|5472|5473|5474|5475|5476) echo "${PROXMOX_HOST_R630_02}";;
*) echo "${PROXMOX_HOST_R630_01:-${PROXMOX_R630_02}}";;

View File

@@ -1,5 +1,27 @@
# Maintenance Scripts
**health-check-rpc-2101.sh** — Health check for Besu RPC on VMID 2101: container status, besu-rpc service, port 8545, eth_chainId, eth_blockNumber. Run from project root (LAN). See docs/09-troubleshooting/RPC_NODES_BLOCK_PRODUCTION_FIX.md.
**fix-core-rpc-2101.sh** — One-command fix for Core RPC 2101: start CT if stopped, restart Besu, verify RPC. Options: `--dry-run`, `--restart-only`. If Besu fails with JNA/NoClassDefFoundError, run fix-rpc-2101-jna-reinstall.sh first.
**fix-rpc-2101-jna-reinstall.sh** — Reinstall Besu in CT 2101 to fix JNA/NoClassDefFoundError; then re-run fix-core-rpc-2101.sh. Use `--dry-run` to print steps only.
**check-disk-all-vmids.sh** — Check root disk usage in all running containers on ml110, r630-01, r630-02. Use `--csv` for tab-separated output. For prevention and audits.
**run-all-maintenance-via-proxmox-ssh.sh** — Run all maintenance/fix scripts that use SSH to Proxmox VE (r630-01, ml110, r630-02). **Runs make-rpc-vmids-writable-via-ssh.sh first** (so 2101, 2500-2505 are writable), then resolve-and-fix-all, fix-rpc-2101-jna-reinstall, install-besu-permanent-on-missing-nodes, address-all-remaining-502s; optional E2E with `--e2e`. Use `--no-npm` to skip NPM proxy update, `--dry-run` to print steps only, `--verbose` to show all step output (no stderr hidden). Step 2 (2101 fix) has optional timeout: `STEP2_TIMEOUT=900` (default) or `STEP2_TIMEOUT=0` to disable. Run from project root (LAN).
**make-rpc-vmids-writable-via-ssh.sh** — SSHs to r630-01 and for each VMID 2101, 2500-2505: stops the CT, runs `e2fsck -f -y` on the rootfs LV, starts the CT. Use before fix-rpc-2101 or install-besu-permanent when CTs are read-only. `--dry-run` to print only. Run from project root (LAN).
**make-validator-vmids-writable-via-ssh.sh** — SSHs to r630-01 (1000, 1001, 1002) and ml110 (1003, 1004); stops each validator CT, runs `e2fsck -f -y` on rootfs, starts the CT. Fixes "Read-only file system" / JNA crash loop on validators. Then run `fix-all-validators-and-txpool.sh`. See docs/08-monitoring/RPC_AND_VALIDATOR_TESTING_RUNBOOK.md.
**Sentries 1500–1502 (r630-01)** — If deploy-besu-node-lists or set-all-besu-max-peers-32 reports Skip/fail or "Read-only file system" for 1500–1502, they have the same read-only root issue. On the host: `pct stop 1500; e2fsck -f -y /dev/pve/vm-1500-disk-0; pct start 1500` (repeat for 1501, 1502). Then re-run deploy and max-peers/restart.
**address-all-remaining-502s.sh** — One flow to address remaining E2E 502s: runs `fix-all-502s-comprehensive.sh`, then (if `NPM_PASSWORD` set) NPMplus proxy update, then RPC diagnostics (`diagnose-rpc-502s.sh`), optionally `fix-all-besu-nodes.sh` and E2E. Use `--no-npm`, `--run-besu-fix`, `--e2e`, `--dry-run` (print steps only). Run from LAN.
**diagnose-rpc-502s.sh** — Collects for VMIDs 2101 and 2500–2505: `ss -tlnp` and `journalctl -u besu-rpc` / `besu`. Pipe to a file or use from `address-all-remaining-502s.sh`.
**fix-all-502s-comprehensive.sh** — Starts/serves backends for 10130, 10150/10151, 2101, 2500–2505, Cacti (Python stubs if needed). Use `--dry-run` to print actions without SSH. Does not update NPMplus; use `update-npmplus-proxy-hosts-api.sh` from LAN for that.
**daily-weekly-checks.sh** — Daily (explorer, indexer lag, RPC) and weekly (config API, thin pool, log reminder).
**schedule-daily-weekly-cron.sh** — Install cron: daily 08:00, weekly Sun 09:00.

View File

@@ -0,0 +1,59 @@
#!/usr/bin/env bash
# Add security headers (HSTS, X-Frame-Options, X-Content-Type-Options) to explorer VMID 5000 nginx.
# Run via SSH to the Proxmox host that has VMID 5000 (r630-02). Fixes explorer E2E warnings.
#
# Usage: ./scripts/maintenance/add-explorer-security-headers-via-ssh.sh [--dry-run]
# Env: PROXMOX_HOST_R630_02 (default 192.168.11.12) — host where VMID 5000 runs.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional env lib (provides PROXMOX_HOST_R630_02 among others); soft-fail.
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
# Explorer container and the Proxmox host it runs on.
VMID=5000
PROXMOX_HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
# Colored status logging; printf '%b' expands the ANSI escapes exactly as
# `echo -e` did, including any escapes in the message itself.
log_info() { printf '%b\n' "\033[0;34m[INFO]\033[0m $1"; }
log_ok()   { printf '%b\n' "\033[0;32m[✓]\033[0m $1"; }
log_warn() { printf '%b\n' "\033[0;33m[⚠]\033[0m $1"; }
# SSH to the target Proxmox host as root; all arguments pass through verbatim.
run_ssh() {
  local -a ssh_opts=(-o ConnectTimeout=10 -o StrictHostKeyChecking=no)
  ssh "${ssh_opts[@]}" "root@${PROXMOX_HOST}" "$@"
}
echo ""
echo "=== Add explorer (VMID 5000) security headers via SSH ==="
echo " Host: $PROXMOX_HOST VMID: $VMID dry-run=$DRY_RUN"
echo ""
if ! run_ssh "echo OK" &>/dev/null; then
log_warn "Cannot SSH to $PROXMOX_HOST. Run from LAN or set PROXMOX_HOST_R630_02."
exit 0
fi
# Ensure nginx in 5000 has security headers. Add to first server block (listen 80) if missing.
if [[ "$DRY_RUN" == true ]]; then
log_info "Would run in VMID $VMID: check/add security headers and reload nginx"
exit 0
fi
# Patch the first nginx vhost file found inside the CT, then validate and
# reload nginx *inside the container*.
# ADDED: 0 = nothing patched, 1 = headers added, 2 = already present.
ADDED=0
for conf in /etc/nginx/sites-enabled/default /etc/nginx/sites-enabled/blockscout; do
run_ssh "pct exec $VMID -- test -f $conf" 2>/dev/null || continue
# "yes" only when the config already carries an X-Frame-Options header.
HAS_HEADER=$(run_ssh "pct exec $VMID -- grep -q 'X-Frame-Options' $conf 2>/dev/null" && echo "yes" || true)
if [[ "$HAS_HEADER" == "yes" ]]; then
log_ok "Security headers already present in VMID $VMID ($conf)"
ADDED=2
break
fi
# Insert each header directly after every 'listen 80;' directive.
run_ssh "pct exec $VMID -- sed -i '/listen 80;/a\ add_header Strict-Transport-Security \"max-age=31536000; includeSubDomains\" always;' $conf" 2>/dev/null && true
run_ssh "pct exec $VMID -- sed -i '/listen 80;/a\ add_header X-Frame-Options \"SAMEORIGIN\" always;' $conf" 2>/dev/null && true
run_ssh "pct exec $VMID -- sed -i '/listen 80;/a\ add_header X-Content-Type-Options \"nosniff\" always;' $conf" 2>/dev/null && true
# Fix: validate and reload nginx INSIDE the CT. Previously the command chained
# '&& nginx -s reload' outside 'pct exec', so the reload ran on the Proxmox
# host itself and the container's nginx never picked up the new headers.
run_ssh "pct exec $VMID -- sh -c 'nginx -t && { nginx -s reload || systemctl reload nginx; }'" >/dev/null 2>&1 || true
log_ok "Security headers added to $conf in VMID $VMID"
ADDED=1
break
done
[[ $ADDED -eq 0 ]] && log_warn "No nginx config updated; run explorer-monorepo/scripts/check-and-fix-nginx-vmid5000.sh from Proxmox host"
echo ""

View File

@@ -0,0 +1,122 @@
#!/usr/bin/env bash
# Run all steps to address remaining E2E 502s: backends, NPMplus proxy refresh, RPC diagnostics, optional Besu mass-fix and E2E.
# Run from project root. From LAN (or host that can reach NPMplus and Proxmox).
#
# Usage:
# ./scripts/maintenance/address-all-remaining-502s.sh
# ./scripts/maintenance/address-all-remaining-502s.sh --no-npm
# ./scripts/maintenance/address-all-remaining-502s.sh --run-besu-fix
# ./scripts/maintenance/address-all-remaining-502s.sh --e2e
# ./scripts/maintenance/address-all-remaining-502s.sh --dry-run # print steps only, no SSH/NPM
#
# Requires: SSH to r630-01. For NPMplus update: NPM_PASSWORD in .env or env.
# See: docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# All child scripts below are invoked with paths relative to the project root.
cd "$PROJECT_ROOT"
# Optional env/config libs (NPM_PASSWORD, host IPs, etc.); soft-fail when absent.
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Flag defaults; each CLI option below flips exactly one of them to true.
SKIP_NPM=false
RUN_BESU_FIX=false
RUN_E2E=false
DRY_RUN=false
for opt in "${@:-}"; do
  case "$opt" in
    --no-npm)       SKIP_NPM=true ;;
    --run-besu-fix) RUN_BESU_FIX=true ;;
    --e2e)          RUN_E2E=true ;;
    --dry-run)      DRY_RUN=true ;;
  esac
done
echo ""
echo "=== Address all remaining 502s ==="
echo " --no-npm=$SKIP_NPM --run-besu-fix=$RUN_BESU_FIX --e2e=$RUN_E2E --dry-run=$DRY_RUN"
echo ""
if $DRY_RUN; then
echo "Would run in order:"
echo " 1. fix-all-502s-comprehensive.sh --dry-run"
$SKIP_NPM || echo " 2. update-npmplus-proxy-hosts-api.sh (if NPM_PASSWORD set)"
echo " 3. diagnose-rpc-502s.sh (save report)"
$RUN_BESU_FIX && echo " 4. fix-all-besu-nodes.sh"
$RUN_E2E && echo " 5. verify-end-to-end-routing.sh"
echo ""
exit 0
fi
# Step 1/5: start/repair all known 502 backends (best-effort; never aborts).
echo "--- Step 1/5: Fix all 502 backends (comprehensive) ---"
if bash "${SCRIPT_DIR}/fix-all-502s-comprehensive.sh" 2>/dev/null; then
echo " Backends fix completed."
else
echo " Backends fix had warnings (check output above)."
fi
echo ""
# Step 2/5: refresh NPMplus proxy hosts — only when not skipped AND credentials exist.
if ! $SKIP_NPM; then
if [ -n "${NPM_PASSWORD:-}" ]; then
echo "--- Step 2/5: Update NPMplus proxy hosts (DBIS, RPC, etc.) ---"
if bash "${PROJECT_ROOT}/scripts/nginx-proxy-manager/update-npmplus-proxy-hosts-api.sh" 2>/dev/null; then
echo " NPMplus proxy hosts updated."
else
echo " NPMplus update failed (check output). Ensure LAN and NPM_PASSWORD correct."
fi
else
echo "--- Step 2/5: NPMplus update skipped (NPM_PASSWORD not set) ---"
echo " To fix dbis-admin/secure/dbis-api 502 when backends are up: set NPM_PASSWORD in .env and run:"
echo " ./scripts/nginx-proxy-manager/update-npmplus-proxy-hosts-api.sh"
fi
else
echo "--- Step 2/5: NPMplus update skipped (--no-npm) ---"
fi
echo ""
# Step 3/5: collect RPC diagnostics and archive the report under verification-evidence.
echo "--- Step 3/5: RPC diagnostics (2101, 2500-2505) ---"
if [ -f "${SCRIPT_DIR}/diagnose-rpc-502s.sh" ]; then
REPORT_DIR="${PROJECT_ROOT}/docs/04-configuration/verification-evidence"
mkdir -p "$REPORT_DIR"
REPORT="${REPORT_DIR}/rpc-502-diagnostics-$(date +%Y%m%d-%H%M%S).txt"
if bash "${SCRIPT_DIR}/diagnose-rpc-502s.sh" 2>&1 | tee "$REPORT"; then
echo " Diagnostics saved to: $REPORT"
fi
else
echo " (diagnose-rpc-502s.sh not found; skipping)"
fi
echo ""
# Step 4/5: optional mass restart/reconfigure of all Besu nodes (--run-besu-fix).
if $RUN_BESU_FIX; then
echo "--- Step 4/5: Fix all Besu nodes (config + restart) ---"
if bash "${PROJECT_ROOT}/scripts/besu/fix-all-besu-nodes.sh" 2>/dev/null; then
echo " Besu fix completed. Wait 60-90s then re-run E2E."
else
echo " Besu fix had issues (check output above)."
fi
echo ""
else
echo "--- Step 4/5: Besu mass-fix skipped ---"
echo " If RPC 502 persists: ./scripts/maintenance/address-all-remaining-502s.sh --run-besu-fix"
echo " Or: ./scripts/besu/fix-all-besu-nodes.sh"
echo ""
fi
# Step 5/5: optional end-to-end routing verification (--e2e); never fails the run.
if $RUN_E2E; then
echo "--- Step 5/5: E2E verification ---"
if [ -f "${PROJECT_ROOT}/scripts/verify/verify-end-to-end-routing.sh" ]; then
E2E_ACCEPT_502_INTERNAL=1 bash "${PROJECT_ROOT}/scripts/verify/verify-end-to-end-routing.sh" 2>/dev/null || true
fi
echo ""
fi
echo "=== Done ==="
echo " Next: ./scripts/verify/verify-end-to-end-routing.sh"
echo " If 502s remain: check report in docs/04-configuration/verification-evidence/rpc-502-diagnostics-*.txt"
echo " DBIS/RPC proxy: NPM_PASSWORD=xxx ./scripts/nginx-proxy-manager/update-npmplus-proxy-hosts-api.sh"
echo ""

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env bash
# Apply peer-connections plan: deploy cleaned node lists, restart RPC 2101, optionally 2102/2201.
# See: docs/08-monitoring/PEER_CONNECTIONS_PLAN.md
#
# Usage: ./scripts/maintenance/apply-peer-plan-fixes.sh [--deploy-only] [--restart-2101-only]
# --deploy-only Only deploy node lists (no restarts).
# --restart-2101-only Only restart VMID 2101 (assumes lists already deployed).
# Requires: SSH to Proxmox hosts (r630-01, r630-02, ml110). Run from LAN.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Optional host-IP config; ignored when missing.
if [[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]]; then
  source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
fi
# Mode flags, toggled by the two supported CLI options.
DEPLOY_ONLY=false
RESTART_2101_ONLY=false
for flag in "$@"; do
  case "$flag" in
    --deploy-only)       DEPLOY_ONLY=true ;;
    --restart-2101-only) RESTART_2101_ONLY=true ;;
  esac
done
echo ""
echo "=== Apply peer plan fixes ==="
echo " deploy-only=$DEPLOY_ONLY restart-2101-only=$RESTART_2101_ONLY"
echo ""
if [[ "$RESTART_2101_ONLY" != true ]]; then
echo "--- Deploy node lists to all Besu nodes ---"
"$PROJECT_ROOT/scripts/deploy-besu-node-lists-to-all.sh" || { echo "Deploy failed (SSH?)."; exit 1; }
echo ""
fi
if [[ "$DEPLOY_ONLY" == true ]]; then
echo "Done (deploy only). To restart RPC 2101: $PROJECT_ROOT/scripts/maintenance/fix-core-rpc-2101.sh --restart-only"
exit 0
fi
echo "--- Restart RPC 2101 to load new node lists ---"
"$PROJECT_ROOT/scripts/maintenance/fix-core-rpc-2101.sh" --restart-only || { echo "Restart 2101 failed."; exit 1; }
echo ""
echo "--- Optional: 2102 and 2201 max-peers=32 ---"
echo "Repo updated: smom-dbis-138/config/config-rpc-public.toml has max-peers=32."
echo "To apply on nodes (from host with SSH):"
echo " - 2102 (ml110): ensure config uses max-peers=32 (e.g. copy from repo config-rpc-core.toml), restart Besu."
echo " - 2201 (r630-02): ensure config uses max-peers=32 (e.g. copy from repo config-rpc-public.toml), restart Besu."
echo "Then re-run: ./scripts/verify/check-rpc-2101-all-peers.sh"
echo ""
echo "Done. Verify: ./scripts/verify/verify-rpc-2101-approve-and-sync.sh && ./scripts/verify/check-rpc-2101-all-peers.sh"
echo ""

View File

@@ -0,0 +1,73 @@
#!/usr/bin/env bash
# Check disk space inside all LXC containers (VMIDs) across Proxmox hosts.
# Usage: ./scripts/maintenance/check-disk-all-vmids.sh [--csv]
# Requires: SSH key-based access to ml110, r630-01, r630-02.
# See: config/ip-addresses.conf, docs/09-troubleshooting/RPC_NODES_BLOCK_PRODUCTION_FIX.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional host-IP config; defaults below cover a missing file.
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
# --csv switches output to tab-separated rows with a header.
CSV=false
for a in "$@"; do [[ "$a" == "--csv" ]] && CSV=true; done
# run_ssh <host> <cmd>: root SSH, 5s connect timeout; always succeeds
# (|| true) so an unreachable host degrades to empty output, not an abort.
run_ssh() { ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@"$1" "$2" 2>/dev/null || true; }
# Usage thresholds for the per-CT status flag.
WARN_PCT=85
CRIT_PCT=95
#######################################
# Report root-filesystem usage for every running CT on one Proxmox host.
# Globals:   CSV, WARN_PCT, CRIT_PCT (read); run_ssh (called)
# Arguments: $1 - host IP, $2 - host label for output
# Outputs:   one line per container (tab-separated when CSV=true)
# Returns:   0 always (unreachable hosts are reported on stderr, not fatal)
#######################################
check_host() {
  local host_ip="$1"
  local host_name="$2"
  local vmids
  # Running CT VMIDs on the host (`pct list` column 2 is the status).
  vmids=$(run_ssh "$host_ip" "pct list 2>/dev/null | awk 'NR>1 && \$2==\"running\" {print \$1}'" || true)
  if [[ -z "$vmids" ]]; then
    echo "Host $host_name ($host_ip): no running containers or unreachable" >&2
    return 0
  fi
  local vmid line size used avail pct pct_num flag
  for vmid in $vmids; do
    # Root filesystem inside the CT: size, used, avail, use% (df row 2).
    line=$(run_ssh "$host_ip" "pct exec $vmid -- df -h / 2>/dev/null | awk 'NR==2 {print \$2,\$3,\$4,\$5}'" || true)
    if [[ -z "$line" ]]; then
      # Fix: the original piped "host|vmid|?|?|?|?" through
      # `tr '|' "${CSV:+\t}"`, which ALWAYS produced tab output (CSV holds the
      # non-empty string "false" in non-CSV mode) and omitted the status
      # column. Emit a properly formatted row for each mode instead.
      if $CSV; then
        echo -e "${host_name}\t${vmid}\t?\t?\t?\t?\tUNKNOWN"
      else
        echo "  $host_name VMID $vmid: / usage unknown (no df output) [UNKNOWN]"
      fi
      continue
    fi
    read -r size used avail pct <<< "$line"
    # Strip the trailing '%' (parameter expansion; no subshell needed).
    pct_num=${pct%\%}
    if [[ -n "$pct_num" ]] && [[ "$pct_num" -ge "$CRIT_PCT" ]]; then
      flag="CRIT"
    elif [[ -n "$pct_num" ]] && [[ "$pct_num" -ge "$WARN_PCT" ]]; then
      flag="WARN"
    else
      flag="OK"
    fi
    if $CSV; then
      echo -e "${host_name}\t${vmid}\t${size}\t${used}\t${avail}\t${pct}\t${flag}"
    else
      echo "  $host_name VMID $vmid: / $pct used ($avail free) [$flag]"
    fi
  done
}
echo "=== Disk space in all running containers (root /) ==="
echo "Thresholds: WARN >= ${WARN_PCT}%, CRIT >= ${CRIT_PCT}%"
echo ""
if $CSV; then
echo -e "Host\tVMID\tSize\tUsed\tAvail\tUse%\tStatus"
fi
check_host "$ML110" "ml110"
check_host "$R630_01" "r630-01"
check_host "$R630_02" "r630-02"
echo ""
echo "Done. For Besu nodes, also ensure /data/besu has space (RocksDB can fill disk)."
echo "See: scripts/storage-monitor.sh (host-level), docs/09-troubleshooting/RPC_NODES_BLOCK_PRODUCTION_FIX.md"

View File

@@ -19,6 +19,8 @@ IP_BLOCKSCOUT="${IP_BLOCKSCOUT:-192.168.11.140}"
BLOCKSCOUT_API_PORT="${BLOCKSCOUT_API_PORT:-4000}"
DBIS_API_URL="${DBIS_API_URL:-https://dbis-api.d-bis.org}"
PROXMOX_R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
PROXMOX_R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
PROXMOX_ML110="${PROXMOX_HOST_ML110:-${PROXMOX_ML110:-192.168.11.10}}"
# Fail daily run when explorer API unreachable (set 0 to preserve legacy SKIP when off-LAN)
EXPLORER_FAIL_WHEN_UNREACHABLE="${EXPLORER_FAIL_WHEN_UNREACHABLE:-1}"
# Indexer lag: fail if explorer block is more than this many blocks behind RPC head
@@ -29,6 +31,7 @@ EXPLORER_INDEXER_LAG_THRESHOLD="${EXPLORER_INDEXER_LAG_THRESHOLD:-500}"
MAINTENANCE_METRIC_FILE="${MAINTENANCE_METRIC_FILE:-$PROJECT_ROOT/logs/maintenance-checks.metric}"
FAILED=0
STORAGE_MAX_PCT=0
check_rpc() {
echo -n "[136] RPC (${IP_RPC_2201}:8545)... "
@@ -126,31 +129,48 @@ check_config_api() {
fi
}
# [138a] Weekly: thin pool usage on r630-02 (VMID 5000 host). Warn >85%, fail at 100%.
check_thin_pool_r630_02() {
echo -n "[138a] Thin pool r630-02 (thin1/thin5)... "
local out usage pct
out=$(ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"${PROXMOX_R630_02}" "pvesm status 2>/dev/null | grep -E 'thin1|thin5' || true" 2>/dev/null || true)
# [138a] Weekly: thin pool / storage usage on one host. Warn >85%, fail at 100%.
# Usage: check_thin_pool_one_host <ip> <label>
check_thin_pool_one_host() {
local ip="$1" label="$2"
echo -n "[138a] Storage $label ($ip)... "
local out pct
out=$(ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$ip" "pvesm status 2>/dev/null; lvs --noheadings -o lv_name,data_percent 2>/dev/null | awk '\$2!=\"\"'" 2>/dev/null || true)
if [ -z "$out" ]; then
echo "SKIP (SSH to ${PROXMOX_R630_02} failed or no thin pool)"
echo "SKIP (SSH failed or no storage)"
return
fi
# pvesm status: name, type, status, ... sometimes usage % in output; try to get numeric usage
if echo "$out" | grep -q '100%'; then
echo "FAIL (thin pool 100% full)"
echo "FAIL (storage 100% full)"
((FAILED++)) || true
return
fi
# Check for high usage (e.g. 85% or more) - common format has percent in line
pct=$(echo "$out" | sed -n 's/.*\([0-9]\{2,3\}\)%.*/\1/p' | head -1)
if [ -n "$pct" ] && [ "$pct" -ge 85 ] 2>/dev/null; then
# Parse percentages: from "45%" or "85.2" (lvs data_percent) or "100.0"
pct=$(echo "$out" | sed -n 's/.*\([0-9]\{2,3\}\)%.*/\1/p'; echo "$out" | grep -oE '[0-9]{2,3}\.[0-9]+|[0-9]{2,3}' | sort -n | tail -1)
pct=$(echo "$pct" | sort -n | tail -1)
local pct_int
pct_int=$(echo "$pct" | cut -d. -f1)
[ -n "$pct_int" ] && [ "$pct_int" -gt "${STORAGE_MAX_PCT:-0}" ] 2>/dev/null && STORAGE_MAX_PCT=$pct_int
if [ -n "$pct_int" ] && [ "$pct_int" -ge 100 ] 2>/dev/null; then
echo "FAIL (storage 100% full)"
((FAILED++)) || true
elif [ -n "$pct_int" ] && [ "$pct_int" -ge 95 ] 2>/dev/null; then
echo "FAIL (usage ${pct}% >= 95%)"
((FAILED++)) || true
elif [ -n "$pct_int" ] && [ "$pct_int" -ge 85 ] 2>/dev/null; then
echo "WARN (usage ${pct}% >= 85%)"
# Optional: increment FAILED only at 100%; for now just warn at 85%
else
echo "OK"
fi
}
# [138a] Weekly: thin pool usage on all Proxmox hosts (r630-02, r630-01, ml110).
check_thin_pool_r630_02() {
check_thin_pool_one_host "${PROXMOX_R630_02}" "r630-02"
check_thin_pool_one_host "${PROXMOX_R630_01}" "r630-01"
check_thin_pool_one_host "${PROXMOX_ML110}" "ml110"
}
# Write metric file for alerting (FAILED count, timestamp). Optional.
write_metric_file() {
[ -z "${MAINTENANCE_METRIC_FILE}" ] && return
@@ -160,16 +180,45 @@ write_metric_file() {
mv "${MAINTENANCE_METRIC_FILE}.$$" "${MAINTENANCE_METRIC_FILE}"
}
# [A7] Write storage metric file (max thin pool % and timestamp) for external alerting.
STORAGE_METRIC_FILE="${STORAGE_METRIC_FILE:-$PROJECT_ROOT/logs/storage-growth/last_run.metric}"
write_storage_metric_file() {
[ -z "${STORAGE_MAX_PCT}" ] && return
mkdir -p "$(dirname "$STORAGE_METRIC_FILE")"
echo "storage_max_pct ${STORAGE_MAX_PCT}" > "${STORAGE_METRIC_FILE}.$$"
echo "storage_metric_timestamp $(date +%s)" >> "${STORAGE_METRIC_FILE}.$$"
mv "${STORAGE_METRIC_FILE}.$$" "$STORAGE_METRIC_FILE"
}
echo "=== Maintenance checks ($MODE) $(date -Iseconds) ==="
CHECK_DISK_SCRIPT="${PROJECT_ROOT}/scripts/maintenance/check-disk-all-vmids.sh"
case "$MODE" in
daily)
check_explorer_sync
check_explorer_indexer_lag
check_rpc
# [A5] In-CT disk check (WARN 85%, CRIT 95% on root /)
if [ -x "$CHECK_DISK_SCRIPT" ]; then
echo "[138b] In-CT disk (root /)..."
bash "$CHECK_DISK_SCRIPT" 2>&1 | while IFS= read -r line; do echo " $line"; done
fi
;;
weekly)
check_config_api
check_thin_pool_r630_02
# [A8] Weekly fstrim in running CTs (reclaim thin pool space)
FSTRIM_SCRIPT="${PROJECT_ROOT}/scripts/maintenance/fstrim-all-running-ct.sh"
if [ -x "$FSTRIM_SCRIPT" ]; then
echo "[138c] fstrim running CTs..."
bash "$FSTRIM_SCRIPT" 2>&1 | while IFS= read -r line; do echo " $line"; done
fi
# [A10] Journal vacuum (keep last 7d) in key CTs
JOURNAL_SCRIPT="${PROJECT_ROOT}/scripts/maintenance/journal-vacuum-key-ct.sh"
if [ -x "$JOURNAL_SCRIPT" ]; then
echo "[138d] journal vacuum key CTs..."
bash "$JOURNAL_SCRIPT" 2>&1 | while IFS= read -r line; do echo " $line"; done
fi
echo "[138] Review explorer logs: pct exec 5000 -- journalctl -u blockscout -n 200 --no-pager (from root@${PROXMOX_R630_02})"
;;
all)
@@ -187,5 +236,6 @@ case "$MODE" in
esac
write_metric_file
[ "$MODE" = "weekly" ] || [ "$MODE" = "all" ] && write_storage_metric_file
echo "=== Done (failed: $FAILED) ==="
[[ $FAILED -eq 0 ]]

View File

@@ -0,0 +1,145 @@
#!/usr/bin/env bash
# Deep dive: diagnose and fix every 502 from E2E routing.
# For each known backend (domain → IP:port), SSH to Proxmox, check container + port, fix.
#
# Usage: ./scripts/maintenance/diagnose-and-fix-502s-via-ssh.sh [--dry-run] [--diagnose-only]
# Requires: SSH to r630-01, r630-02, ml110 (key-based).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional env/config libs (may define PROXMOX_HOST_* and get_host_for_vmid).
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Fix: host IPs must be assigned BEFORE the HYBX fallback below. The original
# referenced $R630_01 here before it was set, which aborts under `set -u`
# ("unbound variable") whenever load-project-env.sh is unavailable.
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
# HYBX RPC 2503/2504/2505 live on the host get_host_for_vmid reports (ml110
# per that mapping) when the env lib is loaded; otherwise fall back to r630-01.
type get_host_for_vmid &>/dev/null && HYBX_HOST="$(get_host_for_vmid 2503)" || HYBX_HOST="$R630_01"
DRY_RUN=false
DIAGNOSE_ONLY=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true; [[ "$a" == "--diagnose-only" ]] && DIAGNOSE_ONLY=true; done
# Table: domain | backend_ip | backend_port | vmid | host | description
# DBIS (r630-01)
# rpc-http-prv (r630-01)
# MIM4U www (r630-02)
# Alltra/HYBX RPC (r630-01 per BESU_NODES)
# Cacti-alltra/hybx - IPs .177 and .251 (VMID TBD)
BACKENDS=(
"dbis-admin.d-bis.org|192.168.11.130|80|10130|$R630_01|dbis-frontend nginx"
"secure.d-bis.org|192.168.11.130|80|10130|$R630_01|dbis-frontend nginx"
"dbis-api.d-bis.org|192.168.11.155|3000|10150|$R630_01|dbis-api node"
"dbis-api-2.d-bis.org|192.168.11.156|3000|10151|$R630_01|dbis-api node"
"rpc-http-prv.d-bis.org|192.168.11.211|8545|2101|$R630_01|besu RPC"
"www.mim4u.org|192.168.11.37|80|7810|$R630_02|mim-web nginx"
"rpc-alltra.d-bis.org|192.168.11.172|8545|2500|$R630_01|besu alltra"
"rpc-alltra-2.d-bis.org|192.168.11.173|8545|2501|$R630_01|besu alltra"
"rpc-alltra-3.d-bis.org|192.168.11.174|8545|2502|$R630_01|besu alltra"
"rpc-hybx.d-bis.org|192.168.11.246|8545|2503|${HYBX_HOST:-$R630_01}|besu hybx"
"rpc-hybx-2.d-bis.org|192.168.11.247|8545|2504|${HYBX_HOST:-$R630_01}|besu hybx"
"rpc-hybx-3.d-bis.org|192.168.11.248|8545|2505|${HYBX_HOST:-$R630_01}|besu hybx"
"cacti-alltra.d-bis.org|192.168.11.177|80|5201|$R630_02|cacti web"
"cacti-hybx.d-bis.org|192.168.11.251|80|5202|$R630_02|cacti web"
)
# run_ssh HOST CMD — run CMD as root on HOST over SSH (10 s connect timeout;
# host-key checking disabled: LAN-only automation hosts).
run_ssh() {
  local target="root@$1"
  ssh -o StrictHostKeyChecking=no -o ConnectTimeout=10 "$target" "$2"
}
# Colored log helpers; printf '%b' expands the ANSI escapes like `echo -e`.
log_info() { printf '%b\n' "\033[0;34m[INFO]\033[0m $1"; }
log_ok()   { printf '%b\n' "\033[0;32m[✓]\033[0m $1"; }
log_warn() { printf '%b\n' "\033[0;33m[⚠]\033[0m $1"; }
log_err()  { printf '%b\n' "\033[0;31m[✗]\033[0m $1"; }
echo ""
echo "=== 502 deep dive: diagnose and fix each backend ==="
echo " dry-run=$DRY_RUN diagnose-only=$DIAGNOSE_ONLY"
echo ""
# Main loop: for each backend, (1) resolve host/VMID (scanning all nodes when
# unknown), (2) ensure the CT is running, (3) probe the service port via curl
# run ON the Proxmox host, (4) best-effort service start unless --dry-run or
# --diagnose-only.
for line in "${BACKENDS[@]}"; do
  IFS='|' read -r domain ip port vmid host desc <<< "$line"
  [[ -z "$domain" ]] && continue
  log_info "--- $domain$ip:$port ($desc) ---"
  if [[ -z "$host" ]]; then
    # Unknown owner: find the CT whose first IP matches, across all three nodes.
    log_warn " No host; will try to discover VMID by IP on r630-01/r630-02/ml110"
    for h in "$R630_01" "$R630_02" "$ML110"; do
      run_ssh "$h" "echo OK" &>/dev/null || continue
      list=$(run_ssh "$h" "pct list 2>/dev/null | awk 'NR>1{print \$1}'" 2>/dev/null || true)
      for v in $list; do
        cip=$(run_ssh "$h" "pct exec $v -- hostname -I 2>/dev/null | awk '{print \$1}'" 2>/dev/null || true)
        if [[ "$cip" == "$ip" ]]; then
          vmid=$v
          host=$h
          log_info " Found VMID $vmid on $host"
          break 2
        fi
      done
    done
    if [[ -z "$host" ]]; then
      log_warn " Could not find container for $ip; skipping"
      echo ""
      continue
    fi
  fi
  if ! run_ssh "$host" "echo OK" &>/dev/null; then
    log_warn " Cannot SSH to $host"
    echo ""
    continue
  fi
  status=$(run_ssh "$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
  # If a HYBX CT (2503-2505) is missing on ML110, check whether it actually
  # lives on r630-01 (the nodes have moved between hosts before).
  if [[ -z "$status" || "$status" == "missing" ]] && [[ "$vmid" == "2503" || "$vmid" == "2504" || "$vmid" == "2505" ]] && [[ "$host" == "$ML110" ]]; then
    alt_status=$(run_ssh "$R630_01" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
    if [[ "$alt_status" == "running" ]]; then
      host="$R630_01"
      status="running"
    fi
  fi
  if [[ "$status" != "running" ]]; then
    log_warn " Container $vmid status: ${status:-empty} (host $host)"
    if [[ "$DRY_RUN" != true && "$DIAGNOSE_ONLY" != true ]]; then
      run_ssh "$host" "pct start $vmid" 2>/dev/null && log_ok " Started $vmid" || log_err " Failed to start $vmid"
    fi
    echo ""
    continue
  fi
  # Check if port is listening (from host: curl to container IP)
  code=$(run_ssh "$host" "curl -s -o /dev/null -w '%{http_code}' --connect-timeout 2 http://${ip}:${port}/ 2>/dev/null" || echo "000")
  code=$(echo "$code" | tr -d '\r\n' | head -c 3)
  if [[ "$code" == "000" || "$code" == "" ]]; then
    # Try JSON-RPC for 8545 (Besu replies to POST eth_chainId, not GET /)
    if [[ "$port" == "8545" ]]; then
      body=$(run_ssh "$host" "curl -s -X POST -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' --connect-timeout 2 http://${ip}:${port}/ 2>/dev/null" || true)
      if echo "$body" | grep -q "result"; then
        log_ok " Port $port responds (JSON-RPC)"
      else
        log_warn " Port $port not responding from $host"
        listening=$(run_ssh "$host" "pct exec $vmid -- ss -tlnp 2>/dev/null | head -20" 2>/dev/null || true)
        [[ -n "$listening" ]] && echo " Listening in CT: $listening"
        if [[ "$DIAGNOSE_ONLY" != true && "$DRY_RUN" != true ]]; then
          run_ssh "$host" "pct exec $vmid -- systemctl start besu 2>/dev/null" && log_ok " Started besu in $vmid" || true
          echo " (Besu may take 3060s to bind; re-run script to verify)"
        fi
      fi
    else
      log_warn " Port $port not responding (curl got $code)"
      # Show what is listening inside the CT
      listening=$(run_ssh "$host" "pct exec $vmid -- ss -tlnp 2>/dev/null | head -20" 2>/dev/null || true)
      [[ -n "$listening" ]] && echo " Listening in CT: $listening"
      if [[ "$DIAGNOSE_ONLY" != true && "$DRY_RUN" != true ]]; then
        # Best-effort: try every plausible unit; failures are intentionally ignored.
        run_ssh "$host" "pct exec $vmid -- systemctl start nginx 2>/dev/null" || true
        run_ssh "$host" "pct exec $vmid -- systemctl start apache2 2>/dev/null" || true
        run_ssh "$host" "pct exec $vmid -- systemctl start dbis-api 2>/dev/null" || run_ssh "$host" "pct exec $vmid -- systemctl start node 2>/dev/null" || true
        sleep 2
        code2=$(run_ssh "$host" "curl -s -o /dev/null -w '%{http_code}' --connect-timeout 3 http://${ip}:${port}/ 2>/dev/null" || echo "000")
        code2=$(echo "$code2" | tr -d '\r\n' | head -c 3)
        [[ "$code2" != "000" && -n "$code2" ]] && log_ok " After start: $ip:$port responds (HTTP $code2)"
      fi
    fi
  else
    log_ok " $ip:$port responds (HTTP $code)"
  fi
  echo ""
done
log_ok "Done. Re-run E2E: ./scripts/verify/verify-end-to-end-routing.sh"
echo ""

View File

@@ -0,0 +1,70 @@
#!/usr/bin/env bash
# Collect RPC diagnostics for VMIDs 2101 and 2500-2505: listening ports and Besu journal.
# Run from project root. Requires SSH to r630-01 (and ml110 if 2503-2505 are there).
# Output is suitable for piping to a file or tee.
#
# Usage: ./scripts/maintenance/diagnose-rpc-502s.sh
# See: docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional PROXMOX_HOST_* / PROXMOX_* overrides; missing file is tolerated.
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_01="${PROXMOX_HOST_R630_01:-${PROXMOX_R630_01:-192.168.11.11}}"
ML110="${PROXMOX_HOST_ML110:-${PROXMOX_ML110:-192.168.11.10}}"
# Deliberately unquoted at use sites (`ssh $SSH_OPTS`): word-splits into two -o options.
SSH_OPTS="-o ConnectTimeout=8 -o StrictHostKeyChecking=accept-new"
# run HOST CMD — root SSH; on failure prints a placeholder line instead of
# aborting the script (keeps the report going under set -e).
run() { ssh $SSH_OPTS "root@$1" "$2" 2>/dev/null || echo "(command failed or host unreachable)"; }
# VMID -> host (2503-2505 may be on ml110 or r630-01)
# get_host VMID — print the Proxmox host IP expected to own VMID.
# All listed RPC containers currently default to r630-01; for the HYBX nodes
# (2503-2505) the main loop itself falls back to ml110 when they are not
# running on r630-01.
get_host() {
  local vmid="$1"
  if [[ "$vmid" == 2503 || "$vmid" == 2504 || "$vmid" == 2505 ]]; then
    # HYBX nodes: same default; caller probes ml110 as needed.
    echo "$R630_01"
  else
    echo "$R630_01"
  fi
}
echo "=============================================="
echo "RPC 502 diagnostics — $(date -Iseconds)"
echo "=============================================="
echo ""
# For each RPC CT: confirm it is running (falling back to ml110 for the HYBX
# nodes 2503-2505), then dump listening sockets, Besu units, and recent journal.
for vmid in 2101 2500 2501 2502 2503 2504 2505; do
  host=$(get_host "$vmid")
  status=$(run "$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" || echo "unknown")
  echo "--- VMID $vmid @ $host (status: $status) ---"
  if [[ "$status" != "running" ]]; then
    # If on r630 and not running, try ml110 for 2503-2505
    if [[ "$vmid" =~ ^250[345]$ ]] && [[ "$host" == "$R630_01" ]]; then
      status2=$(run "$ML110" "pct status $vmid 2>/dev/null | awk '{print \$2}'" || echo "")
      if [[ "$status2" == "running" ]]; then
        host="$ML110"
        status="$status2"
        echo " (found on $ML110)"
      fi
    fi
    if [[ "$status" != "running" ]]; then
      echo " Container not running. Skip."
      echo ""
      continue
    fi
  fi
  echo " Listening ports (ss -tlnp):"
  run "$host" "pct exec $vmid -- ss -tlnp 2>/dev/null" | sed 's/^/ /'
  echo " Besu service (systemctl list-units):"
  run "$host" "pct exec $vmid -- systemctl list-units --type=service --no-legend 2>/dev/null | grep -iE besu" | sed 's/^/ /'
  # Dump journals for both plausible unit names; a missing unit prints nothing useful.
  for unit in besu-rpc besu; do
    echo " journalctl -u $unit -n 25:"
    run "$host" "pct exec $vmid -- journalctl -u $unit -n 25 --no-pager 2>/dev/null" | sed 's/^/ /'
  done
  echo ""
done
echo "=============================================="
echo "If 8545 is not in ss -tlnp, Besu is not binding. Check journal for genesis/nodekey/config errors."
echo "Then run: ./scripts/besu/fix-all-besu-nodes.sh (optionally --no-restart first)"
echo "=============================================="

View File

@@ -0,0 +1,63 @@
#!/usr/bin/env bash
# SSH to each Proxmox host and gather VM/container health: status, IPs, reachability of 192.168.11.167 (NPMplus).
# Use when the host (e.g. 192.168.11.167) is not reachable from your machine.
#
# Usage (from proxmox repo root):
# bash scripts/maintenance/diagnose-vm-health-via-proxmox-ssh.sh
# PROXMOX_HOST=192.168.11.11 bash scripts/maintenance/diagnose-vm-health-via-proxmox-ssh.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional host-IP overrides; missing file is tolerated via || true.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Deliberately unquoted at use sites so it splits into separate -o options.
SSH_OPTS="-o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new"
# $1 (optional): IP whose reachability is probed from every Proxmox host.
TARGET_IP="${1:-192.168.11.167}"
NPMPLUS_VMID="${NPMPLUS_VMID:-10233}"
HOSTS=("${PROXMOX_HOST_ML110:-192.168.11.10}" "${PROXMOX_HOST_R630_01:-192.168.11.11}" "${PROXMOX_HOST_R630_02:-192.168.11.12}")
# PROXMOX_HOST env var restricts the scan to a single host.
[[ -n "${PROXMOX_HOST:-}" ]] && HOSTS=("$PROXMOX_HOST")
echo ""
echo "====== VM/container health via Proxmox SSH (target: $TARGET_IP) ======"
echo "Hosts: ${HOSTS[*]}"
echo ""
# Per host: SSH reachability, CT inventory with static IPs, ping to TARGET_IP,
# and — when the NPMplus CT lives on that host — its config and admin port 81.
for HOST in "${HOSTS[@]}"; do
  echo "-------- Host: $HOST --------"
  if ! ssh $SSH_OPTS root@"$HOST" "echo SSH_OK" 2>/dev/null; then
    echo "[X] Cannot SSH to $HOST"
    continue
  fi
  echo "[OK] SSH to $HOST"
  echo "Containers (pct list):"
  ssh $SSH_OPTS root@"$HOST" "pct list 2>/dev/null" || true
  echo "Containers with 192.168.11.x IPs:"
  # Remote one-liner (runs entirely on the Proxmox host): for each VMID print
  # its first configured static IP, hostname and status.
  ssh $SSH_OPTS root@"$HOST" 'for vmid in $(pct list 2>/dev/null | awk "NR>1 {print \$1}"); do ip=$(pct config $vmid 2>/dev/null | grep -oE "ip[0-9]+=[0-9.]+" | head -1 | cut -d= -f2); [ -n "$ip" ] && echo " VMID $vmid $(pct config $vmid 2>/dev/null | grep hostname | cut -d: -f2) $(pct status $vmid 2>/dev/null | awk "{print \$2}") $ip"; done' 2>/dev/null || true
  echo "From $HOST ping $TARGET_IP:"
  ssh $SSH_OPTS root@"$HOST" "ping -c 1 -W 2 $TARGET_IP 2>&1" || true
  NPMPLUS_STATUS=$(ssh $SSH_OPTS root@"$HOST" "pct status $NPMPLUS_VMID 2>/dev/null" || true)
  if echo "$NPMPLUS_STATUS" | grep -q running; then
    echo "NPMplus (VMID $NPMPLUS_VMID) on this host - running. Config (net/hostname):"
    ssh $SSH_OPTS root@"$HOST" "pct config $NPMPLUS_VMID 2>/dev/null | grep -E '^net|^hostname'" || true
    echo "Port 81 inside container:"
    ssh $SSH_OPTS root@"$HOST" "pct exec $NPMPLUS_VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 2 http://127.0.0.1:81 2>/dev/null || echo 'failed'" || true
  elif echo "$NPMPLUS_STATUS" | grep -q stopped; then
    echo "[!] NPMplus (VMID $NPMPLUS_VMID) on this host is STOPPED. Start with: ssh root@$HOST 'pct start $NPMPLUS_VMID'"
  fi
  echo ""
done
echo "-------- NPMplus full verify on r630-01 --------"
NPM_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# Stream the verify script to the host over stdin when present in the repo.
if [ -f "${PROJECT_ROOT}/scripts/verify/verify-npmplus-running-and-network.sh" ]; then
  ssh $SSH_OPTS root@"$NPM_HOST" 'bash -s' < "${PROJECT_ROOT}/scripts/verify/verify-npmplus-running-and-network.sh" 2>&1 || true
else
  echo "Run: ssh root@$NPM_HOST 'bash -s' < scripts/verify/verify-npmplus-running-and-network.sh"
fi
echo ""
echo "====== Summary: if $TARGET_IP is unreachable, check NPMplus (10233) is running on $NPM_HOST, has IP .167/.166, gateway .1 ======"
View File

@@ -0,0 +1,40 @@
#!/usr/bin/env bash
# Ensure web/API services inside DBIS containers (10130, 10150, 10151) are running.
# Fixes 502 when containers are up but nginx or app inside is stopped.
#
# Usage: ./scripts/maintenance/ensure-dbis-services-via-ssh.sh [--dry-run]
# Env: PROXMOX_HOST_R630_01 (default 192.168.11.11)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Pull optional project env; absence is tolerated.
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# --dry-run: only print intended actions.
DRY_RUN=false
case "${1:-}" in
  --dry-run) DRY_RUN=true ;;
esac
PROXMOX_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# Colored log helpers; printf '%b' expands ANSI escapes like `echo -e`.
log_info() { printf '%b\n' "\033[0;34m[INFO]\033[0m $1"; }
log_ok()   { printf '%b\n' "\033[0;32m[✓]\033[0m $1"; }
log_warn() { printf '%b\n' "\033[0;33m[⚠]\033[0m $1"; }
# run_ssh CMD... — run the command as root on the Proxmox host.
run_ssh() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "$@"; }
echo ""
echo "=== Ensure DBIS container services (fix 502) ==="
echo " Host: $PROXMOX_HOST dry-run=$DRY_RUN"
echo ""
# Per DBIS CT: in dry-run only announce; otherwise skip non-running CTs and
# best-effort start nginx + node (units may not exist — failures are ignored).
for vmid in 10130 10150 10151; do
  if [[ "$DRY_RUN" == true ]]; then
    log_info "Would ensure nginx/node in VMID $vmid"
    continue
  fi
  status=$(run_ssh "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
  [[ "$status" != "running" ]] && { log_warn "VMID $vmid not running"; continue; }
  run_ssh "pct exec $vmid -- systemctl start nginx 2>/dev/null" || true
  run_ssh "pct exec $vmid -- systemctl start node 2>/dev/null" || true
  log_ok "VMID $vmid services started"
done
echo ""

View File

@@ -0,0 +1,112 @@
#!/usr/bin/env bash
# Fix all 502 backends using all means: DBIS (nginx + dbis-api), Besu (2101 + 2500-2505), Cacti (nginx).
# Run from project root. Requires SSH to r630-01, r630-02.
#
# Usage: ./scripts/maintenance/fix-all-502s-comprehensive.sh [--dry-run]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional env / IP map; both tolerated when absent.
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
# run HOST CMD — in dry-run print the (truncated) command; otherwise root SSH.
# NOTE(review): in dry-run run() always returns 0, so `run ... && continue`
# chains below short-circuit and the dry-run output omits later planned
# actions — confirm this is acceptable before relying on dry-run output.
run() {
  if $DRY_RUN; then echo -e "\033[0;36m[DRY-RUN]\033[0m Would run on $1: ${2:0:80}..."; return 0; fi
  ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$1" "$2"
}
log() { echo -e "\033[0;34m[FIX]\033[0m $1"; }
ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
echo ""
echo "=== Fix all 502 backends (comprehensive) ==="
echo " dry-run=$DRY_RUN"
echo ""
# --- 10130 DBIS Frontend: ensure port 80 served (persistent with setsid) ---
# Serves a placeholder page with python http.server; setsid detaches it from
# the pct exec session so it survives the SSH call.
log "10130 (dbis-admin/secure): ensure port 80 served..."
if run "$R630_01" "pct status 10130 2>/dev/null | awk '{print \$2}'" 2>/dev/null | grep -q running; then
  run "$R630_01" "pct exec 10130 -- pkill -f 'python3 -m http.server' 2>/dev/null" || true
  run "$R630_01" "pct exec 10130 -- mkdir -p /tmp/dbis-frontend/dist" 2>/dev/null || true
  run "$R630_01" "pct exec 10130 -- sh -c 'echo \"<html><body>DBIS</body></html>\" > /tmp/dbis-frontend/dist/index.html'" 2>/dev/null || true
  run "$R630_01" "pct exec 10130 -- sh -c 'cd /tmp/dbis-frontend/dist && nohup setsid python3 -m http.server 80 --bind 0.0.0.0 >>/tmp/http.log 2>&1 </dev/null &'" 2>/dev/null || true
  sleep 3
  # Verify from inside the CT first, then from the Proxmox host as fallback.
  code=$(run "$R630_01" "pct exec 10130 -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 2 http://127.0.0.1:80/ 2>/dev/null" 2>/dev/null || echo "000")
  if [[ "$code" == "200" ]] || [[ "$code" == "301" ]]; then ok "10130 in-CT curl 127.0.0.1:80 = $code"; else
    code2=$(run "$R630_01" "curl -s -o /dev/null -w '%{http_code}' --connect-timeout 3 http://192.168.11.130:80/ 2>/dev/null" 2>/dev/null || echo "000")
    [[ "$code2" == "200" ]] && ok "10130 host->130:80 = $code2" || warn "10130 not responding (in-CT=$code host=$code2)"
  fi
else
  warn "10130 not running"
fi
# --- 10150, 10151 DBIS API: dbis-api or stub on 3000 ---
# Preference order: real dbis-api unit, generic node unit, then a python stub
# answering {"status":"ok"} so the proxy stops returning 502.
for v in 10150 10151; do
  log "$v (dbis-api): start API or stub on 3000..."
  if run "$R630_01" "pct status $v 2>/dev/null | awk '{print \$2}'" 2>/dev/null | grep -q running; then
    run "$R630_01" "pct exec $v -- systemctl start dbis-api 2>/dev/null" && ok "$v dbis-api started" && continue
    run "$R630_01" "pct exec $v -- systemctl start node 2>/dev/null" && ok "$v node started" && continue
    run "$R630_01" "pct exec $v -- pkill -f 'python3 -m http.server 3000' 2>/dev/null" || true
    run "$R630_01" "pct exec $v -- mkdir -p /tmp/api-stub" 2>/dev/null || true
    run "$R630_01" "pct exec $v -- sh -c 'echo \"{\\\"status\\\":\\\"ok\\\"}\" > /tmp/api-stub/health.json'" 2>/dev/null || true
    run "$R630_01" "pct exec $v -- sh -c 'cd /tmp/api-stub && nohup setsid python3 -m http.server 3000 --bind 0.0.0.0 >>/tmp/api-stub.log 2>&1 </dev/null &'" 2>/dev/null || true
    sleep 2
    code=$(run "$R630_01" "pct exec $v -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 2 http://127.0.0.1:3000/ 2>/dev/null" 2>/dev/null || echo "000")
    [[ "$code" == "200" ]] && ok "$v stub on 3000 (in-CT=$code)" || ok "$v stub started on 3000"
  fi
done
# --- 2101 Core RPC: ensure nodekey then fix ---
# A missing nodekey prevents Besu from starting; generate one if absent.
log "2101 (rpc-http-prv): ensure nodekey and fix Besu..."
if run "$R630_01" "pct status 2101 2>/dev/null | awk '{print \$2}'" 2>/dev/null | grep -q running; then
  run "$R630_01" "pct exec 2101 -- sh -c 'mkdir -p /data/besu; [ -f /data/besu/nodekey ] || [ -f /data/besu/key ] || openssl rand -hex 32 > /data/besu/nodekey'" 2>/dev/null || true
fi
if $DRY_RUN; then log "Would run fix-core-rpc-2101.sh"; else "${SCRIPT_DIR}/fix-core-rpc-2101.sh" 2>/dev/null && ok "2101 fix run" || warn "2101 fix had issues"; fi
# --- 2500-2505 Alltra/HYBX RPC: ensure nodekey then start besu ---
for v in 2500 2501 2502 2503 2504 2505; do
  # Resolve owning host via get_host_for_vmid when available; if the CT is not
  # running on ml110, fall back to r630-01 (nodes have migrated before).
  host="$R630_01"
  type get_host_for_vmid &>/dev/null && host="$(get_host_for_vmid "$v" 2>/dev/null)" || true
  [[ -z "$host" ]] && host="$R630_01"
  status=$(run "$host" "pct status $v 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
  if [[ "$status" != "running" ]] && [[ "$host" == "192.168.11.10" ]]; then
    status=$(run "$R630_01" "pct status $v 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
    [[ "$status" == "running" ]] && host="$R630_01"
  fi
  log "$v (rpc-alltra/hybx): nodekey + Besu on $host..."
  if [[ "$status" == "running" ]]; then
    run "$host" "pct exec $v -- sh -c 'mkdir -p /data/besu; [ -f /data/besu/nodekey ] || [ -f /data/besu/key ] || openssl rand -hex 32 > /data/besu/nodekey'" 2>/dev/null || true
    run "$host" "pct exec $v -- systemctl start besu-rpc 2>/dev/null" || true
    run "$host" "pct exec $v -- systemctl start besu 2>/dev/null" && ok "$v besu started" || warn "$v besu start failed"
  fi
done
# --- Cacti 5200, 5201, 5202: on r630-02 (migrated 2026-02-15), serve port 80 ---
# Try real web servers first; fall back to a python placeholder on port 80.
for v in 5200 5201 5202; do
  log "$v (cacti): ensure port 80 served..."
  if run "$R630_02" "pct status $v 2>/dev/null | awk '{print \$2}'" 2>/dev/null | grep -q running; then
    run "$R630_02" "pct exec $v -- systemctl start apache2 2>/dev/null" || true
    run "$R630_02" "pct exec $v -- systemctl start nginx 2>/dev/null" || true
    run "$R630_02" "pct exec $v -- pkill -f 'python3 -m http.server' 2>/dev/null" || true
    run "$R630_02" "pct exec $v -- mkdir -p /tmp/cacti-www" 2>/dev/null || true
    run "$R630_02" "pct exec $v -- sh -c 'echo \"<html><body>Cacti</body></html>\" > /tmp/cacti-www/index.html'" 2>/dev/null || true
    run "$R630_02" "pct exec $v -- sh -c 'cd /tmp/cacti-www && nohup python3 -m http.server 80 --bind 0.0.0.0 >>/tmp/cacti-http.log 2>&1 &'" 2>/dev/null && sleep 1 && ok "$v web on 80" || warn "$v failed"
  fi
done
echo ""
if $DRY_RUN; then
  log "Would wait 90s for Besu RPC to bind (skipped in dry-run)"
else
  log "Waiting 90s for Besu RPC to bind..."
  sleep 90
fi
echo ""
ok "Done. Run: ./scripts/verify/verify-end-to-end-routing.sh"
echo ""

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env bash
# Fix Core Besu RPC on VMID 2101 (Chain 138 admin/deploy — RPC_URL_138).
# Starts container if stopped, starts/restarts Besu service, verifies RPC.
#
# Usage: ./scripts/maintenance/fix-core-rpc-2101.sh [--dry-run] [--restart-only]
# --dry-run Print actions only; do not run.
# --restart-only Skip pct start; only restart Besu service inside CT.
# Requires: SSH to r630-01 (key-based). Run from LAN or VPN.
#
# See: docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md (rpc-http-prv)
# docs/04-configuration/RPC_ENDPOINTS_MASTER.md (2101 = Core RPC)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional PROXMOX_* / RPC_* overrides; missing file is tolerated.
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VMID=2101
HOST="${PROXMOX_HOST_R630_01:-${PROXMOX_R630_01:-192.168.11.11}}"
RPC_IP="${RPC_CORE_1:-192.168.11.211}"
RPC_PORT=8545
DRY_RUN=false
RESTART_ONLY=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true; [[ "$a" == "--restart-only" ]] && RESTART_ONLY=true; done
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
log_err() { echo -e "\033[0;31m[✗]\033[0m $1"; }
# run_ssh CMD... — run the command as root on the Proxmox host.
run_ssh() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$HOST" "$@"; }
echo ""
echo "=== Fix Core Besu RPC (VMID $VMID @ $HOST) ==="
echo " RPC: http://${RPC_IP}:${RPC_PORT} dry-run=$DRY_RUN restart-only=$RESTART_ONLY"
echo ""
# 1. SSH check
if ! run_ssh "echo OK" &>/dev/null; then
  log_err "Cannot SSH to $HOST. Run from LAN with key-based auth."
  exit 1
fi
# 2. Container status — start it unless --restart-only forbids that.
status=$(run_ssh "pct status $VMID 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
if [[ "$status" != "running" ]]; then
  if $RESTART_ONLY; then
    log_err "Container $VMID is not running (status: ${status:-missing}). Omit --restart-only to start container first."
    exit 1
  fi
  if $DRY_RUN; then
    log_info "Would run: pct start $VMID (current: $status)"
  else
    log_info "Starting container $VMID..."
    run_ssh "pct start $VMID" && log_ok "Container started" || { log_err "pct start failed"; exit 1; }
    log_info "Waiting 15s for CT to boot..."
    sleep 15
  fi
else
  log_ok "Container $VMID is running"
fi
# Dry-run stops here: the remaining steps all mutate the container.
if $DRY_RUN; then
  log_info "Would: start/restart Besu service in $VMID, then verify eth_chainId at http://${RPC_IP}:${RPC_PORT}"
  echo ""
  exit 0
fi
# 3. Start or restart Besu: prefer an existing unit (besu-rpc first, then
#    besu.service); fall back to a plain `systemctl start besu` otherwise.
svc=$(run_ssh "pct exec $VMID -- bash -c 'systemctl list-units --type=service --no-legend 2>/dev/null | grep -iE \"besu-rpc|besu\\.service\" | head -1 | awk \"{print \\\$1}\"'" 2>/dev/null || echo "")
if [[ -z "$svc" ]]; then
  # No Besu unit visible — best-effort plain start, never abort.
  run_ssh "pct exec $VMID -- systemctl start besu 2>/dev/null" && log_ok "Started besu" || true
else
  # BUGFIX: the old one-liner `restart && log_ok || start && log_ok` parsed as
  # ((restart && log_ok) || start) && log_ok, so a successful restart ALSO
  # logged "Started besu", and a failed fallback start aborted the script
  # under set -e before the RPC verification step. Explicit branches instead:
  if run_ssh "pct exec $VMID -- systemctl restart $svc"; then
    log_ok "Restarted $svc"
  elif run_ssh "pct exec $VMID -- systemctl start besu"; then
    log_ok "Started besu"
  else
    log_warn "Could not restart $svc or start besu; check journalctl in CT $VMID"
  fi
fi
log_info "Waiting 60s for Besu to bind (RPC may take 3090s)..."
sleep 60
# 4. Verify RPC — eth_chainId via curl run ON the Proxmox host (works even when
# the machine running this script has no route to 192.168.11.x).
resp=$(run_ssh "curl -s -X POST -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' --connect-timeout 5 http://${RPC_IP}:${RPC_PORT}/ 2>/dev/null" || true)
if echo "$resp" | grep -q '"result"'; then
  # Extract the hex chain id from the JSON-RPC result field.
  chain_hex=$(echo "$resp" | sed -n 's/.*"result":"\(0x[0-9a-fA-F]*\)".*/\1/p')
  log_ok "RPC responding: eth_chainId = $chain_hex (Chain 138 = 0x8a)"
else
  log_warn "RPC not yet responding. Wait 3060s and re-run or check: pct exec $VMID -- systemctl status besu-rpc; pct exec $VMID -- ss -tlnp | grep 8545"
fi
echo ""
log_ok "Done. Set RPC_URL_138=http://${RPC_IP}:${RPC_PORT} for deploy/scripts."
echo ""

View File

@@ -0,0 +1,103 @@
#!/usr/bin/env bash
# Fix VMID 2101 (rpc-http-prv) Besu crash: NoClassDefFoundError com.sun.jna.Native.
# Reinstalls Besu in the container (clean tarball) to get consistent JNA/libs, then restarts.
#
# Usage: ./scripts/maintenance/fix-rpc-2101-jna-reinstall.sh [--dry-run]
# Requires: SSH to r630-01. Run from project root (LAN).
# If the container has read-only root, the script checks /tmp and /opt writability and exits with
# instructions. Make the CT writable (Proxmox host: remount or CT config) then re-run. See 502_DEEP_DIVE §Read-only CT.
# See: docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional host/IP overrides; missing file is tolerated.
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VMID=2101
HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
RPC_IP="${RPC_CORE_1:-192.168.11.211}"
BESU_VERSION="${BESU_VERSION:-23.10.3}"
# Repo-side files deployed into the CT when present.
GENESIS_SRC="${PROJECT_ROOT}/smom-dbis-138-proxmox/config/genesis.json"
STATIC_SRC="${PROJECT_ROOT}/config/besu-node-lists/static-nodes.json"
PERMS_SRC="${PROJECT_ROOT}/config/besu-node-lists/permissions-nodes.toml"
# Unquoted at use sites so it splits into separate -o options.
SSH_OPTS="-o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new"
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
log_err() { echo -e "\033[0;31m[✗]\033[0m $1"; }
echo ""
echo "=== Fix 2101 Besu JNA (reinstall Besu in CT) ==="
echo " Host: $HOST VMID: $VMID BESU_VERSION: $BESU_VERSION dry-run: $DRY_RUN"
echo ""
# Preconditions: SSH to host, CT running, CT /tmp writable (/opt only warned).
if ! ssh $SSH_OPTS "root@$HOST" "echo OK" &>/dev/null; then
  log_err "Cannot SSH to $HOST. Run from LAN."
  exit 1
fi
status=$(ssh $SSH_OPTS "root@$HOST" "pct status $VMID 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
if [[ "$status" != "running" ]]; then
  log_err "Container $VMID is not running. Start it first: ssh root@$HOST pct start $VMID"
  exit 1
fi
# Require writable /tmp and /opt in CT (install script and JNA need them)
if ! ssh $SSH_OPTS "root@$HOST" "pct exec $VMID -- bash -c 'touch /tmp/.w && rm -f /tmp/.w'" 2>/dev/null; then
  log_err "Container $VMID /tmp is not writable. Make the CT writable (see docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md §Read-only CT) then re-run."
  exit 1
fi
if ! ssh $SSH_OPTS "root@$HOST" "pct exec $VMID -- bash -c 'touch /opt/.w 2>/dev/null && rm -f /opt/.w'" 2>/dev/null; then
  log_warn "Container $VMID /opt may be read-only. Install will need /opt writable. If it fails, make CT writable and re-run."
fi
if $DRY_RUN; then
  log_info "Would: stop besu-rpc, backup /opt/besu, run install-besu-in-ct-standalone.sh (NODE_TYPE=rpc), deploy genesis/node lists, start besu-rpc"
  exit 0
fi
log_info "Stopping Besu in $VMID..."
ssh $SSH_OPTS "root@$HOST" "pct exec $VMID -- systemctl stop besu-rpc.service besu.service 2>/dev/null; true"
log_info "Backing up /opt/besu and reinstalling..."
# Timestamped backup of the old install; `true` keeps set -e happy if absent.
ssh $SSH_OPTS "root@$HOST" "pct exec $VMID -- bash -c 'mv /opt/besu /opt/besu.bak.\$(date +%s) 2>/dev/null; true'"
# Push script to container /tmp (writable; avoid /root which may be read-only)
scp -q $SSH_OPTS "${PROJECT_ROOT}/scripts/install-besu-in-ct-standalone.sh" "root@${HOST}:/tmp/"
ssh $SSH_OPTS "root@$HOST" "pct push $VMID /tmp/install-besu-in-ct-standalone.sh /tmp/install-besu-in-ct-standalone.sh && pct exec $VMID -- env NODE_TYPE=rpc BESU_VERSION=$BESU_VERSION TMPDIR=/tmp bash /tmp/install-besu-in-ct-standalone.sh" || { log_err "Besu install failed"; exit 1; }
log_ok "Besu reinstalled"
# Ensure JNA can write native lib (Besu uses java.io.tmpdir; default /tmp may be ro in some CTs)
ssh $SSH_OPTS "root@$HOST" "pct exec $VMID -- bash -c 'mkdir -p /data/besu/tmp; chown besu:besu /data/besu/tmp; grep -q java.io.tmpdir /etc/systemd/system/besu-rpc.service || sed -i \"s|BESU_OPTS=-Xmx2g -Xms1g|BESU_OPTS=-Xmx2g -Xms1g -Djava.io.tmpdir=/data/besu/tmp|\" /etc/systemd/system/besu-rpc.service; systemctl daemon-reload'"
# Ensure config and data exist; deploy genesis and node lists if present in repo
if [[ -f "$GENESIS_SRC" ]] && [[ -f "$STATIC_SRC" ]] && [[ -f "$PERMS_SRC" ]]; then
  log_info "Deploying genesis and node lists..."
  scp -q $SSH_OPTS "$GENESIS_SRC" "$STATIC_SRC" "$PERMS_SRC" "root@${HOST}:/tmp/"
  ssh $SSH_OPTS "root@$HOST" "pct push $VMID /tmp/genesis.json /etc/besu/genesis.json && pct push $VMID /tmp/static-nodes.json /etc/besu/static-nodes.json && pct push $VMID /tmp/permissions-nodes.toml /etc/besu/permissions-nodes.toml && pct exec $VMID -- chown -R besu:besu /etc/besu"
fi
# Ensure config-rpc.toml exists (standalone script expects it). If 2101 had a different name, copy first existing toml to config-rpc.toml
ssh $SSH_OPTS "root@$HOST" "pct exec $VMID -- bash -c 'if [ ! -f /etc/besu/config-rpc.toml ] && [ -f /etc/besu/config.toml ]; then cp /etc/besu/config.toml /etc/besu/config-rpc.toml; fi; if [ ! -f /etc/besu/config-rpc.toml ]; then echo \"data-path=/data/besu\" > /etc/besu/config-rpc.toml; echo \"genesis-file=/etc/besu/genesis.json\" >> /etc/besu/config-rpc.toml; echo \"network-id=138\" >> /etc/besu/config-rpc.toml; echo \"rpc-http-enabled=true\" >> /etc/besu/config-rpc.toml; echo \"rpc-http-host=0.0.0.0\" >> /etc/besu/config-rpc.toml; echo \"rpc-http-port=8545\" >> /etc/besu/config-rpc.toml; fi; chown besu:besu /etc/besu/config-rpc.toml 2>/dev/null; true'"
# VMID 2101 = Core RPC at 192.168.11.211; ensure p2p-host is correct (not .250 / RPC_ALLTRA_1)
# (quote-splicing: '"$RPC_IP"' closes the remote single-quote, expands locally, reopens it)
ssh $SSH_OPTS "root@$HOST" "pct exec $VMID -- bash -c 'if grep -q \"^p2p-host=\" /etc/besu/config-rpc.toml; then sed -i \"s|^p2p-host=.*|p2p-host=\\\"'"$RPC_IP"'\\\"|\" /etc/besu/config-rpc.toml; else echo \"p2p-host=\\\"'"$RPC_IP"'\\\"\" >> /etc/besu/config-rpc.toml; fi'"
log_info "Set p2p-host=$RPC_IP in config-rpc.toml"
log_info "Starting besu-rpc..."
ssh $SSH_OPTS "root@$HOST" "pct exec $VMID -- systemctl daemon-reload && pct exec $VMID -- systemctl start besu-rpc.service"
log_ok "besu-rpc started. Waiting 45s for RPC to bind..."
sleep 45
# Verify via eth_chainId run from the Proxmox host.
resp=$(ssh $SSH_OPTS "root@$HOST" "curl -s -X POST -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' --connect-timeout 5 http://${RPC_IP}:8545/ 2>/dev/null" || true)
if echo "$resp" | grep -q '"result"'; then
  log_ok "RPC responding: $resp"
else
  log_warn "RPC not yet responding. Check: pct exec $VMID -- journalctl -u besu-rpc -n 30"
fi
echo ""
View File

@@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Run fstrim in all running LXC containers on Proxmox hosts (reclaim thin pool space).
# Usage: ./scripts/maintenance/fstrim-all-running-ct.sh [--dry-run]
# Requires: SSH key-based access to ml110, r630-01, r630-02.
# See: docs/04-configuration/STORAGE_GROWTH_AND_HEALTH.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional PROXMOX_HOST_* overrides; missing file is tolerated.
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
DRY_RUN=0
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=1
# run_ssh HOST CMD — root SSH; never aborts the script (|| true keeps set -e happy).
run_ssh() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$1" "$2" 2>/dev/null || true; }
#######################################
# Trim the root filesystem of every running CT on one Proxmox host.
# Globals:   DRY_RUN (read), run_ssh (helper)
# Arguments: $1 - host IP, $2 - display name
# Outputs:   one status line per container, or a single unreachable note
# Returns:   0 always
#######################################
fstrim_host() {
  local ip="$1" name="$2"
  local running_cts
  running_cts=$(run_ssh "$ip" "pct list 2>/dev/null | awk 'NR>1 && \$2==\"running\" {print \$1}'" || true)
  if [[ -z "$running_cts" ]]; then
    echo "  $name ($ip): no running containers or unreachable"
    return 0
  fi
  local ct
  for ct in $running_cts; do
    if [[ $DRY_RUN -eq 1 ]]; then
      echo "  [dry-run] $name VMID $ct: would run fstrim -v /"
      continue
    fi
    out=$(run_ssh "$ip" "pct exec $ct -- fstrim -v / 2>&1" || true)
    echo "  $name VMID $ct: ${out:-done}"
  done
}
echo "=== fstrim all running CTs (reclaim thin pool space) ==="
[[ $DRY_RUN -eq 1 ]] && echo "(dry-run: no changes)"
echo ""
# Trim each Proxmox node in turn; unreachable hosts just report and continue.
fstrim_host "$ML110" "ml110"
fstrim_host "$R630_01" "r630-01"
fstrim_host "$R630_02" "r630-02"
echo ""
echo "Done. Schedule weekly via cron or run with daily-weekly-checks weekly."

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# Health check for Besu RPC on VMID 2101 (Chain 138 Core RPC).
# Usage: ./scripts/maintenance/health-check-rpc-2101.sh
# Requires: SSH to r630-01. Run from project root (LAN). RPC check uses curl from the Proxmox host.
# Exits 0 when all checks pass, 1 when any check fails.
# See: docs/09-troubleshooting/RPC_NODES_BLOCK_PRODUCTION_FIX.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VMID=2101
HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
RPC_IP="${RPC_CORE_1:-192.168.11.211}"
RPC_PORT=8545
# Best-effort remote exec on the Proxmox host. NOTE: always exits 0 (trailing
# `|| true`), so callers must inspect stdout, never the exit status.
run_ssh() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$HOST" "$@" 2>/dev/null || true; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
log_err() { echo -e "\033[0;31m[✗]\033[0m $1"; }
FAIL=0
echo ""
echo "=== Health check: Besu RPC VMID $VMID (http://${RPC_IP}:${RPC_PORT}) ==="
echo ""
# 1. SSH reachability (run_ssh never fails, so grep the echoed marker instead)
if ! run_ssh "echo OK" | grep -q OK; then
  log_err "Cannot SSH to $HOST. Run from LAN."
  exit 1
fi
# 2. Container status. Because run_ssh always exits 0, an `|| echo missing`
# fallback can never fire; normalise the (possibly empty) output instead.
ct_status=$(run_ssh "pct status $VMID 2>/dev/null | awk '{print \$2}'")
ct_status=${ct_status:-missing}
if [[ "$ct_status" != "running" ]]; then
  log_err "Container $VMID status: ${ct_status}"
  ((FAIL++)) || true
else
  log_ok "Container $VMID is running"
fi
# 3. Service state inside the CT (empty output => treat as inactive)
svc_status=$(run_ssh "pct exec $VMID -- systemctl is-active besu-rpc 2>/dev/null")
svc_status=${svc_status:-inactive}
if [[ "$svc_status" != "active" ]]; then
  log_warn "besu-rpc.service: $svc_status"
  ((FAIL++)) || true
else
  log_ok "besu-rpc.service: active"
fi
# 4. Port 8545 listening inside the CT
if run_ssh "pct exec $VMID -- ss -tlnp 2>/dev/null" | grep -q ":${RPC_PORT} "; then
  log_ok "Port $RPC_PORT listening"
else
  log_warn "Port $RPC_PORT not listening"
  ((FAIL++)) || true
fi
# 5. RPC (curl from host so it works even when runner has no route to 192.168.11.x)
chain_resp=$(run_ssh "curl -s -m 5 -X POST -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' http://${RPC_IP}:${RPC_PORT}/")
if echo "$chain_resp" | grep -q '"result":"0x8a"'; then
  log_ok "RPC eth_chainId: 0x8a (Chain 138)"
else
  log_warn "RPC eth_chainId: no valid response (${chain_resp:-timeout})"
  ((FAIL++)) || true
fi
block_resp=$(run_ssh "curl -s -m 5 -X POST -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}' http://${RPC_IP}:${RPC_PORT}/")
if echo "$block_resp" | grep -q '"result":"0x'; then
  block_hex=$(echo "$block_resp" | sed -n 's/.*"result":"\(0x[0-9a-fA-F]*\)".*/\1/p')
  log_ok "RPC eth_blockNumber: $block_hex"
else
  log_warn "RPC eth_blockNumber: no valid response"
  ((FAIL++)) || true
fi
echo ""
if [[ "${FAIL:-0}" -gt 0 ]]; then
  log_warn "Health check had $FAIL issue(s). Fix: ./scripts/maintenance/fix-core-rpc-2101.sh or see docs/09-troubleshooting/RPC_NODES_BLOCK_PRODUCTION_FIX.md"
  exit 1
fi
log_ok "All checks passed."
echo ""
exit 0

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Run journalctl --vacuum-time=7d in key CTs to limit journal size.
# Usage: ./scripts/maintenance/journal-vacuum-key-ct.sh [--dry-run]
# Requires: SSH to Proxmox hosts. VMIDs: 5000 (Blockscout), 2101 (RPC), 10233/10234 (NPMplus), 2400, 10130, 10150, 10151.
# See: docs/04-configuration/STORAGE_GROWTH_AND_HEALTH.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VACUUM_DAYS="${VACUUM_DAYS:-7}"
DRY_RUN=0
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=1
# Map a VMID to the Proxmox host that currently owns it (defaults match
# get_host_for_vmid in load-project-env; unknown VMIDs fall back to r630-01).
get_host() {
  case "$1" in
    5000|10234|10237|2201|2303|2401) echo "${PROXMOX_HOST_R630_02:-192.168.11.12}";;
    2101|10130|10150|10151|10233)    echo "${PROXMOX_HOST_R630_01:-192.168.11.11}";;
    2400)                            echo "${PROXMOX_HOST_ML110:-192.168.11.10}";;
    *)                               echo "${PROXMOX_HOST_R630_01:-192.168.11.11}";;
  esac
}
KEY_VMIDS="5000 2101 10233 10234 2400 10130 10150 10151"
# Best-effort remote exec; an unreachable host simply yields empty output.
run_ssh() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$1" "$2" 2>/dev/null || true; }
echo "=== Journal vacuum (keep last ${VACUUM_DAYS}d) in key CTs ==="
[[ $DRY_RUN -eq 1 ]] && echo "(dry-run: no changes)"
echo ""
for vmid in $KEY_VMIDS; do
  host=$(get_host "$vmid")
  status=$(run_ssh "$host" "pct status $vmid 2>/dev/null")
  # Skip anything not confirmed running (also covers unreachable hosts).
  case "$status" in
    *running*) ;;
    *) echo " VMID $vmid: skip (not running)"; continue;;
  esac
  if [[ $DRY_RUN -eq 1 ]]; then
    echo " [dry-run] VMID $vmid on $host: would run journalctl --vacuum-time=${VACUUM_DAYS}d"
    continue
  fi
  out=$(run_ssh "$host" "pct exec $vmid -- journalctl --vacuum-time=${VACUUM_DAYS}d 2>&1")
  echo " VMID $vmid: ${out:-done}"
done
echo ""
echo "Done. Schedule weekly (e.g. with daily-weekly-checks weekly or separate cron)."

View File

@@ -0,0 +1,75 @@
#!/usr/bin/env bash
# Make RPC VMIDs (2101, 2500-2505) writable by running e2fsck on their rootfs (fixes read-only remount after ext4 errors).
# SSHs to the Proxmox host (r630-01), stops each CT, runs e2fsck -f -y on the LV, starts the CT.
#
# Usage: ./scripts/maintenance/make-rpc-vmids-writable-via-ssh.sh [--dry-run]
# Run from project root. Requires: SSH to r630-01 (root, key-based).
# See: docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md §Read-only CT
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional host-IP overrides (PROXMOX_HOST_*) from the shared config.
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# RPC VMIDs on r630-01: Core (2101) + Alltra/HYBX (2500-2505)
RPC_VMIDS=(2101 2500 2501 2502 2503 2504 2505)
# Intentionally unquoted at call sites so it word-splits into separate ssh options.
SSH_OPTS="-o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new"
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
echo ""
echo "=== Make RPC VMIDs writable via Proxmox SSH ==="
echo " Host: $HOST VMIDs: ${RPC_VMIDS[*]} dry-run=$DRY_RUN"
echo ""
if ! ssh $SSH_OPTS "root@$HOST" "echo OK" 2>/dev/null; then
  echo "Cannot SSH to $HOST. Run from LAN with key-based auth to root@$HOST."
  exit 1
fi
log_ok "SSH to $HOST OK"
if $DRY_RUN; then
  echo "Would run on $HOST for each VMID: pct stop <vmid>; e2fsck -f -y /dev/pve/vm-<vmid>-disk-0; pct start <vmid>"
  exit 0
fi
for vmid in "${RPC_VMIDS[@]}"; do
  log_info "VMID $vmid: stop, e2fsck, start..."
  status=$(ssh $SSH_OPTS "root@$HOST" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
  if [[ "$status" == "missing" || -z "$status" ]]; then
    log_warn "VMID $vmid not found on $HOST; skip"
    continue
  fi
  # Stop the CT so its rootfs LV is unmounted before fsck runs.
  ssh $SSH_OPTS "root@$HOST" "pct stop $vmid 2>/dev/null || true"
  sleep 2
  # e2fsck returns 1 when it corrected errors; we don't want set -e to exit
  # Assumes the rootfs LV is named /dev/pve/vm-<vmid>-disk-0 (default layout).
  out=$(ssh $SSH_OPTS "root@$HOST" "lvchange -ay /dev/pve/vm-${vmid}-disk-0 2>/dev/null; e2fsck -f -y /dev/pve/vm-${vmid}-disk-0 2>&1" || true)
  echo "$out" | tail -3
  if echo "$out" | grep -q "FILE SYSTEM WAS MODIFIED\|No errors detected"; then
    log_ok "e2fsck done for $vmid"
  elif echo "$out" | grep -q "e2fsck"; then
    # Output mentions e2fsck but no clear verdict — assume it at least ran.
    log_ok "e2fsck run for $vmid"
  else
    log_warn "e2fsck may have failed for $vmid (LV name may differ)"
  fi
  # Start CT (LV stays active; do not run lvchange -an before pct start or the LV may be inactive when the container tries to mount rootfs)
  ssh $SSH_OPTS "root@$HOST" "pct start $vmid 2>/dev/null" || log_warn "pct start $vmid failed"
  sleep 2
  # Quick writability check
  if ssh $SSH_OPTS "root@$HOST" "pct exec $vmid -- touch /tmp/.w 2>/dev/null && pct exec $vmid -- rm -f /tmp/.w 2>/dev/null"; then
    log_ok "VMID $vmid writable"
  else
    log_warn "VMID $vmid /tmp still not writable (may need retry or different fix)"
  fi
done
echo ""
log_ok "Done. Re-run fix/install scripts as needed."
echo ""

View File

@@ -0,0 +1,77 @@
#!/usr/bin/env bash
# Make validator VMIDs (1000-1004) writable by running e2fsck on their rootfs.
# Fixes "Read-only file system" / JNA UnsatisfiedLinkError when Besu tries to write temp files.
# SSHs to r630-01 (1000,1001,1002) and ml110 (1003,1004), stops each CT, e2fsck, starts.
#
# Usage: ./scripts/maintenance/make-validator-vmids-writable-via-ssh.sh [--dry-run]
# Run from project root. Requires SSH to r630-01 and ml110 (root, key-based).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# Fix: the shared config and sibling maintenance scripts define/read
# PROXMOX_HOST_ML110; honour that first, keeping the old PROXMOX_ML110 name
# as a legacy fallback so existing environments keep working.
ML110="${PROXMOX_HOST_ML110:-${PROXMOX_ML110:-192.168.11.10}}"
# Intentionally unquoted at call sites so it word-splits into ssh options.
SSH_OPTS="-o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new"
# Validators: 1000,1001,1002 on r630-01; 1003,1004 on ml110
VALIDATORS=(
  "1000:$R630_01"
  "1001:$R630_01"
  "1002:$R630_01"
  "1003:$ML110"
  "1004:$ML110"
)
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
echo ""
echo "=== Make validator VMIDs writable (e2fsck) ==="
echo " dry-run=$DRY_RUN"
echo ""
for entry in "${VALIDATORS[@]}"; do
  # Each entry is "<vmid>:<host_ip>"
  IFS=':' read -r vmid host <<< "$entry"
  if ! ssh $SSH_OPTS "root@$host" "echo OK" 2>/dev/null; then
    log_warn "Cannot SSH to $host; skip VMID $vmid"
    continue
  fi
  log_info "VMID $vmid @ $host: stop, e2fsck, start..."
  status=$(ssh $SSH_OPTS "root@$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
  if [[ "$status" == "missing" || -z "$status" ]]; then
    log_warn " VMID $vmid not found; skip"
    continue
  fi
  if $DRY_RUN; then
    log_info " [dry-run] would: pct stop $vmid; e2fsck -f -y /dev/pve/vm-${vmid}-disk-0; pct start $vmid"
    continue
  fi
  # Stop the CT so its rootfs LV is unmounted before fsck runs.
  ssh $SSH_OPTS "root@$host" "pct stop $vmid 2>/dev/null || true"
  sleep 2
  # e2fsck exits non-zero when it repaired errors; `|| true` keeps set -e quiet.
  out=$(ssh $SSH_OPTS "root@$host" "lvchange -ay /dev/pve/vm-${vmid}-disk-0 2>/dev/null; e2fsck -f -y /dev/pve/vm-${vmid}-disk-0 2>&1" || true)
  echo "$out" | tail -2
  if echo "$out" | grep -q "FILE SYSTEM WAS MODIFIED\|No errors detected\|e2fsck"; then
    log_ok " e2fsck done for $vmid"
  else
    log_warn " e2fsck may have failed for $vmid (LV name may differ)"
  fi
  ssh $SSH_OPTS "root@$host" "pct start $vmid 2>/dev/null" || log_warn " pct start $vmid failed"
  sleep 2
  # Verify the rootfs is writable again by touching a file inside the CT.
  if ssh $SSH_OPTS "root@$host" "pct exec $vmid -- touch /tmp/.w 2>/dev/null && pct exec $vmid -- rm -f /tmp/.w 2>/dev/null"; then
    log_ok " VMID $vmid writable"
  else
    log_warn " VMID $vmid /tmp may still be read-only"
  fi
done
echo ""
log_ok "Done. Restart validators: bash scripts/fix-all-validators-and-txpool.sh"
log_info "Then: bash scripts/monitoring/monitor-blockchain-health.sh"
echo ""

View File

@@ -0,0 +1,120 @@
#!/usr/bin/env bash
# Migrate one LXC container from r630-01 to r630-02 (backup → copy → restore).
# Use to free space on r630-01's thin pool. Run from project root (LAN); needs SSH to both hosts.
#
# Usage:
#   ./scripts/maintenance/migrate-ct-r630-01-to-r630-02.sh <VMID> [target_storage]
#   ./scripts/maintenance/migrate-ct-r630-01-to-r630-02.sh 5200 thin1
#   ./scripts/maintenance/migrate-ct-r630-01-to-r630-02.sh 5200 thin1 --destroy-source
#
# Options:
#   --dry-run          Print steps only.
#   --destroy-source   After successful start on r630-02, destroy the CT on r630-01 (frees space).
#
# See: docs/03-deployment/MIGRATE_CT_R630_01_TO_R630_02.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SRC_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
DST_HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
# Intentionally unquoted at call sites so it word-splits into ssh options.
SSH_OPTS="-o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new"
TARGET_STORAGE="${TARGET_STORAGE:-thin1}"
DESTROY_SOURCE=false
DRY_RUN=false
VMID=""
# Flags may appear in any position; any purely numeric arg is taken as the VMID.
for arg in "$@"; do
  [[ "$arg" == "--dry-run" ]] && DRY_RUN=true
  [[ "$arg" == "--destroy-source" ]] && DESTROY_SOURCE=true
  [[ "$arg" =~ ^[0-9]+$ ]] && VMID="$arg"
done
# Positional form: $1 = VMID, $2 = target storage (when $2 is not a flag or number).
if [[ -n "${1:-}" && "${1:-}" =~ ^[0-9]+$ ]]; then VMID="$1"; fi
if [[ -n "${2:-}" && "${2:-}" != --* && ! "${2:-}" =~ ^[0-9]+$ ]]; then TARGET_STORAGE="$2"; fi
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
log_err() { echo -e "\033[0;31m[✗]\033[0m $1"; }
if [[ -z "$VMID" ]]; then
  echo "Usage: $0 <VMID> [target_storage] [--dry-run] [--destroy-source]"
  echo " Example: $0 5200 thin1"
  echo " See: docs/03-deployment/MIGRATE_CT_R630_01_TO_R630_02.md"
  exit 1
fi
echo ""
echo "=== Migrate CT $VMID from r630-01 to r630-02 ==="
echo " Source: $SRC_HOST Target: $DST_HOST Storage: $TARGET_STORAGE destroy-source=$DESTROY_SOURCE dry-run=$DRY_RUN"
echo ""
if ! ssh $SSH_OPTS "root@$SRC_HOST" "echo OK" 2>/dev/null; then
  log_err "Cannot SSH to $SRC_HOST. Run from LAN."
  exit 1
fi
if ! ssh $SSH_OPTS "root@$DST_HOST" "echo OK" 2>/dev/null; then
  log_err "Cannot SSH to $DST_HOST. Run from LAN."
  exit 1
fi
log_ok "SSH to both hosts OK"
if $DRY_RUN; then
  echo "Would: stop $VMID on $SRC_HOST; vzdump $VMID; copy backup to $DST_HOST; pct restore $VMID on $DST_HOST; start $VMID; $($DESTROY_SOURCE && echo "pct destroy $VMID on $SRC_HOST")"
  exit 0
fi
# 1. Stop on source
log_info "Stopping $VMID on $SRC_HOST..."
ssh $SSH_OPTS "root@$SRC_HOST" "pct stop $VMID" 2>/dev/null || true
ssh $SSH_OPTS "root@$SRC_HOST" "pct status $VMID" || true
# 2. Backup on source
DUMPDIR="/var/lib/vz/dump"
log_info "Creating backup on $SRC_HOST (may take several minutes)..."
ssh $SSH_OPTS "root@$SRC_HOST" "vzdump $VMID --mode stop --compress zstd --dumpdir $DUMPDIR"
# Pick the newest archive for this VMID (vzdump filenames embed a timestamp).
BACKUP=$(ssh $SSH_OPTS "root@$SRC_HOST" "ls -t $DUMPDIR/vzdump-lxc-${VMID}-*.tar.zst 2>/dev/null | head -1")
if [[ -z "$BACKUP" ]]; then
  log_err "Backup file not found on $SRC_HOST"
  exit 1
fi
log_ok "Backup: $BACKUP"
# 3. Copy to target (via current host temp); keep same basename so restore finds it
BACKUP_NAME=$(basename "$BACKUP")
TMPLOC="${TMPDIR:-/tmp}/$BACKUP_NAME"
log_info "Copying backup to this host then to $DST_HOST..."
scp $SSH_OPTS "root@$SRC_HOST:$BACKUP" "$TMPLOC"
scp $SSH_OPTS "$TMPLOC" "root@$DST_HOST:$DUMPDIR/$BACKUP_NAME"
rm -f "$TMPLOC"
log_ok "Copy done: $DST_HOST:$DUMPDIR/$BACKUP_NAME"
# 4. In cluster: destroy on source before restore so VMID is free for target
# NOTE(review): this deletes the source CT before the restore is proven good;
# the copied vzdump archive remains the recovery path if the restore fails.
if $DESTROY_SOURCE; then
  log_info "Destroying $VMID on $SRC_HOST (so restore can create on target)..."
  ssh $SSH_OPTS "root@$SRC_HOST" "pct destroy $VMID --purge 1" 2>/dev/null || true
  log_ok "Freed VMID on source"
fi
# 5. Restore on target
log_info "Restoring $VMID on $DST_HOST (storage=$TARGET_STORAGE)..."
ssh $SSH_OPTS "root@$DST_HOST" "pct restore $VMID $DUMPDIR/$BACKUP_NAME --storage $TARGET_STORAGE" || {
  log_warn "Restore failed (e.g. VMID $VMID already exists on target?). Remove it first: ssh root@$DST_HOST pct destroy $VMID"
  exit 1
}
log_ok "Restored"
# 6. Start on target
log_info "Starting $VMID on $DST_HOST..."
ssh $SSH_OPTS "root@$DST_HOST" "pct start $VMID"
log_ok "Container $VMID is running on $DST_HOST"
if ! $DESTROY_SOURCE; then
  log_warn "Left original $VMID on $SRC_HOST (stopped). To free space: ssh root@$SRC_HOST pct destroy $VMID --purge 1"
fi
echo ""
log_ok "Migration complete. CT $VMID now on r630-02 ($DST_HOST). IP unchanged; update docs if they reference the host."

View File

@@ -0,0 +1,134 @@
#!/usr/bin/env bash
# SSH to Proxmox host(s) and apply fixes: Dev VM (5700) IP → .59, start stopped containers, ensure DBIS services.
#
# Usage: ./scripts/maintenance/resolve-and-fix-all-via-proxmox-ssh.sh [--dry-run]
# Run from a machine that can SSH to 192.168.11.11 (and optionally 192.168.11.10).
# Env: PROXMOX_HOST_R630_01 (default 192.168.11.11), PROXMOX_HOST_ML110 (default 192.168.11.10)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
DEV_VM_IP="${IP_DEV_VM:-192.168.11.59}"
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
log_err() { echo -e "\033[0;31m[✗]\033[0m $1"; }
# run_ssh <host> <command...> — remote exec as root; propagates ssh's exit code
# (unlike the best-effort helpers in sibling scripts, this one can fail).
run_ssh() {
  local host="$1"
  shift
  ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new root@"$host" "$@"
}
echo ""
echo "=== Resolve and fix all via Proxmox SSH ==="
echo " r630-01: $R630_01 ml110: $ML110 dry-run=$DRY_RUN"
echo ""
# --- 1. Check SSH to r630-01 ---
if ! run_ssh "$R630_01" "echo OK" 2>/dev/null; then
  log_err "Cannot SSH to $R630_01. Run from LAN with key-based auth to root@$R630_01."
  exit 1
fi
log_ok "SSH to $R630_01 OK"
# --- 2. Dev VM (5700): on r630-02 (migrated 2026-02-15), set IP to .59 and reboot ---
VMID_DEV=5700
HOST_5700="$R630_01"
status_5700=$(run_ssh "$R630_01" "pct status $VMID_DEV 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
# Not found on r630-01? Look on r630-02 (where the CT was migrated).
[[ "$status_5700" == "missing" || -z "$status_5700" ]] && status_5700=$(run_ssh "$R630_02" "pct status $VMID_DEV 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing") && HOST_5700="$R630_02"
if [[ "$status_5700" == "missing" || -z "$status_5700" ]]; then
  log_info "VMID $VMID_DEV not found on $R630_01 or $R630_02; skip IP change."
else
  # Only reconfigure when net0 does not already carry the target IP.
  current_net=$(run_ssh "$HOST_5700" "pct config $VMID_DEV 2>/dev/null | grep -E '^net0:'" 2>/dev/null || echo "")
  if echo "$current_net" | grep -q "$DEV_VM_IP"; then
    log_ok "VMID $VMID_DEV already has IP $DEV_VM_IP (on $HOST_5700)"
  else
    if [[ "$DRY_RUN" == true ]]; then
      log_info "Would run on $HOST_5700: pct stop $VMID_DEV; pct set $VMID_DEV --net0 ...; pct start $VMID_DEV"
    else
      log_info "Stopping VMID $VMID_DEV on $HOST_5700, setting IP to $DEV_VM_IP, then starting..."
      run_ssh "$HOST_5700" "pct stop $VMID_DEV" 2>/dev/null || true
      sleep 2
      run_ssh "$HOST_5700" "pct set $VMID_DEV --net0 name=eth0,bridge=vmbr0,ip=$DEV_VM_IP/24,gw=$GATEWAY" || { log_err "pct set $VMID_DEV failed"; exit 1; }
      if ! run_ssh "$HOST_5700" "pct start $VMID_DEV" 2>&1; then
        log_err "VMID $VMID_DEV failed to start. If exit 32 (mount): on host run e2fsck -f -y /dev/pve/vm-${VMID_DEV}-disk-0 (after lvchange -ay), then lvchange -an and pct start $VMID_DEV"
      else
        log_ok "VMID $VMID_DEV reconfigured to $DEV_VM_IP and started (on $HOST_5700)"
      fi
    fi
  fi
fi
# --- 3. Start stopped containers on r630-01 (RPC 2101, DBIS if present) ---
for vmid in 2101 10130 10150 10151 10100 10101 10120; do
  status=$(run_ssh "$R630_01" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
  if [[ "$status" == "missing" || -z "$status" ]]; then
    continue
  fi
  if [[ "$status" == "running" ]]; then
    log_ok "r630-01 VMID $vmid: already running"
    continue
  fi
  if [[ "$DRY_RUN" == true ]]; then
    log_info "Would start VMID $vmid on $R630_01"
    continue
  fi
  log_info "Starting VMID $vmid on $R630_01..."
  run_ssh "$R630_01" "pct start $vmid" 2>/dev/null && log_ok "VMID $vmid started" || log_warn "VMID $vmid start failed"
done
# --- 4. Ensure DBIS services (nginx/node) inside containers on r630-01 ---
for vmid in 10130 10150 10151; do
  status=$(run_ssh "$R630_01" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
  [[ "$status" != "running" ]] && continue
  if [[ "$DRY_RUN" == true ]]; then
    log_info "Would ensure nginx/node in VMID $vmid"
    continue
  fi
  # Best-effort: either service may not exist in every CT.
  run_ssh "$R630_01" "pct exec $vmid -- systemctl start nginx 2>/dev/null" || true
  run_ssh "$R630_01" "pct exec $vmid -- systemctl start node 2>/dev/null" || true
  log_ok "VMID $vmid services (nginx/node) started"
done
# --- 5. ML110: start stopped DBIS containers ---
if run_ssh "$ML110" "echo OK" 2>/dev/null; then
  for vmid in 10130 10150 10151 10100 10101 10120; do
    status=$(run_ssh "$ML110" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
    [[ "$status" == "missing" || -z "$status" ]] && continue
    [[ "$status" == "running" ]] && continue
    if [[ "$DRY_RUN" == true ]]; then
      log_info "Would start VMID $vmid on $ML110"
      continue
    fi
    log_info "Starting VMID $vmid on $ML110..."
    run_ssh "$ML110" "pct start $vmid" 2>/dev/null && log_ok "ML110 VMID $vmid started" || log_warn "ML110 VMID $vmid start failed"
  done
  for vmid in 10130 10150 10151; do
    status=$(run_ssh "$ML110" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
    [[ "$status" != "running" ]] && continue
    [[ "$DRY_RUN" == true ]] && continue
    run_ssh "$ML110" "pct exec $vmid -- systemctl start nginx 2>/dev/null" || true
    run_ssh "$ML110" "pct exec $vmid -- systemctl start node 2>/dev/null" || true
    log_ok "ML110 VMID $vmid services started"
  done
else
  log_warn "Cannot SSH to $ML110; skipped ML110 container start."
fi
echo ""
log_ok "Done. Next: update NPMplus Fourth proxy to $DEV_VM_IP:3000 (gitea/dev/codespaces) if not already:"
echo " NPM_PASSWORD=xxx bash scripts/nginx-proxy-manager/update-npmplus-fourth-proxy-hosts.sh"
echo " Then: bash scripts/verify/verify-end-to-end-routing.sh"
echo ""

View File

@@ -0,0 +1,168 @@
#!/usr/bin/env bash
# Run all maintenance/fix scripts that use SSH to Proxmox VE hosts (r630-01, ml110, r630-02).
# Run from project root. Requires: SSH key-based auth to root@<each host>, LAN access.
#
# Usage:
#   ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh
#   ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh --no-npm
#   ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh --e2e
#   ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh --verbose # show all step output (no 2>/dev/null)
#   ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh --dry-run
#
# Step 2 (2101 fix) can be slow (apt in CT). Timeout: STEP2_TIMEOUT=900 (default) or 0 to disable.
#
# Scripts run (each SSHs to the Proxmox hosts):
#   0. make-rpc-vmids-writable-via-ssh.sh — Stop 2101,2500-2505; e2fsck rootfs; start (r630-01)
#   1. resolve-and-fix-all-via-proxmox-ssh.sh — Dev VM IP, start containers, DBIS (r630-01, ml110)
#   2. fix-rpc-2101-jna-reinstall.sh — 2101 Besu reinstall (r630-01)
#   3. install-besu-permanent-on-missing-nodes.sh — Besu on 2500-2505, 1505-1508 (r630-01, ml110)
#   4. address-all-remaining-502s.sh — backends + NPM proxy + RPC diagnostics
#   5. [optional] verify-end-to-end-routing.sh — E2E (if --e2e)
#
# See: docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md, docs/05-network/CHECK_ALL_UPDATES_AND_CLOUDFLARE_TUNNELS.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
SKIP_NPM=false
RUN_E2E=false
DRY_RUN=false
VERBOSE=false
for arg in "${@:-}"; do
  [[ "$arg" == "--no-npm" ]] && SKIP_NPM=true
  [[ "$arg" == "--e2e" ]] && RUN_E2E=true
  [[ "$arg" == "--dry-run" ]] && DRY_RUN=true
  [[ "$arg" == "--verbose" ]] && VERBOSE=true
done
# Step 2 (2101 fix) timeout in seconds; 0 = no timeout
STEP2_TIMEOUT="${STEP2_TIMEOUT:-900}"
# run_step <script> [args...] — run a child script; stderr is hidden unless --verbose.
run_step() {
  if $VERBOSE; then
    bash "$@"
  else
    bash "$@" 2>/dev/null
  fi
}
echo ""
echo "=== Run all maintenance via Proxmox SSH ==="
echo " Hosts: r630-01=$R630_01 ml110=$ML110 r630-02=$R630_02"
echo " --no-npm=$SKIP_NPM --e2e=$RUN_E2E --verbose=$VERBOSE --dry-run=$DRY_RUN STEP2_TIMEOUT=$STEP2_TIMEOUT"
echo ""
if $DRY_RUN; then
  echo "Would run in order:"
  echo " 0. make-rpc-vmids-writable-via-ssh.sh"
  echo " 1. resolve-and-fix-all-via-proxmox-ssh.sh"
  echo " 2. fix-rpc-2101-jna-reinstall.sh (timeout=${STEP2_TIMEOUT}s if set)"
  echo " 3. install-besu-permanent-on-missing-nodes.sh"
  echo " 4. address-all-remaining-502s.sh $($SKIP_NPM && echo '--no-npm')"
  $RUN_E2E && echo " 5. verify-end-to-end-routing.sh"
  echo " Use --verbose to show all step output; STEP2_TIMEOUT=0 to disable step 2 timeout."
  echo ""
  exit 0
fi
# Quick SSH check to all hosts we need (informational only; steps still run)
for host in "$R630_01" "$ML110" "$R630_02"; do
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=accept-new "root@$host" "echo OK" 2>/dev/null; then
    echo " SSH $host OK"
  else
    echo " Warning: cannot SSH to $host (some steps may fail)"
  fi
done
echo ""
# 0. Make RPC VMIDs writable (e2fsck so fix/install scripts can write)
echo "[0/5] Making RPC VMIDs writable..."
echo "--- 0/5: Make RPC VMIDs writable (r630-01: 2101, 2500-2505) ---"
if run_step "${SCRIPT_DIR}/make-rpc-vmids-writable-via-ssh.sh"; then
  echo " Done."
else
  echo " Step had warnings (check output)."
fi
echo ""
# 1. Resolve and fix (Dev VM IP, start containers, DBIS)
echo "[1/5] Resolve and fix (Dev VM, containers, DBIS)..."
echo "--- 1/5: Resolve and fix via Proxmox SSH (r630-01, ml110) ---"
if run_step "${SCRIPT_DIR}/resolve-and-fix-all-via-proxmox-ssh.sh"; then
  echo " Done."
else
  echo " Step had warnings (check output)."
fi
echo ""
# 2. Fix 2101 JNA reinstall (r630-01) — can be slow (apt in CT); optional timeout
echo "[2/5] Fix 2101 Besu JNA reinstall (may take several minutes)..."
echo "--- 2/5: Fix 2101 Besu JNA reinstall (r630-01) ---"
step2_ok=false
step2_ret=0
# Use coreutils `timeout` only when available and a positive timeout is set;
# exit code 124 from `timeout` means the step was killed for running too long.
if [[ -n "${STEP2_TIMEOUT:-}" && "$STEP2_TIMEOUT" -gt 0 ]] && command -v timeout >/dev/null 2>&1; then
  if $VERBOSE; then
    timeout "$STEP2_TIMEOUT" bash "${SCRIPT_DIR}/fix-rpc-2101-jna-reinstall.sh" || step2_ret=$?
  else
    timeout "$STEP2_TIMEOUT" bash "${SCRIPT_DIR}/fix-rpc-2101-jna-reinstall.sh" 2>/dev/null || step2_ret=$?
  fi
  [[ $step2_ret -eq 0 ]] && step2_ok=true
  [[ $step2_ret -eq 124 ]] && echo " Step 2 timed out after ${STEP2_TIMEOUT}s. Re-run manually: ./scripts/maintenance/fix-rpc-2101-jna-reinstall.sh"
else
  if run_step "${SCRIPT_DIR}/fix-rpc-2101-jna-reinstall.sh"; then
    step2_ok=true
  fi
fi
$step2_ok && echo " Done."
echo ""
# 3. Install Besu on missing nodes (r630-01, ml110)
echo "[3/5] Install Besu on missing nodes..."
echo "--- 3/5: Install Besu on missing nodes (r630-01, ml110) ---"
if run_step "${PROJECT_ROOT}/scripts/besu/install-besu-permanent-on-missing-nodes.sh"; then
  echo " Done."
else
  echo " Step had failures (e.g. disk full or read-only CT)."
fi
echo ""
# 4. Address all remaining 502s (backends + NPM + diagnostics)
echo "[4/5] Address all remaining 502s (backends + NPM + diagnostics)..."
echo "--- 4/5: Address all remaining 502s (SSH to r630-01, r630-02) ---"
# ADDR_ARGS is intentionally unquoted below: empty expands to no argument.
ADDR_ARGS=""
$SKIP_NPM && ADDR_ARGS="--no-npm"
if run_step "${SCRIPT_DIR}/address-all-remaining-502s.sh" $ADDR_ARGS; then
  echo " Done."
else
  echo " Step had warnings (check output)."
fi
echo ""
# 5. Optional E2E
if $RUN_E2E; then
  echo "[5/5] E2E verification..."
  echo "--- 5/5: E2E verification ---"
  if [ -f "${PROJECT_ROOT}/scripts/verify/verify-end-to-end-routing.sh" ]; then
    if $VERBOSE; then
      E2E_ACCEPT_502_INTERNAL=1 bash "${PROJECT_ROOT}/scripts/verify/verify-end-to-end-routing.sh" || true
    else
      E2E_ACCEPT_502_INTERNAL=1 bash "${PROJECT_ROOT}/scripts/verify/verify-end-to-end-routing.sh" 2>/dev/null || true
    fi
  fi
  echo ""
fi
echo "=== All maintenance steps (via Proxmox SSH) completed ==="
echo " Next: ./scripts/verify/verify-end-to-end-routing.sh"
echo " Reports: docs/04-configuration/verification-evidence/"
echo ""

0
scripts/maintenance/schedule-daily-weekly-cron.sh Normal file → Executable file
View File

0
scripts/maintenance/schedule-npmplus-backup-cron.sh Normal file → Executable file
View File

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# Schedule cron for storage growth data collection (append to history.csv).
# Usage: bash scripts/maintenance/schedule-storage-growth-cron.sh [--install|--show|--remove]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
COLLECT_SCRIPT="$PROJECT_ROOT/scripts/monitoring/collect-storage-growth-data.sh"
PRUNE_SNAPSHOTS="$PROJECT_ROOT/scripts/monitoring/prune-storage-snapshots.sh"
PRUNE_HISTORY="$PROJECT_ROOT/scripts/monitoring/prune-storage-history.sh"
LOG_DIR="$PROJECT_ROOT/logs/storage-growth"
# Every 6 hours
CRON_STORAGE="0 */6 * * * cd $PROJECT_ROOT && bash $COLLECT_SCRIPT --append >> $LOG_DIR/cron.log 2>&1"
# Weekly Sun 08:00: prune snapshots (30d) + history (~90d)
CRON_PRUNE="0 8 * * 0 cd $PROJECT_ROOT && bash $PRUNE_SNAPSHOTS >> $LOG_DIR/cron.log 2>&1 && bash $PRUNE_HISTORY >> $LOG_DIR/cron.log 2>&1"
case "${1:-}" in
  --install)
    mkdir -p "$LOG_DIR"
    # Track exactly which entries were added so we report only those
    # (previously both lines were echoed even when only one was installed).
    installed=()
    if ! crontab -l 2>/dev/null | grep -q "collect-storage-growth-data.sh"; then
      (crontab -l 2>/dev/null; echo "$CRON_STORAGE") | crontab -
      installed+=("$CRON_STORAGE")
    fi
    if ! crontab -l 2>/dev/null | grep -q "prune-storage-snapshots.sh"; then
      (crontab -l 2>/dev/null; echo "$CRON_PRUNE") | crontab -
      installed+=("$CRON_PRUNE")
    fi
    if [ "${#installed[@]}" -gt 0 ]; then
      echo "Installed storage growth cron:"
      for line in "${installed[@]}"; do
        echo " $line"
      done
    else
      echo "Storage growth cron already present in crontab."
    fi
    ;;
  --show)
    echo "Storage growth (append every 6h): $CRON_STORAGE"
    echo "Storage prune (weekly Sun 08:00): $CRON_PRUNE"
    ;;
  --remove)
    current=$(crontab -l 2>/dev/null || true)
    if echo "$current" | grep -qE "collect-storage-growth-data|prune-storage-snapshots|prune-storage-history"; then
      # If every line is removed, crontab - installs an empty table, which is fine.
      echo "$current" | grep -v "collect-storage-growth-data.sh" | grep -v "prune-storage-snapshots.sh" | grep -v "prune-storage-history.sh" | crontab -
      echo "Removed storage growth cron."
    else
      echo "No storage growth cron found in crontab."
    fi
    ;;
  *)
    echo "Usage: $0 [--install|--show|--remove]"
    exit 0
    ;;
esac

View File

@@ -0,0 +1,41 @@
#!/usr/bin/env bash
# Schedule cron for storage-monitor.sh (host pvesm + VG; 80%/90% alerts; optional email/webhook).
# Usage: bash scripts/maintenance/schedule-storage-monitor-cron.sh [--install|--show|--remove]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
MONITOR_SCRIPT="$PROJECT_ROOT/scripts/storage-monitor.sh"
LOG_DIR="$PROJECT_ROOT/logs/storage-monitoring"
# Daily at 07:00 (before daily-weekly-checks at 08:00)
CRON_STORAGE_MONITOR="0 7 * * * cd $PROJECT_ROOT && bash $MONITOR_SCRIPT >> $LOG_DIR/cron.log 2>&1"
# True when a storage-monitor entry already exists in the user's crontab.
have_entry() { crontab -l 2>/dev/null | grep -q "storage-monitor.sh"; }
case "${1:-}" in
  --install)
    mkdir -p "$LOG_DIR"
    if have_entry; then
      echo "Storage monitor cron already present in crontab."
    else
      { crontab -l 2>/dev/null; echo "$CRON_STORAGE_MONITOR"; } | crontab -
      echo "Installed storage monitor cron (daily 07:00):"
      echo " $CRON_STORAGE_MONITOR"
    fi
    ;;
  --show)
    echo "Storage monitor (daily 07:00):"
    echo " $CRON_STORAGE_MONITOR"
    ;;
  --remove)
    if have_entry; then
      crontab -l 2>/dev/null | grep -v "storage-monitor.sh" | crontab -
      echo "Removed storage monitor cron."
    else
      echo "No storage monitor cron found in crontab."
    fi
    ;;
  *)
    echo "Usage: $0 [--install|--show|--remove]"
    exit 0
    ;;
esac

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Set max-peers=32 in Besu config on all running Besu nodes (in-place sed).
# Run after repo configs are updated; then restart Besu with restart-besu-reload-node-lists.sh.
# See: docs/08-monitoring/PEER_CONNECTIONS_PLAN.md
#
# Usage: ./scripts/maintenance/set-all-besu-max-peers-32.sh [--dry-run]
# Requires: SSH to Proxmox hosts (r630-01, r630-02, ml110).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional host-IP overrides (PROXMOX_* vars) from the repo config; absence is fine.
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
# Static VMID -> Proxmox host mapping across the three hosts (IPs env-overridable).
declare -A HOST_BY_VMID
for v in 1000 1001 1002 1500 1501 1502 2101 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"; done
for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"; done
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2307 2308 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-${PROXMOX_HOST_ML110:-192.168.11.10}}"; done
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
# Intentionally expanded unquoted below so the options word-split into separate ssh args.
SSH_OPTS="-o ConnectTimeout=8 -o StrictHostKeyChecking=accept-new"
echo "Set max-peers=32 on all Besu nodes (dry-run=$DRY_RUN)"
echo ""
for vmid in "${BESU_VMIDS[@]}"; do
  host="${HOST_BY_VMID[$vmid]:-}"
  [[ -z "$host" ]] && continue
  # Only touch containers currently running on their mapped host.
  running=$(ssh $SSH_OPTS "root@$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
  if [[ "$running" != "running" ]]; then
    echo "VMID $vmid @ $host: skip (not running)"
    continue
  fi
  if $DRY_RUN; then
    echo "VMID $vmid @ $host: [dry-run] would sed max-peers=25 -> 32"
    continue
  fi
  # Try common Besu config locations; sed in place
  # Remote script runs inside the CT: for each *.toml under the candidate dirs,
  # rewrite max-peers=25 -> 32 and echo "OK:<file>" per file changed.
  # NOTE(review): when no file matches, the remote loop's final grep fails, so ssh
  # exits non-zero and result becomes "FAIL" even though nothing was wrong — confirm.
  result=$(ssh $SSH_OPTS "root@$host" "pct exec $vmid -- bash -c '
    for d in /etc/besu /var/lib/besu /genesis; do
      [ -d \"\$d\" ] || continue
      for f in \"\$d\"/*.toml; do
        [ -f \"\$f\" ] || continue
        grep -q \"max-peers=25\" \"\$f\" 2>/dev/null && sed -i \"s/max-peers=25/max-peers=32/g\" \"\$f\" && echo \"OK:\$f\"
      done
    done
  '" 2>/dev/null || echo "FAIL")
  if [[ "$result" == OK:* ]]; then
    echo "VMID $vmid @ $host: updated $result"
  elif [[ -n "$result" ]]; then
    echo "VMID $vmid @ $host: $result"
  else
    echo "VMID $vmid @ $host: no change or skip"
  fi
done
echo ""
echo "Done. Restart Besu on all nodes to apply: ./scripts/besu/restart-besu-reload-node-lists.sh"
echo ""

View File

@@ -0,0 +1,85 @@
#!/usr/bin/env bash
# Start any stopped Proxmox containers that back E2E endpoints (502 fixes).
# Uses SSH to Proxmox hosts and pct start for containers that are stopped.
#
# Usage: ./scripts/maintenance/start-stopped-containers-via-ssh.sh [--dry-run]
# Env: PROXMOX_HOST_ML110 (default 192.168.11.10), PROXMOX_HOST_R630_01 (default 192.168.11.11)
# SSH to root@host; ensure key-based auth or use ssh-agent.
#
# VMIDs: DBIS (10130 frontend, 10150 api primary, 10151 api secondary) + optional 10100,10101,10120;
# RPC core 2101. Layout: 101xx on ML110 by default (create-dbis-core), 2101 on r630-01.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optional project env loader (host IPs etc.); absence is non-fatal.
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
PROXMOX_ML110="${PROXMOX_HOST_ML110:-${PROXMOX_HOST:-192.168.11.10}}"
PROXMOX_R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# DBIS Core containers (often on ml110)
VMIDS_DBIS="10130 10150 10151 10100 10101 10120"
# RPC core (r630-01 per ALL_VMIDS)
VMIDS_RPC="2101"
# Colored log helpers (ANSI escapes rendered via echo -e).
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
log_err() { echo -e "\033[0;31m[✗]\033[0m $1"; }
# run_ssh HOST CMD... — SSH as root with a short timeout; host keys auto-accepted.
run_ssh() {
  local host="$1"
  shift
  ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$host" "$@"
}
# start_stopped_on_host HOST "VMID..." LABEL
# pct-start every listed VMID that exists on HOST and is not already running
# (or just report what would happen in --dry-run mode).
start_stopped_on_host() {
  local host="$1"
  local vmids_list="$2"
  local label="$3"
  log_info "Host $host ($label): checking VMIDs $vmids_list"
  if ! run_ssh "$host" "echo OK" &>/dev/null; then
    log_warn "Cannot SSH to $host; skipping."
    return 0
  fi
  for vmid in $vmids_list; do
    local status
    # "missing" covers both unknown-on-this-host VMIDs and pct errors.
    status=$(run_ssh "$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
    if [[ "$status" == "missing" || -z "$status" ]]; then
      log_info "  VMID $vmid: not on this host or unknown, skip"
      continue
    fi
    if [[ "$status" == "running" ]]; then
      log_ok "  VMID $vmid: already running"
      continue
    fi
    if [[ "$DRY_RUN" == true ]]; then
      log_info "  VMID $vmid: would run pct start $vmid (current: $status)"
      continue
    fi
    log_info "  VMID $vmid: starting (was $status)..."
    if run_ssh "$host" "pct start $vmid" 2>/dev/null; then
      log_ok "  VMID $vmid: started"
    else
      log_err "  VMID $vmid: start failed"
    fi
  done
}
echo ""
echo "=== Start stopped Proxmox containers (E2E 502 fix) ==="
echo "  dry-run=$DRY_RUN"
echo ""
start_stopped_on_host "$PROXMOX_ML110" "$VMIDS_DBIS" "ML110 (DBIS)"
echo ""
start_stopped_on_host "$PROXMOX_R630_01" "$VMIDS_RPC" "r630-01 (RPC 2101)"
# Also try DBIS on r630-01 in case layout differs
start_stopped_on_host "$PROXMOX_R630_01" "$VMIDS_DBIS" "r630-01 (DBIS if present)"
echo ""
log_ok "Done. Re-run E2E routing: ./scripts/verify/verify-end-to-end-routing.sh"
echo ""

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Free disk space in VMID 5000 (Explorer/Blockscout): journal, backups, logs, Docker prune.
# Logs are on a separate volume (/var/log-remote); this script frees space on ROOT.
#
# Usage: PROXMOX_HOST_R630_02=192.168.11.12 ./scripts/maintenance/vmid5000-free-disk-and-logs.sh
#   or run on Proxmox host (192.168.11.12) with pct available.
#
# See: docs/04-configuration/VMID5000_STORAGE_AND_LOGS_RUNBOOK.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VMID="${VMID_5000:-5000}"
HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
# Run commands inside the CT: directly via pct when this machine is the Proxmox
# host that owns the container, otherwise via SSH to that host.
if command -v pct &>/dev/null && pct list 2>/dev/null | grep -q "^$VMID "; then
  EXEC_PREFIX="pct exec $VMID --"
  run_in_ct() { $EXEC_PREFIX "$@"; }
else
  echo "Not on Proxmox host with CT $VMID; using SSH to $HOST"
  run_in_ct() { ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@"$HOST" "pct exec $VMID -- $*"; }
fi
echo "=============================================="
echo "VMID $VMID — free disk (root)"
echo "=============================================="
echo ""
echo "=== Before ==="
run_in_ct df -h /
echo ""
# Shrink systemd journal: keep at most 1 day AND at most 100M.
echo "=== Journal vacuum (keep 1d + max 100M) ==="
run_in_ct journalctl --vacuum-time=1d 2>/dev/null || true
run_in_ct journalctl --vacuum-size=100M 2>/dev/null || true
echo ""
# Keep only the two newest timestamped backup copies of each file.
echo "=== Old backups (keep last 2) ==="
run_in_ct "sh -c 'ls -t /var/www/html/index.html.backup.* 2>/dev/null | tail -n +3 | xargs -r rm -f'" 2>/dev/null || true
run_in_ct "sh -c 'ls -t /etc/nginx/sites-available/blockscout.backup.* 2>/dev/null | tail -n +3 | xargs -r rm -f'" 2>/dev/null || true
echo ""
echo "=== Syslog truncate + logrotate ==="
run_in_ct "sh -c ': > /var/log/syslog 2>/dev/null; logrotate -f /etc/logrotate.conf 2>/dev/null'" || true
echo ""
# Truncate (not delete) runaway container logs so Docker keeps valid handles.
echo "=== Docker: truncate oversized container logs (>500M) ==="
run_in_ct find /var/lib/docker/containers -name '*-json.log' -size +500M -exec truncate -s 0 {} \; 2>/dev/null || true
echo ""
echo "=== Docker image/builder prune (no container/volume prune) ==="
run_in_ct docker image prune -af 2>/dev/null || true
run_in_ct docker builder prune -af 2>/dev/null || true
echo ""
echo "=== After ==="
run_in_ct df -h /
echo ""
run_in_ct du -sh /var/log /var/log-remote 2>/dev/null || true
echo ""
echo "=============================================="
echo "Done. Logs on separate volume: /var/log-remote"
echo "=============================================="

View File

@@ -0,0 +1,170 @@
#!/usr/bin/env bash
# Mint LINK, cUSDT, cUSDC to deployment wallet; optionally wrap ETH to WETH9/WETH10.
# Usage: ./scripts/mint-tokens-for-deployer.sh [--amount-link N] [--amount-stable N] [--wrap-eth N] [--dry-run]
#        (each flag also accepts the --flag=N form)
#
# Defaults: 1000000 LINK (18 decimals), 100000 cUSDT + 100000 cUSDC (6 decimals each), 1 ETH wrap.
# Requires: PRIVATE_KEY, RPC_URL_138 in smom-dbis-138/.env
# Chain 138: use --gas-price 1000000000 (1 gwei) for all txs.
#
# If you get "Replacement transaction underpriced": a stuck tx is blocking the deployer nonce.
# Workarounds: (1) Wait for the stuck tx to be mined, (2) Use MetaMask to send a 0-ETH tx to clear,
# (3) Restart Besu RPC and flush mempool from Proxmox host.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
# Load smom-dbis-138 .env (PRIVATE_KEY, RPC_URL_138, optional overrides); set -a exports all.
if [[ -f smom-dbis-138/.env ]]; then
  set -a
  source smom-dbis-138/.env
  set +a
fi
RPC="${RPC_URL_138:-${CHAIN138_RPC_URL:-http://192.168.11.211:8545}}"
GAS_PRICE="${GAS_PRICE:-1000000000}"   # 1 gwei, Chain 138 convention
GAS_LIMIT="${GAS_LIMIT:-100000}"
# Token addresses (Chain 138)
LINK="0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03"
CUSDT="0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
CUSDC="0xf22258f57794CC8E06237084b353Ab30fFfa640b"
WETH9="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
WETH10="0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"
# Default amounts (env-overridable)
AMOUNT_LINK="${AMOUNT_LINK:-1000000}"
AMOUNT_STABLE="${AMOUNT_STABLE:-100000}"
WRAP_ETH="${WRAP_ETH:-1}"
DRY_RUN=false
# Parse flags. Fix: the header documented "--flag N" but the old loop only
# matched "--flag=N"; both spellings are now accepted. Unknown args are ignored.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --amount-link=*) AMOUNT_LINK="${1#*=}"; shift;;
    --amount-link) AMOUNT_LINK="${2:?--amount-link requires a value}"; shift 2;;
    --amount-stable=*) AMOUNT_STABLE="${1#*=}"; shift;;
    --amount-stable) AMOUNT_STABLE="${2:?--amount-stable requires a value}"; shift 2;;
    --wrap-eth=*) WRAP_ETH="${1#*=}"; shift;;
    --wrap-eth) WRAP_ETH="${2:?--wrap-eth requires a value}"; shift 2;;
    --dry-run) DRY_RUN=true; shift;;
    *) shift;;
  esac
done
# Derive deployer address from PRIVATE_KEY when possible; else env/hard-coded fallback.
DEPLOYER=""
if [[ -n "${PRIVATE_KEY:-}" ]]; then
  DEPLOYER=$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null || true)
fi
if [[ -z "$DEPLOYER" ]]; then
  DEPLOYER="${DEPLOYER_ADDRESS:-0x4A666F96fC8764181194447A7dFdb7d471b301C8}"
fi
# Raw on-chain amounts by appending decimals (assumes integer AMOUNT_* values).
LINK_RAW="${AMOUNT_LINK}000000000000000000"  # 18 decimals
STABLE_RAW="${AMOUNT_STABLE}000000"          # 6 decimals
# 1 ETH = 10^18 wei; awk handles fractional WRAP_ETH (e.g. 0.5)
WRAP_WEI=$(awk "BEGIN {printf \"%.0f\", ${WRAP_ETH} * 1000000000000000000}" 2>/dev/null || echo "1000000000000000000")
echo "=== Mint tokens for deployer ==="
echo "Deployer: $DEPLOYER"
echo "RPC: $RPC"
echo "Amounts: LINK=$AMOUNT_LINK, cUSDT/cUSDC=$AMOUNT_STABLE each, wrap ETH=$WRAP_ETH"
echo ""
# PRIVATE_KEY is only mandatory for real sends; dry-run just prints commands.
if [[ -z "${PRIVATE_KEY:-}" ]] && ! $DRY_RUN; then
  echo "Error: PRIVATE_KEY not set in smom-dbis-138/.env"
  exit 1
fi
# 1. Mint LINK (MockLinkToken has unrestricted mint)
echo "1. Mint LINK to deployer..."
if $DRY_RUN; then
  echo "   [dry-run] cast send $LINK \"mint(address,uint256)\" $DEPLOYER $LINK_RAW --rpc-url \$RPC --private-key \$PRIVATE_KEY --legacy --gas-price $GAS_PRICE"
else
  if cast send "$LINK" "mint(address,uint256)" "$DEPLOYER" "$LINK_RAW" \
    --rpc-url "$RPC" --private-key "$PRIVATE_KEY" --legacy --gas-limit "$GAS_LIMIT" --gas-price "$GAS_PRICE" 2>&1; then
    echo "   OK"
  else
    echo "   (LINK may not have mint or different interface)"
  fi
fi
echo ""
# 2. Mint cUSDT (onlyOwner - deployer must be owner)
echo "2. Mint cUSDT to deployer..."
if $DRY_RUN; then
  echo "   [dry-run] cast send $CUSDT \"mint(address,uint256)\" $DEPLOYER $STABLE_RAW --rpc-url \$RPC --private-key \$PRIVATE_KEY --legacy --gas-price $GAS_PRICE"
else
  if cast send "$CUSDT" "mint(address,uint256)" "$DEPLOYER" "$STABLE_RAW" \
    --rpc-url "$RPC" --private-key "$PRIVATE_KEY" --legacy --gas-limit "$GAS_LIMIT" --gas-price "$GAS_PRICE" 2>&1; then
    echo "   OK"
  else
    echo "   (Deployer may not be owner of cUSDT)"
  fi
fi
echo ""
# 3. Mint cUSDC (onlyOwner)
echo "3. Mint cUSDC to deployer..."
if $DRY_RUN; then
  echo "   [dry-run] cast send $CUSDC \"mint(address,uint256)\" $DEPLOYER $STABLE_RAW --rpc-url \$RPC --private-key \$PRIVATE_KEY --legacy --gas-price $GAS_PRICE"
else
  if cast send "$CUSDC" "mint(address,uint256)" "$DEPLOYER" "$STABLE_RAW" \
    --rpc-url "$RPC" --private-key "$PRIVATE_KEY" --legacy --gas-limit "$GAS_LIMIT" --gas-price "$GAS_PRICE" 2>&1; then
    echo "   OK"
  else
    echo "   (Deployer may not be owner of cUSDC)"
  fi
fi
echo ""
# 4. Wrap ETH to WETH9 (if WRAP_ETH > 0)
if [[ "${WRAP_ETH:-0}" != "0" ]] && [[ -n "${WRAP_WEI:-}" ]]; then
  echo "4. Wrap $WRAP_ETH ETH to WETH9..."
  if $DRY_RUN; then
    echo "   [dry-run] cast send $WETH9 \"deposit()\" --value $WRAP_WEI --rpc-url \$RPC --private-key \$PRIVATE_KEY --legacy --gas-price $GAS_PRICE"
  else
    if cast send "$WETH9" "deposit()" --value "$WRAP_WEI" \
      --rpc-url "$RPC" --private-key "$PRIVATE_KEY" --legacy --gas-limit "$GAS_LIMIT" --gas-price "$GAS_PRICE" 2>&1; then
      echo "   OK"
    else
      echo "   (Insufficient ETH balance?)"
    fi
  fi
  echo ""
fi
# 5. Wrap ETH to WETH10 (optional, same amount)
if [[ "${WRAP_ETH:-0}" != "0" ]] && [[ -n "${WRAP_WEI:-}" ]]; then
  echo "5. Wrap $WRAP_ETH ETH to WETH10..."
  if $DRY_RUN; then
    echo "   [dry-run] cast send $WETH10 \"deposit()\" --value $WRAP_WEI --rpc-url \$RPC --private-key \$PRIVATE_KEY --legacy --gas-price $GAS_PRICE"
  else
    if cast send "$WETH10" "deposit()" --value "$WRAP_WEI" \
      --rpc-url "$RPC" --private-key "$PRIVATE_KEY" --legacy --gas-limit "$GAS_LIMIT" --gas-price "$GAS_PRICE" 2>&1; then
      echo "   OK"
    else
      echo "   (Insufficient ETH balance?)"
    fi
  fi
  echo ""
fi
echo "=== Done ==="
if ! $DRY_RUN; then
  echo "Verify balances: cd smom-dbis-138 && ./scripts/deployment/list-deployer-tokens-all-networks.sh"
fi

View File

@@ -0,0 +1,157 @@
#!/usr/bin/env bash
# Collect real-time storage data from Proxmox hosts and VMs for growth tracking.
# Usage: ./scripts/monitoring/collect-storage-growth-data.sh [--json|--csv|--append]
# See: docs/04-configuration/STORAGE_GROWTH_AND_HEALTH.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
OUTPUT_MODE="markdown"
APPEND_HISTORY=false
# Intentionally unquoted where used so the options word-split into ssh args.
SSH_OPTS="-o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new"
LOG_DIR="${PROJECT_ROOT}/logs/storage-growth"
HISTORY_CSV="${LOG_DIR}/history.csv"
TIMESTAMP=$(date -Iseconds)
for a in "$@"; do
  case "$a" in
    --json) OUTPUT_MODE="json" ;;
    --csv) OUTPUT_MODE="csv" ;;
    --append) APPEND_HISTORY=true ;;
  esac
done
ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
# Best-effort remote exec: failures and stderr are swallowed so one unreachable
# host does not abort the whole collection.
run_ssh() { ssh $SSH_OPTS "root@$1" "$2" 2>/dev/null || true; }
mkdir -p "$LOG_DIR"
OUT="${LOG_DIR}/snapshot_$(date +%Y%m%d_%H%M%S).txt"
# ---- Host: pvesm, lvs (thin), vgs, df ----
host_data() {
  local ip="$1" name="$2"
  echo "=== HOST $name ($ip) ==="
  run_ssh "$ip" "pvesm status 2>/dev/null" || true
  echo ""
  echo "--- LVM thin pools (data% / metadata%) ---"
  run_ssh "$ip" "lvs --units g -o lv_name,vg_name,lv_size,data_percent,metadata_percent,pool_lv 2>/dev/null | head -20" || true
  echo ""
  echo "--- Volume groups (free space) ---"
  run_ssh "$ip" "vgs --units g -o vg_name,vg_size,vg_free 2>/dev/null" || true
  echo ""
  echo "--- Host root df ---"
  run_ssh "$ip" "df -h / 2>/dev/null" || true
  echo ""
}
# ---- Per-VM: df /, /data, /var/log; du /data/besu, /var/log ----
vm_data() {
  local ip="$1" name="$2"
  local vmids
  vmids=$(run_ssh "$ip" "pct list 2>/dev/null | awk 'NR>1 && \$2==\"running\" {print \$1}'" || true)
  [[ -z "$vmids" ]] && return
  echo "=== VM/CT on $name (running) ==="
  for vmid in $vmids; do
    echo "--- VMID $vmid ---"
    run_ssh "$ip" "pct exec $vmid -- df -h / /data /var/log 2>/dev/null | grep -E 'Filesystem|^/'" || true
    run_ssh "$ip" "pct exec $vmid -- sh -c 'du -sh /data/besu 2>/dev/null; du -sh /var/log 2>/dev/null' 2>/dev/null" || true
  done
  echo ""
}
# Collect all
{
  host_data "$ML110" "ml110"
  host_data "$R630_01" "r630-01"
  host_data "$R630_02" "r630-02"
  vm_data "$ML110" "ml110"
  vm_data "$R630_01" "r630-01"
  vm_data "$R630_02" "r630-02"
} | tee "$OUT"
# Compact growth table (from snapshot just written)
echo ""
echo "=== Growth table (host storage) ==="
echo "| Host | Storage | Type | Used% | Used | Avail |"
echo "|------|---------|------|-------|------|-------|"
for entry in "${ML110}:ml110" "${R630_01}:r630-01" "${R630_02}:r630-02"; do
  ip="${entry%%:*}"
  name="${entry##*:}"
  # BUGFIX: use default IFS so pvesm's columns split into fields. With "IFS= read"
  # word splitting was disabled, the whole line landed in st_name and every other
  # column printed empty.
  run_ssh "$ip" "pvesm status 2>/dev/null" | tail -n +2 | while read -r st_name st_type st_status st_total st_used st_avail st_pct; do
    [[ -z "$st_name" ]] && continue
    echo "| $name | $st_name | $st_type | $st_pct | $st_used | $st_avail |"
  done
done
echo ""
echo "| Host | LV (thin pool) | Data% | Meta% |"
echo "|------|-----------------|-------|-------|"
for entry in "${ML110}:ml110" "${R630_01}:r630-01" "${R630_02}:r630-02"; do
  ip="${entry%%:*}"
  name="${entry##*:}"
  # BUGFIX: same IFS fix as above so lv/data/meta split correctly.
  run_ssh "$ip" "lvs --noheadings -o lv_name,data_percent,metadata_percent,pool_lv 2>/dev/null" | awk '$4!="" {print $1,$2,$3}' | while read -r lv data meta; do
    echo "| $name | $lv | $data | $meta |"
  done
done
echo ""
# Quote CSV field if it contains comma or newline
csv_quote() {
  local s="$1"
  if [[ "$s" =~ [,\"$'\n'] ]]; then
    echo "\"${s//\"/\"\"}\""
  else
    echo "$s"
  fi
}
# One-line summary for CSV append (detail field quoted when needed)
# Note: thin rows carry two extra trailing columns (data%, meta%) beyond the header.
summary_csv() {
  local ip name
  for entry in "${ML110}:ml110" "${R630_01}:r630-01" "${R630_02}:r630-02"; do
    ip="${entry%%:*}"
    name="${entry##*:}"
    local pvesm lvs
    pvesm=$(run_ssh "$ip" "pvesm status 2>/dev/null" || true)
    lvs=$(run_ssh "$ip" "lvs --noheadings -o lv_name,data_percent,metadata_percent 2>/dev/null" || true)
    echo "$pvesm" | tail -n +2 | while IFS= read -r line; do
      [[ -z "$line" ]] && continue
      echo "${TIMESTAMP},host,${name},${ip},pvesm,$(csv_quote "$line")"
    done
    # BUGFIX: default IFS here too — "IFS= read -r lv data meta" put the whole
    # lvs line into lv, leaving data/meta empty in history.csv.
    echo "$lvs" | while read -r lv data meta; do
      [[ -z "$lv" ]] && continue
      echo "${TIMESTAMP},host,${name},${ip},thin,${lv},${data},${meta}"
    done
  done
}
if [[ "$OUTPUT_MODE" == "json" ]]; then
  echo "{ \"timestamp\": \"$TIMESTAMP\", \"csv_rows\": ["
  first=true
  while IFS= read -r line; do
    [[ -z "$line" ]] && continue
    $first || echo ","
    first=false
    # Minimal JSON string escaping: backslashes first, then double quotes.
    escaped="${line//\\/\\\\}"; escaped="${escaped//\"/\\\"}"
    echo -n "  \"$escaped\""
  done < <(summary_csv)
  echo ""
  echo "] }"
fi
if [[ "$OUTPUT_MODE" == "csv" ]]; then
  echo "timestamp,scope,host,ip,metric,detail"
  summary_csv
fi
if $APPEND_HISTORY; then
  # Write the header once, on first creation of the history file.
  [[ ! -f "$HISTORY_CSV" ]] && echo "timestamp,scope,host,ip,metric,detail" >> "$HISTORY_CSV"
  summary_csv >> "$HISTORY_CSV"
  echo "Appended to $HISTORY_CSV" >&2
fi
echo "Snapshot: $OUT" >&2

View File

@@ -0,0 +1,68 @@
#!/usr/bin/env bash
# Prune old rows from logs/storage-growth/history.csv to cap file size.
# Keeps the CSV header plus either the last KEEP_DAYS worth of rows (proxy:
# days * 200 rows) or the last MAX_ROWS rows when --max-rows is given.
# Usage: ./scripts/monitoring/prune-storage-history.sh [--days N] [--max-rows N] [--dry-run]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
LOG_DIR="${LOG_DIR:-$PROJECT_ROOT/logs/storage-growth}"
HISTORY_CSV="${HISTORY_CSV:-$LOG_DIR/history.csv}"
KEEP_DAYS="${KEEP_DAYS:-90}"
MAX_ROWS="${MAX_ROWS:-0}"
ROWS_PER_DAY=200
DRY_RUN=0
# Flag parsing; anything unrecognized is silently skipped.
while [ $# -gt 0 ]; do
  case "$1" in
    --days) KEEP_DAYS="${2:-90}"; shift 2 ;;
    --max-rows) MAX_ROWS="${2:-0}"; shift 2 ;;
    --dry-run) DRY_RUN=1; shift ;;
    *) shift ;;
  esac
done
# Print a status message and stop successfully (all early-outs are benign).
bail() { echo "$1"; exit 0; }
[ -f "$HISTORY_CSV" ] || bail "File $HISTORY_CSV does not exist; nothing to prune."
hdr=$(head -1 "$HISTORY_CSV")
rows=$(( $(wc -l < "$HISTORY_CSV") - 1 ))   # data rows, excluding header
[ "$rows" -gt 0 ] || bail "No data rows in $HISTORY_CSV."
# Default row cap derives from the retention window.
[ "$MAX_ROWS" -gt 0 ] || MAX_ROWS=$((KEEP_DAYS * ROWS_PER_DAY))
[ "$rows" -gt "$MAX_ROWS" ] || bail "Data rows ($rows) <= keep ($MAX_ROWS); no prune needed."
excess=$((rows - MAX_ROWS))
[ "$DRY_RUN" -eq 0 ] || bail "[dry-run] Would trim to last $MAX_ROWS rows (remove $excess rows)."
# Rebuild the file atomically: header + newest MAX_ROWS rows into a temp, then rename.
tmp="${HISTORY_CSV}.$$"
{ printf '%s\n' "$hdr"; tail -n "$MAX_ROWS" "$HISTORY_CSV"; } > "$tmp"
mv "$tmp" "$HISTORY_CSV"
echo "Trimmed $HISTORY_CSV to last $MAX_ROWS data rows (removed $excess)."

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env bash
# Prune old storage growth snapshot files under logs/storage-growth/.
# Only snapshot_*.txt files older than KEEP_DAYS (by mtime) are deleted;
# history.csv and cron.log are never touched.
# Usage: ./scripts/monitoring/prune-storage-snapshots.sh [--days N] [--dry-run]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
LOG_DIR="${LOG_DIR:-$PROJECT_ROOT/logs/storage-growth}"
KEEP_DAYS="${KEEP_DAYS:-30}"
DRY_RUN=0
# Flag parsing; unrecognized arguments are skipped.
while [ $# -gt 0 ]; do
  case "$1" in
    --days) KEEP_DAYS="${2:-30}"; shift 2 ;;
    --dry-run) DRY_RUN=1; shift ;;
    *) shift ;;
  esac
done
if [ ! -d "$LOG_DIR" ]; then
  echo "Directory $LOG_DIR does not exist; nothing to prune."
  exit 0
fi
# Walk matching snapshots NUL-delimited (names are timestamped but play it safe),
# removing (or, in dry-run, reporting) each and counting as we go.
count=0
while IFS= read -r -d '' snap; do
  [ -n "$snap" ] || continue
  if [ "$DRY_RUN" -eq 1 ]; then
    echo "[dry-run] would remove: $snap"
  else
    rm -f "$snap"
  fi
  count=$((count + 1))
done < <(find "$LOG_DIR" -maxdepth 1 -name 'snapshot_*.txt' -mtime "+${KEEP_DAYS}" -print0 2>/dev/null || true)
if [ "$DRY_RUN" -eq 1 ]; then
  echo "Dry run: would remove $count snapshot(s) older than ${KEEP_DAYS} days."
else
  echo "Removed $count snapshot(s) older than ${KEEP_DAYS} days from $LOG_DIR."
fi

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env bash
# Run the NPM add-dapp-proxy-host logic on a Proxmox host (so it can reach NPMplus at 192.168.11.167).
# Usage: From project root, source .env then:
#   bash scripts/nginx-proxy-manager/add-dapp-proxy-host-via-ssh.sh
# Or: PROXMOX_HOST=192.168.11.12 NPM_EMAIL=... NPM_PASSWORD=... bash scripts/nginx-proxy-manager/add-dapp-proxy-host-via-ssh.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Temporarily relax nounset while sourcing .env (it may reference unset vars).
[ -f "$PROJECT_ROOT/.env" ] && set +u && source "$PROJECT_ROOT/.env" 2>/dev/null || true && set -u
# Host that can reach NPMplus (192.168.11.167:81). Try NPMPLUS_SSH_HOST if default cannot reach it (e.g. NPMplus VM itself).
PROXMOX_HOST="${NPMPLUS_SSH_HOST:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
NPM_EMAIL="${NPM_EMAIL:-admin@example.org}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
DAPP_IP="${IP_DAPP_LXC:-192.168.11.58}"
NPM_URL="${NPM_URL:-https://192.168.11.167:81}"
[ -z "$NPM_PASSWORD" ] && echo "Set NPM_PASSWORD (e.g. source .env)" && exit 1
# Remote script: no jq required (Proxmox host may not have it).
# Single-quoted, so $NPM_EMAIL etc. expand on the REMOTE side from exported env.
# Token is extracted from the JSON response with sed ("token", then "accessToken").
REMOTE_SCRIPT='
set -euo pipefail
[ -z "${NPM_PASSWORD:-}" ] && echo "NPM_PASSWORD not set on remote" && exit 1
AUTH_JSON="{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}"
TOKEN_RESP=$(curl -sk -X POST "$NPM_URL/api/tokens" -H "Content-Type: application/json" -d "$AUTH_JSON")
TOKEN=$(echo "$TOKEN_RESP" | sed -n "s/.*\"token\"[[:space:]]*:[[:space:]]*\"\([^\"]*\)\".*/\1/p" | head -1)
[ -z "$TOKEN" ] && TOKEN=$(echo "$TOKEN_RESP" | sed -n "s/.*\"accessToken\"[[:space:]]*:[[:space:]]*\"\([^\"]*\)\".*/\1/p" | head -1)
[ -z "$TOKEN" ] && echo "Auth failed. Response: $TOKEN_RESP" && exit 1
BODY="{\"domain_names\":[\"dapp.d-bis.org\"],\"forward_scheme\":\"http\",\"forward_host\":\"$DAPP_IP\",\"forward_port\":80,\"allow_websocket_upgrade\":true,\"certificate_id\":null,\"ssl_forced\":false}"
resp=$(curl -sk -X POST "$NPM_URL/api/nginx/proxy-hosts" -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "$BODY")
if echo "$resp" | grep -q "\"id\""; then echo "Added: dapp.d-bis.org -> $DAPP_IP:80"; else echo "Create failed: $resp"; exit 1; fi
echo "Request SSL in NPMplus UI for dapp.d-bis.org and enable Force SSL."
'
echo "Running NPM add proxy host from Proxmox host $PROXMOX_HOST (must be on same LAN as NPMplus 192.168.11.167)..."
# Escape single quotes in password for remote export: ' -> '\''
PASS_ESC="${NPM_PASSWORD//\'/\'\\\'\'}"
OUTPUT=$(ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new root@"$PROXMOX_HOST" \
  "export NPM_EMAIL='${NPM_EMAIL//\'/\'\\\'\'}' NPM_PASSWORD='$PASS_ESC' NPM_URL='$NPM_URL' DAPP_IP='$DAPP_IP'; bash -s" <<< "$REMOTE_SCRIPT" 2>&1) || true
echo "$OUTPUT"
# Success is detected from the remote script's output, not its exit status.
if ! echo "$OUTPUT" | grep -q "Added: dapp.d-bis.org"; then
  echo "Failed. Ensure this machine can SSH to $PROXMOX_HOST and that host can reach $NPM_URL (same LAN)." >&2
  exit 1
fi

View File

@@ -0,0 +1,25 @@
#!/usr/bin/env bash
# Add dapp.d-bis.org -> 192.168.11.58:80 to NPMplus primary (192.168.11.167:81)
# Usage: NPM_PASSWORD=xxx bash scripts/nginx-proxy-manager/add-dapp-proxy-host.sh
# Requires jq and direct network reach to the NPMplus API.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Temporarily relax nounset while sourcing .env (it may reference unset vars).
[ -f "$PROJECT_ROOT/.env" ] && set +u && source "$PROJECT_ROOT/.env" 2>/dev/null || true && set -u
NPM_URL="${NPM_URL:-https://192.168.11.167:81}"
NPM_EMAIL="${NPM_EMAIL:-admin@example.org}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
DAPP_IP="${IP_DAPP_LXC:-192.168.11.58}"
[ -z "$NPM_PASSWORD" ] && echo "Set NPM_PASSWORD" && exit 1
# Cookie jar captured during auth; cleaned up on any exit.
COOKIE_JAR="/tmp/npm_dapp_$$"
trap "rm -f $COOKIE_JAR" EXIT
AUTH_JSON=$(jq -n --arg identity "$NPM_EMAIL" --arg secret "$NPM_PASSWORD" '{identity:$identity,secret:$secret}')
TOKEN_RESP=$(curl -sk -X POST "$NPM_URL/api/tokens" -H "Content-Type: application/json" -d "$AUTH_JSON" -c "$COOKIE_JAR")
# Token field name varies across NPM/NPMplus versions.
TOKEN=$(echo "$TOKEN_RESP" | jq -r '.token // .accessToken // .data.token // empty')
if [ -z "$TOKEN" ]; then echo "Auth failed"; echo "$TOKEN_RESP" | jq . 2>/dev/null; exit 1; fi
BODY=$(jq -n --arg domain "dapp.d-bis.org" --arg host "$DAPP_IP" --argjson port 80 \
  '{domain_names:[$domain],forward_scheme:"http",forward_host:$host,forward_port:$port,allow_websocket_upgrade:true,certificate_id:null,ssl_forced:false}')
resp=$(curl -sk -X POST "$NPM_URL/api/nginx/proxy-hosts" -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "$BODY")
# A returned id means the proxy host was created.
id=$(echo "$resp" | jq -r '.id // empty')
if [ -n "$id" ]; then echo "Added: dapp.d-bis.org -> $DAPP_IP:80"; else echo "$resp" | jq . 2>/dev/null; exit 1; fi
echo "Request SSL in NPMplus UI for dapp.d-bis.org and enable Force SSL."

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Add NPMplus proxy hosts for Gov Portals xom-dev via direct database access
# Use when API is unreachable. Runs via SSH to Proxmox host → pct exec 10233 → docker exec npmplus
#
# Usage: bash scripts/nginx-proxy-manager/add-gov-portals-xom-dev-proxy-hosts-db.sh
set -euo pipefail
NPMPLUS_VMID="${NPMPLUS_VMID:-10233}"
NPMPLUS_NODE="${NPMPLUS_NODE:-r630-01}"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.11}"
IP_GOV="${IP_GOV_PORTALS_DEV:-192.168.11.54}"
# add_host DOMAIN PORT — upsert a proxy_host row in the NPMplus SQLite DB.
# Values cross the ssh→pct→docker boundary via -e env vars (not interpolation
# into the JS), which avoids a third level of quote escaping.
add_host() {
  local domain=$1
  local port=$2
  echo "Adding $domain -> $IP_GOV:$port..."
  ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new root@"$PROXMOX_HOST" \
    "pct exec $NPMPLUS_VMID -- docker exec -e DOMAIN='$domain' -e PORT=$port -e IP='$IP_GOV' npmplus node -e \"
const Database = require('better-sqlite3');
const db = new Database('/data/npmplus/database.sqlite');
const domain = process.env.DOMAIN;
const port = parseInt(process.env.PORT, 10);
const ip = process.env.IP;
const existing = db.prepare('SELECT id FROM proxy_host WHERE domain_names LIKE ?').get('%' + domain + '%');
if (existing) {
console.log(' Exists (id=' + existing.id + '), updating...');
db.prepare('UPDATE proxy_host SET forward_host=?, forward_port=?, forward_scheme=\\\"http\\\", enabled=1 WHERE id=?').run(ip, port, existing.id);
} else {
const maxId = db.prepare('SELECT MAX(id) as max FROM proxy_host').get();
const nextId = (maxId?.max || 0) + 1;
var now = new Date().toISOString().slice(0, 19).replace('T', ' ');
var sql = 'INSERT INTO proxy_host (id, created_on, modified_on, owner_user_id, is_deleted, domain_names, forward_host, forward_port, access_list_id, certificate_id, ssl_forced, caching_enabled, block_exploits, advanced_config, meta, allow_websocket_upgrade, http2_support, forward_scheme, enabled, locations, hsts_enabled, hsts_subdomains) VALUES (?, ?, ?, 1, 0, ?, ?, ?, 0, 0, 0, 0, 0, ?, ?, 0, 0, ?, 1, null, 0, 0)';
db.prepare(sql).run(nextId, now, now, JSON.stringify([domain]), ip, port, '', '{}', 'http');
console.log(' Created id=' + nextId);
}
db.close();
\"" 2>/dev/null
}
echo "Adding Gov Portals xom-dev proxy hosts via NPMplus DB..."
add_host "dbis.xom-dev.phoenix.sankofa.nexus" 3001
add_host "iccc.xom-dev.phoenix.sankofa.nexus" 3002
add_host "omnl.xom-dev.phoenix.sankofa.nexus" 3003
add_host "xom.xom-dev.phoenix.sankofa.nexus" 3004
# DB-level edits are not auto-detected; reload nginx so they take effect.
echo "Reloading NPMplus nginx..."
ssh root@"$PROXMOX_HOST" "pct exec $NPMPLUS_VMID -- docker exec npmplus nginx -s reload 2>/dev/null" || true
echo "Done."

View File

@@ -0,0 +1,101 @@
#!/usr/bin/env bash
# Add NPMplus proxy hosts for Gov Portals dev subdomain (*.xom-dev.phoenix.sankofa.nexus)
# Domains: dbis, iccc, omnl, xom → gov-portals-dev VM (7804) on ports 3001-3004
#
# Usage: NPM_PASSWORD=xxx bash scripts/nginx-proxy-manager/add-gov-portals-xom-dev-proxy-hosts.sh
# Or source .env and run (NPM_EMAIL, NPM_PASSWORD from proxmox root .env)
#
# Prerequisites: LXC 7804 (gov-portals-dev) must be running at IP_GOV_PORTALS_DEV
# DNS: Add A records for dbis/iccc/omnl/xom.xom-dev.phoenix.sankofa.nexus → 76.53.10.36
#      Or wildcard: *.xom-dev.phoenix.sankofa.nexus → 76.53.10.36
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Temporarily relax nounset while sourcing .env (it may reference unset vars).
[ -f "$PROJECT_ROOT/.env" ] && set +u && source "$PROJECT_ROOT/.env" 2>/dev/null || true && set -u
# Gov Portals dev VM (7804) - see scripts/deployment/deploy-gov-portals-to-7804.sh
IP_GOV_PORTALS_DEV="${IP_GOV_PORTALS_DEV:-192.168.11.54}"
NPM_URL="${NPM_URL:-https://192.168.11.167:81}"
NPM_EMAIL="${NPM_EMAIL:-admin@example.org}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
if [ -z "$NPM_PASSWORD" ]; then
  echo "Set NPM_PASSWORD (from proxmox .env or export)"
  exit 1
fi
echo "Adding Gov Portals xom-dev proxy hosts to NPMplus at $NPM_URL..."
echo "Target: $IP_GOV_PORTALS_DEV (ports 3001-3004)"
# Cookie jar doubles as auth storage for NPM variants that use session cookies.
COOKIE_JAR="/tmp/npm_gov_portals_cookies_$$"
cleanup_cookies() { rm -f "$COOKIE_JAR"; }
trap cleanup_cookies EXIT
AUTH_JSON=$(jq -n --arg identity "$NPM_EMAIL" --arg secret "$NPM_PASSWORD" '{identity:$identity,secret:$secret}')
TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" -H "Content-Type: application/json" -d "$AUTH_JSON" -c "$COOKIE_JAR")
# Token field name varies across NPM/NPMplus versions.
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // .accessToken // .access_token // .data.token // empty' 2>/dev/null)
USE_COOKIE_AUTH=0
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then
  # No bearer token, but a response with "expires" means a session cookie was set.
  if echo "$TOKEN_RESPONSE" | jq -e '.expires' >/dev/null 2>&1; then
    USE_COOKIE_AUTH=1
    echo "Using cookie-based auth (NPM 2 style)."
  else
    echo "Authentication failed"
    echo "$TOKEN_RESPONSE" | jq -r '.message // .error // "unknown"' 2>/dev/null || echo "$TOKEN_RESPONSE"
    exit 1
  fi
fi
# curl_auth ARGS... — curl with whichever auth mechanism the login produced.
curl_auth() {
  if [ "$USE_COOKIE_AUTH" = "1" ]; then
    curl -s -k -b "$COOKIE_JAR" "$@"
  else
    curl -s -k -H "Authorization: Bearer $TOKEN" "$@"
  fi
}
# add_proxy_host DOMAIN PORT — create one proxy host entry; returns non-zero
# (without aborting, callers append || true) when the API rejects it.
add_proxy_host() {
  local domain=$1
  local fwd_port=$2
  local payload
  payload=$(jq -n \
    --arg domain "$domain" \
    --arg host "$IP_GOV_PORTALS_DEV" \
    --argjson port "$fwd_port" \
    '{
      domain_names: [$domain],
      forward_scheme: "http",
      forward_host: $host,
      forward_port: $port,
      allow_websocket_upgrade: false,
      block_exploits: false,
      certificate_id: null,
      ssl_forced: false
    }')
  local resp
  resp=$(curl_auth -X POST "$NPM_URL/api/nginx/proxy-hosts" \
    -H "Content-Type: application/json" \
    -d "$payload")
  local id
  id=$(echo "$resp" | jq -r '.id // empty' 2>/dev/null)
  if [ -n "$id" ] && [ "$id" != "null" ]; then
    echo "  Added: $domain -> $IP_GOV_PORTALS_DEV:$fwd_port"
    return 0
  else
    echo "  Skip (may exist): $domain - $(echo "$resp" | jq -r '.message // .error // "unknown"' 2>/dev/null)"
    return 1
  fi
}
# Four portals on xom-dev.phoenix.sankofa.nexus
add_proxy_host "dbis.xom-dev.phoenix.sankofa.nexus" 3001 || true
add_proxy_host "iccc.xom-dev.phoenix.sankofa.nexus" 3002 || true
add_proxy_host "omnl.xom-dev.phoenix.sankofa.nexus" 3003 || true
add_proxy_host "xom.xom-dev.phoenix.sankofa.nexus" 3004 || true
echo ""
echo "Done. Request Let's Encrypt certs in NPMplus UI for: dbis/iccc/omnl/xom.xom-dev.phoenix.sankofa.nexus"
echo "Ensure DNS A records point *.xom-dev.phoenix.sankofa.nexus → 76.53.10.36"

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash
# Complete NPMplus setup for dapp.d-bis.org: add proxy host, then remind to request SSL in UI.
# Run from a host that can reach NPMplus (192.168.11.167:81), e.g. your machine on the same LAN.
#
# Usage (from proxmox repo root):
# source .env && bash scripts/nginx-proxy-manager/complete-dapp-npmplus-from-lan.sh
# Or with explicit password:
# NPM_PASSWORD=xxx source .env && bash scripts/nginx-proxy-manager/complete-dapp-npmplus-from-lan.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
source config/ip-addresses.conf 2>/dev/null || true
# Source .env with nounset relaxed (it may reference unset vars); always restore -u.
if [ -f .env ]; then
  set +u
  source .env 2>/dev/null || true
  set -u
fi
NPM_URL="${NPM_URL:-https://192.168.11.167:81}"
DAPP_IP="${IP_DAPP_LXC:-192.168.11.58}"
# Check reachability. curl prints "000" as %{http_code} when the connection
# fails, so require a real 1xx-5xx status; the previous `grep -q '[0-9]'`
# matched "000" and let the script proceed against an unreachable NPMplus.
HTTP_CODE=$(curl -sk --connect-timeout 5 -o /dev/null -w "%{http_code}" "$NPM_URL/" 2>/dev/null || true)
if ! [[ "$HTTP_CODE" =~ ^[1-5][0-9][0-9]$ ]]; then
  echo "Cannot reach NPMplus at $NPM_URL (not on LAN or firewall)."
  echo "Run this script from a host on 192.168.11.x that can reach $NPM_URL"
  echo "Or add the proxy manually: NPM UI → Proxy Hosts → Add: dapp.d-bis.org → $DAPP_IP:80, then request SSL."
  exit 1
fi
[ -z "${NPM_PASSWORD:-}" ] && echo "Set NPM_PASSWORD (e.g. source .env)" && exit 1
echo "Adding dapp.d-bis.org -> $DAPP_IP:80 on NPMplus..."
bash scripts/nginx-proxy-manager/add-dapp-proxy-host.sh
echo ""
echo "Next: In NPMplus UI (https://192.168.11.167:81) open the proxy for dapp.d-bis.org, request SSL, and enable Force SSL."

View File

@@ -13,7 +13,7 @@ source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Fourth NPMplus (dev/Codespaces)
NPMPLUS_FOURTH_IP="${IP_NPMPLUS_FOURTH:-192.168.11.170}"
IP_DEV_VM="${IP_DEV_VM:-192.168.11.60}"
IP_DEV_VM="${IP_DEV_VM:-192.168.11.59}"
PROXMOX_ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
PROXMOX_R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
PROXMOX_R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
@@ -28,7 +28,7 @@ if [ -z "$NPM_PASSWORD" ]; then
exit 1
fi
echo "Adding proxy hosts to NPMplus Fourth (dev/Codespaces) at $NPM_URL..."
echo "Adding or updating proxy hosts on NPMplus Fourth (dev/Codespaces) at $NPM_URL..."
# Authenticate (NPM 2 may use cookie-only)
COOKIE_JAR="/tmp/npm_fourth_cookies_$$"
@@ -96,15 +96,117 @@ add_proxy_host() {
fi
}
# Update existing proxy host (forward_host, forward_port). Use when "already in use" on POST.
# Set DEBUG_NPM_FOURTH=1 to print API responses when update fails.
#
# Args: $1 domain, $2 forward host, $3 forward port, $4 websocket flag (default false).
# Walks through progressively different PUT payload shapes because NPM API
# versions disagree on accepted fields; returns 0 on the first accepted PUT,
# 1 if the host cannot be found or every variant is rejected. The fallback
# order is deliberate — do not reorder.
update_proxy_host() {
  local domain=$1
  local fwd_host=$2
  local fwd_port=$3
  local ws=${4:-false}
  local hosts_json
  hosts_json=$(curl_auth -X GET "$NPM_URL/api/nginx/proxy-hosts" 2>/dev/null)
  # NPM 2 may return array or { data: array }; normalize to array for jq
  local arr
  arr=$(echo "$hosts_json" | jq -c 'if type == "array" then . elif .data != null then .data elif .proxy_hosts != null then .proxy_hosts else empty end' 2>/dev/null)
  if [ -z "$arr" ] || [ "$arr" = "null" ]; then
    [ -n "${DEBUG_NPM_FOURTH:-}" ] && echo " [DEBUG] GET response (first 400 chars): $(echo "$hosts_json" | head -c 400)"
    return 1
  fi
  # Resolve the record id whose domain_names contains $domain (first match only).
  local id
  id=$(echo "$arr" | jq -r --arg dom "$domain" '(if type == "array" then . else [.] end) | .[] | select(.domain_names | type == "array") | select(.domain_names[] == $dom) | .id' 2>/dev/null | head -n1)
  if [ -z "$id" ] || [ "$id" = "null" ]; then
    [ -n "${DEBUG_NPM_FOURTH:-}" ] && echo " [DEBUG] GET response (first 300 chars): $(echo "$hosts_json" | head -c 300)"
    return 1
  fi
  # Attempt 1: minimal snake_case payload (block_exploits style).
  local payload
  payload=$(jq -n \
    --arg scheme "http" --arg host "$fwd_host" --argjson port "$fwd_port" --argjson ws "$ws" \
    '{ forward_scheme: $scheme, forward_host: $host, forward_port: $port, allow_websocket_upgrade: $ws, block_exploits: false }')
  local resp
  resp=$(curl_auth -X PUT "$NPM_URL/api/nginx/proxy-hosts/$id" \
    -H "Content-Type: application/json" \
    -d "$payload")
  local out_id
  out_id=$(echo "$resp" | jq -r '.id // empty' 2>/dev/null)
  if [ -n "$out_id" ] && [ "$out_id" != "null" ]; then
    echo " Updated: $domain -> $fwd_host:$fwd_port (websocket=$ws)"
    return 0
  fi
  # NPM 2 camelCase fallback
  payload=$(jq -n \
    --arg scheme "http" --arg host "$fwd_host" --argjson port "$fwd_port" --argjson ws "$ws" \
    '{ forward_scheme: $scheme, forward_host: $host, forward_port: $port, allow_websocket_upgrade: $ws, blockCommonExploits: false }')
  resp=$(curl_auth -X PUT "$NPM_URL/api/nginx/proxy-hosts/$id" -H "Content-Type: application/json" -d "$payload")
  out_id=$(echo "$resp" | jq -r '.id // empty' 2>/dev/null)
  if [ -n "$out_id" ] && [ "$out_id" != "null" ]; then
    echo " Updated: $domain -> $fwd_host:$fwd_port (websocket=$ws)"
    return 0
  fi
  # Minimal payload (some NPM 2 instances reject block_exploits / blockCommonExploits on PUT)
  payload=$(jq -n --arg host "$fwd_host" --argjson port "$fwd_port" '{ forward_host: $host, forward_port: $port }')
  resp=$(curl_auth -X PUT "$NPM_URL/api/nginx/proxy-hosts/$id" -H "Content-Type: application/json" -d "$payload")
  out_id=$(echo "$resp" | jq -r '.id // empty' 2>/dev/null)
  if [ -n "$out_id" ] && [ "$out_id" != "null" ]; then
    echo " Updated: $domain -> $fwd_host:$fwd_port (minimal PUT)"
    return 0
  fi
  # GET full record; build PUT payload with only allowed fields (exclude meta, created_on, modified_on)
  local host_obj
  host_obj=$(echo "$arr" | jq -c --arg dom "$domain" '(if type == "array" then . else [.] end) | .[] | select(.domain_names | type == "array") | select(.domain_names[] == $dom)' 2>/dev/null | head -n1)
  if [ -n "$host_obj" ]; then
    # Attempt 4: the server's own record, with forwards patched in.
    payload=$(echo "$host_obj" | jq -c --arg host "$fwd_host" --argjson port "$fwd_port" --argjson ws "$ws" '
      . + {forward_host: $host, forward_port: $port, allow_websocket_upgrade: $ws}
      | del(.meta, .created_on, .modified_on)
    ' 2>/dev/null)
    if [ -n "$payload" ]; then
      resp=$(curl_auth -X PUT "$NPM_URL/api/nginx/proxy-hosts/$id" -H "Content-Type: application/json" -d "$payload")
      out_id=$(echo "$resp" | jq -r '.id // empty' 2>/dev/null)
      if [ -n "$out_id" ] && [ "$out_id" != "null" ]; then
        echo " Updated: $domain -> $fwd_host:$fwd_port (full body PUT)"
        return 0
      fi
      [ -n "${DEBUG_NPM_FOURTH:-}" ] && echo " [DEBUG] PUT response: $resp"
    fi
    # Try only fields that appear in NPM 2 schema (no locations, no meta)
    payload=$(echo "$host_obj" | jq -c --arg host "$fwd_host" --argjson port "$fwd_port" --argjson ws "$ws" '
      { domain_names, forward_scheme, forward_host: $host, forward_port: $port,
      allow_websocket_upgrade: $ws, block_exploits, certificate_id, ssl_forced,
      caching_enabled, advanced_config, access_list_id, enabled, http2_support, hsts_enabled, hsts_subdomains }
    ' 2>/dev/null)
    if [ -n "$payload" ]; then
      resp=$(curl_auth -X PUT "$NPM_URL/api/nginx/proxy-hosts/$id" -H "Content-Type: application/json" -d "$payload")
      out_id=$(echo "$resp" | jq -r '.id // empty' 2>/dev/null)
      if [ -n "$out_id" ] && [ "$out_id" != "null" ]; then
        echo " Updated: $domain -> $fwd_host:$fwd_port (schema fields PUT)"
        return 0
      fi
      [ -n "${DEBUG_NPM_FOURTH:-}" ] && echo " [DEBUG] PUT schema response: $resp"
    fi
  fi
  # All variants rejected: tell the operator how to finish by hand.
  echo " Warning: could not update $domain via API. Set Forward host to $fwd_host and port $fwd_port in NPM UI for $domain."
  echo " Manual steps: docs/05-network/CHECK_ALL_UPDATES_AND_CLOUDFLARE_TUNNELS.md §8 (https://192.168.11.170:81)"
  return 1
}
# Idempotent upsert: POST a new proxy host first; when that fails (typically
# "domain already in use") fall back to PUT-updating the existing record.
# Args: $1 domain, $2 forward host, $3 forward port, $4 websocket flag (default false).
add_or_update_proxy_host() {
  local dom=$1 host=$2 port=$3 websocket=${4:-false}
  add_proxy_host "$dom" "$host" "$port" "$websocket" \
    || update_proxy_host "$dom" "$host" "$port" "$websocket"
}
# Dev VM (Gitea on 3000); dev and codespaces as aliases
add_proxy_host "dev.d-bis.org" "$IP_DEV_VM" 3000 false || true
add_proxy_host "gitea.d-bis.org" "$IP_DEV_VM" 3000 false || true
add_proxy_host "codespaces.d-bis.org" "$IP_DEV_VM" 3000 false || true
add_or_update_proxy_host "dev.d-bis.org" "$IP_DEV_VM" 3000 false || true
add_or_update_proxy_host "gitea.d-bis.org" "$IP_DEV_VM" 3000 false || true
add_or_update_proxy_host "codespaces.d-bis.org" "$IP_DEV_VM" 3000 false || true
# Proxmox VE admin panels (port 8006; websocket required for console)
add_proxy_host "pve.ml110.d-bis.org" "$PROXMOX_ML110" 8006 true || true
add_proxy_host "pve.r630-01.d-bis.org" "$PROXMOX_R630_01" 8006 true || true
add_proxy_host "pve.r630-02.d-bis.org" "$PROXMOX_R630_02" 8006 true || true
add_or_update_proxy_host "pve.ml110.d-bis.org" "$PROXMOX_ML110" 8006 true || true
add_or_update_proxy_host "pve.r630-01.d-bis.org" "$PROXMOX_R630_01" 8006 true || true
add_or_update_proxy_host "pve.r630-02.d-bis.org" "$PROXMOX_R630_02" 8006 true || true
echo ""
echo "Done. Request Let's Encrypt certs in NPMplus UI (Fourth instance) for: dev, gitea, codespaces, pve.ml110, pve.r630-01, pve.r630-02."

View File

@@ -97,6 +97,52 @@ if [ $? -ne 0 ]; then
exit 1
fi
# Function to add proxy host (POST) when domain does not exist.
# Args: $1 domain, $2 forward host, $3 forward port, $4 websocket flag,
# $5 block_exploits ("true"/anything else; defaults to false).
# Prints ✅/❌ status; returns non-zero when the payload cannot be built or the
# API does not return an id.
add_proxy_host() {
  local domain=$1 forward_host=$2 forward_port=$3 websocket=$4
  local block_exploits=${5:-false}
  # Normalize block_exploits into a JSON boolean before handing it to --argjson.
  local be_json=false
  [ "$block_exploits" = "true" ] && be_json=true
  local payload
  payload=$(jq -n \
    --arg domain "$domain" \
    --arg host "$forward_host" \
    --argjson port "$forward_port" \
    --argjson ws "$websocket" \
    --argjson block_exploits "$be_json" \
    '{
      domain_names: [$domain],
      forward_scheme: "http",
      forward_host: $host,
      forward_port: $port,
      allow_websocket_upgrade: $ws,
      block_exploits: $block_exploits,
      certificate_id: null,
      ssl_forced: false
    }' 2>/dev/null)
  if [ -z "$payload" ]; then
    echo " ❌ Failed to build payload for $domain"
    return 1
  fi
  local resp
  resp=$(curl -s -k -X POST "$NPM_URL/api/nginx/proxy-hosts" \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/json" \
    -d "$payload" 2>/dev/null)
  local id
  id=$(echo "$resp" | jq -r '.id // empty' 2>/dev/null)
  if [ -z "$id" ] || [ "$id" = "null" ]; then
    local err
    err=$(echo "$resp" | jq -r '.message // .error // "unknown"' 2>/dev/null)
    echo " ❌ Add failed for $domain: $err"
    return 1
  fi
  echo " ✅ Added: $domain -> http://${forward_host}:${forward_port} (WebSocket: $websocket)"
  return 0
}
# Function to update proxy host
# block_exploits: set false for RPC hosts (JSON-RPC uses POST to /; block_exploits can cause 405)
update_proxy_host() {
@@ -183,6 +229,9 @@ update_proxy_host "rpc.d-bis.org" "http://${RPC_PUBLIC_1}:8545" true false && up
update_proxy_host "rpc2.d-bis.org" "http://${RPC_PUBLIC_1}:8545" true false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))
update_proxy_host "ws.rpc.d-bis.org" "http://${RPC_PUBLIC_1}:8546" true false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))
update_proxy_host "ws.rpc2.d-bis.org" "http://${RPC_PUBLIC_1}:8546" true false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))
# Fireblocks-dedicated RPC (VMID 2301)
update_proxy_host "rpc-fireblocks.d-bis.org" "http://${RPC_FIREBLOCKS_1:-${RPC_PRIVATE_1}}:8545" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "rpc-fireblocks.d-bis.org" "${RPC_FIREBLOCKS_1:-${RPC_PRIVATE_1}}" 8545 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "ws.rpc-fireblocks.d-bis.org" "http://${RPC_FIREBLOCKS_1:-${RPC_PRIVATE_1}}:8546" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "ws.rpc-fireblocks.d-bis.org" "${RPC_FIREBLOCKS_1:-${RPC_PRIVATE_1}}" 8546 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "dbis-admin.d-bis.org" "http://${IP_DBIS_FRONTEND:-192.168.11.130}:80" false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))
update_proxy_host "dbis-api.d-bis.org" "http://${IP_DBIS_API:-192.168.11.155}:3000" false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))
update_proxy_host "dbis-api-2.d-bis.org" "http://${IP_DBIS_API_2:-192.168.11.156}:3000" false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))

View File

@@ -24,10 +24,12 @@ SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_info "Setting up certificate synchronization..."

View File

@@ -50,15 +50,20 @@ done
# Deploy scripts and configs
log_info "Deploying Keepalived configuration..."
bash "$SCRIPT_DIR/deploy-keepalived.sh" || {
log_warn "Deployment script failed, deploying manually..."
# Deploy health check script
KEEPALIVED_DIR="$SCRIPT_DIR/keepalived"
if [ ! -f "$KEEPALIVED_DIR/check-npmplus-health.sh" ]; then
log_warn "check-npmplus-health.sh not found, deploying configs only"
fi
{
# Deploy health check and notify scripts
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
[ -f "$KEEPALIVED_DIR/check-npmplus-health.sh" ] && \
scp -o StrictHostKeyChecking=no \
"$SCRIPT_DIR/keepalived/check-npmplus-health.sh" \
"$SCRIPT_DIR/keepalived/keepalived-notify.sh" \
root@"$host:/usr/local/bin/" 2>/dev/null || true
"$KEEPALIVED_DIR/check-npmplus-health.sh" \
root@"$host:/usr/local/bin/check-npmplus-health.sh" 2>/dev/null || true
scp -o StrictHostKeyChecking=no \
"$KEEPALIVED_DIR/keepalived-notify.sh" \
root@"$host:/usr/local/bin/keepalived-notify.sh" 2>/dev/null || true
ssh -o StrictHostKeyChecking=no root@"$host" \
"chmod +x /usr/local/bin/check-npmplus-health.sh /usr/local/bin/keepalived-notify.sh" 2>/dev/null || true

View File

@@ -0,0 +1,45 @@
#!/bin/bash
# Phase 4: Sync NPMplus configuration to secondary (certificates + optional API export)
# Primary config is source of truth; secondary receives certs and can import config if needed.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Relax strict mode while sourcing .env (it may reference unset vars or fail),
# then restore it for the rest of the script.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi
# Defaults; all overridable from the environment or .env.
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
# ANSI colors for the log helpers below.
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_info "Phase 4: Syncing configuration to secondary..."
# Cert sync (required for SSL on failover)
log_info "Running certificate sync..."
bash "$SCRIPT_DIR/sync-certificates.sh" || log_warn "Certificate sync had warnings (secondary may already have certs)"
# Optional: sync proxy config via API (requires NPM_PASSWORD in .env)
if [ -n "${NPM_PASSWORD:-}" ]; then
  log_info "Running config export/sync..."
  bash "$SCRIPT_DIR/sync-config.sh" 2>/dev/null || log_warn "Config sync skipped or failed (use import-secondary-config.sh if needed)"
else
  log_warn "NPM_PASSWORD not set; skipping API config sync. Use import-secondary-config.sh to copy config to secondary."
fi
log_success "Phase 4 complete: Configuration sync done"
exit 0

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Check NPMplus health for Keepalived. Return 0 if healthy, 1 if unhealthy.
# Deploy to /usr/local/bin/check-npmplus-health.sh on both Proxmox hosts.
set -euo pipefail
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
# Pick the CT to probe from the Proxmox host name; on an unrecognized host,
# fall back to whichever of the two NPMplus CTs is currently running.
case "$(hostname 2>/dev/null || echo "")" in
  r630-01) VMID="$PRIMARY_VMID" ;;
  r630-02) VMID="$SECONDARY_VMID" ;;
  *)
    if pct status "$PRIMARY_VMID" 2>/dev/null | grep -q "running"; then
      VMID="$PRIMARY_VMID"
    elif pct status "$SECONDARY_VMID" 2>/dev/null | grep -q "running"; then
      VMID="$SECONDARY_VMID"
    else
      exit 1
    fi
    ;;
esac
# Three checks, cheapest first; any failure marks the node unhealthy.
# 1. The LXC container itself must be running.
pct status "$VMID" 2>/dev/null | grep -q "running" || exit 1
# 2. The npmplus Docker container inside the CT must report Up/healthy.
pct exec "$VMID" -- docker ps --filter "name=npmplus" --format "{{.Status}}" 2>/dev/null | grep -qE "healthy|Up" || exit 1
# 3. The NPMplus admin/API endpoint must answer within 5 seconds.
pct exec "$VMID" -- curl -s -k -f -o /dev/null --max-time 5 https://localhost:81 2>/dev/null || exit 1
exit 0

View File

@@ -1,19 +1,19 @@
#!/bin/bash
# Handle Keepalived state changes
# This script should be deployed to /usr/local/bin/keepalived-notify.sh on Proxmox hosts
# Deploy to /usr/local/bin/keepalived-notify.sh on Proxmox hosts (no project root needed)
set -euo pipefail
# Load IP configuration
# Default VIP (used when not run from repo)
VIP="${VIP:-192.168.11.166}"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VIP="${VIP:-${IP_NPMPLUS_ETH0:-192.168.11.166}}"
STATE="${1:-unknown}"
LOGFILE="/var/log/keepalived-notify.log"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
VIP="${VIP:-${IP_NPMPLUS_ETH0:-192.168.11.166}}"
# Ensure log directory exists
mkdir -p "$(dirname "$LOGFILE")"

View File

@@ -1,5 +1,6 @@
#!/usr/bin/env bash
# Monitor HA status and send alerts if needed
# Monitor HA status and send alerts if needed.
# Optional: ALERT_EMAIL (mail) or ALERT_WEBHOOK (Slack/Discord/Teams JSON) for alerts.
set -euo pipefail

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env bash
# Create a Proxmox API token via SSH and optionally update .env with the values.
# Usage:
# ./scripts/proxmox/create-and-store-proxmox-api-token.sh [--dry-run] [--no-update-env]
# PROXMOX_HOST=192.168.11.12 ./scripts/proxmox/create-and-store-proxmox-api-token.sh # different host
#
# Requires: SSH access to PROXMOX_HOST (default 192.168.11.11). Writes to repo root .env
# unless --no-update-env. Token secret is only shown once at creation; script parses and stores it.
# See: .env.example (PROXMOX_HOST, PROXMOX_USER, PROXMOX_TOKEN_NAME, PROXMOX_TOKEN_VALUE)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "$PROJECT_ROOT/config/ip-addresses.conf" ]] && source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Source .env under allexport, but ALWAYS restore +a. The previous one-liner
# (`… && set -a && source … && set +a || true`) skipped `set +a` when sourcing
# failed, leaving allexport on and silently exporting every later assignment
# (including the freshly created token) into child-process environments.
if [[ -f "$PROJECT_ROOT/.env" ]]; then
  set -a
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set +a
fi
DRY_RUN=false
NO_UPDATE_ENV=false
for a in "$@"; do
  [[ "$a" == "--dry-run" ]] && DRY_RUN=true
  [[ "$a" == "--no-update-env" ]] && NO_UPDATE_ENV=true
done
HOST="${PROXMOX_HOST:-${PROXMOX_R630_01:-192.168.11.11}}"
# PVE_USER (was USER): avoid shadowing the login user's USER environment variable.
PVE_USER="${PROXMOX_USER:-root@pam}"
TOKEN_NAME="${PROXMOX_TOKEN_NAME:-proxmox-workspace-api}"
ENV_FILE="${PROJECT_ROOT}/.env"
echo "Proxmox API token creation"
echo " Host: $HOST User: $PVE_USER Token name: $TOKEN_NAME dry-run: $DRY_RUN update-env: $([ "$NO_UPDATE_ENV" = true ] && echo no || echo yes)"
echo ""
if [[ "$DRY_RUN" == true ]]; then
  echo "[DRY-RUN] Would SSH to root@$HOST and run: pveum user token add $PVE_USER $TOKEN_NAME --privsep=0"
  echo "[DRY-RUN] Would parse 'value' from output and update $ENV_FILE (PROXMOX_HOST, PROXMOX_USER, PROXMOX_TOKEN_NAME, PROXMOX_TOKEN_VALUE)"
  exit 0
fi
# Create token on host; output is table with key/value including 'value' = token secret
OUTPUT=$(ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no "root@$HOST" "pveum user token add $PVE_USER $TOKEN_NAME --privsep=0 2>&1" || true)
if [[ -z "$OUTPUT" ]]; then
  echo "ERROR: No output from pveum (SSH or command failed)." >&2
  exit 1
fi
# Parse token value (UUID). Proxmox prints table: │ value │ xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx │
TOKEN_VALUE=$(echo "$OUTPUT" | sed -n 's/.*│ value *│ *\([a-f0-9-]*\) *│.*/\1/p' | tr -d ' ')
if [[ -z "$TOKEN_VALUE" ]]; then
  # Fallback: line containing "value" and a UUID pattern
  TOKEN_VALUE=$(echo "$OUTPUT" | grep -oE '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' | head -1)
fi
if [[ -z "$TOKEN_VALUE" ]]; then
  echo "ERROR: Could not parse token value from pveum output. Token may already exist (delete it first or use another TOKEN_NAME)." >&2
  echo "Output was:" >&2
  echo "$OUTPUT" >&2
  exit 1
fi
echo "Token created: $PVE_USER!$TOKEN_NAME (value stored below)"
echo ""
if [[ "$NO_UPDATE_ENV" == true ]]; then
  echo "Add to $ENV_FILE:"
  echo " PROXMOX_HOST=$HOST"
  echo " PROXMOX_PORT=8006"
  echo " PROXMOX_USER=$PVE_USER"
  echo " PROXMOX_TOKEN_NAME=$TOKEN_NAME"
  echo " PROXMOX_TOKEN_VALUE=$TOKEN_VALUE"
  exit 0
fi
# Update or append .env
if [[ ! -f "$ENV_FILE" ]]; then
  touch "$ENV_FILE"
fi
# For each variable: replace an existing line in-place, otherwise append.
for VAR in PROXMOX_HOST PROXMOX_PORT PROXMOX_USER PROXMOX_TOKEN_NAME PROXMOX_TOKEN_VALUE; do
  case "$VAR" in
    PROXMOX_HOST) VAL="$HOST" ;;
    PROXMOX_PORT) VAL="8006" ;;
    PROXMOX_USER) VAL="$PVE_USER" ;;
    PROXMOX_TOKEN_NAME) VAL="$TOKEN_NAME" ;;
    PROXMOX_TOKEN_VALUE) VAL="$TOKEN_VALUE" ;;
    *) VAL="" ;;
  esac
  if grep -q "^${VAR}=" "$ENV_FILE" 2>/dev/null; then
    sed -i "s|^${VAR}=.*|${VAR}=${VAL}|" "$ENV_FILE"
  else
    echo "${VAR}=${VAL}" >> "$ENV_FILE"
  fi
done
echo "Updated $ENV_FILE with PROXMOX_HOST, PROXMOX_USER, PROXMOX_TOKEN_NAME, PROXMOX_TOKEN_VALUE."
echo "Run validation: ./scripts/run-completable-tasks-from-anywhere.sh (optional PROXMOX_* will no longer warn if set)."

View File

@@ -0,0 +1,124 @@
#!/usr/bin/env bash
# Push all nine HYBX sidecar repos to Gitea (HYBX org).
# Usage: GITEA_TOKEN=xxx bash scripts/push-hybx-sidecars-to-gitea.sh [--dry-run] [--pull-first|--sync] [REPO_NAME]
# Optional: HYBX_SIDECARS_BASE=/path (default: /home/intlc/projects/HYBX_Sidecars)
# Optional: ONLY_REPO=name or pass REPO_NAME as first non-flag arg to sync a single repo.
# Requires: GITEA_TOKEN with push access to gitea.d-bis.org HYBX org.
# --pull-first / --sync: pull from gitea (stashing local changes if needed) then push; syncs all sidecars and docs.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Repo root = parent of scripts/ (proxmox)
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Load GITEA_TOKEN from repo root .env (allow unset vars during source)
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +u
  source "$PROJECT_ROOT/.env"
  set -u
fi
GITEA_ORG="${GITEA_ORG:-HYBX}"
GITEA_URL="${GITEA_URL:-https://gitea.d-bis.org}"
BASE="${HYBX_SIDECARS_BASE:-/home/intlc/projects/HYBX_Sidecars}"
DRY_RUN=false
PULL_FIRST=false
ONLY_REPO="${ONLY_REPO:-}"
# Parse ALL arguments. (Fix: the previous loop iterated only over
# "${1:-}" "${2:-}" "${3:-}", silently ignoring any flag or repo name
# given in position 4 or later.)
for arg in "$@"; do
  case "$arg" in
    --dry-run) DRY_RUN=true ;;
    --pull-first|--sync) PULL_FIRST=true ;;
    # Single-repo: first non-flag arg wins; later ones are ignored.
    *) ONLY_REPO="${ONLY_REPO:-$arg}" ;;
  esac
done
# Nine HYBX sidecar repos (see docs/11-references/GITEA_HYBX_ORGANIZATION_AND_REPOS.md)
SIDECAR_REPOS=(
  mifos-fineract-sidecar
  mt103-hardcopy-sidecar
  off-ledger-2-on-ledger-sidecar
  securitization-engine-sidecar
  card-networks-sidecar
  securities-sidecar
  flash-loan-xau-sidecar
  server-funds-sidecar
  docs-for-sidecars
)
if [ -z "${GITEA_TOKEN:-}" ] && [ "$DRY_RUN" = false ]; then
  echo "Set GITEA_TOKEN to push to Gitea (e.g. from Gitea Settings → Applications)."
  exit 1
fi
echo "Gitea: $GITEA_URL | Org: $GITEA_ORG | Base: $BASE | DRY_RUN=$DRY_RUN | PULL_FIRST=$PULL_FIRST"
# Use credential helper so GITEA_TOKEN is never echoed in git output (remote URL stays token-free)
export GITEA_TOKEN
CRED_HELPER="!f() { echo \"username=git\"; echo \"password=\$GITEA_TOKEN\"; }; f"
ok=0
skip=0
fail=0
for name in "${SIDECAR_REPOS[@]}"; do
  if [ -n "$ONLY_REPO" ] && [ "$name" != "$ONLY_REPO" ]; then
    continue
  fi
  dir="$BASE/$name"
  if [ ! -d "$dir" ]; then
    echo " $name: skip (dir not found: $dir)"
    ((skip++)) || true
    continue
  fi
  if [ ! -d "$dir/.git" ]; then
    echo " $name: skip (not a git repo)"
    ((skip++)) || true
    continue
  fi
  branch=$(git -C "$dir" branch --show-current 2>/dev/null || echo "main")
  if [ "$DRY_RUN" = true ]; then
    echo " [DRY-RUN] $name: would push branch $branch"
    ((ok++)) || true
    continue
  fi
  if ! git -C "$dir" remote get-url gitea &>/dev/null; then
    git -C "$dir" remote add gitea "$GITEA_URL/$GITEA_ORG/$name.git"
  fi
  # If merge in progress (e.g. docs-for-sidecars after conflict resolve), complete it before pull/push
  if [ -f "$dir/.git/MERGE_HEAD" ]; then
    echo " $name: completing merge..."
    git -C "$dir" add README.md 2>/dev/null || true
    git -C "$dir" add -A 2>/dev/null || true
    git -C "$dir" commit -m "Merge gitea/main into main" --no-edit 2>/dev/null && echo " $name: merge commit done" || true
  fi
  if [ "$PULL_FIRST" = true ]; then
    remote=gitea
    git -C "$dir" remote get-url "$remote" &>/dev/null || remote=origin
    # Skip pull if we still have unmerged files (merge just completed or unresolved)
    if [ -f "$dir/.git/MERGE_HEAD" ]; then
      echo " $name: skip pull (merge still in progress)"
    elif git -C "$dir" -c "credential.helper=$CRED_HELPER" fetch "$remote" "$branch" 2>/dev/null; then
      # Only pull when the remote branch is not already an ancestor of local.
      if ! git -C "$dir" merge-base --is-ancestor "$remote/$branch" "$branch" 2>/dev/null; then
        echo " $name: pulling from $remote/$branch..."
        stashed=false
        if ! git -C "$dir" diff --quiet 2>/dev/null || ! git -C "$dir" diff --cached --quiet 2>/dev/null; then
          git -C "$dir" stash push -m "push-hybx-sidecars: before pull" 2>/dev/null && stashed=true
        fi
        git -C "$dir" -c "credential.helper=$CRED_HELPER" pull --rebase "$remote" "$branch" 2>&1 || true
        if [ "$stashed" = true ]; then
          git -C "$dir" stash pop 2>/dev/null || true
        fi
      fi
    fi
  fi
  if git -C "$dir" -c "credential.helper=$CRED_HELPER" push gitea "$branch" --set-upstream 2>&1; then
    echo " $name: pushed ($branch)"
    ((ok++)) || true
  else
    echo " $name: push failed" >&2
    ((fail++)) || true
  fi
done
echo ""
echo "Done: $ok pushed, $skip skipped, $fail failed"
[ "$fail" -gt 0 ] && exit 1
exit 0

View File

@@ -113,6 +113,15 @@ if [ "$NEED_CERT_COUNT" = "0" ]; then
exit 0
fi
# Optional: only process domains matching this grep pattern (e.g. "rpc-fireblocks|ws.rpc-fireblocks")
if [ -n "${CERT_DOMAINS_FILTER:-}" ]; then
NEED_CERT_LIST=$(echo "$NEED_CERT_LIST" | grep -E "$CERT_DOMAINS_FILTER" || true)
NEED_CERT_COUNT=$(echo "$NEED_CERT_LIST" | grep -c . 2>/dev/null || echo "0")
log_info "CERT_DOMAINS_FILTER=$CERT_DOMAINS_FILTER $NEED_CERT_COUNT host(s) to process"
[ "$NEED_CERT_COUNT" = "0" ] && log_warn "No hosts match filter; nothing to do." && exit 0
echo ""
fi
# FIRST_ONLY: process only the first host (verify renewal/working before adding rest)
FIRST_ONLY="${FIRST_ONLY:-0}"
if [ "$FIRST_ONLY" = "1" ] || [ "$FIRST_ONLY" = "true" ] || [ "$FIRST_ONLY" = "yes" ]; then

View File

@@ -10,8 +10,13 @@ PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
# VMID 2101 (Core RPC) is on r630-01; override PROXMOX_HOST if needed
VMID="${VMID:-2101}"
if [[ "$VMID" == "2101" ]]; then
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
else
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
fi
# Colors
RED='\033[0;31m'

View File

@@ -1,6 +1,8 @@
#!/usr/bin/env bash
# Review all Sentry and RPC nodes: status, service, and optional tx-pool eviction.
# Review all Besu VMs: validators, sentries, and RPC nodes (status, service, block height).
# Optional: apply tx-pool eviction with --apply-txpool.
# Run from project root. Usage: bash scripts/review-sentry-and-rpc-nodes.sh [--apply-txpool]
# Note: RPC 2307/2308 (Putu) may show block behind if not synced; check logs if needed.
set -euo pipefail
@@ -15,26 +17,43 @@ R630_02="${PROXMOX_HOST_R630_02:-${PROXMOX_R630_02:-192.168.11.12}}"
ML110="${PROXMOX_HOST_ML110:-${PROXMOX_ML110:-192.168.11.10}}"
# VMID:host:service:config_name (config path under /etc/besu/)
# Sentries 1500-1503 r630-01; 1504 ml110
# RPC 2101 r630-01; 2201,2303,2401 r630-02; 2301,2400,2503-2505 ml110 (2506-2508 destroyed 2026-02-08); 2402 default r630-01
# Host mapping per docs/04-configuration/verification-evidence/BESU_VMIDS_FROM_PROXMOX_20260208.md
# Validators: 1000-1002 r630-01, 1003-1004 ml110
# Sentries: 1500-1502 r630-01; 1503-1506 ml110
# RPC: 2101 r630-01; 2201,2303,2401 r630-02; 2301,2304-2308,2400,2402,2403 ml110; 2503-2505 r630-01 (HYBX; use besu.service, not besu-rpc — if Besu not installed, service is disabled). 2506-2508 destroyed 2026-02-08.
VALIDATORS=(
"1000:$R630_01:besu-validator:config-validator.toml"
"1001:$R630_01:besu-validator:config-validator.toml"
"1002:$R630_01:besu-validator:config-validator.toml"
"1003:$ML110:besu-validator:config-validator.toml"
"1004:$ML110:besu-validator:config-validator.toml"
)
SENTRIES=(
"1500:$R630_01:besu-sentry:config-sentry.toml"
"1501:$R630_01:besu-sentry:config-sentry.toml"
"1502:$R630_01:besu-sentry:config-sentry.toml"
"1503:$R630_01:besu-sentry:config-sentry.toml"
"1503:$ML110:besu-sentry:config-sentry.toml"
"1504:$ML110:besu-sentry:config-sentry.toml"
"1505:$ML110:besu-sentry:config-sentry.toml"
"1506:$ML110:besu-sentry:config-sentry.toml"
)
RPC_NODES=(
"2101:$R630_01:besu-rpc:config-rpc-core.toml"
"2201:$R630_02:besu-rpc:config-rpc-public.toml"
"2301:$ML110:besu-rpc:config-rpc-private.toml"
"2303:$R630_02:besu-rpc:config-rpc.toml"
"2304:$ML110:besu-rpc:config-rpc.toml"
"2305:$ML110:besu-rpc:config-rpc.toml"
"2306:$ML110:besu-rpc:config-rpc.toml"
"2307:$ML110:besu-rpc:config-rpc.toml"
"2308:$ML110:besu-rpc:config-rpc.toml"
"2400:$ML110:besu-rpc:config-rpc.toml"
"2401:$R630_02:besu-rpc:config-rpc.toml"
"2402:$R630_01:besu-rpc:config-rpc.toml"
"2503:$ML110:besu-rpc:config-rpc.toml"
"2504:$ML110:besu-rpc:config-rpc.toml"
"2505:$ML110:besu-rpc:config-rpc.toml"
"2402:$ML110:besu-rpc:config-rpc.toml"
"2403:$ML110:besu-rpc:config-rpc.toml"
"2503:$R630_01:besu-rpc:config-rpc.toml"
"2504:$R630_01:besu-rpc:config-rpc.toml"
"2505:$R630_01:besu-rpc:config-rpc.toml"
)
RED='\033[0;31m'
@@ -62,9 +81,15 @@ review_one() {
return 0
fi
# HYBX 2503-2505 use besu.service; check it if besu-rpc not active
service_status=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "$ssh_target" "pct exec $vmid -- systemctl is-active $service 2>/dev/null" || echo "unknown")
if [[ "$service_status" != "active" && "$vmid" =~ ^250[345]$ ]]; then
service_status=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "$ssh_target" "pct exec $vmid -- systemctl is-active besu.service 2>/dev/null" || echo "unknown")
fi
if [[ "$service_status" = "active" ]]; then
echo -e " VMID $vmid: container running, ${GREEN}$service active${NC}"
elif [[ "$service_status" = "inactive" && "$vmid" =~ ^250[345]$ ]]; then
echo -e " VMID $vmid: container running, ${YELLOW}besu.service inactive (HYBX: Besu not installed or disabled)${NC}"
else
echo -e " VMID $vmid: container running, ${YELLOW}$service $service_status${NC}"
fi
@@ -79,7 +104,12 @@ review_one() {
local block_hex block_dec
block_hex=$(echo "$resp" | jq -r '.result' 2>/dev/null)
block_dec=$((block_hex))
echo -e " RPC $ip:8545 → block ${GREEN}$block_dec${NC}"
# Chain 138 is past 2M; flag if node is >50k blocks behind
if [[ "$block_dec" -lt 2050000 ]]; then
echo -e " RPC $ip:8545 → block ${YELLOW}$block_dec (behind)${NC}"
else
echo -e " RPC $ip:8545 → block ${GREEN}$block_dec${NC}"
fi
else
echo -e " RPC $ip:8545 → ${YELLOW}no response${NC}"
fi
@@ -112,6 +142,13 @@ review_one() {
fi
}
echo ""
echo -e "${CYAN}=== Validator Nodes ===${NC}"
for entry in "${VALIDATORS[@]}"; do
IFS=: read -r vmid host service config_name <<< "$entry"
review_one "$vmid" "$host" "$service" "$config_name" "false"
done
echo ""
echo -e "${CYAN}=== Sentry Nodes ===${NC}"
for entry in "${SENTRIES[@]}"; do

View File

@@ -0,0 +1,117 @@
#!/usr/bin/env bash
# Run operator tasks from a host on LAN with access to .env (PRIVATE_KEY, NPM_PASSWORD, etc.).
# Optional: contract deploy, Blockscout verify, backup, Proxmox VM/container creation.
#
# Usage:
# ./scripts/run-all-operator-tasks-from-lan.sh [--dry-run] [--skip-backup] [--skip-verify]
# ./scripts/run-all-operator-tasks-from-lan.sh [--dry-run] --deploy # + contract deploy (phases + TransactionMirror if needed)
# ./scripts/run-all-operator-tasks-from-lan.sh [--dry-run] --create-vms # + create DBIS Core / missing containers
# ./scripts/run-all-operator-tasks-from-lan.sh [--dry-run] --deploy --create-vms # all
#
# Requires: LAN access to 192.168.11.x; smom-dbis-138/.env with PRIVATE_KEY for deploy.
# For create-vms: SSH to PROXMOX_HOST (default 192.168.11.10).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
DRY_RUN=false
SKIP_BACKUP=false
SKIP_VERIFY=false
DO_DEPLOY=false
DO_CREATE_VMS=false
# Flag parsing: each [[ ]] test sits on the left of &&, so a non-matching
# flag does not trip set -e. Unknown flags are silently ignored.
for a in "$@"; do
[[ "$a" == "--dry-run" ]] && DRY_RUN=true
[[ "$a" == "--skip-backup" ]] && SKIP_BACKUP=true
[[ "$a" == "--skip-verify" ]] && SKIP_VERIFY=true
[[ "$a" == "--deploy" ]] && DO_DEPLOY=true
[[ "$a" == "--create-vms" ]] && DO_CREATE_VMS=true
[[ "$a" == "-h" || "$a" == "--help" ]] && {
echo "Usage: $0 [--dry-run] [--skip-backup] [--skip-verify] [--deploy] [--create-vms]"
echo " --dry-run Print steps only, do not run."
echo " --skip-backup Skip NPMplus backup."
echo " --skip-verify Skip Blockscout contract verification."
echo " --deploy Also run contract deployment (smom-dbis-138 phased + TransactionMirror if needed)."
echo " --create-vms Also create Proxmox containers (DBIS Core 6 containers; requires SSH to PROXMOX_HOST)."
echo "See: docs/00-meta/STEPS_FROM_PROXMOX_OR_LAN_WITH_SECRETS.md"
exit 0
}
done
# Colored log helpers (write to stdout).
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_ok() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[0;33m[⚠]\033[0m $1"; }
log_err() { echo -e "\033[0;31m[✗]\033[0m $1"; }
echo ""
echo "=== Run all operator tasks (from LAN) ==="
echo " dry-run=$DRY_RUN skip-backup=$SKIP_BACKUP skip-verify=$SKIP_VERIFY deploy=$DO_DEPLOY create-vms=$DO_CREATE_VMS"
echo ""
# 1) Wave 0: NPMplus RPC fix + backup
if [[ "$DRY_RUN" == true ]]; then
echo "[DRY-RUN] Would run: run-wave0-from-lan.sh (NPMplus RPC fix + backup)"
else
# Best-effort: the $(...) injects --skip-backup as a single optional word;
# stderr is suppressed and failure tolerated so later steps still run.
bash "$SCRIPT_DIR/run-wave0-from-lan.sh" $([[ "$SKIP_BACKUP" == true ]] && echo --skip-backup) 2>/dev/null || true
fi
echo ""
# 2) Blockscout verification
if [[ "$SKIP_VERIFY" != true ]]; then
if [[ "$DRY_RUN" == true ]]; then
echo "[DRY-RUN] Would run: source smom-dbis-138/.env; ./scripts/verify/run-contract-verification-with-proxy.sh"
else
log_info "Blockscout source verification..."
# Subshell keeps the sourced .env variables out of this shell's environment.
([[ -f smom-dbis-138/.env ]] && source smom-dbis-138/.env 2>/dev/null; bash "$SCRIPT_DIR/verify/run-contract-verification-with-proxy.sh") || log_warn "Blockscout verify skipped (env or script failed)"
fi
echo ""
fi
# 3) Optional: contract deployment
if [[ "$DO_DEPLOY" == true ]]; then
if [[ "$DRY_RUN" == true ]]; then
echo "[DRY-RUN] Would run: smom-dbis-138 deploy-all-phases.sh (and deploy-transaction-mirror-chain138.sh if needed)"
else
if [[ -f smom-dbis-138/.env ]]; then
# Sourced into this shell (not a subshell) so PRIVATE_KEY is visible below.
source smom-dbis-138/.env 2>/dev/null || true
if [[ -n "${PRIVATE_KEY:-}" ]]; then
log_info "Contract deployment (phased)..."
(cd smom-dbis-138 && ./scripts/deployment/deploy-all-phases.sh) && log_ok "Phased deploy done" || log_warn "Phased deploy failed (may already be deployed)"
log_info "TransactionMirror (if needed)..."
bash "$SCRIPT_DIR/deployment/deploy-transaction-mirror-chain138.sh" 2>/dev/null && log_ok "TransactionMirror deployed" || log_warn "TransactionMirror skipped or failed (add TRANSACTION_MIRROR_ADDRESS to .env if deployed)"
else
log_warn "PRIVATE_KEY not set; skipping deploy"
fi
else
log_warn "smom-dbis-138/.env not found; skipping deploy"
fi
fi
echo ""
fi
# 4) Optional: create Proxmox containers (DBIS Core)
if [[ "$DO_CREATE_VMS" == true ]]; then
if [[ "$DRY_RUN" == true ]]; then
echo "[DRY-RUN] Would run: dbis_core/scripts/deployment/create-dbis-core-containers.sh"
else
if [[ -f dbis_core/scripts/deployment/create-dbis-core-containers.sh ]]; then
log_info "Creating DBIS Core containers (if missing)..."
NON_INTERACTIVE=1 bash dbis_core/scripts/deployment/create-dbis-core-containers.sh 2>/dev/null && log_ok "DBIS Core containers done" || log_warn "DBIS Core create failed or skipped (check PROXMOX_HOST SSH)"
else
log_warn "create-dbis-core-containers.sh not found; skipping"
fi
fi
echo ""
fi
# Manual follow-ups: printed as a checklist, never executed by this script.
echo "=== Next steps (manual if needed) ==="
echo " sendCrossChain: ./scripts/bridge/run-send-cross-chain.sh <amount_eth> [recipient] # omit --dry-run for real send; needs LINK"
echo " SSH keys: ./scripts/security/setup-ssh-key-auth.sh [--apply]"
echo " Firewall 8006: ./scripts/security/firewall-proxmox-8006.sh [--apply] [CIDR]"
echo " Backup cron: bash scripts/maintenance/schedule-npmplus-backup-cron.sh --install"
echo " Daily/weekly: bash scripts/maintenance/schedule-daily-weekly-cron.sh --install"
echo " Full steps list: docs/00-meta/STEPS_FROM_PROXMOX_OR_LAN_WITH_SECRETS.md"
echo ""

View File

@@ -1,7 +1,11 @@
#!/usr/bin/env bash
# Run all tasks that do NOT require LAN, Proxmox SSH, PRIVATE_KEY, or NPM_PASSWORD.
# Use from dev machine / WSL / CI. For tasks that need LAN/creds, see run-operator-tasks-from-lan.sh.
# Usage: ./scripts/run-completable-tasks-from-anywhere.sh
# Usage: ./scripts/run-completable-tasks-from-anywhere.sh [--dry-run]
# --dry-run Print the four steps only; do not run them (exit 0).
#
# Exit codes (Unix convention): 0 = success (all steps passed), non-zero = failure.
# Do not "fix" exit 0 — it means the script completed successfully.
set -euo pipefail
@@ -9,6 +13,21 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
DRY_RUN=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true && break; done
if $DRY_RUN; then
echo "=== Completable from anywhere (--dry-run: commands only) ==="
echo ""
echo "1. Config validation: bash scripts/validation/validate-config-files.sh [--dry-run]"
echo "2. On-chain check (138): SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh || true"
echo "3. All validation: bash scripts/verify/run-all-validation.sh --skip-genesis"
echo "4. Reconcile .env: bash scripts/verify/reconcile-env-canonical.sh --print"
echo ""
echo "Run without --dry-run to execute. Exit 0 = success."
exit 0
fi
echo "=== Completable from anywhere (no LAN/creds) ==="
echo ""
@@ -33,3 +52,4 @@ bash scripts/verify/reconcile-env-canonical.sh --print
echo ""
echo "=== Done. Tasks requiring LAN or credentials: run scripts/run-operator-tasks-from-lan.sh from a host on LAN with NPM_PASSWORD/PRIVATE_KEY set. ==="
exit 0

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
# Run deployments (Chain 138 and multichain). These are planned deployments,
# not optional or future extras, despite the script's name.
# Usage:
#   ./scripts/run-optional-deployments.sh --dry-run              # print commands only (default)
#   ./scripts/run-optional-deployments.sh --execute              # run all phases
#   ./scripts/run-optional-deployments.sh --execute --phases 6,7 # run only phases 6 and 7
#
# Requires: smom-dbis-138/.env with PRIVATE_KEY, RPC_URL_138.
# See: docs/07-ccip/OPTIONAL_DEPLOYMENTS_START_HERE.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SMOM_DIR="$PROJECT_ROOT/smom-dbis-138"
EXECUTE=""
PHASES=""
while [ $# -gt 0 ]; do
  case "$1" in
    --execute) EXECUTE=1 ;;
    # Consume the option value only when one is actually present;
    # the [ ] test is left of &&, so a trailing bare --phases does not trip set -e.
    --phases) shift; [ $# -gt 0 ] && PHASES="$1" ;;
  esac
  shift
done
if [ ! -d "$SMOM_DIR" ]; then
  echo "ERROR: smom-dbis-138 not found at $SMOM_DIR" >&2
  exit 1
fi
if [ -n "$EXECUTE" ] && [ ! -f "$SMOM_DIR/.env" ]; then
  echo "ERROR: smom-dbis-138/.env not found. Create it with PRIVATE_KEY and RPC_URL_138." >&2
  exit 1
fi
# Build the child-script argument list as an array so each option and its
# value is passed as a single argument (no word-splitting surprises).
OPTS=()
[ -z "$EXECUTE" ] && OPTS+=(--dry-run)
[ -n "$PHASES" ] && OPTS+=(--phases "$PHASES")
echo "============================================"
echo "Deployments (smom-dbis-138) — Chain 138 and multichain"
echo "Mode: $([ -n "$EXECUTE" ] && echo 'EXECUTE' || echo 'DRY-RUN (use --execute to run)')"
echo "============================================"
cd "$SMOM_DIR"
# set -a exports every variable from .env (PRIVATE_KEY, RPC_URL_138, ...) to the child.
if [ -f .env ]; then set -a; source .env; set +a; fi
# ${OPTS[@]+...} guards the empty-array case under set -u on bash < 4.4.
exec ./scripts/deployment/deploy-optional-future-all.sh ${OPTS[@]+"${OPTS[@]}"}

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Run completable mapper-related tasks (validation, config hygiene) and print operator checklist for the rest.
# Usage: ./scripts/run-remaining-mapper-tasks.sh [--dry-run]
# See: docs/07-ccip/RECOMMENDED_COMPLETION_CHECKLIST.md, docs/07-ccip/MAPPER_GAPS_DEPLOYMENTS_AND_IMPROVEMENTS.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
DRY_RUN=false
# First --dry-run flag wins (break); all other arguments are ignored.
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true && break; done
echo ""
echo "=== Remaining mapper tasks (completable + operator checklist) ==="
echo ""
if $DRY_RUN; then
echo "[--dry-run] Would run: validate-config-files.sh, then print operator steps."
echo ""
exit 0
fi
# 1. Config validation (token-mapping.json, token-mapping-multichain.json, smart-contracts-master.json)
# Hard requirement: a validation failure aborts the script (set -e).
echo "1. Config validation..."
bash scripts/validation/validate-config-files.sh
echo ""
# 2. Optional: on-chain check Chain 138 (contracts from smart-contracts-master.json)
# Best-effort: the RPC may be unreachable from this host, so stderr is
# suppressed and failure is tolerated (SKIP_EXIT=1 plus || true).
echo "2. On-chain check (Chain 138)..."
SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh 2>/dev/null || true
echo ""
echo "--- Operator-only tasks (require RPC, keys, gas on target chains) ---"
echo ""
echo "High:"
echo " • Wemix (1111): Confirm WETH/USDT/USDC on https://scan.wemix.com/tokens; update config/token-mapping-multichain.json and docs/07-ccip/WEMIX_TOKEN_VERIFICATION.md if different. Then run: bash scripts/validation/validate-config-files.sh"
echo " • Gnosis (100), Celo (42220), Wemix (1111) CCIP bridges: Deploy CCIP WETH9/WETH10 per chain, add destinations, fund with LINK. Runbook: docs/07-ccip/CONFIG_READY_CHAINS_COMPLETION_RUNBOOK.md"
echo ""
echo "Medium:"
echo " • LINK on Mainnet relay: Extend CCIPRelayBridge or deploy LINK receiver; fund; set relaySupported: true for LINK. Runbook: docs/07-ccip/RELAY_BRIDGE_ADD_LINK_SUPPORT_RUNBOOK.md"
echo ""
echo "Low (optional):"
echo " • AddressMapper on other chains: Cronos done. For BSC/Polygon/etc: from smom-dbis-138/ run DeployAddressMapperOtherChain, set mapper in config/smart-contracts-master.json. See docs/07-ccip/OPTIONAL_DEPLOYMENTS_START_HERE.md §A"
echo " • DODO PMM on 138: Deploy DODOPMMIntegration, create pools. See docs/07-ccip/RECOMMENDED_COMPLETION_CHECKLIST.md §6"
echo " • Mainnet trustless stack: Deploy per docs/03-deployment/OPTIONAL_FUTURE_DEPLOYMENTS_RUNBOOK.md. Checklist §7"
echo ""
echo "Full checklist: docs/07-ccip/RECOMMENDED_COMPLETION_CHECKLIST.md"
echo "Single operator doc: docs/07-ccip/REMAINING_OPERATOR_STEPS.md"
echo "Run config when .env set: ./scripts/complete-all-mapper-operator.sh"
echo ""

View File

@@ -24,7 +24,10 @@ cd "$PROJECT_ROOT"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
[ -f "${PROJECT_ROOT}/.env" ] && set +u && source "${PROJECT_ROOT}/.env" 2>/dev/null; set -u
REMOTE_USER="${PROXMOX_USER:-root}"
# SSH user for shell access (use root). PROXMOX_USER in .env may be root@pam for API; that is not valid for SSH.
REMOTE_USER="${REMOTE_SSH_USER:-${PROXMOX_USER:-root}}"
# If REMOTE_USER contains @ (e.g. root@pam from .env), use root for SSH
[[ "$REMOTE_USER" == *"@"* ]] && REMOTE_USER="root"
REMOTE_DIR="${REMOTE_RUN_DIR:-/tmp/proxmox-scripts-run}"
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_ML110:-192.168.11.10}}"

View File

@@ -4,6 +4,7 @@
#
# Usage (from host):
# ssh root@192.168.11.11 "pct exec 5700 -- bash -s" < scripts/setup-dev-vm-users-and-gitea.sh
# (Dev VM IP: 192.168.11.59 from config/ip-addresses.conf)
# Or copy and run:
# pct push 5700 scripts/setup-dev-vm-users-and-gitea.sh /tmp/setup-dev-vm.sh
# pct exec 5700 -- bash /tmp/setup-dev-vm.sh

View File

@@ -1,6 +1,7 @@
#!/usr/bin/env bash
# Proxmox Storage Monitoring Script with Alerts
# Monitors storage usage across all Proxmox nodes and sends alerts
# Monitors storage usage across all Proxmox nodes and sends alerts.
# Optional: set ALERT_EMAIL for mail(1); set ALERT_WEBHOOK for Slack/Discord/Teams webhook (JSON payload).
set -euo pipefail

View File

@@ -0,0 +1,40 @@
#!/usr/bin/env bash
# Sync all nine HYBX sidecars one repo at a time. Run from proxmox repo root.
# Usage: bash scripts/sync-hybx-sidecars-one-by-one.sh [--sync]
#   --sync: pull before push (default: push only).
# Exits 1 if any repo failed; 0 when all nine synced.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR/.."
REPOS=(
  mifos-fineract-sidecar
  mt103-hardcopy-sidecar
  off-ledger-2-on-ledger-sidecar
  securitization-engine-sidecar
  card-networks-sidecar
  securities-sidecar
  flash-loan-xau-sidecar
  server-funds-sidecar
  docs-for-sidecars
)
SYNC_ARG=""
[[ "${1:-}" == "--sync" ]] && SYNC_ARG="--sync"
ok=0
fail=0
for name in "${REPOS[@]}"; do
  echo ""
  echo ">>> Syncing: $name"
  # $SYNC_ARG intentionally unquoted: expands to zero or one word.
  if bash scripts/push-hybx-sidecars-to-gitea.sh $SYNC_ARG "$name"; then
    ((ok++)) || true
  else
    rc=$?  # capture before ((fail++)), which resets $? to 0
    ((fail++)) || true
    echo "  $name failed (exit $rc)"
  fi
done
echo ""
echo "Result: $ok ok, $fail failed"
[ "$fail" -gt 0 ] && exit 1
exit 0

View File

@@ -34,6 +34,7 @@ NET_VERSION_EXPECTED = "138"
RPC_NODES: List[Dict[str, str]] = [
# ThirdWeb RPC nodes
{"vmid": "2400", "ip": "192.168.11.240", "group": "thirdweb", "name": "thirdweb-rpc-1"},
{"vmid": "2401", "ip": "192.168.11.241", "group": "thirdweb", "name": "besu-rpc-thirdweb-0x8a-1"},
{"vmid": "2402", "ip": "192.168.11.242", "group": "thirdweb", "name": "besu-rpc-thirdweb-0x8a-2"},
{"vmid": "2403", "ip": "192.168.11.243", "group": "thirdweb", "name": "besu-rpc-thirdweb-0x8a-3"},

View File

@@ -28,7 +28,7 @@ log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Configuration
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
ORACLE_VMID="${ORACLE_VMID:-3500}"
COINGECKO_API_KEY="CG-LxMsQ7jp3Jd6he3VFzP1uUXA"
COINGECKO_API_KEY="${COINGECKO_API_KEY:?COINGECKO_API_KEY must be set. Export from .env or use: export COINGECKO_API_KEY=your-key}"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -22,7 +22,7 @@ log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Configuration
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
ORACLE_VMID="${ORACLE_VMID:-3500}"
COINGECKO_API_KEY="CG-LxMsQ7jp3Jd6he3VFzP1uUXA"
COINGECKO_API_KEY="${COINGECKO_API_KEY:?Set COINGECKO_API_KEY in env}"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -1,12 +1,17 @@
#!/usr/bin/env bash
# Validate required config files and optional env vars before deployment/scripts
# Recommendation: docs/10-best-practices/IMPLEMENTATION_CHECKLIST.md (Configuration validation)
# Usage: ./scripts/validation/validate-config-files.sh [--dry-run]
# --dry-run Print what would be validated and exit 0 (no file checks).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
DRY_RUN=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true && break; done
log_info() { echo "[INFO] $1"; }
log_ok() { echo "[OK] $1"; }
log_warn() { echo "[WARN] $1"; }
@@ -44,6 +49,13 @@ check_env() {
fi
}
if $DRY_RUN; then
echo "=== Validation (--dry-run: would check) ==="
echo " REQUIRED_FILES: ${REQUIRED_FILES:-<default: config/ip-addresses.conf, .env.example, token-mapping.json, smart-contracts-master.json>}"
echo " OPTIONAL_ENV: $OPTIONAL_ENV"
exit 0
fi
if [[ -n "$REQUIRED_FILES" ]]; then
for f in $REQUIRED_FILES; do
check_file "$f"
@@ -52,6 +64,32 @@ else
# Default: check common locations
[[ -d "$PROJECT_ROOT/config" ]] && check_file "$PROJECT_ROOT/config/ip-addresses.conf" || true
[[ -f "$PROJECT_ROOT/.env.example" ]] && log_ok ".env.example present (copy to .env and fill)" || true
# Token mapping (Chain 138 ↔ Mainnet): optional but validate structure if present
if [[ -f "$PROJECT_ROOT/config/token-mapping.json" ]]; then
log_ok "Found: config/token-mapping.json"
if command -v jq &>/dev/null; then
if jq -e '.tokens | type == "array"' "$PROJECT_ROOT/config/token-mapping.json" &>/dev/null; then
log_ok "token-mapping.json: valid JSON with .tokens array"
else
log_err "token-mapping.json: invalid or missing .tokens array"
ERRORS=$((ERRORS + 1))
fi
fi
else
log_warn "Optional config/token-mapping.json not found (relay uses fallback mapping)"
fi
if [[ -f "$PROJECT_ROOT/config/token-mapping-multichain.json" ]]; then
log_ok "Found: config/token-mapping-multichain.json"
if command -v jq &>/dev/null; then
if jq -e '.pairs | type == "array"' "$PROJECT_ROOT/config/token-mapping-multichain.json" &>/dev/null; then
log_ok "token-mapping-multichain.json: valid JSON with .pairs array"
else
log_err "token-mapping-multichain.json: invalid or missing .pairs array"
ERRORS=$((ERRORS + 1))
fi
fi
fi
[[ -f "$PROJECT_ROOT/config/smart-contracts-master.json" ]] && log_ok "Found: config/smart-contracts-master.json" || true
fi
for v in $OPTIONAL_ENV; do

View File

@@ -15,7 +15,7 @@ PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SMOM="${SMOM_DIR:-${PROJECT_ROOT}/smom-dbis-138}"
ALLTRA="${PROJECT_ROOT}/alltra-lifi-settlement"
RPC="${RPC_URL_138:-http://${RPC_CORE_1:-192.168.11.211}:8545}"
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
IP_BLOCKSCOUT="${IP_BLOCKSCOUT:-192.168.11.140}"
VERIFIER_URL="${FORGE_VERIFIER_URL:-http://127.0.0.1:3080/api}"
@@ -59,13 +59,23 @@ echo "Blockscout Contract Verification (Chain 138)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
ADDR_MULTICALL="${ADDR_MULTICALL:-0xF4AA429BE277d1a1a1A744C9e5B3aD821a9b96f7}"
ADDR_ORACLE_AGGREGATOR="${ADDR_ORACLE_AGGREGATOR:-0x452a4701d01c0Ff3ED0C547a5adF0659eb4a3ef7}"
ADDR_ORACLE_PROXY="${ADDR_ORACLE_PROXY:-0x404DcD22f82C734361256B441DAAa8DE654CE191}"
ADDR_MULTISIG="${ADDR_MULTISIG:-0xb9E29cFa1f89d369671E640d0BB3aD94Cab43965}"
ADDR_CCIP_RECEIVER="${ADDR_CCIP_RECEIVER:-0xC12236C03b28e675d376774FCE2C2C052488430F}"
ADDR_VOTING="${ADDR_VOTING:-0x022267b26400114aF01BaCcb92456Fe36cfccD93}"
ADDR_CCIP_SENDER="${ADDR_CCIP_SENDER:-0x105F8A15b819948a89153505762444Ee9f324684}"
ADDR_ORACLE_PROXY="${ADDR_ORACLE_PROXY:-0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6}"
ADDR_CCIPWETH10="${ADDR_CCIPWETH10_BRIDGE:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
ADDR_CCIPWETH9="${ADDR_CCIPWETH9_BRIDGE:-${CCIPWETH9_BRIDGE_CHAIN138:-0x971cD9D156f193df8051E48043C476e53ECd4693}}"
should_verify CCIPSender && verify_one "$ADDR_CCIP_SENDER" "CCIPSender" "contracts/ccip/CCIPSender.sol:CCIPSender"
should_verify Multicall && verify_one "$ADDR_MULTICALL" "Multicall" "contracts/utils/Multicall.sol:Multicall"
should_verify Aggregator && verify_one "$ADDR_ORACLE_AGGREGATOR" "Aggregator" "contracts/oracle/Aggregator.sol:Aggregator"
should_verify Proxy && verify_one "$ADDR_ORACLE_PROXY" "Proxy" "contracts/oracle/Proxy.sol:Proxy"
should_verify MultiSig && verify_one "$ADDR_MULTISIG" "MultiSig" "contracts/governance/MultiSig.sol:MultiSig"
should_verify CCIPReceiver && verify_one "$ADDR_CCIP_RECEIVER" "CCIPReceiver" "contracts/ccip/CCIPReceiver.sol:CCIPReceiver"
should_verify Voting && verify_one "$ADDR_VOTING" "Voting" "contracts/governance/Voting.sol:Voting"
should_verify CCIPSender && verify_one "$ADDR_CCIP_SENDER" "CCIPSender" "contracts/ccip/CCIPSender.sol:CCIPSender"
should_verify CCIPWETH10Bridge && verify_one "$ADDR_CCIPWETH10" "CCIPWETH10Bridge" "contracts/ccip/CCIPWETH10Bridge.sol:CCIPWETH10Bridge"
should_verify CCIPWETH9Bridge && verify_one "$ADDR_CCIPWETH9" "CCIPWETH9Bridge" "contracts/ccip/CCIPWETH9Bridge.sol:CCIPWETH9Bridge"

View File

@@ -34,6 +34,8 @@ fi
RECORDS=(
"rpc-http-pub.d-bis.org|$ZONE_D_BIS"
"rpc-http-prv.d-bis.org|$ZONE_D_BIS"
"rpc-fireblocks.d-bis.org|$ZONE_D_BIS"
"ws.rpc-fireblocks.d-bis.org|$ZONE_D_BIS"
)
RECORDS_DEFI=(
"rpc.public-0138.defi-oracle.io|$ZONE_DEFI_ORACLE"

View File

@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Check that Chain 138 deployed contracts have bytecode on-chain.
# Usage: ./scripts/verify/check-contracts-on-chain-138.sh [RPC_URL] [--dry-run]
# Default RPC: from env (RPC_URL_138, RPC_CORE_1) or config/ip-addresses.conf, else https://rpc-core.d-bis.org
# Default RPC: from env RPC_URL_138 (Chain 138 Core standard) or config/ip-addresses.conf, else https://rpc-core.d-bis.org
# Optional: SKIP_EXIT=1 to exit 0 even when some addresses MISS (e.g. when RPC unreachable from this host).
# Optional: --dry-run to print RPC and address list only (no RPC calls).
#
@@ -14,7 +14,7 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load project env so RPC_URL_138 / RPC_CORE_1 from config/ip-addresses.conf or smom-dbis-138/.env are used
# Load project env so RPC_URL_138 (Chain 138 Core) from config/ip-addresses.conf or smom-dbis-138/.env is used
[[ -f "${SCRIPT_DIR}/../lib/load-project-env.sh" ]] && source "${SCRIPT_DIR}/../lib/load-project-env.sh" 2>/dev/null || true
# Parse args: first non-option is RPC_URL; --dry-run = print only, no cast calls
@@ -23,50 +23,66 @@ RPC_ARG=""
for a in "$@"; do
if [[ "$a" == "--dry-run" ]]; then DRY_RUN=1; else [[ -z "$RPC_ARG" ]] && RPC_ARG="$a"; fi
done
RPC="${RPC_ARG:-${RPC_URL_138:-${RPC_CORE_1:+http://${RPC_CORE_1}:8545}}}"
RPC="${RPC_ARG:-${RPC_URL_138:-https://rpc-core.d-bis.org}}"
RPC="${RPC:-https://rpc-core.d-bis.org}"
# Chain 138 deployed addresses (canonical list; see docs/11-references/CONTRACT_ADDRESSES_REFERENCE.md)
ADDRESSES=(
"0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" # WETH9
"0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f" # WETH10
"0x99b3511a2d315a497c8112c1fdd8d508d4b1e506" # Multicall / Oracle Aggregator
"0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6" # Oracle Proxy
"0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e" # CCIP Router
"0x105F8A15b819948a89153505762444Ee9f324684" # CCIP Sender
"0x971cD9D156f193df8051E48043C476e53ECd4693" # CCIPWETH9Bridge
"0xe0E93247376aa097dB308B92e6Ba36bA015535D0" # CCIPWETH10Bridge
"0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03" # LINK
"0x93E66202A11B1772E55407B32B44e5Cd8eda7f22" # cUSDT
"0xf22258f57794CC8E06237084b353Ab30fFfa640b" # cUSDC
"0x91Efe92229dbf7C5B38D422621300956B55870Fa" # TokenRegistry
"0xEBFb5C60dE5f7C4baae180CA328D3BB39E1a5133" # TokenFactory
"0xbc54fe2b6fda157c59d59826bcfdbcc654ec9ea1" # ComplianceRegistry
"0x31884f84555210FFB36a19D2471b8eBc7372d0A8" # BridgeVault
"0xF78246eB94c6CB14018E507E60661314E5f4C53f" # FeeCollector
"0x95BC4A997c0670d5DAC64d55cDf3769B53B63C28" # DebtRegistry
"0x0C4FD27018130A00762a802f91a72D6a64a60F14" # PolicyManager
"0x0059e237973179146237aB49f1322E8197c22b21" # TokenImplementation
"0xD3AD6831aacB5386B8A25BB8D8176a6C8a026f04" # Price Feed Keeper
"0x16D9A2cB94A0b92721D93db4A6Cd8023D3338800" # MerchantSettlementRegistry
"0xe77cb26eA300e2f5304b461b0EC94c8AD6A7E46D" # WithdrawalEscrow
"0xAEE4b7fBe82E1F8295951584CBc772b8BBD68575" # UniversalAssetRegistry (proxy)
"0xA6891D5229f2181a34D4FF1B515c3Aa37dd90E0e" # GovernanceController (proxy)
"0xCd42e8eD79Dc50599535d1de48d3dAFa0BE156F8" # UniversalCCIPBridge (proxy)
"0x89aB428c437f23bAB9781ff8Db8D3848e27EeD6c" # BridgeOrchestrator (proxy)
"0x302aF72966aFd21C599051277a48DAa7f01a5f54" # PaymentChannelManager
"0xe5e3bB424c8a0259FDE23F0A58F7e36f73B90aBd" # GenericStateChannelManager
"0x439Fcb2d2ab2f890DCcAE50461Fa7d978F9Ffe1A" # AddressMapper
"0x6eD905A30c552a6e003061A38FD52A5A427beE56" # MirrorManager
"0xFce6f50B312B3D936Ea9693C5C9531CF92a3324c" # Lockbox138
# CREATE2 / deterministic (DeployDeterministicCore.s.sol)
"0x750E4a8adCe9f0e67A420aBE91342DC64Eb90825" # CREATE2Factory
"0xC98602aa574F565b5478E8816BCab03C9De0870f" # UniversalAssetRegistry (proxy, deterministic)
"0x532DE218b94993446Be30eC894442f911499f6a3" # UniversalCCIPBridge (proxy, deterministic)
"0x6427F9739e6B6c3dDb4E94fEfeBcdF35549549d8" # MirrorRegistry
"0x66FEBA2fC9a0B47F26DD4284DAd24F970436B8Dc" # AlltraAdapter
# Addresses to exclude from Chain 138 bytecode check (EOAs or Mainnet-only contracts listed for reference)
EXCLUDE_138=(
"0x4A666F96fC8764181194447A7dFdb7d471b301C8" # Deployer_Admin (EOA)
"0x15DF1D5BFDD8Aa4b380445D4e3E9B38d34283619" # Tether_USDT_Chain138 / MainnetTether (Mainnet only)
"0x4CF42c4F1dBa748601b8938be3E7ABD732E87cE9" # Transaction_Mirror (Mainnet only)
)
# Chain 138 deployed addresses: from config/smart-contracts-master.json when available, else fallback list
if [[ -n "${CONTRACTS_MASTER_JSON:-}" && -f "${CONTRACTS_MASTER_JSON}" ]] && command -v jq &>/dev/null; then
ALL_RAW=($(jq -r '.chains["138"].contracts | to_entries[] | .value' "$CONTRACTS_MASTER_JSON" | sort -u))
ADDRESSES=()
for a in "${ALL_RAW[@]}"; do
skip=0
for ex in "${EXCLUDE_138[@]}"; do [[ "$a" == "$ex" ]] && skip=1 && break; done
[[ $skip -eq 0 ]] && ADDRESSES+=("$a")
done
else
ADDRESSES=(
"0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" # WETH9
"0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f" # WETH10
"0x99b3511a2d315a497c8112c1fdd8d508d4b1e506" # Multicall / Oracle Aggregator
"0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6" # Oracle Proxy
"0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e" # CCIP Router
"0x105F8A15b819948a89153505762444Ee9f324684" # CCIP Sender
"0x971cD9D156f193df8051E48043C476e53ECd4693" # CCIPWETH9Bridge
"0xe0E93247376aa097dB308B92e6Ba36bA015535D0" # CCIPWETH10Bridge
"0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03" # LINK
"0x93E66202A11B1772E55407B32B44e5Cd8eda7f22" # cUSDT
"0xf22258f57794CC8E06237084b353Ab30fFfa640b" # cUSDC
"0x91Efe92229dbf7C5B38D422621300956B55870Fa" # TokenRegistry
"0xEBFb5C60dE5f7C4baae180CA328D3BB39E1a5133" # TokenFactory
"0xbc54fe2b6fda157c59d59826bcfdbcc654ec9ea1" # ComplianceRegistry
"0x31884f84555210FFB36a19D2471b8eBc7372d0A8" # BridgeVault
"0xF78246eB94c6CB14018E507E60661314E5f4C53f" # FeeCollector
"0x95BC4A997c0670d5DAC64d55cDf3769B53B63C28" # DebtRegistry
"0x0C4FD27018130A00762a802f91a72D6a64a60F14" # PolicyManager
"0x0059e237973179146237aB49f1322E8197c22b21" # TokenImplementation
"0xD3AD6831aacB5386B8A25BB8D8176a6C8a026f04" # Price Feed Keeper
"0x16D9A2cB94A0b92721D93db4A6Cd8023D3338800" # MerchantSettlementRegistry
"0xe77cb26eA300e2f5304b461b0EC94c8AD6A7E46D" # WithdrawalEscrow
"0xAEE4b7fBe82E1F8295951584CBc772b8BBD68575" # UniversalAssetRegistry (proxy)
"0xA6891D5229f2181a34D4FF1B515c3Aa37dd90E0e" # GovernanceController (proxy)
"0xCd42e8eD79Dc50599535d1de48d3dAFa0BE156F8" # UniversalCCIPBridge (proxy)
"0x89aB428c437f23bAB9781ff8Db8D3848e27EeD6c" # BridgeOrchestrator (proxy)
"0x302aF72966aFd21C599051277a48DAa7f01a5f54" # PaymentChannelManager
"0xe5e3bB424c8a0259FDE23F0A58F7e36f73B90aBd" # GenericStateChannelManager
"0x439Fcb2d2ab2f890DCcAE50461Fa7d978F9Ffe1A" # AddressMapper
"0x6eD905A30c552a6e003061A38FD52A5A427beE56" # MirrorManager
"0xFce6f50B312B3D936Ea9693C5C9531CF92a3324c" # Lockbox138
"0x750E4a8adCe9f0e67A420aBE91342DC64Eb90825" # CREATE2Factory
"0xC98602aa574F565b5478E8816BCab03C9De0870f" # UniversalAssetRegistry (proxy, deterministic)
"0x532DE218b94993446Be30eC894442f911499f6a3" # UniversalCCIPBridge (proxy, deterministic)
"0x6427F9739e6B6c3dDb4E94fEfeBcdF35549549d8" # MirrorRegistry
"0x66FEBA2fC9a0B47F26DD4284DAd24F970436B8Dc" # AlltraAdapter
)
fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Chain 138 — on-chain contract check"
echo "RPC: $RPC"
@@ -113,7 +129,7 @@ for addr in "${ADDRESSES[@]}"; do
done
echo ""
echo "Total: $OK present, $MISS missing/empty (36 addresses: 26 canonical + 5 channels/mirror/trustless + 5 CREATE2). Explorer: https://explorer.d-bis.org/address/<ADDR>"
echo "Total: $OK present, $MISS missing/empty (${#ADDRESSES[@]} addresses). Explorer: https://explorer.d-bis.org/address/<ADDR>"
if [[ $MISS -gt 0 && -z "$rpc_reachable" ]]; then
echo " → RPC was unreachable from this host; see WARN above. Run from LAN/VPN or pass a reachable RPC URL." >&2
fi

View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Compare RPC 2101 connected peers vs all possible peers (permissions-nodes allowlist).
# Usage: ./scripts/verify/check-rpc-2101-all-peers.sh
# Requires: jq, curl, access to RPC and config/besu-node-lists/permissions-nodes.toml
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
RPC_URL="${RPC_URL_138:-http://192.168.11.211:8545}"
PERMS="${PROJECT_ROOT}/config/besu-node-lists/permissions-nodes.toml"
# Count non-empty lines. grep -c prints the count even when it is 0 (but then
# exits non-zero), so '|| true' is enough; '|| echo 0' would print a second 0.
count_nonempty() { grep -c . <<<"${1:-}" || true; }
# Print each non-empty line indented; the 'continue' keeps the loop body's
# exit status 0 so an empty list does not trip set -e.
print_list() {
  while IFS= read -r ip; do
    [ -n "$ip" ] || continue
    echo "  $ip"
  done <<<"${1:-}"
}
# Extract allowlist IPs; exclude self 192.168.11.211 by exact, fixed-string line match.
POSSIBLE=$(grep -oE '192\.168\.11\.[0-9]+' "$PERMS" 2>/dev/null | sort -u | grep -Fxv '192.168.11.211' || true)
# Connected peers from admin_peers (strip the :30303 p2p port).
CONNECTED=$(curl -s -m 10 -X POST -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"admin_peers","params":[],"id":1}' "$RPC_URL" 2>/dev/null \
  | jq -r '.result[] | .network.remoteAddress // .remoteAddress // empty' 2>/dev/null \
  | sed 's/:30303//' | sort -u || true)
echo ""
echo "=== RPC 2101: all possible peers vs connected ==="
echo "  RPC: $RPC_URL"
echo "  Allowlist: $PERMS"
echo ""
POSSIBLE_COUNT=$(count_nonempty "$POSSIBLE")
CONNECTED_COUNT=$(count_nonempty "$CONNECTED")
echo "Possible peers (allowlist, excluding self 211): $POSSIBLE_COUNT"
echo "Connected peers: $CONNECTED_COUNT"
echo ""
echo "--- Connected (${CONNECTED_COUNT}) ---"
print_list "$CONNECTED"
echo ""
# Missing = in allowlist but not connected (fixed-string, whole-line membership test).
MISSING=""
while IFS= read -r ip; do
  [ -z "$ip" ] && continue
  if ! grep -Fqx "$ip" <<<"$CONNECTED"; then
    MISSING="${MISSING}${ip}"$'\n'
  fi
done <<<"$POSSIBLE"
MISSING=$(printf '%s' "$MISSING" | grep -v '^$' || true)
MISSING_COUNT=$(count_nonempty "$MISSING")
echo "--- Not connected (in allowlist, ${MISSING_COUNT}) ---"
print_list "$MISSING"
echo ""
# Summary
echo "Summary: $CONNECTED_COUNT/$POSSIBLE_COUNT possible peers connected."
if [ "$MISSING_COUNT" -gt 0 ]; then
  echo "Not connected: node may be down, or RPC has not yet connected (max-peers=32 on 2101)."
fi
echo ""

Some files were not shown because too many files have changed in this diff Show More