chore: update .env.master.example with new deployment scripts and treasury manager parameters; enhance AGENTS.md with GRU reference primacy details

- Added new deployment script references for Aave quote-push and treasury manager in .env.master.example.
- Updated AGENTS.md to include information on GRU reference primacy versus public PMM mesh execution model.
- Minor updates to various documentation files to reflect changes in policy and operational guidelines.

Made-with: Cursor
This commit is contained in:
defiQUG
2026-04-12 18:20:41 -07:00
parent 6945f69d7d
commit 0d29343941
133 changed files with 7017 additions and 539 deletions

View File

@@ -1,8 +1,11 @@
#!/usr/bin/env bash
# Collect enode from each of the 32 Besu nodes and regenerate static-nodes.json and
# permissions-nodes.toml with 32 unique entries (canonical IPs). Fixes duplicate enode (2400/2401).
# Collect enode from each live Besu node and regenerate static-nodes.json and
# permissions-nodes.toml with unique entries (canonical IPs).
#
# Usage: bash scripts/besu/collect-enodes-from-all-besu-nodes.sh [--dry-run] [--missing-only]
# Usage:
# bash scripts/besu/collect-enodes-from-all-besu-nodes.sh
# bash scripts/besu/collect-enodes-from-all-besu-nodes.sh --missing-only
# bash scripts/besu/collect-enodes-from-all-besu-nodes.sh --apply --vmid 2301
# --missing-only Only try to collect from VMIDs whose IP is not yet in static-nodes.json (fix failures only).
# Output: config/besu-node-lists/static-nodes.json and permissions-nodes.toml (backups as *.bak).
@@ -10,29 +13,63 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
DRY_RUN=false
DRY_RUN=true
MISSING_ONLY=false
for arg in "${@:-}"; do
[[ "$arg" == "--dry-run" ]] && DRY_RUN=true
[[ "$arg" == "--missing-only" ]] && MISSING_ONLY=true
TARGET_VMIDS=()
usage() {
  # Print the CLI help text for this script to stdout, one line per row.
  printf '%s\n' \
    'Usage: bash scripts/besu/collect-enodes-from-all-besu-nodes.sh [--apply] [--dry-run] [--missing-only] [--vmid <N>]' \
    'Options:' \
    '--dry-run Print intended write actions only (default)' \
    '--apply Write regenerated static/perms files' \
    '--missing-only Only try VMIDs whose canonical IP is not already present' \
    '--vmid <N> Limit collection to one VMID; repeatable'
}
# Parse CLI flags into the globals declared above (DRY_RUN, MISSING_ONLY,
# TARGET_VMIDS). Flags are processed left to right, so a later --apply or
# --dry-run overrides an earlier one. --vmid may be repeated to select
# several VMIDs. Unknown flags print help to stderr and exit 2.
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run)
DRY_RUN=true
shift
;;
--apply)
# --apply is the only way to leave the default dry-run mode.
DRY_RUN=false
shift
;;
--missing-only)
MISSING_ONLY=true
shift
;;
--vmid)
# --vmid consumes a value; missing value is a usage error (exit 2).
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
STATIC_FILE="${PROJECT_ROOT}/config/besu-node-lists/static-nodes.json"
PERM_FILE="${PROJECT_ROOT}/config/besu-node-lists/permissions-nodes.toml"
SSH_OPTS="-o ConnectTimeout=8 -o StrictHostKeyChecking=accept-new"
# All Besu VMIDs in stable order (validators, sentries, RPCs)
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2103 2201 2301 2303 2304 2305 2306 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
# Live Besu VMIDs in stable order (validators, sentries, RPCs)
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 2101 2102 2103 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2420 2430 2440 2460 2470 2480)
# VMID -> Proxmox host
declare -A HOST_BY_VMID
for v in 1000 1001 1002 1500 1501 1502 2101 2103 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-192.168.11.11}"; done
for v in 2201 2303 2305 2306 2307 2308 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-192.168.11.12}"; done
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_R630_03:-192.168.11.13}"; done
# VMID -> canonical IP (all 32)
# VMID -> canonical IP
declare -A IP_BY_VMID
IP_BY_VMID[1000]=192.168.11.100
IP_BY_VMID[1001]=192.168.11.101
@@ -57,20 +94,33 @@ IP_BY_VMID[2303]=192.168.11.233
IP_BY_VMID[2304]=192.168.11.234
IP_BY_VMID[2305]=192.168.11.235
IP_BY_VMID[2306]=192.168.11.236
IP_BY_VMID[2307]=192.168.11.237
IP_BY_VMID[2308]=192.168.11.238
IP_BY_VMID[2400]=192.168.11.240
IP_BY_VMID[2401]=192.168.11.241
IP_BY_VMID[2402]=192.168.11.242
IP_BY_VMID[2403]=192.168.11.243
IP_BY_VMID[2500]=192.168.11.172
IP_BY_VMID[2501]=192.168.11.173
IP_BY_VMID[2502]=192.168.11.174
IP_BY_VMID[2503]=192.168.11.246
IP_BY_VMID[2504]=192.168.11.247
IP_BY_VMID[2505]=192.168.11.248
IP_BY_VMID[2420]=192.168.11.172
IP_BY_VMID[2430]=192.168.11.173
IP_BY_VMID[2440]=192.168.11.174
IP_BY_VMID[2460]=192.168.11.246
IP_BY_VMID[2470]=192.168.11.247
IP_BY_VMID[2480]=192.168.11.248
selected_vmid() {
  # Return 0 when the VMID in $1 passes the --vmid filter: either no
  # filters were given (TARGET_VMIDS empty) or $1 matches one entry.
  local candidate="$1" filter
  if [[ ${#TARGET_VMIDS[@]} -eq 0 ]]; then
    return 0
  fi
  for filter in "${TARGET_VMIDS[@]}"; do
    if [[ "$filter" == "$candidate" ]]; then
      return 0
    fi
  done
  return 1
}
get_enode() {
local vmid="$1"
local host="${HOST_BY_VMID[$vmid]:-}"
local host
host="$(get_host_for_vmid "$vmid")"
local ip="${IP_BY_VMID[$vmid]:-}"
[[ -z "$host" || -z "$ip" ]] && return 1
# 1) admin_nodeInfo (RPC)
@@ -95,9 +145,9 @@ get_enode() {
return 0
fi
fi
# 4) For 2501-2505 on r630-01: node may lack Besu binary; export key via helper 2500 (which has Besu)
if [[ "$host" == "${PROXMOX_R630_01:-192.168.11.11}" ]] && [[ "$vmid" =~ ^(2501|2502|2503|2504|2505)$ ]]; then
pubkey=$(ssh $SSH_OPTS "root@$host" "pct exec $vmid -- cat /data/besu/key 2>/dev/null | head -1 > /tmp/key${vmid}.$$ 2>/dev/null && pct push 2500 /tmp/key${vmid}.$$ /tmp/key${vmid} 2>/dev/null && pct exec 2500 -- /opt/besu/bin/besu public-key export --node-private-key-file=/tmp/key${vmid} 2>/dev/null; rm -f /tmp/key${vmid}.$$" 2>/dev/null | grep -oE '0x[0-9a-fA-F]{128}' | head -1 | sed 's/^0x//')
# 4) For edge RPCs without Besu binary, export key via helper 2420 on r630-01.
if [[ "$host" == "${PROXMOX_R630_01:-${PROXMOX_HOST_R630_01:-192.168.11.11}}" ]] && [[ "$vmid" =~ ^(2430|2440|2460|2470|2480)$ ]]; then
pubkey=$(ssh $SSH_OPTS "root@$host" "pct exec $vmid -- cat /data/besu/key 2>/dev/null | head -1 > /tmp/key${vmid}.$$ 2>/dev/null && pct push 2420 /tmp/key${vmid}.$$ /tmp/key${vmid} 2>/dev/null && pct exec 2420 -- /opt/besu/bin/besu public-key export --node-private-key-file=/tmp/key${vmid} 2>/dev/null; rm -f /tmp/key${vmid}.$$" 2>/dev/null | grep -oE '0x[0-9a-fA-F]{128}' | head -1 | sed 's/^0x//')
if [[ -n "$pubkey" && ${#pubkey} -eq 128 ]]; then
echo "enode://${pubkey}@${ip}:30303"
return 0
@@ -125,6 +175,7 @@ done < <(jq -r '.[]' "${STATIC_FILE}.bak" 2>/dev/null)
VMIDS_TO_TRY=()
if $MISSING_ONLY; then
for vmid in "${BESU_VMIDS[@]}"; do
selected_vmid "$vmid" || continue
ip="${IP_BY_VMID[$vmid]:-}"
[[ -z "$ip" ]] && continue
[[ -z "${EXISTING_BY_IP[$ip]:-}" ]] && VMIDS_TO_TRY+=( "$vmid" )
@@ -132,8 +183,11 @@ if $MISSING_ONLY; then
echo "Missing-only: collecting from ${#VMIDS_TO_TRY[@]} VMIDs not in current list (${VMIDS_TO_TRY[*]:-none})."
[[ ${#VMIDS_TO_TRY[@]} -eq 0 ]] && echo "All 32 IPs already present. Nothing to collect." && exit 0
else
VMIDS_TO_TRY=( "${BESU_VMIDS[@]}" )
echo "Collecting enodes from ${#BESU_VMIDS[@]} Besu nodes..."
for vmid in "${BESU_VMIDS[@]}"; do
selected_vmid "$vmid" || continue
VMIDS_TO_TRY+=( "$vmid" )
done
echo "Collecting enodes from ${#VMIDS_TO_TRY[@]} Besu nodes..."
fi
declare -A COLLECTED_BY_VMID
@@ -168,6 +222,7 @@ declare -a MISSING_VMIDS
declare -A USED_NODE_ID
for vmid in "${BESU_VMIDS[@]}"; do
selected_vmid "$vmid" || continue
ip="${IP_BY_VMID[$vmid]:-}"
[[ -z "$ip" ]] && continue
enode=""

View File

@@ -1,34 +1,86 @@
#!/usr/bin/env bash
# Deploy canonical genesis.json, static-nodes.json, permissions-nodes.toml to specified VMIDs
# and restart Besu. Usage: VMIDS="2401 2402 2403 2500 2501 2502 2503 2504 2505" bash scripts/besu/deploy-genesis-and-node-lists-to-rpcs.sh
# Deploy canonical genesis.json, static-nodes.json, permissions-nodes.toml to selected RPC VMIDs
# and restart Besu.
# Usage:
# bash scripts/besu/deploy-genesis-and-node-lists-to-rpcs.sh
# bash scripts/besu/deploy-genesis-and-node-lists-to-rpcs.sh --vmid 2401
# bash scripts/besu/deploy-genesis-and-node-lists-to-rpcs.sh --apply --vmid 2401
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
VMIDS="${VMIDS:-2401 2402 2403 2500 2501 2502 2503 2504 2505}"
DRY_RUN=true
TARGET_VMIDS=()
# Print the CLI help text for this deploy script to stdout.
usage() {
cat <<'EOF'
Usage: bash scripts/besu/deploy-genesis-and-node-lists-to-rpcs.sh [--apply] [--dry-run] [--vmid <N>]
Options:
--dry-run Print intended actions only (default)
--apply Deploy files and restart Besu on selected RPC nodes
--vmid <N> Limit to one VMID; repeatable
EOF
}
# Parse CLI flags into DRY_RUN and TARGET_VMIDS (declared above). Flags
# are processed left to right (later --apply/--dry-run wins); --vmid is
# repeatable. Unknown flags print help to stderr and exit 2.
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run)
DRY_RUN=true
shift
;;
--apply)
DRY_RUN=false
shift
;;
--vmid)
# --vmid consumes a value; missing value is a usage error (exit 2).
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
RPC_VMIDS=(2401 2402 2403 2420 2430 2440 2460 2470 2480)
GENESIS="${PROJECT_ROOT}/smom-dbis-138-proxmox/config/genesis.json"
STATIC="${PROJECT_ROOT}/config/besu-node-lists/static-nodes.json"
PERMS="${PROJECT_ROOT}/config/besu-node-lists/permissions-nodes.toml"
declare -A HOST_BY_VMID
for v in 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-192.168.11.11}"; done
for v in 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-192.168.11.12}"; done
for v in 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-192.168.11.10}"; done
SSH_OPTS="-o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new"
[[ ! -f "$GENESIS" ]] && { echo "ERROR: $GENESIS not found"; exit 1; }
[[ ! -f "$STATIC" ]] && { echo "ERROR: $STATIC not found"; exit 1; }
[[ ! -f "$PERMS" ]] && { echo "ERROR: $PERMS not found"; exit 1; }
echo "Deploying genesis.json + static-nodes.json + permissions-nodes.toml to: $VMIDS"
# Return 0 when the VMID in $1 passes the --vmid filter: with no filters
# (TARGET_VMIDS empty) every VMID is selected; otherwise only listed ones.
selected_vmid() {
local vmid="$1"
# No --vmid filters given: select everything.
[[ ${#TARGET_VMIDS[@]} -eq 0 ]] && return 0
local wanted
for wanted in "${TARGET_VMIDS[@]}"; do
[[ "$vmid" == "$wanted" ]] && return 0
done
return 1
}
echo "Deploying genesis.json + static-nodes.json + permissions-nodes.toml to selected RPC VMIDs"
if $DRY_RUN; then echo " [dry-run]"; fi
echo ""
for vmid in $VMIDS; do
host="${HOST_BY_VMID[$vmid]:-}"
for vmid in "${RPC_VMIDS[@]}"; do
selected_vmid "$vmid" || continue
host="$(get_host_for_vmid "$vmid")"
[[ -z "$host" ]] && { echo "VMID $vmid: no host"; continue; }
running=$(ssh $SSH_OPTS "root@$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
if [[ "$running" != "running" ]]; then
@@ -36,6 +88,10 @@ for vmid in $VMIDS; do
continue
fi
echo "--- VMID $vmid @ $host ---"
if $DRY_RUN; then
echo " [dry-run] would deploy files and restart Besu"
continue
fi
scp -q $SSH_OPTS "$GENESIS" "$STATIC" "$PERMS" "root@${host}:/tmp/" || { echo " scp failed"; continue; }
ssh $SSH_OPTS "root@$host" "pct exec $vmid -- mkdir -p /etc/besu"
ssh $SSH_OPTS "root@$host" "pct push $vmid /tmp/genesis.json /etc/besu/genesis.json && pct push $vmid /tmp/static-nodes.json /etc/besu/static-nodes.json && pct push $vmid /tmp/permissions-nodes.toml /etc/besu/permissions-nodes.toml"

View File

@@ -1,9 +1,12 @@
#!/usr/bin/env bash
# Generate Besu node key (/data/besu/key) only for VMIDs that are missing from
# Generate Besu node key (/data/besu/key) only for selected VMIDs that are missing from
# static-nodes.json (no enode in the list yet). Key file = 64 hex chars (32 bytes).
# After running, use: bash scripts/besu/collect-enodes-from-all-besu-nodes.sh --missing-only
#
# Usage: bash scripts/besu/generate-node-keys-for-missing-vmids.sh [--dry-run] [--collect] [--force]
# Usage:
# bash scripts/besu/generate-node-keys-for-missing-vmids.sh
# bash scripts/besu/generate-node-keys-for-missing-vmids.sh --vmid 1505
# bash scripts/besu/generate-node-keys-for-missing-vmids.sh --apply --vmid 1505 --collect
# --collect Run collect-enodes-from-all-besu-nodes.sh --missing-only after generating keys.
# --force Overwrite existing key file with new 64-hex key (fixes PEM/wrong-format keys).
@@ -11,25 +14,65 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
DRY_RUN=false
DRY_RUN=true
RUN_COLLECT=false
FORCE=false
for arg in "${@:-}"; do
[[ "$arg" == "--dry-run" ]] && DRY_RUN=true
[[ "$arg" == "--collect" ]] && RUN_COLLECT=true
[[ "$arg" == "--force" ]] && FORCE=true
TARGET_VMIDS=()
# Print the CLI help text for the key-generation script to stdout.
usage() {
cat <<'EOF'
Usage: bash scripts/besu/generate-node-keys-for-missing-vmids.sh [--apply] [--dry-run] [--collect] [--force] [--vmid <N>]
Options:
--dry-run Print intended actions only (default)
--apply Generate node keys on selected VMIDs
--collect Run collect-enodes-from-all-besu-nodes.sh --missing-only after generating keys
--force Overwrite existing key files
--vmid <N> Limit to one VMID; repeatable
EOF
}
# Parse CLI flags into DRY_RUN, RUN_COLLECT, FORCE and TARGET_VMIDS
# (declared above). Processed left to right, so a later --apply or
# --dry-run wins; --vmid is repeatable. Unknown flags exit 2.
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run)
DRY_RUN=true
shift
;;
--apply)
DRY_RUN=false
shift
;;
--collect)
# Chain the enode-collection script after key generation.
RUN_COLLECT=true
shift
;;
--force)
# Allow overwriting an existing key file.
FORCE=true
shift
;;
--vmid)
# --vmid consumes a value; missing value is a usage error (exit 2).
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
STATIC_FILE="${PROJECT_ROOT}/config/besu-node-lists/static-nodes.json"
SSH_OPTS="-o ConnectTimeout=8 -o StrictHostKeyChecking=accept-new"
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 2101 2102 2201 2301 2303 2304 2305 2306 2400 2401 2402 2403 2500 2501 2502 2503 2504 2505)
declare -A HOST_BY_VMID
for v in 1000 1001 1002 1500 1501 1502 2101 2500 2501 2502 2503 2504 2505; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-192.168.11.11}"; done
for v in 2201 2303 2401; do HOST_BY_VMID[$v]="${PROXMOX_R630_02:-192.168.11.12}"; done
for v in 1003 1004 1503 1504 1505 1506 1507 1508 2102 2301 2304 2305 2306 2400 2402 2403; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-192.168.11.10}"; done
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 2101 2102 2103 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2420 2430 2440 2460 2470 2480)
declare -A IP_BY_VMID
IP_BY_VMID[1000]=192.168.11.100
IP_BY_VMID[1001]=192.168.11.101
@@ -45,24 +88,39 @@ IP_BY_VMID[1505]=192.168.11.213
IP_BY_VMID[1506]=192.168.11.214
IP_BY_VMID[1507]=192.168.11.244
IP_BY_VMID[1508]=192.168.11.245
IP_BY_VMID[1509]=192.168.11.219
IP_BY_VMID[1510]=192.168.11.220
IP_BY_VMID[2101]=192.168.11.211
IP_BY_VMID[2102]=192.168.11.212
IP_BY_VMID[2103]=192.168.11.217
IP_BY_VMID[2201]=192.168.11.221
IP_BY_VMID[2301]=192.168.11.232
IP_BY_VMID[2303]=192.168.11.233
IP_BY_VMID[2304]=192.168.11.234
IP_BY_VMID[2305]=192.168.11.235
IP_BY_VMID[2306]=192.168.11.236
IP_BY_VMID[2307]=192.168.11.237
IP_BY_VMID[2308]=192.168.11.238
IP_BY_VMID[2400]=192.168.11.240
IP_BY_VMID[2401]=192.168.11.241
IP_BY_VMID[2402]=192.168.11.242
IP_BY_VMID[2403]=192.168.11.243
IP_BY_VMID[2500]=192.168.11.172
IP_BY_VMID[2501]=192.168.11.173
IP_BY_VMID[2502]=192.168.11.174
IP_BY_VMID[2503]=192.168.11.246
IP_BY_VMID[2504]=192.168.11.247
IP_BY_VMID[2505]=192.168.11.248
IP_BY_VMID[2420]=192.168.11.172
IP_BY_VMID[2430]=192.168.11.173
IP_BY_VMID[2440]=192.168.11.174
IP_BY_VMID[2460]=192.168.11.246
IP_BY_VMID[2470]=192.168.11.247
IP_BY_VMID[2480]=192.168.11.248
selected_vmid() {
  # VMID selection filter: with no --vmid flags every VMID passes;
  # otherwise only VMIDs present in TARGET_VMIDS pass (return 0).
  local v
  (( ${#TARGET_VMIDS[@]} == 0 )) && return 0
  for v in "${TARGET_VMIDS[@]}"; do
    [[ "$v" == "$1" ]] && return 0
  done
  return 1
}
# Which VMIDs are missing (IP not in current static-nodes.json)
declare -A EXISTING_IP
@@ -74,6 +132,7 @@ done < <(jq -r '.[]' "$STATIC_FILE" 2>/dev/null)
VMIDS_TO_FIX=()
for vmid in "${BESU_VMIDS[@]}"; do
selected_vmid "$vmid" || continue
ip="${IP_BY_VMID[$vmid]:-}"
[[ -z "$ip" ]] && continue
[[ -z "${EXISTING_IP[$ip]:-}" ]] && VMIDS_TO_FIX+=( "$vmid" )
@@ -88,7 +147,7 @@ echo "Generating node keys for ${#VMIDS_TO_FIX[@]} VMIDs (missing from list): ${
echo ""
for vmid in "${VMIDS_TO_FIX[@]}"; do
host="${HOST_BY_VMID[$vmid]:-}"
host="$(get_host_for_vmid "$vmid")"
ip="${IP_BY_VMID[$vmid]:-}"
[[ -z "$host" ]] && echo " $vmid: no host" && continue
if $DRY_RUN; then
@@ -125,5 +184,9 @@ done
echo ""
if $RUN_COLLECT && ! $DRY_RUN; then
echo "Running collect-enodes-from-all-besu-nodes.sh --missing-only..."
bash "${SCRIPT_DIR}/collect-enodes-from-all-besu-nodes.sh" --missing-only
collect_args=(--apply --missing-only)
for vmid in "${TARGET_VMIDS[@]}"; do
collect_args+=(--vmid "$vmid")
done
bash "${SCRIPT_DIR}/collect-enodes-from-all-besu-nodes.sh" "${collect_args[@]}"
fi

View File

@@ -1,17 +1,58 @@
#!/usr/bin/env bash
# Install Besu permanently on nodes that don't have /opt/besu/bin/besu (1505-1508, 2420-2480).
# Install Besu permanently on selected nodes that don't have /opt/besu/bin/besu (1505-1508, 2420-2480).
# Uses install-besu-in-ct-standalone.sh inside each CT; deploys config, genesis, node lists; enables and starts service.
#
# Usage: bash scripts/besu/install-besu-permanent-on-missing-nodes.sh [--dry-run]
# Usage:
# bash scripts/besu/install-besu-permanent-on-missing-nodes.sh
# bash scripts/besu/install-besu-permanent-on-missing-nodes.sh --vmid 1505
# bash scripts/besu/install-besu-permanent-on-missing-nodes.sh --apply --vmid 1505
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
DRY_RUN=true
TARGET_VMIDS=()
usage() {
  # Emit the installer's help text to stdout, line by line.
  local line
  for line in \
    'Usage: bash scripts/besu/install-besu-permanent-on-missing-nodes.sh [--apply] [--dry-run] [--vmid <N>]' \
    'Options:' \
    '--dry-run Print intended actions only (default)' \
    '--apply Install Besu on selected nodes' \
    '--vmid <N> Limit to one VMID; repeatable'
  do
    printf '%s\n' "$line"
  done
}
# Parse CLI flags into DRY_RUN and TARGET_VMIDS (declared above). Flags
# are processed left to right (later --apply/--dry-run wins); --vmid is
# repeatable. Unknown flags print help to stderr and exit 2.
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run)
DRY_RUN=true
shift
;;
--apply)
DRY_RUN=false
shift
;;
--vmid)
# --vmid consumes a value; missing value is a usage error (exit 2).
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
SSH_OPTS="-o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new"
BESU_VERSION="${BESU_VERSION:-23.10.3}"
@@ -22,9 +63,6 @@ PERMS_SRC="${PROJECT_ROOT}/config/besu-node-lists/permissions-nodes.toml"
# VMIDs that may lack Besu (sentries 1505-1508 on ml110; edge RPC 2420-2480 on r630-01)
SENTRY_VMIDS=(1505 1506 1507 1508)
RPC_VMIDS=(2420 2430 2440 2460 2470 2480)
declare -A HOST_BY_VMID
for v in 1505 1506 1507 1508; do HOST_BY_VMID[$v]="${PROXMOX_ML110:-192.168.11.10}"; done
for v in 2420 2430 2440 2460 2470 2480; do HOST_BY_VMID[$v]="${PROXMOX_R630_01:-192.168.11.11}"; done
declare -A IP_BY_VMID
IP_BY_VMID[1505]=192.168.11.213
IP_BY_VMID[1506]=192.168.11.214
@@ -41,8 +79,20 @@ IP_BY_VMID[2480]=192.168.11.248
[[ ! -f "$STATIC_SRC" ]] && { echo "ERROR: $STATIC_SRC not found"; exit 1; }
[[ ! -f "$PERMS_SRC" ]] && { echo "ERROR: $PERMS_SRC not found"; exit 1; }
# Return 0 when the VMID in $1 passes the --vmid filter: with no filters
# (TARGET_VMIDS empty) every VMID is selected; otherwise only listed ones.
selected_vmid() {
local vmid="$1"
# No --vmid filters given: select everything.
[[ ${#TARGET_VMIDS[@]} -eq 0 ]] && return 0
local wanted
for wanted in "${TARGET_VMIDS[@]}"; do
[[ "$vmid" == "$wanted" ]] && return 0
done
return 1
}
install_sentry() {
local vmid=$1 host=${HOST_BY_VMID[$vmid]} ip=${IP_BY_VMID[$vmid]}
local vmid=$1 host ip
host="$(get_host_for_vmid "$vmid")"
ip=${IP_BY_VMID[$vmid]}
echo "--- VMID $vmid (sentry @ $ip) ---"
if $DRY_RUN; then echo " [dry-run] would install Besu sentry"; return 0; fi
ssh $SSH_OPTS "root@$host" "pct exec $vmid -- rm -rf /opt/besu 2>/dev/null; true"
@@ -61,7 +111,9 @@ install_sentry() {
}
install_rpc() {
local vmid=$1 host=${HOST_BY_VMID[$vmid]} ip=${IP_BY_VMID[$vmid]}
local vmid=$1 host ip
host="$(get_host_for_vmid "$vmid")"
ip=${IP_BY_VMID[$vmid]}
echo "--- VMID $vmid (RPC @ $ip) ---"
if $DRY_RUN; then echo " [dry-run] would install Besu RPC"; return 0; fi
if ! ssh $SSH_OPTS "root@$host" "pct exec $vmid -- bash -c 'touch /tmp/.w && rm -f /tmp/.w'" 2>/dev/null; then
@@ -138,7 +190,8 @@ echo "Installing Besu permanently on nodes missing /opt/besu/bin/besu (1505-1508
echo ""
for vmid in "${SENTRY_VMIDS[@]}"; do
host="${HOST_BY_VMID[$vmid]:-}"
selected_vmid "$vmid" || continue
host="$(get_host_for_vmid "$vmid")"
has_besu=$(ssh $SSH_OPTS "root@$host" "pct exec $vmid -- test -x /opt/besu/bin/besu 2>/dev/null" && echo yes || echo no)
if [[ "$has_besu" == yes ]]; then
echo "VMID $vmid: Besu already present (skip)"
@@ -148,7 +201,8 @@ for vmid in "${SENTRY_VMIDS[@]}"; do
done
for vmid in "${RPC_VMIDS[@]}"; do
host="${HOST_BY_VMID[$vmid]:-}"
selected_vmid "$vmid" || continue
host="$(get_host_for_vmid "$vmid")"
has_besu=$(ssh $SSH_OPTS "root@$host" "pct exec $vmid -- test -x /opt/besu/bin/besu 2>/dev/null" && echo yes || echo no)
if [[ "$has_besu" == yes ]]; then
echo "VMID $vmid: Besu already present (skip)"
@@ -158,6 +212,6 @@ for vmid in "${RPC_VMIDS[@]}"; do
done
echo ""
echo "Done. Verify: bash scripts/besu/restart-besu-reload-node-lists.sh (optional); then check block production on RPCs."
echo "Done. Verify: bash scripts/besu/restart-besu-reload-node-lists.sh --apply [--vmid <N>] (optional); then check block production on RPCs."
rm -f /tmp/config-sentry.toml /tmp/besu.service /tmp/config.toml 2>/dev/null || true
for v in 2420 2430 2440 2460 2470 2480; do rm -f /tmp/config-rpc-${v}.toml 2>/dev/null; done

186
scripts/besu/match-enodes-to-rpcs.sh Executable file → Normal file
View File

@@ -1,21 +1,16 @@
#!/usr/bin/env bash
# Match enodes from the current static-nodes allowlist to live RPC nodes.
#
# Usage:
# bash scripts/besu/match-enodes-to-rpcs.sh
# bash scripts/besu/match-enodes-to-rpcs.sh --vmid 2301
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Match Enodes from Allowlist to RPC Nodes
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
@@ -24,87 +19,128 @@ CYAN='\033[0;36m'
NC='\033[0m'
# Colored logging one-liners using the ANSI color variables defined above
# (RED/GREEN/YELLOW/CYAN/NC visible here).
# NOTE(review): BLUE appears to be defined alongside the other color vars
# just outside this view — confirm before relying on log_info's color.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_section() { echo -e "\n${CYAN}=== $1 ===${NC}"; }
# RPC node mapping (VMID:IP:Hostname)
TARGET_VMIDS=()
# Print the CLI help text for the enode-matching script to stdout.
usage() {
cat <<'EOF'
Usage: bash scripts/besu/match-enodes-to-rpcs.sh [--vmid <N>]
Options:
--vmid <N> Limit to one VMID; repeatable
EOF
}
# Parse CLI flags into TARGET_VMIDS (declared above). --vmid is
# repeatable. Unknown flags print help to stderr and exit 2.
while [[ $# -gt 0 ]]; do
case "$1" in
--vmid)
# --vmid consumes a value; missing value is a usage error (exit 2).
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
# Return 0 when the VMID in $1 passes the --vmid filter: with no filters
# (TARGET_VMIDS empty) every VMID is selected; otherwise only listed ones.
selected_vmid() {
local vmid="$1"
# No --vmid filters given: select everything.
[[ ${#TARGET_VMIDS[@]} -eq 0 ]] && return 0
local wanted
for wanted in "${TARGET_VMIDS[@]}"; do
[[ "$vmid" == "$wanted" ]] && return 0
done
return 1
}
declare -A RPC_NODES=(
["2101"]="${RPC_CORE_1:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-192.168.11.21}}}}}1}:besu-rpc-core-1"
["2201"]="${RPC_PUBLIC_1:-${RPC_PUBLIC_1:-192.168.11.221}}:besu-rpc-public-1"
["2301"]="${RPC_PRIVATE_1:-${RPC_PRIVATE_1:-192.168.11.232}}:besu-rpc-private-1"
["2303"]="${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-192.168.11.233}}}}}}}:besu-rpc-ali-0x8a"
["2304"]="${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-192.168.11.234}}}}}}}:besu-rpc-ali-0x1"
["2305"]="${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-192.168.11.235}}}}}}}:besu-rpc-luis-0x8a"
["2306"]="${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-192.168.11.236}}}}}}}:besu-rpc-luis-0x1"
["2307"]="${IP_RPC_237:-${IP_RPC_237:-${IP_RPC_237:-192.168.11.237}}}:besu-rpc-putu-0x8a"
["2308"]="${IP_RPC_238:-${IP_RPC_238:-${IP_RPC_238:-192.168.11.238}}}:besu-rpc-putu-0x1"
["2400"]="${RPC_THIRDWEB_PRIMARY:-${RPC_THIRDWEB_PRIMARY:-192.168.11.240}}:thirdweb-rpc-1"
["2401"]="${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-192.168.11.241}}}}}}}:besu-rpc-thirdweb-0x8a-1"
["2402"]="${RPC_THIRDWEB_2:-${RPC_THIRDWEB_2:-${RPC_THIRDWEB_2:-${RPC_THIRDWEB_2:-${RPC_THIRDWEB_2:-${RPC_THIRDWEB_2:-${RPC_THIRDWEB_2:-192.168.11.242}}}}}}}:besu-rpc-thirdweb-0x8a-2"
["2403"]="${RPC_THIRDWEB_3:-${RPC_THIRDWEB_3:-${RPC_THIRDWEB_3:-192.168.11.243}}}:besu-rpc-thirdweb-0x8a-3"
["2101"]="${RPC_CORE_1:-192.168.11.211}:besu-rpc-core-1"
["2102"]="${RPC_CORE_2:-192.168.11.212}:besu-rpc-core-2"
["2103"]="${RPC_THIRDWEB_ADMIN_CORE:-192.168.11.217}:besu-rpc-core-thirdweb"
["2201"]="${RPC_PUBLIC_1:-192.168.11.221}:besu-rpc-public-1"
["2301"]="${RPC_PRIVATE_1:-192.168.11.232}:besu-rpc-private-1"
["2303"]="192.168.11.233:besu-rpc-ali-0x8a"
["2304"]="192.168.11.234:besu-rpc-ali-0x1"
["2305"]="192.168.11.235:besu-rpc-luis-0x8a"
["2306"]="192.168.11.236:besu-rpc-luis-0x1"
["2307"]="192.168.11.237:besu-rpc-putu-0x8a"
["2308"]="192.168.11.238:besu-rpc-putu-0x1"
["2400"]="192.168.11.240:thirdweb-rpc-1"
["2401"]="192.168.11.241:besu-rpc-thirdweb-0x8a-1"
["2402"]="192.168.11.242:besu-rpc-thirdweb-0x8a-2"
["2403"]="192.168.11.243:besu-rpc-thirdweb-0x8a-3"
["2420"]="192.168.11.172:besu-rpc-alltra-1"
["2430"]="192.168.11.173:besu-rpc-alltra-2"
["2440"]="192.168.11.174:besu-rpc-alltra-3"
["2460"]="192.168.11.246:besu-rpc-hybx-1"
["2470"]="192.168.11.247:besu-rpc-hybx-2"
["2480"]="192.168.11.248:besu-rpc-hybx-3"
)
# Enodes from allowlist (extract IPs)
declare -A ALLOWLIST_ENODES=(
["${RPC_CORE_1:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-192.168.11.21}}}}}1}"]="enode://6cdc892fa09afa2b05c21cc9a1193a86cf0d195ce81b02a270d8bb987f78ca98ad90d907670796c90fc6e4eaf3b4cae6c0c15871e2564de063beceb4bbfc6532@${RPC_CORE_1:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-192.168.11.21}}}}}1}:30303"
["${RPC_PUBLIC_1:-${RPC_PUBLIC_1:-192.168.11.221}}"]="enode://07daf3d64079faa3982bc8be7aa86c24ef21eca4565aae4a7fd963c55c728de0639d80663834634edf113b9f047d690232ae23423c64979961db4b6449aa6dfd@${RPC_PUBLIC_1:-${RPC_PUBLIC_1:-192.168.11.221}}:30303"
["${RPC_PRIVATE_1:-${RPC_PRIVATE_1:-192.168.11.232}}"]="enode://83eb8c172034afd72846740921f748c77780c3cc0cea45604348ba859bc3a47187e24e5fad7f74e5fe353e86fd35ab7c37f02cfbb8299a850a190b40968bd8e2@${RPC_PRIVATE_1:-${RPC_PRIVATE_1:-192.168.11.232}}:30303"
["${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-192.168.11.233}}}}}}}"]="enode://688f271d94c7995600ae36d25aa2fb92fea0c52e50e86c598be8966515458c1408b67fba76e1f771073e4774a6e399588443da63394ea25d56e6ca36f2288e00@${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-${RPC_NODE_233:-192.168.11.233}}}}}}}:30303"
["${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-192.168.11.234}}}}}}}"]="enode://4dc4b9f8cffbc53349f6535ab9aa7785cbc0ae92928dcf4ef6f90638ace9fc69ff7d19c49a8bda54f78a000579c557ef25fce3c971c6ab0026b6e70c8e6e5cac@${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-${RPC_NODE_234:-192.168.11.234}}}}}}}:30303"
["${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-192.168.11.235}}}}}}}"]="enode://2de9fc2be46c2cedce182af65ac1f5fc5ed258d21cdf0ac2687a16618382159dae1f730650e6730cf7fc5dccb6b97bffd20e271e3eb4df5a69f38a8c4cba91b5@${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-${RPC_NODE_235:-192.168.11.235}}}}}}}:30303"
["${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-192.168.11.236}}}}}}}"]="enode://38bd43b934feaaccb978917c66b0abbf9b62e39bce6064a6d3ec557f61e13b75e293cbb2ab382278adda5ce51f451528c7c37d991255a0c31e9578b85fc1dd5a@${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-${RPC_NODE_236:-192.168.11.236}}}}}}}:30303"
["${IP_RPC_237:-${IP_RPC_237:-${IP_RPC_237:-192.168.11.237}}}"]="enode://f7edb80de20089cb0b3a28b03e0491fafa1c9eb9a0344dadf343757ee2a44b577a861514fd7747a86f631c9e34519aef25a5f8996f20bc8dd460cd2bdc1bd490@${IP_RPC_237:-${IP_RPC_237:-${IP_RPC_237:-192.168.11.237}}}:30303"
["${IP_RPC_238:-${IP_RPC_238:-${IP_RPC_238:-192.168.11.238}}}"]="enode://4e2d4e94909813b7145e0e9cd7e56724f64ba91dd7dca0e70bd70742f930450cf57311f2c220cfe24a20e9f668a8e170755d626f84660aa1fbea85f75557eb8d@${IP_RPC_238:-${IP_RPC_238:-${IP_RPC_238:-192.168.11.238}}}:30303"
["${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-192.168.11.241}}}}}}}"]="enode://38e138ea5a4b0b244e4484b5c327631b5d3c849dcb188ff3d9ff0a8b6ad7edb738303a1a948888c269aa7555e5ff47d75b7b63dbd579d05580b5442b3fa0ebfc@${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-${RPC_THIRDWEB_1:-192.168.11.241}}}}}}}:30303"
)
log_section "Matching Enodes to RPC Nodes"
ALLOWLIST_FILE="${PROJECT_ROOT}/config/besu-node-lists/static-nodes.json"
declare -A ALLOWLIST_ENODES
if [[ -f "$ALLOWLIST_FILE" ]]; then
while IFS= read -r enode; do
[[ -z "$enode" ]] && continue
ip=$(echo "$enode" | sed -n 's|enode://[a-fA-F0-9]*@\([0-9.]*\):.*|\1|p')
[[ -n "$ip" ]] && ALLOWLIST_ENODES["$ip"]="$enode"
done < <(jq -r '.[]' "$ALLOWLIST_FILE" 2>/dev/null)
fi
log_section "Matching Enodes To RPC Nodes"
echo ""
printf "%-8s %-18s %-40s %-20s\n" "VMID" "IP Address" "Hostname" "Status"
echo "────────────────────────────────────────────────────────────────────────────────────────────────────"
printf "%-8s %-18s %-36s %-12s\n" "VMID" "IP Address" "Hostname" "Status"
echo "--------------------------------------------------------------------------------"
MATCHED=0
MISSING=0
for vmid in "${!RPC_NODES[@]}"; do
IFS=':' read -r ip hostname <<< "${RPC_NODES[$vmid]}"
if [ -n "${ALLOWLIST_ENODES[$ip]}" ]; then
printf "%-8s %-18s %-40s %-20s\n" "$vmid" "$ip" "$hostname" "MATCHED"
((MATCHED++))
else
printf "%-8s %-18s %-40s %-20s\n" "$vmid" "$ip" "$hostname" "MISSING"
((MISSING++))
fi
for vmid in $(printf "%s\n" "${!RPC_NODES[@]}" | sort -n); do
selected_vmid "$vmid" || continue
IFS=':' read -r ip hostname <<< "${RPC_NODES[$vmid]}"
if [[ -n "${ALLOWLIST_ENODES[$ip]:-}" ]]; then
printf "%-8s %-18s %-36s %-12s\n" "$vmid" "$ip" "$hostname" "MATCHED"
((MATCHED++)) || true
else
printf "%-8s %-18s %-36s %-12s\n" "$vmid" "$ip" "$hostname" "MISSING"
((MISSING++)) || true
fi
done
total=$((MATCHED + MISSING))
echo ""
log_section "Summary"
log_info "Matched: $MATCHED/$total RPC nodes"
log_info "Missing: $MISSING/$total RPC nodes"
log_info "Matched: $MATCHED/13 RPC nodes"
log_info "Missing: $MISSING/13 RPC nodes"
if [ $MISSING -gt 0 ]; then
log_warn "Missing enodes:"
for vmid in "${!RPC_NODES[@]}"; do
IFS=':' read -r ip hostname <<< "${RPC_NODES[$vmid]}"
if [ -z "${ALLOWLIST_ENODES[$ip]}" ]; then
log_warn "$vmid ($ip) - $hostname"
fi
done
if [[ "$MISSING" -gt 0 ]]; then
echo ""
log_warn "Missing enodes:"
for vmid in $(printf "%s\n" "${!RPC_NODES[@]}" | sort -n); do
selected_vmid "$vmid" || continue
IFS=':' read -r ip hostname <<< "${RPC_NODES[$vmid]}"
[[ -n "${ALLOWLIST_ENODES[$ip]:-}" ]] && continue
log_warn " $vmid ($ip) - $hostname"
done
fi
echo ""
log_section "Complete Enode Mapping"
for vmid in "${!RPC_NODES[@]}"; do
IFS=':' read -r ip hostname <<< "${RPC_NODES[$vmid]}"
if [ -n "${ALLOWLIST_ENODES[$ip]}" ]; then
echo "VMID $vmid ($hostname) - $ip:"
echo " ${ALLOWLIST_ENODES[$ip]}"
echo ""
fi
log_section "Matched Enodes"
for vmid in $(printf "%s\n" "${!RPC_NODES[@]}" | sort -n); do
selected_vmid "$vmid" || continue
IFS=':' read -r ip hostname <<< "${RPC_NODES[$vmid]}"
enode="${ALLOWLIST_ENODES[$ip]:-}"
[[ -z "$enode" ]] && continue
echo "VMID $vmid ($hostname) - $ip:"
echo " $enode"
echo ""
done

View File

@@ -1,27 +1,56 @@
#!/usr/bin/env bash
# Confirm Besu version >= 24.1.0 on all nodes (required for EIP-7702 / Cancun)
# Usage: PROXMOX_HOST=${PROXMOX_HOST_ML110:-192.168.11.10} ./scripts/check-besu-version-all-nodes.sh
# Confirm Besu version >= 24.1.0 on selected nodes (required for EIP-7702 / Cancun)
# Usage:
# bash scripts/check-besu-version-all-nodes.sh
# bash scripts/check-besu-version-all-nodes.sh --vmid 2301
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Optional: load IP config for PROXMOX_HOST_ML110 etc.
if [ -f "$PROJECT_ROOT/config/ip-addresses.conf" ]; then
# shellcheck source=../config/ip-addresses.conf
source "$PROJECT_ROOT/config/ip-addresses.conf"
fi
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_ML110:-192.168.11.10}}"
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
MIN_VERSION="24.1.0"
TARGET_VMIDS=()
# All known Besu node VMIDs (validators, sentries, RPC). Override with BESU_VMIDS="2101 2400 2401 2402" for quick check.
if [ -n "${BESU_VMIDS:-}" ]; then
read -ra BESU_VMIDS <<< "$BESU_VMIDS"
else
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 2101 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403)
fi
usage() {
cat <<'EOF'
Usage: bash scripts/check-besu-version-all-nodes.sh [--vmid <N>]
Options:
--vmid <N> Limit to one VMID; repeatable
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
BESU_VMIDS=(1000 1001 1002 1003 1004 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 2101 2102 2103 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2420 2430 2440 2460 2470 2480)
selected_vmid() {
local vmid="$1"
[[ ${#TARGET_VMIDS[@]} -eq 0 ]] && return 0
local wanted
for wanted in "${TARGET_VMIDS[@]}"; do
[[ "$vmid" == "$wanted" ]] && return 0
done
return 1
}
RED='\033[0;31m'
GREEN='\033[0;32m'
@@ -85,7 +114,7 @@ is_running() {
}
echo ""
log_info "Besu version check (>= ${MIN_VERSION}) on Proxmox host: $PROXMOX_HOST"
log_info "Besu version check (>= ${MIN_VERSION}) on selected nodes"
log_info "EIP-7702 / Cancun requires Besu >= 24.1.0"
echo ""
@@ -96,13 +125,15 @@ declare -a FAILED_VMIDS
declare -a FAILED_VERSIONS
for vmid in "${BESU_VMIDS[@]}"; do
if ! is_running "$vmid" "$PROXMOX_HOST"; then
selected_vmid "$vmid" || continue
host="$(get_host_for_vmid "$vmid")"
if ! is_running "$vmid" "$host"; then
printf " VMID %-5s %-12s %s\n" "$vmid" "—" "(container not running)"
((SKIP++)) || true
continue
fi
version=$(get_besu_version "$vmid" "$PROXMOX_HOST")
version=$(get_besu_version "$vmid" "$host")
if [ -z "$version" ]; then
printf " VMID %-5s %-12s " "$vmid" "—"

View File

@@ -1,10 +1,16 @@
#!/usr/bin/env bash
# Clear entire Besu blockchain database (NUCLEAR OPTION)
# This will require full re-sync from genesis
# Usage: ./clear-blockchain-database.sh
# Clear selected Besu blockchain databases (NUCLEAR OPTION)
# This will require full re-sync from genesis.
# Usage:
# ./clear-blockchain-database.sh --vmid 2101
# ./clear-blockchain-database.sh --apply --vmid 2101
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
@@ -23,6 +29,44 @@ if ! command -v pct &>/dev/null; then
exit 1
fi
APPLY=false
TARGET_VMIDS=()
usage() {
cat <<'EOF'
Usage: ./clear-blockchain-database.sh --vmid <N> [--vmid <N> ...] [--apply]
Options:
--vmid <N> Required. Limit destructive action to one or more VMIDs.
--apply Perform deletion. Without this flag, the script prints the target VMIDs and exits.
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
--apply)
APPLY=true
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
[[ ${#TARGET_VMIDS[@]} -gt 0 ]] || { log_error "At least one --vmid is required for this destructive script."; usage >&2; exit 2; }
echo "========================================="
echo "Clear Entire Blockchain Database"
echo "========================================="
@@ -38,35 +82,39 @@ log_error " 5. Take SIGNIFICANT TIME to re-sync"
echo ""
log_warn "This is a NUCLEAR OPTION - use only if absolutely necessary"
echo ""
declare -A SERVICE_BY_VMID
for vmid in 1000 1001 1002 1003 1004; do SERVICE_BY_VMID[$vmid]="besu-validator.service"; done
for vmid in 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510; do SERVICE_BY_VMID[$vmid]="besu-sentry.service"; done
for vmid in 2101 2102 2103 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2420 2430 2440 2460 2470 2480; do SERVICE_BY_VMID[$vmid]="besu-rpc.service"; done
if ! $APPLY; then
log_warn "Dry-run only. Target VMIDs:"
for vmid in "${TARGET_VMIDS[@]}"; do
log_info " VMID $vmid (host $(get_host_for_vmid "$vmid")) service ${SERVICE_BY_VMID[$vmid]:-unknown}"
done
log_info "Re-run with --apply to proceed."
exit 0
fi
read -p "Type 'DELETE DATABASE' to confirm: " CONFIRM
if [ "$CONFIRM" != "DELETE DATABASE" ]; then
log_info "Aborted"
exit 0
fi
# All Besu nodes
VALIDATORS=(1000 1001 1002 1003 1004)
RPC_NODES=(2500 2501 2502)
log_info "Stopping all Besu nodes..."
for vmid in "${VALIDATORS[@]}"; do
log_info "Stopping selected Besu nodes..."
for vmid in "${TARGET_VMIDS[@]}"; do
service="${SERVICE_BY_VMID[$vmid]:-besu.service}"
if pct status "$vmid" 2>/dev/null | grep -q "running"; then
log_info "Stopping VMID $vmid (validator)..."
pct exec "$vmid" -- systemctl stop besu-validator.service 2>/dev/null || true
fi
done
for vmid in "${RPC_NODES[@]}"; do
if pct status "$vmid" 2>/dev/null | grep -q "running"; then
log_info "Stopping VMID $vmid (RPC)..."
pct exec "$vmid" -- systemctl stop besu-rpc.service 2>/dev/null || true
log_info "Stopping VMID $vmid ($service)..."
pct exec "$vmid" -- systemctl stop "$service" 2>/dev/null || pct exec "$vmid" -- systemctl stop besu.service 2>/dev/null || true
fi
done
sleep 5
log_info "Clearing entire blockchain databases..."
for vmid in "${VALIDATORS[@]}" "${RPC_NODES[@]}"; do
log_info "Clearing selected blockchain databases..."
for vmid in "${TARGET_VMIDS[@]}"; do
if pct status "$vmid" 2>/dev/null | grep -q "running"; then
log_info "Clearing VMID $vmid..."
@@ -84,18 +132,12 @@ for vmid in "${VALIDATORS[@]}" "${RPC_NODES[@]}"; do
fi
done
log_info "Starting all Besu nodes..."
for vmid in "${VALIDATORS[@]}"; do
log_info "Starting selected Besu nodes..."
for vmid in "${TARGET_VMIDS[@]}"; do
service="${SERVICE_BY_VMID[$vmid]:-besu.service}"
if pct status "$vmid" 2>/dev/null | grep -q "running"; then
log_info "Starting VMID $vmid (validator)..."
pct exec "$vmid" -- systemctl start besu-validator.service 2>/dev/null || true
fi
done
for vmid in "${RPC_NODES[@]}"; do
if pct status "$vmid" 2>/dev/null | grep -q "running"; then
log_info "Starting VMID $vmid (RPC)..."
pct exec "$vmid" -- systemctl start besu-rpc.service 2>/dev/null || true
log_info "Starting VMID $vmid ($service)..."
pct exec "$vmid" -- systemctl start "$service" 2>/dev/null || pct exec "$vmid" -- systemctl start besu.service 2>/dev/null || true
fi
done
@@ -113,4 +155,3 @@ log_info "Next steps:"
log_info " 1. Wait for nodes to re-sync (monitor block numbers)"
log_info " 2. Once synced, run: ./scripts/configure-ethereum-mainnet-final.sh"
log_info ""

View File

@@ -3,14 +3,101 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
declare -A NODE_IPS=(
[1505]="192.168.11.213"
[1506]="192.168.11.214"
[1507]="192.168.11.244"
[1508]="192.168.11.245"
[1509]="192.168.11.219"
[1510]="192.168.11.220"
[2101]="${RPC_CORE_1:-192.168.11.211}"
[2102]="192.168.11.212"
[2103]="192.168.11.217"
[2201]="${RPC_PUBLIC_1:-192.168.11.221}"
[2301]="${RPC_PRIVATE_1:-192.168.11.232}"
[2303]="192.168.11.233"
[2304]="192.168.11.234"
[2305]="192.168.11.235"
[2306]="192.168.11.236"
[2307]="192.168.11.237"
[2308]="192.168.11.238"
[2400]="192.168.11.240"
[2401]="${RPC_THIRDWEB_1:-192.168.11.241}"
[2402]="${RPC_THIRDWEB_2:-192.168.11.242}"
[2403]="${RPC_THIRDWEB_3:-192.168.11.243}"
[2420]="192.168.11.172"
[2430]="192.168.11.173"
[2440]="192.168.11.174"
[2460]="192.168.11.246"
[2470]="192.168.11.247"
[2480]="192.168.11.248"
)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
declare -A NODE_NAMES=(
[1505]="besu-sentry-alltra-1"
[1506]="besu-sentry-alltra-2"
[1507]="besu-sentry-hybx-1"
[1508]="besu-sentry-hybx-2"
[1509]="besu-sentry-thirdweb-1"
[1510]="besu-sentry-thirdweb-2"
[2101]="besu-rpc-core-1"
[2102]="besu-rpc-core-2"
[2103]="besu-rpc-admin-core-3"
[2201]="besu-rpc-public-1"
[2301]="besu-rpc-private-1"
[2303]="besu-rpc-private-3"
[2304]="besu-rpc-private-4"
[2305]="besu-rpc-private-5"
[2306]="besu-rpc-private-6"
[2307]="besu-rpc-private-7"
[2308]="besu-rpc-private-8"
[2400]="besu-rpc-thirdweb-primary"
[2401]="besu-rpc-thirdweb-1"
[2402]="besu-rpc-thirdweb-2"
[2403]="besu-rpc-thirdweb-3"
[2420]="besu-rpc-alltra-1"
[2430]="besu-rpc-alltra-2"
[2440]="besu-rpc-alltra-3"
[2460]="besu-rpc-hybx-1"
[2470]="besu-rpc-hybx-2"
[2480]="besu-rpc-hybx-3"
)
TARGET_VMIDS=()
usage() {
cat <<'EOF'
Usage: ./scripts/collect-all-enodes.sh --vmid <N> [--vmid <N> ...]
Options:
--vmid <N> Required. Collect enodes only for the selected VMIDs.
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
[[ ${#TARGET_VMIDS[@]} -gt 0 ]] || { usage >&2; exit 2; }
BLUE='\033[0;34m'
GREEN='\033[0;32m'
@@ -23,17 +110,20 @@ collect_enode() {
local vmid=$1
local ip=$2
local hostname=$3
local host
host="$(get_host_for_vmid "$vmid")"
log "Collecting enode from $vmid ($hostname)..."
local enode=$(pct exec $vmid -- bash -c '
local enode
enode=$(ssh -o StrictHostKeyChecking=no root@"$host" "pct exec $vmid -- bash -c '
if [ -f /data/besu/NODE_ID ]; then
NODE_ID=$(cat /data/besu/NODE_ID)
echo "enode://${NODE_ID}@'"$ip"':30303"
echo \"enode://${NODE_ID}@'"$ip"':30303\"
else
echo "PENDING"
echo PENDING
fi
' 2>/dev/null || echo "ERROR")
'" 2>/dev/null || echo "ERROR")
echo "$vmid|$hostname|$ip|$enode"
}
@@ -45,19 +135,13 @@ echo ""
ENODE_DIR=$(mktemp -d)
log "ALLTRA nodes..."
collect_enode 1505 "${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-192.168.11.21}}}}}3" "besu-sentry-alltra-1" > "$ENODE_DIR/1505.txt" &
collect_enode 1506 "${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-192.168.11.21}}}}}4" "besu-sentry-alltra-2" > "$ENODE_DIR/1506.txt" &
collect_enode 2500 "${IP_SERVICE_172:-${IP_SERVICE_172:-192.168.11.172}}" "besu-rpc-alltra-1" > "$ENODE_DIR/2500.txt" &
collect_enode 2501 "${IP_SERVICE_173:-${IP_SERVICE_173:-192.168.11.173}}" "besu-rpc-alltra-2" > "$ENODE_DIR/2501.txt" &
collect_enode 2502 "${IP_SERVICE_174:-${IP_SERVICE_174:-192.168.11.174}}" "besu-rpc-alltra-3" > "$ENODE_DIR/2502.txt" &
log "HYBX nodes..."
collect_enode 1507 "${IP_RPC_244:-${IP_RPC_244:-${IP_RPC_244:-192.168.11.244}}}" "besu-sentry-hybx-1" > "$ENODE_DIR/1507.txt" &
collect_enode 1508 "${IP_RPC_245:-${IP_RPC_245:-${IP_RPC_245:-192.168.11.245}}}" "besu-sentry-hybx-2" > "$ENODE_DIR/1508.txt" &
collect_enode 2503 "${IP_RPC_246:-${IP_RPC_246:-${IP_RPC_246:-192.168.11.246}}}" "besu-rpc-hybx-1" > "$ENODE_DIR/2503.txt" &
collect_enode 2504 "${IP_RPC_247:-${IP_RPC_247:-${IP_RPC_247:-192.168.11.247}}}" "besu-rpc-hybx-2" > "$ENODE_DIR/2504.txt" &
collect_enode 2505 "${IP_RPC_248:-${IP_RPC_248:-${IP_RPC_248:-192.168.11.248}}}" "besu-rpc-hybx-3" > "$ENODE_DIR/2505.txt" &
for vmid in "${TARGET_VMIDS[@]}"; do
if [[ -z "${NODE_IPS[$vmid]:-}" || -z "${NODE_NAMES[$vmid]:-}" ]]; then
echo "$vmid|unknown|unknown|UNSUPPORTED_VMID" > "$ENODE_DIR/${vmid}.txt"
continue
fi
collect_enode "$vmid" "${NODE_IPS[$vmid]}" "${NODE_NAMES[$vmid]}" > "$ENODE_DIR/${vmid}.txt" &
done
echo ""
log "Waiting for collections..."

View File

@@ -4,24 +4,106 @@
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf"
source "$PROJECT_ROOT/scripts/lib/load-project-env.sh"
get_host_for_vmid() {
local vmid=$1
if [[ "$vmid" =~ ^(1505|1506|1507|1508)$ ]]; then
echo "${PROXMOX_HOST_ML110}"
elif [[ "$vmid" =~ ^(2500|2501|2502|2503|2504|2505)$ ]]; then
echo "${PROXMOX_HOST_R630_01}"
else
echo "${PROXMOX_HOST_R630_01}"
fi
declare -A NODE_IPS=(
[1505]="192.168.11.213"
[1506]="192.168.11.214"
[1507]="192.168.11.244"
[1508]="192.168.11.245"
[1509]="192.168.11.219"
[1510]="192.168.11.220"
[2101]="${RPC_CORE_1:-192.168.11.211}"
[2102]="192.168.11.212"
[2103]="192.168.11.217"
[2201]="${RPC_PUBLIC_1:-192.168.11.221}"
[2301]="${RPC_PRIVATE_1:-192.168.11.232}"
[2303]="192.168.11.233"
[2304]="192.168.11.234"
[2305]="192.168.11.235"
[2306]="192.168.11.236"
[2307]="192.168.11.237"
[2308]="192.168.11.238"
[2400]="192.168.11.240"
[2401]="${RPC_THIRDWEB_1:-192.168.11.241}"
[2402]="${RPC_THIRDWEB_2:-192.168.11.242}"
[2403]="${RPC_THIRDWEB_3:-192.168.11.243}"
[2420]="192.168.11.172"
[2430]="192.168.11.173"
[2440]="192.168.11.174"
[2460]="192.168.11.246"
[2470]="192.168.11.247"
[2480]="192.168.11.248"
)
declare -A NODE_NAMES=(
[1505]="besu-sentry-alltra-1"
[1506]="besu-sentry-alltra-2"
[1507]="besu-sentry-hybx-1"
[1508]="besu-sentry-hybx-2"
[1509]="besu-sentry-thirdweb-1"
[1510]="besu-sentry-thirdweb-2"
[2101]="besu-rpc-core-1"
[2102]="besu-rpc-core-2"
[2103]="besu-rpc-admin-core-3"
[2201]="besu-rpc-public-1"
[2301]="besu-rpc-private-1"
[2303]="besu-rpc-private-3"
[2304]="besu-rpc-private-4"
[2305]="besu-rpc-private-5"
[2306]="besu-rpc-private-6"
[2307]="besu-rpc-private-7"
[2308]="besu-rpc-private-8"
[2400]="besu-rpc-thirdweb-primary"
[2401]="besu-rpc-thirdweb-1"
[2402]="besu-rpc-thirdweb-2"
[2403]="besu-rpc-thirdweb-3"
[2420]="besu-rpc-alltra-1"
[2430]="besu-rpc-alltra-2"
[2440]="besu-rpc-alltra-3"
[2460]="besu-rpc-hybx-1"
[2470]="besu-rpc-hybx-2"
[2480]="besu-rpc-hybx-3"
)
TARGET_VMIDS=()
usage() {
cat <<'EOF'
Usage: ./scripts/collect-enodes-via-rpc.sh --vmid <N> [--vmid <N> ...]
Options:
--vmid <N> Required. Collect enodes only for the selected VMIDs.
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
[[ ${#TARGET_VMIDS[@]} -gt 0 ]] || { usage >&2; exit 2; }
collect_enode() {
local vmid=$1
local ip=$2
local hostname=$3
local host=$(get_host_for_vmid $vmid)
local host
host="$(get_host_for_vmid "$vmid")"
# Try RPC first
local enode=$(ssh -o StrictHostKeyChecking=no root@${host} "pct exec $vmid -- curl -s -X POST -H 'Content-Type: application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"admin_nodeInfo\",\"params\":[],\"id\":1}' http://localhost:8545 2>/dev/null | grep -o '\"enode\":\"[^\"]*\"' | cut -d'\"' -f4" 2>/dev/null)
@@ -42,17 +124,10 @@ collect_enode() {
echo "$vmid|$hostname|$ip|$enode"
}
for vmid in 1505 1506 2500 2501 2502 1507 1508 2503 2504 2505; do
case $vmid in
1505) collect_enode 1505 "${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-192.168.11.21}}}}}3" "besu-sentry-alltra-1" ;;
1506) collect_enode 1506 "${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-${IP_SERVICE_21:-192.168.11.21}}}}}4" "besu-sentry-alltra-2" ;;
2500) collect_enode 2500 "${IP_SERVICE_172:-${IP_SERVICE_172:-192.168.11.172}}" "besu-rpc-alltra-1" ;;
2501) collect_enode 2501 "${IP_SERVICE_173:-${IP_SERVICE_173:-192.168.11.173}}" "besu-rpc-alltra-2" ;;
2502) collect_enode 2502 "${IP_SERVICE_174:-${IP_SERVICE_174:-192.168.11.174}}" "besu-rpc-alltra-3" ;;
1507) collect_enode 1507 "${IP_RPC_244:-${IP_RPC_244:-${IP_RPC_244:-192.168.11.244}}}" "besu-sentry-hybx-1" ;;
1508) collect_enode 1508 "${IP_RPC_245:-${IP_RPC_245:-${IP_RPC_245:-192.168.11.245}}}" "besu-sentry-hybx-2" ;;
2503) collect_enode 2503 "${IP_RPC_246:-${IP_RPC_246:-${IP_RPC_246:-192.168.11.246}}}" "besu-rpc-hybx-1" ;;
2504) collect_enode 2504 "${IP_RPC_247:-${IP_RPC_247:-${IP_RPC_247:-192.168.11.247}}}" "besu-rpc-hybx-2" ;;
2505) collect_enode 2505 "${IP_RPC_248:-${IP_RPC_248:-${IP_RPC_248:-192.168.11.248}}}" "besu-rpc-hybx-3" ;;
esac
for vmid in "${TARGET_VMIDS[@]}"; do
if [[ -z "${NODE_IPS[$vmid]:-}" || -z "${NODE_NAMES[$vmid]:-}" ]]; then
echo "$vmid|unknown|unknown|UNSUPPORTED_VMID"
continue
fi
collect_enode "$vmid" "${NODE_IPS[$vmid]}" "${NODE_NAMES[$vmid]}"
done

View File

@@ -3,19 +3,76 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
log() { echo "[INFO] $1"; }
success() { echo "[✓] $1"; }
error() { echo "[ERROR] $1"; }
PROFILE=""
TARGET_VMIDS=()
usage() {
cat <<'EOF'
Usage: ./scripts/configure-rpc-nodes.sh --profile <fullfunction|standardbase> --vmid <N> [--vmid <N> ...]
Options:
--profile <name> Required. Choose the config template to generate.
--vmid <N> Required. Generate config only for the selected VMIDs.
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--profile)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
PROFILE="$2"
shift 2
;;
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
[[ -n "$PROFILE" ]] || { usage >&2; exit 2; }
[[ ${#TARGET_VMIDS[@]} -gt 0 ]] || { usage >&2; exit 2; }
declare -A RPC_IPS=(
[2101]="${RPC_CORE_1:-192.168.11.211}"
[2201]="${RPC_PUBLIC_1:-192.168.11.221}"
[2301]="${RPC_PRIVATE_1:-192.168.11.232}"
[2303]="192.168.11.233"
[2304]="192.168.11.234"
[2305]="192.168.11.235"
[2306]="192.168.11.236"
[2307]="192.168.11.237"
[2308]="192.168.11.238"
[2400]="192.168.11.240"
[2401]="${RPC_THIRDWEB_1:-192.168.11.241}"
[2402]="${RPC_THIRDWEB_2:-192.168.11.242}"
[2403]="${RPC_THIRDWEB_3:-192.168.11.243}"
[2420]="192.168.11.172"
[2430]="192.168.11.173"
[2440]="192.168.11.174"
[2460]="192.168.11.246"
[2470]="192.168.11.247"
[2480]="192.168.11.248"
)
# Function to create full-function RPC config
create_fullfunction_config() {
local vmid=$1
@@ -113,26 +170,28 @@ log "RPC Node Configuration Generator"
log "==================================="
echo ""
# ALLTRA Network
log "Configuring ALLTRA Network RPC Nodes..."
create_fullfunction_config 2500 "${IP_SERVICE_172:-${IP_SERVICE_172:-192.168.11.172}}"
create_standardbase_config 2501 "${IP_SERVICE_173:-${IP_SERVICE_173:-192.168.11.173}}"
create_standardbase_config 2502 "${IP_SERVICE_174:-${IP_SERVICE_174:-192.168.11.174}}"
for vmid in "${TARGET_VMIDS[@]}"; do
ip="${RPC_IPS[$vmid]:-}"
if [[ -z "$ip" ]]; then
error "Unsupported VMID: $vmid"
exit 2
fi
case "$PROFILE" in
fullfunction) create_fullfunction_config "$vmid" "$ip" ;;
standardbase) create_standardbase_config "$vmid" "$ip" ;;
*)
error "Unknown profile: $PROFILE"
usage >&2
exit 2
;;
esac
done
echo ""
# HYBX Network
log "Configuring HYBX Network RPC Nodes..."
create_fullfunction_config 2503 "${IP_RPC_246:-${IP_RPC_246:-${IP_RPC_246:-192.168.11.246}}}"
create_standardbase_config 2504 "${IP_RPC_247:-${IP_RPC_247:-${IP_RPC_247:-192.168.11.247}}}"
create_standardbase_config 2505 "${IP_RPC_248:-${IP_RPC_248:-${IP_RPC_248:-192.168.11.248}}}"
echo ""
log "Configuration files created in /tmp/"
log "Deploy these to respective RPC node config directories:"
log " ALLTRA-RPC-1 (Full): /tmp/besu-config-fullfunction-2500.toml → /opt/besu/config/"
log " ALLTRA-RPC-2 (Std): /tmp/besu-config-standardbase-2501.toml → /opt/besu/config/"
log " ALLTRA-RPC-3 (Std): /tmp/besu-config-standardbase-2502.toml → /opt/besu/config/"
log " HYBX-RPC-1 (Full): /tmp/besu-config-fullfunction-2503.toml → /opt/besu/config/"
log " HYBX-RPC-2 (Std): /tmp/besu-config-standardbase-2504.toml → /opt/besu/config/"
log " HYBX-RPC-3 (Std): /tmp/besu-config-standardbase-2505.toml → /opt/besu/config/"
log "Configuration files created in /tmp/ for profile '$PROFILE':"
for vmid in "${TARGET_VMIDS[@]}"; do
case "$PROFILE" in
fullfunction) log " /tmp/besu-config-fullfunction-$vmid.toml → /opt/besu/config/" ;;
standardbase) log " /tmp/besu-config-standardbase-$vmid.toml → /opt/besu/config/" ;;
esac
done

View File

@@ -4,8 +4,19 @@
#
# Usage: PROXMOX_HOST=192.168.11.11 bash scripts/create-missing-containers-2506-2508.sh [--dry-run]
# HISTORICAL SCRIPT
# VMIDs 2506-2508 were part of a retired migration plan and are documented as destroyed/decommissioned.
# This file remains for reference only.
set -euo pipefail
if [[ "${HISTORICAL_ALLOW_RUN:-0}" != "1" ]]; then
echo "HISTORICAL: create-missing-containers-2506-2508.sh is not a current provisioning runbook." >&2
echo "See docs/04-configuration/ALL_VMIDS_ENDPOINTS.md for the live inventory before creating any CTs." >&2
echo "Set HISTORICAL_ALLOW_RUN=1 only if you intentionally need this legacy script." >&2
exit 1
fi
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

View File

@@ -0,0 +1,107 @@
#!/usr/bin/env bash
set -euo pipefail
# Deploy only the Mainnet Aave quote-push receiver.
# Default: simulation only. Use --apply to broadcast.
#
# Env:
#   PRIVATE_KEY, ETHEREUM_MAINNET_RPC   required
#   AAVE_POOL_ADDRESS                   optional; defaults inside the Forge script
#   QUOTE_PUSH_RECEIVER_OWNER           optional; defaults to deployer derived from PRIVATE_KEY
#
# Usage:
#   bash scripts/deployment/deploy-mainnet-aave-quote-push-receiver.sh --dry-run
#   bash scripts/deployment/deploy-mainnet-aave-quote-push-receiver.sh --apply

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM="${PROXMOX_ROOT}/smom-dbis-138"

# Snapshot caller-supplied env BEFORE sourcing the env files below, so that
# values passed explicitly on the command line always win over anything
# load-project-env.sh / load-env.sh may set.
_qp_private_key="${PRIVATE_KEY-}"
_qp_rpc="${ETHEREUM_MAINNET_RPC-}"
_qp_pool="${AAVE_POOL_ADDRESS-}"
_qp_owner="${QUOTE_PUSH_RECEIVER_OWNER-}"

# shellcheck disable=SC1091
source "${PROXMOX_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck disable=SC1091
source "${SMOM}/scripts/load-env.sh" >/dev/null 2>&1 || true

# Restore the snapshots: a non-empty caller value overrides the sourced env.
[[ -n "$_qp_private_key" ]] && export PRIVATE_KEY="$_qp_private_key"
[[ -n "$_qp_rpc" ]] && export ETHEREUM_MAINNET_RPC="$_qp_rpc"
[[ -n "$_qp_pool" ]] && export AAVE_POOL_ADDRESS="$_qp_pool"
[[ -n "$_qp_owner" ]] && export QUOTE_PUSH_RECEIVER_OWNER="$_qp_owner"
unset _qp_private_key _qp_rpc _qp_pool _qp_owner

# Abort with a clear diagnostic if a required command is missing from PATH.
require_cmd() {
  command -v "$1" >/dev/null 2>&1 || {
    echo "[fail] missing required command: $1" >&2
    exit 1
  }
}

# Abort if the named env var is unset or empty.
require_env() {
  local name="$1"
  if [[ -z "${!name:-}" ]]; then
    echo "[fail] missing required env: $name" >&2
    exit 1
  fi
}

# Print the address of the most recent AaveQuotePushFlashReceiver CREATE from
# the Forge broadcast journal ("dry-run" reads the simulation journal instead).
# Returns 1 (prints nothing) when the journal or jq is unavailable.
pick_latest_receiver() {
  local mode="$1"
  local latest_json="${SMOM}/broadcast/DeployAaveQuotePushFlashReceiver.s.sol/1/run-latest.json"
  if [[ "$mode" == "dry-run" ]]; then
    latest_json="${SMOM}/broadcast/DeployAaveQuotePushFlashReceiver.s.sol/1/dry-run/run-latest.json"
  fi
  if [[ ! -f "$latest_json" ]] || ! command -v jq >/dev/null 2>&1; then
    return 1
  fi
  jq -r '.transactions[]? | select(.transactionType == "CREATE" and .contractName == "AaveQuotePushFlashReceiver") | .contractAddress' \
    "$latest_json" | tail -n1
}

require_cmd forge

MODE="dry-run"
BROADCAST=()
for arg in "$@"; do
  case "$arg" in
    --dry-run) MODE="dry-run"; BROADCAST=() ;;
    --apply) MODE="apply"; BROADCAST=(--broadcast) ;;
    *)
      echo "[fail] unknown arg: $arg (use --dry-run or --apply)" >&2
      exit 2
      ;;
  esac
done

require_env PRIVATE_KEY
require_env ETHEREUM_MAINNET_RPC

echo "=== deploy-mainnet-aave-quote-push-receiver ($MODE) ==="
if [[ -n "${QUOTE_PUSH_RECEIVER_OWNER:-}" ]]; then
  echo "receiver_owner=$QUOTE_PUSH_RECEIVER_OWNER"
fi

(
  cd "$SMOM"
  # ${BROADCAST[@]+...} expands to nothing when the array is empty; the plain
  # "${BROADCAST[@]}" form trips `set -u` ("unbound variable") on bash < 4.4
  # in the default --dry-run path.
  forge script script/deploy/DeployAaveQuotePushFlashReceiver.s.sol:DeployAaveQuotePushFlashReceiver \
    --rpc-url "$ETHEREUM_MAINNET_RPC" \
    ${BROADCAST[@]+"${BROADCAST[@]}"} \
    -vvvv
)

# Best-effort: surface the receiver address from the broadcast journal.
receiver_addr="$(pick_latest_receiver "$MODE" || true)"
echo
if [[ "$MODE" == "dry-run" ]]; then
  echo "Projected receiver address from this dry-run:"
else
  echo "After --apply: copy deployed address into .env:"
fi
echo "  AAVE_QUOTE_PUSH_RECEIVER_MAINNET=${receiver_addr:-...}"
echo "Shortest manager handoff:"
echo "  AAVE_QUOTE_PUSH_RECEIVER_MAINNET=${receiver_addr:-...} QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP=1 bash scripts/deployment/deploy-mainnet-quote-push-treasury-manager.sh --apply"
echo "Full stack alternative:"
echo "  bash scripts/deployment/deploy-mainnet-aave-quote-push-stack.sh --apply"

View File

@@ -3,6 +3,7 @@ set -euo pipefail
# Deploy Mainnet flash quote-push stack (Aave receiver + external unwinder).
# Default: simulation only (no --broadcast). Use --apply to broadcast.
# For receiver-only migrations, use deploy-mainnet-aave-quote-push-receiver.sh.
#
# Env:
# PRIVATE_KEY, ETHEREUM_MAINNET_RPC
@@ -165,6 +166,8 @@ unwinder_addr="$(pick_latest_create_address "$unwinder_script" "$unwinder_contra
echo "After --apply: copy deployed addresses into .env:"
echo " AAVE_QUOTE_PUSH_RECEIVER_MAINNET=${receiver_addr:-...}"
echo " QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET=${unwinder_addr:-...}"
echo "Optional retained-surplus manager:"
echo " QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP=1 bash scripts/deployment/deploy-mainnet-quote-push-treasury-manager.sh --apply"
echo "Or rerun immediately with QUOTE_PUSH_UNWINDER_TYPE=${UNW} so run-mainnet-aave-cwusdc-quote-push-once.sh can auto-pick the latest broadcast unwinder."
echo "Then set FLASH_QUOTE_AMOUNT_RAW, UNWIND_MODE, UNWIND_V3_FEE_U24 (or UNWIND_DODO_POOL / two-hop vars) and run:"
echo " bash scripts/deployment/run-mainnet-aave-cwusdc-quote-push-once.sh --dry-run"

View File

@@ -0,0 +1,137 @@
#!/usr/bin/env bash
set -euo pipefail
# Deploy the Mainnet quote-push treasury manager and optionally transfer the
# receiver ownership to it. Default: simulation only. Use --apply to broadcast.
#
# Env:
#   PRIVATE_KEY, ETHEREUM_MAINNET_RPC             required
#   AAVE_QUOTE_PUSH_RECEIVER_MAINNET              optional; auto-picks latest receiver broadcast
#   QUOTE_PUSH_SURPLUS_TOKEN_MAINNET              optional; defaults to mainnet USDC
#   QUOTE_PUSH_TREASURY_OWNER                     optional; defaults to deployer
#   QUOTE_PUSH_TREASURY_OPERATOR                  optional; defaults to owner
#   QUOTE_PUSH_TREASURY_GAS_RECIPIENT             optional; defaults to owner
#   QUOTE_PUSH_TREASURY_RECYCLE_RECIPIENT         optional; defaults to owner
#   QUOTE_PUSH_RECEIVER_RESERVE_RAW               optional; defaults to 0
#   QUOTE_PUSH_TREASURY_RESERVE_RAW               optional; defaults to 0
#   QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP   optional; 1 transfers receiver ownership to manager
#
# Usage:
#   bash scripts/deployment/deploy-mainnet-quote-push-treasury-manager.sh --dry-run
#   QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP=1 \
#     bash scripts/deployment/deploy-mainnet-quote-push-treasury-manager.sh --apply

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM="${PROXMOX_ROOT}/smom-dbis-138"

# Snapshot caller-supplied env BEFORE sourcing the env files below, so that
# explicit command-line values always win over anything the sourced files set.
_qp_private_key="${PRIVATE_KEY-}"
_qp_rpc="${ETHEREUM_MAINNET_RPC-}"
_qp_receiver="${AAVE_QUOTE_PUSH_RECEIVER_MAINNET-}"
_qp_token="${QUOTE_PUSH_SURPLUS_TOKEN_MAINNET-}"
_qp_owner="${QUOTE_PUSH_TREASURY_OWNER-}"
_qp_operator="${QUOTE_PUSH_TREASURY_OPERATOR-}"
_qp_gas_recipient="${QUOTE_PUSH_TREASURY_GAS_RECIPIENT-}"
_qp_recycle_recipient="${QUOTE_PUSH_TREASURY_RECYCLE_RECIPIENT-}"
_qp_receiver_reserve="${QUOTE_PUSH_RECEIVER_RESERVE_RAW-}"
_qp_manager_reserve="${QUOTE_PUSH_TREASURY_RESERVE_RAW-}"
_qp_take_receiver="${QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP-}"

# shellcheck disable=SC1091
source "${PROXMOX_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck disable=SC1091
source "${SMOM}/scripts/load-env.sh" >/dev/null 2>&1 || true

# Restore the snapshots: a non-empty caller value overrides the sourced env.
[[ -n "$_qp_private_key" ]] && export PRIVATE_KEY="$_qp_private_key"
[[ -n "$_qp_rpc" ]] && export ETHEREUM_MAINNET_RPC="$_qp_rpc"
[[ -n "$_qp_receiver" ]] && export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="$_qp_receiver"
[[ -n "$_qp_token" ]] && export QUOTE_PUSH_SURPLUS_TOKEN_MAINNET="$_qp_token"
[[ -n "$_qp_owner" ]] && export QUOTE_PUSH_TREASURY_OWNER="$_qp_owner"
[[ -n "$_qp_operator" ]] && export QUOTE_PUSH_TREASURY_OPERATOR="$_qp_operator"
[[ -n "$_qp_gas_recipient" ]] && export QUOTE_PUSH_TREASURY_GAS_RECIPIENT="$_qp_gas_recipient"
[[ -n "$_qp_recycle_recipient" ]] && export QUOTE_PUSH_TREASURY_RECYCLE_RECIPIENT="$_qp_recycle_recipient"
[[ -n "$_qp_receiver_reserve" ]] && export QUOTE_PUSH_RECEIVER_RESERVE_RAW="$_qp_receiver_reserve"
[[ -n "$_qp_manager_reserve" ]] && export QUOTE_PUSH_TREASURY_RESERVE_RAW="$_qp_manager_reserve"
[[ -n "$_qp_take_receiver" ]] && export QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP="$_qp_take_receiver"
unset _qp_private_key _qp_rpc _qp_receiver _qp_token _qp_owner _qp_operator _qp_gas_recipient
unset _qp_recycle_recipient _qp_receiver_reserve _qp_manager_reserve _qp_take_receiver

# Abort with a clear diagnostic if a required command is missing from PATH.
require_cmd() {
  command -v "$1" >/dev/null 2>&1 || {
    echo "[fail] missing required command: $1" >&2
    exit 1
  }
}

# Abort if the named env var is unset or empty.
require_env() {
  local name="$1"
  if [[ -z "${!name:-}" ]]; then
    echo "[fail] missing required env: $name" >&2
    exit 1
  fi
}

# Print the address of the most recent CREATE of <contract_name> recorded by
# <script_name>'s Forge broadcast journal; mode "dry-run" reads the simulation
# journal. Returns 1 (prints nothing) when the journal or jq is unavailable.
pick_latest_create_address() {
  local script_name="$1"
  local contract_name="$2"
  local mode="${3:-apply}"
  local latest_json="${SMOM}/broadcast/${script_name}/1/run-latest.json"
  if [[ "$mode" == "dry-run" ]]; then
    latest_json="${SMOM}/broadcast/${script_name}/1/dry-run/run-latest.json"
  fi
  if [[ ! -f "$latest_json" ]] || ! command -v jq >/dev/null 2>&1; then
    return 1
  fi
  jq -r --arg contract "$contract_name" \
    '.transactions[]? | select(.transactionType == "CREATE" and .contractName == $contract) | .contractAddress' \
    "$latest_json" | tail -n1
}

require_cmd forge

MODE="dry-run"
BROADCAST=()
for arg in "$@"; do
  case "$arg" in
    --dry-run) MODE="dry-run"; BROADCAST=() ;;
    --apply) MODE="apply"; BROADCAST=(--broadcast) ;;
    *)
      echo "[fail] unknown arg: $arg (use --dry-run or --apply)" >&2
      exit 2
      ;;
  esac
done

require_env PRIVATE_KEY
require_env ETHEREUM_MAINNET_RPC

# If no receiver was supplied, fall back to the latest broadcast receiver
# deployment; only a real (non-null) address is accepted.
if [[ -z "${AAVE_QUOTE_PUSH_RECEIVER_MAINNET:-}" ]]; then
  inferred_receiver="$(pick_latest_create_address "DeployAaveQuotePushFlashReceiver.s.sol" "AaveQuotePushFlashReceiver" "apply" || true)"
  if [[ -n "$inferred_receiver" && "$inferred_receiver" != "null" ]]; then
    export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="$inferred_receiver"
  fi
fi
require_env AAVE_QUOTE_PUSH_RECEIVER_MAINNET

echo "=== deploy-mainnet-quote-push-treasury-manager ($MODE) ==="
echo "receiver=$AAVE_QUOTE_PUSH_RECEIVER_MAINNET"
echo "take_receiver_ownership=${QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP:-0}"

(
  cd "$SMOM"
  # ${BROADCAST[@]+...} expands to nothing when the array is empty; the plain
  # "${BROADCAST[@]}" form trips `set -u` ("unbound variable") on bash < 4.4
  # in the default --dry-run path.
  forge script script/deploy/DeployQuotePushTreasuryManager.s.sol:DeployQuotePushTreasuryManager \
    --rpc-url "$ETHEREUM_MAINNET_RPC" \
    ${BROADCAST[@]+"${BROADCAST[@]}"} \
    -vvvv
)

# Best-effort: surface the manager address from the broadcast journal.
manager_addr="$(pick_latest_create_address "DeployQuotePushTreasuryManager.s.sol" "QuotePushTreasuryManager" "$MODE" || true)"
echo
if [[ "$MODE" == "dry-run" ]]; then
  echo "Projected treasury manager address from this dry-run:"
else
  echo "After --apply: copy deployed addresses into .env:"
fi
echo "  QUOTE_PUSH_TREASURY_MANAGER_MAINNET=${manager_addr:-...}"
echo "Optional keeper entrypoint:"
echo "  bash scripts/deployment/run-mainnet-aave-quote-push-keeper.sh --dry-run"

View File

@@ -0,0 +1,124 @@
#!/usr/bin/env bash
set -euo pipefail
# Harvest receiver surplus into the treasury manager and/or distribute quote to
# the configured gas and recycle recipients. Default: simulation only.
#
# Env:
# PRIVATE_KEY, ETHEREUM_MAINNET_RPC required
# QUOTE_PUSH_TREASURY_MANAGER_MAINNET optional; auto-picks latest manager broadcast
# QUOTE_PUSH_TREASURY_HARVEST optional; defaults to 1
# QUOTE_PUSH_TREASURY_GAS_DISTRIBUTION_RAW optional; defaults to 0
# QUOTE_PUSH_TREASURY_RECYCLE_DISTRIBUTION_RAW optional; defaults to 0
#
# Usage:
# bash scripts/deployment/manage-mainnet-quote-push-treasury.sh --dry-run
# QUOTE_PUSH_TREASURY_GAS_DISTRIBUTION_RAW=1000 \
# QUOTE_PUSH_TREASURY_RECYCLE_DISTRIBUTION_RAW=2000 \
# bash scripts/deployment/manage-mainnet-quote-push-treasury.sh --apply
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM="${PROXMOX_ROOT}/smom-dbis-138"
# Snapshot any caller-supplied overrides BEFORE sourcing the env loaders:
# the loaders may export their own values, and explicit caller intent must
# win. The snapshots are re-exported immediately after sourcing.
_qp_private_key="${PRIVATE_KEY-}"
_qp_rpc="${ETHEREUM_MAINNET_RPC-}"
_qp_manager="${QUOTE_PUSH_TREASURY_MANAGER_MAINNET-}"
_qp_harvest="${QUOTE_PUSH_TREASURY_HARVEST-}"
_qp_gas_raw="${QUOTE_PUSH_TREASURY_GAS_DISTRIBUTION_RAW-}"
_qp_recycle_raw="${QUOTE_PUSH_TREASURY_RECYCLE_DISTRIBUTION_RAW-}"
# Both env loaders are best-effort: missing files must not abort the script.
# shellcheck disable=SC1091
source "${PROXMOX_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck disable=SC1091
source "${SMOM}/scripts/load-env.sh" >/dev/null 2>&1 || true
# Restore caller overrides on top of whatever the loaders exported.
[[ -n "$_qp_private_key" ]] && export PRIVATE_KEY="$_qp_private_key"
[[ -n "$_qp_rpc" ]] && export ETHEREUM_MAINNET_RPC="$_qp_rpc"
[[ -n "$_qp_manager" ]] && export QUOTE_PUSH_TREASURY_MANAGER_MAINNET="$_qp_manager"
[[ -n "$_qp_harvest" ]] && export QUOTE_PUSH_TREASURY_HARVEST="$_qp_harvest"
[[ -n "$_qp_gas_raw" ]] && export QUOTE_PUSH_TREASURY_GAS_DISTRIBUTION_RAW="$_qp_gas_raw"
[[ -n "$_qp_recycle_raw" ]] && export QUOTE_PUSH_TREASURY_RECYCLE_DISTRIBUTION_RAW="$_qp_recycle_raw"
unset _qp_private_key _qp_rpc _qp_manager _qp_harvest _qp_gas_raw _qp_recycle_raw
require_cmd() {
  # Fail fast when a required executable is absent from PATH.
  if ! command -v "$1" >/dev/null 2>&1; then
    echo "[fail] missing required command: $1" >&2
    exit 1
  fi
}
require_env() {
  # Abort unless the environment variable named by $1 is set and non-empty.
  local name="$1"
  [[ -n "${!name:-}" ]] && return 0
  echo "[fail] missing required env: $name" >&2
  exit 1
}
pick_latest_manager() {
  # Resolve the most recent QuotePushTreasuryManager CREATE address from the
  # latest forge broadcast; non-zero status when jq or the file is missing.
  local latest_json="${SMOM}/broadcast/DeployQuotePushTreasuryManager.s.sol/1/run-latest.json"
  command -v jq >/dev/null 2>&1 || return 1
  [[ -f "$latest_json" ]] || return 1
  jq -r '.transactions[]? | select(.transactionType == "CREATE" and .contractName == "QuotePushTreasuryManager") | .contractAddress' \
    "$latest_json" | tail -n1
}
require_cmd cast
require_cmd forge
# --- CLI parsing: simulation by default, --apply adds forge --broadcast ---
MODE="dry-run"
BROADCAST=()
for arg in "$@"; do
  case "$arg" in
    --dry-run) MODE="dry-run"; BROADCAST=() ;;
    --apply) MODE="apply"; BROADCAST=(--broadcast) ;;
    *)
      echo "[fail] unknown arg: $arg (use --dry-run or --apply)" >&2
      exit 2
      ;;
  esac
done
require_env PRIVATE_KEY
require_env ETHEREUM_MAINNET_RPC
# Manager address: explicit env wins, else the latest CREATE in the broadcast.
if [[ -z "${QUOTE_PUSH_TREASURY_MANAGER_MAINNET:-}" ]]; then
  inferred_manager="$(pick_latest_manager || true)"
  if [[ -n "$inferred_manager" && "$inferred_manager" != "null" ]]; then
    export QUOTE_PUSH_TREASURY_MANAGER_MAINNET="$inferred_manager"
  fi
fi
require_env QUOTE_PUSH_TREASURY_MANAGER_MAINNET
DEPLOYER="$(cast wallet address --private-key "$PRIVATE_KEY")"
# Authorization preflight: the signing key must be the manager's owner or
# operator; case-insensitive compare (",," lowercases) since address hex
# casing differs between tools.
manager_owner="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'owner()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
manager_operator="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'operator()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
if [[ "${manager_owner,,}" != "${DEPLOYER,,}" && "${manager_operator,,}" != "${DEPLOYER,,}" ]]; then
  echo "[fail] deployer $DEPLOYER is neither manager owner ($manager_owner) nor operator ($manager_operator)" >&2
  exit 1
fi
# Read-only accounting snapshot printed before the forge run, so the operator
# can sanity-check balances against the requested distributions.
quote_balance="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'quoteBalance()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
available_quote="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'availableQuote()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
receiver_sweepable="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'receiverSweepableQuote()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
echo "=== manage-mainnet-quote-push-treasury ($MODE) ==="
echo "manager=$QUOTE_PUSH_TREASURY_MANAGER_MAINNET"
echo "manager_owner=$manager_owner"
echo "manager_operator=$manager_operator"
echo "harvest=${QUOTE_PUSH_TREASURY_HARVEST:-1}"
echo "gas_distribution_raw=${QUOTE_PUSH_TREASURY_GAS_DISTRIBUTION_RAW:-0}"
echo "recycle_distribution_raw=${QUOTE_PUSH_TREASURY_RECYCLE_DISTRIBUTION_RAW:-0}"
echo "quote_balance_raw=$quote_balance"
echo "available_quote_raw=$available_quote"
echo "receiver_sweepable_raw=$receiver_sweepable"
# Run the forge management script from the foundry project root; the subshell
# keeps our own working directory unchanged.
(
  cd "$SMOM"
  forge script script/flash/ManageQuotePushTreasuryManager.s.sol:ManageQuotePushTreasuryManager \
    --rpc-url "$ETHEREUM_MAINNET_RPC" \
    "${BROADCAST[@]}" \
    -vvvv
)

View File

@@ -10,6 +10,8 @@ set -euo pipefail
# (Aave flashLoan USDC → PMM swap USDC→cWUSDC → external unwind cWUSDC→USDC
# → repay Aave + premium). Each successful round **raises PMM quote reserves** and
# **draws base** from the pool, moving base-heavy / quote-thin books toward peg.
# Retained quote surplus stays on the receiver until swept by the owner and can
# then be recycled into wallet-funded pool growth.
#
# This script is read-only + local modeling:
# - reads live pool reserves (cast)
@@ -102,7 +104,12 @@ echo
echo "4) Reserve peg monitor after any live work:"
echo " bash scripts/verify/check-mainnet-cwusdc-usdc-reserve-peg.sh"
echo
echo "5) Pick a valid Uniswap V3 unwind (no guessing fee tiers):"
echo "5) Receiver surplus accounting and recycle path:"
echo " bash scripts/verify/report-mainnet-aave-quote-push-surplus-accounting.sh"
echo " bash scripts/deployment/sweep-mainnet-aave-quote-push-receiver-surplus.sh --dry-run"
echo " bash scripts/deployment/recycle-mainnet-aave-quote-push-surplus.sh --dry-run"
echo
echo "6) Pick a valid Uniswap V3 unwind (no guessing fee tiers):"
echo " bash scripts/verify/probe-uniswap-v3-cwusdc-usdc-mainnet.sh"
echo " If no pool: UNWIND_MODE=2 — build path: bash scripts/verify/build-uniswap-v3-exact-input-path-hex.sh ... (VERIFY_POOLS=1 optional)"
echo " Calculations report (dry-run snapshots): reports/status/mainnet_cwusdc_usdc_quote_push_calculations_2026-04-12.md"

View File

@@ -185,8 +185,11 @@ _pmm_fund_grid_err() {
trap '_pmm_fund_grid_err' ERR
_chunk_wall_start="$SECONDS"
_prog_every="${PMM_SOAK_FUND_PROGRESS_EVERY:-50}"
[[ "$_prog_every" =~ ^[0-9]+$ ]] || _prog_every=50
# Avoid names starting with "_" inside (( )) under set -u (some shells/envs treat expansion oddly).
fund_progress_every=50
if [[ -n "${PMM_SOAK_FUND_PROGRESS_EVERY:-}" && "${PMM_SOAK_FUND_PROGRESS_EVERY}" =~ ^[0-9]+$ ]]; then
fund_progress_every="${PMM_SOAK_FUND_PROGRESS_EVERY}"
fi
n=0
for r in "${RECIPIENTS[@]}"; do
n=$((n + 1))
@@ -197,10 +200,12 @@ for r in "${RECIPIENTS[@]}"; do
cast send "$TOKEN_ADDR" 'transfer(address,uint256)(bool)' "$r" "$AMOUNT" \
--rpc-url "$RPC" --private-key "$PK" --legacy --gas-price "$GAS_WEI"
fi
if [[ "$_prog_every" -gt 0 ]] && (( n % _prog_every == 0 || n == total )); then
_el=$((SECONDS - _chunk_wall_start))
python3 -c "n=$n;total=$total;el=$_el;avg=el/n if n else 0.0;rem=total-n;eta=int(rem*avg) if rem>0 else 0;print(f'[fund-grid] progress {n}/{total} chunk_elapsed_s={el} avg_s_per_tx={avg:.2f} est_remaining_this_chunk_s={eta}')" 2>/dev/null || true
elif [[ "$_prog_every" -eq 0 ]] && [[ "$n" -eq "$total" ]]; then
if [[ "${fund_progress_every}" -gt 0 ]]; then
if (( n % fund_progress_every == 0 )) || (( n == total )); then
_el=$((SECONDS - _chunk_wall_start))
python3 -c "n=$n;total=$total;el=$_el;avg=el/n if n else 0.0;rem=total-n;eta=int(rem*avg) if rem>0 else 0;print(f'[fund-grid] progress {n}/{total} chunk_elapsed_s={el} avg_s_per_tx={avg:.2f} est_remaining_this_chunk_s={eta}')" 2>/dev/null || true
fi
elif [[ "$n" -eq "$total" ]]; then
echo "[fund-grid] progress ${n}/${total} chunk_elapsed_s=$((SECONDS - _chunk_wall_start)) (final)"
fi
done

View File

@@ -0,0 +1,155 @@
#!/usr/bin/env bash
set -euo pipefail
# Sweep retained quote surplus from the receiver to the deployer and, when the
# deployer gas reserve is healthy, immediately recycle that quote into the
# wallet-funded cWUSDC/USDC peg tranche helper.
#
# Default: simulation only. Use --apply to broadcast / execute.
#
# Env:
# ETHEREUM_MAINNET_RPC required
# PRIVATE_KEY required
# AAVE_QUOTE_PUSH_RECEIVER_MAINNET required
# QUOTE_PUSH_SURPLUS_TOKEN_MAINNET optional; defaults to mainnet USDC
# QUOTE_PUSH_RECEIVER_RESERVE_RAW optional; amount to keep on receiver after sweep
# QUOTE_PUSH_DEPLOYER_GAS_FLOOR_ETH optional; default 0.003
# QUOTE_PUSH_OPERATION_BUFFER_ETH optional; default 0.0005
# QUOTE_PUSH_NATIVE_TOKEN_PRICE optional; default 3200
#
# Usage:
# source scripts/lib/load-project-env.sh
# bash scripts/deployment/recycle-mainnet-aave-quote-push-surplus.sh --dry-run
# bash scripts/deployment/recycle-mainnet-aave-quote-push-surplus.sh --apply
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM="${PROXMOX_ROOT}/smom-dbis-138"
# Canonical Ethereum mainnet USDC (6 decimals); used when no token override is set.
DEFAULT_USDC_MAINNET="0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"
# Both env loaders are best-effort: missing files must not abort the script.
# shellcheck disable=SC1091
source "${PROXMOX_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck disable=SC1091
source "${SMOM}/scripts/load-env.sh" >/dev/null 2>&1 || true
require_cmd() {
  # Ensure the given tool exists on PATH; abort the whole script otherwise.
  local tool="$1"
  command -v "$tool" >/dev/null 2>&1 && return 0
  echo "[fail] missing required command: $tool" >&2
  exit 1
}
require_env() {
  # Guard: the variable whose NAME is passed in $1 must be set and non-empty.
  local var_name="$1"
  if [[ -n "${!var_name:-}" ]]; then
    return 0
  fi
  echo "[fail] missing required env: $var_name" >&2
  exit 1
}
pick_latest_receiver() {
  # Latest AaveQuotePushFlashReceiver CREATE address from the newest forge
  # broadcast; fails (status 1) when the broadcast file or jq is unavailable.
  local broadcast_file="${SMOM}/broadcast/DeployAaveQuotePushFlashReceiver.s.sol/1/run-latest.json"
  [[ -f "$broadcast_file" ]] || return 1
  command -v jq >/dev/null 2>&1 || return 1
  jq -r '.transactions[]? | select(.transactionType == "CREATE" and .contractName == "AaveQuotePushFlashReceiver") | .contractAddress' \
    "$broadcast_file" | tail -n1
}
require_cmd cast
require_cmd python3
# --- CLI parsing: simulation by default ---
MODE="dry-run"
for arg in "$@"; do
  case "$arg" in
    --dry-run) MODE="dry-run" ;;
    --apply) MODE="apply" ;;
    *)
      echo "[fail] unknown arg: $arg (use --dry-run or --apply)" >&2
      exit 2
      ;;
  esac
done
require_env ETHEREUM_MAINNET_RPC
require_env PRIVATE_KEY
# Receiver address: explicit env wins, else the latest forge broadcast.
if [[ -z "${AAVE_QUOTE_PUSH_RECEIVER_MAINNET:-}" ]]; then
  inferred_receiver="$(pick_latest_receiver || true)"
  # jq prints the literal string "null" when the field is absent.
  if [[ -n "$inferred_receiver" && "$inferred_receiver" != "null" ]]; then
    export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="$inferred_receiver"
  fi
fi
require_env AAVE_QUOTE_PUSH_RECEIVER_MAINNET
DEPLOYER="$(cast wallet address --private-key "$PRIVATE_KEY")"
TOKEN="${QUOTE_PUSH_SURPLUS_TOKEN_MAINNET:-$DEFAULT_USDC_MAINNET}"
RECEIVER_RESERVE_RAW="${QUOTE_PUSH_RECEIVER_RESERVE_RAW:-0}"
GAS_FLOOR_ETH="${QUOTE_PUSH_DEPLOYER_GAS_FLOOR_ETH:-0.003}"
OP_BUFFER_ETH="${QUOTE_PUSH_OPERATION_BUFFER_ETH:-0.0005}"
NATIVE_TOKEN_PRICE="${QUOTE_PUSH_NATIVE_TOKEN_PRICE:-3200}"
# Live chain reads: deployer gas balance and receiver quote-token balance.
deployer_eth="$(cast balance "$DEPLOYER" --ether --rpc-url "$ETHEREUM_MAINNET_RPC")"
receiver_quote_raw="$(cast call "$TOKEN" 'balanceOf(address)(uint256)' "$AAVE_QUOTE_PUSH_RECEIVER_MAINNET" --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
# One python call computes both policy numbers; `read` splits its single
# stdout line (via the here-doc) into the two shell variables.
read -r sweepable_raw gas_shortfall_eth <<EOF
$(python3 - "$deployer_eth" "$receiver_quote_raw" "$RECEIVER_RESERVE_RAW" "$GAS_FLOOR_ETH" "$OP_BUFFER_ETH" <<'PY'
import sys
deployer_eth = float(sys.argv[1])
receiver_quote_raw = int(sys.argv[2])
receiver_reserve_raw = int(sys.argv[3])
gas_floor_eth = float(sys.argv[4])
op_buffer_eth = float(sys.argv[5])
sweepable_raw = max(0, receiver_quote_raw - receiver_reserve_raw)
gas_shortfall_eth = max(0.0, gas_floor_eth + op_buffer_eth - deployer_eth)
print(sweepable_raw, gas_shortfall_eth)
PY
)
EOF
# Float comparison is delegated to python (bash arithmetic is integer-only).
gas_floor_breached="$(
python3 - "$gas_shortfall_eth" <<'PY'
import sys
print("yes" if float(sys.argv[1]) > 0 else "no")
PY
)"
echo "=== recycle-mainnet-aave-quote-push-surplus ($MODE) ==="
echo "deployer=$DEPLOYER"
echo "receiver=$AAVE_QUOTE_PUSH_RECEIVER_MAINNET"
echo "token=$TOKEN"
echo "receiver_quote_raw=$receiver_quote_raw"
echo "receiver_reserve_raw=$RECEIVER_RESERVE_RAW"
echo "sweepable_raw=$sweepable_raw"
echo "deployer_eth=$deployer_eth"
echo "gas_floor_eth=$GAS_FLOOR_ETH"
echo "operation_buffer_eth=$OP_BUFFER_ETH"
echo "native_token_price=$NATIVE_TOKEN_PRICE"
bash "${PROXMOX_ROOT}/scripts/verify/report-mainnet-aave-quote-push-surplus-accounting.sh"
# Exit code 3 is the documented "nothing to do" signal for callers.
if (( sweepable_raw == 0 )); then
  echo "[stop] no sweepable receiver surplus is available" >&2
  exit 3
fi
if [[ "$MODE" == "dry-run" ]]; then
  bash "${PROXMOX_ROOT}/scripts/deployment/sweep-mainnet-aave-quote-push-receiver-surplus.sh" --dry-run
  if [[ "$gas_floor_breached" == "yes" ]]; then
    echo "[stop] deployer gas reserve is below recycle floor; sweep can proceed, but pool recycle is intentionally skipped"
    exit 0
  fi
  bash "${PROXMOX_ROOT}/scripts/deployment/apply-mainnet-cwusdc-usdc-peg-tranche-from-wallet.sh" --dry-run
  exit 0
fi
# Live path: sweep first, then only grow the pool if gas is healthy.
bash "${PROXMOX_ROOT}/scripts/deployment/sweep-mainnet-aave-quote-push-receiver-surplus.sh" --apply
if [[ "$gas_floor_breached" == "yes" ]]; then
  echo "[stop] swept surplus to deployer, but deployer ETH is still below recycle floor; skipping pool-growth tranche"
  exit 0
fi
bash "${PROXMOX_ROOT}/scripts/deployment/apply-mainnet-cwusdc-usdc-peg-tranche-from-wallet.sh" --apply

View File

@@ -285,3 +285,4 @@ echo "flash_quote_amount_raw=$FLASH_QUOTE_AMOUNT_RAW unwind_mode=$UM"
)
echo "Done. Re-check: bash scripts/verify/check-mainnet-cwusdc-usdc-reserve-peg.sh"
echo "Receiver surplus accounting: bash scripts/verify/report-mainnet-aave-quote-push-surplus-accounting.sh"

View File

@@ -0,0 +1,356 @@
#!/usr/bin/env bash
set -euo pipefail
# Single entrypoint for the quote-push maintenance loop:
# 1. plan current accounting / gas policy
# 2. execute one flash quote-push
# 3. recycle retained quote via treasury manager or direct sweep fallback
#
# Default: simulation only. Use --apply to broadcast / execute.
#
# Env:
# PRIVATE_KEY, ETHEREUM_MAINNET_RPC required
# DODO_PMM_INTEGRATION_MAINNET required unless QUOTE_PUSH_KEEPER_SKIP_FLASH=1
# QUOTE_PUSH_TREASURY_MANAGER_MAINNET optional; when set, manager path is preferred
# QUOTE_PUSH_SURPLUS_TOKEN_MAINNET optional; defaults to mainnet USDC
# QUOTE_PUSH_RECEIVER_RESERVE_RAW optional; defaults to 0
# QUOTE_PUSH_DEPLOYER_GAS_FLOOR_ETH optional; defaults to 0.003
# QUOTE_PUSH_OPERATION_BUFFER_ETH optional; defaults to 0.0005
# QUOTE_PUSH_NATIVE_TOKEN_PRICE optional; defaults to 3200
# QUOTE_PUSH_KEEPER_SKIP_FLASH optional; default 0
# QUOTE_PUSH_KEEPER_SKIP_RECYCLE optional; default 0
#
# Usage:
# bash scripts/deployment/run-mainnet-aave-quote-push-keeper.sh --dry-run
# bash scripts/deployment/run-mainnet-aave-quote-push-keeper.sh --apply
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
SMOM="${PROXMOX_ROOT}/smom-dbis-138"
# Canonical Ethereum mainnet USDC (6 decimals); used when no token override is set.
DEFAULT_USDC_MAINNET="0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"
# Snapshot caller-supplied overrides BEFORE sourcing the env loaders below,
# then re-export them afterwards so explicit caller intent always wins.
_qp_private_key="${PRIVATE_KEY-}"
_qp_rpc="${ETHEREUM_MAINNET_RPC-}"
_qp_manager="${QUOTE_PUSH_TREASURY_MANAGER_MAINNET-}"
_qp_token="${QUOTE_PUSH_SURPLUS_TOKEN_MAINNET-}"
_qp_receiver_reserve="${QUOTE_PUSH_RECEIVER_RESERVE_RAW-}"
_qp_gas_floor="${QUOTE_PUSH_DEPLOYER_GAS_FLOOR_ETH-}"
_qp_gas_buffer="${QUOTE_PUSH_OPERATION_BUFFER_ETH-}"
_qp_native_price="${QUOTE_PUSH_NATIVE_TOKEN_PRICE-}"
_qp_skip_flash="${QUOTE_PUSH_KEEPER_SKIP_FLASH-}"
_qp_skip_recycle="${QUOTE_PUSH_KEEPER_SKIP_RECYCLE-}"
# Both env loaders are best-effort: missing files must not abort the script.
# shellcheck disable=SC1091
source "${PROXMOX_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck disable=SC1091
source "${SMOM}/scripts/load-env.sh" >/dev/null 2>&1 || true
[[ -n "$_qp_private_key" ]] && export PRIVATE_KEY="$_qp_private_key"
[[ -n "$_qp_rpc" ]] && export ETHEREUM_MAINNET_RPC="$_qp_rpc"
[[ -n "$_qp_manager" ]] && export QUOTE_PUSH_TREASURY_MANAGER_MAINNET="$_qp_manager"
[[ -n "$_qp_token" ]] && export QUOTE_PUSH_SURPLUS_TOKEN_MAINNET="$_qp_token"
[[ -n "$_qp_receiver_reserve" ]] && export QUOTE_PUSH_RECEIVER_RESERVE_RAW="$_qp_receiver_reserve"
[[ -n "$_qp_gas_floor" ]] && export QUOTE_PUSH_DEPLOYER_GAS_FLOOR_ETH="$_qp_gas_floor"
[[ -n "$_qp_gas_buffer" ]] && export QUOTE_PUSH_OPERATION_BUFFER_ETH="$_qp_gas_buffer"
[[ -n "$_qp_native_price" ]] && export QUOTE_PUSH_NATIVE_TOKEN_PRICE="$_qp_native_price"
[[ -n "$_qp_skip_flash" ]] && export QUOTE_PUSH_KEEPER_SKIP_FLASH="$_qp_skip_flash"
[[ -n "$_qp_skip_recycle" ]] && export QUOTE_PUSH_KEEPER_SKIP_RECYCLE="$_qp_skip_recycle"
unset _qp_private_key _qp_rpc _qp_manager _qp_token _qp_receiver_reserve _qp_gas_floor
unset _qp_gas_buffer _qp_native_price _qp_skip_flash _qp_skip_recycle
require_cmd() {
  # Hard requirement: exit 1 unless "$1" resolves to an executable on PATH.
  command -v "$1" >/dev/null 2>&1 && return 0
  echo "[fail] missing required command: $1" >&2
  exit 1
}
pick_latest_manager() {
  # Newest QuotePushTreasuryManager CREATE address from the latest mainnet
  # forge broadcast; returns non-zero when jq or the broadcast file is absent.
  local broadcast="${SMOM}/broadcast/DeployQuotePushTreasuryManager.s.sol/1/run-latest.json"
  if ! command -v jq >/dev/null 2>&1 || [[ ! -f "$broadcast" ]]; then
    return 1
  fi
  jq -r '.transactions[]? | select(.transactionType == "CREATE" and .contractName == "QuotePushTreasuryManager") | .contractAddress' \
    "$broadcast" | tail -n1
}
pick_latest_receiver() {
  # Newest AaveQuotePushFlashReceiver CREATE address from the latest mainnet
  # forge broadcast; returns non-zero when jq or the broadcast file is absent.
  local broadcast="${SMOM}/broadcast/DeployAaveQuotePushFlashReceiver.s.sol/1/run-latest.json"
  if ! command -v jq >/dev/null 2>&1 || [[ ! -f "$broadcast" ]]; then
    return 1
  fi
  jq -r '.transactions[]? | select(.transactionType == "CREATE" and .contractName == "AaveQuotePushFlashReceiver") | .contractAddress' \
    "$broadcast" | tail -n1
}
to_human() {
  # Render a raw 6-decimal integer token amount (e.g. USDC base units) as a
  # fixed-point decimal string, e.g. 1234567 -> "1.234567".
  local raw="$1"
  python3 - "$raw" <<'PY'
import sys
raw = int(sys.argv[1])
print(f"{raw / 1_000_000:.6f}")
PY
}
compute_keeper_plan() {
local manager_addr="${1:-}"
local deployer_eth="$2"
local token_addr="$3"
local receiver_addr="$4"
local receiver_reserve_raw="$5"
local gas_floor_eth="$6"
local gas_buffer_eth="$7"
local native_price="$8"
local receiver_quote_raw=0
local manager_quote_raw=0
local manager_available_raw=0
local receiver_sweepable_raw=0
local total_controlled_raw=0
local gas_distribution_raw=0
local recycle_distribution_raw=0
local gas_shortfall_eth=0
local gas_shortfall_quote_raw=0
local gas_recipient=""
local recycle_recipient=""
if [[ -n "$manager_addr" ]]; then
manager_quote_raw="$(cast call "$manager_addr" 'quoteBalance()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
manager_available_raw="$(cast call "$manager_addr" 'availableQuote()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
receiver_sweepable_raw="$(cast call "$manager_addr" 'receiverSweepableQuote()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
gas_recipient="$(cast call "$manager_addr" 'gasRecipient()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
recycle_recipient="$(cast call "$manager_addr" 'recycleRecipient()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
else
receiver_quote_raw="$(cast call "$token_addr" 'balanceOf(address)(uint256)' "$receiver_addr" --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
receiver_sweepable_raw="$(
python3 - "$receiver_quote_raw" "$receiver_reserve_raw" <<'PY'
import sys
receiver_quote_raw = int(sys.argv[1])
receiver_reserve_raw = int(sys.argv[2])
print(max(0, receiver_quote_raw - receiver_reserve_raw))
PY
)"
fi
eval "$(
python3 - "$deployer_eth" "$gas_floor_eth" "$gas_buffer_eth" "$native_price" \
"${manager_available_raw:-0}" "${receiver_sweepable_raw:-0}" <<'PY'
import math
import sys
deployer_eth = float(sys.argv[1])
gas_floor_eth = float(sys.argv[2])
gas_buffer_eth = float(sys.argv[3])
native_price = float(sys.argv[4])
manager_available_raw = int(sys.argv[5])
receiver_sweepable_raw = int(sys.argv[6])
gas_shortfall_eth = max(0.0, gas_floor_eth + gas_buffer_eth - deployer_eth)
gas_shortfall_quote_raw = math.ceil(gas_shortfall_eth * native_price * 1_000_000)
total_controlled_raw = manager_available_raw + receiver_sweepable_raw
gas_distribution_raw = min(total_controlled_raw, gas_shortfall_quote_raw)
recycle_distribution_raw = max(0, total_controlled_raw - gas_distribution_raw)
print(f"GAS_SHORTFALL_ETH={gas_shortfall_eth}")
print(f"GAS_SHORTFALL_QUOTE_RAW={gas_shortfall_quote_raw}")
print(f"TOTAL_CONTROLLED_RAW={total_controlled_raw}")
print(f"GAS_DISTRIBUTION_RAW={gas_distribution_raw}")
print(f"RECYCLE_DISTRIBUTION_RAW={recycle_distribution_raw}")
PY
)"
KEEPER_MANAGER_QUOTE_RAW="${manager_quote_raw:-0}"
KEEPER_MANAGER_AVAILABLE_RAW="${manager_available_raw:-0}"
KEEPER_RECEIVER_QUOTE_RAW="${receiver_quote_raw:-0}"
KEEPER_RECEIVER_SWEEPABLE_RAW="${receiver_sweepable_raw:-0}"
KEEPER_TOTAL_CONTROLLED_RAW="${TOTAL_CONTROLLED_RAW:-0}"
KEEPER_GAS_SHORTFALL_ETH="${GAS_SHORTFALL_ETH:-0}"
KEEPER_GAS_SHORTFALL_QUOTE_RAW="${GAS_SHORTFALL_QUOTE_RAW:-0}"
KEEPER_GAS_DISTRIBUTION_RAW="${GAS_DISTRIBUTION_RAW:-0}"
KEEPER_RECYCLE_DISTRIBUTION_RAW="${RECYCLE_DISTRIBUTION_RAW:-0}"
KEEPER_GAS_RECIPIENT="${gas_recipient:-}"
KEEPER_RECYCLE_RECIPIENT="${recycle_recipient:-}"
}
require_cmd cast
require_cmd python3
require_cmd forge
# --- CLI parsing: simulation by default ---
MODE="dry-run"
for arg in "$@"; do
  case "$arg" in
    --dry-run) MODE="dry-run" ;;
    --apply) MODE="apply" ;;
    *)
      echo "[fail] unknown arg: $arg (use --dry-run or --apply)" >&2
      exit 2
      ;;
  esac
done
if [[ -z "${PRIVATE_KEY:-}" || -z "${ETHEREUM_MAINNET_RPC:-}" ]]; then
  echo "[fail] PRIVATE_KEY and ETHEREUM_MAINNET_RPC are required" >&2
  exit 1
fi
# Resolve manager / receiver: explicit env wins, else latest forge broadcast
# ("null" is jq's literal output when no match exists).
if [[ -z "${QUOTE_PUSH_TREASURY_MANAGER_MAINNET:-}" ]]; then
  inferred_manager="$(pick_latest_manager || true)"
  if [[ -n "$inferred_manager" && "$inferred_manager" != "null" ]]; then
    export QUOTE_PUSH_TREASURY_MANAGER_MAINNET="$inferred_manager"
  fi
fi
if [[ -z "${AAVE_QUOTE_PUSH_RECEIVER_MAINNET:-}" ]]; then
  inferred_receiver="$(pick_latest_receiver || true)"
  if [[ -n "$inferred_receiver" && "$inferred_receiver" != "null" ]]; then
    export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="$inferred_receiver"
  fi
fi
# Effective knobs (see the header comment for semantics and defaults).
SKIP_FLASH="${QUOTE_PUSH_KEEPER_SKIP_FLASH:-0}"
SKIP_RECYCLE="${QUOTE_PUSH_KEEPER_SKIP_RECYCLE:-0}"
TOKEN="${QUOTE_PUSH_SURPLUS_TOKEN_MAINNET:-$DEFAULT_USDC_MAINNET}"
RECEIVER="${AAVE_QUOTE_PUSH_RECEIVER_MAINNET:-}"
RECEIVER_RESERVE_RAW="${QUOTE_PUSH_RECEIVER_RESERVE_RAW:-0}"
GAS_FLOOR_ETH="${QUOTE_PUSH_DEPLOYER_GAS_FLOOR_ETH:-0.003}"
OP_BUFFER_ETH="${QUOTE_PUSH_OPERATION_BUFFER_ETH:-0.0005}"
NATIVE_TOKEN_PRICE="${QUOTE_PUSH_NATIVE_TOKEN_PRICE:-3200}"
DEPLOYER="$(cast wallet address --private-key "$PRIVATE_KEY")"
MANAGER="${QUOTE_PUSH_TREASURY_MANAGER_MAINNET:-}"
echo "=== run-mainnet-aave-quote-push-keeper ($MODE) ==="
echo "deployer=$DEPLOYER"
echo "manager=${MANAGER:-<direct-receiver-path>}"
echo "skip_flash=$SKIP_FLASH"
echo "skip_recycle=$SKIP_RECYCLE"
bash "${PROXMOX_ROOT}/scripts/verify/report-mainnet-aave-quote-push-surplus-accounting.sh"
# Build the policy plan from live balances (populates KEEPER_* globals).
deployer_eth="$(cast balance "$DEPLOYER" --ether --rpc-url "$ETHEREUM_MAINNET_RPC")"
compute_keeper_plan "$MANAGER" "$deployer_eth" "$TOKEN" "$RECEIVER" "$RECEIVER_RESERVE_RAW" "$GAS_FLOOR_ETH" "$OP_BUFFER_ETH" "$NATIVE_TOKEN_PRICE"
echo
echo "=== Keeper policy plan ==="
echo "deployer_eth=$deployer_eth"
echo "gas_shortfall_eth=$KEEPER_GAS_SHORTFALL_ETH"
echo "gas_shortfall_quote_raw=$KEEPER_GAS_SHORTFALL_QUOTE_RAW human=$(to_human "$KEEPER_GAS_SHORTFALL_QUOTE_RAW")"
if [[ -n "$MANAGER" ]]; then
  echo "manager_quote_raw=$KEEPER_MANAGER_QUOTE_RAW human=$(to_human "$KEEPER_MANAGER_QUOTE_RAW")"
  echo "manager_available_raw=$KEEPER_MANAGER_AVAILABLE_RAW human=$(to_human "$KEEPER_MANAGER_AVAILABLE_RAW")"
  echo "receiver_sweepable_raw=$KEEPER_RECEIVER_SWEEPABLE_RAW human=$(to_human "$KEEPER_RECEIVER_SWEEPABLE_RAW")"
  echo "gas_recipient=${KEEPER_GAS_RECIPIENT:-<unset>}"
  echo "recycle_recipient=${KEEPER_RECYCLE_RECIPIENT:-<unset>}"
else
  echo "receiver_quote_raw=$KEEPER_RECEIVER_QUOTE_RAW human=$(to_human "$KEEPER_RECEIVER_QUOTE_RAW")"
  echo "receiver_sweepable_raw=$KEEPER_RECEIVER_SWEEPABLE_RAW human=$(to_human "$KEEPER_RECEIVER_SWEEPABLE_RAW")"
fi
echo "total_controlled_raw=$KEEPER_TOTAL_CONTROLLED_RAW human=$(to_human "$KEEPER_TOTAL_CONTROLLED_RAW")"
echo "planned_gas_distribution_raw=$KEEPER_GAS_DISTRIBUTION_RAW human=$(to_human "$KEEPER_GAS_DISTRIBUTION_RAW")"
echo "planned_recycle_distribution_raw=$KEEPER_RECYCLE_DISTRIBUTION_RAW human=$(to_human "$KEEPER_RECYCLE_DISTRIBUTION_RAW")"
# Preflight for the recycle phase: whichever path will run later must be able
# to move quote (manager owns the receiver, or the receiver itself is sweepable).
if [[ "$SKIP_RECYCLE" != "1" ]]; then
  if [[ -n "$MANAGER" ]]; then
    manager_receiver_owned="$(cast call "$MANAGER" 'isReceiverOwnedByManager()(bool)' --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
    if [[ "$manager_receiver_owned" != "true" ]]; then
      cat >&2 <<EOF
[fail] treasury manager is not the receiver owner, so it cannot harvest retained quote yet
[hint] Deploy or transfer ownership with:
bash scripts/deployment/deploy-mainnet-aave-quote-push-receiver.sh --apply
AAVE_QUOTE_PUSH_RECEIVER_MAINNET=<new_receiver> QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP=1 bash scripts/deployment/deploy-mainnet-quote-push-treasury-manager.sh --apply
EOF
      exit 1
    fi
  else
    # Best-effort probe: an empty result means the deployed receiver predates
    # owner()/sweep support (or RECEIVER is unset).
    receiver_owner="$(cast call "$RECEIVER" 'owner()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
    if [[ -z "$receiver_owner" ]]; then
      cat >&2 <<EOF
[fail] receiver does not expose owner()/sweep support: ${RECEIVER:-<unset>}
[hint] Redeploy the updated receiver, then hand it to the treasury manager:
bash scripts/deployment/deploy-mainnet-aave-quote-push-receiver.sh --apply
AAVE_QUOTE_PUSH_RECEIVER_MAINNET=<new_receiver> QUOTE_PUSH_TREASURY_TAKE_RECEIVER_OWNERSHIP=1 bash scripts/deployment/deploy-mainnet-quote-push-treasury-manager.sh --apply
EOF
      exit 1
    fi
  fi
fi
# Manager dry-run short-circuit: simulate one full managed cycle and stop.
if [[ "$MODE" == "dry-run" && -n "$MANAGER" ]]; then
  export QUOTE_PUSH_TREASURY_HARVEST=1
  export QUOTE_PUSH_TREASURY_GAS_HOLDBACK_TARGET_RAW="$KEEPER_GAS_SHORTFALL_QUOTE_RAW"
  bash "${PROXMOX_ROOT}/scripts/deployment/run-mainnet-aave-quote-push-managed-cycle.sh" --dry-run
  if [[ "${KEEPER_RECYCLE_RECIPIENT,,}" == "${DEPLOYER,,}" ]]; then
    echo "[plan] A live managed cycle would route recycle quote back to the deployer, after which the wallet LP helper can be attempted."
  fi
  exit 0
fi
# Phase 2: one flash quote-push round (skippable via env).
if [[ "$SKIP_FLASH" != "1" ]]; then
  bash "${PROXMOX_ROOT}/scripts/deployment/run-mainnet-aave-cwusdc-quote-push-once.sh" "--${MODE}"
fi
echo
echo "=== Post-execution accounting ==="
bash "${PROXMOX_ROOT}/scripts/verify/report-mainnet-aave-quote-push-surplus-accounting.sh"
if [[ "$SKIP_RECYCLE" == "1" ]]; then
  echo "[stop] recycle phase skipped by QUOTE_PUSH_KEEPER_SKIP_RECYCLE=1"
  exit 0
fi
# Re-plan with post-flash balances before distributing anything.
deployer_eth="$(cast balance "$DEPLOYER" --ether --rpc-url "$ETHEREUM_MAINNET_RPC")"
compute_keeper_plan "$MANAGER" "$deployer_eth" "$TOKEN" "$RECEIVER" "$RECEIVER_RESERVE_RAW" "$GAS_FLOOR_ETH" "$OP_BUFFER_ETH" "$NATIVE_TOKEN_PRICE"
if (( KEEPER_TOTAL_CONTROLLED_RAW == 0 )); then
  echo "[stop] no controllable quote is available for recycle"
  exit 0
fi
# --- Recycle phase ---
# Manager path: harvest receiver surplus into the treasury manager, then
# distribute the planned gas holdback and recycle split. Fallback path (no
# manager configured): direct sweep + wallet recycle helper.
if [[ -n "$MANAGER" ]]; then
  # Harvest only when the receiver actually holds sweepable quote.
  export QUOTE_PUSH_TREASURY_HARVEST="$(
python3 - "$KEEPER_RECEIVER_SWEEPABLE_RAW" <<'PY'
import sys
print("1" if int(sys.argv[1]) > 0 else "0")
PY
)"
  export QUOTE_PUSH_TREASURY_GAS_DISTRIBUTION_RAW="$KEEPER_GAS_DISTRIBUTION_RAW"
  export QUOTE_PUSH_TREASURY_RECYCLE_DISTRIBUTION_RAW="$KEEPER_RECYCLE_DISTRIBUTION_RAW"
  bash "${PROXMOX_ROOT}/scripts/deployment/manage-mainnet-quote-push-treasury.sh" "--${MODE}"
  if [[ "$MODE" == "dry-run" ]]; then
    if [[ "${KEEPER_RECYCLE_RECIPIENT,,}" == "${DEPLOYER,,}" && "$KEEPER_RECYCLE_DISTRIBUTION_RAW" != "0" ]]; then
      echo "[plan] After a live manager distribution to the deployer, the wallet tranche helper will be attempted."
    fi
    exit 0
  fi
  # Never grow the pool while the deployer wallet is still below its gas
  # floor (float compare delegated to python; bash arithmetic is integer-only).
  if python3 - "$KEEPER_GAS_SHORTFALL_ETH" <<'PY'
import sys
sys.exit(0 if float(sys.argv[1]) > 0 else 1)
PY
  then
    echo "[stop] quote was distributed, but deployer ETH is still below the recycle floor; skipping wallet LP tranche"
    exit 0
  fi
  if (( KEEPER_RECYCLE_DISTRIBUTION_RAW == 0 )); then
    echo "[stop] no recycle allocation remains after gas holdback"
    exit 0
  fi
  if [[ -z "${KEEPER_RECYCLE_RECIPIENT:-}" || "${KEEPER_RECYCLE_RECIPIENT,,}" != "${DEPLOYER,,}" ]]; then
    echo "[stop] recycle recipient is not the deployer wallet, so the wallet LP helper is intentionally skipped"
    exit 0
  fi
  # Capture the helper's REAL exit status. The previous form
  #     if ! bash helper; then status=$?; ...
  # always observed status=0 inside the then-branch, because "!" negates the
  # pipeline status and $? reflects that negated value — so the exit-code-3
  # ("no affordable tranche") clean stop never fired and real failures were
  # reported as success. Exit code 3 is treated as a clean stop; any other
  # non-zero status is propagated as a failure.
  tranche_status=0
  bash "${PROXMOX_ROOT}/scripts/deployment/apply-mainnet-cwusdc-usdc-peg-tranche-from-wallet.sh" --apply || tranche_status=$?
  if (( tranche_status != 0 )); then
    if (( tranche_status == 3 )); then
      echo "[stop] recycle distribution completed, but no wallet-funded LP tranche is currently affordable"
      exit 0
    fi
    exit "$tranche_status"
  fi
else
  # No manager configured: fall back to the direct sweep-and-recycle helper.
  bash "${PROXMOX_ROOT}/scripts/deployment/recycle-mainnet-aave-quote-push-surplus.sh" "--${MODE}"
fi

View File

@@ -0,0 +1,278 @@
#!/usr/bin/env bash
set -euo pipefail
# Run one manager-backed Mainnet quote-push cycle:
# flash quote-push -> harvest retained quote into manager -> distribute to configured recipients.
#
# Default: simulation only. Use --apply to broadcast.
#
# Required env:
#   PRIVATE_KEY, ETHEREUM_MAINNET_RPC
#   DODO_PMM_INTEGRATION_MAINNET
#   QUOTE_PUSH_TREASURY_MANAGET_MAINNET
# Required env:
#   PRIVATE_KEY, ETHEREUM_MAINNET_RPC
#   DODO_PMM_INTEGRATION_MAINNET
#   QUOTE_PUSH_TREASURY_MANAGER_MAINNET
#   AAVE_QUOTE_PUSH_RECEIVER_MAINNET      optional; defaults to latest broadcast or canonical receiver
#   QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET  optional; can be auto-picked from latest broadcast
#   FLASH_QUOTE_AMOUNT_RAW                optional; defaults to 200000
#
# Optional treasury env:
#   QUOTE_PUSH_TREASURY_HARVEST                   optional; defaults to 1
#   QUOTE_PUSH_TREASURY_GAS_HOLDBACK_TARGET_RAW   optional; defaults to 0
#
# Usage:
#   bash scripts/deployment/run-mainnet-aave-quote-push-managed-cycle.sh --dry-run
#   QUOTE_PUSH_TREASURY_GAS_HOLDBACK_TARGET_RAW=0 bash scripts/deployment/run-mainnet-aave-quote-push-managed-cycle.sh --apply
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Foundry project checkout used for broadcasts and forge scripts.
SMOM="${PROXMOX_ROOT}/smom-dbis-138"
# Canonical fallback addresses used when neither env nor a broadcast provides one.
# NOTE(review): assumed to be the canonical mainnet deployments — verify before changing.
DEFAULT_AAVE_QUOTE_PUSH_RECEIVER_MAINNET="0x241cb416aaFC2654078b7E2376adED2bDeFbCBa2"
DEFAULT_POOL_CWUSDC_USDC_MAINNET="0x69776fc607e9edA8042e320e7e43f54d06c68f0E"
# Snapshot every caller-supplied env var BEFORE sourcing the env files below,
# so explicit caller values take precedence over file-provided ones.
# (${VAR-} keeps a set-but-empty value distinct from unset.)
_qp_private_key="${PRIVATE_KEY-}"
_qp_rpc="${ETHEREUM_MAINNET_RPC-}"
_qp_receiver="${AAVE_QUOTE_PUSH_RECEIVER_MAINNET-}"
_qp_manager="${QUOTE_PUSH_TREASURY_MANAGER_MAINNET-}"
_qp_unwinder="${QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET-}"
_qp_amount="${FLASH_QUOTE_AMOUNT_RAW-}"
_qp_unwind_type="${QUOTE_PUSH_UNWINDER_TYPE-}"
_qp_unwind_mode="${UNWIND_MODE-}"
_qp_pool="${POOL_CWUSDC_USDC_MAINNET-}"
_qp_integration="${DODO_PMM_INTEGRATION_MAINNET-}"
_qp_pool_a="${UNWIND_TWO_HOP_POOL_A-}"
_qp_pool_b="${UNWIND_TWO_HOP_POOL_B-}"
_qp_mid_token="${UNWIND_TWO_HOP_MID_TOKEN-}"
_qp_min_mid_out="${UNWIND_MIN_MID_OUT_RAW-}"
_qp_min_out_pmm="${MIN_OUT_PMM-}"
_qp_min_out_unwind="${MIN_OUT_UNWIND-}"
_qp_fee_u24="${UNWIND_V3_FEE_U24-}"
_qp_dodo_pool="${UNWIND_DODO_POOL-}"
_qp_v3_path="${UNWIND_V3_PATH_HEX-}"
_qp_intermediate_token="${UNWIND_INTERMEDIATE_TOKEN-}"
_qp_min_intermediate_out="${UNWIND_MIN_INTERMEDIATE_OUT_RAW-}"
_qp_treasury_harvest="${QUOTE_PUSH_TREASURY_HARVEST-}"
_qp_treasury_holdback="${QUOTE_PUSH_TREASURY_GAS_HOLDBACK_TARGET_RAW-}"
# Best-effort sourcing of project env files; either may be absent.
# shellcheck disable=SC1091
source "${PROXMOX_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck disable=SC1091
source "${SMOM}/scripts/load-env.sh" >/dev/null 2>&1 || true
# Restore the snapshotted caller values (caller precedence over env files).
[[ -n "$_qp_private_key" ]] && export PRIVATE_KEY="$_qp_private_key"
[[ -n "$_qp_rpc" ]] && export ETHEREUM_MAINNET_RPC="$_qp_rpc"
[[ -n "$_qp_receiver" ]] && export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="$_qp_receiver"
[[ -n "$_qp_manager" ]] && export QUOTE_PUSH_TREASURY_MANAGER_MAINNET="$_qp_manager"
[[ -n "$_qp_unwinder" ]] && export QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET="$_qp_unwinder"
[[ -n "$_qp_amount" ]] && export FLASH_QUOTE_AMOUNT_RAW="$_qp_amount"
[[ -n "$_qp_unwind_type" ]] && export QUOTE_PUSH_UNWINDER_TYPE="$_qp_unwind_type"
[[ -n "$_qp_unwind_mode" ]] && export UNWIND_MODE="$_qp_unwind_mode"
[[ -n "$_qp_pool" ]] && export POOL_CWUSDC_USDC_MAINNET="$_qp_pool"
[[ -n "$_qp_integration" ]] && export DODO_PMM_INTEGRATION_MAINNET="$_qp_integration"
[[ -n "$_qp_pool_a" ]] && export UNWIND_TWO_HOP_POOL_A="$_qp_pool_a"
[[ -n "$_qp_pool_b" ]] && export UNWIND_TWO_HOP_POOL_B="$_qp_pool_b"
[[ -n "$_qp_mid_token" ]] && export UNWIND_TWO_HOP_MID_TOKEN="$_qp_mid_token"
[[ -n "$_qp_min_mid_out" ]] && export UNWIND_MIN_MID_OUT_RAW="$_qp_min_mid_out"
[[ -n "$_qp_min_out_pmm" ]] && export MIN_OUT_PMM="$_qp_min_out_pmm"
[[ -n "$_qp_min_out_unwind" ]] && export MIN_OUT_UNWIND="$_qp_min_out_unwind"
[[ -n "$_qp_fee_u24" ]] && export UNWIND_V3_FEE_U24="$_qp_fee_u24"
[[ -n "$_qp_dodo_pool" ]] && export UNWIND_DODO_POOL="$_qp_dodo_pool"
[[ -n "$_qp_v3_path" ]] && export UNWIND_V3_PATH_HEX="$_qp_v3_path"
[[ -n "$_qp_intermediate_token" ]] && export UNWIND_INTERMEDIATE_TOKEN="$_qp_intermediate_token"
[[ -n "$_qp_min_intermediate_out" ]] && export UNWIND_MIN_INTERMEDIATE_OUT_RAW="$_qp_min_intermediate_out"
[[ -n "$_qp_treasury_harvest" ]] && export QUOTE_PUSH_TREASURY_HARVEST="$_qp_treasury_harvest"
[[ -n "$_qp_treasury_holdback" ]] && export QUOTE_PUSH_TREASURY_GAS_HOLDBACK_TARGET_RAW="$_qp_treasury_holdback"
# Drop the snapshot temporaries so they cannot leak into later logic.
unset _qp_private_key _qp_rpc _qp_receiver _qp_manager _qp_unwinder _qp_amount _qp_unwind_type _qp_unwind_mode
unset _qp_pool _qp_integration _qp_pool_a _qp_pool_b _qp_mid_token _qp_min_mid_out _qp_min_out_pmm
unset _qp_min_out_unwind _qp_fee_u24 _qp_dodo_pool _qp_v3_path _qp_intermediate_token _qp_min_intermediate_out
unset _qp_treasury_harvest _qp_treasury_holdback
# Parse CLI flags: --apply broadcasts, --dry-run (the default) simulates.
# Iterating over an empty "$@" is a no-op, so no zero-arg special case needed.
BROADCAST=()
for cli_arg in "$@"; do
  case "$cli_arg" in
    --apply) BROADCAST=(--broadcast) ;;
    --dry-run) BROADCAST=() ;;
    *)
      echo "[fail] unknown arg: $cli_arg" >&2
      exit 2
      ;;
  esac
done
# require VAR_NAME — abort (exit 1) unless the named env var is set non-empty.
require() {
  local var_name="$1"
  # ${!var_name:-} is indirect expansion: the value of the variable named here.
  [[ -n "${!var_name:-}" ]] && return 0
  echo "[fail] missing required env: $var_name" >&2
  exit 1
}
# pick_latest_create_address DEPLOY_SCRIPT CONTRACT_NAME
# Print the address of the most recent CREATE of CONTRACT_NAME recorded in the
# Foundry broadcast manifest for DEPLOY_SCRIPT (chain id 1). Returns 1 when the
# manifest is missing or jq is unavailable; prints nothing when no match.
pick_latest_create_address() {
  local deploy_script="$1"
  local wanted="$2"
  local manifest="${SMOM}/broadcast/${deploy_script}/1/run-latest.json"
  [[ -f "$manifest" ]] || return 1
  command -v jq >/dev/null 2>&1 || return 1
  # Last matching CREATE wins (most recent transaction in the manifest).
  jq -r --arg contract "$wanted" \
    '.transactions[]? | select(.transactionType == "CREATE" and .contractName == $contract) | .contractAddress' \
    "$manifest" | tail -n1
}
# pick_default_unwinder — probe broadcast manifests for known unwinders in
# fixed priority order (two-hop DODO, DODO, UniswapV3, DODO->UniswapV3 multi-hop).
# On success sets PICK_DEFAULT_UNWINDER_ADDR / PICK_DEFAULT_UNWINDER_MODE and
# returns 0; otherwise leaves both empty and returns 1.
pick_default_unwinder() {
  PICK_DEFAULT_UNWINDER_ADDR=""
  PICK_DEFAULT_UNWINDER_MODE=""
  local entry candidate deploy_script contract_name unwind_mode
  # "deploy script|contract name|UNWIND_MODE value", highest priority first.
  local -a candidates=(
    "DeployTwoHopDodoIntegrationUnwinder.s.sol|TwoHopDodoIntegrationUnwinder|4"
    "DeployDODOIntegrationExternalUnwinder.s.sol|DODOIntegrationExternalUnwinder|1"
    "DeployUniswapV3ExternalUnwinder.s.sol|UniswapV3ExternalUnwinder|0"
    "DeployDODOToUniswapV3MultiHopExternalUnwinder.s.sol|DODOToUniswapV3MultiHopExternalUnwinder|5"
  )
  for entry in "${candidates[@]}"; do
    IFS='|' read -r deploy_script contract_name unwind_mode <<<"$entry"
    candidate="$(pick_latest_create_address "$deploy_script" "$contract_name" || true)"
    if [[ -n "$candidate" && "$candidate" != "null" ]]; then
      PICK_DEFAULT_UNWINDER_ADDR="$candidate"
      PICK_DEFAULT_UNWINDER_MODE="$unwind_mode"
      return 0
    fi
  done
  return 1
}
# Resolve the requested symbolic unwinder type (may be empty -> auto-pick later).
UNW="${QUOTE_PUSH_UNWINDER_TYPE:-}"
# Receiver precedence: explicit env > latest broadcast CREATE > canonical default.
if [[ -z "${AAVE_QUOTE_PUSH_RECEIVER_MAINNET:-}" ]]; then
  inferred_receiver="$(pick_latest_create_address "DeployAaveQuotePushFlashReceiver.s.sol" "AaveQuotePushFlashReceiver" || true)"
  export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="${inferred_receiver:-$DEFAULT_AAVE_QUOTE_PUSH_RECEIVER_MAINNET}"
fi
# BUGFIX: the old condition also tested '"${_qp_pool:-}" == ""', but _qp_pool
# is always unset by the snapshot cleanup above, so that clause was always
# true and the default pool clobbered any caller-provided
# POOL_CWUSDC_USDC_MAINNET. Apply the default only when the var is unset/empty.
if [[ -z "${POOL_CWUSDC_USDC_MAINNET:-}" ]]; then
  export POOL_CWUSDC_USDC_MAINNET="$DEFAULT_POOL_CWUSDC_USDC_MAINNET"
fi
# Default gross flash quote size in raw token units (see script header).
if [[ -z "${FLASH_QUOTE_AMOUNT_RAW:-}" ]]; then
  export FLASH_QUOTE_AMOUNT_RAW=200000
fi
# Minimal PMM min-out guard; callers may raise it.
if [[ -z "${MIN_OUT_PMM:-}" ]]; then
  export MIN_OUT_PMM=1
fi
# A symbolic unwinder type was requested but no explicit address is set:
# map the type to its deploy script/contract and resolve the address from the
# latest matching Foundry broadcast. Also seed UNWIND_MODE per type.
if [[ -z "${QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET:-}" && -n "$UNW" ]]; then
  unwind_script=""
  unwind_contract=""
  case "$UNW" in
    univ3)
      unwind_script="DeployUniswapV3ExternalUnwinder.s.sol"
      unwind_contract="UniswapV3ExternalUnwinder"
      export UNWIND_MODE="${UNWIND_MODE:-0}"
      ;;
    dodo)
      unwind_script="DeployDODOIntegrationExternalUnwinder.s.sol"
      unwind_contract="DODOIntegrationExternalUnwinder"
      export UNWIND_MODE="${UNWIND_MODE:-1}"
      ;;
    two_hop_dodo)
      unwind_script="DeployTwoHopDodoIntegrationUnwinder.s.sol"
      unwind_contract="TwoHopDodoIntegrationUnwinder"
      export UNWIND_MODE="${UNWIND_MODE:-4}"
      ;;
    dodo_univ3)
      unwind_script="DeployDODOToUniswapV3MultiHopExternalUnwinder.s.sol"
      unwind_contract="DODOToUniswapV3MultiHopExternalUnwinder"
      export UNWIND_MODE="${UNWIND_MODE:-5}"
      ;;
    *)
      echo "[fail] QUOTE_PUSH_UNWINDER_TYPE must be univ3, dodo, two_hop_dodo, or dodo_univ3 when set" >&2
      exit 1
      ;;
  esac
  inferred_unwinder="$(pick_latest_create_address "$unwind_script" "$unwind_contract" || true)"
  if [[ -n "$inferred_unwinder" && "$inferred_unwinder" != "null" ]]; then
    export QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET="$inferred_unwinder"
  fi
fi
# Still no unwinder: auto-pick whichever known unwinder has a broadcast record
# (priority order defined in pick_default_unwinder).
if [[ -z "${QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET:-}" ]]; then
  if pick_default_unwinder; then
    if [[ -n "$PICK_DEFAULT_UNWINDER_ADDR" && "$PICK_DEFAULT_UNWINDER_ADDR" != "null" ]]; then
      export QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET="$PICK_DEFAULT_UNWINDER_ADDR"
      export UNWIND_MODE="$PICK_DEFAULT_UNWINDER_MODE"
    fi
  fi
fi
# Per-mode routing defaults (mode 4 = two-hop DODO, mode 5 = DODO->UniswapV3).
# NOTE(review): these hard-coded pool/token addresses are assumed to be the
# canonical mainnet ones — verify against deployment records before changing.
if [[ "${UNWIND_MODE:-}" == "4" ]]; then
  export UNWIND_TWO_HOP_POOL_A="${UNWIND_TWO_HOP_POOL_A:-0xe944b7Cb012A0820c07f54D51e92f0e1C74168DB}"
  export UNWIND_TWO_HOP_POOL_B="${UNWIND_TWO_HOP_POOL_B:-0x27f3aE7EE71Be3d77bAf17d4435cF8B895DD25D2}"
  export UNWIND_TWO_HOP_MID_TOKEN="${UNWIND_TWO_HOP_MID_TOKEN:-0xaF5017d0163ecb99d9B5D94e3b4D7b09Af44D8AE}"
  export UNWIND_MIN_MID_OUT_RAW="${UNWIND_MIN_MID_OUT_RAW:-1}"
elif [[ "${UNWIND_MODE:-}" == "5" ]]; then
  export UNWIND_DODO_POOL="${UNWIND_DODO_POOL:-0xCC0fd27A40775c9AfcD2BBd3f7c902b0192c247A}"
  export UNWIND_INTERMEDIATE_TOKEN="${UNWIND_INTERMEDIATE_TOKEN:-0xdAC17F958D2ee523a2206206994597C13D831ec7}"
  export UNWIND_MIN_INTERMEDIATE_OUT_RAW="${UNWIND_MIN_INTERMEDIATE_OUT_RAW:-1}"
fi
# Validate the fully-resolved configuration before handing off to Foundry.
require ETHEREUM_MAINNET_RPC
require PRIVATE_KEY
require QUOTE_PUSH_TREASURY_MANAGER_MAINNET
require AAVE_QUOTE_PUSH_RECEIVER_MAINNET
require QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET
require DODO_PMM_INTEGRATION_MAINNET
require FLASH_QUOTE_AMOUNT_RAW
UM="${UNWIND_MODE:-0}"
# Mode-specific parameter requirements.
# NOTE(review): mode 3 is not accepted here — confirm that is intentional.
if [[ "$UM" == "0" ]]; then
  require UNWIND_V3_FEE_U24
elif [[ "$UM" == "1" ]]; then
  require UNWIND_DODO_POOL
elif [[ "$UM" == "2" ]]; then
  require UNWIND_V3_PATH_HEX
elif [[ "$UM" == "4" ]]; then
  require UNWIND_TWO_HOP_POOL_A
  require UNWIND_TWO_HOP_POOL_B
  require UNWIND_TWO_HOP_MID_TOKEN
elif [[ "$UM" == "5" ]]; then
  require UNWIND_DODO_POOL
  require UNWIND_INTERMEDIATE_TOKEN
  require UNWIND_V3_PATH_HEX
else
  echo "[fail] UNWIND_MODE must be 0, 1, 2, 4, or 5" >&2
  exit 1
fi
# Print the resolved plan for the operator/log before execution.
echo "receiver=$AAVE_QUOTE_PUSH_RECEIVER_MAINNET"
echo "manager=$QUOTE_PUSH_TREASURY_MANAGER_MAINNET"
echo "unwinder=$QUOTE_PUSH_EXTERNAL_UNWINDER_MAINNET"
echo "flash_quote_amount_raw=$FLASH_QUOTE_AMOUNT_RAW unwind_mode=$UM"
echo "gas_holdback_target_raw=${QUOTE_PUSH_TREASURY_GAS_HOLDBACK_TARGET_RAW:-0}"
# Run the cycle from the Foundry project dir in a subshell so the cd does not
# leak; BROADCAST is empty for dry-run, (--broadcast) for --apply.
# NOTE(review): "${BROADCAST[@]}" on an empty array errors under set -u on
# bash < 4.4 — confirm operator hosts run a newer bash.
(
  cd "$SMOM"
  forge script script/flash/RunManagedMainnetAaveCwusdcUsdcQuotePushCycle.s.sol:RunManagedMainnetAaveCwusdcUsdcQuotePushCycle \
    --rpc-url "$ETHEREUM_MAINNET_RPC" \
    "${BROADCAST[@]}" \
    -vvvv
)

View File

@@ -9,6 +9,7 @@ set -euo pipefail
# FLASH_MODEL_SCAN_SIZES — comma-separated gross flash quote sizes in raw units (default 5e6,1e7,2e7)
# PMM_FLASH_EXIT_PRICE_CMD — passed to node --external-exit-price-cmd (default: printf 1.02)
# GAS_GWEI, NATIVE_PRICE — optional overrides for the economics model
# If GAS_GWEI is unset, the script reads the live mainnet gas price and converts it to gwei.
# FLASH_MODEL_GAS_TX_COUNT, FLASH_MODEL_GAS_PER_TX — gas row (defaults 3 / 250000)
# FLASH_MODEL_MAX_POST_TRADE_DEV_BPS — deviation guard (default 500; raise only for stress math, not production)
#
@@ -40,7 +41,7 @@ INTEGRATION="${DODO_PMM_INTEGRATION_MAINNET:-}"
POOL="${POOL_CWUSDC_USDC_MAINNET:-0x69776fc607e9edA8042e320e7e43f54d06c68f0E}"
SCAN="${FLASH_MODEL_SCAN_SIZES:-5000000,10000000,25000000}"
EXIT_CMD="${PMM_FLASH_EXIT_PRICE_CMD:-printf 1.02}"
GAS_GWEI="${GAS_GWEI:-40}"
GAS_GWEI="${GAS_GWEI:-}"
NATIVE_PRICE="${NATIVE_PRICE:-3200}"
GAS_TXN="${FLASH_MODEL_GAS_TX_COUNT:-3}"
GAS_PER="${FLASH_MODEL_GAS_PER_TX:-250000}"
@@ -62,6 +63,15 @@ out="$(cast call "$POOL" 'getVaultReserve()(uint256,uint256)' --rpc-url "$RPC_UR
B="$(printf '%s\n' "$out" | sed -n '1p' | awk '{print $1}')"
Q="$(printf '%s\n' "$out" | sed -n '2p' | awk '{print $1}')"
if [[ -z "$GAS_GWEI" ]]; then
gas_price_wei="$(cast gas-price --rpc-url "$RPC_URL" | awk '{print $1}')"
GAS_GWEI="$(python3 - "$gas_price_wei" <<'PY'
import sys
print(f"{int(sys.argv[1]) / 1_000_000_000:.9f}")
PY
)"
fi
PMM_JS="${PROXMOX_ROOT}/scripts/analytics/pmm-flash-push-break-even.mjs"
echo "=== Flash quote-push model sweep (dry-run only) ==="
@@ -92,3 +102,4 @@ for x in "${sizes[@]}"; do
done
echo "Live execution: deploy-mainnet-aave-quote-push-stack.sh then run-mainnet-aave-cwusdc-quote-push-once.sh (see plan-mainnet-cwusdc-flash-quote-push-rebalance.sh)"
echo "Receiver surplus accounting: bash scripts/verify/report-mainnet-aave-quote-push-surplus-accounting.sh"

View File

@@ -0,0 +1,166 @@
#!/usr/bin/env bash
set -euo pipefail
# Sweep retained surplus from a deployed Mainnet Aave quote-push receiver.
# Default: simulation only. Use --apply to broadcast.
#
# Env:
#   PRIVATE_KEY, ETHEREUM_MAINNET_RPC, AAVE_QUOTE_PUSH_RECEIVER_MAINNET
#   QUOTE_PUSH_SURPLUS_TOKEN_MAINNET      optional; defaults to mainnet USDC
#   QUOTE_PUSH_SURPLUS_RECIPIENT          optional; defaults to deployer derived from PRIVATE_KEY
#   QUOTE_PUSH_SURPLUS_RESERVE_RAW        optional; keep this much on the receiver when sweeping surplus mode
#   QUOTE_PUSH_SURPLUS_EXACT_AMOUNT_RAW   optional; if > 0, sweep this exact amount instead of balance - reserve
#
# Usage:
#   source scripts/lib/load-project-env.sh
#   bash scripts/deployment/sweep-mainnet-aave-quote-push-receiver-surplus.sh --dry-run
#   QUOTE_PUSH_SURPLUS_RESERVE_RAW=50000 bash scripts/deployment/sweep-mainnet-aave-quote-push-receiver-surplus.sh --apply
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Foundry project checkout used for broadcasts and forge scripts.
SMOM="${PROXMOX_ROOT}/smom-dbis-138"
DEFAULT_USDC_MAINNET="0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"
# Snapshot caller-supplied env BEFORE sourcing the env files below, so explicit
# caller values take precedence over file-provided ones.
_qp_private_key="${PRIVATE_KEY-}"
_qp_rpc="${ETHEREUM_MAINNET_RPC-}"
_qp_receiver="${AAVE_QUOTE_PUSH_RECEIVER_MAINNET-}"
_qp_token="${QUOTE_PUSH_SURPLUS_TOKEN_MAINNET-}"
_qp_recipient="${QUOTE_PUSH_SURPLUS_RECIPIENT-}"
_qp_reserve="${QUOTE_PUSH_SURPLUS_RESERVE_RAW-}"
_qp_exact="${QUOTE_PUSH_SURPLUS_EXACT_AMOUNT_RAW-}"
# Best-effort sourcing: either env file may be absent.
# shellcheck disable=SC1091
source "${PROXMOX_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck disable=SC1091
source "${SMOM}/scripts/load-env.sh" >/dev/null 2>&1 || true
# Restore the snapshotted caller values (caller precedence).
[[ -n "$_qp_private_key" ]] && export PRIVATE_KEY="$_qp_private_key"
[[ -n "$_qp_rpc" ]] && export ETHEREUM_MAINNET_RPC="$_qp_rpc"
[[ -n "$_qp_receiver" ]] && export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="$_qp_receiver"
[[ -n "$_qp_token" ]] && export QUOTE_PUSH_SURPLUS_TOKEN_MAINNET="$_qp_token"
[[ -n "$_qp_recipient" ]] && export QUOTE_PUSH_SURPLUS_RECIPIENT="$_qp_recipient"
[[ -n "$_qp_reserve" ]] && export QUOTE_PUSH_SURPLUS_RESERVE_RAW="$_qp_reserve"
[[ -n "$_qp_exact" ]] && export QUOTE_PUSH_SURPLUS_EXACT_AMOUNT_RAW="$_qp_exact"
# Drop the snapshot temporaries so they cannot leak into later logic.
unset _qp_private_key _qp_rpc _qp_receiver _qp_token _qp_recipient _qp_reserve _qp_exact
# require_cmd TOOL — abort (exit 1) unless TOOL is available on PATH.
require_cmd() {
  local tool="$1"
  if ! command -v "$tool" >/dev/null 2>&1; then
    echo "[fail] missing required command: $tool" >&2
    exit 1
  fi
}
# Fail fast if the Foundry toolchain (cast/forge) is not on PATH.
require_cmd cast
require_cmd forge
# require_env VAR_NAME — abort (exit 1) unless the named env var is set non-empty.
require_env() {
  local key="$1"
  # ${!key:-} is indirect expansion: the value of the variable named by $key.
  [[ -n "${!key:-}" ]] || {
    echo "[fail] missing required env: $key" >&2
    exit 1
  }
}
# pick_latest_receiver — print the address of the most recent
# AaveQuotePushFlashReceiver CREATE recorded in the chain-1 broadcast manifest.
# Returns 1 when the manifest is missing or jq is unavailable.
pick_latest_receiver() {
  local manifest="${SMOM}/broadcast/DeployAaveQuotePushFlashReceiver.s.sol/1/run-latest.json"
  [[ -f "$manifest" ]] || return 1
  command -v jq >/dev/null 2>&1 || return 1
  # Last matching CREATE wins (most recent transaction in the manifest).
  jq -r '.transactions[]? | select(.transactionType == "CREATE" and .contractName == "AaveQuotePushFlashReceiver") | .contractAddress' \
    "$manifest" | tail -n1
}
# Parse CLI flags. Default is a dry run; --apply switches to broadcasting.
MODE="dry-run"
BROADCAST=()
for token in "$@"; do
  if [[ "$token" == "--dry-run" ]]; then
    MODE="dry-run"
    BROADCAST=()
  elif [[ "$token" == "--apply" ]]; then
    MODE="apply"
    BROADCAST=(--broadcast)
  else
    echo "[fail] unknown arg: $token (use --dry-run or --apply)" >&2
    exit 2
  fi
done
# PRIVATE_KEY and the RPC are mandatory; the receiver may still be inferred.
require_env PRIVATE_KEY
require_env ETHEREUM_MAINNET_RPC
# Receiver precedence: explicit env wins; otherwise the latest broadcast CREATE.
if [[ -z "${AAVE_QUOTE_PUSH_RECEIVER_MAINNET:-}" ]]; then
  inferred_receiver="$(pick_latest_receiver || true)"
  if [[ -n "$inferred_receiver" && "$inferred_receiver" != "null" ]]; then
    export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="$inferred_receiver"
  fi
fi
require_env AAVE_QUOTE_PUSH_RECEIVER_MAINNET
TOKEN="${QUOTE_PUSH_SURPLUS_TOKEN_MAINNET:-$DEFAULT_USDC_MAINNET}"
# Derive the deployer address from PRIVATE_KEY via cast.
DEPLOYER="$(cast wallet address --private-key "$PRIVATE_KEY")"
RECIPIENT="${QUOTE_PUSH_SURPLUS_RECIPIENT:-$DEPLOYER}"
RESERVE_RAW="${QUOTE_PUSH_SURPLUS_RESERVE_RAW:-0}"
EXACT_RAW="${QUOTE_PUSH_SURPLUS_EXACT_AMOUNT_RAW:-0}"
# Probe owner() to confirm the receiver supports the ownable sweep interface;
# an empty result means an older receiver without sweep support.
receiver_owner="$(cast call "$AAVE_QUOTE_PUSH_RECEIVER_MAINNET" 'owner()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
if [[ -z "$receiver_owner" ]]; then
  cat >&2 <<EOF
[fail] receiver does not expose owner()/sweep support: $AAVE_QUOTE_PUSH_RECEIVER_MAINNET
[hint] Redeploy the updated receiver, then point AAVE_QUOTE_PUSH_RECEIVER_MAINNET at the new address:
bash scripts/deployment/deploy-mainnet-aave-quote-push-receiver.sh --apply
EOF
  exit 1
fi
# Only the receiver owner may sweep; addresses compared case-insensitively.
if [[ "${receiver_owner,,}" != "${DEPLOYER,,}" ]]; then
  echo "[fail] deployer $DEPLOYER is not the receiver owner ($receiver_owner)" >&2
  exit 1
fi
receiver_balance_raw="$(
  cast call "$TOKEN" 'balanceOf(address)(uint256)' "$AAVE_QUOTE_PUSH_RECEIVER_MAINNET" --rpc-url "$ETHEREUM_MAINNET_RPC" \
    | awk '{print $1}'
)"
# Exact mode: sweep precisely EXACT_RAW; otherwise sweep balance minus reserve.
if (( EXACT_RAW > 0 )); then
  if (( receiver_balance_raw < EXACT_RAW )); then
    echo "[fail] receiver balance too small for exact sweep: have=$receiver_balance_raw need=$EXACT_RAW" >&2
    exit 1
  fi
  sweep_raw="$EXACT_RAW"
else
  # Exit 3 distinguishes "nothing to sweep" from hard failures for wrappers.
  if (( receiver_balance_raw <= RESERVE_RAW )); then
    echo "[stop] nothing to sweep: receiver_balance_raw=$receiver_balance_raw reserve_raw=$RESERVE_RAW" >&2
    exit 3
  fi
  sweep_raw=$((receiver_balance_raw - RESERVE_RAW))
fi
# Operator-facing summary of the computed sweep plan.
echo "=== sweep-mainnet-aave-quote-push-receiver-surplus ($MODE) ==="
echo "receiver=$AAVE_QUOTE_PUSH_RECEIVER_MAINNET"
echo "receiver_owner=$receiver_owner"
echo "token=$TOKEN"
echo "recipient=$RECIPIENT"
echo "receiver_balance_raw=$receiver_balance_raw"
echo "reserve_raw=$RESERVE_RAW"
echo "exact_raw=$EXACT_RAW"
echo "sweep_raw=$sweep_raw"
# Run the Foundry sweep script from the project dir in a subshell so the cd
# does not leak; BROADCAST is empty for dry-run, (--broadcast) for --apply.
(
  cd "$SMOM"
  forge script script/flash/SweepAaveQuotePushFlashReceiverSurplus.s.sol:SweepAaveQuotePushFlashReceiverSurplus \
    --rpc-url "$ETHEREUM_MAINNET_RPC" \
    "${BROADCAST[@]}" \
    -vvvv
)
# After a live run, report post-sweep balances for the operator log.
if [[ "$MODE" == "apply" ]]; then
  receiver_after="$(
    cast call "$TOKEN" 'balanceOf(address)(uint256)' "$AAVE_QUOTE_PUSH_RECEIVER_MAINNET" --rpc-url "$ETHEREUM_MAINNET_RPC" \
      | awk '{print $1}'
  )"
  recipient_after="$(
    cast call "$TOKEN" 'balanceOf(address)(uint256)' "$RECIPIENT" --rpc-url "$ETHEREUM_MAINNET_RPC" \
      | awk '{print $1}'
  )"
  echo "receiver_balance_after_raw=$receiver_after"
  echo "recipient_balance_after_raw=$recipient_after"
fi

View File

@@ -2,8 +2,19 @@
# Diagnose and Fix Storage Issues for Proxmox Container Migrations
# This script checks storage configuration and fixes issues to enable migrations
# HISTORICAL SCRIPT
# Targets migration-era CTs (1504/2503/2504/6201) and includes embedded passwords from that period.
# Keep for reference only; do not treat as a live runbook.
set -euo pipefail
if [[ "${HISTORICAL_ALLOW_RUN:-0}" != "1" ]]; then
echo "HISTORICAL: diagnose-and-fix-migration-storage.sh is a migration-era helper, not a current operator script." >&2
echo "Use current Proxmox storage and rebalance runbooks instead." >&2
echo "Set HISTORICAL_ALLOW_RUN=1 only if you intentionally need this legacy script." >&2
exit 1
fi
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
@@ -490,4 +501,3 @@ main() {
}
main "$@"

View File

@@ -1,25 +1,62 @@
#!/usr/bin/env bash
# Fix Besu installation on all nodes
# Fix Besu installation on selected nodes.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf"
source "$PROJECT_ROOT/scripts/lib/load-project-env.sh"
get_host_for_vmid() {
local vmid=$1
if [[ "$vmid" =~ ^(1505|1506|1507|1508)$ ]]; then
echo "${PROXMOX_HOST_ML110}"
elif [[ "$vmid" =~ ^(2500|2501|2502|2503|2504|2505)$ ]]; then
echo "${PROXMOX_HOST_R630_01}"
else
echo "${PROXMOX_HOST_R630_01}"
fi
APPLY=false
TARGET_VMIDS=()
usage() {
cat <<'EOF'
Usage: ./scripts/fix-besu-installation.sh --vmid <N> [--vmid <N> ...] [--apply]
Options:
--vmid <N> Required. Limit fix to one or more VMIDs.
--apply Perform the install fix. Without this flag, the script prints the target VMIDs and exits.
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
--apply)
APPLY=true
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
[[ ${#TARGET_VMIDS[@]} -gt 0 ]] || { usage >&2; exit 2; }
if ! $APPLY; then
echo "Dry-run only. Target VMIDs:"
for vmid in "${TARGET_VMIDS[@]}"; do
echo " VMID $vmid on $(get_host_for_vmid "$vmid")"
done
echo "Re-run with --apply to perform the Besu installation fix."
exit 0
fi
fix_besu() {
local vmid=$1
local host=$(get_host_for_vmid $vmid)
local host
host="$(get_host_for_vmid "$vmid")"
ssh -o StrictHostKeyChecking=no root@${host} "pct exec $vmid -- bash -c '
cd /opt
@@ -37,8 +74,8 @@ fix_besu() {
'" 2>&1 | grep -E "(Extracting|fixed)" || true
}
for vmid in 1505 1506 2500 2501 2502 1507 1508 2503 2504 2505; do
fix_besu $vmid &
for vmid in "${TARGET_VMIDS[@]}"; do
fix_besu "$vmid" &
done
wait
echo "Besu installation fixed on all nodes"
echo "Besu installation fix attempted on selected nodes"

View File

@@ -1,10 +1,16 @@
#!/usr/bin/env bash
# Flush mempools on all Besu nodes (validators, sentries, RPC)
# This script must be run on the Proxmox host
# Usage: ./flush-all-mempools-proxmox.sh
# Flush mempools on selected Besu nodes (validators, sentries, RPC)
# This script must be run on the Proxmox host.
# Usage:
# ./flush-all-mempools-proxmox.sh --vmid 2101
# ./flush-all-mempools-proxmox.sh --apply --vmid 2101
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
@@ -25,16 +31,63 @@ if ! command -v pct &>/dev/null; then
exit 1
fi
# All Besu nodes
VALIDATORS=(1000 1001 1002 1003 1004)
SENTRIES=(1500 1501 1502 1503)
RPC_NODES=(2500 2501 2502)
APPLY=false
TARGET_VMIDS=()
usage() {
cat <<'EOF'
Usage: ./flush-all-mempools-proxmox.sh --vmid <N> [--vmid <N> ...] [--apply]
Options:
--vmid <N> Required. Limit restart to one or more VMIDs.
--apply Restart services. Without this flag, the script prints the target VMIDs and exits.
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
--apply)
APPLY=true
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
[[ ${#TARGET_VMIDS[@]} -gt 0 ]] || { log_error "At least one --vmid is required."; usage >&2; exit 2; }
declare -A SERVICE_BY_VMID
for vmid in 1000 1001 1002 1003 1004; do SERVICE_BY_VMID[$vmid]="besu-validator.service"; done
for vmid in 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510; do SERVICE_BY_VMID[$vmid]="besu-sentry.service"; done
for vmid in 2101 2102 2103 2201 2301 2303 2304 2305 2306 2307 2308 2400 2401 2402 2403 2420 2430 2440 2460 2470 2480; do SERVICE_BY_VMID[$vmid]="besu-rpc.service"; done
echo "========================================="
echo "Flush All Besu Mempools"
echo "========================================="
echo ""
if ! $APPLY; then
log_warn "Dry-run only. Target VMIDs:"
for vmid in "${TARGET_VMIDS[@]}"; do
log_info " VMID $vmid service ${SERVICE_BY_VMID[$vmid]:-unknown}"
done
log_info "Re-run with --apply to restart services."
exit 0
fi
# Function to restart Besu service
restart_besu_service() {
local vmid=$1
@@ -57,48 +110,19 @@ restart_besu_service() {
fi
}
# Restart validators
log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Validators (1000-1004)"
log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
VALIDATOR_SUCCESS=0
for vmid in "${VALIDATORS[@]}"; do
if restart_besu_service "$vmid" "besu-validator.service"; then
((VALIDATOR_SUCCESS++))
SUCCESS=0
for vmid in "${TARGET_VMIDS[@]}"; do
service="${SERVICE_BY_VMID[$vmid]:-besu.service}"
if restart_besu_service "$vmid" "$service"; then
((SUCCESS++))
fi
done
log_info "Validators restarted: $VALIDATOR_SUCCESS/${#VALIDATORS[@]}"
echo ""
# Restart sentries
log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Sentries (1500-1503)"
log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
SENTRY_SUCCESS=0
for vmid in "${SENTRIES[@]}"; do
if restart_besu_service "$vmid" "besu-sentry.service"; then
((SENTRY_SUCCESS++))
fi
done
log_info "Sentries restarted: $SENTRY_SUCCESS/${#SENTRIES[@]}"
echo ""
# Restart RPC nodes
log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "RPC Nodes (2500-2502)"
log_info "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
RPC_SUCCESS=0
for vmid in "${RPC_NODES[@]}"; do
if restart_besu_service "$vmid" "besu-rpc.service"; then
((RPC_SUCCESS++))
fi
done
log_info "RPC nodes restarted: $RPC_SUCCESS/${#RPC_NODES[@]}"
log_info "Nodes restarted: $SUCCESS/${#TARGET_VMIDS[@]}"
echo ""
# Summary
TOTAL_SUCCESS=$((VALIDATOR_SUCCESS + SENTRY_SUCCESS + RPC_SUCCESS))
TOTAL_NODES=$((${#VALIDATORS[@]} + ${#SENTRIES[@]} + ${#RPC_NODES[@]}))
TOTAL_SUCCESS=$SUCCESS
TOTAL_NODES=${#TARGET_VMIDS[@]}
echo "========================================="
echo "Summary"
@@ -112,7 +136,7 @@ sleep 15
log_info "Verifying services are running..."
VERIFIED=0
for vmid in "${VALIDATORS[@]}" "${SENTRIES[@]}" "${RPC_NODES[@]}"; do
for vmid in "${TARGET_VMIDS[@]}"; do
if pct status "$vmid" 2>/dev/null | grep -q "running"; then
if pct exec "$vmid" -- pgrep -f "besu" >/dev/null 2>&1; then
log_success "✓ VMID $vmid: Besu running"
@@ -134,4 +158,3 @@ log_info "Next steps:"
log_info " 1. Wait for all nodes to sync"
log_info " 2. Run: ./scripts/configure-ethereum-mainnet-final.sh"
log_info ""

View File

@@ -4,13 +4,10 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
VMID="${1:-}"
USERNAME="${2:-rpc-user}"
EXPIRY_DAYS="${3:-365}"
@@ -28,10 +25,15 @@ error() { echo -e "${RED}[ERROR]${NC} $1"; }
if [ -z "$VMID" ]; then
error "Usage: $0 <VMID> <username> [expiry_days]"
error "Example: $0 2503 ali-full-access 365"
error "Example: $0 2101 fireblocks-access 365"
exit 1
fi
PROXMOX_HOST="$(get_host_for_vmid "$VMID")"
if [ "$VMID" != "2101" ]; then
warn "VMID $VMID is non-standard for the JWT-protected private RPC; current documented target is VMID 2101."
fi
# Get JWT secret from container or saved file
JWT_SECRET=""
@@ -115,24 +117,12 @@ PYTHON_SCRIPT
echo ""
echo "Token: $TOKEN"
echo ""
# Get IP address
declare -A RPC_IPS=(
[2503]="${RPC_ALI_1_ALT:-${RPC_ALI_1_ALT:-${RPC_ALI_1_ALT:-192.168.11.253}}}"
[2504]="${RPC_ALI_2_ALT:-${RPC_ALI_2_ALT:-${RPC_ALI_2_ALT:-192.168.11.254}}}"
[2505]="${RPC_LUIS_1:-${RPC_LUIS_1:-${RPC_LUIS_1:-192.168.11.255}}}"
[2506]="${RPC_LUIS_2:-${RPC_LUIS_2:-${RPC_LUIS_2:-192.168.11.202}}}"
[2507]="${RPC_PUTU_1:-${RPC_PUTU_1:-${RPC_PUTU_1:-192.168.11.203}}}"
[2508]="${RPC_PUTU_2:-${RPC_PUTU_2:-${RPC_PUTU_2:-192.168.11.204}}}"
)
IP="${RPC_IPS[$VMID]:-unknown}"
echo "Usage:"
echo " curl -k -H 'Authorization: Bearer $TOKEN' \\"
echo " -H 'Content-Type: application/json' \\"
echo " -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' \\"
echo " https://${IP}/"
echo " https://rpc-http-prv.d-bis.org/"
echo ""
exit 0
fi
@@ -140,4 +130,3 @@ fi
error "Failed to generate JWT token. Python3 is required."
exit 1

View File

@@ -2,6 +2,11 @@
set -euo pipefail
# Quick reference guide for JWT authentication
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
JWT_VMID="${JWT_VMID:-2101}"
JWT_HOST="$(get_host_for_vmid "$JWT_VMID")"
cat <<'REF'
╔════════════════════════════════════════════════════════════════╗
@@ -20,10 +25,10 @@ Public (No Auth):
🔑 GENERATE TOKEN
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
./scripts/generate-jwt-token.sh [username] [expiry_days]
./scripts/generate-jwt-token-for-container.sh 2101 [username] [expiry_days]
Example:
./scripts/generate-jwt-token.sh my-app 30
./scripts/generate-jwt-token-for-container.sh 2101 my-app 30
🧪 TEST ENDPOINT
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@@ -34,36 +39,35 @@ curl -k \
-d '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}' \
https://rpc-http-prv.d-bis.org
REF
cat <<EOF
🔍 CHECK STATUS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
ssh root@${PROXMOX_HOST_ML110:-192.168.11.10} "pct exec 2501 -- systemctl status nginx jwt-validator"
ssh root@${JWT_HOST} "pct exec ${JWT_VMID} -- systemctl status nginx jwt-validator"
📊 VIEW LOGS
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Access logs
ssh root@${PROXMOX_HOST_ML110:-192.168.11.10} "pct exec 2501 -- tail -f /var/log/nginx/rpc-http-prv-access.log"
ssh root@${JWT_HOST} "pct exec ${JWT_VMID} -- tail -f /var/log/nginx/rpc-http-prv-access.log"
# Error logs
ssh root@${PROXMOX_HOST_ML110:-192.168.11.10} "pct exec 2501 -- tail -f /var/log/nginx/rpc-http-prv-error.log"
ssh root@${JWT_HOST} "pct exec ${JWT_VMID} -- tail -f /var/log/nginx/rpc-http-prv-error.log"
# JWT validator logs
ssh root@${PROXMOX_HOST_ML110:-192.168.11.10} "pct exec 2501 -- journalctl -u jwt-validator -f"
ssh root@${JWT_HOST} "pct exec ${JWT_VMID} -- journalctl -u jwt-validator -f"
🔧 TROUBLESHOOTING
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Restart services
ssh root@${PROXMOX_HOST_ML110:-192.168.11.10} "pct exec 2501 -- systemctl restart nginx jwt-validator"
ssh root@${JWT_HOST} "pct exec ${JWT_VMID} -- systemctl restart nginx jwt-validator"
# Test nginx config
ssh root@${PROXMOX_HOST_ML110:-192.168.11.10} "pct exec 2501 -- nginx -t"
ssh root@${JWT_HOST} "pct exec ${JWT_VMID} -- nginx -t"
# Check JWT secret
ssh root@${PROXMOX_HOST_ML110:-192.168.11.10} "pct exec 2501 -- cat /etc/nginx/jwt_secret"
ssh root@${JWT_HOST} "pct exec ${JWT_VMID} -- cat /etc/nginx/jwt_secret"
📚 DOCUMENTATION
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@@ -71,4 +75,4 @@ docs/04-configuration/RPC_JWT_AUTHENTICATION.md
docs/04-configuration/RPC_JWT_SETUP_COMPLETE.md
docs/04-configuration/RPC_DNS_CONFIGURATION.md
REF
EOF

View File

@@ -31,7 +31,7 @@
**ensure-legacy-monitor-networkd-via-ssh.sh** — SSHs to r630-01 and fixes the legacy `3000`-`3003` monitor/RPC-adjacent LXCs so `systemd-networkd` is enabled host-side and started in-guest. This is the safe path for unprivileged guests where `systemctl enable` fails from inside the CT. `--dry-run` / `--apply`; same `PROXMOX_SAFE_DEFAULTS` behavior as other guarded maintenance scripts.
**check-and-fix-explorer-lag.sh** — Checks RPC vs Blockscout block; if lag > threshold (default 500), runs `fix-explorer-indexer-lag.sh` (restart Blockscout).
**check-and-fix-explorer-lag.sh** — Checks both RPC vs Blockscout head lag and recent transaction visibility lag. If the explorer head is behind, or if recent on-chain non-empty blocks are present but the explorer's latest indexed transaction trails them by more than the configured threshold, it runs `fix-explorer-indexer-lag.sh` (restart Blockscout). It does **not** restart for a genuinely quiet chain with empty recent head blocks.
**schedule-explorer-lag-cron.sh** — Install cron for lag check-and-fix: every 6 hours (0, 6, 12, 18). Log: `logs/explorer-lag-fix.log`. Use `--show` to print the line, `--install` to add to crontab, `--remove` to remove. Run from a persistent host checkout; set `CRON_PROJECT_ROOT=/srv/proxmox` when installing on a Proxmox node.
**All schedule-*.sh installers** — Refuse transient roots such as `/tmp/...`. Install from a persistent checkout only.

View File

@@ -1,8 +1,12 @@
#!/usr/bin/env bash
# Check explorer indexer lag; if above threshold, run fix-explorer-indexer-lag.sh (restart Blockscout).
# Check explorer data-plane lag; if block lag or transaction visibility lag is high,
# run fix-explorer-indexer-lag.sh (restart Blockscout).
# For use from cron. Run from project root. Requires LAN/SSH to r630-02 for the fix.
# Usage: bash scripts/maintenance/check-and-fix-explorer-lag.sh
# Env: EXPLORER_INDEXER_LAG_THRESHOLD (default 500)
# Env:
# EXPLORER_INDEXER_LAG_THRESHOLD (default 500)
# EXPLORER_TX_VISIBILITY_LAG_BLOCK_THRESHOLD (default 32)
# EXPLORER_TX_VISIBILITY_SAMPLE_BLOCKS (default 128)
set -euo pipefail
@@ -14,6 +18,8 @@ IP_RPC_2201="${RPC_2201:-192.168.11.221}"
IP_BLOCKSCOUT="${IP_BLOCKSCOUT:-192.168.11.140}"
BLOCKSCOUT_API_PORT="${BLOCKSCOUT_API_PORT:-4000}"
EXPLORER_INDEXER_LAG_THRESHOLD="${EXPLORER_INDEXER_LAG_THRESHOLD:-500}"
EXPLORER_TX_VISIBILITY_LAG_BLOCK_THRESHOLD="${EXPLORER_TX_VISIBILITY_LAG_BLOCK_THRESHOLD:-32}"
EXPLORER_TX_VISIBILITY_SAMPLE_BLOCKS="${EXPLORER_TX_VISIBILITY_SAMPLE_BLOCKS:-128}"
get_rpc_block() {
local hex
@@ -32,8 +38,45 @@ get_explorer_block() {
[ -n "$block" ] && echo "$block"
}
get_explorer_latest_tx_block() {
local body block
body=$(curl -sf --max-time 10 "http://${IP_BLOCKSCOUT}:${BLOCKSCOUT_API_PORT}/api/v2/stats" 2>/dev/null || true)
[ -z "$body" ] && return
block=$(echo "$body" | jq -r '.freshness.latest_indexed_transaction.block_number // empty' 2>/dev/null || true)
[ -n "$block" ] && [ "$block" != "null" ] && echo "$block"
}
get_recent_chain_activity() {
local latest="$1"
local newest_non_empty=""
local total_txs=0
local non_empty_count=0
local block_num block_json tx_count
for ((offset=0; offset<EXPLORER_TX_VISIBILITY_SAMPLE_BLOCKS; offset++)); do
block_num=$((latest - offset))
[ "$block_num" -lt 0 ] && break
block_json=$(curl -sf --max-time 10 -X POST -H "Content-Type: application/json" \
-d "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBlockByNumber\",\"params\":[\"$(printf '0x%x' "$block_num")\",false],\"id\":1}" \
"http://${IP_RPC_2201}:8545" 2>/dev/null || true)
[ -z "$block_json" ] && continue
tx_count=$(echo "$block_json" | jq -r '.result.transactions | length' 2>/dev/null || echo "")
[ -z "$tx_count" ] && continue
total_txs=$((total_txs + tx_count))
if [ "$tx_count" -gt 0 ] 2>/dev/null; then
non_empty_count=$((non_empty_count + 1))
if [ -z "$newest_non_empty" ]; then
newest_non_empty="$block_num"
fi
fi
done
printf '%s %s %s\n' "${newest_non_empty:-}" "$non_empty_count" "$total_txs"
}
rpc_block=$(get_rpc_block)
explorer_block=$(get_explorer_block)
explorer_latest_tx_block=$(get_explorer_latest_tx_block)
if [ -z "$rpc_block" ] || [ -z "$explorer_block" ]; then
echo "$(date -Iseconds) SKIP (RPC or Blockscout unreachable)"
@@ -42,9 +85,31 @@ fi
lag=$((rpc_block - explorer_block))
if [ "$lag" -le "${EXPLORER_INDEXER_LAG_THRESHOLD}" ] 2>/dev/null; then
echo "$(date -Iseconds) OK lag=$lag (threshold=${EXPLORER_INDEXER_LAG_THRESHOLD})"
echo "$(date -Iseconds) OK block_lag=$lag (threshold=${EXPLORER_INDEXER_LAG_THRESHOLD})"
else
echo "$(date -Iseconds) BLOCK_LAG $lag > ${EXPLORER_INDEXER_LAG_THRESHOLD} — running fix"
bash "$PROJECT_ROOT/scripts/fix-explorer-indexer-lag.sh" 2>&1 || true
exit 0
fi
echo "$(date -Iseconds) LAG $lag > ${EXPLORER_INDEXER_LAG_THRESHOLD} — running fix"
bash "$PROJECT_ROOT/scripts/fix-explorer-indexer-lag.sh" 2>&1 || true
read -r newest_non_empty recent_non_empty_count recent_tx_total <<<"$(get_recent_chain_activity "$rpc_block")"
if [ -z "$newest_non_empty" ]; then
echo "$(date -Iseconds) QUIET_CHAIN sample_blocks=${EXPLORER_TX_VISIBILITY_SAMPLE_BLOCKS} non_empty=0 txs=0"
exit 0
fi
if [ -z "$explorer_latest_tx_block" ]; then
echo "$(date -Iseconds) TX_VISIBILITY missing explorer latest tx while chain shows recent txs (newest_non_empty=$newest_non_empty) — running fix"
bash "$PROJECT_ROOT/scripts/fix-explorer-indexer-lag.sh" 2>&1 || true
exit 0
fi
tx_visibility_gap=$((newest_non_empty - explorer_latest_tx_block))
if [ "$tx_visibility_gap" -gt "${EXPLORER_TX_VISIBILITY_LAG_BLOCK_THRESHOLD}" ] 2>/dev/null; then
echo "$(date -Iseconds) TX_VISIBILITY_LAG gap=$tx_visibility_gap newest_non_empty=$newest_non_empty explorer_latest_tx=$explorer_latest_tx_block sample_non_empty=$recent_non_empty_count sample_txs=$recent_tx_total — running fix"
bash "$PROJECT_ROOT/scripts/fix-explorer-indexer-lag.sh" 2>&1 || true
exit 0
fi
echo "$(date -Iseconds) OK tx_visibility_gap=$tx_visibility_gap newest_non_empty=$newest_non_empty explorer_latest_tx=$explorer_latest_tx_block sample_non_empty=$recent_non_empty_count sample_txs=$recent_tx_total"

View File

@@ -2,7 +2,7 @@
# Deep dive: diagnose and fix every 502 from E2E routing.
# For each known backend (domain → IP:port), SSH to Proxmox, check container + port, fix.
#
# Usage: ./scripts/maintenance/diagnose-and-fix-502s-via-ssh.sh [--dry-run] [--diagnose-only]
# Usage: ./scripts/maintenance/diagnose-and-fix-502s-via-ssh.sh [--apply] [--dry-run] [--diagnose-only]
# Requires: SSH to r630-01, r630-02, ml110 (key-based).
set -euo pipefail
@@ -10,12 +10,14 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" ]] && source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# HYBX RPC 2503,2504,2505 are on ML110 per get_host_for_vmid
type get_host_for_vmid &>/dev/null && HYBX_HOST="$(get_host_for_vmid 2503)" || HYBX_HOST="$R630_01"
DRY_RUN=false
DRY_RUN=true
DIAGNOSE_ONLY=false
for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true; [[ "$a" == "--diagnose-only" ]] && DIAGNOSE_ONLY=true; done
for a in "$@"; do
[[ "$a" == "--dry-run" ]] && DRY_RUN=true
[[ "$a" == "--apply" ]] && DRY_RUN=false
[[ "$a" == "--diagnose-only" ]] && DIAGNOSE_ONLY=true
done
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
@@ -25,7 +27,7 @@ ML110="${PROXMOX_HOST_ML110:-192.168.11.10}"
# DBIS (r630-01)
# rpc-http-prv (r630-01)
# MIM4U www (r630-02)
# Alltra/HYBX RPC (r630-01 per BESU_NODES)
# Edge RPC (r630-01)
# Cacti-alltra/hybx - IPs .177 and .251 (VMID TBD)
BACKENDS=(
"dbis-admin.d-bis.org|192.168.11.130|80|10130|$R630_01|dbis-frontend nginx"
@@ -34,12 +36,12 @@ BACKENDS=(
"dbis-api-2.d-bis.org|192.168.11.156|3000|10151|$R630_01|dbis-api node"
"rpc-http-prv.d-bis.org|192.168.11.211|8545|2101|$R630_01|besu RPC"
"www.mim4u.org|192.168.11.37|80|7810|$R630_02|mim-web nginx"
"rpc-alltra.d-bis.org|192.168.11.172|8545|2500|$R630_01|besu alltra"
"rpc-alltra-2.d-bis.org|192.168.11.173|8545|2501|$R630_01|besu alltra"
"rpc-alltra-3.d-bis.org|192.168.11.174|8545|2502|$R630_01|besu alltra"
"rpc-hybx.d-bis.org|192.168.11.246|8545|2503|${HYBX_HOST:-$R630_01}|besu hybx"
"rpc-hybx-2.d-bis.org|192.168.11.247|8545|2504|${HYBX_HOST:-$R630_01}|besu hybx"
"rpc-hybx-3.d-bis.org|192.168.11.248|8545|2505|${HYBX_HOST:-$R630_01}|besu hybx"
"rpc-alltra.d-bis.org|192.168.11.172|8545|2420|$R630_01|besu alltra"
"rpc-alltra-2.d-bis.org|192.168.11.173|8545|2430|$R630_01|besu alltra"
"rpc-alltra-3.d-bis.org|192.168.11.174|8545|2440|$R630_01|besu alltra"
"rpc-hybx.d-bis.org|192.168.11.246|8545|2460|$R630_01|besu hybx"
"rpc-hybx-2.d-bis.org|192.168.11.247|8545|2470|$R630_01|besu hybx"
"rpc-hybx-3.d-bis.org|192.168.11.248|8545|2480|$R630_01|besu hybx"
"cacti-alltra.d-bis.org|192.168.11.177|80|5201|$R630_02|cacti web"
"cacti-hybx.d-bis.org|192.168.11.251|80|5202|$R630_02|cacti web"
)
@@ -86,14 +88,6 @@ for line in "${BACKENDS[@]}"; do
continue
fi
status=$(run_ssh "$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "missing")
# If HYBX (25032505) empty on ML110, try r630-01
if [[ -z "$status" || "$status" == "missing" ]] && [[ "$vmid" == "2503" || "$vmid" == "2504" || "$vmid" == "2505" ]] && [[ "$host" == "$ML110" ]]; then
alt_status=$(run_ssh "$R630_01" "pct status $vmid 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
if [[ "$alt_status" == "running" ]]; then
host="$R630_01"
status="running"
fi
fi
if [[ "$status" != "running" ]]; then
log_warn " Container $vmid status: ${status:-empty} (host $host)"
if [[ "$DRY_RUN" != true && "$DIAGNOSE_ONLY" != true ]]; then

View File

@@ -1,31 +1,59 @@
#!/usr/bin/env bash
# Collect RPC diagnostics for VMIDs 2101 and 2500-2505: listening ports and Besu journal.
# Run from project root. Requires SSH to r630-01 (and ml110 if 2503-2505 are there).
# Collect RPC diagnostics for selected core/edge RPC VMIDs: listening ports and Besu journal.
# Run from project root. Uses the shared live VMID placement map.
# Output is suitable for piping to a file or tee.
#
# Usage: ./scripts/maintenance/diagnose-rpc-502s.sh
# Usage:
# ./scripts/maintenance/diagnose-rpc-502s.sh
# ./scripts/maintenance/diagnose-rpc-502s.sh --vmid 2101
# See: docs/00-meta/502_DEEP_DIVE_ROOT_CAUSES_AND_FIXES.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
R630_01="${PROXMOX_HOST_R630_01:-${PROXMOX_R630_01:-192.168.11.11}}"
ML110="${PROXMOX_HOST_ML110:-${PROXMOX_ML110:-192.168.11.10}}"
SSH_OPTS="-o ConnectTimeout=8 -o StrictHostKeyChecking=accept-new"
TARGET_VMIDS=()
usage() {
cat <<'EOF'
Usage: ./scripts/maintenance/diagnose-rpc-502s.sh [--vmid <N>]
Options:
--vmid <N> Limit to one VMID; repeatable
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
TARGET_VMIDS+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
run() { ssh $SSH_OPTS "root@$1" "$2" 2>/dev/null || echo "(command failed or host unreachable)"; }
# VMID -> host (2503-2505 may be on ml110 or r630-01)
get_host() {
local v=$1
case $v in
2101|2500|2501|2502) echo "$R630_01" ;;
2503|2504|2505) echo "$R630_01" ;; # default; try ml110 if not running on r630
*) echo "$R630_01" ;;
esac
selected_vmid() {
local vmid="$1"
[[ ${#TARGET_VMIDS[@]} -eq 0 ]] && return 0
local wanted
for wanted in "${TARGET_VMIDS[@]}"; do
[[ "$vmid" == "$wanted" ]] && return 0
done
return 1
}
echo "=============================================="
@@ -33,20 +61,12 @@ echo "RPC 502 diagnostics — $(date -Iseconds)"
echo "=============================================="
echo ""
for vmid in 2101 2500 2501 2502 2503 2504 2505; do
host=$(get_host "$vmid")
for vmid in 2101 2420 2430 2440 2460 2470 2480; do
selected_vmid "$vmid" || continue
host="$(get_host_for_vmid "$vmid")"
status=$(run "$host" "pct status $vmid 2>/dev/null | awk '{print \$2}'" || echo "unknown")
echo "--- VMID $vmid @ $host (status: $status) ---"
if [[ "$status" != "running" ]]; then
# If on r630 and not running, try ml110 for 2503-2505
if [[ "$vmid" =~ ^250[345]$ ]] && [[ "$host" == "$R630_01" ]]; then
status2=$(run "$ML110" "pct status $vmid 2>/dev/null | awk '{print \$2}'" || echo "")
if [[ "$status2" == "running" ]]; then
host="$ML110"
status="$status2"
echo " (found on $ML110)"
fi
fi
if [[ "$status" != "running" ]]; then
echo " Container not running. Skip."
echo ""
@@ -66,5 +86,5 @@ done
echo "=============================================="
echo "If 8545 is not in ss -tlnp, Besu is not binding. Check journal for genesis/nodekey/config errors."
echo "Then run: ./scripts/besu/fix-all-besu-nodes.sh (optionally --no-restart first)"
echo "Then run: ./scripts/besu/fix-all-besu-nodes.sh --vmid <N> (optionally --no-restart first)"
echo "=============================================="

View File

@@ -1,8 +1,8 @@
#!/usr/bin/env bash
# Fix all 502 backends using all means: DBIS (nginx + dbis-api), Besu (2101 + 2500-2505), Cacti (nginx).
# Fix all 502 backends using all means: DBIS (nginx + dbis-api), Besu (2101 + edge RPCs), Cacti (nginx).
# Run from project root. Requires SSH to r630-01, r630-02.
#
# Usage: ./scripts/maintenance/fix-all-502s-comprehensive.sh [--dry-run]
# Usage: ./scripts/maintenance/fix-all-502s-comprehensive.sh [--apply] [--dry-run]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
@@ -13,8 +13,11 @@ PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
R630_01="${PROXMOX_HOST_R630_01:-192.168.11.11}"
R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
DRY_RUN=false
[[ "${1:-}" == "--dry-run" ]] && DRY_RUN=true
DRY_RUN=true
for arg in "$@"; do
[[ "$arg" == "--dry-run" ]] && DRY_RUN=true
[[ "$arg" == "--apply" ]] && DRY_RUN=false
done
run() {
if $DRY_RUN; then echo -e "\033[0;36m[DRY-RUN]\033[0m Would run on $1: ${2:0:80}..."; return 0; fi
@@ -69,16 +72,12 @@ if run "$R630_01" "pct status 2101 2>/dev/null | awk '{print \$2}'" 2>/dev/null
fi
if $DRY_RUN; then log "Would run fix-core-rpc-2101.sh"; else "${SCRIPT_DIR}/fix-core-rpc-2101.sh" --apply 2>/dev/null && ok "2101 fix run" || warn "2101 fix had issues"; fi
# --- 2500-2505 Alltra/HYBX RPC: ensure nodekey then start besu ---
for v in 2500 2501 2502 2503 2504 2505; do
# --- Edge RPCs: ensure nodekey then start besu ---
for v in 2420 2430 2440 2460 2470 2480; do
host="$R630_01"
type get_host_for_vmid &>/dev/null && host="$(get_host_for_vmid "$v" 2>/dev/null)" || true
[[ -z "$host" ]] && host="$R630_01"
status=$(run "$host" "pct status $v 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
if [[ "$status" != "running" ]] && [[ "$host" == "192.168.11.10" ]]; then
status=$(run "$R630_01" "pct status $v 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "")
[[ "$status" == "running" ]] && host="$R630_01"
fi
log "$v (rpc-alltra/hybx): nodekey + Besu on $host..."
if [[ "$status" == "running" ]]; then
run "$host" "pct exec $v -- sh -c 'mkdir -p /data/besu; [ -f /data/besu/nodekey ] || [ -f /data/besu/key ] || openssl rand -hex 32 > /data/besu/nodekey'" 2>/dev/null || true

View File

@@ -9,9 +9,10 @@
# r630-03 / local-lvm: everything else on ml110 (validators, core-2, private, 2304, sentries, thirdweb)
#
# Usage (from LAN, SSH key to Proxmox nodes):
# ./scripts/maintenance/migrate-ml110-besu-rpc-to-r630-02-03.sh # migrate all still on ml110
# ./scripts/maintenance/migrate-ml110-besu-rpc-to-r630-02-03.sh # dry-run all still on ml110
# ./scripts/maintenance/migrate-ml110-besu-rpc-to-r630-02-03.sh --apply
# ./scripts/maintenance/migrate-ml110-besu-rpc-to-r630-02-03.sh --dry-run
# ./scripts/maintenance/migrate-ml110-besu-rpc-to-r630-02-03.sh 2305 # single VMID
# ./scripts/maintenance/migrate-ml110-besu-rpc-to-r630-02-03.sh --vmid 2305 # single VMID
#
# Prerequisites: ml110, r630-02, r630-03 in same cluster; storages active on targets.
#
@@ -20,7 +21,7 @@ set -uo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
SRC_IP="${PROXMOX_HOST_ML110:-192.168.11.10}"
SSH_OPTS="-o BatchMode=yes -o ConnectTimeout=20 -o StrictHostKeyChecking=accept-new"
@@ -45,12 +46,45 @@ done
ALL_ORDER=(1503 1504 1505 1506 1507 1508 2400 2402 2403 2305 2306 2307 2308 2304 2301 2102 1003 1004)
DRY_RUN=false
DRY_RUN=true
SINGLE=()
for arg in "$@"; do
[[ "$arg" == "--dry-run" ]] && DRY_RUN=true
[[ "$arg" =~ ^[0-9]+$ ]] && SINGLE+=("$arg")
usage() {
cat <<'EOF'
Usage: ./scripts/maintenance/migrate-ml110-besu-rpc-to-r630-02-03.sh [--apply] [--dry-run] [--vmid <N>]
Options:
--dry-run Print intended migration commands only (default)
--apply Run pct migrate for selected VMIDs
--vmid <N> Limit to one VMID; repeatable
EOF
}
while [[ $# -gt 0 ]]; do
case "$1" in
--dry-run)
DRY_RUN=true
shift
;;
--apply)
DRY_RUN=false
shift
;;
--vmid)
[[ $# -ge 2 ]] || { usage >&2; exit 2; }
SINGLE+=("$2")
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown argument: $1" >&2
usage >&2
exit 2
;;
esac
done
log() { echo "[$(date -Iseconds)] $*"; }

View File

@@ -4,6 +4,7 @@
#
# Usage:
# ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh
# ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh --apply
# ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh --no-npm
# ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh --e2e
# ./scripts/maintenance/run-all-maintenance-via-proxmox-ssh.sh --verbose # show all step output (no 2>/dev/null)
@@ -15,7 +16,7 @@
# 0. make-rpc-vmids-writable-via-ssh.sh — Stop 2101,2500-2505; e2fsck rootfs; start (r630-01)
# 1. resolve-and-fix-all-via-proxmox-ssh.sh — Dev VM IP, start containers, DBIS (r630-01, ml110)
# 2. fix-rpc-2101-jna-reinstall.sh — 2101 Besu reinstall (r630-01)
# 3. install-besu-permanent-on-missing-nodes.sh — Besu on 2500-2505, 1505-1508 (r630-01, ml110)
# 3. install-besu-permanent-on-missing-nodes.sh — Besu on 2420-2480 and 1505-1508
# 4. address-all-remaining-502s.sh — backends + NPM proxy + RPC diagnostics
# 5. [optional] verify-end-to-end-routing.sh — E2E (if --e2e)
#
@@ -36,12 +37,13 @@ R630_02="${PROXMOX_HOST_R630_02:-192.168.11.12}"
SKIP_NPM=false
RUN_E2E=false
DRY_RUN=false
DRY_RUN=true
VERBOSE=false
for arg in "${@:-}"; do
[[ "$arg" == "--no-npm" ]] && SKIP_NPM=true
[[ "$arg" == "--e2e" ]] && RUN_E2E=true
[[ "$arg" == "--dry-run" ]] && DRY_RUN=true
[[ "$arg" == "--apply" ]] && DRY_RUN=false
[[ "$arg" == "--verbose" ]] && VERBOSE=true
done
@@ -128,8 +130,8 @@ echo ""
# 3. Install Besu on missing nodes (r630-01, ml110)
echo "[3/5] Install Besu on missing nodes..."
echo "--- 3/5: Install Besu on missing nodes (r630-01, ml110) ---"
if run_step "${PROJECT_ROOT}/scripts/besu/install-besu-permanent-on-missing-nodes.sh"; then
echo "--- 3/5: Install Besu on missing nodes ---"
if run_step "${PROJECT_ROOT}/scripts/besu/install-besu-permanent-on-missing-nodes.sh" --apply; then
echo " Done."
else
echo " Step had failures (e.g. disk full or read-only CT)."

View File

@@ -1,8 +1,18 @@
#!/usr/bin/env bash
# Migrate containers 1504, 2503, 2504, 6201 from ml110 to pve using local storage
# HISTORICAL SCRIPT
# This migration plan targets retired 25xx-era placement and should not be used for current fleet moves.
set -euo pipefail
if [[ "${HISTORICAL_ALLOW_RUN:-0}" != "1" ]]; then
echo "HISTORICAL: migrate-containers-to-pve-local.sh is not a current migration runbook." >&2
echo "Use docs/04-configuration/PROXMOX_LOAD_BALANCING_RUNBOOK.md and current health planners instead." >&2
echo "Set HISTORICAL_ALLOW_RUN=1 only if you intentionally need this legacy script." >&2
exit 1
fi
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
@@ -220,4 +230,3 @@ main() {
}
main "$@"

View File

@@ -1,8 +1,18 @@
#!/usr/bin/env bash
# Migrate containers 1504, 2503, 2504, 6201 from ml110 to r630-01 using thin1 storage
# HISTORICAL SCRIPT
# This migration plan targets retired 25xx-era placement and should not be used for current fleet moves.
set -euo pipefail
if [[ "${HISTORICAL_ALLOW_RUN:-0}" != "1" ]]; then
echo "HISTORICAL: migrate-to-pve-thin1.sh is not a current migration runbook." >&2
echo "Use docs/04-configuration/PROXMOX_LOAD_BALANCING_RUNBOOK.md and current health planners instead." >&2
echo "Set HISTORICAL_ALLOW_RUN=1 only if you intentionally need this legacy script." >&2
exit 1
fi
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
@@ -266,4 +276,3 @@ main() {
}
main "$@"

View File

@@ -4,14 +4,12 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
VMID=2501
VMID="${1:-2101}"
PROXMOX_HOST="$(get_host_for_vmid "$VMID")"
# Colors
RED='\033[0;31m'
@@ -34,7 +32,7 @@ ISSUES=0
WARNINGS=0
# Check 1: Container status
check "Checking VMID $VMID status..."
check "Checking VMID $VMID status on $PROXMOX_HOST..."
STATUS=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct status $VMID 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "unknown")
@@ -71,8 +69,8 @@ if [[ "$EXISTING_CONFIG" == "yes" ]]; then
"pct exec $VMID -- grep -E 'server_name.*rpc-' /etc/nginx/sites-available/rpc 2>/dev/null | head -3" || echo "")
if echo "$DOMAINS" | grep -q "rpc-http-pub\|rpc-ws-pub"; then
warn "Existing config uses rpc-http-pub/rpc-ws-pub (should be on VMID 2502)"
warn "Script will create new config 'rpc-perm' for rpc-http-prv/rpc-ws-prv"
warn "Existing config uses rpc-http-pub/rpc-ws-pub (should not be reused for the JWT-protected private endpoint)"
warn "Script will create new config 'rpc-perm' for rpc-http-prv/rpc-ws-prv on VMID $VMID"
warn "Old config will be disabled but not deleted"
fi
else
@@ -135,17 +133,16 @@ echo "Summary"
echo "=========================================="
if [ $ISSUES -eq 0 ] && [ $WARNINGS -eq 0 ]; then
info "All checks passed! Ready to run configure-nginx-jwt-auth.sh"
info "All checks passed! Ready to run configure-nginx-jwt-auth.sh for VMID $VMID"
exit 0
elif [ $ISSUES -eq 0 ]; then
warn "$WARNINGS warning(s) found, but setup can proceed"
echo ""
info "Ready to run configure-nginx-jwt-auth.sh"
info "Ready to run configure-nginx-jwt-auth.sh for VMID $VMID"
exit 0
else
error "$ISSUES issue(s) found that must be resolved first"
echo ""
error "Please fix the issues above before running configure-nginx-jwt-auth.sh"
error "Please fix the issues above before running configure-nginx-jwt-auth.sh for VMID $VMID"
exit 1
fi

View File

@@ -20,7 +20,7 @@ if $DRY_RUN; then
echo "=== Completable from anywhere (--dry-run: commands only) ==="
echo ""
echo "10 steps (see script body). Summary:"
echo "1. Config validation: bash scripts/validation/validate-config-files.sh [--dry-run]"
echo "1. Config validation: bash scripts/validation/validate-config-files.sh [--dry-run] (includes GRU reference-primacy + peg-bands hook when cross-chain-pmm-lps is present)"
echo " (optional: python3 -m pip install check-jsonschema — step 1 then validates config/dbis-institutional JSON Schemas too)"
echo "2. On-chain check (138): SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh || true"
echo "3. All validation: bash scripts/verify/run-all-validation.sh --skip-genesis"

View File

@@ -1,6 +1,16 @@
#!/usr/bin/env bash
set -euo pipefail
# HISTORICAL SCRIPT
# VMID 2500 was replaced by the current RPC fleet (2101/2201/2301/24xx).
# This script is kept for migration-era forensics only.
if [[ "${HISTORICAL_ALLOW_RUN:-0}" != "1" ]]; then
echo "HISTORICAL: troubleshoot-rpc-2500.sh targets retired VMID 2500 and is not a current runbook." >&2
echo "Use current health checks such as scripts/health/check-rpc-vms-health.sh --vmid <N> instead." >&2
echo "Set HISTORICAL_ALLOW_RUN=1 only if you intentionally need this legacy script." >&2
exit 1
fi
# Troubleshoot RPC-01 at VMID 2500
# Usage: ./troubleshoot-rpc-2500.sh
@@ -243,4 +253,3 @@ echo ""
log_info "For detailed logs, run:"
log_info " pct exec $VMID -- journalctl -u besu-rpc.service -f"
echo ""

View File

@@ -54,6 +54,7 @@ if $DRY_RUN; then
echo " REQUIRED_FILES: ${REQUIRED_FILES:-<default: config/ip-addresses.conf, .env.example, token-mapping*.json, gru-transport-active.json, gru-iso4217-currency-manifest.json, gru-governance-supervision-profile.json>}"
echo " OPTIONAL_ENV: ${OPTIONAL_ENV:-<empty; set VALIDATE_OPTIONAL_ENV for Proxmox API vars>}"
echo " config/xdc-zero: validate-xdc-zero-config.sh (when config/xdc-zero exists)"
echo " GRU reference primacy: scripts/verify/check-gru-reference-primacy-integration.sh (doc links + peg-bands hook)"
exit 0
fi
@@ -126,6 +127,12 @@ function readJson(relativePath) {
return JSON.parse(fs.readFileSync(path.join(projectRoot, relativePath), 'utf8'));
}
function readJsonMaybe(relativePath) {
const full = path.join(projectRoot, relativePath);
if (!fs.existsSync(full)) return null;
return JSON.parse(fs.readFileSync(full, 'utf8'));
}
function normalizeAddress(address) {
return typeof address === 'string' ? address.trim().toLowerCase() : '';
}
@@ -144,8 +151,17 @@ function refConfigured(ref) {
const active = readJson('config/gru-transport-active.json');
const multichain = readJson('config/token-mapping-multichain.json');
const deployment = readJson('cross-chain-pmm-lps/config/deployment-status.json');
const poolMatrix = readJson('cross-chain-pmm-lps/config/pool-matrix.json');
const deploymentRel = 'cross-chain-pmm-lps/config/deployment-status.json';
const poolMatrixRel = 'cross-chain-pmm-lps/config/pool-matrix.json';
const deployment = readJsonMaybe(deploymentRel);
const poolMatrix = readJsonMaybe(poolMatrixRel);
const hasDeploymentOverlay = deployment !== null && poolMatrix !== null;
if (!hasDeploymentOverlay) {
console.warn(
'[WARN] Missing cross-chain-pmm-lps deployment overlay (one of deployment-status.json / pool-matrix.json). '
+ 'Skipping deployment/pool-matrix cross-checks. For full checks: git submodule update --init cross-chain-pmm-lps'
);
}
const currencyManifest = readJson('config/gru-iso4217-currency-manifest.json');
const monetaryUnitManifest = readJson('config/gru-monetary-unit-manifest.json');
@@ -186,7 +202,8 @@ function getMappingToken(fromChainId, toChainId, mappingKey) {
}
function getExpectedPoolKey(chainId, mirroredSymbol) {
const chain = poolMatrix.chains?.[String(chainId)];
if (!hasDeploymentOverlay || !poolMatrix?.chains) return null;
const chain = poolMatrix.chains[String(chainId)];
const hubStable = typeof chain?.hubStable === 'string' ? chain.hubStable.trim() : '';
if (!hubStable) return null;
return `${chainId}-${mirroredSymbol}-${hubStable}`;
@@ -279,12 +296,14 @@ for (const pair of active.transportPairs || []) {
}
}
const deploymentChain = deployment.chains?.[String(destinationChainId)];
const deployedMirror = deploymentChain?.cwTokens?.[mirroredSymbol] || deploymentChain?.gasMirrors?.[mirroredSymbol];
if (!deploymentChain || !isNonZeroAddress(deployedMirror)) {
errors.push(`transportPairs[${pair.key}] mapping exists but deployment-status.json has no deployed ${mirroredSymbol} for chain ${destinationChainId}`);
} else if (mappingToken && normalizeAddress(deployedMirror) !== normalizeAddress(mappingToken.addressTo)) {
errors.push(`transportPairs[${pair.key}] deployment-status.json ${mirroredSymbol} does not match token-mapping-multichain.json addressTo`);
if (hasDeploymentOverlay) {
const deploymentChain = deployment.chains?.[String(destinationChainId)];
const deployedMirror = deploymentChain?.cwTokens?.[mirroredSymbol] || deploymentChain?.gasMirrors?.[mirroredSymbol];
if (!deploymentChain || !isNonZeroAddress(deployedMirror)) {
errors.push(`transportPairs[${pair.key}] mapping exists but deployment-status.json has no deployed ${mirroredSymbol} for chain ${destinationChainId}`);
} else if (mappingToken && normalizeAddress(deployedMirror) !== normalizeAddress(mappingToken.addressTo)) {
errors.push(`transportPairs[${pair.key}] deployment-status.json ${mirroredSymbol} does not match token-mapping-multichain.json addressTo`);
}
}
if (pair.assetClass === 'gas_native') {
@@ -349,19 +368,21 @@ for (const pool of active.publicPools || []) {
errors.push(`publicPools[${pool.key}] is active but has no poolAddress`);
continue;
}
const deploymentChain = deployment.chains?.[String(pool.chainId)];
const deployedStable = Array.isArray(deploymentChain?.pmmPools) ? deploymentChain.pmmPools : [];
const deployedVolatile = Array.isArray(deploymentChain?.pmmPoolsVolatile) ? deploymentChain.pmmPoolsVolatile : [];
const deployedGas = Array.isArray(deploymentChain?.gasPmmPools) ? deploymentChain.gasPmmPools : [];
const deploymentMatch = [...deployedStable, ...deployedVolatile].some(
(entry) => normalizeAddress(entry?.poolAddress) === normalizeAddress(pool.poolAddress)
);
const gasDeploymentMatch = deployedGas.some(
(entry) => normalizeAddress(entry?.poolAddress) === normalizeAddress(pool.poolAddress)
);
const stagedPlaceholder = String(pool.phase || '').toLowerCase().includes('staged');
if (!deploymentMatch && !gasDeploymentMatch && !stagedPlaceholder) {
errors.push(`publicPools[${pool.key}] is active but deployment-status.json does not contain its poolAddress`);
if (hasDeploymentOverlay) {
const deploymentChain = deployment.chains?.[String(pool.chainId)];
const deployedStable = Array.isArray(deploymentChain?.pmmPools) ? deploymentChain.pmmPools : [];
const deployedVolatile = Array.isArray(deploymentChain?.pmmPoolsVolatile) ? deploymentChain.pmmPoolsVolatile : [];
const deployedGas = Array.isArray(deploymentChain?.gasPmmPools) ? deploymentChain.gasPmmPools : [];
const deploymentMatch = [...deployedStable, ...deployedVolatile].some(
(entry) => normalizeAddress(entry?.poolAddress) === normalizeAddress(pool.poolAddress)
);
const gasDeploymentMatch = deployedGas.some(
(entry) => normalizeAddress(entry?.poolAddress) === normalizeAddress(pool.poolAddress)
);
const stagedPlaceholder = String(pool.phase || '').toLowerCase().includes('staged');
if (!deploymentMatch && !gasDeploymentMatch && !stagedPlaceholder) {
errors.push(`publicPools[${pool.key}] is active but deployment-status.json does not contain its poolAddress`);
}
}
}
}
@@ -423,59 +444,61 @@ for (const exposure of active.gasProtocolExposure || []) {
}
}
for (const [chainIdKey, deploymentChain] of Object.entries(deployment.chains || {})) {
const destinationChainId = Number(chainIdKey);
if (destinationChainId === canonicalChainId) continue;
if (deploymentChain?.bridgeAvailable !== true) continue;
if (hasDeploymentOverlay) {
for (const [chainIdKey, deploymentChain] of Object.entries(deployment.chains || {})) {
const destinationChainId = Number(chainIdKey);
if (destinationChainId === canonicalChainId) continue;
if (deploymentChain?.bridgeAvailable !== true) continue;
const mappingPair = getMappingPair(canonicalChainId, destinationChainId);
if (!mappingPair) continue;
const mappingPair = getMappingPair(canonicalChainId, destinationChainId);
if (!mappingPair) continue;
let compatible = true;
for (const token of enabledCanonicalTokens.filter((entry) => entry.registryFamily !== 'gas_native')) {
const mappingKey = String(token.mappingKey || '');
const mirroredSymbol = String(token.mirroredSymbol || '');
const mappingToken = mappingKey ? (mappingPair.tokens || []).find((entry) => entry.key === mappingKey) : null;
const deployedMirror = deploymentChain?.cwTokens?.[mirroredSymbol];
const expectedPoolKey = getExpectedPoolKey(destinationChainId, mirroredSymbol);
let compatible = true;
for (const token of enabledCanonicalTokens.filter((entry) => entry.registryFamily !== 'gas_native')) {
const mappingKey = String(token.mappingKey || '');
const mirroredSymbol = String(token.mirroredSymbol || '');
const mappingToken = mappingKey ? (mappingPair.tokens || []).find((entry) => entry.key === mappingKey) : null;
const deployedMirror = deploymentChain?.cwTokens?.[mirroredSymbol];
const expectedPoolKey = getExpectedPoolKey(destinationChainId, mirroredSymbol);
if (
!mappingKey ||
!mappingToken ||
!isNonZeroAddress(mappingToken.addressTo) ||
!isNonZeroAddress(deployedMirror) ||
normalizeAddress(mappingToken.addressTo) !== normalizeAddress(deployedMirror) ||
!expectedPoolKey
) {
compatible = false;
break;
if (
!mappingKey ||
!mappingToken ||
!isNonZeroAddress(mappingToken.addressTo) ||
!isNonZeroAddress(deployedMirror) ||
normalizeAddress(mappingToken.addressTo) !== normalizeAddress(deployedMirror) ||
!expectedPoolKey
) {
compatible = false;
break;
}
}
}
if (!compatible) continue;
if (!compatible) continue;
const enabledChain = enabledChainsArray.find((chain) => Number(chain.chainId) === destinationChainId);
if (!enabledChain) {
errors.push(`compatible destination chain ${destinationChainId} (${deploymentChain?.name || 'unknown'}) is missing from enabledDestinationChains`);
continue;
}
for (const token of enabledCanonicalTokens.filter((entry) => entry.registryFamily !== 'gas_native')) {
const expectedPairKey = `${canonicalChainId}-${destinationChainId}-${token.symbol}-${token.mirroredSymbol}`;
const expectedPoolKey = getExpectedPoolKey(destinationChainId, String(token.mirroredSymbol || ''));
const pair = transportPairsByKey.get(expectedPairKey);
if (!pair) {
errors.push(`compatible destination chain ${destinationChainId} is missing transport pair ${expectedPairKey}`);
const enabledChain = enabledChainsArray.find((chain) => Number(chain.chainId) === destinationChainId);
if (!enabledChain) {
errors.push(`compatible destination chain ${destinationChainId} (${deploymentChain?.name || 'unknown'}) is missing from enabledDestinationChains`);
continue;
}
if (expectedPoolKey && !publicPoolsByKey.has(expectedPoolKey)) {
errors.push(`compatible destination chain ${destinationChainId} is missing public pool placeholder ${expectedPoolKey}`);
}
for (const token of enabledCanonicalTokens.filter((entry) => entry.registryFamily !== 'gas_native')) {
const expectedPairKey = `${canonicalChainId}-${destinationChainId}-${token.symbol}-${token.mirroredSymbol}`;
const expectedPoolKey = getExpectedPoolKey(destinationChainId, String(token.mirroredSymbol || ''));
const pair = transportPairsByKey.get(expectedPairKey);
if (expectedPoolKey && !(pair.publicPoolKeys || []).includes(expectedPoolKey)) {
errors.push(`transportPairs[${pair.key}] must include the pool-matrix first-hop key ${expectedPoolKey}`);
if (!pair) {
errors.push(`compatible destination chain ${destinationChainId} is missing transport pair ${expectedPairKey}`);
continue;
}
if (expectedPoolKey && !publicPoolsByKey.has(expectedPoolKey)) {
errors.push(`compatible destination chain ${destinationChainId} is missing public pool placeholder ${expectedPoolKey}`);
}
if (expectedPoolKey && !(pair.publicPoolKeys || []).includes(expectedPoolKey)) {
errors.push(`transportPairs[${pair.key}] must include the pool-matrix first-hop key ${expectedPoolKey}`);
}
}
}
}
@@ -791,6 +814,15 @@ if command -v python3 &>/dev/null; then
done
fi
if [[ -f "$PROJECT_ROOT/scripts/verify/check-gru-reference-primacy-integration.sh" ]]; then
if bash "$PROJECT_ROOT/scripts/verify/check-gru-reference-primacy-integration.sh"; then
log_ok "GRU reference primacy (doc links + peg-bands gruPolicyIntegration when submodule present)"
else
log_err "GRU reference primacy integration check failed (see scripts/verify/check-gru-reference-primacy-integration.sh)"
ERRORS=$((ERRORS + 1))
fi
fi
if [[ $ERRORS -gt 0 ]]; then
log_err "Validation failed with $ERRORS error(s). Set VALIDATE_REQUIRED_FILES='path1 path2' to require specific files."
exit 1

View File

@@ -0,0 +1,100 @@
#!/usr/bin/env bash
# Verify GRU reference-primacy doc exists, peg-bands.json carries the machine hook
# (when cross-chain-pmm-lps is present), and canonical consumers still link the doc.
# Usage: bash scripts/verify/check-gru-reference-primacy-integration.sh
# Exit: 0 ok, 1 failure
set -euo pipefail

# Resolve the repo root relative to this script so the check works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Canonical doc under test, plus the bare filename used as the literal link
# marker when grepping consumer files below.
DOC_REL="docs/04-configuration/GRU_REFERENCE_PRIMACY_AND_MESH_EXECUTION_MODEL.md"
DOC="$PROJECT_ROOT/$DOC_REL"
MARKER="GRU_REFERENCE_PRIMACY_AND_MESH_EXECUTION_MODEL.md"

# Log helpers: errors to stderr, successes to stdout.
err() { printf '[ERROR] %s\n' "$*" >&2; }
ok() { printf '[OK] %s\n' "$*"; }

# Soft failures accumulate here; the script reports all of them before exiting.
ERRORS=0

# The doc itself is a hard requirement — bail out immediately if it is absent.
if [[ ! -f "$DOC" ]]; then
  err "Missing $DOC_REL"
  exit 1
fi
# Case-insensitive wording sanity check so a gutted/renamed doc does not pass.
if ! grep -qi 'reference primacy' "$DOC"; then
  err "$DOC_REL: expected 'reference primacy' wording"
  ERRORS=$((ERRORS + 1))
fi
ok "Found and scanned $DOC_REL"

# Parent-repo files that must keep a pointer to the canonical doc (integration, not orphan).
REQUIRED_LINK_FILES=(
  "$PROJECT_ROOT/docs/04-configuration/GRU_C_STAR_V2_STANDARDS_MATRIX_AND_IMPLEMENTATION_PLAN.md"
  "$PROJECT_ROOT/docs/MASTER_INDEX.md"
  "$PROJECT_ROOT/AGENTS.md"
  "$PROJECT_ROOT/docs/04-configuration/README.md"
  "$PROJECT_ROOT/docs/11-references/PMM_DEX_ROUTING_STATUS.md"
  "$PROJECT_ROOT/docs/11-references/LIQUIDITY_POOLS_MASTER_MAP.md"
  "$PROJECT_ROOT/docs/11-references/CHAIN138_GRID_6534_WALLET_FUNDING_PLAN.md"
  "$PROJECT_ROOT/docs/11-references/GRU_V2_PUBLIC_PROTOCOL_DEPLOYMENT_STATUS.md"
  "$PROJECT_ROOT/docs/03-deployment/PHASE_C_CW_AND_EDGE_POOLS_RUNBOOK.md"
  "$PROJECT_ROOT/.cursor/rules/project-doc-and-deployment-refs.mdc"
)
for f in "${REQUIRED_LINK_FILES[@]}"; do
  rel="${f#"$PROJECT_ROOT"/}"
  if [[ ! -f "$f" ]]; then
    err "Missing expected file: $rel"
    ERRORS=$((ERRORS + 1))
    continue
  fi
  # -F: match the marker as a fixed string, not a regex.
  if ! grep -qF "$MARKER" "$f"; then
    err "$rel: must reference $MARKER"
    ERRORS=$((ERRORS + 1))
  else
    ok "Link present: $rel"
  fi
done

# Submodule checks are soft: skip cleanly when cross-chain-pmm-lps is not checked out.
CC_README="$PROJECT_ROOT/cross-chain-pmm-lps/README.md"
if [[ -f "$CC_README" ]]; then
  if ! grep -qF "$MARKER" "$CC_README"; then
    err "cross-chain-pmm-lps/README.md must reference $MARKER"
    ERRORS=$((ERRORS + 1))
  else
    ok "Link present: cross-chain-pmm-lps/README.md"
  fi
else
  ok "Submodule cross-chain-pmm-lps absent — skipped README check"
fi

# peg-bands.json must carry the machine-readable policy hook when the
# submodule is present: .gruPolicyIntegration.referencePrimacyDoc (a string
# naming the canonical doc) and a non-empty .gruPolicyIntegration.meshExecutionRole.
PEGB="$PROJECT_ROOT/cross-chain-pmm-lps/config/peg-bands.json"
if [[ -f "$PEGB" ]]; then
  if command -v jq &>/dev/null; then
    if jq -e \
      '(.gruPolicyIntegration | type == "object")
       and (.gruPolicyIntegration.referencePrimacyDoc | type == "string")
       and (.gruPolicyIntegration.referencePrimacyDoc | test("GRU_REFERENCE_PRIMACY_AND_MESH_EXECUTION_MODEL"))
       and (.gruPolicyIntegration.meshExecutionRole | type == "string")
       and (.gruPolicyIntegration.meshExecutionRole | length > 0)' \
      "$PEGB" &>/dev/null; then
      ok "peg-bands.json: gruPolicyIntegration hook present"
    else
      err "peg-bands.json: missing or invalid .gruPolicyIntegration (referencePrimacyDoc + meshExecutionRole)"
      ERRORS=$((ERRORS + 1))
    fi
  else
    # If the config exists but jq does not, we cannot prove the hook — fail.
    err "jq not installed — cannot validate peg-bands.json gruPolicyIntegration"
    ERRORS=$((ERRORS + 1))
  fi
else
  ok "peg-bands.json absent (submodule not checked out) — skipped peg-bands hook check"
fi

if [[ "$ERRORS" -gt 0 ]]; then
  err "GRU reference primacy integration: $ERRORS error(s)"
  exit 1
fi
ok "GRU reference primacy integration checks passed."
exit 0

View File

@@ -93,7 +93,11 @@ echo " Router paused(): $router_paused"
echo
if [[ "$delivery_enabled" == "true" && "$shedding" == "false" && ( "$router_paused" == "false" || "$router_paused" == "unknown" ) ]]; then
echo "Lane is already deliverable."
if [[ "$bridge_ready" == "yes" ]]; then
echo "Lane is already deliverable."
else
echo "Lane is operational, but current bridge float is below the configured floor."
fi
exit 0
fi

View File

@@ -0,0 +1,219 @@
#!/usr/bin/env bash
set -euo pipefail
# Read-only accounting report for retained Mainnet quote-push surplus:
# - deployer ETH gas headroom vs configured floor
# - receiver quote balance and sweepable amount
# - recommended holdback for gas-cover accounting (in quote terms)
# - recommended recycleable quote for pool growth
#
# Env:
#   ETHEREUM_MAINNET_RPC                        required
#   PRIVATE_KEY or QUOTE_PUSH_DEPLOYER_ADDRESS  one required
#   AAVE_QUOTE_PUSH_RECEIVER_MAINNET            required (inferred from broadcast if unset)
#   QUOTE_PUSH_SURPLUS_TOKEN_MAINNET            optional; defaults to USDC mainnet
#   QUOTE_PUSH_RECEIVER_RESERVE_RAW             optional; receiver reserve kept after sweep (default 0)
#   QUOTE_PUSH_DEPLOYER_GAS_FLOOR_ETH           optional; default 0.003
#   QUOTE_PUSH_OPERATION_BUFFER_ETH             optional; default 0.0005
#   QUOTE_PUSH_NATIVE_TOKEN_PRICE               optional; default 3200 quote per native token
#
# Usage:
#   source scripts/lib/load-project-env.sh
#   bash scripts/verify/report-mainnet-aave-quote-push-surplus-accounting.sh
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
# Foundry project root (broadcast artifacts + env loader) for the quote-push contracts.
SMOM="${PROXMOX_ROOT}/smom-dbis-138"
# Default token addresses: mainnet USDC and the project's cWUSDC mirror.
DEFAULT_USDC_MAINNET="0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"
DEFAULT_CWUSDC_MAINNET="0x2de5F116bFcE3d0f922d9C8351e0c5Fc24b9284a"
# Best-effort env loading: both loaders are optional so the report still runs
# from a partial checkout when the env vars are provided externally.
# shellcheck disable=SC1091
source "${PROXMOX_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck disable=SC1091
source "${SMOM}/scripts/load-env.sh" >/dev/null 2>&1 || true
# Abort the whole report with an explicit message when a required CLI tool is
# not on PATH. Exits 1; prints to stderr.
require_cmd() {
  local tool="$1"
  if ! command -v "$tool" >/dev/null 2>&1; then
    echo "[fail] missing required command: $tool" >&2
    exit 1
  fi
}
# Abort with an explicit message when the named environment variable is unset
# or empty. Uses indirect expansion to look the variable up by name.
require_env() {
  local var_name="$1"
  local value="${!var_name:-}"
  [[ -n "$value" ]] && return 0
  echo "[fail] missing required env: $var_name" >&2
  exit 1
}
# Hard prerequisites: Foundry's cast for RPC reads, python3 for the
# arithmetic/report, and a mainnet RPC endpoint to read from.
require_cmd cast
require_cmd python3
require_env ETHEREUM_MAINNET_RPC
# Print the newest CREATE address of AaveQuotePushFlashReceiver from the
# chain-1 Foundry broadcast log. Returns 1 (prints nothing) when the log is
# missing or jq is unavailable.
pick_latest_receiver() {
  local broadcast_json="${SMOM}/broadcast/DeployAaveQuotePushFlashReceiver.s.sol/1/run-latest.json"
  [[ -f "$broadcast_json" ]] || return 1
  command -v jq >/dev/null 2>&1 || return 1
  jq -r '.transactions[]? | select(.transactionType == "CREATE" and .contractName == "AaveQuotePushFlashReceiver") | .contractAddress' \
    "$broadcast_json" | tail -n1
}
# Print the newest CREATE address of QuotePushTreasuryManager from the
# chain-1 Foundry broadcast log. Returns 1 (prints nothing) when the log is
# missing or jq is unavailable.
pick_latest_manager() {
  local broadcast_json="${SMOM}/broadcast/DeployQuotePushTreasuryManager.s.sol/1/run-latest.json"
  [[ -f "$broadcast_json" ]] || return 1
  command -v jq >/dev/null 2>&1 || return 1
  jq -r '.transactions[]? | select(.transactionType == "CREATE" and .contractName == "QuotePushTreasuryManager") | .contractAddress' \
    "$broadcast_json" | tail -n1
}
# Resolve the receiver/manager addresses: explicit env wins; otherwise fall
# back to the latest Foundry broadcast artifact. The "null" guard filters
# jq's literal null output when no CREATE record matched.
if [[ -z "${AAVE_QUOTE_PUSH_RECEIVER_MAINNET:-}" ]]; then
  inferred_receiver="$(pick_latest_receiver || true)"
  if [[ -n "$inferred_receiver" && "$inferred_receiver" != "null" ]]; then
    export AAVE_QUOTE_PUSH_RECEIVER_MAINNET="$inferred_receiver"
  fi
fi
if [[ -z "${QUOTE_PUSH_TREASURY_MANAGER_MAINNET:-}" ]]; then
  inferred_manager="$(pick_latest_manager || true)"
  if [[ -n "$inferred_manager" && "$inferred_manager" != "null" ]]; then
    export QUOTE_PUSH_TREASURY_MANAGER_MAINNET="$inferred_manager"
  fi
fi
# The receiver is mandatory (manager is optional; see the tail of the script).
require_env AAVE_QUOTE_PUSH_RECEIVER_MAINNET
# Deployer identity: derive from PRIVATE_KEY when present, else accept an
# explicit address (sufficient for this read-only report).
if [[ -n "${PRIVATE_KEY:-}" ]]; then
  DEPLOYER="$(cast wallet address --private-key "$PRIVATE_KEY")"
elif [[ -n "${QUOTE_PUSH_DEPLOYER_ADDRESS:-}" ]]; then
  DEPLOYER="${QUOTE_PUSH_DEPLOYER_ADDRESS}"
else
  echo "[fail] set PRIVATE_KEY or QUOTE_PUSH_DEPLOYER_ADDRESS" >&2
  exit 1
fi
# Report parameters, env-overridable (defaults documented in the header).
TOKEN="${QUOTE_PUSH_SURPLUS_TOKEN_MAINNET:-$DEFAULT_USDC_MAINNET}"
CWUSDC="${CWUSDC_MAINNET:-$DEFAULT_CWUSDC_MAINNET}"
RECEIVER="${AAVE_QUOTE_PUSH_RECEIVER_MAINNET}"
RECEIVER_RESERVE_RAW="${QUOTE_PUSH_RECEIVER_RESERVE_RAW:-0}"
GAS_FLOOR_ETH="${QUOTE_PUSH_DEPLOYER_GAS_FLOOR_ETH:-0.003}"
OP_BUFFER_ETH="${QUOTE_PUSH_OPERATION_BUFFER_ETH:-0.0005}"
NATIVE_TOKEN_PRICE="${QUOTE_PUSH_NATIVE_TOKEN_PRICE:-3200}"
# On-chain reads (read-only): ETH balance in ether units, ERC-20 balances in
# raw units. awk keeps only the first whitespace-separated field of cast's
# output so any trailing annotation is dropped.
deployer_eth="$(cast balance "$DEPLOYER" --ether --rpc-url "$ETHEREUM_MAINNET_RPC")"
deployer_quote_raw="$(cast call "$TOKEN" 'balanceOf(address)(uint256)' "$DEPLOYER" --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
deployer_base_raw="$(cast call "$CWUSDC" 'balanceOf(address)(uint256)' "$DEPLOYER" --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
receiver_quote_raw="$(cast call "$TOKEN" 'balanceOf(address)(uint256)' "$RECEIVER" --rpc-url "$ETHEREUM_MAINNET_RPC" | awk '{print $1}')"
# Single python3 pass turns the raw balances into the human-readable report;
# all derived arithmetic lives in one place so raw and human values line up.
python3 - "$deployer_eth" "$deployer_quote_raw" "$deployer_base_raw" "$receiver_quote_raw" \
  "$RECEIVER_RESERVE_RAW" "$GAS_FLOOR_ETH" "$OP_BUFFER_ETH" "$NATIVE_TOKEN_PRICE" "$DEPLOYER" "$RECEIVER" "$TOKEN" <<'PY'
import math
import sys

# Positional argv mirrors the shell invocation above, in the same order.
deployer_eth = float(sys.argv[1])
deployer_quote_raw = int(sys.argv[2])
deployer_base_raw = int(sys.argv[3])
receiver_quote_raw = int(sys.argv[4])
receiver_reserve_raw = int(sys.argv[5])
gas_floor_eth = float(sys.argv[6])
op_buffer_eth = float(sys.argv[7])
native_token_price = float(sys.argv[8])
deployer = sys.argv[9]
receiver = sys.argv[10]
token = sys.argv[11]

# Gas accounting: deployer must hold floor + operation buffer in ETH; any
# shortfall is priced into quote raw units (ceil keeps the holdback safe-side).
recycle_floor_eth = gas_floor_eth + op_buffer_eth
gas_shortfall_eth = max(0.0, recycle_floor_eth - deployer_eth)
gas_shortfall_quote_raw = math.ceil(gas_shortfall_eth * native_token_price * 1_000_000)

# Surplus split: sweepable = receiver balance above its reserve; the gas
# holdback is taken first, the remainder is recycleable, and matched recycling
# is capped by the deployer's cWUSDC balance.
sweepable_raw = max(0, receiver_quote_raw - receiver_reserve_raw)
holdback_for_gas_raw = min(sweepable_raw, gas_shortfall_quote_raw)
recycleable_raw = max(0, sweepable_raw - holdback_for_gas_raw)
matched_base_cap_raw = min(recycleable_raw, deployer_base_raw)


def human(raw: int) -> str:
    # NOTE(review): assumes a 6-decimal quote token (the USDC default) —
    # confirm before overriding QUOTE_PUSH_SURPLUS_TOKEN_MAINNET.
    return f"{raw / 1_000_000:.6f}"


print("=== Mainnet quote-push surplus accounting ===")
print(f"deployer={deployer}")
print(f"receiver={receiver}")
print(f"token={token}")
print(f"deployer_eth={deployer_eth:.18f}")
print(f"gas_floor_eth={gas_floor_eth:.18f}")
print(f"operation_buffer_eth={op_buffer_eth:.18f}")
print(f"recycle_floor_eth={recycle_floor_eth:.18f}")
print(f"gas_shortfall_eth={gas_shortfall_eth:.18f}")
print(f"native_token_price_quote={native_token_price:.6f}")
print(f"gas_shortfall_quote_raw={gas_shortfall_quote_raw} human={human(gas_shortfall_quote_raw)}")
print(f"receiver_quote_raw={receiver_quote_raw} human={human(receiver_quote_raw)}")
print(f"receiver_reserve_raw={receiver_reserve_raw} human={human(receiver_reserve_raw)}")
print(f"sweepable_raw={sweepable_raw} human={human(sweepable_raw)}")
print(f"deployer_quote_raw={deployer_quote_raw} human={human(deployer_quote_raw)}")
print(f"deployer_cWUSDC_raw={deployer_base_raw} human={human(deployer_base_raw)}")
print(f"holdback_for_gas_raw={holdback_for_gas_raw} human={human(holdback_for_gas_raw)}")
print(f"recycleable_quote_raw={recycleable_raw} human={human(recycleable_raw)}")
print(f"matched_base_cap_raw={matched_base_cap_raw} human={human(matched_base_cap_raw)}")
print("")
print("Recommended next steps:")
print("1. Preview receiver surplus sweep:")
print("   bash scripts/deployment/sweep-mainnet-aave-quote-push-receiver-surplus.sh --dry-run")
# Branch on whether the deployer still needs ETH top-up before recycling.
if gas_shortfall_eth > 0:
    print("2. Replenish deployer gas reserve before recycling surplus to the pool.")
    print(
        f"   At current assumptions, hold back about {human(holdback_for_gas_raw)} quote units to cover the ETH shortfall."
    )
    print("3. After deployer ETH is back above the recycle floor, recycle the remainder into pool growth:")
    print("   bash scripts/deployment/recycle-mainnet-aave-quote-push-surplus.sh --dry-run")
else:
    print("2. Deployer gas reserve is above the recycle floor. Swept quote can be recycled into pool growth.")
    print("   bash scripts/deployment/recycle-mainnet-aave-quote-push-surplus.sh --dry-run")
    if matched_base_cap_raw == 0:
        print("3. Wallet cWUSDC is the limiting asset for matched recycling; no pool-growth tranche is currently fundable.")
    else:
        print(
            f"3. Current matched recycle ceiling is {human(matched_base_cap_raw)} quote/base units using deployer-held cWUSDC."
        )
PY
# Optional section: when a treasury manager is configured (or inferred from
# broadcast), report its on-chain accounting view. Each read is best-effort
# (`2>/dev/null ... || true`) so an unreachable or incompatible manager
# contract degrades to empty/"unknown" values instead of aborting the report.
if [[ -n "${QUOTE_PUSH_TREASURY_MANAGER_MAINNET:-}" ]]; then
  manager_quote_raw="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'quoteBalance()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
  manager_available_raw="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'availableQuote()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
  manager_receiver_sweepable_raw="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'receiverSweepableQuote()(uint256)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
  manager_gas_recipient="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'gasRecipient()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
  manager_recycle_recipient="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'recycleRecipient()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
  manager_receiver_owner="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'receiverOwner()(address)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
  manager_receiver_owned="$(cast call "$QUOTE_PUSH_TREASURY_MANAGER_MAINNET" 'isReceiverOwnedByManager()(bool)' --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null | awk '{print $1}' || true)"
  # Only print the section when both core reads succeeded; the remaining
  # fields fall back to 0 / "unknown" in the heredoc below.
  if [[ -n "$manager_quote_raw" && -n "$manager_available_raw" ]]; then
    # Raw -> human conversion, same 6-decimal assumption as the main report.
    manager_quote_human="$(python3 - "$manager_quote_raw" <<'PY'
import sys
print(f"{int(sys.argv[1]) / 1_000_000:.6f}")
PY
)"
    manager_available_human="$(python3 - "$manager_available_raw" <<'PY'
import sys
print(f"{int(sys.argv[1]) / 1_000_000:.6f}")
PY
)"
    manager_receiver_sweepable_human="$(python3 - "${manager_receiver_sweepable_raw:-0}" <<'PY'
import sys
print(f"{int(sys.argv[1]) / 1_000_000:.6f}")
PY
)"
    cat <<EOF
=== Treasury manager state ===
manager=${QUOTE_PUSH_TREASURY_MANAGER_MAINNET}
manager_quote_raw=${manager_quote_raw} human=${manager_quote_human}
manager_available_raw=${manager_available_raw} human=${manager_available_human}
manager_receiver_sweepable_raw=${manager_receiver_sweepable_raw:-0} human=${manager_receiver_sweepable_human}
manager_receiver_owner=${manager_receiver_owner:-unknown}
manager_receiver_owned_by_manager=${manager_receiver_owned:-unknown}
manager_gas_recipient=${manager_gas_recipient:-unknown}
manager_recycle_recipient=${manager_recycle_recipient:-unknown}
EOF
  fi
fi