chore: sync docs, config schemas, scripts, and meta task alignment

- Institutional / JVMTM / reserve-provenance / GRU transport + standards JSON
- Validation and verify scripts (Blockscout labels, x402, GRU preflight, P1 local path)
- Wormhole wiring in AGENTS, MCP_SETUP, MASTER_INDEX, 04-configuration README
- Meta docs, integration gaps, live verification log, architecture updates
- CI validate-config workflow updates

Operator/LAN items, submodule working trees, and public token-aggregation edge
routes remain follow-up (see TODOS_CONSOLIDATED P1).

Made-with: Cursor
This commit is contained in:
defiQUG
2026-03-31 22:31:39 -07:00
parent 00880304d4
commit 7ac74f432b
948 changed files with 47476 additions and 490 deletions

View File

@@ -2,12 +2,18 @@
# Send WETH cross-chain via CCIP (Chain 138 → destination chain).
# Usage: ./scripts/bridge/run-send-cross-chain.sh <amount_eth> [recipient] [--dry-run]
# Env: CCIP_DEST_CHAIN_SELECTOR, GAS_PRICE, GAS_LIMIT, CONFIRM_ABOVE_ETH (prompt above this amount)
# Version: 2026-01-31
# Version: 2026-03-30
set -euo pipefail
[[ "${DEBUG:-0}" = "1" ]] && set -x
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
had_nounset=0
if [[ $- == *u* ]]; then
had_nounset=1
set +u
fi
source "${SCRIPT_DIR}/../lib/load-project-env.sh"
(( had_nounset )) && set -u
[[ -z "${PRIVATE_KEY:-}" ]] && { echo "PRIVATE_KEY required"; exit 1; }
[[ -z "${CCIPWETH9_BRIDGE_CHAIN138:-}" ]] && { echo "CCIPWETH9_BRIDGE_CHAIN138 required"; exit 1; }
@@ -29,6 +35,39 @@ RPC="${RPC_URL_138:-$CHAIN138_RPC}"
[[ -z "$RPC" ]] && { echo "ERROR: RPC_URL_138 or CHAIN138_RPC required"; exit 1; }
BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138}"
extract_first_address() {
  # Print the first 0x-prefixed 40-hex-char address found in $1 (empty if none).
  # printf avoids echo's flag/backslash pitfalls. The trailing '|| true' matters:
  # with `set -o pipefail`, a no-match grep exits 1 and would make a bare
  # DEST_ADDR="$(extract_first_address …)" capture abort the script under `set -e`.
  printf '%s\n' "$1" | grep -oE '0x[a-fA-F0-9]{40}' | sed -n '1p' || true
}
lower() {
  # Lowercase $1 using bash parameter expansion — no subshell, no external tr.
  printf '%s\n' "${1,,}"
}
# Look up the currently mapped destination receiver for DEST_SELECTOR on the bridge.
# Call failures are swallowed so DEST_RAW ends up empty instead of aborting here.
DEST_RAW="$(cast call "$BRIDGE" 'destinations(uint64)((uint64,address,bool))' "$DEST_SELECTOR" --rpc-url "$RPC" 2>/dev/null || echo "")"
DEST_ADDR="$(extract_first_address "$DEST_RAW")"
# CCIP chain selector for Avalanche; override via AVALANCHE_SELECTOR for other nets.
AVALANCHE_SELECTOR_VALUE="${AVALANCHE_SELECTOR:-6433500567565415381}"
if [[ "$DEST_SELECTOR" == "$AVALANCHE_SELECTOR_VALUE" ]]; then
AVALANCHE_NATIVE_BRIDGE="${CCIPWETH9_BRIDGE_AVALANCHE:-}"
# Refuse to send into the native AVAX WETH9 bridge unless explicitly overridden
# with ALLOW_UNSUPPORTED_AVAX_NATIVE=1 — that path was observed dead (see message).
if [[ -n "$AVALANCHE_NATIVE_BRIDGE" ]] && [[ "$(lower "$DEST_ADDR")" == "$(lower "$AVALANCHE_NATIVE_BRIDGE")" ]] && [[ "${ALLOW_UNSUPPORTED_AVAX_NATIVE:-0}" != "1" ]]; then
cat <<EOF
ERROR: current Avalanche destination mapping points at the native AVAX WETH9 bridge ($DEST_ADDR).
That path is not live from the current Chain 138 router. On 2026-03-30, a live test message to the
native AVAX bridge remained unprocessed because the Chain 138 router emits MessageSent events but
the AVAX native bridge only accepts ccipReceive from its own trusted AVAX router.
Use the relay-backed AVAX receiver instead, or set ALLOW_UNSUPPORTED_AVAX_NATIVE=1 if you are
intentionally testing the unsupported native path.
EOF
exit 1
fi
# Informational: a non-native mapped receiver is the supported relay-backed path.
if [[ -n "$DEST_ADDR" ]]; then
echo "Info: Avalanche send will use current mapped receiver $DEST_ADDR"
fi
fi
# Confirmation for large amounts
CONFIRM_ABOVE="${CONFIRM_ABOVE_ETH:-1}"
if [[ -n "$CONFIRM_ABOVE" ]] && awk -v a="$AMOUNT_ETH" -v b="$CONFIRM_ABOVE" 'BEGIN{exit !(a+0>=b+0)}' 2>/dev/null; then

View File

@@ -252,7 +252,7 @@ update_container_configs() {
pct exec $vmid -- bash -c '
# Update .env files
find /opt /home /root -name \".env\" -type f 2>/dev/null | while read f; do
[ -r \"\$f\" ] && sed -i \"s|10.200.0.10|${ORDER_POSTGRES_PRIMARY:-${ORDER_POSTGRES_PRIMARY:-192.168.11.44}}|g; s|10.200.0.11|${ORDER_POSTGRES_REPLICA:-${ORDER_POSTGRES_REPLICA:-192.168.11.45}}|g; s|10.200.0.20|${ORDER_REDIS_IP:-192.168.11.38}|g; s|10.200.0.30|${IP_SERVICE_40:-${IP_SERVICE_40:-${IP_SERVICE_40:-192.168.11.40}}}|g; s|10.200.0.40|${IP_SERVICE_41:-${IP_SERVICE_41:-${IP_SERVICE_41:-192.168.11.41}}}|g; s|10.200.0.50|${IP_SERVICE_49:-${IP_SERVICE_49:-${IP_SERVICE_49:-192.168.11.49}}}|g; s|10.200.0.60|${IP_SERVICE_42:-${IP_SERVICE_42:-${IP_SERVICE_42:-192.168.11.42}}}|g; s|10.200.0.70|${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-${IP_SERVICE_50:-192.168.11.50}}}}}}|g; s|10.200.0.80|${IP_SERVICE_43:-${IP_SERVICE_43:-${IP_SERVICE_43:-192.168.11.43}}}|g; s|10.200.0.90|${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-192.168.11.36}}}}}}|g; s|10.200.0.91|${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-192.168.11.35}}}}}}|g; s|10.200.0.92|${IP_MIM_WEB:-192.168.11.37}|g; s|10.200.0.200|${ORDER_REDIS_REPLICA:-${ORDER_REDIS_REPLICA:-${ORDER_REDIS_REPLICA:-192.168.11.46}}}|g; s|10.200.0.201|${IP_SERVICE_47:-${IP_SERVICE_47:-${IP_SERVICE_47:-192.168.11.47}}}|g; s|10.200.0.202|${IP_ORDER_OPENSEARCH:-${IP_ORDER_OPENSEARCH:-${IP_ORDER_OPENSEARCH:-192.168.11.48}}}|g; s|10.200.0.210|${IP_ORDER_HAPROXY:-${IP_ORDER_HAPROXY:-192.168.11.39}}|g; s|10.200.0.230|${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-${IP_SERVICE_51:-192.168.11.51}}}}}}|g\" \"\$f\" 2>/dev/null || true
[ -r \"\$f\" ] && sed -i \"s|10.200.0.10|${ORDER_POSTGRES_PRIMARY:-192.168.11.44}|g; s|10.200.0.11|${ORDER_POSTGRES_REPLICA:-192.168.11.45}|g; s|10.200.0.20|${ORDER_REDIS_IP:-192.168.11.38}|g; s|10.200.0.30|${IP_SERVICE_40:-192.168.11.40}|g; s|10.200.0.40|${IP_SERVICE_41:-192.168.11.41}|g; s|10.200.0.50|${IP_SERVICE_49:-192.168.11.49}|g; s|10.200.0.60|${IP_SERVICE_42:-192.168.11.42}|g; s|10.200.0.70|${IP_SERVICE_50:-192.168.11.50}|g; s|10.200.0.80|${IP_SERVICE_43:-192.168.11.43}|g; s|10.200.0.90|${IP_SERVICE_36:-192.168.11.36}|g; s|10.200.0.91|${IP_SERVICE_35:-192.168.11.35}|g; s|10.200.0.92|${IP_ORDER_MCP_LEGAL:-192.168.11.94}|g; s|10.200.0.200|${ORDER_REDIS_REPLICA:-192.168.11.46}|g; s|10.200.0.201|${IP_SERVICE_47:-192.168.11.47}|g; s|10.200.0.202|${IP_ORDER_OPENSEARCH:-192.168.11.48}|g; s|10.200.0.210|${IP_ORDER_HAPROXY:-192.168.11.39}|g; s|10.200.0.230|${IP_SERVICE_51:-192.168.11.51}|g\" \"\$f\" 2>/dev/null || true
done
echo \"Configs updated for CT $vmid\"
'

View File

@@ -329,7 +329,7 @@ declare -A ip_mappings=(
["10.200.0.80"]="${IP_SERVICE_43:-192.168.11.43}" # order-eresidency
["10.200.0.90"]="${IP_SERVICE_36:-192.168.11.36}" # order-portal-public
["10.200.0.91"]="${IP_SERVICE_35:-192.168.11.35}" # order-portal-internal
["10.200.0.92"]="${IP_MIM_WEB:-192.168.11.37}" # order-mcp-legal
["10.200.0.92"]="${IP_ORDER_MCP_LEGAL:-192.168.11.94}" # order-mcp-legal
["10.200.0.200"]="${ORDER_REDIS_REPLICA:-192.168.11.46}" # order-prometheus
["10.200.0.201"]="${IP_SERVICE_47:-192.168.11.47}" # order-grafana
["10.200.0.202"]="${IP_ORDER_OPENSEARCH:-192.168.11.48}" # order-opensearch

View File

@@ -1,16 +1,16 @@
#!/usr/bin/env bash
# Deploy token-aggregation service for publication (token lists, CoinGecko/CMC reports).
# Deploy token-aggregation service for publication (token lists, CoinGecko/CMC reports, bridge/routes).
# Run on explorer VM (VMID 5000) or host that serves explorer.d-bis.org.
#
# Prerequisites: Node 20+, PostgreSQL (for full indexing; report API may work with minimal config)
# Prerequisites: Node 20+, PostgreSQL (for full indexing; API responds with defaults if DB empty)
# Usage: ./scripts/deploy-token-aggregation-for-publication.sh [INSTALL_DIR]
#
# After deploy: Run apply-nginx-token-aggregation-proxy.sh to proxy /api/v1/ to this service.
# After deploy: nginx must proxy /api/v1/ to this service BEFORE Blockscout (see TOKEN_AGGREGATION_REPORT_API_RUNBOOK).
# Explorer layouts vary: port 3000 or 3001 — match TOKEN_AGG_PORT in apply-nginx scripts.
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# Default: user-writable dir in repo (no sudo). Use /opt/token-aggregation with sudo for system install.
INSTALL_DIR="${1:-$REPO_ROOT/token-aggregation-build}"
SVC_DIR="$REPO_ROOT/smom-dbis-138/services/token-aggregation"
@@ -27,22 +27,29 @@ cd "$INSTALL_DIR"
if [ ! -f .env ]; then
if [ -f .env.example ]; then
cp .env.example .env
echo "Created .env from .env.example — edit with CUSDC_ADDRESS_138, CUSDT_ADDRESS_138, DATABASE_URL"
echo "Created .env from .env.example — set DATABASE_URL for persistent index; CUSDT/CUSDC already defaulted."
else
echo "Create .env with at least: CUSDC_ADDRESS_138, CUSDT_ADDRESS_138, CHAIN_138_RPC_URL"
echo "Create .env with at least DATABASE_URL (and optional CHAIN_138_RPC_URL)." >&2
fi
fi
if command -v pnpm >/dev/null 2>&1 && [ -f "$REPO_ROOT/pnpm-lock.yaml" ]; then
(cd "$REPO_ROOT" && pnpm install --filter token-aggregation-service --no-frozen-lockfile 2>/dev/null) || true
fi
npm install --omit=dev 2>/dev/null || npm install
npm run build 2>/dev/null || true
npm run build
echo ""
echo "Token-aggregation built. Start with:"
echo " cd $INSTALL_DIR && node dist/index.js"
echo "Or add systemd unit. Default port: 3000"
echo "Or add systemd unit. Default port from code: 3000 (match nginx TOKEN_AGG_PORT / fix-explorer-http-api-v1-proxy.sh uses 3001)."
echo ""
echo "Then apply nginx proxy (on same host):"
echo " TOKEN_AGG_PORT=3000 CONFIG_FILE=/etc/nginx/sites-available/blockscout \\"
echo " bash $REPO_ROOT/explorer-monorepo/scripts/apply-nginx-token-aggregation-proxy.sh"
echo "Then apply nginx proxy (on same host), e.g.:"
echo " TOKEN_AGG_PORT=3001 CONFIG_FILE=/etc/nginx/sites-available/blockscout \\"
echo " bash $REPO_ROOT/scripts/fix-explorer-http-api-v1-proxy.sh"
echo " # or: explorer-monorepo/scripts/apply-nginx-token-aggregation-proxy.sh"
echo ""
echo "Verify: curl -s https://explorer.d-bis.org/api/v1/report/token-list?chainId=138 | jq '.tokens | length'"
echo "Verify:"
echo " pnpm run verify:token-aggregation-api"
echo " SKIP_BRIDGE_ROUTES=0 bash scripts/verify/check-public-report-api.sh https://explorer.d-bis.org"

View File

@@ -19,7 +19,7 @@ fi
LXCS=(
"${RTGS_ORCH_VMID:-5805} ${RTGS_ORCH_HOSTNAME:-rtgs-orchestrator-1} ${RTGS_ORCH_IP:-192.168.11.93} 4096 2 24"
"${RTGS_FX_VMID:-5806} ${RTGS_FX_HOSTNAME:-rtgs-fx-1} ${RTGS_FX_IP:-192.168.11.94} 4096 2 24"
"${RTGS_FX_VMID:-5806} ${RTGS_FX_HOSTNAME:-rtgs-fx-1} ${RTGS_FX_IP:-192.168.11.99} 4096 2 24"
"${RTGS_LIQ_VMID:-5807} ${RTGS_LIQ_HOSTNAME:-rtgs-liquidity-1} ${RTGS_LIQ_IP:-192.168.11.95} 4096 2 24"
)

View File

@@ -20,6 +20,7 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
REPO_ROOT="$PROJECT_ROOT"
SMOM="${PROJECT_ROOT}/smom-dbis-138"
DRY_RUN=""
@@ -37,9 +38,21 @@ if [[ ! -f "$SMOM/.env" ]]; then
echo "Missing $SMOM/.env. Abort." >&2
exit 1
fi
set -a
source "$SMOM/.env"
set +a
if [[ -f "$SMOM/scripts/lib/deployment/dotenv.sh" ]]; then
# shellcheck disable=SC1090
source "$SMOM/scripts/lib/deployment/dotenv.sh"
load_deployment_env --repo-root "$SMOM"
else
had_nounset=0
if [[ $- == *u* ]]; then
had_nounset=1
set +u
fi
set -a
source "$SMOM/.env"
set +a
(( had_nounset )) && set -u
fi
# 2) RPC: Core (2101) only — no Public fallback for deployments
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
@@ -169,4 +182,4 @@ done
echo ""
echo "Running on-chain verification..."
"$PROJECT_ROOT/scripts/verify/check-contracts-on-chain-138.sh" "$RPC"
"$REPO_ROOT/scripts/verify/check-contracts-on-chain-138.sh" "$RPC"

View File

@@ -4,12 +4,19 @@
# - Remove .env.local on CT 7801; install .env with PORTAL_LOCAL_LOGIN_* + NEXTAUTH_SECRET.
# - Run sync-sankofa-portal-7801.sh (rebuild portal with updated auth.ts).
#
# Keycloak SSO: If repo .env defines KEYCLOAK_CLIENT_SECRET (and optional KEYCLOAK_URL / REALM /
# CLIENT_ID), those values are written into the pushed .env. After sync, sankofa-portal-merge-keycloak-env-from-repo.sh
# runs to mirror OIDC settings into .env.local as well. Without KEYCLOAK_CLIENT_SECRET in .env,
# use keycloak-sankofa-ensure-client-redirects*.sh then the merge script.
#
# Usage: ./scripts/deployment/enable-sankofa-portal-login-7801.sh [--dry-run]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
@@ -20,6 +27,9 @@ LOCAL_EMAIL="${PORTAL_LOCAL_LOGIN_EMAIL:-portal@sankofa.nexus}"
if [[ "${1:-}" == "--dry-run" ]]; then
echo "[DRY-RUN] Would patch Keycloak ${VMID_KC}, write .env on ${VMID_PORTAL}, sync portal"
if [[ -n "${KEYCLOAK_CLIENT_SECRET:-}" ]]; then
echo "[DRY-RUN] Would run sankofa-portal-merge-keycloak-env-from-repo.sh after sync (KEYCLOAK_CLIENT_SECRET is set)"
fi
exit 0
fi
@@ -34,10 +44,10 @@ NEXT_PUBLIC_GRAPHQL_WS_ENDPOINT=ws://192.168.11.50:4000/graphql-ws
NEXTAUTH_URL=https://portal.sankofa.nexus
NEXTAUTH_SECRET=${NEXTAUTH_SEC}
KEYCLOAK_URL=https://keycloak.sankofa.nexus
KEYCLOAK_REALM=master
KEYCLOAK_CLIENT_ID=sankofa-portal
KEYCLOAK_CLIENT_SECRET=
KEYCLOAK_URL=${KEYCLOAK_URL:-https://keycloak.sankofa.nexus}
KEYCLOAK_REALM=${KEYCLOAK_REALM:-master}
KEYCLOAK_CLIENT_ID=${KEYCLOAK_CLIENT_ID:-sankofa-portal}
KEYCLOAK_CLIENT_SECRET=${KEYCLOAK_CLIENT_SECRET:-}
PORTAL_LOCAL_LOGIN_EMAIL=${LOCAL_EMAIL}
PORTAL_LOCAL_LOGIN_PASSWORD=${GEN_PASS}
@@ -83,12 +93,24 @@ echo ""
echo "📤 Syncing portal source + rebuild…"
bash "${SCRIPT_DIR}/sync-sankofa-portal-7801.sh"
if [[ -n "${KEYCLOAK_CLIENT_SECRET:-}" ]]; then
echo ""
echo "🔐 Mirroring Keycloak OIDC into portal .env + .env.local (merge script)…"
bash "${SCRIPT_DIR}/sankofa-portal-merge-keycloak-env-from-repo.sh"
fi
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Sign in at https://portal.sankofa.nexus (NEXTAUTH_URL)"
echo " Email: ${LOCAL_EMAIL}"
echo " Password: ${GEN_PASS}"
echo ""
echo "SSO: Add NPM host keycloak.sankofa.nexus → ${IP_KEYCLOAK:-192.168.11.52}:8080, then create Keycloak"
echo " confidential client sankofa-portal; set KEYCLOAK_CLIENT_SECRET in .env and re-sync."
if [[ -n "${KEYCLOAK_CLIENT_SECRET:-}" ]]; then
echo "SSO: Keycloak client secret was taken from repo .env; portal CT updated via merge script."
else
echo "SSO: No KEYCLOAK_CLIENT_SECRET in repo .env — local login only until you:"
echo " 1) NPM: keycloak.sankofa.nexus → ${IP_KEYCLOAK:-192.168.11.52}:8080"
echo " 2) ./scripts/deployment/keycloak-sankofa-ensure-client-redirects-via-proxmox-pct.sh"
echo " 3) ./scripts/deployment/sankofa-portal-merge-keycloak-env-from-repo.sh"
fi
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -0,0 +1,177 @@
#!/usr/bin/env bash
# Create or reset the Keycloak master-realm "admin" user directly in PostgreSQL (Keycloak 24 Quarkus
# has no bootstrap-admin CLI). Use when user_entity is empty or you must rotate the admin password.
#
# Requirements: SSH to Proxmox, pct to PostgreSQL CT (default 7803), sudo postgres psql on DB "keycloak".
# Does not print the password to stdout; writes it to a file you pass, or merges into repo .env.
#
# Usage:
# KEYCLOAK_ADMIN_PASSWORD='your-secure-value' ./scripts/deployment/keycloak-bootstrap-or-reset-master-admin-db.sh
# ./scripts/deployment/keycloak-bootstrap-or-reset-master-admin-db.sh # generates password → .env
#
# Env:
# PROXMOX_HOST (default 192.168.11.11), POSTGRES_CT_VMID (7803), KEYCLOAK_CT_VMID (7802)
# KEYCLOAK_ADMIN_USERNAME (default admin), KEYCLOAK_DB_NAME (keycloak)
# KEYCLOAK_ADMIN_PASSWORD — if unset, a random alphanumeric password is generated
# WRITE_ENV_FILE — path to .env to upsert KEYCLOAK_ADMIN + KEYCLOAK_ADMIN_PASSWORD (default: repo .env)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
POSTGRES_CT_VMID="${POSTGRES_CT_VMID:-7803}"
KEYCLOAK_CT_VMID="${KEYCLOAK_CT_VMID:-${SANKOFA_KEYCLOAK_VMID:-7802}}"
ADMIN_USER="${KEYCLOAK_ADMIN:-admin}"
DB_NAME="${KEYCLOAK_DB_NAME:-keycloak}"
WRITE_ENV_FILE="${WRITE_ENV_FILE:-${PROJECT_ROOT}/.env}"
SSH_OPTS=(-o BatchMode=yes -o StrictHostKeyChecking=accept-new -o ConnectTimeout=15)
gen_pass() {
  # Generate exactly 32 alphanumeric password characters on stdout (no newline).
  # base64 of N random bytes loses '+', '/', '=' after tr -dc, so a single draw
  # can come up short of 32 chars; keep drawing until we have enough, then trim.
  local out=""
  while (( ${#out} < 32 )); do
    out+="$(openssl rand -base64 48 | tr -dc 'a-zA-Z0-9')"
  done
  printf '%s' "${out:0:32}"
}
NEW_PASS="${KEYCLOAK_ADMIN_PASSWORD:-}"
if [[ -z "$NEW_PASS" ]]; then
NEW_PASS="$(gen_pass)"
fi
SQL_GEN="$(mktemp)"
trap 'rm -f "$SQL_GEN"' EXIT
python3 - "$NEW_PASS" "$ADMIN_USER" >"$SQL_GEN" <<'PY'
import json, base64, hashlib, os, sys, time, uuid
password, admin_user = sys.argv[1], sys.argv[2]
salt = os.urandom(16)
iters = 27500
dk = hashlib.pbkdf2_hmac("sha256", password.encode("utf-8"), salt, iters)
secret_data = json.dumps(
{
"value": base64.b64encode(dk).decode(),
"salt": base64.b64encode(salt).decode(),
"additionalParameters": {},
},
separators=(",", ":"),
)
credential_data = json.dumps(
{"hashIterations": iters, "algorithm": "pbkdf2-sha256", "additionalParameters": {}},
separators=(",", ":"),
)
ts = int(time.time() * 1000)
user_id = str(uuid.uuid4())
cred_id = str(uuid.uuid4())
def q(s: str) -> str:
    """Escape for embedding in a single-quoted SQL literal ('' doubling)."""
    return "''".join(s.split("'"))
sd, cd = q(secret_data), q(credential_data)
user_esc = q(admin_user)
print("BEGIN;")
print(
f"""
DO $do$
DECLARE
rid TEXT;
r_admin TEXT;
r_default TEXT;
uid TEXT;
n INT;
v_secret TEXT := '{sd}';
v_cred TEXT := '{cd}';
BEGIN
SELECT id INTO rid FROM realm WHERE name = 'master' LIMIT 1;
IF rid IS NULL THEN
RAISE EXCEPTION 'realm master not found';
END IF;
SELECT id INTO r_admin FROM keycloak_role
WHERE realm_id = rid AND name = 'admin' AND client IS NULL LIMIT 1;
SELECT id INTO r_default FROM keycloak_role
WHERE realm_id = rid AND name = 'default-roles-master' AND client IS NULL LIMIT 1;
IF r_admin IS NULL OR r_default IS NULL THEN
RAISE EXCEPTION 'missing admin or default-roles-master role';
END IF;
SELECT COUNT(*) INTO n FROM user_entity WHERE realm_id = rid AND username = '{user_esc}';
IF n = 0 THEN
INSERT INTO user_entity (
id, email, email_constraint, email_verified, enabled, realm_id, username, created_timestamp, not_before
) VALUES (
'{user_id}',
'{user_esc}@sankofa.nexus',
'{user_esc}@sankofa.nexus',
true,
true,
rid,
'{user_esc}',
{ts},
0
);
uid := '{user_id}';
INSERT INTO user_role_mapping (role_id, user_id) VALUES (r_admin, uid);
INSERT INTO user_role_mapping (role_id, user_id) VALUES (r_default, uid);
ELSE
SELECT id INTO uid FROM user_entity WHERE realm_id = rid AND username = '{user_esc}' LIMIT 1;
END IF;
DELETE FROM credential WHERE user_id = uid AND type = 'password';
INSERT INTO credential (id, salt, type, user_id, created_date, user_label, secret_data, credential_data, priority)
VALUES (
'{cred_id}',
NULL,
'password',
uid,
{ts},
NULL,
v_secret,
v_cred,
10
);
END
$do$;
"""
)
print("COMMIT;")
PY
ssh "${SSH_OPTS[@]}" "root@${PROXMOX_HOST}" \
"pct exec ${POSTGRES_CT_VMID} -- sudo -u postgres psql -d ${DB_NAME} -v ON_ERROR_STOP=1 -f -" <"$SQL_GEN"
ssh "${SSH_OPTS[@]}" "root@${PROXMOX_HOST}" \
"pct exec ${KEYCLOAK_CT_VMID} -- systemctl restart keycloak"
echo "[ok] Keycloak master admin user '${ADMIN_USER}' password set in DB; Keycloak restarted on CT ${KEYCLOAK_CT_VMID}."
if [[ -n "${WRITE_ENV_FILE}" ]]; then
python3 - "${WRITE_ENV_FILE}" "${NEW_PASS}" "${ADMIN_USER}" <<'PY'
import re
import sys
from pathlib import Path
path, password, admin_user = Path(sys.argv[1]), sys.argv[2], sys.argv[3]
text = path.read_text() if path.exists() else ""
def upsert_line(body: str, key: str, value: str) -> str:
    """Replace the first ``KEY=…`` line in *body*, or append ``KEY=value``.

    Appending keeps the file newline-terminated; matching is anchored per line.
    """
    replacement = f"{key}={value}"
    pattern = rf"^{re.escape(key)}=.*$"
    if re.search(pattern, body, flags=re.M):
        return re.sub(pattern, replacement, body, count=1, flags=re.M)
    suffix = "" if (not body or body.endswith("\n")) else "\n"
    return body + suffix + replacement + "\n"
text = upsert_line(text, "KEYCLOAK_ADMIN", admin_user)
text = upsert_line(text, "KEYCLOAK_ADMIN_PASSWORD", password)
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(text)
PY
echo "[ok] Updated ${WRITE_ENV_FILE} (KEYCLOAK_ADMIN, KEYCLOAK_ADMIN_PASSWORD)."
fi

View File

@@ -0,0 +1,194 @@
#!/usr/bin/env bash
# Create or update Keycloak OIDC client (default sankofa-portal) with portal/admin redirect URIs.
# Runs Admin API against http://127.0.0.1:8080 inside the Keycloak CT (no LAN to NPM required).
# After a new client is created, repo .env gets KEYCLOAK_CLIENT_SECRET — push it to CT 7801 with
# ./scripts/deployment/sankofa-portal-merge-keycloak-env-from-repo.sh
#
# If the client is created, prints a JSON footer line for the operator .env:
# __SANKOFA_KEYCLOAK_FOOTER__{"created":true,"clientSecret":"..."}
#
# Loads repo .env. Env: PROXMOX_HOST, KEYCLOAK_CT_VMID (7802), KEYCLOAK_* .
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
if [ -f "$PROJECT_ROOT/.env" ]; then
set +u
set -a
# shellcheck source=/dev/null
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set +a
set -u
fi
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
KEYCLOAK_CT_VMID="${KEYCLOAK_CT_VMID:-${SANKOFA_KEYCLOAK_VMID:-7802}}"
REALM="${KEYCLOAK_REALM:-master}"
CLIENT_ID="${KEYCLOAK_CLIENT_ID:-sankofa-portal}"
ADMIN_USER="${KEYCLOAK_ADMIN:-admin}"
ADMIN_PASS="${KEYCLOAK_ADMIN_PASSWORD:-}"
SSH_OPTS=(-o BatchMode=yes -o StrictHostKeyChecking=accept-new -o ConnectTimeout=15)
if [ -z "$ADMIN_PASS" ]; then
echo "KEYCLOAK_ADMIN_PASSWORD is not set in .env" >&2
exit 1
fi
OUT="$(
ssh "${SSH_OPTS[@]}" "root@${PROXMOX_HOST}" \
"pct exec ${KEYCLOAK_CT_VMID} -- env KC_PASS=\"${ADMIN_PASS}\" ADMUSER=\"${ADMIN_USER}\" REALM=\"${REALM}\" CLIENT_ID=\"${CLIENT_ID}\" python3 -u -" <<'PY'
import json
import os
import secrets
import urllib.error
import urllib.parse
import urllib.request
base = "http://127.0.0.1:8080"
realm = os.environ["REALM"]
client_id = os.environ["CLIENT_ID"]
admin_user = os.environ["ADMUSER"]
password = os.environ["KC_PASS"]
desired_redirects = [
"https://portal.sankofa.nexus/*",
"https://portal.sankofa.nexus",
"https://admin.sankofa.nexus/*",
"https://admin.sankofa.nexus",
]
desired_origins = [
"https://portal.sankofa.nexus",
"https://admin.sankofa.nexus",
]
created_new = False
portal_secret = None
def post_form(url: str, data: dict) -> dict:
    """POST *data* form-encoded to *url* and return the decoded JSON response."""
    payload = urllib.parse.urlencode(data).encode()
    request = urllib.request.Request(url, data=payload, method="POST")
    with urllib.request.urlopen(request, timeout=60) as resp:
        raw = resp.read().decode()
    return json.loads(raw)
tok = post_form(
f"{base}/realms/master/protocol/openid-connect/token",
{
"grant_type": "password",
"client_id": "admin-cli",
"username": admin_user,
"password": password,
},
)
access = tok.get("access_token")
if not access:
raise SystemExit(f"token failed: {tok}")
list_url = f"{base}/admin/realms/{realm}/clients?clientId={urllib.parse.quote(client_id)}"
r = urllib.request.Request(list_url, headers={"Authorization": f"Bearer {access}"})
with urllib.request.urlopen(r, timeout=60) as resp:
clients = json.loads(resp.read().decode())
if not clients:
portal_secret = secrets.token_urlsafe(48)
new_client = {
"clientId": client_id,
"name": "Sankofa Portal",
"enabled": True,
"protocol": "openid-connect",
"publicClient": False,
"standardFlowEnabled": True,
"implicitFlowEnabled": False,
"directAccessGrantsEnabled": False,
"serviceAccountsEnabled": False,
"redirectUris": desired_redirects,
"webOrigins": desired_origins,
"secret": portal_secret,
}
cr = urllib.request.Request(
f"{base}/admin/realms/{realm}/clients",
data=json.dumps(new_client).encode(),
headers={"Authorization": f"Bearer {access}", "Content-Type": "application/json"},
method="POST",
)
try:
with urllib.request.urlopen(cr, timeout=120) as resp:
if resp.getcode() not in (200, 201):
raise SystemExit(f"create client unexpected HTTP {resp.getcode()}")
except urllib.error.HTTPError as e:
err = e.read().decode() if e.fp else str(e)
raise SystemExit(f"POST client failed HTTP {e.code}: {err}") from e
created_new = True
r = urllib.request.Request(list_url, headers={"Authorization": f"Bearer {access}"})
with urllib.request.urlopen(r, timeout=60) as resp:
clients = json.loads(resp.read().decode())
if not clients:
raise SystemExit("client create did not persist")
internal_id = clients[0]["id"]
get_url = f"{base}/admin/realms/{realm}/clients/{internal_id}"
r2 = urllib.request.Request(get_url, headers={"Authorization": f"Bearer {access}"})
with urllib.request.urlopen(r2, timeout=60) as resp:
full = json.loads(resp.read().decode())
redirs = list(dict.fromkeys((full.get("redirectUris") or []) + desired_redirects))
origins = list(dict.fromkeys((full.get("webOrigins") or []) + desired_origins))
full["redirectUris"] = redirs
full["webOrigins"] = origins
if portal_secret and not full.get("secret"):
full["secret"] = portal_secret
put = urllib.request.Request(
get_url,
data=json.dumps(full).encode(),
headers={"Authorization": f"Bearer {access}", "Content-Type": "application/json"},
method="PUT",
)
try:
with urllib.request.urlopen(put, timeout=120) as resp:
code = resp.getcode()
except urllib.error.HTTPError as e:
err = e.read().decode() if e.fp else str(e)
raise SystemExit(f"PUT failed HTTP {e.code}: {err}") from e
if code not in (200, 204):
raise SystemExit(f"PUT unexpected HTTP {code}")
action = "Created" if created_new else "Updated"
print(f"{action} Keycloak client {client_id!r} (redirect URIs + web origins).", flush=True)
footer = {"created": bool(created_new)}
if portal_secret:
footer["clientSecret"] = portal_secret
print("__SANKOFA_KEYCLOAK_FOOTER__" + json.dumps(footer), flush=True)
PY
)"
echo "$OUT" | sed '/__SANKOFA_KEYCLOAK_FOOTER__/d'
FOOTER=$(echo "$OUT" | grep '^__SANKOFA_KEYCLOAK_FOOTER__' | sed 's/^__SANKOFA_KEYCLOAK_FOOTER__//' || true)
if [[ -n "$FOOTER" ]]; then
CREATED="$(echo "$FOOTER" | jq -r '.created // false')"
SEC="$(echo "$FOOTER" | jq -r '.clientSecret // empty')"
if [[ "$CREATED" == "true" ]] && [[ -n "$SEC" ]] && [[ "$SEC" != "null" ]]; then
python3 - "${PROJECT_ROOT}/.env" "${SEC}" <<'PY'
import re
import sys
from pathlib import Path
path, sec = Path(sys.argv[1]), sys.argv[2]
text = path.read_text() if path.exists() else ""
def upsert(body: str, key: str, value: str) -> str:
    """Set ``KEY=value`` on its existing line in *body*, else append it."""
    new_line = f"{key}={value}"
    key_re = rf"^{re.escape(key)}=.*$"
    if re.search(key_re, body, flags=re.M):
        return re.sub(key_re, new_line, body, count=1, flags=re.M)
    if body and not body.endswith("\n"):
        body += "\n"
    return f"{body}{new_line}\n"
text = upsert(text, "KEYCLOAK_CLIENT_SECRET", sec)
path.write_text(text)
PY
echo "[ok] Wrote KEYCLOAK_CLIENT_SECRET to .env (portal Keycloak OIDC path enabled)." >&2
fi
fi

View File

@@ -36,9 +36,22 @@ fi
# 3) Load env for RPC and nonce checks (no secrets printed)
[[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ]] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
set -a
source "$SMOM/.env"
set +a
if [[ -f "$SMOM/scripts/lib/deployment/dotenv.sh" ]]; then
# shellcheck disable=SC1090
source "$SMOM/scripts/lib/deployment/dotenv.sh"
load_deployment_env --repo-root "$SMOM"
else
local_had_nounset=0
if [[ $- == *u* ]]; then
local_had_nounset=1
set +u
fi
set -a
# shellcheck disable=SC1090
source "$SMOM/.env"
set +a
(( local_had_nounset )) && set -u
fi
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
if [[ -z "${PRIVATE_KEY:-}" ]]; then

View File

@@ -0,0 +1,136 @@
#!/usr/bin/env bash
# Rotate the Chain 138 oracle publisher off the shared deployer key by provisioning
# a dedicated transmitter key, adding it to the oracle aggregator, updating CT 3500,
# and removing the legacy deployer transmitter after the new key confirms an update.
#
# Usage:
# bash scripts/deployment/rotate-oracle-publisher-transmitter.sh [--dry-run]
#
# Env overrides:
# PROXMOX_NODE_IP default 192.168.11.12
# ORACLE_VMID default 3500
# ORACLE_SECRET_FILE default ~/.secure-secrets/chain138-oracle-publisher.env
# ORACLE_AGGREGATOR_ADDRESS default 0x99b3511a2d315a497c8112c1fdd8d508d4b1e506
# ORACLE_FUND_WEI default 100000000000000000 (0.1 native token)
# NEW_ORACLE_PRIVATE_KEY optional pre-generated 0x-prefixed key
#
set -euo pipefail
DRY_RUN=0
if [[ "${1:-}" == "--dry-run" ]]; then
DRY_RUN=1
fi
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
cd "$ROOT"
had_nounset=0
if [[ $- == *u* ]]; then
had_nounset=1
set +u
fi
set -a
source scripts/lib/load-project-env.sh >/dev/null 2>&1
set +a
if [[ "$had_nounset" -eq 1 ]]; then
set -u
fi
RPC="${DEPLOY_RPC_URL_138:-${RPC_URL_138:-http://192.168.11.211:8545}}"
NODE_IP="${PROXMOX_NODE_IP:-192.168.11.12}"
ORACLE_VMID="${ORACLE_VMID:-3500}"
AGG="${ORACLE_AGGREGATOR_ADDRESS:-0x99b3511a2d315a497c8112c1fdd8d508d4b1e506}"
SECRET_FILE="${ORACLE_SECRET_FILE:-$HOME/.secure-secrets/chain138-oracle-publisher.env}"
DEPLOYER_ADDR="$(cast wallet address --private-key "$PRIVATE_KEY")"
NEW_KEY="${NEW_ORACLE_PRIVATE_KEY:-0x$(openssl rand -hex 32)}"
NEW_ADDR="$(cast wallet address --private-key "$NEW_KEY")"
ORACLE_FUND_WEI="${ORACLE_FUND_WEI:-100000000000000000}"
echo "Oracle transmitter rotation"
echo " node: $NODE_IP"
echo " vmid: $ORACLE_VMID"
echo " aggregator: $AGG"
echo " admin: $DEPLOYER_ADDR"
echo " new signer: $NEW_ADDR"
echo " secret file: $SECRET_FILE"
echo " fund wei: $ORACLE_FUND_WEI"
if [[ "$DRY_RUN" -eq 1 ]]; then
exit 0
fi
mkdir -p "$(dirname "$SECRET_FILE")"
umask 077
cat >"$SECRET_FILE" <<EOF
CHAIN138_ORACLE_PUBLISHER_PRIVATE_KEY=$NEW_KEY
CHAIN138_ORACLE_PUBLISHER_ADDRESS=$NEW_ADDR
CHAIN138_ORACLE_AGGREGATOR_ADDRESS=$AGG
CHAIN138_ORACLE_ROTATED_AT=$(date -u +%Y-%m-%dT%H:%M:%SZ)
EOF
if [[ "$(cast call "$AGG" 'isTransmitter(address)(bool)' "$NEW_ADDR" --rpc-url "$RPC")" != "true" ]]; then
cast send "$AGG" 'addTransmitter(address)' "$NEW_ADDR" \
--rpc-url "$RPC" \
--private-key "$PRIVATE_KEY" \
--legacy \
--gas-price 1000000000 \
>/dev/null
fi
# Top up the new transmitter from the deployer key if it holds < ORACLE_FUND_WEI.
new_balance="$(cast balance "$NEW_ADDR" --rpc-url "$RPC")"
# NOTE(review): [[ -lt ]] compares via 64-bit shell arithmetic; wei balances above
# ~9.2e18 (~9.2 native tokens) would overflow. Safe for a freshly generated key,
# whose balance starts at zero — confirm if a pre-funded NEW_ORACLE_PRIVATE_KEY is supplied.
if [[ "$new_balance" -lt "$ORACLE_FUND_WEI" ]]; then
  # Legacy (type-0) transaction at a fixed 1 gwei gas price; output discarded.
  cast send "$NEW_ADDR" \
    --value "$ORACLE_FUND_WEI" \
    --rpc-url "$RPC" \
    --private-key "$PRIVATE_KEY" \
    --legacy \
    --gas-price 1000000000 \
    >/dev/null
fi
ssh -o BatchMode=yes -o StrictHostKeyChecking=no "root@$NODE_IP" "\
pct exec $ORACLE_VMID -- bash -lc 'set -euo pipefail; \
ENV=/opt/oracle-publisher/.env; \
grep -q \"^PRIVATE_KEY=\" \$ENV && sed -i \"s|^PRIVATE_KEY=.*|PRIVATE_KEY=$NEW_KEY|\" \$ENV || echo \"PRIVATE_KEY=$NEW_KEY\" >> \$ENV; \
systemctl restart oracle-publisher.service; \
systemctl is-active oracle-publisher.service >/dev/null'"
echo "Waiting for new transmitter to confirm an oracle update..."
tx_hash=""
for _ in {1..24}; do
line="$(ssh -o BatchMode=yes -o StrictHostKeyChecking=no "root@$NODE_IP" "pct exec $ORACLE_VMID -- journalctl -u oracle-publisher.service -n 20 --no-pager | grep 'Transaction confirmed:' | tail -n 1" || true)"
if [[ -n "$line" ]]; then
tx_hash="$(printf '%s' "$line" | grep -oE '0x[a-fA-F0-9]{64}' | tail -n 1 || true)"
fi
if [[ -n "$tx_hash" ]]; then
tx_from="$(cast receipt "$tx_hash" --rpc-url "$RPC" | awk '/^from /{print $2}')"
if [[ "${tx_from,,}" == "${NEW_ADDR,,}" ]]; then
break
fi
fi
sleep 5
done
if [[ -z "$tx_hash" ]]; then
echo "ERROR: No confirmed oracle update observed from the new transmitter." >&2
exit 1
fi
tx_from="$(cast receipt "$tx_hash" --rpc-url "$RPC" | awk '/^from /{print $2}')"
if [[ "${tx_from,,}" != "${NEW_ADDR,,}" ]]; then
echo "ERROR: Latest confirmed oracle update was not sent by the new transmitter: $tx_from" >&2
exit 1
fi
if [[ "$(cast call "$AGG" 'isTransmitter(address)(bool)' "$DEPLOYER_ADDR" --rpc-url "$RPC")" == "true" ]]; then
cast send "$AGG" 'removeTransmitter(address)' "$DEPLOYER_ADDR" \
--rpc-url "$RPC" \
--private-key "$PRIVATE_KEY" \
--legacy \
--gas-price 1000000000 \
>/dev/null
fi
echo "Rotation complete."
echo " new transmitter: $NEW_ADDR"
echo " confirmed tx: $tx_hash"
echo " deployer signer removed from transmitter set."

View File

@@ -17,8 +17,32 @@
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
REPO_ROOT="$PROJECT_ROOT"
SMOM="$PROJECT_ROOT/smom-dbis-138"
# Load smom-dbis-138 deployment environment variables.
# Prefers the submodule's dotenv helper when present; otherwise falls back to
# sourcing $SMOM/.env with auto-export, temporarily relaxing nounset so an
# incomplete .env cannot abort a `set -u` script.
# Returns non-zero when neither the helper nor .env exists.
load_smom_env() {
  local dotenv_helper="$SMOM/scripts/lib/deployment/dotenv.sh"
  local env_file="$SMOM/.env"
  if [[ -f "$dotenv_helper" ]]; then
    # shellcheck disable=SC1090
    source "$dotenv_helper"
    load_deployment_env --repo-root "$SMOM"
    return 0
  fi
  if [[ ! -f "$env_file" ]]; then
    return 1
  fi
  local restore_nounset=0
  case "$-" in
    *u*)
      restore_nounset=1
      set +u
      ;;
  esac
  set -a
  # shellcheck disable=SC1090
  source "$env_file"
  set +a
  if (( restore_nounset )); then
    set -u
  fi
  return 0
}
DRY_RUN=""
SKIP_MIRROR=""
SKIP_MESH=""
@@ -82,8 +106,7 @@ if [[ -z "$SKIP_REGISTER_GRU" ]]; then
if [[ -n "$DRY_RUN" ]]; then
echo "[DRY-RUN] cd $SMOM && forge script script/deploy/RegisterGRUCompliantTokens.s.sol --rpc-url \$RPC_URL_138 --broadcast --private-key \$PRIVATE_KEY --with-gas-price 1000000000"
else
if [[ -f "$SMOM/.env" ]]; then
set -a; source "$SMOM/.env"; set +a
if load_smom_env; then
# Fallback: Register script expects CUSDT_ADDRESS_138/CUSDC_ADDRESS_138; use COMPLIANT_USDT/COMPLIANT_USDC if set
[[ -z "${CUSDT_ADDRESS_138:-}" && -n "${COMPLIANT_USDT:-}" ]] && export CUSDT_ADDRESS_138="$COMPLIANT_USDT"
[[ -z "${CUSDC_ADDRESS_138:-}" && -n "${COMPLIANT_USDC:-}" ]] && export CUSDC_ADDRESS_138="$COMPLIANT_USDC"
@@ -108,8 +131,8 @@ if [[ -z "$SKIP_VERIFY" ]]; then
if [[ -n "$DRY_RUN" ]]; then
echo "[DRY-RUN] $PROJECT_ROOT/scripts/verify/check-contracts-on-chain-138.sh"
else
[[ -f "$SMOM/.env" ]] && set -a && source "$SMOM/.env" && set +a
"$PROJECT_ROOT/scripts/verify/check-contracts-on-chain-138.sh" "${RPC_URL_138:-}" || true
load_smom_env || true
"$REPO_ROOT/scripts/verify/check-contracts-on-chain-138.sh" "${RPC_URL_138:-}" || true
fi
echo ""
else

View File

@@ -3,11 +3,12 @@
# See docs/07-ccip/CW_DEPLOY_AND_WIRE_RUNBOOK.md and docs/00-meta/CW_BRIDGE_TASK_LIST.md.
#
# Usage:
# ./scripts/deployment/run-cw-remaining-steps.sh [--dry-run] [--deploy] [--update-mapping] [--verify]
# ./scripts/deployment/run-cw-remaining-steps.sh [--dry-run] [--deploy] [--update-mapping] [--verify] [--verify-hard-peg]
# --dry-run Run deploy-cw in dry-run mode (print commands only).
# --deploy Run deploy-cw on all chains (requires RPC/PRIVATE_KEY in smom-dbis-138/.env).
# --update-mapping Update config/token-mapping-multichain.json from CWUSDT_*/CWUSDC_* in .env.
# --verify For each chain with CWUSDT_* set, check MINTER_ROLE/BURNER_ROLE on cW* for CW_BRIDGE_*.
# --verify-hard-peg Check Avalanche hard-peg bridge controls for cWUSDT/cWUSDC.
# With no options, runs --dry-run then --update-mapping (if any CWUSDT_* in .env).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
@@ -18,6 +19,7 @@ DRY_RUN=false
DO_DEPLOY=false
DO_UPDATE_MAPPING=false
DO_VERIFY=false
DO_VERIFY_HARD_PEG=false
for a in "$@"; do
case "$a" in
@@ -25,9 +27,10 @@ for a in "$@"; do
--deploy) DO_DEPLOY=true ;;
--update-mapping) DO_UPDATE_MAPPING=true ;;
--verify) DO_VERIFY=true ;;
--verify-hard-peg) DO_VERIFY_HARD_PEG=true ;;
esac
done
if ! $DRY_RUN && ! $DO_DEPLOY && ! $DO_UPDATE_MAPPING && ! $DO_VERIFY; then
if ! $DRY_RUN && ! $DO_DEPLOY && ! $DO_UPDATE_MAPPING && ! $DO_VERIFY && ! $DO_VERIFY_HARD_PEG; then
DRY_RUN=true
DO_UPDATE_MAPPING=true
fi
@@ -37,7 +40,9 @@ if [[ ! -f "$SMOM/.env" ]]; then
exit 1
fi
set -a
set +u
source "$SMOM/.env"
set -u
set +a
# Chain name (env suffix) -> chainId for 138 -> chain pairs
@@ -135,4 +140,57 @@ if $DO_VERIFY; then
done
fi
# Run `cast call` against a contract, degrading gracefully instead of failing:
#   - prints "unavailable"           when the RPC URL or address is empty
#   - prints "legacy-or-unavailable" when the call itself errors (stderr hidden)
# Usage: call_or_unavailable <rpc> <address> <signature> [call-args...]
# Always returns 0.
call_or_unavailable() {
  local rpc_url="$1" target="$2" sig="$3"
  shift 3
  if [[ -n "$rpc_url" && -n "$target" ]]; then
    if ! cast call "$target" "$sig" "$@" --rpc-url "$rpc_url" 2>/dev/null; then
      printf 'legacy-or-unavailable\n'
    fi
  else
    printf 'unavailable\n'
  fi
}
# Read-only verification of the Avalanche hard-peg wiring for cUSDT/cUSDC:
# token support and outstanding caps on the Chain 138 L1 bridge, pair/destination
# freeze flags on the Avalanche cW bridge, and (when configured) the reserve
# verifier's per-token config and verification status. Uses `cast call` only.
if $DO_VERIFY_HARD_PEG; then
  echo "=== Verify Avalanche hard-peg bridge state ==="
  CHAIN138_L1_BRIDGE="${CW_L1_BRIDGE_CHAIN138:-}"
  AVAX_CW_BRIDGE="${CW_BRIDGE_AVALANCHE:-}"
  RESERVE_VERIFIER="${CW_RESERVE_VERIFIER_CHAIN138:-}"
  # CCIP chain selector for Avalanche (overridable via AVALANCHE_SELECTOR).
  AVALANCHE_SELECTOR_VALUE="${AVALANCHE_SELECTOR:-6433500567565415381}"
  # Canonical token addresses: first set of CW_CANONICAL_*, COMPLIANT_*_ADDRESS,
  # then C*_ADDRESS_138 wins.
  CW_CANONICAL_USDT_ADDR="${CW_CANONICAL_USDT:-${COMPLIANT_USDT_ADDRESS:-${CUSDT_ADDRESS_138:-}}}"
  CW_CANONICAL_USDC_ADDR="${CW_CANONICAL_USDC:-${COMPLIANT_USDC_ADDRESS:-${CUSDC_ADDRESS_138:-}}}"
  echo " Chain 138 L1 bridge: $CHAIN138_L1_BRIDGE"
  echo " Avalanche cW bridge: $AVAX_CW_BRIDGE"
  echo " Reserve verifier: ${RESERVE_VERIFIER:-unconfigured}"
  echo " Avalanche selector: $AVALANCHE_SELECTOR_VALUE"
  if [[ -n "$CHAIN138_L1_BRIDGE" ]]; then
    echo " L1 bridge reserveVerifier(): $(call_or_unavailable "$RPC_URL_138" "$CHAIN138_L1_BRIDGE" "reserveVerifier()(address)")"
  fi
  # Each entry is "label:canonical-token:desired-cap" (cap may be empty).
  # NOTE(review): the bare $CW_MAX_OUTSTANDING_*_AVALANCHE expansions abort under
  # `set -u` when unset — confirm the .env always defines them (or add :- defaults).
  # NOTE(review): IFS=":" splits on every colon, so token/cap values must not contain ':'.
  for entry in "cUSDT:$CW_CANONICAL_USDT_ADDR:$CW_MAX_OUTSTANDING_USDT_AVALANCHE" "cUSDC:$CW_CANONICAL_USDC_ADDR:$CW_MAX_OUTSTANDING_USDC_AVALANCHE"; do
    IFS=":" read -r label token desired_cap <<<"$entry"
    if [[ -z "$token" ]]; then
      echo " $label: canonical token not set"
      continue
    fi
    echo " $label supportedCanonicalToken(): $(call_or_unavailable "$RPC_URL_138" "$CHAIN138_L1_BRIDGE" "supportedCanonicalToken(address)(bool)" "$token")"
    echo " $label maxOutstanding(): $(call_or_unavailable "$RPC_URL_138" "$CHAIN138_L1_BRIDGE" "maxOutstanding(address,uint64)(uint256)" "$token" "$AVALANCHE_SELECTOR_VALUE")"
    if [[ -n "$desired_cap" ]]; then
      echo " $label desired maxOutstanding env: $desired_cap"
    fi
    echo " $label tokenPairFrozen(): $(call_or_unavailable "$AVALANCHE_RPC_URL" "$AVAX_CW_BRIDGE" "tokenPairFrozen(address)(bool)" "$token")"
    if [[ -n "$RESERVE_VERIFIER" ]]; then
      # tr + xargs collapse cast's multi-line tuple output onto one trimmed line.
      echo " $label verifier tokenConfigs(): $(call_or_unavailable "$RPC_URL_138" "$RESERVE_VERIFIER" "tokenConfigs(address)(bool,address,bool,bool,bool)" "$token" | tr '\n' ' ' | xargs)"
      echo " $label verifier getVerificationStatus(): $(call_or_unavailable "$RPC_URL_138" "$RESERVE_VERIFIER" "getVerificationStatus(address,uint64)((bool,bool,bool,bool,bool,bool,bool,bool,uint256,uint256,uint256,uint256,uint256,uint256,uint256,uint256))" "$token" "$AVALANCHE_SELECTOR_VALUE" | tr '\n' ' ' | xargs)"
    fi
  done
  echo " Avalanche destinationFrozen(138): $(call_or_unavailable "$AVALANCHE_RPC_URL" "$AVAX_CW_BRIDGE" "destinationFrozen(uint64)(bool)" 138)"
fi
echo "Done. See docs/07-ccip/CW_DEPLOY_AND_WIRE_RUNBOOK.md for Phase E (relay and E2E)."

View File

@@ -0,0 +1,102 @@
#!/usr/bin/env bash
# Merge Keycloak OIDC settings from the operator repo .env into LXC 7801
# (/opt/sankofa-portal/.env and .env.local). Uses base64 over ssh for the client secret
# so special characters in KEYCLOAK_CLIENT_SECRET do not break the remote shell.
#
# Requires KEYCLOAK_CLIENT_SECRET (and loads repo .env via load-project-env when sourced
# from repo root, or export vars before calling).
#
# Run after creating the confidential client (e.g. keycloak-sankofa-ensure-client-redirects*.sh)
# or when rotating KEYCLOAK_CLIENT_SECRET.
#
# Usage:
# ./scripts/deployment/sankofa-portal-merge-keycloak-env-from-repo.sh
# ./scripts/deployment/sankofa-portal-merge-keycloak-env-from-repo.sh --dry-run
# ./scripts/deployment/sankofa-portal-merge-keycloak-env-from-repo.sh --no-restart
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Best-effort sourcing: repo env vars and the LAN IP map; missing files tolerated.
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/scripts/lib/load-project-env.sh" 2>/dev/null || true
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Proxmox node hosting the container, container id, install dir, service name.
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_R630_01:-192.168.11.11}}"
VMID="${SANKOFA_PORTAL_VMID:-7801}"
CT_DIR="${SANKOFA_PORTAL_CT_DIR:-/opt/sankofa-portal}"
SERVICE_NAME="${SANKOFA_PORTAL_SERVICE:-sankofa-portal}"
SSH_OPTS=(-o BatchMode=yes -o ConnectTimeout=15 -o StrictHostKeyChecking=accept-new)
# Keycloak endpoint / realm / client id defaults (override via environment).
KC_URL="${KEYCLOAK_URL:-https://keycloak.sankofa.nexus}"
KC_REALM="${KEYCLOAK_REALM:-master}"
KC_CID="${KEYCLOAK_CLIENT_ID:-sankofa-portal}"
DRY_RUN=false
NO_RESTART=false
for arg in "$@"; do
  case "$arg" in
    --dry-run) DRY_RUN=true ;;
    --no-restart) NO_RESTART=true ;;
  esac
done
# The client secret is mandatory: everything below exists to ship it to the CT.
if [[ -z "${KEYCLOAK_CLIENT_SECRET:-}" ]]; then
  echo "ERROR: KEYCLOAK_CLIENT_SECRET is not set. Add it to repo .env (from Keycloak admin or" >&2
  echo " keycloak-sankofa-ensure-client-redirects-via-proxmox-pct.sh when the client is created)," >&2
  echo " then: source scripts/lib/load-project-env.sh && $0" >&2
  exit 1
fi
if $DRY_RUN; then
  echo "[DRY-RUN] Would upsert KEYCLOAK_* on CT ${VMID} ${CT_DIR}/.env and .env.local"
  echo "[DRY-RUN] KEYCLOAK_URL=${KC_URL} KEYCLOAK_REALM=${KC_REALM} KEYCLOAK_CLIENT_ID=${KC_CID}"
  echo "[DRY-RUN] restart: $([[ "$NO_RESTART" == true ]] && echo no || echo yes)"
  exit 0
fi
# Base64-encode the secret so it crosses the ssh + pct quoting layers unharmed.
# NOTE(review): `base64 -w0` is GNU coreutils; macOS base64 lacks -w — confirm
# this script only runs on Linux operator hosts.
B64="$(printf '%s' "$KEYCLOAK_CLIENT_SECRET" | base64 -w0)"
# Run the upsert inside the container. Values travel via `env`, not script text,
# and the Python body follows in a quoted heredoc (no local expansion).
ssh "${SSH_OPTS[@]}" "root@${PROXMOX_HOST}" \
  "pct exec ${VMID} -- env KCSEC_B64='${B64}' KC_URL='${KC_URL}' KC_REALM='${KC_REALM}' KC_CID='${KC_CID}' CT_DIR='${CT_DIR}' python3 -" <<'PY'
import base64
import os
import re
from pathlib import Path
# The secret arrives base64-encoded (KCSEC_B64) so arbitrary characters survive
# the ssh/pct shell layers; decode it back to the raw client secret here.
sec = base64.b64decode(os.environ["KCSEC_B64"]).decode("utf-8")
# Container-side directory holding the portal's .env / .env.local files.
ct = Path(os.environ["CT_DIR"])
# KEY=VALUE pairs to upsert into both env files.
keys = {
    "KEYCLOAK_URL": os.environ["KC_URL"],
    "KEYCLOAK_REALM": os.environ["KC_REALM"],
    "KEYCLOAK_CLIENT_ID": os.environ["KC_CID"],
    "KEYCLOAK_CLIENT_SECRET": sec,
}
def upsert(text: str, k: str, v: str) -> str:
    """Return `text` with the first `k=...` line replaced by `k=v`, or append it.

    Matching is line-anchored (re.M) and only the first occurrence is rewritten;
    appending guarantees the file stays newline-terminated.
    """
    line = f"{k}={v}"
    if re.search(rf"^{re.escape(k)}=", text, flags=re.M):
        # Use a callable replacement: client secrets may contain backslashes,
        # which re.sub would otherwise treat as escape/group references
        # (e.g. "\1" or "\b" would raise re.error or corrupt the value).
        return re.sub(
            rf"^{re.escape(k)}=.*$", lambda _m: line, text, flags=re.M, count=1
        )
    if text and not text.endswith("\n"):
        text += "\n"
    return text + line + "\n"
# Upsert every Keycloak key into both env files (creating a file if missing).
for fname in (".env", ".env.local"):
    p = ct / fname
    body = p.read_text() if p.exists() else ""
    for k, v in keys.items():
        body = upsert(body, k, v)
    p.parent.mkdir(parents=True, exist_ok=True)
    p.write_text(body)
    print(f"upserted Keycloak keys in {p}")
PY
if [[ "$NO_RESTART" == true ]]; then
  echo "[ok] Keycloak OIDC vars merged on CT ${VMID} (no service restart)."
else
  # Restart the portal so it picks up the new OIDC settings, then confirm the
  # unit is active (non-zero exit from is-active fails the whole ssh command).
  ssh "${SSH_OPTS[@]}" "root@${PROXMOX_HOST}" \
    "pct exec ${VMID} -- systemctl restart ${SERVICE_NAME} && pct exec ${VMID} -- systemctl is-active ${SERVICE_NAME}"
  echo "[ok] Keycloak OIDC vars merged on CT ${VMID}; ${SERVICE_NAME} restarted."
fi

View File

@@ -3,8 +3,9 @@
# then run RegisterGRUCompliantTokens to register all c* as GRU in UniversalAssetRegistry.
#
# Addresses are from docs/11-references/CONTRACT_ADDRESSES_REFERENCE.md and TOKENS_AND_NETWORKS_MINTABLE_TO_DEPLOYER.md.
# Usage: ./scripts/deployment/set-dotenv-c-tokens-and-register-gru.sh [--no-register]
# Usage: ./scripts/deployment/set-dotenv-c-tokens-and-register-gru.sh [--no-register] [--register-v2]
# --no-register Only update .env; do not run RegisterGRUCompliantTokens.
# --register-v2 After V1 registration, also register staged V2 cUSDT/cUSDC using RegisterGRUCompliantTokensV2.
#
# Note: RegisterGRUCompliantTokens requires (1) broadcast account has REGISTRAR_ROLE, and (2) the
# UniversalAssetRegistry *implementation* (not just proxy) exposes registerGRUCompliantAsset.
@@ -21,7 +22,11 @@ SMOM="$PROJECT_ROOT/smom-dbis-138"
ENV_FILE="$SMOM/.env"
RUN_REGISTER=1
for a in "$@"; do [[ "$a" == "--no-register" ]] && RUN_REGISTER=0; done
RUN_REGISTER_V2=0
for a in "$@"; do
[[ "$a" == "--no-register" ]] && RUN_REGISTER=0
[[ "$a" == "--register-v2" ]] && RUN_REGISTER_V2=1
done
if [[ ! -f "$ENV_FILE" ]]; then
echo "Missing $ENV_FILE. Create it first (e.g. copy from .env.example)." >&2
@@ -45,6 +50,10 @@ set_env_var "COMPLIANT_USDT" "0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
set_env_var "COMPLIANT_USDC" "0xf22258f57794CC8E06237084b353Ab30fFfa640b"
set_env_var "CUSDT_ADDRESS_138" "0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
set_env_var "CUSDC_ADDRESS_138" "0xf22258f57794CC8E06237084b353Ab30fFfa640b"
set_env_var "COMPLIANT_USDT_V2" "0x8d342d321DdEe97D0c5011DAF8ca0B59DA617D29"
set_env_var "COMPLIANT_USDC_V2" "0x1ac3F4942a71E86A9682D91837E1E71b7BACdF99"
set_env_var "CUSDT_V2_ADDRESS_138" "0x8d342d321DdEe97D0c5011DAF8ca0B59DA617D29"
set_env_var "CUSDC_V2_ADDRESS_138" "0x1ac3F4942a71E86A9682D91837E1E71b7BACdF99"
# cEURC (TOKENS_AND_NETWORKS_MINTABLE_TO_DEPLOYER)
set_env_var "CEURC_ADDRESS_138" "0x8085961F9cF02b4d800A3c6d386D31da4B34266a"
@@ -68,7 +77,7 @@ set_env_var "WETH10" "0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"
set_env_var "LINK_TOKEN" "0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03"
set_env_var "CCIP_FEE_TOKEN" "0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03"
echo "Done. Set: COMPLIANT_USDT, COMPLIANT_USDC, all C*_ADDRESS_138 (cUSDT, cUSDC, cEURC, cEURT, cGBPC, cGBPT, cAUDC, cJPYC, cCHFC, cCADC, cXAUC, cXAUT), UNIVERSAL_ASSET_REGISTRY, WETH9, WETH10, LINK_TOKEN, CCIP_FEE_TOKEN."
echo "Done. Set: COMPLIANT_USDT, COMPLIANT_USDC, COMPLIANT_USDT_V2, COMPLIANT_USDC_V2, all C*_ADDRESS_138 (including staged V2 addresses), UNIVERSAL_ASSET_REGISTRY, WETH9, WETH10, LINK_TOKEN, CCIP_FEE_TOKEN."
echo "All c* on explorer.d-bis.org/tokens must be GRU-registered. See docs/04-configuration/EXPLORER_TOKENS_GRU_POLICY.md."
if [[ "$RUN_REGISTER" -eq 0 ]]; then
@@ -85,4 +94,15 @@ RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
export RPC_URL_138="$RPC"
(cd "$SMOM" && forge script script/deploy/RegisterGRUCompliantTokens.s.sol \
--rpc-url "$RPC_URL_138" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price 1000000000)
if [[ "$RUN_REGISTER_V2" -eq 1 ]]; then
echo ""
echo "=== Registering staged c* V2 inventory as GRU (RegisterGRUCompliantTokensV2) ==="
(cd "$SMOM" && forge script script/deploy/RegisterGRUCompliantTokensV2.s.sol \
--rpc-url "$RPC_URL_138" --broadcast --private-key "$PRIVATE_KEY" --with-gas-price 1000000000)
else
echo ""
echo "V2 note: COMPLIANT_USDT_V2 / COMPLIANT_USDC_V2 are staged in .env but not auto-registered."
echo "Use --register-v2 once downstream registry consumers are ready for version-aware symbols."
fi
echo "=== Done. ==="

View File

@@ -0,0 +1,58 @@
#!/usr/bin/env bash
# Ensure the explorer nginx HTTP server block proxies /api/v1/ to token-aggregation.
# Run inside VMID 5000.
set -euo pipefail
# Target nginx vhost and local token-aggregation port (both overridable via env).
CONFIG_FILE="${CONFIG_FILE:-/etc/nginx/sites-available/blockscout}"
TOKEN_AGG_PORT="${TOKEN_AGG_PORT:-3001}"
if [[ ! -f "$CONFIG_FILE" ]]; then
  echo "Config not found: $CONFIG_FILE" >&2
  exit 1
fi
# In-place edit via Python: insert a /api/v1/ proxy location into the HTTP
# server block, just before the existing "Blockscout API endpoint" marker.
# Idempotent: exits 0 without changes when the location is already present.
python3 - "$CONFIG_FILE" "$TOKEN_AGG_PORT" <<'PY'
from pathlib import Path
import sys
cfg = Path(sys.argv[1])
port = sys.argv[2]
text = cfg.read_text()
# Split at the HTTPS banner so the edit only ever touches the HTTP block.
parts = text.split("# HTTPS server - Blockscout Explorer", 1)
if len(parts) != 2:
    raise SystemExit("Could not locate HTTP/HTTPS server boundary")
http_block, https_block = parts
if "location /api/v1/" in http_block:
    print("HTTP block already has /api/v1/")
    raise SystemExit(0)
# NOTE(review): the marker's leading whitespace must byte-match the deployed
# vhost file for the replace below to fire — confirm against the live config.
marker = " # Blockscout API endpoint - MUST come before the redirect location\n"
if marker not in http_block:
    raise SystemExit("HTTP block marker not found")
# New location block; doubled braces ({{ }}) emit literal braces from the f-string.
snippet = f""" # Token-aggregation API (Chain 138 Snap: market data, swap quote, bridge routes)
 location /api/v1/ {{
 proxy_pass http://127.0.0.1:{port}/api/v1/;
 proxy_http_version 1.1;
 proxy_set_header Host $host;
 proxy_set_header X-Real-IP $remote_addr;
 proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
 proxy_set_header X-Forwarded-Proto $scheme;
 proxy_read_timeout 60s;
 add_header Access-Control-Allow-Origin *;
 }}
 # Blockscout API endpoint - MUST come before the redirect location
"""
# Replace the marker with snippet + marker (snippet re-appends the marker line).
http_block = http_block.replace(marker, snippet, 1)
cfg.write_text(http_block + "# HTTPS server - Blockscout Explorer" + https_block)
print(f"Inserted HTTP /api/v1/ proxy to 127.0.0.1:{port}")
PY
# Validate and hot-reload nginx only after a syntactically valid config.
nginx -t
nginx -s reload

View File

@@ -24,6 +24,7 @@ IP_TO_VMID = {
"192.168.11.51": "7801",
"192.168.11.52": "7802",
"192.168.11.53": "7803",
"192.168.11.94": "10092",
"192.168.11.57": "6201",
"192.168.11.64": "6400",
"192.168.11.65": "6000",
@@ -79,6 +80,7 @@ IP_TO_HOSTNAME = {
"192.168.11.51": "sankofa-portal-1",
"192.168.11.52": "sankofa-keycloak-1",
"192.168.11.53": "sankofa-postgres-1",
"192.168.11.94": "order-mcp-legal",
"192.168.11.57": "firefly-ali-1",
"192.168.11.64": "indy-1",
"192.168.11.65": "fabric-1",

View File

@@ -0,0 +1,83 @@
#!/usr/bin/env python3
"""Export the JVMTM transaction-grade compliance matrix JSON to CSV."""
from __future__ import annotations
import csv
import json
import sys
from pathlib import Path
# CSV column order for the exported matrix; render_rows() must emit exactly
# these keys for every control, and DictWriter writes them in this order.
FIELDNAMES = [
    "control_id",
    "phase",
    "domain",
    "requirement",
    "validation_method",
    "blocking_level",
    "applies_to_rail",
    "source_audit_rows",
    "repo_evidence_artifacts",
    "validator_command",
    "failure_action",
    "high_value_override",
    "notes",
]
def format_artifacts(artifacts: list[dict[str, str]]) -> str:
    """Render evidence artifacts as "artifact_type:ref" entries joined by " | "."""
    rendered = [f"{item['artifact_type']}:{item['ref']}" for item in artifacts]
    return " | ".join(rendered)
def render_rows(matrix: dict) -> list[dict[str, str]]:
    """Flatten matrix["controls"] into CSV-ready rows, one dict per control.

    List-valued fields are joined with " | "; evidence artifacts are rendered
    via format_artifacts(). Row keys follow FIELDNAMES order.
    """
    # Fields whose values are lists of strings and must be joined into one cell.
    joined_fields = {"applies_to_rail", "source_audit_rows"}
    rows: list[dict[str, str]] = []
    for control in matrix["controls"]:
        row: dict[str, str] = {}
        for field in FIELDNAMES:
            if field == "repo_evidence_artifacts":
                row[field] = format_artifacts(control[field])
            elif field in joined_fields:
                row[field] = " | ".join(control[field])
            else:
                row[field] = control[field]
        rows.append(row)
    return rows
def main() -> int:
    """Read the matrix JSON and write the flattened CSV; paths default to the
    repo's config/jvmtm-regulatory-closure directory but may be overridden by
    argv[1] (matrix JSON) and argv[2] (output CSV). Returns 0 on success."""
    repo_root = Path(__file__).resolve().parents[2]
    default_dir = repo_root / "config/jvmtm-regulatory-closure"
    argv = sys.argv
    matrix_path = (
        Path(argv[1]) if len(argv) > 1 else default_dir / "transaction-compliance-matrix.json"
    )
    csv_path = (
        Path(argv[2]) if len(argv) > 2 else default_dir / "transaction-compliance-matrix.csv"
    )
    matrix = json.loads(matrix_path.read_text(encoding="utf-8"))
    rows = render_rows(matrix)
    csv_path.parent.mkdir(parents=True, exist_ok=True)
    # newline="" + explicit lineterminator keep output byte-stable across platforms.
    with csv_path.open("w", encoding="utf-8", newline="") as handle:
        writer = csv.DictWriter(handle, fieldnames=FIELDNAMES, lineterminator="\n")
        writer.writeheader()
        writer.writerows(rows)
    print(f"Wrote {csv_path} ({len(rows)} controls)")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -14,10 +14,46 @@ VMID_MIM_WEB="${VMID_MIM_WEB:-7810}"
PROXMOX_HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
MIM_WEB_IP="${IP_MIM_WEB:-192.168.11.37}"
DEST="/var/www/html"
MIM_API_UPSTREAM="${MIM_API_UPSTREAM:-http://192.168.11.36:3001}"
echo "Building MIM4U frontend..."
(cd "$MIM_ROOT" && npm run build)
echo "Deploying dist to root@$PROXMOX_HOST (pct exec $VMID_MIM_WEB) at $DEST ..."
# Copy into container: tar from host, extract in container
tar czf - -C "$MIM_ROOT/dist" . | ssh "root@$PROXMOX_HOST" "pct exec $VMID_MIM_WEB -- tar xzf - -C $DEST"
echo "Ensuring nginx proxies /api/ to $MIM_API_UPSTREAM ..."
ssh "root@$PROXMOX_HOST" "pct exec $VMID_MIM_WEB -- bash -lc 'cat > /etc/nginx/sites-available/mim4u <<\"EOF\"
server {
listen 80;
server_name mim4u.org www.mim4u.org secure.mim4u.org training.mim4u.org;
root /var/www/html;
index index.html index.htm;
location /api/ {
proxy_pass ${MIM_API_UPSTREAM}/api/;
proxy_http_version 1.1;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
location / {
try_files \$uri \$uri/ /index.html;
}
location /health {
access_log off;
return 200 \"healthy\\n\";
add_header Content-Type text/plain;
}
location ~* \\.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot)$ {
expires 1y;
add_header Cache-Control \"public, immutable\";
}
}
EOF
nginx -t && systemctl reload nginx'"
echo "Done. Verify: curl -I http://${MIM_WEB_IP}:80/"

View File

@@ -46,8 +46,13 @@ const DOMAINS = [
// d-bis.org zone - Private RPC endpoints (VMID 2101: besu-rpc-core-1)
{ domain: 'rpc-http-prv.d-bis.org', target: 'http://192.168.11.211:8545', websocket: true }, // VMID 2101
{ domain: 'rpc-ws-prv.d-bis.org', target: 'http://192.168.11.211:8546', websocket: true }, // VMID 2101
{ domain: 'rpc-core.d-bis.org', target: 'http://192.168.11.211:8545', websocket: true }, // VMID 2101 (alias)
// d-bis.org zone - DBIS Core Services
// d-bis.org zone - DBIS Core Services (canonical + legacy)
{ domain: 'd-bis.org', target: 'http://192.168.11.54:3001', websocket: false }, // Gov Portals DBIS VMID 7804
{ domain: 'www.d-bis.org', target: 'http://192.168.11.54:3001', websocket: false },
{ domain: 'admin.d-bis.org', target: 'http://192.168.11.130:80', websocket: false },
{ domain: 'core.d-bis.org', target: 'http://192.168.11.155:3000', websocket: false },
{ domain: 'dbis-admin.d-bis.org', target: 'http://192.168.11.130:80', websocket: false }, // VMID 10130: dbis-frontend
{ domain: 'dbis-api.d-bis.org', target: 'http://192.168.11.155:3000', websocket: false }, // VMID 10150: dbis-api-primary
{ domain: 'dbis-api-2.d-bis.org', target: 'http://192.168.11.156:3000', websocket: false }, // VMID 10151: dbis-api-secondary
@@ -63,8 +68,9 @@ const DOMAINS = [
// defi-oracle.io zone - ThirdWeb RPC (VMID 2400: thirdweb-rpc-1)
// Note: Uses HTTPS and port 443 (Nginx with RPC Translator)
{ domain: 'rpc.public-0138.defi-oracle.io', target: 'https://192.168.11.240:443', websocket: true }, // VMID 2400
{ domain: 'rpc.defi-oracle.io', target: 'https://192.168.11.240:443', websocket: true }, // VMID 2400 - HTTP RPC
{ domain: 'wss.defi-oracle.io', target: 'https://192.168.11.240:443', websocket: true }, // VMID 2400 - WebSocket RPC
// Align with update-npmplus-proxy-hosts-api.sh + RPC_ENDPOINTS_MASTER: defi-oracle rpc/wss → VMID 2201 Besu (not 2400)
{ domain: 'rpc.defi-oracle.io', target: 'http://192.168.11.221:8545', websocket: true },
{ domain: 'wss.defi-oracle.io', target: 'http://192.168.11.221:8546', websocket: true },
];
// www.* domains that redirect to parent domains

View File

@@ -52,8 +52,8 @@ echo "🔄 Updating NPMplus Proxy Hosts via API"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# NPMplus API can stall indefinitely without --max-time (override e.g. NPM_CURL_MAX_TIME=300)
NPM_CURL_MAX_TIME="${NPM_CURL_MAX_TIME:-120}"
# NPMplus API can stall indefinitely without --max-time; large proxy-host lists may need 300s+ (override NPM_CURL_MAX_TIME)
NPM_CURL_MAX_TIME="${NPM_CURL_MAX_TIME:-300}"
# -L: port 81 often 301s HTTP→HTTPS; POST /api/tokens without -L returns 400 "Payload is undefined"
curl_npm() { curl -s -k -L --connect-timeout 10 --max-time "$NPM_CURL_MAX_TIME" "$@"; }
@@ -335,6 +335,8 @@ update_proxy_host "rpc-http-pub.d-bis.org" "http://${RPC_PUBLIC_1}:8545" true fa
update_proxy_host "rpc-ws-pub.d-bis.org" "http://${RPC_PUBLIC_1}:8546" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "rpc-ws-pub.d-bis.org" "${RPC_PUBLIC_1}" 8546 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "rpc-http-prv.d-bis.org" "http://${RPC_CORE_1}:8545" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "rpc-http-prv.d-bis.org" "${RPC_CORE_1}" 8545 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "rpc-ws-prv.d-bis.org" "http://${RPC_CORE_1}:8546" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "rpc-ws-prv.d-bis.org" "${RPC_CORE_1}" 8546 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# rpc-core.d-bis.org — same Besu backend as rpc-http-prv (VMID 2101); public HTTPS alias for wallets/scripts off-LAN
update_proxy_host "rpc-core.d-bis.org" "http://${RPC_CORE_1}:8545" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "rpc-core.d-bis.org" "${RPC_CORE_1}" 8545 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# ThirdWeb Admin Core RPC — VMID ${RPC_THIRDWEB_ADMIN_CORE_VMID:-2103} @ ${RPC_THIRDWEB_ADMIN_CORE} (HTTPS + WSS via NPMplus; block_exploits off for JSON-RPC POST)
RPC_THIRDWEB_ADMIN_CORE="${RPC_THIRDWEB_ADMIN_CORE:-192.168.11.217}"
update_proxy_host "rpc.tw-core.d-bis.org" "http://${RPC_THIRDWEB_ADMIN_CORE}:8545" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "rpc.tw-core.d-bis.org" "${RPC_THIRDWEB_ADMIN_CORE}" 8545 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
@@ -356,10 +358,25 @@ update_proxy_host "ws.rpc2.d-bis.org" "http://${RPC_PUBLIC_1}:8546" true false &
# Fireblocks-dedicated RPC (VMID 2301)
update_proxy_host "rpc-fireblocks.d-bis.org" "http://${RPC_FIREBLOCKS_1:-${RPC_PRIVATE_1}}:8545" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "rpc-fireblocks.d-bis.org" "${RPC_FIREBLOCKS_1:-${RPC_PRIVATE_1}}" 8545 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "ws.rpc-fireblocks.d-bis.org" "http://${RPC_FIREBLOCKS_1:-${RPC_PRIVATE_1}}:8546" true false && updated_count=$((updated_count + 1)) || { add_proxy_host "ws.rpc-fireblocks.d-bis.org" "${RPC_FIREBLOCKS_1:-${RPC_PRIVATE_1}}" 8546 true false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "dbis-admin.d-bis.org" "http://${IP_DBIS_FRONTEND:-192.168.11.130}:80" false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))
# DBIS canonical web — DBIS_INSTITUTIONAL_SUBDOMAINS.md (d-bis.org public, admin, secure, core; legacy dbis-admin)
IP_DBIS_PUBLIC_APEX="${IP_DBIS_PUBLIC_APEX:-192.168.11.54}"
DBIS_PUBLIC_APEX_PORT="${DBIS_PUBLIC_APEX_PORT:-3001}"
IP_DBIS_CORE_CLIENT="${IP_DBIS_CORE_CLIENT:-192.168.11.155}"
DBIS_CORE_CLIENT_PORT="${DBIS_CORE_CLIENT_PORT:-3000}"
update_proxy_host "d-bis.org" "http://${IP_DBIS_PUBLIC_APEX}:${DBIS_PUBLIC_APEX_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "d-bis.org" "${IP_DBIS_PUBLIC_APEX}" "${DBIS_PUBLIC_APEX_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "www.d-bis.org" "http://${IP_DBIS_PUBLIC_APEX}:${DBIS_PUBLIC_APEX_PORT}" false false "https://d-bis.org" && updated_count=$((updated_count + 1)) || { add_proxy_host "www.d-bis.org" "${IP_DBIS_PUBLIC_APEX}" "${DBIS_PUBLIC_APEX_PORT}" false false "https://d-bis.org" && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "admin.d-bis.org" "http://${IP_DBIS_FRONTEND:-192.168.11.130}:80" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "admin.d-bis.org" "${IP_DBIS_FRONTEND:-192.168.11.130}" 80 false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "core.d-bis.org" "http://${IP_DBIS_CORE_CLIENT}:${DBIS_CORE_CLIENT_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "core.d-bis.org" "${IP_DBIS_CORE_CLIENT}" "${DBIS_CORE_CLIENT_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "dbis-admin.d-bis.org" "http://${IP_DBIS_FRONTEND:-192.168.11.130}:80" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "dbis-admin.d-bis.org" "${IP_DBIS_FRONTEND:-192.168.11.130}" 80 false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "dbis-api.d-bis.org" "http://${IP_DBIS_API:-192.168.11.155}:3000" false && updated_count=$((updated_count + 1)) || failed_count=$((failed_count + 1))
update_proxy_host "dbis-api-2.d-bis.org" "http://${IP_DBIS_API_2:-192.168.11.156}:3000" false && updated_count=$((updated_count + 1)) || { add_proxy_host "dbis-api-2.d-bis.org" "${IP_DBIS_API_2:-192.168.11.156}" 3000 false true && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "secure.d-bis.org" "http://${IP_DBIS_FRONTEND:-192.168.11.130}:80" false && updated_count=$((updated_count + 1)) || { add_proxy_host "secure.d-bis.org" "${IP_DBIS_FRONTEND:-192.168.11.130}" 80 false true && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# DBIS institutional program hostnames — same public apex as d-bis.org until dedicated apps split (DBIS_INSTITUTIONAL_SUBDOMAINS.md)
for _dbis_inst in members developers research policy ops identity status sandbox interop; do
update_proxy_host "${_dbis_inst}.d-bis.org" "http://${IP_DBIS_PUBLIC_APEX}:${DBIS_PUBLIC_APEX_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "${_dbis_inst}.d-bis.org" "${IP_DBIS_PUBLIC_APEX}" "${DBIS_PUBLIC_APEX_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
done
# data.d-bis.org — E2E checks /v1/health; align with primary DBIS API
update_proxy_host "data.d-bis.org" "http://${IP_DBIS_API:-192.168.11.155}:3000" false && updated_count=$((updated_count + 1)) || { add_proxy_host "data.d-bis.org" "${IP_DBIS_API:-192.168.11.155}" 3000 false true && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# DApp (VMID 5801) — frontend-dapp for Chain 138 bridge
update_proxy_host "dapp.d-bis.org" "http://${IP_DAPP_LXC:-192.168.11.58}:80" false && updated_count=$((updated_count + 1)) || { add_proxy_host "dapp.d-bis.org" "${IP_DAPP_LXC:-192.168.11.58}" 80 false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# MIM4U - VMID 7810 (mim-web-1) @ ${IP_MIM_WEB:-192.168.11.37} - Web Frontend serves main site and proxies /api/* to 7811
@@ -380,10 +397,11 @@ IP_SANKOFA_PORTAL="${IP_SANKOFA_PORTAL:-${IP_SERVICE_51:-192.168.11.51}}"
IP_SANKOFA_PHOENIX_API="${IP_SANKOFA_PHOENIX_API:-${IP_SERVICE_50:-192.168.11.50}}"
SANKOFA_PORTAL_PORT="${SANKOFA_PORTAL_PORT:-3000}"
SANKOFA_PHOENIX_API_PORT="${SANKOFA_PHOENIX_API_PORT:-4000}"
IP_SANKOFA_PUBLIC_WEB="${IP_SANKOFA_PUBLIC_WEB:-${IP_SANKOFA_PORTAL}}"
SANKOFA_PUBLIC_WEB_PORT="${SANKOFA_PUBLIC_WEB_PORT:-${SANKOFA_PORTAL_PORT}}"
# Resolved before portal/SSO rows so dash can default to client SSO stack
IP_SANKOFA_CLIENT_SSO="${IP_SANKOFA_CLIENT_SSO:-${IP_SANKOFA_PORTAL}}"
SANKOFA_CLIENT_SSO_PORT="${SANKOFA_CLIENT_SSO_PORT:-${SANKOFA_PORTAL_PORT}}"
IP_SANKOFA_PUBLIC_WEB="${IP_SANKOFA_PUBLIC_WEB:-${IP_SANKOFA_PORTAL}}"
SANKOFA_PUBLIC_WEB_PORT="${SANKOFA_PUBLIC_WEB_PORT:-${SANKOFA_PORTAL_PORT}}"
update_proxy_host "sankofa.nexus" "http://${IP_SANKOFA_PUBLIC_WEB}:${SANKOFA_PUBLIC_WEB_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "sankofa.nexus" "${IP_SANKOFA_PUBLIC_WEB}" "${SANKOFA_PUBLIC_WEB_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "www.sankofa.nexus" "http://${IP_SANKOFA_PUBLIC_WEB}:${SANKOFA_PUBLIC_WEB_PORT}" false false "https://sankofa.nexus" && updated_count=$((updated_count + 1)) || { add_proxy_host "www.sankofa.nexus" "${IP_SANKOFA_PUBLIC_WEB}" "${SANKOFA_PUBLIC_WEB_PORT}" false false "https://sankofa.nexus" && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "phoenix.sankofa.nexus" "http://${IP_SANKOFA_PHOENIX_API}:${SANKOFA_PHOENIX_API_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "phoenix.sankofa.nexus" "${IP_SANKOFA_PHOENIX_API}" "${SANKOFA_PHOENIX_API_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
@@ -410,13 +428,15 @@ update_proxy_host "studio.sankofa.nexus" "http://${IP_SANKOFA_STUDIO}:${SANKOFA_
# Client SSO hostnames (Next.js portal stack on 7801 typical). NEXTAUTH_URL / Keycloak redirects: https://portal.sankofa.nexus (and https://admin.sankofa.nexus).
update_proxy_host "portal.sankofa.nexus" "http://${IP_SANKOFA_CLIENT_SSO}:${SANKOFA_CLIENT_SSO_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "portal.sankofa.nexus" "${IP_SANKOFA_CLIENT_SSO}" "${SANKOFA_CLIENT_SSO_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
update_proxy_host "admin.sankofa.nexus" "http://${IP_SANKOFA_CLIENT_SSO}:${SANKOFA_CLIENT_SSO_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "admin.sankofa.nexus" "${IP_SANKOFA_CLIENT_SSO}" "${SANKOFA_CLIENT_SSO_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
# Operator systems dashboard — only when IP_SANKOFA_DASH is set (see config/ip-addresses.conf).
# Operator systems dashboard — IP_SANKOFA_DASH when set (port defaults 3000); else client SSO / portal so DNS + TLS get HTTP 200 until a dedicated dash app ships.
if [[ -n "${IP_SANKOFA_DASH:-}" ]]; then
SANKOFA_DASH_PORT="${SANKOFA_DASH_PORT:-3000}"
update_proxy_host "dash.sankofa.nexus" "http://${IP_SANKOFA_DASH}:${SANKOFA_DASH_PORT}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "dash.sankofa.nexus" "${IP_SANKOFA_DASH}" "${SANKOFA_DASH_PORT}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
SANKOFA_DASH_IP="${IP_SANKOFA_DASH}"
SANKOFA_DASH_PORT_EFFECTIVE="${SANKOFA_DASH_PORT:-3000}"
else
echo " Skipping dash.sankofa.nexus (set IP_SANKOFA_DASH and SANKOFA_DASH_PORT to provision upstream)."
SANKOFA_DASH_IP="${IP_SANKOFA_CLIENT_SSO}"
SANKOFA_DASH_PORT_EFFECTIVE="${SANKOFA_CLIENT_SSO_PORT}"
fi
update_proxy_host "dash.sankofa.nexus" "http://${SANKOFA_DASH_IP}:${SANKOFA_DASH_PORT_EFFECTIVE}" false false && updated_count=$((updated_count + 1)) || { add_proxy_host "dash.sankofa.nexus" "${SANKOFA_DASH_IP}" "${SANKOFA_DASH_PORT_EFFECTIVE}" false false && updated_count=$((updated_count + 1)); } || failed_count=$((failed_count + 1))
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -22,6 +22,10 @@ Scripts for the **OMNL** tenancy ([omnl.hybxfinance.io](https://omnl.hybxfinance
| **omnl-office-create-adf-singapore.sh** | Create Office for ADF ASIAN PACIFIC HOLDING SINGAPORE PTE LTD (child of OMNL Head Office). Idempotent by externalId `202328126M`. See [ADF_ASIAN_PACIFIC_SINGAPORE_OFFICE_RUNBOOK.md](../../docs/04-configuration/mifos-omnl-central-bank/ADF_ASIAN_PACIFIC_SINGAPORE_OFFICE_RUNBOOK.md). |
| **omnl-transaction-package-snapshot.sh** | **Regulator Section 2:** `GET /offices` + `GET /glaccounts` → `omnl_transaction_package_snapshot.json`, then **enrich** offices with LEI/entity names from `OMNL_ENTITY_MASTER_DATA.json` (`scripts/omnl/jq/enrich-snapshot-entity-master.jq`). `OUT_DIR` / `OUT_FILE` / `ENTITY_DATA` optional. |
| **omnl-office-create-bank-kanaya.sh** | Create **Bank Kanaya** office (`externalId=BANK-KANAYA-ID`, parent HO). Idempotent. `DRY_RUN=1` first. See [BANK_KANAYA_OFFICE_RUNBOOK.md](../../docs/04-configuration/mifos-omnl-central-bank/BANK_KANAYA_OFFICE_RUNBOOK.md). |
| **omnl-office-create-pt-cakra-investama.sh** | Create **PT. CAKRA INVESTAMA INTERNATIONAL** office (`externalId=OMNL-ID-JKT-CAKRA-001`, parent HO). Idempotent. |
| **omnl-client-create-pt-cakra-investama.sh** | Corporate **client** for CAKRA (NPWP + director contact). Idempotent by `OMNL-ID-JKT-CAKRA-CLIENT`. Banking/tax extras: `data/pt-cakra-investama-sidecar.json`. |
| **omnl-user-cakra-office-create.sh** | Staff + user `bpramukantoro` (Office Admin) for CAKRA office. Requires `OMNL_CAKRA_ADMIN_PASSWORD` or `CAKRA_GENERATE_PASSWORD=1`. If `POST /users` returns 500, link **staff** in Fineract UI (see script stderr). |
| **omnl-cakra-onboarding-complete.sh** | Runs office → GL (optional) → client → user. `SKIP_GL=1`, `SKIP_USER=1`, `STRICT_ONBOARDING=1` optional. |
| **build-transaction-package-zip.sh** | **Zip:** `transaction-package-HYBX-BATCH-001.zip` — binder + 215k ledger + Merkle + Appendix. Stages snapshot, **enrich** from `OMNL_ENTITY_MASTER_DATA.json`, copies that JSON (+ `.md`) into `Volume_A/Section_2/`. Needs root `omnl_transaction_package_snapshot.json` or `ALLOW_MISSING_OMNL_SNAPSHOT=1`. |
| **generate-transaction-package-evidence.py** | Ledger, exhibits, e-sign policy, `GENERATED_EVIDENCE_ESIGN_MANIFEST.json`. |
| **apply-qes-tsa-to-staging.sh** | Optional RFC 3161 TSA + CMS on anchor (`TSA_URL`, `QES_SIGN_*`). |
@@ -30,6 +34,16 @@ Scripts for the **OMNL** tenancy ([omnl.hybxfinance.io](https://omnl.hybxfinance
| **check-transaction-package-4995-readiness.sh** | **4.995 gate:** structural checks; `--strict` requires live OMNL snapshot, finalized ISO vault hashes, completed regulatory annex, signed attestation JSON. See `INDONESIA_PACKAGE_4_995_EVIDENCE_STANDARD.md`. |
| **run-transaction-package-ci-smoke.sh** | **CI / dev:** fast package build (10-row fixture ledger, no snapshot), `verify-transaction-package-commitment.py` + structural `check-transaction-package-4995-readiness.sh`. Unsets `TSA_URL`. |
| **omnl-pvp-post-clearing-bank-kanaya.sh** | **PvP clearing JEs** (HO Dr2410/Cr2100; Kanaya Dr2100/Cr1410). `DRY_RUN=1` default; `OFFICE_ID_HO` / `OFFICE_ID_KANAYA` / `AMOUNT_MINOR_UNITS`. See [PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md](../../docs/04-configuration/mifos-omnl-central-bank/PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md). |
| **generate-3way-reconciliation-evidence.sh** | **Operational 3-way:** Fineract GL balance + Chain 138 ERC20 `balanceOf` + optional bank (`JVMTM_BANK_BALANCE_JSON` or env). Outputs `output/jvmtm-evidence/latest-3way-result.json` with `evidence_tier` / `evidence_gaps`. See `config/jvmtm-regulatory-closure/OPERATIONAL_EVIDENCE_VS_TEMPLATES.md`. |
| **verify-ack-before-credit.sh** | Compare ACK ISO timestamp to Fineract `journalentries/{id}` `transactionDate` (conservative ordering check). |
| **fetch-kyt-vendor-report.sh** | Vendor KYT fetch or **REFUSED** manifest (exit 2) if no `KYT_API_URL` / export — no fake PASS. |
| **bcp-rpc-failover-smoke.sh** | Appends real `eth_blockNumber` RPC check to `output/jvmtm-evidence/bcp/failover-execution-log.txt`; optional `RPC_URL_138_SECONDARY`. |
| **validate-reserve-provenance-package.sh** | **JSON Schema** check for `config/reserve-provenance-package/` (10 attestation JSON files + `schemas/reserve-provenance-package.schema.json`). CI: `validate-config.yml`. |
| **build-omnl-e2e-settlement-audit-archive.sh** | **E2E archive:** settlement JSONs, schema + examples (incl. **settlement-event.chain138-primary.example.json**), **JVMTM closure** dirs + **`INAAUDJVMTM_2025_AUDIT_CLOSURE_MATRIX.md`** (Tables B/C/D vs `018215821582/INAAUDJVMTM/2025`), **`reserve-provenance-package/`** (3FR funding attestation layer), `schemas/jvmtm/*.schema.json`, M1/RTGS docs + **OJK_BI_AUDIT_JVMTM_REMEDIATION_AND_UETR_POLICY.md**, clearing + 102B + chain attestation scripts, **AUDIT_PROOF.json** ( **`chainAttestation` + optional `chainAttestationMainnet`** ), **SETTLEMENT_CLOSURE.json**, **MANIFEST.json** + **MANIFEST.sha256**, **`cast receipt`** for 138 + mainnet when **`CHAIN_ATTESTATION_TX_HASH_MAINNET`** / **`latest-dual-attestation.json`** + **`ETHEREUM_MAINNET_RPC`**, `FETCH_LIVE_EVIDENCE=1` paginated **journalentries** offices **1, 21, 22**. **`JVMTM_CLOSURE_DIR`** = dir of live closure JSON (see `config/jvmtm-regulatory-closure/README.md`). Output: `output/omnl-e2e-settlement-audit-<UTC>.zip`. |
| **build-office22-office21-compliance-archive.sh** | **Zip + manifest** for Office **22** (CAKRA) with Office **21** (Kanaya) artefacts: IPSAS/IFRS memo, matrix, PvP runbooks, M1/PvP scripts, CAKRA onboarding, maker-checker. Optional `FETCH_LIVE_EVIDENCE=1` pulls `/journalentries` + `/offices` into `evidence/`. Output: `output/office22-office21-compliance-archive-<UTC>.zip` with `MANIFEST.json` + `MANIFEST.sha256`. |
| **omnl-m1-clearing-102b-chunked.sh** | **102B USD M1** office 21→22: **102 × 1B USD** chunks (Fineract-safe line size). `CHUNK_CENTS`, `STAMP` optional. Same compliance vars as clearing script when `DRY_RUN=0`. |
| **omnl-chain138-attestation-tx.sh** | **Dual-anchor attestation:** 0-value self `cast send` on **Chain 138**; also **Ethereum mainnet** when `ETHEREUM_MAINNET_RPC` or `RPC_URL_MAINNET` is set (unless `ATTEST_INCLUDE_MAINNET=0`). Writes `output/jvmtm-evidence/latest-dual-attestation.json` + `.env`. **Mainnet uses real ETH.** Optional `CORRELATION_ID` → `keccak256` log. `DRY_RUN=1` prints `cast` lines. |
| **omnl-m1-clearing-transfer-between-offices.sh** | **M1 PvP-style branch realloc:** unwind **Dr1410/Cr2100** at source office, book **Dr2100/Cr1410** at target (default 21→22). Auto-amount from GL **2100** debits at source or `AMOUNT=`. Live post requires `COMPLIANCE_AUTH_REF` + `COMPLIANCE_APPROVER` (material). `WRITE_MAKER_PAYLOADS=1` for checker workflow. Appends **IPSAS/IFRS** tag to `comments` (`COMPLIANCE_STANDARD_MEMO`); memo [OMNL_IPSAS_IFRS_INTEROFFICE_COMPLIANCE.md](../../docs/04-configuration/mifos-omnl-central-bank/OMNL_IPSAS_IFRS_INTEROFFICE_COMPLIANCE.md). **Operator runbook:** [OMNL_M1_INTEROFFICE_OFFICE_TO_OFFICE_CLEARING_RUNBOOK.md](../../docs/04-configuration/mifos-omnl-central-bank/OMNL_M1_INTEROFFICE_OFFICE_TO_OFFICE_CLEARING_RUNBOOK.md). |
| **resolve_ids.sh** | Resolve GL IDs (1410, 2100, 2410) and payment type; write `ids.env`. Run before closures/reconciliation/templates. See [OPERATING_RAILS.md](../../docs/04-configuration/mifos-omnl-central-bank/OPERATING_RAILS.md). |
| **omnl-gl-closures-post.sh** | Post GL closures for Office 20 and HO (idempotent). `CLOSING_DATE=yyyy-MM-dd`, `DRY_RUN=1`. See [OPERATING_RAILS.md](../../docs/04-configuration/mifos-omnl-central-bank/OPERATING_RAILS.md). |
| **omnl-reconciliation-office20.sh** | Snapshot Office 20 (offices + GL + trial balance), timestamp, sha256. `OUT_DIR=./reconciliation`. See [OPERATING_RAILS.md](../../docs/04-configuration/mifos-omnl-central-bank/OPERATING_RAILS.md). |
@@ -122,6 +136,11 @@ bash scripts/omnl/omnl-office-create-pelican.sh
# ADF Asian Pacific Holding Singapore Pte Ltd — create office (child of OMNL Head Office, externalId 202328126M)
DRY_RUN=1 bash scripts/omnl/omnl-office-create-adf-singapore.sh
bash scripts/omnl/omnl-office-create-adf-singapore.sh
# PT. CAKRA INVESTAMA INTERNATIONAL — office + client + GL + user (password or CAKRA_GENERATE_PASSWORD=1)
DRY_RUN=1 bash scripts/omnl/omnl-office-create-pt-cakra-investama.sh
OMNL_CAKRA_ADMIN_PASSWORD='…' bash scripts/omnl/omnl-cakra-onboarding-complete.sh
# Or: CAKRA_GENERATE_PASSWORD=1 bash scripts/omnl/omnl-cakra-onboarding-complete.sh
```
**Transaction package — env vars**

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Append a real RPC reachability line (primary, optional secondary) — minimal execution evidence, not full DR.
# This is a smoke check: eth_blockNumber over HTTP JSON-RPC. For data-centre DR, attach separate drill logs.
#
# Env (after load-project-env):
# RPC_URL_138 — primary
# RPC_URL_138_SECONDARY — optional fallback URL
# JVMTM_BCP_LOG — default output/jvmtm-evidence/bcp/failover-execution-log.txt
set -eo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# shellcheck source=scripts/lib/load-project-env.sh
set +u
source "${REPO_ROOT}/scripts/lib/load-project-env.sh"
set -euo pipefail
LOG="${JVMTM_BCP_LOG:-${REPO_ROOT}/output/jvmtm-evidence/bcp/failover-execution-log.txt}"
mkdir -p "$(dirname "$LOG")"
P="${RPC_URL_138:-http://192.168.11.211:8545}"
S="${RPC_URL_138_SECONDARY:-}"
TS="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
rpc_ping() {
  # Query eth_blockNumber on a JSON-RPC endpoint over HTTP POST (8s timeout).
  # Prints the hex block number on success; prints nothing on any failure
  # (curl error, timeout, missing .result). Always returns 0.
  local endpoint="$1"
  local payload='{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
  curl -sS -m 8 -X POST "$endpoint" \
    -H "Content-Type: application/json" \
    -d "$payload" 2>/dev/null | jq -r '.result // empty' || true
}
# Probe the primary first; on success log it and exit 0 without touching the secondary.
R1="$(rpc_ping "$P")"
if [[ -n "$R1" ]]; then
  echo "${TS} primary_ok rpc=${P} block=${R1}" >> "$LOG"
  echo "primary_ok $R1" >&2
  exit 0
fi
# Primary unreachable: record the failure, then try the secondary if one is configured.
echo "${TS} primary_fail rpc=${P}" >> "$LOG"
if [[ -n "$S" ]]; then
  R2="$(rpc_ping "$S")"
  if [[ -n "$R2" ]]; then
    echo "${TS} secondary_ok rpc=${S} block=${R2} (after primary fail)" >> "$LOG"
    echo "secondary_ok $R2" >&2
    exit 0
  fi
  echo "${TS} secondary_fail rpc=${S}" >> "$LOG"
fi
# Neither endpoint answered: non-zero exit so callers / CI can alert on it.
echo "FAIL: no RPC responded" >&2
exit 1

View File

@@ -0,0 +1,158 @@
#!/usr/bin/env bash
# Build a zip archive + JSON manifest for Office 22 (PT CAKRA) context including
# Office 21 (Bank Kanaya), M1 realloc, IPSAS/IFRS compliance memo, PvP runbooks, and scripts.
#
# Optional: FETCH_LIVE_EVIDENCE=1 appends Fineract GET /journalentries for offices 21 and 22
# (needs omnl-fineract/.env or .env).
#
# Output: output/office22-office21-compliance-archive-<UTC>.zip
#
# Usage:
# bash scripts/omnl/build-office22-office21-compliance-archive.sh
# FETCH_LIVE_EVIDENCE=1 bash scripts/omnl/build-office22-office21-compliance-archive.sh
# KEEP_STAGE=1 — retain staging folder under output/ after zip (default: delete staging dir).
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
OUT_BASE="${OUT_BASE:-${REPO_ROOT}/output}"
FETCH_LIVE_EVIDENCE="${FETCH_LIVE_EVIDENCE:-0}"
STAMP_UTC="${STAMP_UTC:-$(date -u +%Y%m%dT%H%M%SZ)}"
ARCHIVE_BASENAME="office22-office21-compliance-archive-${STAMP_UTC}"
STAGE="${OUT_BASE}/${ARCHIVE_BASENAME}"
ZIP_PATH="${OUT_BASE}/${ARCHIVE_BASENAME}.zip"
mkdir -p "$STAGE"
# --- Copy tree: archivePath -> source relative to REPO_ROOT
# Each entry is "destination-inside-archive:source-relative-to-repo"; the staging
# loop splits on the first ':'. Docs are flattened under docs/mifos-omnl-central-bank/.
declare -a PAIRS=(
  "docs/mifos-omnl-central-bank/OMNL_IPSAS_IFRS_INTEROFFICE_COMPLIANCE.md:docs/04-configuration/mifos-omnl-central-bank/OMNL_IPSAS_IFRS_INTEROFFICE_COMPLIANCE.md"
  "docs/mifos-omnl-central-bank/OMNL_JOURNAL_LEDGER_MATRIX.md:docs/04-configuration/mifos-omnl-central-bank/OMNL_JOURNAL_LEDGER_MATRIX.md"
  "docs/mifos-omnl-central-bank/OMNL_PHASE_C_INTEROFFICE_DUE_TO_DUE_FROM.md:docs/04-configuration/mifos-omnl-central-bank/OMNL_PHASE_C_INTEROFFICE_DUE_TO_DUE_FROM.md"
  "docs/mifos-omnl-central-bank/BANK_KANAYA_OFFICE_RUNBOOK.md:docs/04-configuration/mifos-omnl-central-bank/BANK_KANAYA_OFFICE_RUNBOOK.md"
  "docs/mifos-omnl-central-bank/PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md:docs/04-configuration/mifos-omnl-central-bank/PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md"
  "docs/mifos-omnl-central-bank/README.md:docs/04-configuration/mifos-omnl-central-bank/README.md"
  "docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_EXECUTION_STATUS_2026-03-29.md:docs/03-deployment/DBIS_OMNL_INDONESIA_BNI_E2E_EXECUTION_STATUS_2026-03-29.md"
  "scripts/omnl/omnl-m1-clearing-transfer-between-offices.sh:scripts/omnl/omnl-m1-clearing-transfer-between-offices.sh"
  "scripts/omnl/omnl-pvp-post-clearing-bank-kanaya.sh:scripts/omnl/omnl-pvp-post-clearing-bank-kanaya.sh"
  "scripts/omnl/data/pt-cakra-investama-sidecar.json:scripts/omnl/data/pt-cakra-investama-sidecar.json"
  "scripts/omnl/omnl-office-create-pt-cakra-investama.sh:scripts/omnl/omnl-office-create-pt-cakra-investama.sh"
  "scripts/omnl/omnl-client-create-pt-cakra-investama.sh:scripts/omnl/omnl-client-create-pt-cakra-investama.sh"
  "scripts/omnl/omnl-cakra-onboarding-complete.sh:scripts/omnl/omnl-cakra-onboarding-complete.sh"
  "scripts/omnl/omnl-user-cakra-office-create.sh:scripts/omnl/omnl-user-cakra-office-create.sh"
  "scripts/omnl/omnl-je-maker.sh:scripts/omnl/omnl-je-maker.sh"
  "scripts/omnl/omnl-je-checker.sh:scripts/omnl/omnl-je-checker.sh"
)
copy_pair() {
  # Stage one repo file into the archive tree.
  #   $1 — destination path inside $STAGE
  #   $2 — source path relative to $REPO_ROOT
  # A missing source is warned about and skipped (non-fatal); parent dirs are created.
  local archive_path="$1" repo_rel="$2"
  local src_abs="${REPO_ROOT}/${repo_rel}"
  if [ ! -f "$src_abs" ]; then
    echo "WARN: missing (skip): $repo_rel" >&2
    return 0
  fi
  mkdir -p "$(dirname "${STAGE}/${archive_path}")"
  cp -a "$src_abs" "${STAGE}/${archive_path}"
}
# Split each pair on the first ':' into archive destination and repo-relative source.
for pair in "${PAIRS[@]}"; do
  IFS=':' read -r dest src <<< "$pair"
  copy_pair "$dest" "$src"
done
mkdir -p "${STAGE}/evidence"
# Optional live evidence: source Fineract credentials (nounset relaxed around the .env
# files, which may reference unset vars), then pull journal entries for offices 21/22.
if [ "$FETCH_LIVE_EVIDENCE" = "1" ]; then
  if [ -f "${REPO_ROOT}/omnl-fineract/.env" ]; then
    set +u
    # shellcheck disable=SC1090
    source "${REPO_ROOT}/omnl-fineract/.env" 2>/dev/null || true
    set -u
  elif [ -f "${REPO_ROOT}/.env" ]; then
    set +u
    # shellcheck disable=SC1090
    source "${REPO_ROOT}/.env" 2>/dev/null || true
    set -u
  fi
  BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
  if [ -n "$BASE_URL" ] && [ -n "${OMNL_FINERACT_PASSWORD:-}" ]; then
    TENANT="${OMNL_FINERACT_TENANT:-omnl}"
    # Shared curl argv (tenant header + basic auth), reused for every endpoint below.
    CURL=(curl -sS -H "Fineract-Platform-TenantId: ${TENANT}" -u "${OMNL_FINERACT_USER:-app.omnl}:${OMNL_FINERACT_PASSWORD}")
    for oid in 21 22; do
      # Best-effort: a failed fetch must not abort the archive build (hence || true).
      "${CURL[@]}" "${BASE_URL}/journalentries?officeId=${oid}&offset=0&limit=500" > "${STAGE}/evidence/journalentries-office${oid}.json" || true
    done
    "${CURL[@]}" "${BASE_URL}/offices" > "${STAGE}/evidence/offices.json" || true
    echo "Live evidence written under evidence/" >&2
  else
    echo "WARN: FETCH_LIVE_EVIDENCE=1 but OMNL credentials missing; skipped API fetch." >&2
  fi
fi
# README inside archive
# Unquoted EOF so ${STAMP_UTC} expands; the body below is shipped verbatim in the zip.
cat > "${STAGE}/README_ARCHIVE.txt" <<EOF
Office 22 (PT. CAKRA INVESTAMA INTERNATIONAL) + Office 21 (Bank Kanaya) compliance archive
Generated (UTC): ${STAMP_UTC}
Repository: omnl / HYBX OMNL Fineract compliance and M1 beneficiary realloc artefacts.
Includes:
- IPSAS / IFRS (IFGA default) inter-office memo and journal matrix excerpts
- Phase C interoffice and PvP Bank Kanaya runbooks
- M1 transfer and PvP posting scripts (bash)
- PT CAKRA onboarding scripts and sidecar JSON
- Maker-checker helpers
- Optional evidence/*.json if built with FETCH_LIVE_EVIDENCE=1
Verify: extract zip, then: sha256sum -c MANIFEST.sha256
EOF
# Manifest build: one NDJSON record per staged file (path, sha256, size), then fold
# into MANIFEST.json. The manifest files themselves and ._* scratch files are skipped.
NDJSON="${STAGE}/._manifest_items.ndjson"
: > "$NDJSON"
cd "$STAGE"
while IFS= read -r f; do
  p="${f#./}"
  [ -z "$p" ] && continue
  case "$p" in MANIFEST.json|MANIFEST.sha256|._*) continue ;; esac
  sz=$(wc -c < "$f" | tr -d ' ')
  h=$(sha256sum "$f" | awk '{print $1}')
  jq -n --arg path "$p" --arg sha "$h" --argjson size "$sz" \
    '{path: $path, sha256: $sha, sizeBytes: $size}' >> "$NDJSON"
done < <(find . -type f | sort)
# Slurp per-file records into one JSON array for the manifest's "files" field.
ITEMS=$(jq -s '.' "$NDJSON")
GIT_COMMIT=$(git -C "$REPO_ROOT" rev-parse HEAD 2>/dev/null || echo "n/a")
jq -n \
  --arg id "$ARCHIVE_BASENAME" \
  --arg gen "$(date -u -Iseconds)" \
  --arg git "$GIT_COMMIT" \
  --argjson benef '{"officeId": 22, "name": "PT. CAKRA INVESTAMA INTERNATIONAL", "externalId": "OMNL-ID-JKT-CAKRA-001"}' \
  --argjson src '{"officeId": 21, "name": "Bank Kanaya (Indonesia)", "externalId": "BANK-KANAYA-ID"}' \
  --argjson files "$ITEMS" \
  '{
  archiveId: $id,
  generatedAtUtc: $gen,
  repositoryHeadCommit: $git,
  beneficiaryOffice: $benef,
  sourceOffice: $src,
  description: "Office 22 package with Office 21 + IPSAS/IFRS memo, PvP/M1 realloc scripts and runbooks",
  files: $files
  }' > "${STAGE}/MANIFEST.json"
# Checksums for every file in archive (including MANIFEST.json)
find . -type f ! -name '._*' ! -name 'MANIFEST.sha256' | sort | while IFS= read -r f; do
  p="${f#./}"
  sha256sum "$f" | awk -v p="$p" '{print $1 " " p}'
done > "${STAGE}/MANIFEST.sha256"
rm -f "$NDJSON"
cd "$REPO_ROOT"
rm -f "$ZIP_PATH"   # re-runs with the same STAMP_UTC overwrite the previous zip
# Zip from OUT_BASE in a subshell so entries inside the zip are rooted at ARCHIVE_BASENAME/.
(
  cd "$OUT_BASE"
  zip -r -q "$ZIP_PATH" "$ARCHIVE_BASENAME"
)
echo "Wrote $ZIP_PATH" >&2
ls -la "$ZIP_PATH" >&2
# Staging dir is discarded unless the caller opts in with KEEP_STAGE=1.
if [ "${KEEP_STAGE:-0}" != "1" ]; then
  rm -rf "$STAGE"
fi

View File

@@ -0,0 +1,610 @@
#!/usr/bin/env bash
# Build complete E2E archive: Audit Proof + Settlement + Closure cluster (OMNL Fineract + Chain 138).
#
# Stages:
# - Canonical settlement events (output/settlement-events/*.json)
# - settlement-event schema + min example (validation target)
# - OMNL runbooks and RTGS cross-link doc
# - Scripts: M1 clearing, 102B chunked, chain attestation
# - AUDIT_PROOF.json, SETTLEMENT_CLOSURE.json, README_E2E_ARCHIVE.txt
# - Optional live Fineract evidence (offices 1, 21, 22) and cast receipt for attestation tx
#
# Env:
# FETCH_LIVE_EVIDENCE=1 — pull /journalentries + /offices + /glaccounts (needs omnl-fineract/.env)
# CHAIN_ATTESTATION_TX_HASH — default 102B closure attestation on Chain 138 (override if different)
# CHAIN_ATTESTATION_TX_HASH_MAINNET — optional Ethereum mainnet 0-value attestation tx (dual-anchor)
# ETHEREUM_MAINNET_RPC / RPC_URL_MAINNET — for mainnet cast receipt; if unset, mainnet receipt skipped
# Auto: if output/jvmtm-evidence/latest-dual-attestation.json exists (from omnl-chain138-attestation-tx.sh),
# CHAIN_ATTESTATION_TX_HASH_MAINNET is read from chainId 1 when env not set.
# RPC_URL_138 — for cast receipt (default LAN Core RPC)
# KEEP_STAGE=1 — keep staging dir after zip
# JVMTM_CLOSURE_DIR — optional dir with live regulatory closure JSON/txt (see config/jvmtm-regulatory-closure/README.md).
# If unset, stages repo examples (placeholders) into reconciliation/, liquidity/, acknowledgements/, etc.
# If JVMTM_CLOSURE_DIR/transactions/*.json exists, stages live transaction execution envelopes into transactions/.
# Stages config/reserve-provenance-package/ (3FR reserve attestation JSON) when present.
#
# Output: output/omnl-e2e-settlement-audit-<UTC>.zip
#
# Usage:
# bash scripts/omnl/build-omnl-e2e-settlement-audit-archive.sh
# FETCH_LIVE_EVIDENCE=1 bash scripts/omnl/build-omnl-e2e-settlement-audit-archive.sh
#
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
OUT_BASE="${OUT_BASE:-${REPO_ROOT}/output}"
FETCH_LIVE_EVIDENCE="${FETCH_LIVE_EVIDENCE:-0}"
JVMTM_CLOSURE_DIR="${JVMTM_CLOSURE_DIR:-}"
STAMP_UTC="${STAMP_UTC:-$(date -u +%Y%m%dT%H%M%SZ)}"
ARCHIVE_BASENAME="omnl-e2e-settlement-audit-${STAMP_UTC}"
STAGE="${OUT_BASE}/${ARCHIVE_BASENAME}"
ZIP_PATH="${OUT_BASE}/${ARCHIVE_BASENAME}.zip"
CHAIN_ATTESTATION_TX_HASH="${CHAIN_ATTESTATION_TX_HASH:-0xb90f2da51d9c506f552d276d9aa57f4ae485528f2ee6025f435f188d09d405f4}"
RPC_URL_138="${RPC_URL_138:-http://192.168.11.211:8545}"
if [ -f "${REPO_ROOT}/smom-dbis-138/.env" ]; then
set +u
set -a
# shellcheck disable=SC1090
source "${REPO_ROOT}/smom-dbis-138/.env" 2>/dev/null || true
set +a
set -u
fi
if [ -f "${REPO_ROOT}/.env" ]; then
set +u
set -a
# shellcheck disable=SC1090
source "${REPO_ROOT}/.env" 2>/dev/null || true
set +a
set -u
fi
ETHEREUM_MAINNET_RPC="${ETHEREUM_MAINNET_RPC:-${RPC_URL_MAINNET:-}}"
CHAIN_ATTESTATION_TX_HASH_MAINNET="${CHAIN_ATTESTATION_TX_HASH_MAINNET:-}"
DUAL_ATTEST_JSON="${REPO_ROOT}/output/jvmtm-evidence/latest-dual-attestation.json"
if [ -z "$CHAIN_ATTESTATION_TX_HASH_MAINNET" ] && [ -f "$DUAL_ATTEST_JSON" ] && command -v jq &>/dev/null; then
CHAIN_ATTESTATION_TX_HASH_MAINNET="$(jq -r '[.attestations[]? | select(.chainId == 1)] | first | .transactionHash // empty' "$DUAL_ATTEST_JSON")"
fi
mkdir -p "$STAGE"/{settlement-events,schemas,schemas/jvmtm,examples,docs,scripts,evidence,audit-proof,reconciliation,liquidity,acknowledgements,exceptions,validation,bcp,disaster-recovery,monitoring,transactions,config/jvmtm-regulatory-closure,config/jvmtm-regulatory-closure/examples}
copy_if_exists() {
  # Copy one repo-relative file ($2) into an already-created staging subdir ($1).
  # A missing source only emits a WARN to stderr; never fatal.
  local target_dir="$1" repo_rel="$2"
  local abs_src="${REPO_ROOT}/${repo_rel}"
  if [ ! -f "$abs_src" ]; then
    echo "WARN: missing (skip): $repo_rel" >&2
    return 0
  fi
  cp -a "$abs_src" "${STAGE}/${target_dir}/"
}
# JVMTM / regulatory closure: live dir overrides repo examples (basename match).
jvmtm_stage() {
  # Stage one closure artefact into the archive.
  #   $1 — artefact file name inside the archive (also the basename looked up in JVMTM_CLOSURE_DIR)
  #   $2 — destination subdir under $STAGE
  #   $3 — repo-relative fallback template path
  # Prefers a live file from JVMTM_CLOSURE_DIR; otherwise stages the repo example.
  local artefact="$1" dest_subdir="$2" template_rel="$3"
  local target="${STAGE}/${dest_subdir}/${artefact}"
  if [ -n "$JVMTM_CLOSURE_DIR" ] && [ -f "${JVMTM_CLOSURE_DIR}/${artefact}" ]; then
    cp -a "${JVMTM_CLOSURE_DIR}/${artefact}" "$target"
    echo "JVMTM: staged live ${artefact} -> ${dest_subdir}/" >&2
  else
    cp -a "${REPO_ROOT}/${template_rel}" "$target"
    echo "JVMTM: staged template ${artefact} -> ${dest_subdir}/ (set JVMTM_CLOSURE_DIR for live)" >&2
  fi
}
# Settlement artefacts (repo-generated)
for f in omnl-102b-ledger-and-chain-20260331.json omnl-102b-notional-status-20260331.json omnl-m1-kanaya-cakra-20260331.json; do
  copy_if_exists "settlement-events" "output/settlement-events/$f"
done
# Schema + examples the archive's own validation note points at.
copy_if_exists "schemas" "config/dbis-institutional/schemas/settlement-event.schema.json"
copy_if_exists "examples" "config/dbis-institutional/examples/settlement-event.example.json"
copy_if_exists "examples" "config/dbis-institutional/examples/settlement-event.chain138-primary.example.json"
copy_if_exists "examples" "config/dbis-institutional/examples/settlement-event.min.json"
# JVMTM regulatory closure artefacts (Tables B/C/D style evidence)
jvmtm_stage "daily-3way-reconciliation-report.json" "reconciliation" "config/jvmtm-regulatory-closure/examples/daily-3way-reconciliation-report.example.json"
jvmtm_stage "prefunding-proof.json" "liquidity" "config/jvmtm-regulatory-closure/examples/prefunding-proof.example.json"
jvmtm_stage "pre-settlement-ack.json" "acknowledgements" "config/jvmtm-regulatory-closure/examples/pre-settlement-ack.example.json"
jvmtm_stage "sample-exception-event.json" "exceptions" "config/jvmtm-regulatory-closure/examples/sample-exception-event.example.json"
jvmtm_stage "kyt-screening-result.json" "validation" "config/jvmtm-regulatory-closure/examples/kyt-screening-result.example.json"
jvmtm_stage "recovery-time-report.json" "bcp" "config/jvmtm-regulatory-closure/examples/recovery-time-report.example.json"
jvmtm_stage "failover-test-log.txt" "bcp" "config/jvmtm-regulatory-closure/examples/failover-test-log.example.txt"
jvmtm_stage "DR-simulation-report.json" "disaster-recovery" "config/jvmtm-regulatory-closure/examples/dr-simulation-report.example.json"
jvmtm_stage "real-time-balance-snapshot.json" "monitoring" "config/jvmtm-regulatory-closure/examples/real-time-balance-snapshot.example.json"
# Live transaction execution envelopes (only when a closure dir provides them);
# nullglob so an empty transactions/ dir yields an empty array, not a literal glob.
if [ -n "$JVMTM_CLOSURE_DIR" ] && [ -d "${JVMTM_CLOSURE_DIR}/transactions" ]; then
  shopt -s nullglob
  tx_files=("${JVMTM_CLOSURE_DIR}/transactions/"*.json)
  if [ "${#tx_files[@]}" -gt 0 ]; then
    cp -a "${tx_files[@]}" "${STAGE}/transactions/"
    echo "JVMTM: staged ${#tx_files[@]} live transaction envelope(s) -> transactions/" >&2
  fi
  shopt -u nullglob
fi
# Exception policy + retry log: live closure dir wins, else repo policy / inline template.
if [ -n "$JVMTM_CLOSURE_DIR" ] && [ -f "${JVMTM_CLOSURE_DIR}/exception-policy.md" ]; then
  cp -a "${JVMTM_CLOSURE_DIR}/exception-policy.md" "${STAGE}/exceptions/exception-policy.md"
else
  cp -a "${REPO_ROOT}/config/jvmtm-regulatory-closure/policies/exception-policy.md" "${STAGE}/exceptions/exception-policy.md"
fi
if [ -n "$JVMTM_CLOSURE_DIR" ] && [ -f "${JVMTM_CLOSURE_DIR}/retry-log.txt" ]; then
  cp -a "${JVMTM_CLOSURE_DIR}/retry-log.txt" "${STAGE}/exceptions/retry-log.txt"
else
  printf '%s\n' "retry-log (template) — append exception_id, timestamp, action per resolution; JVMTM closure" > "${STAGE}/exceptions/retry-log.txt"
fi
# Bundle every JVMTM JSON schema so staged artefacts can be validated offline.
for js in "$REPO_ROOT"/config/jvmtm-regulatory-closure/schemas/*.schema.json; do
  [ -f "$js" ] || continue
  cp -a "$js" "${STAGE}/schemas/jvmtm/"
done
# Closure pack docs + compliance matrices (copied when present in the repo).
for pack_file in \
  README.md \
  OPERATIONAL_EVIDENCE_VS_TEMPLATES.md \
  INAAUDJVMTM_2025_AUDIT_CLOSURE_MATRIX.md \
  JVMTM_TRANSACTION_GRADE_COMPLIANCE_MATRIX.md \
  transaction-compliance-matrix.json \
  transaction-compliance-matrix.csv
do
  if [ -f "${REPO_ROOT}/config/jvmtm-regulatory-closure/${pack_file}" ]; then
    cp -a "${REPO_ROOT}/config/jvmtm-regulatory-closure/${pack_file}" "${STAGE}/config/jvmtm-regulatory-closure/"
  fi
done
if [ -f "${REPO_ROOT}/config/jvmtm-regulatory-closure/INAAUDJVMTM_2025_AUDIT_CLOSURE_MATRIX.md" ]; then
  echo "Included config/jvmtm-regulatory-closure/INAAUDJVMTM_2025_AUDIT_CLOSURE_MATRIX.md (Tables B/C/D closure mapping)" >&2
fi
for pack_example in \
  transaction-compliance-execution.example.json \
  transaction-compliance-execution.blocked.example.json
do
  if [ -f "${REPO_ROOT}/config/jvmtm-regulatory-closure/examples/${pack_example}" ]; then
    cp -a "${REPO_ROOT}/config/jvmtm-regulatory-closure/examples/${pack_example}" "${STAGE}/config/jvmtm-regulatory-closure/examples/"
  fi
done
# Machine-generated 3-way result (run generate-3way-reconciliation-evidence.sh before archiving)
if [ -f "${REPO_ROOT}/output/jvmtm-evidence/latest-3way-result.json" ]; then
  cp -a "${REPO_ROOT}/output/jvmtm-evidence/latest-3way-result.json" "${STAGE}/reconciliation/3way-result.json"
  echo "Included reconciliation/3way-result.json from output/jvmtm-evidence/latest-3way-result.json" >&2
fi
# Reserve provenance + funding attestation (3FR narrative; staged bank/KYT pending)
if [ -d "${REPO_ROOT}/config/reserve-provenance-package" ]; then
  cp -a "${REPO_ROOT}/config/reserve-provenance-package" "${STAGE}/"
  echo "Included reserve-provenance-package/ (legal, settlement, provenance, bank, kyt, reconciliation, reserve, governance)" >&2
fi
# Runbooks and reference docs staged under their original repo-relative paths.
declare -a DOCS=(
  "docs/04-configuration/mifos-omnl-central-bank/OMNL_M1_INTEROFFICE_OFFICE_TO_OFFICE_CLEARING_RUNBOOK.md"
  "docs/04-configuration/mifos-omnl-central-bank/OMNL_IPSAS_IFRS_INTEROFFICE_COMPLIANCE.md"
  "docs/04-configuration/mifos-omnl-central-bank/OMNL_PHASE_C_INTEROFFICE_DUE_TO_DUE_FROM.md"
  "docs/03-deployment/OMNL_DBIS_CORE_CHAIN138_SMART_VAULT_RTGS_RUNBOOK.md"
  "docs/03-deployment/OJK_BI_AUDIT_JVMTM_REMEDIATION_AND_UETR_POLICY.md"
  "config/dbis-institutional/README.md"
)
for d in "${DOCS[@]}"; do
  if [ -f "${REPO_ROOT}/$d" ]; then
    mkdir -p "${STAGE}/$(dirname "$d")"
    cp -a "${REPO_ROOT}/$d" "${STAGE}/$d"
  fi
done
# Operational scripts bundled into the archive; made executable post-copy (best-effort chmod).
declare -a SCRIPTS=(
  "scripts/omnl/omnl-m1-clearing-transfer-between-offices.sh"
  "scripts/omnl/omnl-m1-clearing-102b-chunked.sh"
  "scripts/omnl/omnl-chain138-attestation-tx.sh"
  "scripts/omnl/build-omnl-e2e-settlement-audit-archive.sh"
  "scripts/validation/validate-dbis-institutional-schemas.sh"
  "scripts/validation/validate-jvmtm-regulatory-closure-schemas.sh"
  "scripts/validation/validate-jvmtm-transaction-compliance-pack.py"
  "scripts/jvmtm/export-transaction-compliance-matrix-csv.py"
  "scripts/omnl/generate-3way-reconciliation-evidence.sh"
  "scripts/omnl/verify-ack-before-credit.sh"
  "scripts/omnl/fetch-kyt-vendor-report.sh"
  "scripts/omnl/bcp-rpc-failover-smoke.sh"
  "scripts/validation/validate-reserve-provenance-package.sh"
)
for s in "${SCRIPTS[@]}"; do
  if [ -f "${REPO_ROOT}/$s" ]; then
    mkdir -p "${STAGE}/$(dirname "$s")"
    cp -a "${REPO_ROOT}/$s" "${STAGE}/$s"
    chmod a+x "${STAGE}/$s" 2>/dev/null || true
  fi
done
# Chain receipts (best-effort): Chain 138 + optional Ethereum mainnet
# `&>file` captures both stdout and stderr, so a failed cast still leaves a trace file.
if command -v cast &>/dev/null && [ -n "$CHAIN_ATTESTATION_TX_HASH" ]; then
  if cast receipt "$CHAIN_ATTESTATION_TX_HASH" --rpc-url "$RPC_URL_138" &>"${STAGE}/evidence/chain138-attestation-receipt.txt"; then
    echo "Wrote evidence/chain138-attestation-receipt.txt" >&2
  else
    echo "WARN: cast receipt failed; wrote empty or partial file" >&2
  fi
else
  # No cast binary (or no hash): record the hash as a placeholder instead of a receipt.
  echo "cast not found or hash empty; skip Chain 138 on-chain receipt" >&2
  echo "CHAIN_ATTESTATION_TX_HASH=${CHAIN_ATTESTATION_TX_HASH:-}" > "${STAGE}/evidence/chain138-attestation-placeholder.txt"
fi
# Mainnet receipt only when both the tx hash and an RPC endpoint are available.
if command -v cast &>/dev/null && [ -n "${CHAIN_ATTESTATION_TX_HASH_MAINNET:-}" ] && [ -n "$ETHEREUM_MAINNET_RPC" ]; then
  if cast receipt "$CHAIN_ATTESTATION_TX_HASH_MAINNET" --rpc-url "$ETHEREUM_MAINNET_RPC" &>"${STAGE}/evidence/mainnet-attestation-receipt.txt"; then
    echo "Wrote evidence/mainnet-attestation-receipt.txt" >&2
  else
    echo "WARN: mainnet cast receipt failed; check ETHEREUM_MAINNET_RPC and CHAIN_ATTESTATION_TX_HASH_MAINNET" >&2
  fi
elif [ -n "${CHAIN_ATTESTATION_TX_HASH_MAINNET:-}" ] && [ -z "$ETHEREUM_MAINNET_RPC" ]; then
  # Hash known but no RPC: placeholder keeps the dual-anchor reference in the archive.
  echo "CHAIN_ATTESTATION_TX_HASH_MAINNET=${CHAIN_ATTESTATION_TX_HASH_MAINNET}" > "${STAGE}/evidence/mainnet-attestation-placeholder.txt"
  echo "WARN: mainnet tx hash set but ETHEREUM_MAINNET_RPC unset; wrote mainnet-attestation-placeholder.txt" >&2
fi
# Live Fineract pulls (best-effort evidence; needs credentials in omnl-fineract/.env or .env).
if [ "$FETCH_LIVE_EVIDENCE" = "1" ]; then
  # Relax nounset while sourcing .env files (they may reference unset vars), then
  # RESTORE it with `set -u` so the rest of the run keeps strict-mode guarantees.
  # Fix: the restore lines previously said `set +u` again, silently leaving nounset
  # disabled for the remainder of the script — now matches the sibling
  # build-office22-office21-compliance-archive.sh (set +u / source / set -u).
  if [ -f "${REPO_ROOT}/omnl-fineract/.env" ]; then
    set +u
    # shellcheck disable=SC1090
    source "${REPO_ROOT}/omnl-fineract/.env" 2>/dev/null || true
    set -u
  elif [ -f "${REPO_ROOT}/.env" ]; then
    set +u
    # shellcheck disable=SC1090
    source "${REPO_ROOT}/.env" 2>/dev/null || true
    set -u
  fi
  BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
  if [ -n "$BASE_URL" ] && [ -n "${OMNL_FINERACT_PASSWORD:-}" ]; then
    TENANT="${OMNL_FINERACT_TENANT:-omnl}"
    # Shared curl argv: tenant header + basic auth, reused for every endpoint below.
    CURL=(curl -sS -H "Fineract-Platform-TenantId: ${TENANT}" -u "${OMNL_FINERACT_USER:-app.omnl}:${OMNL_FINERACT_PASSWORD}")
    for oid in 1 21 22; do
      # Paginate /journalentries (500 per page) and merge all pages into one JSON per office.
      offset=0
      limit=500
      merge="${STAGE}/evidence/._je_batches_${oid}.ndjson"
      : > "$merge"
      while true; do
        resp=$("${CURL[@]}" "${BASE_URL}/journalentries?officeId=${oid}&offset=${offset}&limit=${limit}")
        echo "$resp" | jq -c '.pageItems // []' >> "$merge"
        n=$(echo "$resp" | jq '.pageItems | length')
        total=$(echo "$resp" | jq -r '.totalFilteredRecords // 0')
        offset=$((offset + n))
        # Stop on a short page, or once the reported total has been consumed.
        if [ "$n" -lt "$limit" ] || [ "$offset" -ge "$total" ]; then
          break
        fi
      done
      jq -s 'map(.[]) | { officeId: '"$oid"', totalLines: length, pageItems: . }' "$merge" > "${STAGE}/evidence/journalentries-office${oid}-all.json"
      rm -f "$merge"
    done
    # Reference lists: failures tolerated (|| true) so the archive build continues.
    "${CURL[@]}" "${BASE_URL}/offices" > "${STAGE}/evidence/offices.json" || true
    "${CURL[@]}" "${BASE_URL}/glaccounts" > "${STAGE}/evidence/glaccounts.json" || true
    echo "Live Fineract evidence written." >&2
  else
    echo "WARN: FETCH_LIVE_EVIDENCE=1 but OMNL credentials missing." >&2
  fi
fi
# Build metadata and envelope inventory used by the README and JSON proofs below.
GIT_COMMIT=$(git -C "$REPO_ROOT" rev-parse HEAD 2>/dev/null || echo "n/a")
GENERATED="$(date -u -Iseconds)"
TRANSACTION_ENVELOPE_COUNT="$(find "${STAGE}/transactions" -maxdepth 1 -type f -name '*.json' | wc -l | tr -d ' ')"
TRANSACTION_ENVELOPES_JSON="$(cd "$STAGE" && find ./transactions -maxdepth 1 -type f -name '*.json' | sort | jq -R -s 'split("\n") | map(select(length > 0)) | map(sub("^\\./"; ""))')"
# Validate settlement JSONs in archive (best-effort)
VALIDATION_NOTE="Install check-jsonschema (see config/dbis-institutional/README.md), then: check-jsonschema --schemafile schemas/settlement-event.schema.json settlement-events/*.json"
TRANSACTION_ENVELOPE_VALIDATION_NOTE="No staged transaction execution envelopes."
if ! command -v check-jsonschema &>/dev/null; then
  # Validator unavailable: settlement note keeps its install hint; envelopes, if
  # staged, are explicitly marked as unvalidated.
  if [ "$TRANSACTION_ENVELOPE_COUNT" -gt 0 ]; then
    TRANSACTION_ENVELOPE_VALIDATION_NOTE="Transaction execution envelopes staged, but schema validation skipped because check-jsonschema is not installed."
  fi
else
  # Settlement events: record pass/fail in the README note (non-fatal).
  settlement_failures=()
  for sf in "${STAGE}"/settlement-events/*.json; do
    [ -f "$sf" ] || continue
    check-jsonschema --schemafile "${STAGE}/schemas/settlement-event.schema.json" "$sf" &>/dev/null || settlement_failures+=("$(basename "$sf")")
  done
  if [ "${#settlement_failures[@]}" -gt 0 ]; then
    VALIDATION_NOTE="Schema validation FAIL: ${settlement_failures[*]}"
  else
    VALIDATION_NOTE="All settlement-events/*.json passed check-jsonschema against bundled schema."
  fi
  # Transaction execution envelopes: a schema failure here aborts the build.
  envelope_failures=()
  for tf in "${STAGE}"/transactions/*.json; do
    [ -f "$tf" ] || continue
    check-jsonschema --schemafile "${STAGE}/schemas/jvmtm/transaction-compliance-execution.schema.json" "$tf" &>/dev/null || envelope_failures+=("$(basename "$tf")")
  done
  if [ "$TRANSACTION_ENVELOPE_COUNT" -gt 0 ]; then
    if [ "${#envelope_failures[@]}" -gt 0 ]; then
      echo "ERROR: transaction execution envelope schema validation failed: ${envelope_failures[*]}" >&2
      exit 1
    fi
    TRANSACTION_ENVELOPE_VALIDATION_NOTE="All transactions/*.json passed check-jsonschema against bundled transaction-compliance-execution.schema.json."
  fi
fi
cat > "${STAGE}/README_E2E_ARCHIVE.txt" <<EOF
OMNL + Chain 138 — E2E Settlement, Audit Proof, Closure Cluster
================================================================
Generated (UTC): ${STAMP_UTC}
Repository HEAD: ${GIT_COMMIT}
Contents
--------
settlement-events/ Canonical JSON (schema: settlement-event.schema.json)
schemas/ settlement-event.schema.json; schemas/jvmtm/*.schema.json (JVMTM closure)
examples/ settlement-event.example.json, settlement-event.chain138-primary.example.json, settlement-event.min.json
reconciliation/ daily-3way-reconciliation-report.json (template or JVMTM_CLOSURE_DIR); 3way-result.json when present (from generate-3way-reconciliation-evidence.sh)
liquidity/ prefunding-proof.json
acknowledgements/ pre-settlement-ack.json
exceptions/ exception-policy.md, sample-exception-event.json, retry-log.txt
validation/ kyt-screening-result.json (optional evidence)
bcp/ recovery-time-report.json, failover-test-log.txt
disaster-recovery/ DR-simulation-report.json
monitoring/ real-time-balance-snapshot.json
transactions/ optional live transaction-compliance-execution envelopes from JVMTM_CLOSURE_DIR/transactions/*.json
config/jvmtm-regulatory-closure/ README, OPERATIONAL_EVIDENCE_VS_TEMPLATES.md, INAAUDJVMTM_2025_AUDIT_CLOSURE_MATRIX.md (018215821582/INAAUDJVMTM/2025)
transaction-grade matrix (.md + .json + .csv) and example execution envelopes
docs/ M1 clearing runbook, IPSAS memo, Phase C, RTGS+Smart Vault runbook, OJK/BI identifier policy, dbis-institutional README
scripts/ M1 clearing, 102B chunked, chain attestation, this builder, dbis + JVMTM validators, matrix CSV exporter
evidence/ chain138-attestation-receipt.txt; mainnet-attestation-receipt.txt when dual-anchor hash + ETHEREUM_MAINNET_RPC set; optional Fineract journal pulls
audit-proof/ AUDIT_PROOF.json, SETTLEMENT_CLOSURE.json
reserve-provenance-package/ 3FR funding origin attestation layer (see README.md inside; bank/KYT truthfully pending)
Closure summary (102B USD path)
-------------------------------
- Ledger: 102 × USD 1,000,000,000.00 per Fineract journal line (office 21 → 22), refs OMNL-102B-CH{1..102}-OF21-TO22-20260331
- Chain 138: attestation tx ${CHAIN_ATTESTATION_TX_HASH} (Chain ID 138), deployer 0x4A666F96fC8764181194447A7dFdb7d471b301C8
- Ethereum mainnet (optional): ${CHAIN_ATTESTATION_TX_HASH_MAINNET:-not recorded in this build} when dual-anchor — see AUDIT_PROOF.json chainAttestationMainnet
- Correlation ID (closure): 1a62dd79-dca7-4bbf-b7f7-3d73e1f5912d
Verify integrity
----------------
cd ${ARCHIVE_BASENAME}
sha256sum -c MANIFEST.sha256
Schema validation (optional)
-----------------------------
${VALIDATION_NOTE}
JVMTM / regulatory closure JSON (optional)
------------------------------------------
check-jsonschema --schemafile schemas/jvmtm/daily-3way-reconciliation-report.schema.json reconciliation/daily-3way-reconciliation-report.json
(and similarly for liquidity/, acknowledgements/, exceptions/sample-exception-event.json, validation/, bcp/*.json, disaster-recovery/, monitoring/)
Or: bash scripts/validation/validate-jvmtm-regulatory-closure-schemas.sh (validates repo examples; re-run against archive paths after edits)
Transaction execution envelopes
-------------------------------
${TRANSACTION_ENVELOPE_VALIDATION_NOTE}
Reserve provenance package (3FR)
---------------------------------
bash scripts/validation/validate-reserve-provenance-package.sh
(validates reserve-provenance-package/*.json against reserve-provenance-package.schema.json)
EOF
# Emit audit-proof/AUDIT_PROOF.json: machine-readable proof manifest tying the
# ledger posts, the Chain 138 attestation (plus optional mainnet dual-anchor),
# JVMTM closure evidence and the reserve-provenance package together.
# chainAttestationMainnet is emitted as null unless a mainnet tx hash was set.
jq -n \
--arg archiveId "$ARCHIVE_BASENAME" \
--arg gen "$GENERATED" \
--arg git "$GIT_COMMIT" \
--arg attTx "$CHAIN_ATTESTATION_TX_HASH" \
--arg rpc "$RPC_URL_138" \
--arg mTx "${CHAIN_ATTESTATION_TX_HASH_MAINNET:-}" \
--arg mRpc "${ETHEREUM_MAINNET_RPC:-}" \
--arg corr "1a62dd79-dca7-4bbf-b7f7-3d73e1f5912d" \
--argjson txCount "${TRANSACTION_ENVELOPE_COUNT}" \
--argjson txEnvelopes "${TRANSACTION_ENVELOPES_JSON}" \
--argjson offices '[1,21,22]' \
'{
archiveType: "OMNL_E2E_SETTLEMENT_AUDIT_CLOSURE",
archiveId: $archiveId,
archiveAttestationPolicy: "Dual-anchor: Chain 138 (mandatory path in this bundle) + Ethereum mainnet when CHAIN_ATTESTATION_TX_HASH_MAINNET and ETHEREUM_MAINNET_RPC are set or latest-dual-attestation.json present",
generatedAtUtc: $gen,
repositoryHeadCommit: $git,
auditProof: {
settlementSchema: "config/dbis-institutional/schemas/settlement-event.schema.json",
schemaDraft: "2020-12",
finercatEvidenceOffices: $offices,
validationTool: "check-jsonschema",
narrative: "Double-entry M1 interoffice (1410/2100); HO static for m1-clearing script; IPSAS/IFRS per OMNL_IPSAS_IFRS_INTEROFFICE_COMPLIANCE.md"
},
settlement: {
notionalUsdMajor: "102000000000.00",
chunking: "102 x USD 1000000000.00 per Fineract line (minor units 100000000000 cents each)",
script: "scripts/omnl/omnl-m1-clearing-102b-chunked.sh",
fromOfficeId: 21,
toOfficeId: 22,
referencePrefix: "OMNL-102B-CH",
complianceRef: "OMNL-102B-CHUNKED-20260331"
},
chainAttestation: {
chainId: 138,
rpcUrlUsed: $rpc,
transactionHash: $attTx,
description: "0-value self-send attestation; SettlementRouter not deployed per DBIS Rail status",
deployerAddress: "0x4A666F96fC8764181194447A7dFdb7d471b301C8"
},
chainAttestationMainnet: (if ($mTx | length) > 0 then {
chainId: 1,
rpcUrlUsed: $mRpc,
transactionHash: $mTx,
description: "0-value self-send attestation on Ethereum mainnet (dual-anchor with Chain 138)",
deployerAddress: "0x4A666F96fC8764181194447A7dFdb7d471b301C8"
} else null end),
closureCluster: {
correlationId: $corr,
canonicalSettlementEventFile: "settlement-events/omnl-102b-ledger-and-chain-20260331.json",
eventType: "CHAIN_SETTLEMENT",
statusExtension: "FINALIZED"
},
jvmtmRegulatoryClosure: {
description: "JVMTM-style audit commentary evidence (Tables B/C/D); templates unless JVMTM_CLOSURE_DIR set",
auditEngagementReference: "018215821582/INAAUDJVMTM/2025",
tablesBCDClosureMatrix: "config/jvmtm-regulatory-closure/INAAUDJVMTM_2025_AUDIT_CLOSURE_MATRIX.md",
mandatoryPaths: [
"reconciliation/daily-3way-reconciliation-report.json",
"liquidity/prefunding-proof.json",
"acknowledgements/pre-settlement-ack.json",
"exceptions/exception-policy.md",
"exceptions/sample-exception-event.json"
],
supplementaryPaths: [
"config/jvmtm-regulatory-closure/INAAUDJVMTM_2025_AUDIT_CLOSURE_MATRIX.md",
"reconciliation/3way-result.json",
"config/jvmtm-regulatory-closure/OPERATIONAL_EVIDENCE_VS_TEMPLATES.md",
"validation/kyt-screening-result.json",
"bcp/recovery-time-report.json",
"bcp/failover-test-log.txt",
"disaster-recovery/DR-simulation-report.json",
"monitoring/real-time-balance-snapshot.json",
"exceptions/retry-log.txt"
],
transactionGradePackPaths: [
"config/jvmtm-regulatory-closure/JVMTM_TRANSACTION_GRADE_COMPLIANCE_MATRIX.md",
"config/jvmtm-regulatory-closure/transaction-compliance-matrix.json",
"config/jvmtm-regulatory-closure/transaction-compliance-matrix.csv"
],
exampleExecutionEnvelopes: [
"config/jvmtm-regulatory-closure/examples/transaction-compliance-execution.example.json",
"config/jvmtm-regulatory-closure/examples/transaction-compliance-execution.blocked.example.json"
],
executionEnvelopeSchema: "schemas/jvmtm/transaction-compliance-execution.schema.json",
stagedTransactionEnvelopeCount: $txCount,
stagedTransactionEnvelopes: $txEnvelopes,
transactionEnvelopeArchiveDir: "transactions/",
schemaDir: "schemas/jvmtm",
operatorReadme: "config/jvmtm-regulatory-closure/README.md",
operationalEvidenceNote: "Templates structure controls; generated/vendor/execution artifacts prove operation — see OPERATIONAL_EVIDENCE_VS_TEMPLATES.md"
},
reserveProvenancePackage: {
description: "Staged reserve / funding origin attestation (3FR); HYBX-OMNL-DBIS regulatory stack + reserve hosting map; counsel review; MT940-camt.053-API keystone still required for bank leg",
root: "reserve-provenance-package/",
readme: "reserve-provenance-package/README.md",
schema: "reserve-provenance-package/schemas/reserve-provenance-package.schema.json",
artifacts: [
"reserve-provenance-package/legal/ATTORNEY_RECEIPT_ATTESTATION_3FR.json",
"reserve-provenance-package/settlement/SETTLEMENT_FINALITY_DECLARATION.json",
"reserve-provenance-package/provenance/FUNDING_ORIGIN_CHAIN_3FR.json",
"reserve-provenance-package/bank/JVMTM_BANK_BALANCE_JSON.json",
"reserve-provenance-package/bank/MT940_STATEMENT_3FR_TITAN_RECEIVING_ACCOUNT.txt",
"reserve-provenance-package/bank/MT940_STATEMENT_3FR_TITAN_RESERVE_LINKED.txt",
"reserve-provenance-package/bank/README_BANK_REQUEST_MT940_CAMT053.md",
"reserve-provenance-package/bank/JVMTM_BANK_BALANCE_PARSED_EXAMPLE_AFTER_MT940.example.json",
"reserve-provenance-package/kyt/KYT_EXECUTION_RECORD.json",
"reserve-provenance-package/reconciliation/3WAY_RECONCILIATION_TRIGGER.json",
"reserve-provenance-package/reserve/RESERVE_RECOGNITION_DECLARATION.json",
"reserve-provenance-package/reserve/RESERVE_MONETARY_LINKAGE_DECLARATION.json",
"reserve-provenance-package/reserve/RESERVE_HOSTING_AND_JURISDICTION_MAP.json",
"reserve-provenance-package/governance/REGULATORY_STACK_DECLARATION.json",
"reserve-provenance-package/governance/REGULATORY_STACK_NARRATIVE.txt"
]
}
}' > "${STAGE}/audit-proof/AUDIT_PROOF.json"
# Emit audit-proof/SETTLEMENT_CLOSURE.json: five-layer closure record plus the
# artefact inventory (a fixed list concatenated with any staged tx envelopes).
jq -n \
--arg gen "$GENERATED" \
--arg corr "1a62dd79-dca7-4bbf-b7f7-3d73e1f5912d" \
--arg tx "$CHAIN_ATTESTATION_TX_HASH" \
--arg mtx "${CHAIN_ATTESTATION_TX_HASH_MAINNET:-}" \
--argjson txEnvelopes "${TRANSACTION_ENVELOPES_JSON}" \
'{
closureRecord: "SETTLEMENT_CLOSURE",
generatedAtUtc: $gen,
layers: {
layer1_interofficeLedger: "102 chunked M1 posts office 21 to 22; refs OMNL-102B-CH*-20260331",
layer2_invariant: "Operator to confirm net M1 across 21+22 per SOP; HO unchanged for m1-clearing-only legs",
layer3_identifiers: {
correlationId: $corr,
chainTxHash: $tx,
chainId: 138,
ethereumMainnetChainTxHash: (if ($mtx | length) > 0 then $mtx else null end),
ethereumMainnetChainId: (if ($mtx | length) > 0 then 1 else null end)
},
layer4_schema: "settlement-events/*.json validate against settlement-event.schema.json",
layer5_transactionExecutionEnvelopes: $txEnvelopes
},
artefacts: ([
"audit-proof/AUDIT_PROOF.json",
"audit-proof/SETTLEMENT_CLOSURE.json",
"config/jvmtm-regulatory-closure/INAAUDJVMTM_2025_AUDIT_CLOSURE_MATRIX.md",
"config/jvmtm-regulatory-closure/JVMTM_TRANSACTION_GRADE_COMPLIANCE_MATRIX.md",
"config/jvmtm-regulatory-closure/transaction-compliance-matrix.json",
"config/jvmtm-regulatory-closure/transaction-compliance-matrix.csv",
"config/jvmtm-regulatory-closure/examples/transaction-compliance-execution.example.json",
"config/jvmtm-regulatory-closure/examples/transaction-compliance-execution.blocked.example.json",
"schemas/jvmtm/transaction-compliance-execution.schema.json",
"settlement-events/omnl-102b-ledger-and-chain-20260331.json",
"evidence/chain138-attestation-receipt.txt",
"evidence/mainnet-attestation-receipt.txt",
"reconciliation/daily-3way-reconciliation-report.json",
"liquidity/prefunding-proof.json",
"acknowledgements/pre-settlement-ack.json",
"exceptions/exception-policy.md",
"exceptions/sample-exception-event.json",
"exceptions/retry-log.txt",
"validation/kyt-screening-result.json",
"bcp/recovery-time-report.json",
"bcp/failover-test-log.txt",
"disaster-recovery/DR-simulation-report.json",
"monitoring/real-time-balance-snapshot.json",
"reserve-provenance-package/README.md",
"reserve-provenance-package/legal/ATTORNEY_RECEIPT_ATTESTATION_3FR.json",
"reserve-provenance-package/settlement/SETTLEMENT_FINALITY_DECLARATION.json",
"reserve-provenance-package/provenance/FUNDING_ORIGIN_CHAIN_3FR.json",
"reserve-provenance-package/bank/JVMTM_BANK_BALANCE_JSON.json",
"reserve-provenance-package/bank/MT940_STATEMENT_3FR_TITAN_RECEIVING_ACCOUNT.txt",
"reserve-provenance-package/bank/MT940_STATEMENT_3FR_TITAN_RESERVE_LINKED.txt",
"reserve-provenance-package/bank/README_BANK_REQUEST_MT940_CAMT053.md",
"reserve-provenance-package/bank/JVMTM_BANK_BALANCE_PARSED_EXAMPLE_AFTER_MT940.example.json",
"reserve-provenance-package/kyt/KYT_EXECUTION_RECORD.json",
"reserve-provenance-package/reconciliation/3WAY_RECONCILIATION_TRIGGER.json",
"reserve-provenance-package/reserve/RESERVE_RECOGNITION_DECLARATION.json",
"reserve-provenance-package/reserve/RESERVE_MONETARY_LINKAGE_DECLARATION.json",
"reserve-provenance-package/reserve/RESERVE_HOSTING_AND_JURISDICTION_MAP.json",
"reserve-provenance-package/governance/REGULATORY_STACK_DECLARATION.json",
"reserve-provenance-package/governance/REGULATORY_STACK_NARRATIVE.txt",
"reserve-provenance-package/schemas/reserve-provenance-package.schema.json",
"MANIFEST.json",
"MANIFEST.sha256"
] + $txEnvelopes)
}' > "${STAGE}/audit-proof/SETTLEMENT_CLOSURE.json"
# Manifest + sha256
# Build MANIFEST.json from per-file {path, sha256, sizeBytes} records, then a
# sha256sum-compatible MANIFEST.sha256, then zip the staging dir.
# NOTE(review): `find | sort | while read` assumes no newlines in archived
# filenames — acceptable here since all paths are generated by this builder.
NDJSON="${STAGE}/._manifest_items.ndjson"
: > "$NDJSON"
cd "$STAGE"
while IFS= read -r f; do
p="${f#./}"
[ -z "$p" ] && continue
# Exclude the manifests themselves and '._*' scratch files from the listing.
case "$p" in MANIFEST.json|MANIFEST.sha256|._*) continue ;; esac
sz=$(wc -c < "$f" | tr -d ' ')
h=$(sha256sum "$f" | awk '{print $1}')
jq -n --arg path "$p" --arg sha "$h" --argjson size "$sz" \
'{path: $path, sha256: $sha, sizeBytes: $size}' >> "$NDJSON"
done < <(find . -type f | sort)
ITEMS=$(jq -s '.' "$NDJSON")
jq -n \
--arg id "$ARCHIVE_BASENAME" \
--arg gen "$GENERATED" \
--arg git "$GIT_COMMIT" \
--argjson files "$ITEMS" \
'{
archiveId: $id,
generatedAtUtc: $gen,
repositoryHeadCommit: $git,
description: "E2E OMNL settlement + audit proof + closure cluster (Fineract + Chain 138 attestation; optional Ethereum mainnet dual-anchor)",
files: $files
}' > "${STAGE}/MANIFEST.json"
# MANIFEST.sha256 covers MANIFEST.json but excludes itself and scratch files.
find . -type f ! -name '._*' ! -name 'MANIFEST.sha256' | sort | while IFS= read -r f; do
p="${f#./}"
sha256sum "$f" | awk -v p="$p" '{print $1 " " p}'
done > "${STAGE}/MANIFEST.sha256"
rm -f "$NDJSON"
cd "$REPO_ROOT"
# Rebuild the zip from scratch; subshell keeps the cwd change local.
rm -f "$ZIP_PATH"
(
cd "$OUT_BASE"
zip -r -q "$ZIP_PATH" "$ARCHIVE_BASENAME"
)
echo "Wrote $ZIP_PATH" >&2
ls -la "$ZIP_PATH" >&2
# Staging dir is removed unless KEEP_STAGE=1 (useful for debugging).
if [ "${KEEP_STAGE:-0}" != "1" ]; then
rm -rf "$STAGE"
fi

View File

@@ -38,7 +38,7 @@ fi
cp "$DOCS/INDONESIA_SAMPLE_COVER_AND_TOC.md" "$STAGING/00_Cover/"
cat > "$STAGING/00_Cover/README.txt" << 'COVERREADME'
HYBX-BATCH-001 | Bank Kanaya (OMNL office 22) | USD 1,000,000,000.00
HYBX-BATCH-001 | Bank Kanaya (OMNL office 21) | USD 1,000,000,000.00
Cover/TOC: INDONESIA_SAMPLE_COVER_AND_TOC.md
Integrity: ELECTRONIC_SIGNATURE_AND_HASH_NOTARIZATION_POLICY.txt; GENERATED_EVIDENCE_ESIGN_MANIFEST.json;
HASH_NOTARIZATION_ANCHOR.txt; audit_and_hashes.txt; audit_manifest.json (contentCommitmentSha256).
@@ -123,7 +123,7 @@ fi
cat > "$STAGING/Volume_B/Section_3/SECTION_3_NA_MEMORANDUM.txt" << 'EOF'
SECTION 3 — CORRESPONDENT BANKING — NOT APPLICABLE (HYBX-BATCH-001)
Settlement via OMNL central-bank-ledger design; USD leg on OMNL books. Bank Kanaya office 22.
Settlement via OMNL central-bank-ledger design; USD leg on OMNL books. Bank Kanaya office 21.
No multi-hop nostro/vostro chain applies. See Appendix/INDONESIA_MASTER_PROOF_MANIFEST.md.
EOF
@@ -152,7 +152,7 @@ section_readme() {
local out="$2"
{
echo "HYBX-BATCH-001 — Section index ($id)"
echo "Settlement ref: HYBX-BATCH-001 | Value date: 2026-03-17 | Beneficiary: Bank Kanaya (office 22)"
echo "Settlement ref: HYBX-BATCH-001 | Value date: 2026-03-17 | Beneficiary: Bank Kanaya (office 21)"
echo "See Appendix/INDONESIA_MASTER_PROOF_MANIFEST.md for required exhibits."
} >"$out"
}
@@ -174,7 +174,7 @@ section_readme "Volume F §15" "$STAGING/Volume_F/Section_15/README.txt"
cat > "$STAGING/README.txt" << 'ZIPREADME'
TRANSACTION PACKAGE — HYBX-BATCH-001
Beneficiary: Bank Kanaya (Indonesia) — OMNL officeId 22 | USD 1,000,000,000.00
Beneficiary: Bank Kanaya (Indonesia) — OMNL officeId 21 | USD 1,000,000,000.00
Structure: 00_Cover, Volume_AF, Appendix. Generator: scripts/omnl/generate-transaction-package-evidence.py
Override ledger: HYBX_LEDGER_FILE=/path/to.csv. Integrity: 00_Cover/HASH_NOTARIZATION_ANCHOR.txt + audit_manifest.json
ZIPREADME
@@ -210,7 +210,7 @@ CONTENT_COMMITMENT=$(LC_ALL=C sort "$HASH_TSV" | sha256sum | awk '{print $1}')
cat > "$ANCHOR_FILE" <<ANCHOR
HASH NOTARIZATION ANCHOR — HYBX-BATCH-001
Build date (UTC): $BUILD_DATE
Beneficiary: Bank Kanaya — OMNL officeId 22
Beneficiary: Bank Kanaya — OMNL officeId 21
CONTENT COMMITMENT (SHA-256, hex): $CONTENT_COMMITMENT
Excluded from commitment input: this file; audit_and_hashes.txt; audit_manifest.json;
@@ -235,7 +235,7 @@ elif [ -n "${TSA_URL:-}" ] || { [ -n "${QES_SIGN_CERT:-}" ] && [ -n "${QES_SIGN_
fi
{
echo "Transaction package audit — HYBX-BATCH-001 | Bank Kanaya | office 22"
echo "Transaction package audit — HYBX-BATCH-001 | Bank Kanaya | office 21"
echo "Build date (UTC): $BUILD_DATE"
echo "Generator: scripts/omnl/build-transaction-package-zip.sh"
echo ""

View File

@@ -0,0 +1,75 @@
{
"documentId": "PT-CAKRA-INVESTAMA-SIDECAR",
"fineract": {
"officeId": 22,
"clientId": 16,
"staffIdPresidentDirector": 3,
"note": "If POST /users returns 500, create Fineract user in UI linked to staffId 3, office 22, username bpramukantoro (or override CAKRA_USERNAME)."
},
"officeExternalId": "OMNL-ID-JKT-CAKRA-001",
"clientExternalId": "OMNL-ID-JKT-CAKRA-CLIENT",
"companyRegistration": {
"ahuNumber": "AHU-0091539.AH.01.01.TAHUN 2025",
"registrationDate": "2025-10-24",
"jurisdiction": "DKI Jakarta",
"country": "Indonesia"
},
"registeredAddress": {
"line1": "THE BELLEZZA OFFICE TOWER FL 21 UNIT 08",
"line2": "JL. LETJEN SOEPENO NO. 34, ARTERI PERMATA HIJAU, GROGOL UTARA, KEBAYORAN LAMA",
"city": "SOUTH JAKARTA",
"postalCode": "12210",
"country": "Indonesia"
},
"primarySignatory": {
"fullName": "Bambang Pramukantoro SP",
"position": "President Director",
"nationality": "Indonesia",
"dateOfBirth": "1966-05-05",
"passportNumber": "X6087967",
"passportIssued": "2025-06-13",
"passportExpires": "2035-06-13",
"telephone": "+62 811-400-001",
"email": "bambangpram04@gmail.com"
},
"bankingMandiri": {
"bankName": "PT. BANK MANDIRI",
"branch": "KCP Jakarta Gandaria",
"bankAddress": "JL. Gandaria Tengah III No. 21, Kebayoran Baru, Jakarta Selatan",
"accountName": "PT. CAKRA INVESTAMA INTERNATIONAL",
"accountNumberIdr": "126-000-1597-888",
"accountNumberUsd": "126-000-1599-777",
"swift": "BMRIIDJA",
"bankOfficer": "Marisa",
"bankPhone": ["+62 21 2702865", "+62 21 2702866"],
"bankMobile": "+62 811-7884-429"
},
"tax": {
"npwp": "08.540.442.4-603.000",
"registeredName": "R Bambang Pramukantoro SP",
"taxOffice": "KPP Pratama Surabaya Karangpilang",
"npwpRegistrationDate": "2003-07-24"
},
"complianceProfile": {
"note": "Fineract Office/Client APIs do not store AML/FATCA/CRS; extend via sidecar, datatables, or compliance stack.",
"amlRiskScore": null,
"fatcaStatus": "UNKNOWN",
"crsStatus": "UNKNOWN",
"uboDeclaration": "MISSING",
"licensingNibSiup": "MISSING"
},
"additionalKtpReferences": [
{
"name": "R Bambang Pramukantoro",
"relation": "Director / tax registrant"
},
{
"name": "Lucky Bayu Purnomo",
"relation": "KTP on file"
},
{
"name": "Dedy Nugroho",
"relation": "KTP on file"
}
]
}

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Fetch or refuse KYT vendor evidence. Does NOT fabricate PASS — regulators expect vendor traceability.
#
# Modes:
#   1) KYT_API_URL + KYT_API_KEY (optional KYT_API_HEADERS_JSON, a JSON object of
#      extra request headers): fetch via curl, write body to OUT_JSON.
#   2) KYT_VENDOR_EXPORT_JSON: copy existing vendor export path into OUT_JSON (operator already downloaded).
#
# If none set: writes a REFUSED manifest and exits 2.
#
# Env:
#   KYT_OUT_JSON — default output/jvmtm-evidence/validation/kyt-vendor-result.json
set -euo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
OUT="${KYT_OUT_JSON:-${REPO_ROOT}/output/jvmtm-evidence/validation/kyt-vendor-result.json}"
mkdir -p "$(dirname "$OUT")"
# Mode 2: an operator-supplied vendor export takes precedence over live fetch.
if [[ -n "${KYT_VENDOR_EXPORT_JSON:-}" && -f "${KYT_VENDOR_EXPORT_JSON}" ]]; then
  cp -a "${KYT_VENDOR_EXPORT_JSON}" "$OUT"
  echo "Wrote $OUT from KYT_VENDOR_EXPORT_JSON" >&2
  exit 0
fi
# No vendor source configured: refuse explicitly rather than fabricate evidence.
if [[ -z "${KYT_API_URL:-}" ]]; then
  jq -n \
    --arg ts "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
    '{status: "REFUSED", reason: "No KYT_API_URL or KYT_VENDOR_EXPORT_JSON; vendor evidence not fabricated.", generated_at: $ts}' \
    > "$OUT"
  echo "REFUSED: wrote $OUT (exit 2)" >&2
  exit 2
fi
# Mode 1: live fetch. Build curl arguments as an array so header values with
# spaces survive. KYT_API_HEADERS_JSON was documented but previously ignored —
# it is now honored as a JSON object {"Header-Name": "value", ...}.
TMP="$(mktemp)"
trap 'rm -f "$TMP"' EXIT
curl_args=(-sS)
[[ -n "${KYT_API_KEY:-}" ]] && curl_args+=(-H "Authorization: Bearer ${KYT_API_KEY}")
if [[ -n "${KYT_API_HEADERS_JSON:-}" ]]; then
  while IFS= read -r hdr; do
    curl_args+=(-H "$hdr")
  done < <(jq -r 'to_entries[] | "\(.key): \(.value)"' <<<"${KYT_API_HEADERS_JSON}")
fi
curl "${curl_args[@]}" "${KYT_API_URL}" -o "$TMP" || { echo "curl failed" >&2; exit 2; }
if jq -e . "$TMP" &>/dev/null; then
  # JSON body: annotate with fetch provenance before writing.
  jq --arg ts "$(date -u +%Y-%m-%dT%H:%M:%SZ)" '. + {fetched_at: $ts, source: "curl:KYT_API_URL"}' "$TMP" > "$OUT"
else
  # Non-JSON body: record its hash so the raw payload can be archived out-of-band.
  jq -n \
    --arg ts "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
    --arg sha "$(sha256sum "$TMP" 2>/dev/null | awk '{print $1}')" \
    '{status: "RAW", note: "non-JSON KYT response; store full body out-of-band", response_sha256: $sha, fetched_at: $ts}' > "$OUT"
fi
echo "Wrote $OUT from KYT_API_URL" >&2
exit 0

View File

@@ -0,0 +1,269 @@
#!/usr/bin/env bash
# Generate three-way reconciliation JSON from Fineract (ledger) + optional bank file/env + Chain 138 ERC20 balance.
# Operational evidence: bank leg requires operator-supplied statement/API (file or env). See
# config/jvmtm-regulatory-closure/OPERATIONAL_EVIDENCE_VS_TEMPLATES.md
#
# Env (after sourcing load-project-env):
# OMNL_FINERACT_BASE_URL, OMNL_FINERACT_USER, OMNL_FINERACT_PASSWORD, OMNL_FINERACT_TENANT (default omnl)
# RECON_OFFICE_ID (default 21), RECON_GL_CODE (default 2100)
# RECON_TOKEN_ADDRESS (default canonical cUSDT on 138), RECON_CHAIN_HOLDER (default deployer), RECON_TOKEN_DECIMALS (default 6)
# JVMTM_CORRELATION_ID — use real UUID for examination (not literal PLACEHOLDER)
# JVMTM_BANK_BALANCE_JSON — path: {"value_major","statement_ref","fetched_at"?}
# JVMTM_BANK_BALANCE_MAJOR + JVMTM_BANK_STATEMENT_REF — alternative
# JVMTM_EVIDENCE_DIR — default REPO/output/jvmtm-evidence
# AS_OF — YYYY-MM-DD (default UTC today)
#
# Output: 3way-<AS_OF>.json + latest-3way-result.json
#
# Strict mode is staged: nounset is deferred until after env sourcing, because
# load-project-env.sh and .env files may reference variables that are not set.
set -eo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# shellcheck source=scripts/lib/load-project-env.sh
set +u
source "${REPO_ROOT}/scripts/lib/load-project-env.sh"
if [[ -f "${REPO_ROOT}/omnl-fineract/.env" ]]; then
# set -a exports every variable assigned while sourcing the .env.
set -a
# shellcheck disable=SC1090
source "${REPO_ROOT}/omnl-fineract/.env" 2>/dev/null || true
set +a
fi
# Full strict mode (including nounset) from here on.
set -euo pipefail
AS_OF="${AS_OF:-$(date -u +%Y-%m-%d)}"
OUT_DIR="${JVMTM_EVIDENCE_DIR:-${REPO_ROOT}/output/jvmtm-evidence}"
mkdir -p "$OUT_DIR"
OFFICE_ID="${RECON_OFFICE_ID:-21}"
GL_CODE="${RECON_GL_CODE:-2100}"
CORR="${JVMTM_CORRELATION_ID:-PLACEHOLDER}"
TOKEN_ADDR="${RECON_TOKEN_ADDRESS:-0x93E66202A11B1772E55407B32B44e5Cd8eda7f22}"
HOLDER="${RECON_CHAIN_HOLDER:-0x4A666F96fC8764181194447A7dFdb7d471b301C8}"
DECIMALS="${RECON_TOKEN_DECIMALS:-6}"
RPC="${RPC_URL_138:-http://192.168.11.211:8545}"
REPORT_ID="3WAY-GEN-${AS_OF}-$(date -u +%H%M%S)"
GAPS=()
LEDGER_SOURCE="fineract:/glaccounts"
CHAIN_SOURCE="cast:erc20_balanceOf"
RPC_HOST="$(RPC_URL="$RPC" python3 -c "from urllib.parse import urlparse; import os; print(urlparse(os.environ['RPC_URL']).hostname or os.environ['RPC_URL'])")"
BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
PASS="${OMNL_FINERACT_PASSWORD:-}"
USER="${OMNL_FINERACT_USER:-app.omnl}"
TENANT="${OMNL_FINERACT_TENANT:-omnl}"
if [[ -n "$BASE_URL" && -n "$PASS" ]]; then
GL_RAW="$(curl -sS -H "Fineract-Platform-TenantId: ${TENANT}" -u "${USER}:${PASS}" "${BASE_URL}/glaccounts" || true)"
LEDGER_BLOCK="$(GL_RAW="$GL_RAW" OFFICE_ID="$OFFICE_ID" GL_CODE="$GL_CODE" python3 <<'PY'
import json, os
from datetime import datetime, timezone
office = int(os.environ["OFFICE_ID"])
code = os.environ["GL_CODE"]
raw = os.environ.get("GL_RAW", "[]")
try:
data = json.loads(raw)
except json.JSONDecodeError:
data = []
if isinstance(data, dict) and "pageItems" in data:
data = data["pageItems"]
rows = [
x for x in data
if isinstance(x, dict) and str(x.get("glCode")) == code
and (x.get("officeId") == office or x.get("officeId") is None)
]
acc = rows[0] if rows else {}
bal = acc.get("organizationRunningBalance")
if bal is None:
bal = acc.get("runningBalance")
if bal is None:
s = acc.get("summary")
if isinstance(s, dict):
bal = s.get("runningBalance")
out = {
"value_major": None,
"source": "fineract:/glaccounts",
"fetched_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
"gl_code": code,
"office_id": office,
"gl_account_id": acc.get("id"),
"raw_field": "organizationRunningBalance|runningBalance",
}
if bal is not None:
out["value_major"] = str(bal)
print(json.dumps({"ledger_line": out, "found": bool(acc)}))
PY
)"
LEDGER_VAL="$(echo "$LEDGER_BLOCK" | jq -r '.ledger_line.value_major // empty')"
if [[ "$(echo "$LEDGER_BLOCK" | jq -r '.found')" != "true" ]] || [[ -z "$LEDGER_VAL" ]]; then
GAPS+=("fineract_gl_balance_missing")
fi
LEDGER_JSON="$(echo "$LEDGER_BLOCK" | jq -c '.ledger_line')"
else
GAPS+=("fineract_unreachable_or_unconfigured")
LEDGER_JSON="$(jq -nc \
--arg s "$LEDGER_SOURCE" \
--arg ft "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
--argjson oid "$OFFICE_ID" \
--arg gc "$GL_CODE" \
'{value_major: null, source: $s, fetched_at: $ft, gl_code: $gc, office_id: $oid, raw_field: "n/a"}')"
fi
if command -v cast &>/dev/null; then
RAW_BAL="$(cast call "$TOKEN_ADDR" "balanceOf(address)(uint256)" "$HOLDER" --rpc-url "$RPC" 2>/dev/null || echo "")"
if [[ -n "$RAW_BAL" ]]; then
RAW_ONE="$(echo "$RAW_BAL" | awk '{print $1}')"
CHAIN_JSON="$(RAW_BAL="$RAW_ONE" DECIMALS="$DECIMALS" TOKEN_ADDR="$TOKEN_ADDR" HOLDER="$HOLDER" RPC_HOST="$RPC_HOST" python3 <<'PY'
import os, json
from decimal import Decimal
from datetime import datetime, timezone
raw = int(os.environ["RAW_BAL"].strip(), 0)
dec = int(os.environ["DECIMALS"])
major = str(Decimal(raw) / (Decimal(10) ** dec))
out = {
"value_major": major,
"source": "cast:erc20_balanceOf",
"fetched_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
"rpc_url_host": os.environ.get("RPC_HOST", ""),
"chain_id": 138,
"token_address": os.environ["TOKEN_ADDR"],
"holder_address": os.environ["HOLDER"],
"decimals": dec,
}
print(json.dumps(out))
PY
)"
else
GAPS+=("chain_balance_query_failed")
CHAIN_JSON="$(jq -nc \
--arg s "$CHAIN_SOURCE" \
--arg ft "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
--arg th "$RPC_HOST" \
--arg ta "$TOKEN_ADDR" \
--arg hd "$HOLDER" \
--argjson dec "$DECIMALS" \
'{value_major: null, source: $s, fetched_at: $ft, rpc_url_host: $th, chain_id: 138, token_address: $ta, holder_address: $hd, decimals: $dec}')"
fi
else
GAPS+=("cast_not_installed")
CHAIN_JSON="$(jq -nc \
--arg s "$CHAIN_SOURCE" \
--arg ft "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
--arg th "$RPC_HOST" \
--arg ta "$TOKEN_ADDR" \
--arg hd "$HOLDER" \
--argjson dec "$DECIMALS" \
'{value_major: null, source: $s, fetched_at: $ft, rpc_url_host: $th, chain_id: 138, token_address: $ta, holder_address: $hd, decimals: $dec}')"
fi
if [[ -n "${JVMTM_BANK_BALANCE_JSON:-}" && -f "${JVMTM_BANK_BALANCE_JSON}" ]]; then
BANK_JSON="$(jq -c \
--arg now "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
'{value_major: .value_major, source: (.source // "operator:jvmtm_bank_json_file"), fetched_at: (.fetched_at // $now), statement_ref: .statement_ref}' \
"${JVMTM_BANK_BALANCE_JSON}")"
BANK_VAL="$(echo "$BANK_JSON" | jq -r '.value_major // empty')"
if [[ -z "$BANK_VAL" ]]; then
GAPS+=("bank_file_missing_value_major")
BANK_JSON="null"
fi
elif [[ -n "${JVMTM_BANK_BALANCE_MAJOR:-}" ]]; then
BANK_JSON="$(jq -nc \
--arg v "${JVMTM_BANK_BALANCE_MAJOR}" \
--arg r "${JVMTM_BANK_STATEMENT_REF:-nostro-export}" \
--arg ft "$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
'{value_major: $v, source: "operator:env_JVMTM_BANK_BALANCE_MAJOR", fetched_at: $ft, statement_ref: $r}')"
else
GAPS+=("bank_statement_not_supplied")
BANK_JSON="null"
fi
# Flag weak correlation IDs so the report cannot pass as examination-grade.
[[ ${#CORR} -lt 8 ]] && GAPS+=("correlation_id_too_short_use_JVMTM_CORRELATION_ID")
[[ "$CORR" == "PLACEHOLDER" ]] && GAPS+=("correlation_id_placeholder_not_examination_grade")
# ${GAPS[@]+...} guards the empty-array case: under `set -u`, bash < 4.4 treats
# expansion of an empty array as an unbound-variable error. With no gaps, printf
# emits a lone newline which jq's select(length>0) filters to [].
GAPS_JSON="$(printf '%s\n' ${GAPS[@]+"${GAPS[@]}"} | jq -R -s -c 'split("\n") | map(select(length>0))')"
ARGV_JSON="$(python3 -c 'import json,sys; print(json.dumps(sys.argv[1:]))' -- "$@")"
export LEDGER_JSON CHAIN_JSON BANK_JSON GAPS_JSON CORR REPORT_ID AS_OF ARGV_JSON
FINAL_JSON="$(python3 <<'PY'
import json, os
from decimal import Decimal, InvalidOperation
def D(x):
if x is None or x == "":
return None
try:
return Decimal(str(x))
except InvalidOperation:
return None
ledger = json.loads(os.environ["LEDGER_JSON"])
chain = json.loads(os.environ["CHAIN_JSON"])
bank_s = os.environ["BANK_JSON"]
bank = json.loads(bank_s) if bank_s != "null" else None
gaps = json.loads(os.environ["GAPS_JSON"])
corr = os.environ["CORR"]
report_id = os.environ["REPORT_ID"]
as_of = os.environ["AS_OF"]
argv = json.loads(os.environ["ARGV_JSON"])
lv = D(ledger.get("value_major"))
cv = D(chain.get("value_major") if chain else None)
bv = D(bank.get("value_major") if bank else None)
eps = Decimal("0.01")
def sub(a, b):
if a is None or b is None:
return None
return str(a - b)
var = {
"ledger_vs_bank_major": sub(lv, bv) if bv is not None else "n/a",
"ledger_vs_chain_major": sub(lv, cv) if cv is not None else "n/a",
"bank_vs_chain_major": sub(bv, cv) if bv is not None and cv is not None else "n/a",
}
matched = False
if lv is not None and cv is not None and bv is not None:
matched = abs(lv - cv) <= eps and abs(lv - bv) <= eps and abs(bv - cv) <= eps
elif lv is not None and cv is not None and bv is None:
matched = abs(lv - cv) <= eps
if any(g in gaps for g in ("bank_statement_not_supplied", "bank_file_missing_value_major")):
tier = "GENERATED_PARTIAL"
elif not gaps:
tier = "GENERATED_FULL" if matched and bv is not None else "GENERATED_PARTIAL"
else:
tier = "INCOMPLETE"
from datetime import datetime, timezone
import socket
gen_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
out = {
"schema_version": 1,
"report_id": report_id,
"as_of": as_of,
"correlation_id": corr,
"currency": "USD",
"evidence_tier": tier,
"evidence_gaps": gaps,
"ledger": ledger,
"bank": bank,
"chain": chain,
"variance": var,
"matched": matched,
"generated_at": gen_at,
"generator": {
"script": "scripts/omnl/generate-3way-reconciliation-evidence.sh",
"argv": argv,
"host": socket.gethostname(),
},
}
print(json.dumps(out, indent=2))
PY
)"
echo "$FINAL_JSON" | tee "${OUT_DIR}/3way-${AS_OF}.json" > "${OUT_DIR}/latest-3way-result.json"
echo "Wrote ${OUT_DIR}/3way-${AS_OF}.json and latest-3way-result.json" >&2

View File

@@ -207,7 +207,7 @@ def write_section1(staging: str) -> str:
core = f"""INSTITUTIONAL AUTHORIZATION — EVIDENCE REGISTER
Settlement batch: {BATCH}
Value date: {VALUE_DATE}
Beneficiary: Bank Kanaya (Indonesia) — OMNL officeId 22 (externalId BANK-KANAYA-ID)
Beneficiary: Bank Kanaya (Indonesia) — OMNL officeId 21 (externalId BANK-KANAYA-ID)
OMNL (settlement ledger authority)
Legal name: ORGANISATION MONDIALE DU NUMERIQUE L.P.B.C.
@@ -271,7 +271,7 @@ def write_section5(staging: str) -> str:
Settlement cycle: {CYCLE}
Value date: {VALUE_DATE}
Bank Kanaya (office 22) +1000000000.00
Bank Kanaya (office 21) +1000000000.00
OMNL Liquidity Pool -1000000000.00
System net 0.00
@@ -357,7 +357,7 @@ Cross-check: Appendix/INDONESIA_MASTER_PROOF_MANIFEST.md Section 9.
os.path.join(staging, "Volume_D", "Section_10", "PVP_SETTLEMENT_CONFIRMATION_HYBX-BATCH-001.txt"),
f"""PVP SETTLEMENT CONFIRMATION — {BATCH}
Value date: {VALUE_DATE}
Beneficiary: Bank Kanaya (office 22)
Beneficiary: Bank Kanaya (office 21)
Cross-check: Appendix/INDONESIA_MASTER_PROOF_MANIFEST.md Section 10.
""",
),
@@ -372,7 +372,7 @@ Cross-check: Appendix/INDONESIA_MASTER_PROOF_MANIFEST.md Section 11.
(
os.path.join(staging, "Volume_E", "Section_12", "AML_COMPLIANCE_SUMMARY_HYBX-BATCH-001.txt"),
f"""AML COMPLIANCE SUMMARY — {BATCH}
Beneficiary: Bank Kanaya (Indonesia) — officeId 22
Beneficiary: Bank Kanaya (Indonesia) — officeId 21
Primary schedule (4.995): Appendix/AML_PPATK_EVIDENCE_SCHEDULE_HYBX-BATCH-001.md
Screening / STR / retention: complete per schedule §6 certification.
Cross-check: Appendix/INDONESIA_MASTER_PROOF_MANIFEST.md Section 12;

View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# OMNL / HYBX — Run recommended onboarding for PT. CAKRA INVESTAMA INTERNATIONAL:
# 1) Office (idempotent)
# 2) Tenant GL accounts (idempotent; skip with SKIP_GL=1)
# 3) Corporate client + NPWP + contact (idempotent)
# 4) Staff + Office Admin user (idempotent; needs password or CAKRA_GENERATE_PASSWORD=1)
#
# Usage (repo root):
# OMNL_CAKRA_ADMIN_PASSWORD='…' bash scripts/omnl/omnl-cakra-onboarding-complete.sh
# CAKRA_GENERATE_PASSWORD=1 bash scripts/omnl/omnl-cakra-onboarding-complete.sh
# SKIP_USER=1 bash scripts/omnl/omnl-cakra-onboarding-complete.sh # office + client + GL only
#
# Banking rails and AML/FATCA/CRS are not Fineract core fields — see:
# scripts/omnl/data/pt-cakra-investama-sidecar.json
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
SKIP_GL="${SKIP_GL:-0}"
SKIP_USER="${SKIP_USER:-0}"

# All progress messages go to stderr so stdout stays clean for callers.
log_step() { echo "$1" >&2; }

log_step "=== 1. Office (PT CAKRA) ==="
bash "${REPO_ROOT}/scripts/omnl/omnl-office-create-pt-cakra-investama.sh"

# GL step is tenant-wide and idempotent; failures are tolerated (|| true).
if [[ "$SKIP_GL" != "1" ]]; then
  log_step "=== 2. GL accounts (tenant-wide, idempotent) ==="
  bash "${REPO_ROOT}/scripts/omnl/omnl-gl-accounts-create.sh" || true
fi

log_step "=== 3. Client (corporate + NPWP + contact) ==="
bash "${REPO_ROOT}/scripts/omnl/omnl-client-create-pt-cakra-investama.sh"

# User creation is warn-only unless STRICT_ONBOARDING=1 demands a hard failure.
if [[ "$SKIP_USER" == "1" ]]; then
  log_step "=== 4. User step skipped (SKIP_USER=1) ==="
else
  log_step "=== 4. User (bpramukantoro @ CAKRA office) ==="
  if ! bash "${REPO_ROOT}/scripts/omnl/omnl-user-cakra-office-create.sh"; then
    log_step "WARNING: User API step failed; staff may still exist — see script stderr for STAFF_ID and UI steps."
    [[ "${STRICT_ONBOARDING:-0}" == "1" ]] && exit 1
  fi
fi

log_step "=== Done ==="
log_step "Sidecar JSON: ${REPO_ROOT}/scripts/omnl/data/pt-cakra-investama-sidecar.json"

View File

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
# Broadcast minimal attestation transactions (0-value self-send) to anchor settlement finality
# when DBIS SettlementRouter is not deployed. Does NOT move 102B tokens.
#
# Default: Chain 138 only (RPC_URL_138 / LAN).
# Dual-anchor: also Ethereum mainnet (chain id 1) when ETHEREUM_MAINNET_RPC or RPC_URL_MAINNET
# is set — unless ATTEST_INCLUDE_MAINNET=0. Set ATTEST_INCLUDE_MAINNET=1 to force mainnet when
# RPC is configured; mainnet consumes real ETH gas.
#
# Prerequisites: PRIVATE_KEY (same deployer on both chains — ensure account has ETH on mainnet).
#
# Usage:
# CORRELATION_ID=uuid bash scripts/omnl/omnl-chain138-attestation-tx.sh
# ATTEST_INCLUDE_MAINNET=0 bash scripts/omnl/omnl-chain138-attestation-tx.sh # 138 only
# DRY_RUN=1 bash scripts/omnl/omnl-chain138-attestation-tx.sh
#
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
DRY_RUN="${DRY_RUN:-0}"
CORRELATION_ID="${CORRELATION_ID:-}"

# Source one env file best-effort, with nounset relaxed and allexport enabled
# so every variable the file defines is exported to child processes (cast, jq).
# Missing or broken files are tolerated, matching the original inline blocks.
# Args: $1 - path to the env file.
load_env_file() {
  local env_file="$1"
  [ -f "$env_file" ] || return 0
  set +u
  set -a
  # shellcheck disable=SC1090
  source "$env_file" 2>/dev/null || true
  set +a
  set -u
}

# Project-scoped env first, then repo root (values loaded later override earlier ones).
load_env_file "${REPO_ROOT}/smom-dbis-138/.env"
load_env_file "${REPO_ROOT}/.env"
RPC_138="${RPC_URL_138:-http://192.168.11.211:8545}"
MAINNET_RPC="${ETHEREUM_MAINNET_RPC:-${RPC_URL_MAINNET:-}}"

# Mainnet anchoring: an explicit 0/1 always wins; otherwise auto-enable
# exactly when a mainnet RPC endpoint is configured.
case "${ATTEST_INCLUDE_MAINNET:-}" in
  0) DO_MAINNET=0 ;;
  1) DO_MAINNET=1 ;;
  *) if [ -n "$MAINNET_RPC" ]; then DO_MAINNET=1; else DO_MAINNET=0; fi ;;
esac

# Forcing mainnet without an RPC endpoint is a configuration error.
if [ "$DO_MAINNET" = "1" ] && [ -z "$MAINNET_RPC" ]; then
  echo "ERROR: mainnet attestation requested (ATTEST_INCLUDE_MAINNET=1) but ETHEREUM_MAINNET_RPC / RPC_URL_MAINNET unset" >&2
  exit 1
fi

# Foundry's cast does the key derivation, hashing and sending.
if ! command -v cast &>/dev/null; then
  echo "ERROR: cast (Foundry) not on PATH" >&2
  exit 1
fi

# A key is mandatory for live broadcasts only; dry runs may proceed without one.
if [ "$DRY_RUN" != "1" ] && [ -z "${PRIVATE_KEY:-}" ]; then
  echo "ERROR: PRIVATE_KEY unset (set in smom-dbis-138/.env or .env)" >&2
  exit 1
fi

# Sender address: derive from the key when present; dry runs fall back to a
# sentinel address so payload shaping can still be exercised.
ADDR="${ATTEST_FROM_ADDRESS:-}"
if [ -n "${PRIVATE_KEY:-}" ]; then
  ADDR="$(cast wallet address --private-key "$PRIVATE_KEY")"
elif [ "$DRY_RUN" = "1" ]; then
  ADDR="0x0000000000000000000000000000000000000001"
fi

# Optional keccak fingerprint of the correlation id, used only for log output.
FINGERPRINT=""
if [ -n "$CORRELATION_ID" ]; then
  FINGERPRINT="$(cast keccak "$(printf '%s' "$CORRELATION_ID")")"
fi
# Pull a transaction hash out of raw `cast send` output.
# Primary: the text following a "transactionHash" label on the first such line.
# Fallback: the first 32-byte hex literal found anywhere in the output.
# Prints the hash (possibly empty) without a trailing newline.
extract_tx_hash() {
  local raw="$1"
  local tx_hash
  tx_hash="$(printf '%s\n' "$raw" | sed -n 's/.*transactionHash[[:space:]]*//p' | head -1)"
  if [ -z "$tx_hash" ]; then
    tx_hash="$(printf '%s\n' "$raw" | grep -oE '0x[a-fA-F0-9]{64}' | head -1 || true)"
  fi
  printf '%s' "$tx_hash"
}
# Out-parameter for broadcast_one: the hash of the last broadcast transaction.
LAST_TX_HASH=""

# Broadcast one 0-value self-send attestation on the given RPC endpoint.
# Args: $1 - RPC URL, $2 - chain label (for log lines).
# Sets LAST_TX_HASH on success (empty for dry runs); returns 1 on send failure.
broadcast_one() {
  local rpc_url="$1"
  local label="$2"
  local send_out
  LAST_TX_HASH=""
  echo "Attestation [$label] From/To: $ADDR | RPC: $rpc_url | keccak(correlation_id): ${FINGERPRINT:0:18}..." >&2
  if [ "$DRY_RUN" = "1" ]; then
    echo "DRY_RUN: cast send $ADDR --value 0 --private-key <redacted> --rpc-url $rpc_url" >&2
    LAST_TX_HASH=""
    return 0
  fi
  if ! send_out="$(cast send "$ADDR" --value 0 --private-key "$PRIVATE_KEY" --rpc-url "$rpc_url" 2>&1)"; then
    echo "$send_out" >&2
    return 1
  fi
  echo "$send_out"
  LAST_TX_HASH="$(extract_tx_hash "$send_out")"
  echo "chain_tx_hash[$label]=$LAST_TX_HASH" >&2
}
TX_138=""
TX_MAINNET=""
# Chain 138 anchor is mandatory; abort on any broadcast failure.
broadcast_one "$RPC_138" "138" || exit 1
TX_138="$LAST_TX_HASH"
# Optional second anchor on Ethereum mainnet (real ETH gas, same key/address).
if [ "$DO_MAINNET" = "1" ]; then
  echo "WARN: Also broadcasting on Ethereum mainnet (chain 1) — uses real ETH gas. Same PRIVATE_KEY / address as Chain 138." >&2
  broadcast_one "$MAINNET_RPC" "1" || exit 1
  TX_MAINNET="$LAST_TX_HASH"
fi
# Persist evidence artifacts; skipped entirely for dry runs.
OUT_DIR="${REPO_ROOT}/output/jvmtm-evidence"
if [ "$DRY_RUN" != "1" ]; then
  mkdir -p "$OUT_DIR"
  GEN_AT="$(date -u -Iseconds)"
  # JSON evidence requires jq; the flat .env file below is written regardless.
  if command -v jq &>/dev/null; then
    # Mainnet attestation entry is appended only when a mainnet tx exists.
    jq -n \
      --arg h138 "$TX_138" \
      --arg h1 "$TX_MAINNET" \
      --arg gen "$GEN_AT" \
      --arg corr "${CORRELATION_ID:-}" \
      '{
        generatedAtUtc: $gen,
        correlationId: (if $corr != "" then $corr else null end),
        attestations: [
          { chainId: 138, transactionHash: $h138, rpcKind: "RPC_URL_138" }
        ] + (if $h1 != "" then [{ chainId: 1, transactionHash: $h1, rpcKind: "ETHEREUM_MAINNET_RPC" }] else [] end)
      }' > "${OUT_DIR}/latest-dual-attestation.json"
    echo "Wrote ${OUT_DIR}/latest-dual-attestation.json" >&2
  fi
  # Flat KEY=VALUE form intended to be sourced by the downstream archive builder.
  {
    echo "CHAIN_ATTESTATION_TX_HASH=${TX_138}"
    echo "CHAIN_ATTESTATION_TX_HASH_MAINNET=${TX_MAINNET}"
  } > "${OUT_DIR}/latest-dual-attestation.env"
  echo "Wrote ${OUT_DIR}/latest-dual-attestation.env (source before build-omnl-e2e-settlement-audit-archive.sh)" >&2
fi

View File

@@ -0,0 +1,116 @@
#!/usr/bin/env bash
# OMNL Fineract — Corporate Client for PT. CAKRA INVESTAMA INTERNATIONAL (office CAKRA).
# Idempotent by client externalId. Adds contact (director), NPWP as "Any Other Id Type" if not present.
#
# Usage (repo root):
# DRY_RUN=1 bash scripts/omnl/omnl-client-create-pt-cakra-investama.sh
# bash scripts/omnl/omnl-client-create-pt-cakra-investama.sh
#
# Optional env:
# CAKRA_OFFICE_EXTERNAL_ID default OMNL-ID-JKT-CAKRA-001
# CAKRA_CLIENT_EXTERNAL_ID default OMNL-ID-JKT-CAKRA-CLIENT
# CAKRA_CLIENT_NAME default PT. CAKRA INVESTAMA INTERNATIONAL
# CAKRA_CONTACT_EMAIL default bambangpram04@gmail.com
# CAKRA_CONTACT_MOBILE default +62811400001
# CAKRA_NPWP default 08.540.442.4-603.000
# SKIP_NPWP_IDENTIFIER=1 skip NPWP POST (e.g. if using another id type in tenant)
#
# Settlement / bank / AHU metadata: scripts/omnl/data/pt-cakra-investama-sidecar.json (not Fineract-native).
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
DRY_RUN="${DRY_RUN:-0}"
# Identifiers and contact details (env-overridable; defaults per header docs).
CAKRA_OFFICE_EXTERNAL_ID="${CAKRA_OFFICE_EXTERNAL_ID:-OMNL-ID-JKT-CAKRA-001}"
CAKRA_CLIENT_EXTERNAL_ID="${CAKRA_CLIENT_EXTERNAL_ID:-OMNL-ID-JKT-CAKRA-CLIENT}"
CAKRA_CLIENT_NAME="${CAKRA_CLIENT_NAME:-PT. CAKRA INVESTAMA INTERNATIONAL}"
CAKRA_CONTACT_EMAIL="${CAKRA_CONTACT_EMAIL:-bambangpram04@gmail.com}"
CAKRA_CONTACT_MOBILE="${CAKRA_CONTACT_MOBILE:-+62811400001}"
CAKRA_NPWP="${CAKRA_NPWP:-08.540.442.4-603.000}"
SKIP_NPWP_IDENTIFIER="${SKIP_NPWP_IDENTIFIER:-0}"
SUBMITTED_DATE="${SUBMITTED_DATE:-$(date +%Y-%m-%d)}"
# NOTE(review): legalFormId 2 presumably maps to the corporate/entity legal form
# on this tenant — confirm against the tenant's code values.
LEGAL_FORM_ID="${LEGAL_FORM_ID:-2}"
# shellcheck source=lib/omnl-fineract-common.sh
source "${REPO_ROOT}/scripts/omnl/lib/omnl-fineract-common.sh"
# Common lib: load credentials, then build CURL_OPTS / BASE_URL for API calls.
omnl_fineract_load_env
omnl_fineract_init_curl || exit 1
# Resolve the office id from its externalId; the office must already exist.
offices_json=$(curl "${CURL_OPTS[@]}" "${BASE_URL}/offices" 2>/dev/null)
OFFICE_ID=$(echo "$offices_json" | jq -r --arg e "$CAKRA_OFFICE_EXTERNAL_ID" '.[] | select(.externalId == $e) | .id' 2>/dev/null | head -1)
if [ -z "$OFFICE_ID" ] || [ "$OFFICE_ID" = "null" ]; then
  echo "Office not found for externalId=$CAKRA_OFFICE_EXTERNAL_ID — run omnl-office-create-pt-cakra-investama.sh first." >&2
  exit 1
fi
# Idempotency: look the client up by externalId before attempting creation.
clients_wrap=$(omnl_fineract_fetch_all_clients_pageitems)
existing_id=$(echo "$clients_wrap" | jq -r --arg e "$CAKRA_CLIENT_EXTERNAL_ID" '.pageItems[] | select(.externalId == $e) | .id' 2>/dev/null | head -1)
if [ -n "$existing_id" ] && [ "$existing_id" != "null" ]; then
  CLIENT_ID="$existing_id"
  echo "Client already exists: clientId=$CLIENT_ID (externalId=$CAKRA_CLIENT_EXTERNAL_ID)" >&2
else
  # Corporate client payload: full legal name in firstname, "." placeholder
  # lastname; created inactive (active: false) with a submitted-on date.
  payload=$(jq -n \
    --argjson officeId "$OFFICE_ID" \
    --argjson legalFormId "$LEGAL_FORM_ID" \
    --arg firstname "$CAKRA_CLIENT_NAME" \
    --arg externalId "$CAKRA_CLIENT_EXTERNAL_ID" \
    --arg submittedOnDate "$SUBMITTED_DATE" \
    '{
      officeId: $officeId,
      legalFormId: $legalFormId,
      firstname: $firstname,
      lastname: ".",
      externalId: $externalId,
      dateFormat: "yyyy-MM-dd",
      locale: "en",
      active: false,
      submittedOnDate: $submittedOnDate
    }')
  if [ "$DRY_RUN" = "1" ]; then
    echo "DRY_RUN: would POST /clients $payload" >&2
    exit 0
  fi
  res=$(curl "${CURL_OPTS[@]}" -X POST -d "$payload" "${BASE_URL}/clients" 2>/dev/null) || true
  # Fineract reports resourceId (or clientId) on successful creation.
  if echo "$res" | jq -e '.resourceId // .clientId' >/dev/null 2>&1; then
    CLIENT_ID=$(echo "$res" | jq -r '.resourceId // .clientId')
    echo "Created client clientId=$CLIENT_ID" >&2
  else
    echo "Failed to create client: $res" >&2
    exit 1
  fi
fi
# In DRY_RUN the contact / NPWP mutations below are skipped entirely.
if [ "$DRY_RUN" = "1" ]; then
  exit 0
fi
# Contact (director channel)
# NOTE(review): contact update is deliberately best-effort — output and errors
# are discarded (|| true).
payload_contact=$(jq -n --arg m "$CAKRA_CONTACT_MOBILE" --arg e "$CAKRA_CONTACT_EMAIL" '{ mobileNo: $m, emailAddress: $e }')
curl "${CURL_OPTS[@]}" -X PUT -d "$payload_contact" "${BASE_URL}/clients/${CLIENT_ID}" >/dev/null 2>&1 || true
# NPWP — tenant allows one active "Any Other Id Type" (id 4) per client
if [ "$SKIP_NPWP_IDENTIFIER" != "1" ] && [ -n "$CAKRA_NPWP" ]; then
  if omnl_fineract_client_has_document_key "$CLIENT_ID" "$CAKRA_NPWP"; then
    echo "NPWP identifier already present on client $CLIENT_ID" >&2
  else
    # Check for an existing active "Any Other Id Type" identifier; the jq filter
    # accepts both a bare array and a pageItems-wrapped response shape.
    has_other=$(curl "${CURL_OPTS[@]}" "${BASE_URL}/clients/${CLIENT_ID}/identifiers" 2>/dev/null | jq -e '
      (if type == "array" then . else (.pageItems // []) end)
      | map(select((.documentType?.name // "") == "Any Other Id Type" and (.status == "Active" or .status == null)))
      | length > 0
    ' >/dev/null 2>&1 && echo yes || echo no)
    if [ "$has_other" = "yes" ]; then
      echo "Skip NPWP POST: client already has an active Any Other Id Type (store AHU/NPWP detail in sidecar or deactivate old id)." >&2
    else
      payload_npwp=$(jq -n --arg key "$CAKRA_NPWP" --argjson typeId 4 '{ documentKey: $key, documentTypeId: $typeId, description: "NPWP (Indonesia tax ID)", status: "Active" }')
      res=$(curl "${CURL_OPTS[@]}" -X POST -d "$payload_npwp" "${BASE_URL}/clients/${CLIENT_ID}/identifiers" 2>/dev/null) || true
      if echo "$res" | jq -e '.resourceId' >/dev/null 2>&1; then
        echo "Posted NPWP identifier for client $CLIENT_ID" >&2
      else
        # Non-fatal: identifier posting failures are reported but do not abort.
        echo "NPWP POST skipped or failed: $res" >&2
      fi
    fi
  fi
fi
# Machine-readable result on stdout; everything else went to stderr.
echo "CLIENT_ID_CAKRA=$CLIENT_ID"
echo "Sidecar (banking, AHU, compliance placeholders): ${REPO_ROOT}/scripts/omnl/data/pt-cakra-investama-sidecar.json" >&2

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# OMNL Fineract — obtain base64EncodedAuthenticationKey via POST /authentication.
# Subsequent API calls use this value as HTTP Basic (not Bearer on standard Fineract).
#
# Usage (repo root, env from omnl-fineract/.env or .env):
# bash scripts/omnl/omnl-fineract-authentication-login.sh
# bash scripts/omnl/omnl-fineract-authentication-login.sh --export # prints export line for current shell
# OMNL_AUTH_USER=x OMNL_AUTH_PASSWORD=y bash scripts/omnl/omnl-fineract-authentication-login.sh
#
# Standard follow-up request:
# curl -H "Fineract-Platform-TenantId: ${OMNL_FINERACT_TENANT}" \
# -H "Authorization: Basic ${OMNL_FINERACT_AUTH_KEY}" \
# "${OMNL_FINERACT_BASE_URL}/offices"
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
EXPORT_MODE=0
[[ "${1:-}" == "--export" ]] && EXPORT_MODE=1
# Best-effort env loading: project-scoped file first, then repo root (later wins).
if [ -f "${REPO_ROOT}/omnl-fineract/.env" ]; then set +u; source "${REPO_ROOT}/omnl-fineract/.env" 2>/dev/null || true; set -u; fi
if [ -f "${REPO_ROOT}/.env" ]; then set +u; source "${REPO_ROOT}/.env" 2>/dev/null || true; set -u; fi
BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
TENANT="${OMNL_FINERACT_TENANT:-omnl}"
USER="${OMNL_AUTH_USER:-${OMNL_FINERACT_USER:-app.omnl}}"
PASS="${OMNL_AUTH_PASSWORD:-${OMNL_FINERACT_PASSWORD:-}}"
if [ -z "$BASE_URL" ] || [ -z "$PASS" ]; then
  echo "Set OMNL_FINERACT_BASE_URL and OMNL_FINERACT_PASSWORD (or OMNL_AUTH_USER / OMNL_AUTH_PASSWORD)." >&2
  exit 1
fi
# SECURITY: keep the password off child-process argv — argv is world-readable
# via `ps` and may end up in audit logs. jq reads the credentials from its
# environment (env.VAR) and curl reads the JSON body from stdin (-d @-), so
# neither process ever receives the secret as a command-line argument.
BODY=$(OMNL_LOGIN_USER="$USER" OMNL_LOGIN_PASS="$PASS" \
  jq -n '{ username: env.OMNL_LOGIN_USER, password: env.OMNL_LOGIN_PASS }')
RESP=$(printf '%s' "$BODY" | curl -s -S -X POST "${BASE_URL}/authentication" \
  -H "Fineract-Platform-TenantId: ${TENANT}" \
  -H "Content-Type: application/json" \
  -d @-)
# The key is used verbatim as the HTTP Basic credential on later API calls.
KEY=$(echo "$RESP" | jq -r '.base64EncodedAuthenticationKey // empty')
if [ -z "$KEY" ] || [ "$KEY" = "null" ]; then
  echo "Authentication failed or unexpected response:" >&2
  echo "$RESP" | jq . 2>/dev/null || echo "$RESP" >&2
  exit 1
fi
if [ "$EXPORT_MODE" = "1" ]; then
  # Single export line, suitable for: eval "$(bash ... --export)"
  echo "export OMNL_FINERACT_AUTH_KEY='${KEY}'"
else
  echo "$RESP" | jq .
  echo "" >&2
  echo "Use on API calls: Authorization: Basic ${KEY}" >&2
  echo "(Fineract-Platform-TenantId: ${TENANT})" >&2
fi

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# OMNL Fineract — Move USD 102,000,000,000.00 from FROM_OFFICE to TO_OFFICE using chunked M1 clearing.
# Fineract journal line amounts must stay within DB limits; this repo verified 1B USD (100_000_000_000 cents) per line.
#
# Prerequisites: omnl-fineract/.env (or root .env) with OMNL API credentials.
# Live: COMPLIANCE_AUTH_REF, COMPLIANCE_APPROVER, DRY_RUN=0
#
# Usage (repo root):
# DRY_RUN=1 bash scripts/omnl/omnl-m1-clearing-102b-chunked.sh
# DRY_RUN=0 COMPLIANCE_AUTH_REF=... COMPLIANCE_APPROVER="..." bash scripts/omnl/omnl-m1-clearing-102b-chunked.sh
#
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
DRY_RUN="${DRY_RUN:-1}"
FROM_OFFICE="${FROM_OFFICE:-21}"
TO_OFFICE="${TO_OFFICE:-22}"
# 102 billion USD in cents = 102 * 10^9 * 100
TOTAL_CENTS=$((102000000000 * 100))
# Per-chunk: 1 billion USD in cents (Fineract-safe on this tenant)
CHUNK_CENTS="${CHUNK_CENTS:-100000000000}"
COMPLIANCE_AUTH_REF="${COMPLIANCE_AUTH_REF:-}"
COMPLIANCE_APPROVER="${COMPLIANCE_APPROVER:-}"
STAMP="${STAMP:-20260331}"

# Guard: a zero, negative, or non-numeric CHUNK_CENTS override would otherwise
# hit a division-by-zero (or produce a nonsense chunk plan) below — fail fast.
if ! [[ "$CHUNK_CENTS" =~ ^[0-9]+$ ]] || (( CHUNK_CENTS == 0 )); then
  echo "ERROR: CHUNK_CENTS must be a positive integer number of cents (got '${CHUNK_CENTS}')" >&2
  exit 1
fi

# Live posts require a governance reference and a named approver.
if [ "$DRY_RUN" != "1" ]; then
  if [ -z "$COMPLIANCE_AUTH_REF" ] || [ -z "$COMPLIANCE_APPROVER" ]; then
    echo "ERROR: Live run requires COMPLIANCE_AUTH_REF and COMPLIANCE_APPROVER" >&2
    exit 1
  fi
fi

# Chunk plan: n_full chunks of CHUNK_CENTS, plus one remainder chunk if needed.
n_full=$((TOTAL_CENTS / CHUNK_CENTS))
rem=$((TOTAL_CENTS % CHUNK_CENTS))
chunks=()
for ((i = 1; i <= n_full; i++)); do
  chunks+=("$CHUNK_CENTS")
done
if [ "$rem" -gt 0 ]; then
  chunks+=("$rem")
fi
total_chunks=${#chunks[@]}
echo "Total USD (major): 102,000,000,000.00 | total cents: $TOTAL_CENTS | chunk cents: $CHUNK_CENTS | chunks: $total_chunks | DRY_RUN=$DRY_RUN" >&2

# Post each chunk as an unwind+book pair via the single-transfer script; abort
# on the first failure so the partial plan remains visible in journal references.
idx=0
for amt in "${chunks[@]}"; do
  idx=$((idx + 1))
  REFERENCE_BASE="OMNL-102B-CH${idx}-OF${FROM_OFFICE}-TO${TO_OFFICE}-${STAMP}"
  SETTLEMENT_CONTEXT="OMNL 102B USD chunked M1 realloc chunk ${idx}/${total_chunks} (${amt} cents)"
  export REFERENCE_BASE SETTLEMENT_CONTEXT
  echo "--- Chunk $idx / $total_chunks | AMOUNT=$amt | ref=$REFERENCE_BASE ---" >&2
  DRY_RUN="$DRY_RUN" \
  FETCH_AMOUNT_FROM_API=0 \
  AMOUNT="$amt" \
  FROM_OFFICE="$FROM_OFFICE" \
  TO_OFFICE="$TO_OFFICE" \
  COMPLIANCE_AUTH_REF="$COMPLIANCE_AUTH_REF" \
  COMPLIANCE_APPROVER="$COMPLIANCE_APPROVER" \
  bash "${REPO_ROOT}/scripts/omnl/omnl-m1-clearing-transfer-between-offices.sh" || {
    echo "ERROR: chunk $idx failed" >&2
    exit 1
  }
done
echo "Done. Posted $total_chunks chunk pairs (unwind + book)." >&2

View File

@@ -0,0 +1,212 @@
#!/usr/bin/env bash
# OMNL Fineract — Move M1 clearing-style position (GL 2100 / 1410) from one office to another.
#
# Accounting (same structure as omnl-pvp-post-clearing-bank-kanaya.sh branch leg):
# - Unwind source office: Dr 1410 / Cr 2100 (reverses Dr 2100 / Cr 1410)
# - Book target office: Dr 2100 / Cr 1410 (same as PvP beneficiary branch leg)
# Head office leg (Dr 2410 / Cr 2100) is unchanged — beneficiary reallocates at branch level only.
#
# Compliance (live post, DRY_RUN=0):
# - Set COMPLIANCE_AUTH_REF (e.g. committee minute id, ticket, legal opinion ref).
# - Set COMPLIANCE_APPROVER (human name) for material amounts (>= MATERIAL_THRESHOLD_COMPLIANCE, default 10_000_000).
# - Stable REFERENCE_BASE in journal referenceNumber (default HYBX-BATCH-001-BEN-REALLOC).
# - Run DRY_RUN=1 first; use maker-checker (WRITE_MAKER_PAYLOADS=1) if policy requires segregated duties.
# IPSAS / IFRS (IFGA default): comments append COMPLIANCE_STANDARD_MEMO — see
# docs/04-configuration/mifos-omnl-central-bank/OMNL_IPSAS_IFRS_INTEROFFICE_COMPLIANCE.md
#
# Amount:
# - Default: FETCH_AMOUNT_FROM_API=1 sums non-reversed DEBIT lines on GL 2100 at FROM_OFFICE (matches posted PvP Kanaya legs).
# - Override: AMOUNT=<same numeric scale as existing JEs on the tenant> (required if fetch yields 0).
#
# Usage:
# DRY_RUN=1 bash scripts/omnl/omnl-m1-clearing-transfer-between-offices.sh
# FROM_OFFICE=21 TO_OFFICE=22 DRY_RUN=0 COMPLIANCE_AUTH_REF=DIR-2026-0330 COMPLIANCE_APPROVER="CFO Name" \
# bash scripts/omnl/omnl-m1-clearing-transfer-between-offices.sh
#
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"

# Tunables (all env-overridable); defaults preserve the documented
# HYBX-BATCH-001 beneficiary-reallocation run.
DRY_RUN="${DRY_RUN:-1}"
TRANSACTION_DATE="${TRANSACTION_DATE:-$(date +%Y-%m-%d)}"
FROM_OFFICE="${FROM_OFFICE:-21}"
TO_OFFICE="${TO_OFFICE:-22}"
REFERENCE_BASE="${REFERENCE_BASE:-HYBX-BATCH-001-BEN-REALLOC}"
SETTLEMENT_CONTEXT="${SETTLEMENT_CONTEXT:-HYBX-BATCH-001 multilateral net beneficiary realloc Bank Kanaya to PT CAKRA}"
FETCH_AMOUNT_FROM_API="${FETCH_AMOUNT_FROM_API:-1}"
AMOUNT="${AMOUNT:-}"
MATERIAL_THRESHOLD_COMPLIANCE="${MATERIAL_THRESHOLD_COMPLIANCE:-10000000}"
COMPLIANCE_AUTH_REF="${COMPLIANCE_AUTH_REF:-}"
COMPLIANCE_APPROVER="${COMPLIANCE_APPROVER:-}"
WRITE_MAKER_PAYLOADS="${WRITE_MAKER_PAYLOADS:-0}"
# Appended to Fineract comments (IPSAS + IFRS; IFGA = IFRS unless org defines otherwise)
COMPLIANCE_STANDARD_MEMO="${COMPLIANCE_STANDARD_MEMO:-IPSAS:1,3,9,28,29 accrual double-entry inter-office M1 realloc no revenue. IFRS/IFGA-default: IAS32 IFRS7 IFRS9 amortised cost no PnL on symmetric 1410/2100 legs.}"

# The first env file found wins (project-scoped, then repo root) — same
# precedence as an if/elif chain.
for candidate_env in "${REPO_ROOT}/omnl-fineract/.env" "${REPO_ROOT}/.env"; do
  if [ -f "$candidate_env" ]; then
    set +u
    # shellcheck disable=SC1090
    source "$candidate_env" 2>/dev/null || true
    set -u
    break
  fi
done

BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
TENANT="${OMNL_FINERACT_TENANT:-omnl}"
USER="${OMNL_FINERACT_USER:-app.omnl}"
PASS="${OMNL_FINERACT_PASSWORD:-}"
if [ -z "$BASE_URL" ] || [ -z "$PASS" ]; then
  echo "Set OMNL_FINERACT_BASE_URL and OMNL_FINERACT_PASSWORD" >&2
  exit 1
fi

# Shared curl argument sets; the POST variant appends the HTTP status code on
# its own trailing line (-w) so callers can split body and status.
CURL_GET=(-s -S -H "Fineract-Platform-TenantId: ${TENANT}" -H "Content-Type: application/json" -u "${USER}:${PASS}")
CURL_POST=(-s -S -w "\n%{http_code}" -H "Fineract-Platform-TenantId: ${TENANT}" -H "Content-Type: application/json" -u "${USER}:${PASS}")
# Page through GET /journalentries for one office and print a single merged
# JSON array of all pageItems.
# Args: $1 - numeric office id. Uses globals CURL_GET / BASE_URL.
fetch_office_journal_all() {
  local office_id="$1"
  local page_offset=0
  local page_limit=500
  local merged='[]'
  local page page_items page_count total_records
  while :; do
    page=$(curl "${CURL_GET[@]}" "${BASE_URL}/journalentries?officeId=${office_id}&offset=${page_offset}&limit=${page_limit}")
    page_items=$(printf '%s\n' "$page" | jq -c '.pageItems // []')
    page_count=$(printf '%s\n' "$page_items" | jq 'length')
    merged=$(jq -n --argjson a "$merged" --argjson b "$page_items" '$a + $b')
    total_records=$(printf '%s\n' "$page" | jq -r '.totalFilteredRecords // 0')
    page_offset=$((page_offset + page_count))
    # Stop on a short page or once the reported total has been consumed.
    if [ "$page_count" -lt "$page_limit" ] || [ "$page_offset" -ge "$total_records" ]; then
      break
    fi
  done
  printf '%s\n' "$merged"
}
# Sum the non-reversed DEBIT amounts on GL code 2100 from a JSON array of
# journal-entry lines (as produced by fetch_office_journal_all).
# Args: $1 - JSON array string. Prints the numeric sum (0 when no lines match).
sum_2100_debits() {
  local lines_json="$1"
  printf '%s\n' "$lines_json" | jq '[.[] | select((.reversed // false) | not) | select(.glAccountCode == "2100") | select((.entryType.value // .entryType // "") | ascii_downcase | test("debit"))] | map(.amount | tonumber) | add // 0'
}
# Fetch the chart of accounts once; normalize both response shapes (bare array
# or pageItems wrapper) into the GL_JSON array used by get_gl_id.
GL_RAW=$(curl "${CURL_GET[@]}" "${BASE_URL}/glaccounts")
GL_JSON=$(printf '%s\n' "$GL_RAW" | jq -c 'if type == "array" then . else (.pageItems // []) end' 2>/dev/null || echo "[]")

# Map a GL code (e.g. "1410") to its numeric account id from cached GL_JSON.
# Prints the id, or nothing when the code is absent.
get_gl_id() {
  local gl_code="$1"
  printf '%s\n' "$GL_JSON" | jq -r --arg c "$gl_code" '.[]? | select(.glCode == $c) | .id // empty' 2>/dev/null | head -n1
}
# Resolve the numeric GL account ids for the two legs of the transfer.
ID_1410="$(get_gl_id "1410")"
ID_2100="$(get_gl_id "2100")"
if [ -z "$ID_1410" ] || [ -z "$ID_2100" ]; then
  echo "ERROR: Missing GL 1410 or 2100." >&2
  exit 1
fi
# Human-readable office names, used in narratives and the summary banner.
FROM_NAME=$(curl "${CURL_GET[@]}" "${BASE_URL}/offices" | jq -r --argjson id "$FROM_OFFICE" '.[] | select(.id == $id) | .name // empty' | head -1)
TO_NAME=$(curl "${CURL_GET[@]}" "${BASE_URL}/offices" | jq -r --argjson id "$TO_OFFICE" '.[] | select(.id == $id) | .name // empty' | head -1)
if [ -z "$FROM_NAME" ] || [ -z "$TO_NAME" ]; then
  echo "ERROR: Could not resolve office name for FROM_OFFICE=$FROM_OFFICE or TO_OFFICE=$TO_OFFICE" >&2
  exit 1
fi
# Amount resolution: explicit AMOUNT wins; otherwise derive it from posted
# GL 2100 debits at the source office (matches the prior PvP Kanaya legs).
if [ -n "$AMOUNT" ]; then
  TRANSFER_AMT="$AMOUNT"
elif [ "$FETCH_AMOUNT_FROM_API" = "1" ]; then
  ITEMS=$(fetch_office_journal_all "$FROM_OFFICE")
  TRANSFER_AMT=$(sum_2100_debits "$ITEMS")
else
  echo "ERROR: Set AMOUNT= or FETCH_AMOUNT_FROM_API=1" >&2
  exit 1
fi
# awk copes with arbitrary numeric scale; zero/negative transfers are refused.
if ! awk -v a="$TRANSFER_AMT" 'BEGIN { if (a + 0 > 0) exit 0; exit 1 }'; then
  echo "ERROR: Transfer amount must be > 0 (got ${TRANSFER_AMT}). Set AMOUNT explicitly or ensure GL 2100 debits exist at FROM_OFFICE." >&2
  exit 1
fi
# Stable references and narratives for the unwind/book journal pair.
REF_UNWIND="${REFERENCE_BASE}-UNWIND-${FROM_OFFICE}"
REF_BOOK="${REFERENCE_BASE}-BOOK-${TO_OFFICE}"
NARR_UNWIND="M1 clearing beneficiary realloc: unwind at ${FROM_NAME} (office ${FROM_OFFICE}). Auth: ${COMPLIANCE_AUTH_REF:-n/a}. ${SETTLEMENT_CONTEXT} | ${COMPLIANCE_STANDARD_MEMO}"
NARR_BOOK="M1 clearing beneficiary realloc: book at ${TO_NAME} (office ${TO_OFFICE}). Auth: ${COMPLIANCE_AUTH_REF:-n/a}. ${SETTLEMENT_CONTEXT} | ${COMPLIANCE_STANDARD_MEMO}"
# Live-post gating: always require an authorization reference; additionally
# require a named approver when the amount meets the materiality threshold.
if [ "$DRY_RUN" != "1" ]; then
  if [ -z "$COMPLIANCE_AUTH_REF" ]; then
    echo "ERROR: Live post requires COMPLIANCE_AUTH_REF (governance / ticket / minute reference)." >&2
    exit 1
  fi
  if awk -v a="$TRANSFER_AMT" -v t="$MATERIAL_THRESHOLD_COMPLIANCE" 'BEGIN { exit !(a >= t) }'; then
    if [ -z "$COMPLIANCE_APPROVER" ]; then
      echo "ERROR: Amount ${TRANSFER_AMT} >= ${MATERIAL_THRESHOLD_COMPLIANCE}; set COMPLIANCE_APPROVER for dual-control attestation." >&2
      exit 1
    fi
  fi
fi
# Post one balanced journal entry (a single debit and a single credit of
# TRANSFER_AMT) at the given office.
# Args: $1 office id, $2 debit GL id, $3 credit GL id, $4 referenceNumber,
#       $5 comments/narrative text.
# Honors DRY_RUN (print payload only) and WRITE_MAKER_PAYLOADS (persist a
# maker payload plus sha256 for maker-checker flows). Returns 1 on HTTP failure.
post_je() {
  local oid="$1"
  local dr_id="$2"
  local cr_id="$3"
  local ref_no="$4"
  local narrative="$5"
  local payload
  payload=$(jq -n \
    --argjson officeId "$oid" \
    --arg transactionDate "$TRANSACTION_DATE" \
    --arg comments "$narrative" \
    --arg referenceNumber "$ref_no" \
    --argjson debitId "$dr_id" \
    --argjson creditId "$cr_id" \
    --argjson amount "$TRANSFER_AMT" \
    '{ officeId: $officeId, transactionDate: $transactionDate, dateFormat: "yyyy-MM-dd", locale: "en", currencyCode: "USD", comments: $comments, referenceNumber: $referenceNumber, debits: [ { glAccountId: $debitId, amount: $amount } ], credits: [ { glAccountId: $creditId, amount: $amount } ] }')
  # Maker-checker: persist the payload (with approval metadata for material
  # amounts) and its sha256 alongside the repo's reconciliation artifacts.
  if [ "$WRITE_MAKER_PAYLOADS" = "1" ]; then
    local payload_dir="${REPO_ROOT}/reconciliation"
    mkdir -p "$payload_dir"
    local ref_slug
    ref_slug=$(echo "$ref_no" | tr -c 'A-Za-z0-9_-' '_')
    local maker_body="$payload"
    if awk -v a="$TRANSFER_AMT" -v t="$MATERIAL_THRESHOLD_COMPLIANCE" 'BEGIN { exit !(a >= t) }' \
      && [ -n "${COMPLIANCE_APPROVER:-}" ]; then
      maker_body=$(echo "$maker_body" | jq --arg approver "$COMPLIANCE_APPROVER" --arg approvedAt "$(date -u -Iseconds)" \
        '. + { approvalMetadata: { approver: $approver, approvedAt: $approvedAt } }')
    fi
    echo "$maker_body" > "${payload_dir}/je-${ref_slug}.payload.json"
    sha256sum "${payload_dir}/je-${ref_slug}.payload.json" | awk '{print $1}' > "${payload_dir}/je-${ref_slug}.payload.sha256"
    echo "Wrote maker payload ${payload_dir}/je-${ref_slug}.payload.json (post: PAYLOAD_FILE=... DRY_RUN=0 bash scripts/omnl/omnl-je-checker.sh)" >&2
  fi
  if [ "$DRY_RUN" = "1" ]; then
    echo "DRY_RUN JE: office=$oid ref=$ref_no" >&2
    echo "$payload" | jq .
    return 0
  fi
  # CURL_POST appends "\n<http_code>", so the last line is the status code.
  local http_raw http_code http_body
  http_raw=$(curl "${CURL_POST[@]}" -X POST -d "$payload" "${BASE_URL}/journalentries" 2>/dev/null)
  http_code=$(echo "$http_raw" | tail -n1)
  http_body=$(echo "$http_raw" | sed '$d')
  if [ "${http_code:0:1}" = "2" ]; then
    echo "OK office=$oid ref=$ref_no HTTP $http_code" >&2
    echo "$http_body" | jq . 2>/dev/null || echo "$http_body"
  else
    echo "FAIL office=$oid ref=$ref_no HTTP $http_code: $http_body" >&2
    return 1
  fi
}
# Summary banner, then the two balanced legs. Under the script's set -e a
# failing first leg aborts before the second, leaving the unwind visible in
# the journal for follow-up.
echo "M1 clearing transfer | from office ${FROM_OFFICE} (${FROM_NAME}) → ${TO_OFFICE} (${TO_NAME}) | amount=${TRANSFER_AMT} | DRY_RUN=${DRY_RUN}" >&2
echo "JE1 unwind: office ${FROM_OFFICE} Dr 1410 Cr 2100 | ref ${REF_UNWIND}" >&2
echo "JE2 book: office ${TO_OFFICE} Dr 2100 Cr 1410 | ref ${REF_BOOK}" >&2
# Unwind source (mirror of PvP branch leg)
post_je "$FROM_OFFICE" "$ID_1410" "$ID_2100" "$REF_UNWIND" "$NARR_UNWIND"
# Book target
post_je "$TO_OFFICE" "$ID_2100" "$ID_1410" "$REF_BOOK" "$NARR_BOOK"
echo "Done." >&2

View File

@@ -0,0 +1,77 @@
#!/usr/bin/env bash
# OMNL Fineract (HYBX) — Create Office for PT. CAKRA INVESTAMA INTERNATIONAL (Jakarta).
# CIS-derived: AHU-0091539.AH.01.01.TAHUN 2025, opening 2025-10-24, under Head Office (parentId 1).
# Uses POST /offices (name, parentId, openingDate, externalId, dateFormat, locale).
#
# Usage (repo root):
# DRY_RUN=1 bash scripts/omnl/omnl-office-create-pt-cakra-investama.sh
# bash scripts/omnl/omnl-office-create-pt-cakra-investama.sh
#
# Optional overrides:
# CAKRA_EXTERNAL_ID (default OMNL-ID-JKT-CAKRA-001)
# CAKRA_OFFICE_NAME OPENING_DATE PARENT_OFFICE_ID
#
# Env: omnl-fineract/.env or .env — OMNL_FINERACT_BASE_URL, OMNL_FINERACT_PASSWORD, OMNL_FINERACT_TENANT, OMNL_FINERACT_USER
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"

# Defaults are the CIS-derived office record; all are env-overridable.
DRY_RUN="${DRY_RUN:-0}"
OPENING_DATE="${OPENING_DATE:-2025-10-24}"
CAKRA_EXTERNAL_ID="${CAKRA_EXTERNAL_ID:-OMNL-ID-JKT-CAKRA-001}"
CAKRA_OFFICE_NAME="${CAKRA_OFFICE_NAME:-PT. CAKRA INVESTAMA INTERNATIONAL}"
PARENT_OFFICE_ID="${PARENT_OFFICE_ID:-1}"

# First matching env file wins: project-scoped omnl-fineract/.env, then repo root.
for env_file in "${REPO_ROOT}/omnl-fineract/.env" "${REPO_ROOT}/.env"; do
  if [[ -f "$env_file" ]]; then
    set +u
    # shellcheck disable=SC1090
    source "$env_file" 2>/dev/null || true
    set -u
    break
  fi
done

BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
TENANT="${OMNL_FINERACT_TENANT:-omnl}"
USER="${OMNL_FINERACT_USER:-app.omnl}"
PASS="${OMNL_FINERACT_PASSWORD:-}"
if [[ -z "$BASE_URL" || -z "$PASS" ]]; then
  echo "Set OMNL_FINERACT_BASE_URL and OMNL_FINERACT_PASSWORD (omnl-fineract/.env or .env)." >&2
  echo "Example: OMNL_FINERACT_BASE_URL=https://omnl.hybx.global/fineract-provider/api/v1" >&2
  exit 1
fi
CURL_OPTS=(-s -S -H "Fineract-Platform-TenantId: ${TENANT}" -H "Content-Type: application/json" -u "${USER}:${PASS}")

# Idempotency: reuse an existing office carrying the same externalId.
offices_json=$(curl "${CURL_OPTS[@]}" "${BASE_URL}/offices" 2>/dev/null)
existing_id=$(printf '%s\n' "$offices_json" | jq -r --arg e "$CAKRA_EXTERNAL_ID" '.[] | select(.externalId == $e) | .id' 2>/dev/null | head -1)
if [[ -n "$existing_id" && "$existing_id" != "null" ]]; then
  echo "CAKRA office already exists: officeId=$existing_id (externalId=$CAKRA_EXTERNAL_ID)" >&2
  echo "OFFICE_ID_CAKRA=$existing_id"
  exit 0
fi

payload=$(jq -n \
  --arg name "$CAKRA_OFFICE_NAME" \
  --arg openingDate "$OPENING_DATE" \
  --arg externalId "$CAKRA_EXTERNAL_ID" \
  --argjson parentId "$PARENT_OFFICE_ID" \
  '{ name: $name, parentId: $parentId, openingDate: $openingDate, externalId: $externalId, dateFormat: "yyyy-MM-dd", locale: "en" }')

if [[ "$DRY_RUN" == "1" ]]; then
  echo "DRY_RUN: would POST ${BASE_URL}/offices" >&2
  echo "Payload: $payload" >&2
  exit 0
fi

# Fineract reports resourceId (or officeId) on successful creation.
res=$(curl "${CURL_OPTS[@]}" -X POST -d "$payload" "${BASE_URL}/offices" 2>/dev/null) || true
if printf '%s\n' "$res" | jq -e '.resourceId // .officeId' >/dev/null 2>&1; then
  CAKRA_OFFICE_ID=$(printf '%s\n' "$res" | jq -r '.resourceId // .officeId')
  echo "Created CAKRA office: officeId=$CAKRA_OFFICE_ID" >&2
  echo "OFFICE_ID_CAKRA=$CAKRA_OFFICE_ID"
else
  echo "Failed to create office: $res" >&2
  exit 1
fi

View File

@@ -7,20 +7,22 @@
#
# Usage:
# DRY_RUN=1 bash scripts/omnl/omnl-pvp-post-clearing-bank-kanaya.sh # print payloads only (default)
# DRY_RUN=0 OFFICE_ID_HO=1 OFFICE_ID_KANAYA=22 bash scripts/omnl/omnl-pvp-post-clearing-bank-kanaya.sh
# DRY_RUN=0 OFFICE_ID_HO=1 OFFICE_ID_KANAYA=21 bash scripts/omnl/omnl-pvp-post-clearing-bank-kanaya.sh
#
# Prerequisites: GL 1410, 2100, 2410 exist. Run resolve_ids.sh or let script resolve via GET /glaccounts.
# See: docs/04-configuration/mifos-omnl-central-bank/PvP_MULTILATERAL_NET_SETTLEMENT_BANK_KANAYA.md
# IPSAS/IFRS: docs/04-configuration/mifos-omnl-central-bank/OMNL_IPSAS_IFRS_INTEROFFICE_COMPLIANCE.md
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
DRY_RUN="${DRY_RUN:-1}"
TRANSACTION_DATE="${TRANSACTION_DATE:-$(date +%Y-%m-%d)}"
OFFICE_ID_HO="${OFFICE_ID_HO:-1}"
OFFICE_ID_KANAYA="${OFFICE_ID_KANAYA:-22}"
OFFICE_ID_KANAYA="${OFFICE_ID_KANAYA:-21}"
# 1,000,000,000.00 USD in cents
AMOUNT_MINOR="${AMOUNT_MINOR_UNITS:-100000000000}"
REF="${REFERENCE_COMMENT:-HYBX-BATCH-001-CLEARING}"
COMPLIANCE_STANDARD_MEMO="${COMPLIANCE_STANDARD_MEMO:-IPSAS:1,3,28,29 PvP clearing HO+branch. IFRS/IFGA-default: IAS32 IFRS7 IFRS9 no PnL on symmetric monetary legs.}"
if [ -f "${REPO_ROOT}/omnl-fineract/.env" ]; then
set +u
@@ -77,7 +79,7 @@ post_je() {
body=$(jq -n \
--argjson officeId "$office_id" \
--arg transactionDate "$TRANSACTION_DATE" \
--arg comments "$memo$REF" \
--arg comments "$memo$REF | $COMPLIANCE_STANDARD_MEMO" \
--argjson debitId "$debit_id" \
--argjson creditId "$credit_id" \
--argjson amount "$AMOUNT_MINOR" \

View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# OMNL Fineract — Staff + User for PT CAKRA office (President Director login, office-scoped).
# Default office: resolve by CAKRA_OFFICE_EXTERNAL_ID or CAKRA_OFFICE_ID (default external OMNL-ID-JKT-CAKRA-001).
#
# Env (required unless CAKRA_GENERATE_PASSWORD=1):
#   OMNL_CAKRA_ADMIN_PASSWORD    Password for Fineract user bpramukantoro
# Optional:
#   CAKRA_GENERATE_PASSWORD=1    Generate a password and print it once to stderr (save securely).
#   CAKRA_USERNAME               default bpramukantoro
#   CAKRA_OFFICE_ID              integer office id (skips resolve by external id)
#   CAKRA_OFFICE_EXTERNAL_ID     default OMNL-ID-JKT-CAKRA-001
#   CAKRA_ROLE_NAME              default "Office Admin"
#
# Requires: omnl-fineract/.env or .env with OMNL_FINERACT_* admin credentials.
set -euo pipefail
REPO_ROOT="${REPO_ROOT:-$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)}"
# Identity defaults for the staff/user records created below.
CAKRA_USERNAME="${CAKRA_USERNAME:-bpramukantoro}"
STAFF_FIRSTNAME="${CAKRA_STAFF_FIRSTNAME:-Bambang}"
STAFF_LASTNAME="${CAKRA_STAFF_LASTNAME:-Pramukantoro}"
CAKRA_OFFICE_EXTERNAL_ID="${CAKRA_OFFICE_EXTERNAL_ID:-OMNL-ID-JKT-CAKRA-001}"
CAKRA_OFFICE_ID="${CAKRA_OFFICE_ID:-}"
CAKRA_ROLE_NAME="${CAKRA_ROLE_NAME:-Office Admin}"
CAKRA_GENERATE_PASSWORD="${CAKRA_GENERATE_PASSWORD:-0}"
# Source optional env files with nounset relaxed so partially-filled files cannot abort the run.
if [ -f "${REPO_ROOT}/omnl-fineract/.env" ]; then set +u; source "${REPO_ROOT}/omnl-fineract/.env" 2>/dev/null || true; set -u; fi
if [ -f "${REPO_ROOT}/.env" ]; then set +u; source "${REPO_ROOT}/.env" 2>/dev/null || true; set -u; fi
# Fineract admin endpoint/credentials; BASE_URL and ADMIN_PASS are validated right after this.
BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
TENANT="${OMNL_FINERACT_TENANT:-omnl}"
ADMIN_USER="${OMNL_FINERACT_USER:-app.omnl}"
ADMIN_PASS="${OMNL_FINERACT_PASSWORD:-}"
CAKRA_PASS="${OMNL_CAKRA_ADMIN_PASSWORD:-}"
# Fail fast when the Fineract admin endpoint/credentials are not configured.
# (Replaces the fragile `[ ] || [ ] && { }` AND-OR list with an explicit `if`;
# same behavior, but the intent is no longer dependent on list precedence.)
if [ -z "$BASE_URL" ] || [ -z "$ADMIN_PASS" ]; then
  echo "Set OMNL_FINERACT_BASE_URL and OMNL_FINERACT_PASSWORD" >&2
  exit 1
fi
# Resolve the CAKRA user's password: caller-supplied, or generated once on demand.
if [ -z "$CAKRA_PASS" ]; then
  if [ "$CAKRA_GENERATE_PASSWORD" = "1" ]; then
    # Print the generated secret exactly once to stderr; it is not persisted anywhere.
    CAKRA_PASS="$(openssl rand -base64 18 | tr -d '\n')"
    echo "Generated password for ${CAKRA_USERNAME} (save securely, not logged again):" >&2
    echo "$CAKRA_PASS" >&2
  else
    echo "Set OMNL_CAKRA_ADMIN_PASSWORD or run with CAKRA_GENERATE_PASSWORD=1" >&2
    exit 1
  fi
fi
# Shared curl options: silent-but-show-errors, append "\n<http_code>" as a
# trailing line (stripped later with `sed '$d'`), tenant + JSON headers, basic auth.
# NOTE(review): admin credentials on the curl argv are visible in `ps` — consider netrc/stdin.
CURL_OPTS=(-s -S -w "\n%{http_code}" -H "Fineract-Platform-TenantId: ${TENANT}" -H "Content-Type: application/json" -u "${ADMIN_USER}:${ADMIN_PASS}")
# Resolve the office id from its externalId unless the caller provided CAKRA_OFFICE_ID.
if [ -z "$CAKRA_OFFICE_ID" ]; then
  offices_json=$(curl "${CURL_OPTS[@]}" "${BASE_URL}/offices" 2>/dev/null | sed '$d')
  CAKRA_OFFICE_ID=$(echo "$offices_json" | jq -r --arg e "$CAKRA_OFFICE_EXTERNAL_ID" '.[] | select(.externalId == $e) | .id' 2>/dev/null | head -1)
fi
if [ -z "$CAKRA_OFFICE_ID" ] || [ "$CAKRA_OFFICE_ID" = "null" ]; then
  echo "Could not resolve office id for externalId=$CAKRA_OFFICE_EXTERNAL_ID (create office first)." >&2
  exit 1
fi
# Idempotency: if the target username already exists, report its id and stop successfully.
users_json=$(curl "${CURL_OPTS[@]}" "${BASE_URL}/users" 2>/dev/null | sed '$d')
existing_uid=$(echo "$users_json" | jq -r --arg u "$CAKRA_USERNAME" '.[] | select(.username == $u) | .id' 2>/dev/null | head -1)
if [ -n "$existing_uid" ] && [ "$existing_uid" != "null" ]; then
  echo "User already exists: username=$CAKRA_USERNAME userId=$existing_uid" >&2
  echo "USER_ID_CAKRA=$existing_uid"
  exit 0
fi
# Reuse the first staff member already attached to the office, otherwise create one.
EXISTING_STAFF=$(curl "${CURL_OPTS[@]}" "${BASE_URL}/staff?officeId=${CAKRA_OFFICE_ID}" 2>/dev/null | sed '$d')
STAFF_ID=$(echo "$EXISTING_STAFF" | jq -r 'if type == "array" then (.[0].id // empty) else empty end' 2>/dev/null)
if [ -n "$STAFF_ID" ]; then
  echo "Using existing staff id=$STAFF_ID for office $CAKRA_OFFICE_ID" >&2
else
  JOINING_DATE="${JOINING_DATE:-$(date +%Y-%m-%d)}"
  STAFF_JSON=$(jq -n --argjson officeId "$CAKRA_OFFICE_ID" --arg fn "$STAFF_FIRSTNAME" --arg ln "$STAFF_LASTNAME" --arg jd "$JOINING_DATE" '{ officeId: $officeId, firstname: $fn, lastname: $ln, joiningDate: $jd, dateFormat: "yyyy-MM-dd", locale: "en", isActive: true }')
  STAFF_OUT=$(curl "${CURL_OPTS[@]}" -X POST -d "$STAFF_JSON" "${BASE_URL}/staff" 2>/dev/null)
  # Split the trailing "%{http_code}" line off the response body.
  STAFF_CODE=$(echo "$STAFF_OUT" | tail -n1)
  STAFF_RESP=$(echo "$STAFF_OUT" | sed '$d')
  # Accept any 2xx status.
  [ "$STAFF_CODE" = "200" ] || [ "${STAFF_CODE:0:1}" = "2" ] || { echo "Staff failed $STAFF_CODE: $STAFF_RESP" >&2; exit 1; }
  STAFF_ID=$(echo "$STAFF_RESP" | jq -r '.resourceId // empty')
  [ -n "$STAFF_ID" ] || { echo "No staff resourceId" >&2; exit 1; }
  echo "Staff created id=$STAFF_ID" >&2
fi
# Resolve the role id by name, falling back to "Office Admin", then any
# non-superuser role, then the first role, then a hard default of 3.
ROLES_JSON=$(curl "${CURL_OPTS[@]}" "${BASE_URL}/roles" 2>/dev/null | sed '$d')
ROLE_ID=$(echo "$ROLES_JSON" | jq -r --arg rn "$CAKRA_ROLE_NAME" '(.[] | select(.name == $rn) | .id) // empty' 2>/dev/null | head -n1)
if [ -z "$ROLE_ID" ] || [ "$ROLE_ID" = "null" ]; then
  ROLE_ID=$(echo "$ROLES_JSON" | jq -r '(.[] | select(.name == "Office Admin") | .id) // (.[] | select(.name != "Super user" and .name != "System") | .id) // .[0].id // 2' 2>/dev/null | head -n1)
fi
ROLE_ID=${ROLE_ID:-3}
# Create the office-scoped user linked to the staff record resolved above.
USER_JSON=$(jq -n --arg u "$CAKRA_USERNAME" --arg p "$CAKRA_PASS" --argjson sid "$STAFF_ID" --argjson oid "$CAKRA_OFFICE_ID" --arg fn "$STAFF_FIRSTNAME" --arg ln "$STAFF_LASTNAME" --argjson roleId "$ROLE_ID" '{ username: $u, password: $p, repeatPassword: $p, staffId: $sid, officeId: $oid, firstname: $fn, lastname: $ln, roles: [$roleId], passwordNeverExpires: true }')
USER_OUT=$(curl "${CURL_OPTS[@]}" -X POST -d "$USER_JSON" "${BASE_URL}/users" 2>/dev/null)
USER_CODE=$(echo "$USER_OUT" | tail -n1)
USER_RESP=$(echo "$USER_OUT" | sed '$d')
if [ "$USER_CODE" = "200" ] || [ "${USER_CODE:0:1}" = "2" ]; then
  NEW_UID=$(echo "$USER_RESP" | jq -r '.resourceId // empty')
  echo "User $CAKRA_USERNAME created for office $CAKRA_OFFICE_ID (userId=$NEW_UID)" >&2
  echo "USER_ID_CAKRA=${NEW_UID:-unknown}"
  exit 0
fi
# Non-2xx: leave the staff record in place and describe the manual UI fallback.
echo "POST /users failed HTTP $USER_CODE: $USER_RESP" >&2
echo "Staff record is ready for manual linking: STAFF_ID_CAKRA=$STAFF_ID officeId=$CAKRA_OFFICE_ID" >&2
echo "If this tenant returns 500 on POST /users (known on some HYBX builds), create the user in the Fineract UI:" >&2
echo "  Administration → Users → Create, office=$CAKRA_OFFICE_ID, link staff id $STAFF_ID, role Office Admin, username=$CAKRA_USERNAME" >&2
exit 1

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash
# Prove ACK instant is before journal credit (regulatory ordering: ack before credit).
# Usage: verify-ack-before-credit.sh <ack.json> <journalEntryId>
#   ack.json: include "timestamp" or "ack_timestamp" as full ISO-8601 (UTC recommended).
# Fineract often returns transactionDate as YYYY-MM-DD only; we treat the credit as the
# END of that UTC day, so this only proves the ACK landed before the value date closed.
# NOTE(review): that comparison is lenient rather than conservative — an ACK later on the
# same day still passes; compare against start-of-day if strict intra-day ordering must
# be proven.
#
# Exit: 0 pass, 1 fail ordering, 2 usage/API/parse error.
set -eo pipefail
REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# shellcheck source=scripts/lib/load-project-env.sh
# Relax nounset while sourcing the env loader (it may reference unset vars), then
# enable the full strict mode.
set +u
source "${REPO_ROOT}/scripts/lib/load-project-env.sh"
set -euo pipefail
ACK_FILE="${1:-}"
JE_ID="${2:-}"
if [[ -z "$ACK_FILE" || -z "$JE_ID" || ! -f "$ACK_FILE" ]]; then
  echo "Usage: $0 <ack.json> <journalEntryId>" >&2
  exit 2
fi
# Fineract connection settings; BASE_URL and PASS are mandatory.
BASE_URL="${OMNL_FINERACT_BASE_URL:-}"
PASS="${OMNL_FINERACT_PASSWORD:-}"
USER="${OMNL_FINERACT_USER:-app.omnl}"
TENANT="${OMNL_FINERACT_TENANT:-omnl}"
if [[ -z "$BASE_URL" || -z "$PASS" ]]; then
  echo "error: OMNL_FINERACT_BASE_URL and OMNL_FINERACT_PASSWORD required" >&2
  exit 2
fi
# Pull the ACK instant from the file and the credit value date from Fineract.
ACK_TS="$(jq -r '.timestamp // .ack_timestamp // empty' "$ACK_FILE")"
[[ -z "$ACK_TS" ]] && echo "error: ack file missing timestamp / ack_timestamp" >&2 && exit 2
JE_JSON="$(curl -sS -H "Fineract-Platform-TenantId: ${TENANT}" -u "${USER}:${PASS}" "${BASE_URL}/journalentries/${JE_ID}")"
CREDIT_DATE="$(echo "$JE_JSON" | jq -r '.transactionDate // empty')"
[[ -z "$CREDIT_DATE" ]] && echo "error: journalentries/${JE_ID} missing transactionDate" >&2 && exit 2
# Hand both instants to Python for a timezone-aware comparison; the quoted 'PY'
# delimiter prevents shell expansion, so values travel via the environment.
ACK_TS="$ACK_TS" CREDIT_DATE="$CREDIT_DATE" python3 <<'PY'
import os, sys
from datetime import datetime, timezone
# Normalise a trailing "Z" to "+00:00" so datetime.fromisoformat accepts it.
ack_s = os.environ["ACK_TS"].strip().replace("Z", "+00:00")
try:
    ack = datetime.fromisoformat(ack_s)
except ValueError:
    print("error: cannot parse ACK timestamp as ISO-8601", file=sys.stderr)
    sys.exit(2)
# A naive ACK timestamp is assumed to be UTC.
if ack.tzinfo is None:
    ack = ack.replace(tzinfo=timezone.utc)
d = os.environ["CREDIT_DATE"].strip()[:10]
try:
    y, m, day = (int(d[0:4]), int(d[5:7]), int(d[8:10]))
    # End of the credit value date in UTC (23:59:59.999).
    credit_end = datetime(y, m, day, 23, 59, 59, 999000, tzinfo=timezone.utc)
except Exception:
    print("error: bad transactionDate", file=sys.stderr)
    sys.exit(2)
# Pass iff the ACK is strictly before the end of the credit value date.
if ack < credit_end:
    print(f"OK: ack {ack.isoformat()} is before credit value-date end {credit_end.isoformat()}")
    sys.exit(0)
print(f"FAIL: ack {ack.isoformat()} is not before credit window end {credit_end.isoformat()}", file=sys.stderr)
sys.exit(1)
PY

View File

@@ -45,7 +45,6 @@ p2p-port=30303
# QBFT Consensus
miner-enabled=false
miner-coinbase="0x0000000000000000000000000000000000000000"
sync-mode="FULL"

View File

@@ -56,6 +56,7 @@ available_ips=(
"${IP_SERVICE_52:-${IP_SERVICE_52:-192.168.11.52}}"
"${DB_HOST:-192.168.11.53}"
"${IP_ORDER_LEGAL:-192.168.11.87}"
"${IP_ORDER_MCP_LEGAL:-192.168.11.94}"
"${IP_SERVICE_55:-${IP_SERVICE_55:-192.168.11.55}}"
"${IP_SERVICE_56:-${IP_SERVICE_56:-192.168.11.56}}"
"${IP_SERVICE_57:-${IP_SERVICE_57:-192.168.11.57}}"

View File

@@ -152,7 +152,7 @@ declare -A CONTAINERS=(
["10080"]="order-eresidency:${IP_SERVICE_43:-${IP_SERVICE_43:-${IP_SERVICE_43:-192.168.11.43}}}:2048:2:20"
["10090"]="order-portal-public:${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-192.168.11.36}}}}}}:2048:2:20"
["10091"]="order-portal-internal:${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-${IP_SERVICE_35:-192.168.11.35}}}}}}:2048:2:20"
["10092"]="order-mcp-legal:${IP_MIM_WEB:-192.168.11.37}:2048:2:20"
["10092"]="order-mcp-legal:${IP_ORDER_MCP_LEGAL:-192.168.11.94}:2048:2:20"
["10100"]="dbis-postgres-primary:${PROXMOX_HOST_ML110}5:4096:4:50"
["10101"]="dbis-postgres-replica-1:${PROXMOX_HOST_ML110}6:4096:4:50"
["10120"]="dbis-redis:${PROXMOX_HOST_R630_02}0:2048:2:20"

View File

@@ -19,10 +19,12 @@ for a in "$@"; do [[ "$a" == "--dry-run" ]] && DRY_RUN=true && break; done
if $DRY_RUN; then
echo "=== Completable from anywhere (--dry-run: commands only) ==="
echo ""
echo "1. Config validation: bash scripts/validation/validate-config-files.sh [--dry-run]"
echo "2. On-chain check (138): SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh || true"
echo "3. All validation: bash scripts/verify/run-all-validation.sh --skip-genesis"
echo "4. Reconcile .env: bash scripts/verify/reconcile-env-canonical.sh --print"
echo "1. Config validation: bash scripts/validation/validate-config-files.sh [--dry-run]"
echo " (optional: python3 -m pip install check-jsonschema — step 1 then validates config/dbis-institutional JSON Schemas too)"
echo "2. On-chain check (138): SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh || true"
echo "3. All validation: bash scripts/verify/run-all-validation.sh --skip-genesis"
echo "4. Public report API: SKIP_EXIT=1 bash scripts/verify/check-public-report-api.sh || true"
echo "5. Reconcile .env: bash scripts/verify/reconcile-env-canonical.sh --print"
echo ""
echo "Run without --dry-run to execute. Exit 0 = success."
exit 0
@@ -32,22 +34,27 @@ echo "=== Completable from anywhere (no LAN/creds) ==="
echo ""
# 1. Config validation
echo "[Step 1/4] Config validation..."
echo "[Step 1/5] Config validation..."
bash scripts/validation/validate-config-files.sh
echo ""
# 2. On-chain contract check (Chain 138) — may warn if RPC unreachable
echo "[Step 2/4] On-chain contract check (Chain 138)..."
echo "[Step 2/5] On-chain contract check (Chain 138)..."
SKIP_EXIT=1 bash scripts/verify/check-contracts-on-chain-138.sh || true
echo ""
# 3. Full validation (skip genesis to avoid RPC)
echo "[Step 3/4] Run all validation (--skip-genesis)..."
echo "[Step 3/5] Run all validation (--skip-genesis)..."
bash scripts/verify/run-all-validation.sh --skip-genesis
echo ""
# 4. Emit canonical .env lines for reconciliation
echo "[Step 4/4] Canonical .env (reconcile smom-dbis-138/.env)..."
echo "[Step 4/5] Public report API / token-aggregation health..."
SKIP_EXIT=1 bash scripts/verify/check-public-report-api.sh || true
echo ""
# 5. Emit canonical .env lines for reconciliation
echo "[Step 5/5] Canonical .env (reconcile smom-dbis-138/.env)..."
bash scripts/verify/reconcile-env-canonical.sh --print
echo ""

View File

@@ -0,0 +1,412 @@
#!/usr/bin/env bash
# Run the full remaining operator checklist from a LAN-connected host.
# Order:
#   1. Fix token-aggregation DB + explorer /api/v1 proxy
#   2. Wave 0 (NPMplus RPC fix + backup)
#   3. Blockscout verification
#   4. Public/private E2E
#   5. Optional E2E remediation
#   6. Optional config-ready chains + LINK funding
#   7. Optional Chain 138 next steps
#   8. Optional real sendCrossChain
#   9. Optional local security / cron
#  10. Final completion summary
#
# Usage:
#   ./scripts/run-full-operator-completion-from-lan.sh --dry-run
#   ./scripts/run-full-operator-completion-from-lan.sh
#   ./scripts/run-full-operator-completion-from-lan.sh --fix-e2e-if-needed --install-cron
#   ./scripts/run-full-operator-completion-from-lan.sh --include-config-ready-chains --include-chain138-next-steps
#   ./scripts/run-full-operator-completion-from-lan.sh --include-send-cross-chain --send-amount 0.01 [--send-recipient 0x...]
#   ./scripts/run-full-operator-completion-from-lan.sh --force-nginx-reset
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
# Best-effort load of LAN IP assignments; a missing file falls back to the
# hard-coded defaults used further down.
if [[ -f "$PROJECT_ROOT/config/ip-addresses.conf" ]]; then
  # shellcheck source=config/ip-addresses.conf
  source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
fi
# CLI flag state, populated by the argument loop that follows.
DRY_RUN=false
SKIP_TOKEN_API_FIX=false
SKIP_WAVE0=false
SKIP_VERIFY=false
SKIP_E2E=false
SKIP_BACKUP=false
FIX_E2E_IF_NEEDED=false
INCLUDE_CONFIG_READY_CHAINS=false
INCLUDE_CHAIN138_NEXT_STEPS=false
INCLUDE_SEND_CROSS_CHAIN=false
INSTALL_CRON=false
APPLY_LOCAL_SECURITY=false
FORCE_NGINX_RESET=false
# Value-carrying options (empty means "use defaults / auto-detect").
TOKEN_AGG_PORT_OVERRIDE=""
LINK_AMOUNT=""
SEND_AMOUNT=""
SEND_RECIPIENT=""
# Abort unless a value follows the given flag.
#   $1 = flag name, $2 = number of args still unparsed (including the flag).
_require_flag_value() {
  if [[ "$2" -lt 2 ]]; then
    echo "ERROR: $1 requires a value" >&2
    exit 1
  fi
}
# Parse CLI flags into the globals initialised above. Value-taking flags
# (--token-agg-port, --link, --send-amount, --send-recipient) now fail with a
# clear error when the value is missing; previously a trailing bare `shift` on
# an empty argument list made the script exit 1 silently under `set -e`.
parse_args() {
  while [[ $# -gt 0 ]]; do
    case "$1" in
      --dry-run) DRY_RUN=true ;;
      --skip-token-api-fix) SKIP_TOKEN_API_FIX=true ;;
      --skip-wave0) SKIP_WAVE0=true ;;
      --skip-verify) SKIP_VERIFY=true ;;
      --skip-e2e) SKIP_E2E=true ;;
      --skip-backup) SKIP_BACKUP=true ;;
      --fix-e2e-if-needed) FIX_E2E_IF_NEEDED=true ;;
      --include-config-ready-chains) INCLUDE_CONFIG_READY_CHAINS=true ;;
      --include-chain138-next-steps) INCLUDE_CHAIN138_NEXT_STEPS=true ;;
      --include-send-cross-chain) INCLUDE_SEND_CROSS_CHAIN=true ;;
      --install-cron) INSTALL_CRON=true ;;
      --apply-local-security) APPLY_LOCAL_SECURITY=true ;;
      --force-nginx-reset) FORCE_NGINX_RESET=true ;;
      --token-agg-port)
        _require_flag_value "$1" $#
        shift
        TOKEN_AGG_PORT_OVERRIDE="$1"
        ;;
      --link)
        _require_flag_value "$1" $#
        shift
        LINK_AMOUNT="$1"
        ;;
      --send-amount)
        _require_flag_value "$1" $#
        shift
        SEND_AMOUNT="$1"
        ;;
      --send-recipient)
        _require_flag_value "$1" $#
        shift
        SEND_RECIPIENT="$1"
        ;;
      -h|--help)
        # Print the usage header at the top of this file.
        sed -n '1,28p' "$0"
        exit 0
        ;;
      *)
        echo "Unknown argument: $1" >&2
        exit 1
        ;;
    esac
    shift
  done
}
parse_args "$@"
# --include-send-cross-chain is only meaningful with an explicit amount.
if [[ "$INCLUDE_SEND_CROSS_CHAIN" == true && -z "$SEND_AMOUNT" ]]; then
  echo "ERROR: --include-send-cross-chain requires --send-amount <eth>" >&2
  exit 1
fi
# Proxmox host fronting VMID 5000: explicit override wins, then the R630-02
# LAN address from ip-addresses.conf, then a hard default.
PROXMOX_HOST="${PROXMOX_HOST_OVERRIDE:-${PROXMOX_HOST_R630_02:-192.168.11.12}}"
# ANSI-colored log helpers; all write to stdout.
log_info() { printf '\033[0;34m[INFO]\033[0m %s\n' "$1"; }
log_ok() { printf '\033[0;32m[OK]\033[0m %s\n' "$1"; }
log_warn() { printf '\033[0;33m[WARN]\033[0m %s\n' "$1"; }
log_err() { printf '\033[0;31m[ERR]\033[0m %s\n' "$1"; }
# Visual step separator.
section() { printf '\n=== %s ===\n' "$1"; }
# Echo a command shell-quoted on one indented line (used for dry-run previews).
print_cmd() {
  printf '  '
  printf '%q ' "$@"
  printf '\n'
}
# Execute the given command, or just announce it (shell-quoted) when DRY_RUN is set.
run_cmd() {
  if [[ "$DRY_RUN" != true ]]; then
    "$@"
    return
  fi
  printf '[DRY-RUN]\n'
  print_cmd "$@"
}
# ssh wrapper with the same dry-run contract as run_cmd.
run_ssh() {
  run_cmd ssh "$@"
}
# scp wrapper with the same dry-run contract as run_cmd.
run_scp() {
  run_cmd scp "$@"
}
# Copy a local script to the Proxmox host, push it into container VMID 5000
# as /root/<name>, and mark it executable there.
push_script_to_vmid5000() {
  local script_name
  script_name="$(basename "$1")"
  run_scp "$1" "root@${PROXMOX_HOST}:/tmp/${script_name}"
  run_ssh "root@${PROXMOX_HOST}" "pct push 5000 /tmp/${script_name} /root/${script_name}"
  run_ssh "root@${PROXMOX_HOST}" "pct exec 5000 -- chmod +x /root/${script_name}"
}
# Determine which local port the token-aggregation service listens on inside
# VMID 5000. Honors --token-agg-port; otherwise polls the container (up to 12
# tries, 2s apart), preferring 3001 over 3000. Prints the port on stdout;
# returns 1 if nothing answered.
detect_token_agg_port() {
  if [[ -n "$TOKEN_AGG_PORT_OVERRIDE" ]]; then
    printf '%s\n' "$TOKEN_AGG_PORT_OVERRIDE"
    return 0
  fi
  local attempt
  for attempt in $(seq 1 12); do
    # Probe listeners via ss first, then fall back to hitting the API directly.
    # NOTE(review): the ss grep only matches "*:PORT" bindings; "0.0.0.0:PORT"
    # listeners are caught by the curl fallback instead — confirm intended.
    if ssh "root@${PROXMOX_HOST}" "pct exec 5000 -- bash -lc '
      ss -lntp 2>/dev/null | grep -q \"*:3001\" && { echo 3001; exit 0; }
      ss -lntp 2>/dev/null | grep -q \"*:3000\" && { echo 3000; exit 0; }
      for p in 3001 3000; do
        curl -fsS --max-time 5 http://127.0.0.1:\$p/api/v1/networks >/dev/null 2>&1 && { echo \$p; exit 0; }
      done
      exit 1
    '" 2>/dev/null; then
      return 0
    fi
    sleep 2
  done
  return 1
}
# Step 1: repair the token-aggregation database and the explorer /api/v1
# reverse proxy on VMID 5000, then verify the public report API end to end.
fix_public_report_api() {
  section "Step 1: Token-Aggregation DB + /api/v1 Proxy"
  if [[ "$DRY_RUN" == true ]]; then
    run_cmd bash "$SCRIPT_DIR/apply-token-aggregation-fix.sh" --dry-run
  else
    run_cmd bash "$SCRIPT_DIR/apply-token-aggregation-fix.sh"
  fi
  local token_agg_port="${TOKEN_AGG_PORT_OVERRIDE:-auto}"
  if [[ "$DRY_RUN" != true ]]; then
    # Port detection needs live SSH, so it only runs outside dry-run.
    token_agg_port="$(detect_token_agg_port)" || {
      log_err "Could not detect token-aggregation port inside VMID 5000. Re-run with --token-agg-port 3001 or 3000."
      exit 1
    }
    log_info "Detected token-aggregation port: ${token_agg_port}"
  else
    log_info "Would auto-detect token-aggregation port inside VMID 5000 (prefers 3001, then 3000)."
  fi
  if [[ "$FORCE_NGINX_RESET" == true ]]; then
    # Heavy-handed path: replace the container's nginx config wholesale.
    log_warn "Using full nginx reset for VMID 5000."
    push_script_to_vmid5000 "$PROJECT_ROOT/explorer-monorepo/scripts/fix-nginx-conflicts-vmid5000.sh"
    run_ssh "root@${PROXMOX_HOST}" "pct exec 5000 -- bash -lc '/root/fix-nginx-conflicts-vmid5000.sh'"
  else
    push_script_to_vmid5000 "$PROJECT_ROOT/explorer-monorepo/scripts/apply-nginx-token-aggregation-proxy.sh"
    run_ssh "root@${PROXMOX_HOST}" "pct exec 5000 -- bash -lc 'TOKEN_AGG_PORT=${token_agg_port} CONFIG_FILE=/etc/nginx/sites-available/blockscout /root/apply-nginx-token-aggregation-proxy.sh'"
  fi
  if [[ "$DRY_RUN" == true ]]; then
    run_cmd bash "$SCRIPT_DIR/verify/check-public-report-api.sh"
  else
    # If the HTTPS-side patch didn't take, patch the HTTP /api/v1/ vhost and re-check.
    if ! bash "$SCRIPT_DIR/verify/check-public-report-api.sh"; then
      log_warn "Public report API still failing after HTTPS proxy patch. Applying HTTP /api/v1/ fallback on VMID 5000..."
      push_script_to_vmid5000 "$PROJECT_ROOT/scripts/fix-explorer-http-api-v1-proxy.sh"
      run_ssh "root@${PROXMOX_HOST}" "pct exec 5000 -- bash -lc 'TOKEN_AGG_PORT=${token_agg_port} /root/fix-explorer-http-api-v1-proxy.sh'"
      bash "$SCRIPT_DIR/verify/check-public-report-api.sh"
    fi
  fi
  run_cmd bash "$PROJECT_ROOT/metamask-integration/chain138-snap/scripts/verify-snap-api-and-icons.sh" "https://explorer.d-bis.org"
}
# Step 2: kick off Wave 0 from LAN, forwarding --skip-backup when requested.
run_wave0() {
  section "Step 2: Wave 0"
  set --
  if [[ "$SKIP_BACKUP" == true ]]; then
    set -- --skip-backup
  fi
  run_cmd bash "$SCRIPT_DIR/run-wave0-from-lan.sh" "$@"
}
# Step 3: run contract verification against Blockscout. The Chain 138 env file
# is exported (set -a) into the subshell when present; missing file is tolerated.
run_blockscout_verify() {
  section "Step 3: Blockscout Verification"
  if [[ "$DRY_RUN" == true ]]; then
    run_cmd bash -lc "set -a; source '$PROJECT_ROOT/smom-dbis-138/.env' 2>/dev/null || true; set +a; '$SCRIPT_DIR/verify/run-contract-verification-with-proxy.sh'"
  else
    bash -lc "set -a; source '$PROJECT_ROOT/smom-dbis-138/.env' 2>/dev/null || true; set +a; '$SCRIPT_DIR/verify/run-contract-verification-with-proxy.sh'"
  fi
}
# Step 4: run the public and private end-to-end routing checks. When at least
# one profile fails and --fix-e2e-if-needed was given, attempt remediation and
# re-run both profiles (failures after remediation only warn).
run_e2e_with_optional_fix() {
  section "Step 4: Public/Private E2E"
  local public_ok=0
  local private_ok=0
  if [[ "$DRY_RUN" == true ]]; then
    run_cmd bash "$SCRIPT_DIR/verify/verify-end-to-end-routing.sh" --profile=public
    run_cmd bash "$SCRIPT_DIR/verify/verify-end-to-end-routing.sh" --profile=private
    if [[ "$FIX_E2E_IF_NEEDED" == true ]]; then
      run_cmd bash "$SCRIPT_DIR/maintenance/address-all-remaining-502s.sh" --run-besu-fix --e2e --dry-run
    fi
    return 0
  fi
  if bash "$SCRIPT_DIR/verify/verify-end-to-end-routing.sh" --profile=public; then
    public_ok=1
    log_ok "Public E2E passed."
  else
    log_warn "Public E2E failed."
  fi
  if bash "$SCRIPT_DIR/verify/verify-end-to-end-routing.sh" --profile=private; then
    private_ok=1
    log_ok "Private E2E passed."
  else
    log_warn "Private E2E failed."
  fi
  if [[ "$public_ok" == 1 && "$private_ok" == 1 ]]; then
    return 0
  fi
  if [[ "$FIX_E2E_IF_NEEDED" != true ]]; then
    log_warn "E2E remediation not requested. Re-run with --fix-e2e-if-needed to attempt backend/NPM/Besu fixes."
    return 0
  fi
  # At least one profile failed and remediation was requested.
  section "Step 4b: E2E Remediation"
  bash "$SCRIPT_DIR/maintenance/address-all-remaining-502s.sh" --run-besu-fix --e2e
  bash "$SCRIPT_DIR/verify/verify-end-to-end-routing.sh" --profile=public || log_warn "Public E2E still failing after remediation."
  bash "$SCRIPT_DIR/verify/verify-end-to-end-routing.sh" --profile=private || log_warn "Private E2E still failing after remediation."
}
# Step 5 (opt-in): finish config-ready chain deployments, then fund the CCIP
# bridges with LINK (explicit --link amount when given, else script defaults).
run_config_ready_chains() {
  section "Step 5: Config-Ready Chains + LINK Funding"
  if [[ "$DRY_RUN" == true ]]; then
    run_cmd bash -lc "cd '$PROJECT_ROOT/smom-dbis-138' && DRY_RUN=1 ./scripts/deployment/complete-config-ready-chains.sh"
    if [[ -n "$LINK_AMOUNT" ]]; then
      run_cmd bash -lc "cd '$PROJECT_ROOT/smom-dbis-138' && ./scripts/deployment/fund-ccip-bridges-with-link.sh --dry-run --link '$LINK_AMOUNT'"
    else
      run_cmd bash -lc "cd '$PROJECT_ROOT/smom-dbis-138' && ./scripts/deployment/fund-ccip-bridges-with-link.sh --dry-run"
    fi
    return 0
  fi
  # Subshells keep the cd from leaking into the rest of the run.
  (cd "$PROJECT_ROOT/smom-dbis-138" && ./scripts/deployment/complete-config-ready-chains.sh)
  if [[ -n "$LINK_AMOUNT" ]]; then
    (cd "$PROJECT_ROOT/smom-dbis-138" && ./scripts/deployment/fund-ccip-bridges-with-link.sh --link "$LINK_AMOUNT")
  else
    (cd "$PROJECT_ROOT/smom-dbis-138" && ./scripts/deployment/fund-ccip-bridges-with-link.sh)
  fi
}
# Step 6 (opt-in): run the Chain 138 follow-ups, passing --dry-run through.
run_chain138_next_steps() {
  section "Step 6: Chain 138 Next Steps"
  set --
  if [[ "$DRY_RUN" == true ]]; then
    set -- --dry-run
  fi
  run_cmd bash "$SCRIPT_DIR/deployment/run-all-next-steps-chain138.sh" "$@"
}
# Step 7 (opt-in): execute the real cross-chain send. Argument order matches
# the bridge script's contract: <amount> [recipient] [--dry-run].
run_send_cross_chain() {
  section "Step 7: Real sendCrossChain"
  set -- "$SEND_AMOUNT"
  if [[ -n "$SEND_RECIPIENT" ]]; then
    set -- "$@" "$SEND_RECIPIENT"
  fi
  if [[ "$DRY_RUN" == true ]]; then
    set -- "$@" --dry-run
  fi
  run_cmd bash "$SCRIPT_DIR/bridge/run-send-cross-chain.sh" "$@"
}
# Step 8: tighten local secret-file permissions, then run (or preview) the
# SSH/firewall hardening and cron-installation helpers.
# Fix: the original APPLY_LOCAL_SECURITY branch had identical dry-run and
# non-dry-run arms (run_cmd already implements dry-run), so the redundant
# inner if/else is collapsed; chmod handling is deduplicated into a loop.
run_local_security_and_cron() {
  section "Step 8: Local Security + Cron"
  local env_file
  for env_file in "$PROJECT_ROOT/.env" "$PROJECT_ROOT/smom-dbis-138/.env"; do
    # run_cmd executes the chmod directly when not in dry-run.
    [[ -f "$env_file" ]] && run_cmd chmod 600 "$env_file"
  done
  if [[ "$DRY_RUN" != true ]]; then
    log_ok "Local .env permissions tightened where present."
  fi
  if [[ "$APPLY_LOCAL_SECURITY" == true ]]; then
    run_cmd bash "$SCRIPT_DIR/security/setup-ssh-key-auth.sh" --apply
    run_cmd bash "$SCRIPT_DIR/security/firewall-proxmox-8006.sh" --apply "192.168.11.0/24"
  else
    # Preview-only hardening: key-auth check plus a firewall dry-run.
    run_cmd bash "$SCRIPT_DIR/security/setup-ssh-key-auth.sh"
    run_cmd bash "$SCRIPT_DIR/security/firewall-proxmox-8006.sh" --dry-run "192.168.11.0/24"
  fi
  if [[ "$INSTALL_CRON" == true ]]; then
    run_cmd bash "$SCRIPT_DIR/maintenance/schedule-npmplus-backup-cron.sh" --install
    run_cmd bash "$SCRIPT_DIR/maintenance/schedule-daily-weekly-cron.sh" --install
  else
    run_cmd bash "$SCRIPT_DIR/maintenance/schedule-npmplus-backup-cron.sh" --show
    run_cmd bash "$SCRIPT_DIR/maintenance/schedule-daily-weekly-cron.sh" --show
  fi
}
# List the checklist items that still require manual or external action.
print_external_remainder() {
  section "Still Manual / External"
  printf '%s\n' \
    '- LINK support on the mainnet relay: docs/07-ccip/RELAY_BRIDGE_ADD_LINK_SUPPORT_RUNBOOK.md' \
    '- WEMIX verification if policy changes: docs/07-ccip/WEMIX_TOKEN_VERIFICATION.md' \
    '- Phase 2-4 infra expansion and observability: docs/00-meta/OPERATOR_AND_EXTERNAL_COMPLETION_CHECKLIST.md' \
    '- Ledger, Trust Wallet, Consensys, CoinGecko, CMC submissions: docs/00-meta/STILL_NOT_DONE_EXECUTION_CHECKLIST.md'
}
# ---- Main driver: announce the effective configuration, then run each step ----
section "Run Full Operator Completion"
printf '  dry-run=%s skip-token-api-fix=%s skip-wave0=%s skip-verify=%s skip-e2e=%s fix-e2e-if-needed=%s include-config-ready-chains=%s include-chain138-next-steps=%s include-send-cross-chain=%s install-cron=%s apply-local-security=%s force-nginx-reset=%s\n' \
  "$DRY_RUN" "$SKIP_TOKEN_API_FIX" "$SKIP_WAVE0" "$SKIP_VERIFY" "$SKIP_E2E" "$FIX_E2E_IF_NEEDED" "$INCLUDE_CONFIG_READY_CHAINS" "$INCLUDE_CHAIN138_NEXT_STEPS" "$INCLUDE_SEND_CROSS_CHAIN" "$INSTALL_CRON" "$APPLY_LOCAL_SECURITY" "$FORCE_NGINX_RESET"
printf '  proxmox-host=%s token-agg-port=%s\n' "$PROXMOX_HOST" "${TOKEN_AGG_PORT_OVERRIDE:-auto}"
# Core steps (each individually skippable via --skip-*).
if [[ "$SKIP_TOKEN_API_FIX" != true ]]; then
  fix_public_report_api
else
  log_warn "Skipping token-aggregation/API fix."
fi
if [[ "$SKIP_WAVE0" != true ]]; then
  run_wave0
else
  log_warn "Skipping Wave 0."
fi
if [[ "$SKIP_VERIFY" != true ]]; then
  run_blockscout_verify
else
  log_warn "Skipping Blockscout verification."
fi
if [[ "$SKIP_E2E" != true ]]; then
  run_e2e_with_optional_fix
else
  log_warn "Skipping E2E."
fi
# Opt-in steps (enabled via --include-*).
if [[ "$INCLUDE_CONFIG_READY_CHAINS" == true ]]; then
  run_config_ready_chains
fi
if [[ "$INCLUDE_CHAIN138_NEXT_STEPS" == true ]]; then
  run_chain138_next_steps
fi
if [[ "$INCLUDE_SEND_CROSS_CHAIN" == true ]]; then
  run_send_cross_chain
fi
run_local_security_and_cron
section "Step 9: Completion Summary"
run_cmd bash "$SCRIPT_DIR/verify/check-completion-status.sh"
print_external_remainder

View File

@@ -157,7 +157,9 @@ case "$MODE" in
run_remote "$PROXMOX_HOST" "bash scripts/run-wave0-from-lan.sh $SKIP_BACKUP"
;;
npmplus)
run_remote "$PROXMOX_HOST" "bash scripts/nginx-proxy-manager/update-npmplus-proxy-hosts-api.sh"
# Large proxy lists can exceed default curl max-time (see NPM_CURL_MAX_TIME in update script).
_npm_curl_max="${NPM_CURL_MAX_TIME:-600}"
run_remote "$PROXMOX_HOST" "NPM_CURL_MAX_TIME=$_npm_curl_max bash scripts/nginx-proxy-manager/update-npmplus-proxy-hosts-api.sh"
;;
backup)
run_remote "$PROXMOX_HOST" "bash scripts/verify/backup-npmplus.sh"

View File

@@ -332,6 +332,7 @@ main() {
"www" # www.sankofa.nexus
"portal" # portal.sankofa.nexus (client SSO)
"admin" # admin.sankofa.nexus (client access admin)
"dash" # dash.sankofa.nexus (operator dash; NPM upstream may follow portal until IP_SANKOFA_DASH set)
"keycloak" # keycloak.sankofa.nexus (IdP)
"studio" # studio.sankofa.nexus (FusionAI Creator)
"phoenix" # phoenix.sankofa.nexus
@@ -349,6 +350,26 @@ main() {
# d-bis.org domain records
if [ "$run_dbis" = 1 ] && [ -n "$ZONE_D_BIS_ORG" ]; then
DBIS_RECORDS=(
"@" # d-bis.org (public apex)
"www" # www.d-bis.org → NPM (301 to apex in advanced_config)
"admin" # admin.d-bis.org (canonical admin console)
"core" # core.d-bis.org (DBIS Core client portal)
"members" # members.d-bis.org (institutional program)
"developers" # developers.d-bis.org
"data" # data.d-bis.org (API / health)
"research" # research.d-bis.org
"policy" # policy.d-bis.org
"ops" # ops.d-bis.org
"identity" # identity.d-bis.org
"status" # status.d-bis.org
"sandbox" # sandbox.d-bis.org
"interop" # interop.d-bis.org
"docs" # docs.d-bis.org
"mifos" # mifos.d-bis.org
"dapp" # dapp.d-bis.org
"gitea" # gitea.d-bis.org
"dev" # dev.d-bis.org
"codespaces" # codespaces.d-bis.org
"rpc-http-pub" # rpc-http-pub.d-bis.org
"rpc-ws-pub" # rpc-ws-pub.d-bis.org
"rpc" # rpc.d-bis.org (primary RPC)
@@ -357,8 +378,19 @@ main() {
"ws.rpc2" # ws.rpc2.d-bis.org (secondary WebSocket)
"rpc-http-prv" # rpc-http-prv.d-bis.org
"rpc-ws-prv" # rpc-ws-prv.d-bis.org
"rpc-core" # rpc-core.d-bis.org (alias to VMID 2101 core RPC; deploy still prefers IP:8545)
"rpc-fireblocks" # rpc-fireblocks.d-bis.org
"ws.rpc-fireblocks" # ws.rpc-fireblocks.d-bis.org
"rpc-alltra" # rpc-alltra.d-bis.org
"rpc-alltra-2" # rpc-alltra-2.d-bis.org
"rpc-alltra-3" # rpc-alltra-3.d-bis.org
"rpc-hybx" # rpc-hybx.d-bis.org
"rpc-hybx-2" # rpc-hybx-2.d-bis.org
"rpc-hybx-3" # rpc-hybx-3.d-bis.org
"cacti-alltra" # cacti-alltra.d-bis.org
"cacti-hybx" # cacti-hybx.d-bis.org
"explorer" # explorer.d-bis.org
"dbis-admin" # dbis-admin.d-bis.org
"dbis-admin" # dbis-admin.d-bis.org (legacy alias)
"dbis-api" # dbis-api.d-bis.org
"dbis-api-2" # dbis-api-2.d-bis.org
"secure" # secure.d-bis.org

View File

@@ -51,7 +51,7 @@ check_env() {
if $DRY_RUN; then
echo "=== Validation (--dry-run: would check) ==="
echo " REQUIRED_FILES: ${REQUIRED_FILES:-<default: config/ip-addresses.conf, .env.example, token-mapping*.json>}"
echo " REQUIRED_FILES: ${REQUIRED_FILES:-<default: config/ip-addresses.conf, .env.example, token-mapping*.json, gru-transport-active.json, gru-iso4217-currency-manifest.json>}"
echo " OPTIONAL_ENV: ${OPTIONAL_ENV:-<empty; set VALIDATE_OPTIONAL_ENV for Proxmox API vars>}"
exit 0
fi
@@ -63,6 +63,7 @@ if [[ -n "$REQUIRED_FILES" ]]; then
else
# Default: check common locations
[[ -d "$PROJECT_ROOT/config" ]] && check_file "$PROJECT_ROOT/config/ip-addresses.conf" || true
[[ -f "$PROJECT_ROOT/config/smart-contracts-master.json" ]] && check_file "$PROJECT_ROOT/config/smart-contracts-master.json" || true
[[ -f "$PROJECT_ROOT/.env.example" ]] && log_ok ".env.example present (copy to .env and fill)" || true
# Token mapping (Chain 138 ↔ Mainnet): optional but validate structure if present
if [[ -f "$PROJECT_ROOT/config/token-mapping.json" ]]; then
@@ -91,6 +92,283 @@ else
fi
fi
fi
# Optional GRU transport overlay: when present, sanity-check its top-level shape.
if [[ -f "$PROJECT_ROOT/config/gru-transport-active.json" ]]; then
  log_ok "Found: config/gru-transport-active.json"
  # Structural check is best-effort: skipped silently when jq is unavailable.
  if command -v jq &>/dev/null; then
    # Every required overlay section must exist with the expected JSON type.
    if jq -e '
      (.system.name | type == "string")
      and (.system.shortName | type == "string")
      and (.enabledCanonicalTokens | type == "array")
      and (.enabledDestinationChains | type == "array")
      and (.approvedBridgePeers | type == "array")
      and (.transportPairs | type == "array")
      and (.publicPools | type == "array")
    ' "$PROJECT_ROOT/config/gru-transport-active.json" &>/dev/null; then
      log_ok "gru-transport-active.json: top-level overlay structure is valid"
    else
      log_err "gru-transport-active.json: invalid top-level structure"
      ERRORS=$((ERRORS + 1))
    fi
  fi
if command -v node &>/dev/null; then
if PROJECT_ROOT="$PROJECT_ROOT" node <<'NODE'
const fs = require('fs');
const path = require('path');
const projectRoot = process.env.PROJECT_ROOT;
function readJson(relativePath) {
return JSON.parse(fs.readFileSync(path.join(projectRoot, relativePath), 'utf8'));
}
function normalizeAddress(address) {
return typeof address === 'string' ? address.trim().toLowerCase() : '';
}
function isNonZeroAddress(address) {
const normalized = normalizeAddress(address);
return /^0x[a-f0-9]{40}$/.test(normalized) && normalized !== '0x0000000000000000000000000000000000000000';
}
function refConfigured(ref) {
return !!ref && typeof ref === 'object' && (
(typeof ref.address === 'string' && ref.address.trim() !== '') ||
(typeof ref.env === 'string' && ref.env.trim() !== '')
);
}
const active = readJson('config/gru-transport-active.json');
const multichain = readJson('config/token-mapping-multichain.json');
const deployment = readJson('cross-chain-pmm-lps/config/deployment-status.json');
const poolMatrix = readJson('cross-chain-pmm-lps/config/pool-matrix.json');
const currencyManifest = readJson('config/gru-iso4217-currency-manifest.json');
const errors = [];
const canonicalChainId = Number(active.system?.canonicalChainId ?? 138);
const enabledCanonicalTokens = Array.isArray(active.enabledCanonicalTokens) ? active.enabledCanonicalTokens : [];
const enabledCanonical = new Set(enabledCanonicalTokens.map((token) => String(token.symbol)));
const enabledChainsArray = Array.isArray(active.enabledDestinationChains) ? active.enabledDestinationChains : [];
const enabledChains = new Set(enabledChainsArray.map((chain) => Number(chain.chainId)));
const peersByKey = new Map((active.approvedBridgePeers || []).map((peer) => [String(peer.key), peer]));
const reserveVerifiers = active.reserveVerifiers && typeof active.reserveVerifiers === 'object'
? active.reserveVerifiers
: {};
const transportPairsByKey = new Map((active.transportPairs || []).map((pair) => [String(pair.key), pair]));
const publicPoolsByKey = new Map((active.publicPools || []).map((pool) => [String(pool.key), pool]));
const manifestByCode = new Map((currencyManifest.currencies || []).map((currency) => [String(currency.code), currency]));
function getMappingPair(fromChainId, toChainId) {
return (multichain.pairs || []).find(
(entry) => Number(entry.fromChainId) === Number(fromChainId) && Number(entry.toChainId) === Number(toChainId)
);
}
function getMappingToken(fromChainId, toChainId, mappingKey) {
const pair = getMappingPair(fromChainId, toChainId);
if (!pair) return null;
return (pair.tokens || []).find((token) => token.key === mappingKey) || null;
}
function getExpectedPoolKey(chainId, mirroredSymbol) {
const chain = poolMatrix.chains?.[String(chainId)];
const hubStable = typeof chain?.hubStable === 'string' ? chain.hubStable.trim() : '';
if (!hubStable) return null;
return `${chainId}-${mirroredSymbol}-${hubStable}`;
}
for (const chain of active.enabledDestinationChains || []) {
if (!peersByKey.has(String(chain.peerKey || ''))) {
errors.push(`enabledDestinationChains[${chain.chainId}] references missing peerKey ${chain.peerKey}`);
}
}
for (const token of enabledCanonicalTokens) {
const currency = manifestByCode.get(String(token.currencyCode || ''));
if (!currency) {
errors.push(`enabledCanonicalTokens[${token.symbol}] references missing currencyCode ${token.currencyCode} in gru-iso4217-currency-manifest.json`);
continue;
}
if (currency.status?.deployed !== true) {
errors.push(`enabledCanonicalTokens[${token.symbol}] requires manifest currency ${token.currencyCode} to be deployed`);
}
if (currency.status?.transportActive !== true) {
errors.push(`enabledCanonicalTokens[${token.symbol}] requires manifest currency ${token.currencyCode} to mark transportActive=true`);
}
}
for (const pair of active.transportPairs || []) {
const canonicalChainId = Number(pair.canonicalChainId ?? active.system?.canonicalChainId ?? 138);
const destinationChainId = Number(pair.destinationChainId);
const canonicalSymbol = String(pair.canonicalSymbol || '');
const mirroredSymbol = String(pair.mirroredSymbol || '');
if (!enabledCanonical.has(canonicalSymbol)) {
errors.push(`transportPairs[${pair.key}] uses canonicalSymbol ${canonicalSymbol} which is not enabled`);
}
if (!enabledChains.has(destinationChainId)) {
errors.push(`transportPairs[${pair.key}] uses destinationChainId ${destinationChainId} which is not enabled`);
}
const peer = peersByKey.get(String(pair.peerKey || ''));
if (!peer) {
errors.push(`transportPairs[${pair.key}] is missing approved bridge peer ${pair.peerKey}`);
} else {
if (!refConfigured(peer.l1Bridge)) {
errors.push(`approvedBridgePeers[${peer.key}] is missing l1Bridge wiring`);
}
if (!refConfigured(peer.l2Bridge)) {
errors.push(`approvedBridgePeers[${peer.key}] is missing l2Bridge wiring`);
}
}
const maxOutstanding = pair.maxOutstanding && typeof pair.maxOutstanding === 'object' ? pair.maxOutstanding : null;
if (!maxOutstanding || (!maxOutstanding.amount && !maxOutstanding.env)) {
errors.push(`transportPairs[${pair.key}] is missing maxOutstanding amount/env`);
}
const mappingToken = getMappingToken(canonicalChainId, destinationChainId, pair.mappingKey);
if (!mappingToken) {
errors.push(`transportPairs[${pair.key}] mappingKey ${pair.mappingKey} is missing from token-mapping-multichain.json`);
} else {
if (!isNonZeroAddress(mappingToken.addressFrom)) {
errors.push(`transportPairs[${pair.key}] has invalid canonical addressFrom in token-mapping-multichain.json`);
}
if (!isNonZeroAddress(mappingToken.addressTo)) {
errors.push(`transportPairs[${pair.key}] mapping exists but cW pair is not deployed (addressTo missing/zero)`);
}
}
const deploymentChain = deployment.chains?.[String(destinationChainId)];
const deployedMirror = deploymentChain?.cwTokens?.[mirroredSymbol];
if (!deploymentChain || !isNonZeroAddress(deployedMirror)) {
errors.push(`transportPairs[${pair.key}] mapping exists but deployment-status.json has no deployed ${mirroredSymbol} for chain ${destinationChainId}`);
} else if (mappingToken && normalizeAddress(deployedMirror) !== normalizeAddress(mappingToken.addressTo)) {
errors.push(`transportPairs[${pair.key}] deployment-status.json ${mirroredSymbol} does not match token-mapping-multichain.json addressTo`);
}
if ((pair.publicPoolKeys || []).length > 0) {
for (const publicPoolKey of pair.publicPoolKeys) {
if (!publicPoolsByKey.has(String(publicPoolKey))) {
errors.push(`transportPairs[${pair.key}] references missing public pool key ${publicPoolKey}`);
}
}
}
if (pair.reserveVerifierKey) {
const verifier = reserveVerifiers[pair.reserveVerifierKey];
if (!verifier) {
errors.push(`transportPairs[${pair.key}] requires missing reserve verifier ${pair.reserveVerifierKey}`);
} else {
if (!refConfigured(verifier.bridgeRef)) {
errors.push(`reserveVerifiers.${pair.reserveVerifierKey} is missing bridgeRef wiring`);
}
if (!refConfigured(verifier.verifierRef)) {
errors.push(`reserveVerifiers.${pair.reserveVerifierKey} is missing verifierRef wiring`);
}
if (verifier.requireVaultBacking && !refConfigured(verifier.vaultRef)) {
errors.push(`reserveVerifiers.${pair.reserveVerifierKey} requires vault backing but vaultRef is unset`);
}
if (verifier.requireReserveSystemBalance && !refConfigured(verifier.reserveSystemRef)) {
errors.push(`reserveVerifiers.${pair.reserveVerifierKey} requires reserve-system balance checks but reserveSystemRef is unset`);
}
}
}
}
for (const pool of active.publicPools || []) {
if (pool.active === true) {
if (!isNonZeroAddress(pool.poolAddress)) {
errors.push(`publicPools[${pool.key}] is active but has no poolAddress`);
continue;
}
const deploymentChain = deployment.chains?.[String(pool.chainId)];
const deployedPools = Array.isArray(deploymentChain?.pmmPools) ? deploymentChain.pmmPools : [];
const deploymentMatch = deployedPools.some((entry) => normalizeAddress(entry?.poolAddress) === normalizeAddress(pool.poolAddress));
if (!deploymentMatch) {
errors.push(`publicPools[${pool.key}] is active but deployment-status.json does not contain its poolAddress`);
}
}
}
for (const [chainIdKey, deploymentChain] of Object.entries(deployment.chains || {})) {
const destinationChainId = Number(chainIdKey);
if (destinationChainId === canonicalChainId) continue;
if (deploymentChain?.bridgeAvailable !== true) continue;
const mappingPair = getMappingPair(canonicalChainId, destinationChainId);
if (!mappingPair) continue;
let compatible = true;
for (const token of enabledCanonicalTokens) {
const mappingKey = String(token.mappingKey || '');
const mirroredSymbol = String(token.mirroredSymbol || '');
const mappingToken = mappingKey ? (mappingPair.tokens || []).find((entry) => entry.key === mappingKey) : null;
const deployedMirror = deploymentChain?.cwTokens?.[mirroredSymbol];
const expectedPoolKey = getExpectedPoolKey(destinationChainId, mirroredSymbol);
if (
!mappingKey ||
!mappingToken ||
!isNonZeroAddress(mappingToken.addressTo) ||
!isNonZeroAddress(deployedMirror) ||
normalizeAddress(mappingToken.addressTo) !== normalizeAddress(deployedMirror) ||
!expectedPoolKey
) {
compatible = false;
break;
}
}
if (!compatible) continue;
const enabledChain = enabledChainsArray.find((chain) => Number(chain.chainId) === destinationChainId);
if (!enabledChain) {
errors.push(`compatible destination chain ${destinationChainId} (${deploymentChain?.name || 'unknown'}) is missing from enabledDestinationChains`);
continue;
}
for (const token of enabledCanonicalTokens) {
const expectedPairKey = `${canonicalChainId}-${destinationChainId}-${token.symbol}-${token.mirroredSymbol}`;
const expectedPoolKey = getExpectedPoolKey(destinationChainId, String(token.mirroredSymbol || ''));
const pair = transportPairsByKey.get(expectedPairKey);
if (!pair) {
errors.push(`compatible destination chain ${destinationChainId} is missing transport pair ${expectedPairKey}`);
continue;
}
if (expectedPoolKey && !publicPoolsByKey.has(expectedPoolKey)) {
errors.push(`compatible destination chain ${destinationChainId} is missing public pool placeholder ${expectedPoolKey}`);
}
if (expectedPoolKey && !(pair.publicPoolKeys || []).includes(expectedPoolKey)) {
errors.push(`transportPairs[${pair.key}] must include the pool-matrix first-hop key ${expectedPoolKey}`);
}
}
}
if (errors.length > 0) {
console.error(errors.join('\n'));
process.exit(1);
}
NODE
then
log_ok "gru-transport-active.json: overlay cross-checks passed"
else
log_err "gru-transport-active.json: overlay cross-checks failed"
ERRORS=$((ERRORS + 1))
fi
else
log_err "Node.js is required to validate gru-transport-active.json cross-file wiring"
ERRORS=$((ERRORS + 1))
fi
else
log_err "Missing config/gru-transport-active.json"
ERRORS=$((ERRORS + 1))
fi
# Informational only: note the master contract registry when present.
if [[ -f "$PROJECT_ROOT/config/smart-contracts-master.json" ]]; then
  log_ok "Found: config/smart-contracts-master.json" || true
fi
# Token lists (Uniswap format): validate structure if present
for list in token-lists/lists/dbis-138.tokenlist.json token-lists/lists/cronos.tokenlist.json token-lists/lists/all-mainnet.tokenlist.json; do
@@ -154,6 +432,79 @@ else
log_err "Missing config/proxmox-operational-template.json"
ERRORS=$((ERRORS + 1))
fi
# ISO-4217 currency manifest: verify top-level shape plus per-currency
# invariants (unique codes, 3+ char code, fiat/commodity type, boolean
# lifecycle flags, canonicalAssets object). Skipped silently when jq is
# not installed; only presence of the file is reported then.
if [[ -f "$PROJECT_ROOT/config/gru-iso4217-currency-manifest.json" ]]; then
  log_ok "Found: config/gru-iso4217-currency-manifest.json"
  if command -v jq &>/dev/null; then
    if jq -e '
      (.name | type == "string")
      and (.version | type == "string")
      and (.updated | type == "string")
      and (.canonicalChainId | type == "number")
      and (.currencies | type == "array")
      and ((.currencies | length) > 0)
      and ((.currencies | map(.code) | unique | length) == (.currencies | length))
      and (
        all(.currencies[];
          (.code | type == "string")
          and ((.code | length) >= 3)
          and (.name | type == "string")
          and (.type == "fiat" or .type == "commodity")
          and ((.minorUnits == null) or (.minorUnits | type == "number"))
          and (.status.planned | type == "boolean")
          and (.status.deployed | type == "boolean")
          and (.status.transportActive | type == "boolean")
          and (.status.x402Ready | type == "boolean")
          and (.canonicalAssets | type == "object")
        )
      )
    ' "$PROJECT_ROOT/config/gru-iso4217-currency-manifest.json" &>/dev/null; then
      log_ok "gru-iso4217-currency-manifest.json: top-level manifest structure is valid"
    else
      log_err "gru-iso4217-currency-manifest.json: invalid top-level structure"
      ERRORS=$((ERRORS + 1))
    fi
  fi
fi
# Standards profile: (1) structural shape check, then (2) cross-file wiring —
# its canonicalChainId must equal both the transport overlay's and the
# currency manifest's, and its references must point at those exact files.
# The file itself is mandatory (log_err when missing); the jq checks are
# skipped when jq is not installed.
if [[ -f "$PROJECT_ROOT/config/gru-standards-profile.json" ]]; then
  log_ok "Found: config/gru-standards-profile.json"
  if command -v jq &>/dev/null; then
    if jq -e '
      (.name | type == "string")
      and (.profileId | type == "string")
      and (.version | type == "string")
      and (.updated | type == "string")
      and (.canonicalChainId | type == "number")
      and (.scope | type == "object")
      and (.paymentProfiles | type == "array")
      and (.baseTokenStandards | type == "array")
      and (.transportAndWrapperStandards | type == "array")
      and (.governanceAndPolicyStandards | type == "array")
    ' "$PROJECT_ROOT/config/gru-standards-profile.json" &>/dev/null; then
      log_ok "gru-standards-profile.json: top-level standards profile structure is valid"
    else
      log_err "gru-standards-profile.json: invalid top-level structure"
      ERRORS=$((ERRORS + 1))
    fi
    # --argjson injects the chain ids read from the two sibling config files.
    if jq -e '
      (.canonicalChainId == $activeChain)
      and (.canonicalChainId == $manifestChain)
      and (.references.transportOverlay == "config/gru-transport-active.json")
      and (.references.currencyManifest == "config/gru-iso4217-currency-manifest.json")
    ' \
      --argjson activeChain "$(jq -r '.system.canonicalChainId' "$PROJECT_ROOT/config/gru-transport-active.json")" \
      --argjson manifestChain "$(jq -r '.canonicalChainId' "$PROJECT_ROOT/config/gru-iso4217-currency-manifest.json")" \
      "$PROJECT_ROOT/config/gru-standards-profile.json" &>/dev/null; then
      log_ok "gru-standards-profile.json: canonical-chain and reference wiring matches active overlay + currency manifest"
    else
      log_err "gru-standards-profile.json: canonical-chain or reference wiring does not match active overlay / currency manifest"
      ERRORS=$((ERRORS + 1))
    fi
  fi
else
  log_err "Missing config/gru-standards-profile.json"
  ERRORS=$((ERRORS + 1))
fi
fi
if [[ -n "$OPTIONAL_ENV" ]]; then
@@ -162,6 +513,34 @@ if [[ -n "$OPTIONAL_ENV" ]]; then
done
fi
# DBIS institutional Digital Master Plan example JSON
# Parse-check the DBIS institutional example JSON files when both the sample
# data and the helper script are present; a parse failure counts as an error.
if [[ -f "$PROJECT_ROOT/config/dbis-institutional/examples/trust.json" && -x "$SCRIPT_DIR/validate-dbis-institutional-json.sh" ]]; then
  if ! bash "$SCRIPT_DIR/validate-dbis-institutional-json.sh" &>/dev/null; then
    log_err "DBIS institutional examples failed JSON parse"
    ERRORS=$((ERRORS + 1))
  else
    log_ok "DBIS institutional examples (JSON parse)"
  fi
fi
# Run strict JSON Schema validation only when the check-jsonschema CLI and
# the helper script are both available; a schema failure counts as an error.
if command -v check-jsonschema &>/dev/null && [[ -x "$SCRIPT_DIR/validate-dbis-institutional-schemas.sh" ]]; then
  if ! SCHEMA_STRICT=1 bash "$SCRIPT_DIR/validate-dbis-institutional-schemas.sh" &>/dev/null; then
    log_err "DBIS institutional JSON Schema validation failed (pip install check-jsonschema)"
    ERRORS=$((ERRORS + 1))
  else
    log_ok "DBIS institutional JSON Schemas (settlement-event, address-registry-entry)"
  fi
fi
# Explorer drift guard: compare the explorer repo's Chain 138 address
# inventory against smart-contracts-master.json when jq and the helper
# script are available; drift counts as an error.
if [[ -f "$PROJECT_ROOT/config/smart-contracts-master.json" ]] && command -v jq &>/dev/null && [[ -x "$SCRIPT_DIR/validate-explorer-chain138-inventory.sh" ]]; then
  if ! bash "$SCRIPT_DIR/validate-explorer-chain138-inventory.sh" &>/dev/null; then
    log_err "Explorer address-inventory Chain 138 drift (see validate-explorer-chain138-inventory.sh)"
    ERRORS=$((ERRORS + 1))
  else
    log_ok "Explorer address-inventory Chain 138 vs smart-contracts-master.json"
  fi
fi
if [[ $ERRORS -gt 0 ]]; then
log_err "Validation failed with $ERRORS error(s). Set VALIDATE_REQUIRED_FILES='path1 path2' to require specific files."
exit 1

View File

@@ -0,0 +1,389 @@
#!/usr/bin/env bash
# Validate the DBIS identity completion package.
# Usage:
#   bash scripts/validation/validate-dbis-identity-package.sh \
#     --package config/production/dbis-identity-public-did-package.json \
#     --secrets config/production/dbis-identity-public-did-secrets.env
#
# For template validation only:
#   bash scripts/validation/validate-dbis-identity-package.sh \
#     --package config/production/dbis-identity-public-did-package.example.json \
#     --secrets config/production/dbis-identity-public-did-secrets.example.env \
#     --allow-placeholders
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Defaults; overridable via --package / --secrets below.
PACKAGE_PATH="$PROJECT_ROOT/config/production/dbis-identity-public-did-package.json"
SECRETS_PATH="$PROJECT_ROOT/config/production/dbis-identity-public-did-secrets.env"
ALLOW_PLACEHOLDERS=false
PARTIAL_EXTERNAL_ALLOWED=false
log_info() { echo "[INFO] $1"; }
log_ok() { echo "[OK] $1"; }
log_warn() { echo "[WARN] $1"; }
log_err() { echo "[ERROR] $1"; }
while [[ $# -gt 0 ]]; do
  case "$1" in
    --package)
      # Guard the value: `shift 2` past the end of $@ would otherwise abort
      # under `set -e` with an unhelpful "shift count out of range" error.
      [[ $# -ge 2 ]] || { log_err "--package requires a value"; exit 1; }
      PACKAGE_PATH="$2"
      shift 2
      ;;
    --secrets)
      [[ $# -ge 2 ]] || { log_err "--secrets requires a value"; exit 1; }
      SECRETS_PATH="$2"
      shift 2
      ;;
    --allow-placeholders)
      ALLOW_PLACEHOLDERS=true
      shift
      ;;
    *)
      log_err "Unknown argument: $1"
      exit 1
      ;;
  esac
done
# Global tallies mutated by the check_* helpers defined below.
ERRORS=0
WARNINGS=0
# Record whether a required file exists: logs OK when present, otherwise
# logs an error and bumps the global ERRORS counter.
require_file() {
  local candidate="$1"
  if [[ ! -f "$candidate" ]]; then
    log_err "Missing file: $candidate"
    ERRORS=$((ERRORS + 1))
    return 0
  fi
  log_ok "Found: $candidate"
}
# Require a populated, non-placeholder value. Empty values are always
# errors; placeholder markers (<fill-me…>, CHANGEME, TODO) are downgraded
# to warnings when --allow-placeholders was given.
check_placeholder_string() {
  local label="$1" value="$2"
  if [[ -z "$value" ]]; then
    log_err "$label is empty"
    ERRORS=$((ERRORS + 1))
    return 0
  fi
  case "$value" in
    *"<fill-me"*|CHANGEME|TODO)
      if $ALLOW_PLACEHOLDERS; then
        log_warn "$label still contains a placeholder"
        WARNINGS=$((WARNINGS + 1))
      else
        log_err "$label still contains a placeholder"
        ERRORS=$((ERRORS + 1))
      fi
      ;;
    *)
      log_ok "$label is populated"
      ;;
  esac
}
# Like check_placeholder_string, but while the package is flagged as
# awaiting external endorser data (PARTIAL_EXTERNAL_ALLOWED=true),
# empty/placeholder values are only warnings instead of errors.
check_placeholder_string_maybe_partial() {
  local label="$1" value="$2"
  if ! $PARTIAL_EXTERNAL_ALLOWED; then
    check_placeholder_string "$label" "$value"
    return 0
  fi
  if [[ -z "$value" ]]; then
    log_warn "$label is empty while package is awaiting external endorser data"
    WARNINGS=$((WARNINGS + 1))
  elif [[ "$value" == *"<fill-me"* || "$value" == "CHANGEME" || "$value" == "TODO" ]]; then
    log_warn "$label still contains a placeholder while package is awaiting external endorser data"
    WARNINGS=$((WARNINGS + 1))
  else
    log_ok "$label is populated"
  fi
}
# Validate that a value looks like an Indy DID (16-32 base58 characters).
# Placeholder or empty values become warnings when placeholders are allowed
# or the package is awaiting external endorser data; a malformed value is a
# warning in partial mode and an error otherwise.
check_indy_did_format() {
  local label="$1" value="$2"
  case "$value" in
    *"<fill-me"*|CHANGEME|TODO)
      if $ALLOW_PLACEHOLDERS || $PARTIAL_EXTERNAL_ALLOWED; then
        log_warn "$label still contains a placeholder"
        WARNINGS=$((WARNINGS + 1))
      else
        log_err "$label still contains a placeholder"
        ERRORS=$((ERRORS + 1))
      fi
      return 0
      ;;
  esac
  if [[ -z "$value" ]]; then
    if $ALLOW_PLACEHOLDERS || $PARTIAL_EXTERNAL_ALLOWED; then
      log_warn "$label is empty while package is awaiting external endorser data"
      WARNINGS=$((WARNINGS + 1))
    else
      log_err "$label is empty"
      ERRORS=$((ERRORS + 1))
    fi
    return 0
  fi
  # Base58 alphabet: no 0, O, I, or l.
  if [[ "$value" =~ ^[1-9A-HJ-NP-Za-km-z]{16,32}$ ]]; then
    log_ok "$label format looks like an Indy DID"
  elif $PARTIAL_EXTERNAL_ALLOWED; then
    log_warn "$label does not yet look like a valid Indy DID"
    WARNINGS=$((WARNINGS + 1))
  else
    log_err "$label does not look like a valid Indy DID"
    ERRORS=$((ERRORS + 1))
  fi
}
# Validate that a value is usable as a connection identifier (16+ hex/dash
# characters). Placeholder/empty values become warnings when placeholders
# are allowed or the package is awaiting external endorser data; a
# malformed value is a warning in partial mode and an error otherwise.
check_uuid_like() {
  local label="$1" value="$2"
  case "$value" in
    *"<fill-me"*|CHANGEME|TODO)
      if $ALLOW_PLACEHOLDERS || $PARTIAL_EXTERNAL_ALLOWED; then
        log_warn "$label still contains a placeholder"
        WARNINGS=$((WARNINGS + 1))
      else
        log_err "$label still contains a placeholder"
        ERRORS=$((ERRORS + 1))
      fi
      return 0
      ;;
  esac
  if [[ -z "$value" ]]; then
    if $ALLOW_PLACEHOLDERS || $PARTIAL_EXTERNAL_ALLOWED; then
      log_warn "$label is empty while package is awaiting external endorser data"
      WARNINGS=$((WARNINGS + 1))
    else
      log_err "$label is empty"
      ERRORS=$((ERRORS + 1))
    fi
    return 0
  fi
  if [[ "$value" =~ ^[0-9a-fA-F-]{16,}$ ]]; then
    log_ok "$label format looks connection-id compatible"
  elif $PARTIAL_EXTERNAL_ALLOWED; then
    log_warn "$label does not yet look like a valid connection identifier"
    WARNINGS=$((WARNINGS + 1))
  else
    log_err "$label does not look like a valid connection identifier"
    ERRORS=$((ERRORS + 1))
  fi
}
# Enforce the DBIS-ID-GOV-YYYY-NNN change-control reference format.
check_change_control_ref() {
  local ref="$1"
  if [[ ! "$ref" =~ ^DBIS-ID-GOV-[0-9]{4}-[0-9]{3}$ ]]; then
    log_err "governance.changeControlRef must match DBIS-ID-GOV-YYYY-NNN"
    ERRORS=$((ERRORS + 1))
    return 0
  fi
  log_ok "governance.changeControlRef format is valid"
}
# Enforce the N-of-M quorum notation for the endorser governance model.
check_quorum_format() {
  local quorum="$1"
  if [[ ! "$quorum" =~ ^[0-9]+-of-[0-9]+$ ]]; then
    log_err "governance.endorserGovernanceModel.quorum must match N-of-M"
    ERRORS=$((ERRORS + 1))
    return 0
  fi
  log_ok "governance.endorserGovernanceModel.quorum format is valid"
}
# Resolve an env-var by name (indirect expansion) and require it to be
# populated. Empty is a warning when --allow-placeholders was given,
# otherwise an error; non-empty values are re-checked for placeholders.
check_env_var() {
  local label="$1" name="$2"
  local resolved="${!name:-}"
  if [[ -n "$resolved" ]]; then
    check_placeholder_string "$label env var $name" "$resolved"
    return 0
  fi
  if $ALLOW_PLACEHOLDERS; then
    log_warn "$label env var $name is empty"
    WARNINGS=$((WARNINGS + 1))
  else
    log_err "$label env var $name is empty"
    ERRORS=$((ERRORS + 1))
  fi
}
# --- Preconditions -----------------------------------------------------------
require_file "$PACKAGE_PATH"
require_file "$SECRETS_PATH"
if ! command -v jq >/dev/null 2>&1; then
  log_err "jq is required"
  exit 1
fi
# Stop early when the package/secrets files are missing; everything below
# reads them.
if [[ $ERRORS -gt 0 ]]; then
  exit 1
fi
# --- Structural check: required fields and types in a single jq pass ---------
if jq -e '
  (.schemaVersion | type == "string") and
  (.programId | type == "string") and
  (.packageStatus | type == "string") and
  (.ariesAgent.adminUrl | type == "string") and
  (.ariesAgent.didcommUrl | type == "string") and
  (.ariesAgent.walletType | type == "string") and
  (.ariesAgent.adminAuthMode | type == "string") and
  (.ariesAgent.adminApiKeyEnv | type == "string") and
  (.ledger.type | type == "string") and
  (.ledger.targetNetwork | type == "string") and
  (.ledger.trustScope | type == "string") and
  (.ledger.poolName | type == "string") and
  (.ledger.genesisSource | type == "string") and
  (.ledger.didMethod | type == "string") and
  (.ledger.nymWriteMode | type == "string") and
  (.governance.governanceVersion | type == "string") and
  (.governance.changeControlRef | type == "string") and
  (.governance.changeControlFormat | type == "string") and
  (.governance.operatorOwner | type == "string") and
  (.governance.approvalOwner | type == "string") and
  (.governance.endorserGovernanceModel.type | type == "string") and
  (.governance.endorserGovernanceModel.quorum | type == "string") and
  (.governance.endorserGovernanceModel.custodians | type == "array") and
  (.governance.endorserGovernanceModel.custodians | length >= 3) and
  (.governance.endorserGovernanceModel.singleKeyDidControl | type == "string") and
  (.governance.endorserGovernanceModel.currentPhase | type == "string") and
  (.governance.endorserGovernanceModel.futurePhases | type == "array") and
  (.governance.endorserGovernanceModel.futurePhases | length >= 1) and
  (.roles.author.alias | type == "string") and
  (.roles.author.connectionIdEnv | type == "string") and
  (.roles.endorser.alias | type == "string") and
  (.roles.endorser.did | type == "string") and
  (.roles.endorser.connectionIdEnv | type == "string") and
  (.anoncreds.schemas | type == "array") and
  (.anoncreds.schemas | length >= 1) and
  (.anoncreds.verificationProfiles | type == "array") and
  (.anoncreds.verificationProfiles | length >= 1) and
  (.evidence.outputDir | type == "string") and
  (.evidence.requiredArtifacts | type == "array") and
  (.evidence.requiredArtifacts | length >= 1)
' "$PACKAGE_PATH" >/dev/null; then
  log_ok "Package JSON structure is valid"
else
  log_err "Package JSON structure is invalid"
  ERRORS=$((ERRORS + 1))
fi
# While the package is awaiting an external endorser, gaps in
# endorser-provided fields are downgraded to warnings (see the
# *_maybe_partial / format helpers above).
PACKAGE_STATUS="$(jq -r '.packageStatus' "$PACKAGE_PATH")"
if [[ "$PACKAGE_STATUS" == "awaiting-external-endorser" ]]; then
  PARTIAL_EXTERNAL_ALLOWED=true
  log_info "Package status allows external-governance gaps to remain warnings"
fi
# --- Field-level placeholder/format checks -----------------------------------
# NOTE(review): jq -r prints the literal string "null" for absent keys, so a
# missing field would read here as the populated value "null" — confirm that
# this is acceptable given the structural check above.
check_placeholder_string "schemaVersion" "$(jq -r '.schemaVersion' "$PACKAGE_PATH")"
check_placeholder_string "programId" "$(jq -r '.programId' "$PACKAGE_PATH")"
check_placeholder_string "ariesAgent.adminUrl" "$(jq -r '.ariesAgent.adminUrl' "$PACKAGE_PATH")"
check_placeholder_string "ariesAgent.didcommUrl" "$(jq -r '.ariesAgent.didcommUrl' "$PACKAGE_PATH")"
check_placeholder_string "ariesAgent.adminAuthMode" "$(jq -r '.ariesAgent.adminAuthMode' "$PACKAGE_PATH")"
check_placeholder_string "ledger.targetNetwork" "$(jq -r '.ledger.targetNetwork' "$PACKAGE_PATH")"
check_placeholder_string "ledger.trustScope" "$(jq -r '.ledger.trustScope' "$PACKAGE_PATH")"
check_placeholder_string "ledger.poolName" "$(jq -r '.ledger.poolName' "$PACKAGE_PATH")"
check_placeholder_string "ledger.genesisSource" "$(jq -r '.ledger.genesisSource' "$PACKAGE_PATH")"
check_placeholder_string "ledger.didMethod" "$(jq -r '.ledger.didMethod' "$PACKAGE_PATH")"
check_placeholder_string "ledger.nymWriteMode" "$(jq -r '.ledger.nymWriteMode' "$PACKAGE_PATH")"
check_placeholder_string "governance.governanceVersion" "$(jq -r '.governance.governanceVersion' "$PACKAGE_PATH")"
# changeControlRef gets both a placeholder check and a format check.
CHANGE_CONTROL_REF="$(jq -r '.governance.changeControlRef' "$PACKAGE_PATH")"
check_placeholder_string "governance.changeControlRef" "$CHANGE_CONTROL_REF"
check_change_control_ref "$CHANGE_CONTROL_REF"
check_placeholder_string "governance.changeControlFormat" "$(jq -r '.governance.changeControlFormat' "$PACKAGE_PATH")"
check_placeholder_string "governance.operatorOwner" "$(jq -r '.governance.operatorOwner' "$PACKAGE_PATH")"
check_placeholder_string "governance.approvalOwner" "$(jq -r '.governance.approvalOwner' "$PACKAGE_PATH")"
check_placeholder_string "governance.endorserGovernanceModel.type" "$(jq -r '.governance.endorserGovernanceModel.type' "$PACKAGE_PATH")"
GOV_QUORUM="$(jq -r '.governance.endorserGovernanceModel.quorum' "$PACKAGE_PATH")"
check_placeholder_string "governance.endorserGovernanceModel.quorum" "$GOV_QUORUM"
check_quorum_format "$GOV_QUORUM"
check_placeholder_string "governance.endorserGovernanceModel.singleKeyDidControl" "$(jq -r '.governance.endorserGovernanceModel.singleKeyDidControl' "$PACKAGE_PATH")"
check_placeholder_string "governance.endorserGovernanceModel.currentPhase" "$(jq -r '.governance.endorserGovernanceModel.currentPhase' "$PACKAGE_PATH")"
if jq -e '(.governance.endorserGovernanceModel.custodians | type == "array") and (.governance.endorserGovernanceModel.custodians | length >= 3)' "$PACKAGE_PATH" >/dev/null; then
  log_ok "governance.endorserGovernanceModel.custodians has at least 3 entries"
else
  log_err "governance.endorserGovernanceModel.custodians must have at least 3 entries"
  ERRORS=$((ERRORS + 1))
fi
if jq -e '(.governance.endorserGovernanceModel.futurePhases | type == "array") and (.governance.endorserGovernanceModel.futurePhases | length >= 1)' "$PACKAGE_PATH" >/dev/null; then
  log_ok "governance.endorserGovernanceModel.futurePhases is populated"
else
  log_err "governance.endorserGovernanceModel.futurePhases must contain at least one entry"
  ERRORS=$((ERRORS + 1))
fi
# Role fields: author/endorser DID data may still be pending externally, so
# the *_maybe_partial variants apply.
check_placeholder_string "roles.author.alias" "$(jq -r '.roles.author.alias' "$PACKAGE_PATH")"
AUTHOR_PUBLIC_DID="$(jq -r '.roles.author.publicDid' "$PACKAGE_PATH")"
ENDORSER_DID="$(jq -r '.roles.endorser.did' "$PACKAGE_PATH")"
check_placeholder_string_maybe_partial "roles.author.publicDid" "$AUTHOR_PUBLIC_DID"
check_placeholder_string_maybe_partial "roles.author.verkey" "$(jq -r '.roles.author.verkey' "$PACKAGE_PATH")"
check_placeholder_string "roles.endorser.alias" "$(jq -r '.roles.endorser.alias' "$PACKAGE_PATH")"
check_placeholder_string_maybe_partial "roles.endorser.did" "$ENDORSER_DID"
check_placeholder_string "anoncreds.schemas[0].name" "$(jq -r '.anoncreds.schemas[0].name' "$PACKAGE_PATH")"
check_placeholder_string "anoncreds.schemas[0].version" "$(jq -r '.anoncreds.schemas[0].version' "$PACKAGE_PATH")"
if [[ -n "$AUTHOR_PUBLIC_DID" ]]; then
  check_indy_did_format "roles.author.publicDid" "$AUTHOR_PUBLIC_DID"
fi
if [[ -n "$ENDORSER_DID" && "$ENDORSER_DID" != *"<fill-me"* ]]; then
  check_indy_did_format "roles.endorser.did" "$ENDORSER_DID"
fi
# Absolute genesisSource paths are only warnable: the ledger genesis file
# may legitimately live on another machine.
GENESIS_SOURCE="$(jq -r '.ledger.genesisSource' "$PACKAGE_PATH")"
if [[ "$GENESIS_SOURCE" == /* ]]; then
  if [[ -f "$GENESIS_SOURCE" ]]; then
    log_ok "genesisSource file exists: $GENESIS_SOURCE"
  else
    log_warn "genesisSource file not present on this machine: $GENESIS_SOURCE"
    WARNINGS=$((WARNINGS + 1))
  fi
fi
# Export everything from the secrets env file so the connectionIdEnv /
# adminApiKeyEnv names referenced by the package can be resolved below.
set -a
source "$SECRETS_PATH"
set +a
AUTHOR_ENV_NAME="$(jq -r '.roles.author.connectionIdEnv' "$PACKAGE_PATH")"
ENDORSER_ENV_NAME="$(jq -r '.roles.endorser.connectionIdEnv' "$PACKAGE_PATH")"
ADMIN_KEY_ENV_NAME="$(jq -r '.ariesAgent.adminApiKeyEnv' "$PACKAGE_PATH")"
ADMIN_AUTH_MODE="$(jq -r '.ariesAgent.adminAuthMode' "$PACKAGE_PATH")"
# The admin API key is only mandatory for apiKey auth mode.
if [[ "$ADMIN_AUTH_MODE" == "apiKey" ]]; then
  check_env_var "Configured admin API key" "$ADMIN_KEY_ENV_NAME"
else
  log_info "Skipping admin API key requirement because adminAuthMode=$ADMIN_AUTH_MODE"
fi
# Author connection id: empty is always only a warning.
if [[ -n "${!AUTHOR_ENV_NAME:-}" ]]; then
  check_env_var "Author connection" "$AUTHOR_ENV_NAME"
  check_uuid_like "Author connection" "${!AUTHOR_ENV_NAME}"
else
  log_warn "Author connection env var $AUTHOR_ENV_NAME is empty"
  WARNINGS=$((WARNINGS + 1))
fi
# Endorser connection id: empty is a warning only while awaiting external
# endorser data, otherwise an error.
if [[ -n "${!ENDORSER_ENV_NAME:-}" ]]; then
  check_env_var "Endorser connection" "$ENDORSER_ENV_NAME"
  check_uuid_like "Endorser connection" "${!ENDORSER_ENV_NAME}"
else
  if $PARTIAL_EXTERNAL_ALLOWED; then
    log_warn "Endorser connection env var $ENDORSER_ENV_NAME is empty while package is awaiting external endorser data"
    WARNINGS=$((WARNINGS + 1))
  else
    log_err "Endorser connection env var $ENDORSER_ENV_NAME is empty"
    ERRORS=$((ERRORS + 1))
  fi
fi
# --- Summary / exit status ---------------------------------------------------
if [[ $ERRORS -gt 0 ]]; then
  log_err "Identity package validation failed with $ERRORS error(s) and $WARNINGS warning(s)"
  exit 1
fi
if [[ $WARNINGS -gt 0 ]]; then
  log_warn "Identity package validation passed with $WARNINGS warning(s)"
else
  log_ok "Identity package validation passed"
fi

View File

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Validate JSON syntax for DBIS institutional examples (no ajv required).
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
EX="$ROOT/config/dbis-institutional/examples"
# Without nullglob, an unmatched pattern is passed literally to python3,
# which fails with a confusing "No such file or directory" error; expand
# up front and report an empty example set explicitly instead.
shopt -s nullglob
files=("$EX"/*.json)
if (( ${#files[@]} == 0 )); then
  echo "error: no JSON example files found in $EX" >&2
  exit 1
fi
for f in "${files[@]}"; do
  # json.tool exits non-zero on a parse error, aborting via set -e.
  python3 -m json.tool "$f" >/dev/null
  echo "OK $f"
done
echo "All institutional example JSON files parse."

View File

@@ -0,0 +1,64 @@
#!/usr/bin/env bash
# Validate dbis-institutional examples against JSON Schemas (draft 2020-12).
# Uses `check-jsonschema` when available (pip install check-jsonschema).
# In CI, install first: pip install check-jsonschema
#
# Env:
#   SCHEMA_STRICT=1   exit 1 if check-jsonschema is missing (default: skip with 0)
#
set -euo pipefail
# Repo root derived from this script's location (scripts/validation/..).
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SCHEMA_DIR="$ROOT/config/dbis-institutional/schemas"
EX_DIR="$ROOT/config/dbis-institutional/examples"
# validate_json_array EX_FILE SCHEMA_FILE LABEL
#######################################
# Validate every element of a JSON-array example file against a schema,
# one element at a time (check-jsonschema validates whole documents).
# Arguments: $1 example file (skipped when absent), $2 schema file, $3 label
# Outputs:   "OK <label> (<n> items)" on success
# Exits:     non-zero when jq is missing, the file is empty, or any
#            element fails schema validation (via set -e).
#######################################
validate_json_array() {
  local ex_file="$1" schema_file="$2" label="$3"
  if [[ ! -f "$ex_file" ]]; then
    return 0
  fi
  if ! command -v jq &>/dev/null; then
    echo "error: jq is required for $label validation" >&2
    exit 1
  fi
  local n
  n=$(jq 'length' "$ex_file")
  if [[ "${n:-0}" -lt 1 ]]; then
    echo "error: $ex_file must be a non-empty array" >&2
    exit 1
  fi
  local batch_tmp idx
  batch_tmp="$(mktemp)"
  # Trap EXIT as well as RETURN: when check-jsonschema fails below, set -e
  # terminates the whole script and a RETURN trap alone would never fire,
  # leaking the temp file. (This script installs no other EXIT trap.)
  trap 'rm -f "$batch_tmp"' RETURN EXIT
  idx=0
  while IFS= read -r line; do
    # printf instead of echo: element text must be written verbatim.
    printf '%s\n' "$line" >"$batch_tmp"
    check-jsonschema --schemafile "$schema_file" "$batch_tmp"
    idx=$((idx + 1))
  done < <(jq -c '.[]' "$ex_file")
  echo "OK $label ($idx items)"
  rm -f "$batch_tmp"
  trap - RETURN EXIT
}
# When the validator CLI is absent, either hard-fail (SCHEMA_STRICT=1, as
# used in CI) or skip gracefully with exit 0 — plain JSON parsing is still
# covered by validate-dbis-institutional-json.sh.
if ! command -v check-jsonschema &>/dev/null; then
  if [[ "${SCHEMA_STRICT:-0}" == "1" ]]; then
    echo "error: check-jsonschema not found; pip install check-jsonschema" >&2
    exit 1
  fi
  echo "skip: check-jsonschema not installed (pip install check-jsonschema); JSON parse still covered by validate-dbis-institutional-json.sh"
  exit 0
fi
# Single-document examples are validated directly; batch files (arrays) go
# through validate_json_array element-by-element. Any failure aborts via
# set -e, so reaching the final echo means everything passed.
check-jsonschema --schemafile "$SCHEMA_DIR/settlement-event.schema.json" "$EX_DIR/settlement-event.example.json"
check-jsonschema --schemafile "$SCHEMA_DIR/settlement-event.schema.json" "$EX_DIR/settlement-event.chain138-primary.example.json"
check-jsonschema --schemafile "$SCHEMA_DIR/settlement-event.schema.json" "$EX_DIR/settlement-event.min.json"
validate_json_array "$EX_DIR/settlement-events-batch.example.json" "$SCHEMA_DIR/settlement-event.schema.json" "settlement-events-batch"
check-jsonschema --schemafile "$SCHEMA_DIR/address-registry-entry.schema.json" "$EX_DIR/address-registry-entry.example.json"
validate_json_array "$EX_DIR/address-registry-entries-batch.example.json" "$SCHEMA_DIR/address-registry-entry.schema.json" "address-registry-entries-batch"
check-jsonschema --schemafile "$SCHEMA_DIR/trust.schema.json" "$EX_DIR/trust.json"
check-jsonschema --schemafile "$SCHEMA_DIR/governance.schema.json" "$EX_DIR/governance.json"
check-jsonschema --schemafile "$SCHEMA_DIR/policy-manifest.schema.json" "$EX_DIR/policy.json"
echo "OK dbis-institutional schema validation (settlement-event, settlement-event.chain138-primary, settlement-events-batch, address-registry-entry, address-registry-entries-batch, trust, governance, policy-manifest)"

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Compare explorer-monorepo Chain 138 keys in address-inventory.json to
# config/smart-contracts-master.json (G3 drift guard).
# Usage: bash scripts/validation/validate-explorer-chain138-inventory.sh
# Requires: jq
set -euo pipefail
# Resolve paths relative to this script so the guard works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# MASTER is the canonical source of truth; INV is the explorer copy under test.
MASTER="${PROJECT_ROOT}/config/smart-contracts-master.json"
INV="${PROJECT_ROOT}/explorer-monorepo/config/address-inventory.json"
# Lowercase a value so address comparisons are case-insensitive.
norm() { printf '%s\n' "$1" | tr '[:upper:]' '[:lower:]'; }
# jq is required for JSON extraction; its absence downgrades to a soft skip.
if ! command -v jq &>/dev/null; then
  echo "[WARN] jq not installed; skip explorer Chain 138 inventory alignment check"
  exit 0
fi
# The master registry is mandatory; a missing explorer inventory is only a skip.
[[ -f "$MASTER" ]] || { echo "[ERROR] Missing $MASTER"; exit 1; }
if [[ ! -f "$INV" ]]; then
  echo "[WARN] Missing $INV; skip explorer inventory check"
  exit 0
fi
# Running tally of mismatches; non-zero at the end fails the guard.
ERR=0
# Compare one explorer-inventory key against the master registry value
# (case-insensitive via norm).
# Globals: MASTER, INV (read); ERR (incremented on any discrepancy)
# Arguments: $1 - inventory key; $2 - jq path into the master file
expect_match() {
  local key="$1"
  local jqpath="$2"
  local exp
  exp=$(jq -r "$jqpath" "$MASTER")
  # Guard against a missing/renamed master path: jq -r emits the string
  # "null" (or nothing), which previously surfaced as a confusing
  # "mismatch: master=null" instead of pointing at the master file.
  if [[ -z "$exp" || "$exp" == "null" ]]; then
    echo "[ERROR] master missing value at $jqpath (for inventory key $key)"
    ERR=$((ERR + 1))
    return
  fi
  local got
  got=$(jq -r --arg k "$key" '.inventory[$k] // empty' "$INV")
  if [[ -z "$got" ]]; then
    echo "[ERROR] inventory missing key: $key"
    ERR=$((ERR + 1))
    return
  fi
  if [[ "$(norm "$exp")" != "$(norm "$got")" ]]; then
    echo "[ERROR] $key mismatch: inventory=$got master=$exp"
    ERR=$((ERR + 1))
  fi
}
# Canonical Chain 138 contracts, checked under every alias the explorer uses.
expect_match "CCIP_ROUTER_138" '.chains["138"].contracts.CCIP_Router'
expect_match "CCIP_ROUTER_ADDRESS" '.chains["138"].contracts.CCIP_Router'
expect_match "CCIPWETH9_BRIDGE_138" '.chains["138"].contracts.CCIPWETH9_Bridge'
expect_match "CCIPWETH9_BRIDGE" '.chains["138"].contracts.CCIPWETH9_Bridge'
expect_match "LINK_TOKEN_138" '.chains["138"].contracts.LINK'
expect_match "ISO20022_ROUTER" '.chains["138"].contracts.ISO20022Router'
# Any drift blocks CI until one side of the pair is updated.
if [[ $ERR -gt 0 ]]; then
  echo "[ERROR] Explorer address-inventory Chain 138 drift ($ERR). Update explorer-monorepo/config/address-inventory.json or smart-contracts-master.json."
  exit 1
fi
echo "[OK] Explorer address-inventory Chain 138 keys match smart-contracts-master.json"

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env bash
# Validate JVMTM / regulatory closure example JSON against local schemas (draft 2020-12).
# Uses check-jsonschema when available; SCHEMA_STRICT=1 fails if missing.
set -euo pipefail
# Repo root is two levels above this script's directory.
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
BASE="$ROOT/config/jvmtm-regulatory-closure"
SCHEMA="$BASE/schemas"
EX="$BASE/examples"
# Detect the optional validator once; later sections branch on this flag.
HAVE_CHECK_JSONSCHEMA=1
if ! command -v check-jsonschema &>/dev/null; then
  HAVE_CHECK_JSONSCHEMA=0
  # Strict mode refuses to silently degrade to pack-only validation.
  if [[ "${SCHEMA_STRICT:-0}" == "1" ]]; then
    echo "error: check-jsonschema not found; pip install check-jsonschema" >&2
    exit 1
  fi
  echo "skip: check-jsonschema not installed (pip install check-jsonschema)"
fi
# Validate one example file ($2) against one schema file ($1).
validate_pair() {
  check-jsonschema --schemafile "$1" "$2"
}
if [[ "$HAVE_CHECK_JSONSCHEMA" == "1" ]]; then
  # These pairs all follow the <name>.schema.json / <name>.example.json convention.
  for name in \
    daily-3way-reconciliation-report \
    three-way-reconciliation-result \
    prefunding-proof \
    pre-settlement-ack \
    sample-exception-event \
    kyt-screening-result \
    recovery-time-report \
    dr-simulation-report \
    real-time-balance-snapshot \
    transaction-compliance-execution; do
    validate_pair "$SCHEMA/$name.schema.json" "$EX/$name.example.json"
  done
  # The blocked variant reuses the transaction-compliance-execution schema.
  validate_pair "$SCHEMA/transaction-compliance-execution.schema.json" "$EX/transaction-compliance-execution.blocked.example.json"
fi
# The transaction-grade pack checks always run and require python3.
if ! command -v python3 &>/dev/null; then
  echo "error: python3 not found; required for JVMTM transaction-grade pack validation" >&2
  exit 1
fi
python3 "$ROOT/scripts/validation/validate-jvmtm-transaction-compliance-pack.py"
if [[ "$HAVE_CHECK_JSONSCHEMA" == "1" ]]; then
  echo "OK jvmtm-regulatory-closure schema validation (11 example/schema pairs + transaction-grade pack checks)"
else
  echo "OK jvmtm-regulatory-closure transaction-grade pack validation (schema checks skipped: check-jsonschema not installed)"
fi

View File

@@ -0,0 +1,341 @@
#!/usr/bin/env python3
"""Validate the JVMTM transaction-grade compliance pack."""
from __future__ import annotations

import csv
import io
import json
import sys
from pathlib import Path
from typing import NoReturn
# Enumerations shared by the matrix and execution-example validators.
RAIL_MODES = {"chain138-primary", "swift", "hybrid", "internal-only"}
BLOCKING_LEVELS = {"HARD_STOP", "ESCALATE", "POST_EVENT"}
DECISION_STATUSES = {"READY", "BLOCKED", "ESCALATE"}
CONTROL_STATUSES = {"PASS", "FAIL", "PENDING", "WAIVED"}
EVIDENCE_REF_TYPES = {"repo-path", "runtime-slot", "archive-path", "external-ref"}
# Fields every control row must carry in the canonical JSON matrix.
REQUIRED_CONTROL_FIELDS = [
    "control_id",
    "phase",
    "domain",
    "requirement",
    "validation_method",
    "blocking_level",
    "applies_to_rail",
    "source_audit_rows",
    "repo_evidence_artifacts",
    "validator_command",
    "failure_action",
    "high_value_override",
    "notes",
]
# The CSV export mirrors the control fields exactly: derive the header list
# instead of maintaining a second hand-written copy that could silently drift.
CSV_FIELDNAMES = list(REQUIRED_CONTROL_FIELDS)
def fail(message: str) -> NoReturn:
    """Abort validation with exit status 1 and an ``error:``-prefixed message.

    Annotated ``NoReturn`` (it always raises SystemExit) so type checkers
    understand that callers such as load_json never fall through after fail().
    """
    raise SystemExit(f"error: {message}")
def load_json(path: Path) -> dict:
    """Read and parse a JSON file, aborting via fail() on a missing or invalid file."""
    try:
        raw = path.read_text(encoding="utf-8")
    except FileNotFoundError:
        fail(f"missing JSON file: {path}")
    try:
        return json.loads(raw)
    except json.JSONDecodeError as exc:
        fail(f"invalid JSON in {path}: {exc}")
def format_artifacts(artifacts: list[dict[str, str]]) -> str:
    """Render evidence artifacts as ``type:ref`` entries joined with `` | ``."""
    rendered = []
    for entry in artifacts:
        rendered.append("{}:{}".format(entry["artifact_type"], entry["ref"]))
    return " | ".join(rendered)
def render_csv(matrix: dict) -> str:
    """Serialize matrix controls to the canonical CSV text (LF line endings)."""
    out = io.StringIO(newline="")
    writer = csv.DictWriter(out, fieldnames=CSV_FIELDNAMES, lineterminator="\n")
    writer.writeheader()
    # Fields whose JSON value is a list rendered with the " | " separator.
    joined_fields = {"applies_to_rail", "source_audit_rows"}
    for control in matrix["controls"]:
        row = {}
        for field in CSV_FIELDNAMES:
            value = control[field]
            if field in joined_fields:
                row[field] = " | ".join(value)
            elif field == "repo_evidence_artifacts":
                row[field] = format_artifacts(value)
            else:
                row[field] = value
        writer.writerow(row)
    return out.getvalue()
def validate_evidence_ref(ref: dict, label: str) -> None:
    """Check an evidence reference's shape, type enum, and optional sha256."""
    if not isinstance(ref, dict):
        fail(f"{label} must be an object")
    for key in ("artifact_type", "ref"):
        value = ref.get(key)
        if not isinstance(value, str) or not value.strip():
            fail(f"{label} missing non-empty {key}")
    if ref["artifact_type"] not in EVIDENCE_REF_TYPES:
        fail(f"{label} uses unsupported artifact_type {ref['artifact_type']}")
    if "sha256" in ref:
        digest = ref["sha256"]
        digest_ok = (
            isinstance(digest, str)
            and len(digest) == 64
            and all(c in "0123456789abcdefABCDEF" for c in digest)
        )
        if not digest_ok:
            fail(f"{label} sha256 must be a 64-character hex string")
def validate_pack_reference(ref: dict, label: str, repo_root: Path, slot_refs: set[str]) -> None:
    """Validate an evidence ref and resolve repo-path / runtime-slot targets."""
    validate_evidence_ref(ref, label)
    kind = ref["artifact_type"]
    target = ref["ref"]
    if kind == "repo-path" and not (repo_root / target).exists():
        fail(f"{label} repo-path does not exist: {target}")
    if kind == "runtime-slot" and target not in slot_refs:
        fail(f"{label} runtime-slot does not exist in the matrix: {target}")
def validate_execution_example(
    path: Path,
    control_ids: set[str],
    expected_status: str,
    matrix_version: str,
    repo_root: Path,
    slot_refs: set[str],
) -> None:
    """Validate one execution example file against the canonical matrix.

    Checks required fields, enum values, matrix-version pinning, reference
    resolution, per-control results, and the READY/BLOCKED invariants.
    Aborts via fail() (SystemExit) on the first violation.
    """
    payload = load_json(path)
    # Fields every execution example must carry regardless of decision status.
    required_top_level = [
        "schema_version",
        "matrix_version",
        "transaction_id",
        "correlation_id",
        "rail_mode",
        "amount",
        "currency",
        "decision_status",
        "decision_reason",
        "validated_at",
        "approved_by",
        "instruction_ref",
        "control_results",
    ]
    for field in required_top_level:
        if field not in payload:
            fail(f"{path} missing required field {field}")
    if payload["decision_status"] not in DECISION_STATUSES:
        fail(f"{path} uses unsupported decision_status {payload['decision_status']}")
    if payload["rail_mode"] not in RAIL_MODES:
        fail(f"{path} uses unsupported rail_mode {payload['rail_mode']}")
    if payload["decision_status"] != expected_status:
        fail(f"{path} decision_status expected {expected_status} but found {payload['decision_status']}")
    # Examples must be pinned to the canonical matrix version.
    if payload["matrix_version"] != matrix_version:
        fail(f"{path} matrix_version {payload['matrix_version']} does not match canonical matrix_version {matrix_version}")
    validate_pack_reference(payload["instruction_ref"], f"{path}:instruction_ref", repo_root, slot_refs)
    # settlement_event_ref is optional here; READY/BLOCKED rules below decide.
    if "settlement_event_ref" in payload:
        validate_pack_reference(payload["settlement_event_ref"], f"{path}:settlement_event_ref", repo_root, slot_refs)
    if not isinstance(payload["control_results"], list) or not payload["control_results"]:
        fail(f"{path} control_results must be a non-empty array")
    # Each control may appear at most once and must reference a known control_id.
    seen = set()
    for index, result in enumerate(payload["control_results"]):
        label = f"{path}:control_results[{index}]"
        if not isinstance(result, dict):
            fail(f"{label} must be an object")
        for key in ("control_id", "status", "blocking", "validated_at", "validator_ref", "evidence_refs"):
            if key not in result:
                fail(f"{label} missing required field {key}")
        control_id = result["control_id"]
        if control_id not in control_ids:
            fail(f"{label} references unknown control_id {control_id}")
        if control_id in seen:
            fail(f"{path} repeats control_id {control_id}")
        seen.add(control_id)
        if result["status"] not in CONTROL_STATUSES:
            fail(f"{label} uses unsupported status {result['status']}")
        if result["blocking"] not in BLOCKING_LEVELS:
            fail(f"{label} uses unsupported blocking value {result['blocking']}")
        if not isinstance(result["evidence_refs"], list) or not result["evidence_refs"]:
            fail(f"{label} evidence_refs must be a non-empty array")
        for ref_index, evidence_ref in enumerate(result["evidence_refs"]):
            validate_pack_reference(evidence_ref, f"{label}:evidence_refs[{ref_index}]", repo_root, slot_refs)
    # READY examples must prove settlement linkage and passing gate controls.
    if expected_status == "READY":
        if "settlement_event_ref" not in payload:
            fail(f"{path} must include settlement_event_ref for the READY example")
        statuses = {result["control_id"]: result["status"] for result in payload["control_results"]}
        if statuses.get("PT-02") != "PASS" or statuses.get("TX-02") != "PASS":
            fail(f"{path} must show PT-02 and TX-02 as PASS for READY examples")
    # BLOCKED examples are pre-execution: no settlement ref, failing gate controls.
    if expected_status == "BLOCKED":
        if "settlement_event_ref" in payload:
            fail(f"{path} should omit settlement_event_ref for the BLOCKED pre-execution example")
        statuses = {result["control_id"]: result["status"] for result in payload["control_results"]}
        if statuses.get("PT-02") != "FAIL":
            fail(f"{path} must show PT-02 as FAIL for BLOCKED examples")
        if statuses.get("TX-02") not in {"FAIL", "PENDING"}:
            fail(f"{path} must show TX-02 as FAIL or PENDING for BLOCKED examples")
def main() -> int:
    """Validate the whole transaction-grade compliance pack; return 0 on success."""
    # Resolve pack paths relative to the repo root (two levels up from this file).
    repo_root = Path(__file__).resolve().parents[2]
    config_dir = repo_root / "config/jvmtm-regulatory-closure"
    matrix_path = config_dir / "transaction-compliance-matrix.json"
    csv_path = config_dir / "transaction-compliance-matrix.csv"
    markdown_path = config_dir / "JVMTM_TRANSACTION_GRADE_COMPLIANCE_MATRIX.md"
    schema_path = config_dir / "schemas/transaction-compliance-execution.schema.json"
    ready_example_path = config_dir / "examples/transaction-compliance-execution.example.json"
    blocked_example_path = config_dir / "examples/transaction-compliance-execution.blocked.example.json"
    # All pack files must exist before any content validation begins.
    for path in (matrix_path, csv_path, markdown_path, schema_path, ready_example_path, blocked_example_path):
        if not path.exists():
            fail(f"missing required pack file: {path}")
    matrix = load_json(matrix_path)
    # Top-level matrix invariants.
    if matrix.get("schema_version") != 1:
        fail(f"{matrix_path} schema_version must equal 1")
    if not isinstance(matrix.get("matrix_version"), str) or not matrix["matrix_version"]:
        fail(f"{matrix_path} matrix_version must be a non-empty string")
    if not isinstance(matrix.get("runtime_slots"), list) or not matrix["runtime_slots"]:
        fail(f"{matrix_path} runtime_slots must be a non-empty array")
    if not isinstance(matrix.get("controls"), list) or not matrix["controls"]:
        fail(f"{matrix_path} controls must be a non-empty array")
    if matrix.get("canonical_format") != "json":
        fail(f"{matrix_path} canonical_format must equal 'json'")
    if matrix.get("csv_export") != "config/jvmtm-regulatory-closure/transaction-compliance-matrix.csv":
        fail(f"{matrix_path} csv_export must point to the canonical CSV path")
    # Every baseline reference must be a non-empty string resolving to a repo path.
    if not isinstance(matrix.get("source_baseline"), list) or not matrix["source_baseline"]:
        fail(f"{matrix_path} source_baseline must be a non-empty array")
    for baseline_ref in matrix["source_baseline"]:
        if not isinstance(baseline_ref, str) or not baseline_ref.strip():
            fail(f"{matrix_path} contains an invalid source_baseline entry")
        if not (repo_root / baseline_ref).exists():
            fail(f"{matrix_path} source_baseline path does not exist: {baseline_ref}")
    # Runtime slots: unique names with all descriptive fields non-empty.
    slot_refs: set[str] = set()
    for index, slot in enumerate(matrix["runtime_slots"]):
        if not isinstance(slot, dict):
            fail(f"{matrix_path} runtime_slots[{index}] must be an object")
        for key in ("slot", "source", "archive_path", "description"):
            if key not in slot or not isinstance(slot[key], str) or not slot[key].strip():
                fail(f"{matrix_path} runtime_slots[{index}] missing non-empty {key}")
        if slot["slot"] in slot_refs:
            fail(f"{matrix_path} repeats runtime slot {slot['slot']}")
        slot_refs.add(slot["slot"])
    # Controls: unique ids, valid enums, and resolvable evidence artifacts.
    control_ids: set[str] = set()
    for index, control in enumerate(matrix["controls"]):
        label = f"{matrix_path}:controls[{index}]"
        if not isinstance(control, dict):
            fail(f"{label} must be an object")
        for field in REQUIRED_CONTROL_FIELDS:
            if field not in control:
                fail(f"{label} missing field {field}")
        control_id = control["control_id"]
        if not isinstance(control_id, str) or not control_id.strip():
            fail(f"{label} control_id must be a non-empty string")
        if control_id in control_ids:
            fail(f"{matrix_path} repeats control_id {control_id}")
        control_ids.add(control_id)
        if control["blocking_level"] not in BLOCKING_LEVELS:
            fail(f"{label} uses unsupported blocking_level {control['blocking_level']}")
        if not isinstance(control["applies_to_rail"], list) or not control["applies_to_rail"]:
            fail(f"{label} applies_to_rail must be a non-empty array")
        if any(rail not in RAIL_MODES for rail in control["applies_to_rail"]):
            fail(f"{label} uses unsupported rail mode")
        if not isinstance(control["source_audit_rows"], list) or not control["source_audit_rows"]:
            fail(f"{label} source_audit_rows must be a non-empty array")
        artifacts = control["repo_evidence_artifacts"]
        if not isinstance(artifacts, list) or not artifacts:
            fail(f"{label} repo_evidence_artifacts must be a non-empty array")
        for artifact_index, artifact in enumerate(artifacts):
            if not isinstance(artifact, dict):
                fail(f"{label}:repo_evidence_artifacts[{artifact_index}] must be an object")
            for key in ("artifact_type", "ref"):
                if key not in artifact or not isinstance(artifact[key], str) or not artifact[key].strip():
                    fail(f"{label}:repo_evidence_artifacts[{artifact_index}] missing non-empty {key}")
            artifact_type = artifact["artifact_type"]
            ref = artifact["ref"]
            # Matrix artifacts allow only repo-path and runtime-slot types here.
            if artifact_type == "repo-path":
                if not (repo_root / ref).exists():
                    fail(f"{label}:repo_evidence_artifacts[{artifact_index}] repo-path does not exist: {ref}")
            elif artifact_type == "runtime-slot":
                if ref not in slot_refs:
                    fail(f"{label}:repo_evidence_artifacts[{artifact_index}] unknown runtime slot: {ref}")
            else:
                fail(f"{label}:repo_evidence_artifacts[{artifact_index}] unsupported artifact_type {artifact_type}")
    # The checked-in CSV export must be byte-identical to what render_csv derives.
    expected_csv = render_csv(matrix)
    actual_csv = csv_path.read_text(encoding="utf-8")
    if actual_csv != expected_csv:
        fail(
            "transaction-compliance-matrix.csv is out of date with transaction-compliance-matrix.json; "
            "run scripts/jvmtm/export-transaction-compliance-matrix-csv.py"
        )
    # Redundant row-count sanity check on the CSV itself (header + one row per control).
    actual_rows = [line for line in actual_csv.splitlines() if line.strip()]
    expected_row_count = len(matrix["controls"]) + 1
    if len(actual_rows) != expected_row_count:
        fail(
            f"{csv_path} row count mismatch: expected {expected_row_count} including header, "
            f"found {len(actual_rows)}"
        )
    # Markdown companion must carry the canonical title and every control id.
    markdown_text = markdown_path.read_text(encoding="utf-8")
    # NOTE(review): matrix["title"] raises KeyError (not a clean fail()) when
    # absent; matrix files appear to always carry it -- confirm.
    if matrix["title"] not in markdown_text:
        fail(f"{markdown_path} does not contain the canonical matrix title: {matrix['title']}")
    missing_markdown_controls = [control_id for control_id in control_ids if control_id not in markdown_text]
    if missing_markdown_controls:
        fail(
            f"{markdown_path} is missing control ids present in the canonical matrix: "
            f"{', '.join(sorted(missing_markdown_controls))}"
        )
    # Both execution examples must satisfy their READY/BLOCKED invariants.
    validate_execution_example(
        ready_example_path,
        control_ids,
        "READY",
        matrix["matrix_version"],
        repo_root,
        slot_refs,
    )
    validate_execution_example(
        blocked_example_path,
        control_ids,
        "BLOCKED",
        matrix["matrix_version"],
        repo_root,
        slot_refs,
    )
    print(
        "OK jvmtm transaction-grade compliance pack "
        f"({len(control_ids)} controls, {len(slot_refs)} runtime slots, CSV synchronized)"
    )
    return 0
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())

View File

@@ -0,0 +1,28 @@
#!/usr/bin/env bash
# Validate 3FR reserve provenance package JSON files against schemas/reserve-provenance-package.schema.json
set -euo pipefail
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
PKG="$ROOT/config/reserve-provenance-package"
SCHEMA="$PKG/schemas/reserve-provenance-package.schema.json"
# Candidate package files, expanded once. Unmatched globs remain literal and
# are filtered by the -f test below; *.example.json fixtures are skipped.
package_files=(
  "$PKG"/legal/*.json "$PKG"/settlement/*.json "$PKG"/provenance/*.json
  "$PKG"/bank/*.json "$PKG"/kyt/*.json "$PKG"/reconciliation/*.json
  "$PKG"/reserve/*.json "$PKG"/governance/*.json
)
if ! command -v check-jsonschema &>/dev/null; then
  if [[ "${SCHEMA_STRICT:-0}" == "1" ]]; then
    echo "error: check-jsonschema not found; pip install check-jsonschema" >&2
    exit 1
  fi
  # Degraded mode: verify the files at least parse as JSON.
  for f in "${package_files[@]}"; do
    [[ -f "$f" ]] || continue
    [[ "$f" == *.example.json ]] && continue
    python3 -m json.tool "$f" >/dev/null
    echo "OK parse $f"
  done
  echo "skip: check-jsonschema not installed (JSON parse only)"
  exit 0
fi
for f in "${package_files[@]}"; do
  [[ -f "$f" ]] || continue
  [[ "$f" == *.example.json ]] && continue
  check-jsonschema --schemafile "$SCHEMA" "$f"
done
echo "OK reserve-provenance-package (10 JSON files + schema)"

View File

@@ -29,8 +29,15 @@ One-line install (Debian/Ubuntu): `sudo apt install -y sshpass rsync dnsutils ip
- `backup-npmplus.sh` - Full NPMplus backup (database, API exports, certificates)
- `check-contracts-on-chain-138.sh` - Check that Chain 138 deployed contracts have bytecode on-chain (`cast code` for 31 addresses; requires `cast` and RPC access). Use `[RPC_URL]` or env `RPC_URL_138`; `--dry-run` lists addresses only (no RPC calls); `SKIP_EXIT=1` to exit 0 when RPC unreachable.
- `check-public-report-api.sh` - Verify that `explorer.d-bis.org/api/v1/report/*` and `/api/v1/networks` return token-aggregation JSON rather than Blockscout-style `/api/v1` responses. Use `SKIP_EXIT=1` for diagnostic-only mode. Set `SKIP_BRIDGE_ROUTES=0` to assert `/api/v1/bridge/routes`, and `SKIP_BRIDGE_PREFLIGHT=0` to assert `/api/v1/bridge/preflight` payload shape.
- `check-token-aggregation-chain138-api.sh` - Hits tokens, pools, quote, `bridge/routes`, `bridge/status`, `bridge/preflight`, and networks on both `/api/v1/*` and `/token-aggregation/api/v1/*`. `BASE_URL=https://explorer.d-bis.org` (default) or `http://192.168.11.140`.
- `check-gru-transport-preflight.sh` - Operator-focused GRU runtime preflight. Calls `/api/v1/bridge/preflight`, prints blocked pairs with `eligibilityBlockers` / `runtimeMissingRequirements`, and fails unless all active pairs are runtime-ready or `ALLOW_BLOCKED=1` is set.
- `check-cstar-v2-transport-stack.sh` - Predeploy Forge verifier for the `c* V2` bridge stack. Runs the base V2 token suite, legacy reserve-verifier compatibility suite, V2 reserve/verifier full L1/L2 round-trip suite, and the core `CWMultiTokenBridge` round-trip suite.
- `run-repo-green-test-path.sh` - Local deterministic green-path aggregate behind root `pnpm test`. Runs config validation, then the focused `smom-dbis-138` contract and service CI targets.
- `check-completion-status.sh` - One-command summary of repo-completable checks, public report API health, and pointers to operator/external remaining work.
- `reconcile-env-canonical.sh` - Emit recommended .env lines for Chain 138 (canonical source of truth); use to reconcile `smom-dbis-138/.env` with [CONTRACT_ADDRESSES_REFERENCE](../../docs/11-references/CONTRACT_ADDRESSES_REFERENCE.md). Usage: `./scripts/verify/reconcile-env-canonical.sh [--print]`
- `check-deployer-balance-blockscout-vs-rpc.sh` - Compare deployer native balance from Blockscout API vs RPC (to verify index matches current chain); see [EXPLORER_AND_BLOCKSCAN_REFERENCE](../../docs/11-references/EXPLORER_AND_BLOCKSCAN_REFERENCE.md)
- `sync-blockscout-address-labels-from-registry.sh` - Plan or sync Blockscout address labels from `address-registry-entry` JSON (`config/dbis-institutional/schemas/address-registry-entry.schema.json`: `blockscout.label`, `status: active`). Supports `--mode=http`, `--mode=db`, and `--mode=auto`; on the self-hosted Chain 138 explorer, `db` is the right live mode because `/api/v1/*` is token-aggregation, not a native Blockscout label-write API. DB mode writes primary labels into Blockscout `public.address_names` through CT `5000`. See `config/dbis-institutional/README.md` and [OMNL_DBIS_CORE_CHAIN138_SMART_VAULT_RTGS_RUNBOOK.md](../../docs/03-deployment/OMNL_DBIS_CORE_CHAIN138_SMART_VAULT_RTGS_RUNBOOK.md).
- `check-dependencies.sh` - Verify required tools (bash, curl, jq, openssl, ssh)
- `export-cloudflare-dns-records.sh` - Export Cloudflare DNS records
- `export-npmplus-config.sh` - Export NPMplus proxy hosts and certificates via API
@@ -43,7 +50,9 @@ One-line install (Debian/Ubuntu): `sudo apt install -y sshpass rsync dnsutils ip
## Task runners (no LAN vs from LAN)
- **From anywhere (no LAN/creds):** `../run-completable-tasks-from-anywhere.sh` — runs config validation, on-chain contract check, run-all-validation --skip-genesis, and reconcile-env-canonical.
- **From anywhere (no LAN/creds):** `../run-completable-tasks-from-anywhere.sh` — runs config validation, on-chain contract check, run-all-validation --skip-genesis, public report API diagnostics, and reconcile-env-canonical.
- **Completion snapshot:** `check-completion-status.sh` — summarizes what is complete locally and what still depends on operator or external execution.
- **Full LAN execution order:** `../run-full-operator-completion-from-lan.sh` — starts with the token-aggregation `/api/v1` repair, then Wave 0, verification, E2E, and optional operator-only deployment steps. Use `--dry-run` first.
- **From LAN (NPM_PASSWORD, optional PRIVATE_KEY):** `../run-operator-tasks-from-lan.sh` — runs W0-1 (NPMplus RPC fix), W0-3 (NPMplus backup), O-1 (Blockscout verification); use `--dry-run` to print commands only. See [ALL_TASKS_DETAILED_STEPS](../../docs/00-meta/ALL_TASKS_DETAILED_STEPS.md).
## Environment

View File

@@ -2,8 +2,9 @@
# Check whether Chain 138 deployed tokens (cUSDT, cUSDC) support ERC-2612 permit or ERC-3009.
# Used to determine x402 compatibility: thirdweb x402 requires permit or ERC-3009.
#
# Usage: ./scripts/verify/check-chain138-token-permit-support.sh [RPC_URL]
# Usage: ./scripts/verify/check-chain138-token-permit-support.sh [RPC_URL] [--token SYMBOL=ADDRESS]...
# RPC_URL: optional; default from RPC_URL_138 or CHAIN_138_RPC_URL or https://rpc-core.d-bis.org
# --token SYMBOL=ADDRESS: optional; inspect custom token inventory (repeatable)
# --dry-run: print RPC and token addresses only (no RPC calls).
#
# Exit: 0 if script runs; output is human-readable. Use output to fill CHAIN138_X402_TOKEN_SUPPORT.md.
@@ -14,19 +15,55 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
[[ -f "${SCRIPT_DIR}/../lib/load-project-env.sh" ]] && source "${SCRIPT_DIR}/../lib/load-project-env.sh" 2>/dev/null || true
DRY_RUN=""
RPC_ARG=""
for a in "$@"; do
if [[ "$a" == "--dry-run" ]]; then DRY_RUN=1; else [[ -z "$RPC_ARG" ]] && RPC_ARG="$a"; fi
declare -A TOKENS=()
TOKEN_ORDER=()
# Register a SYMBOL=ADDRESS token spec into TOKENS / TOKEN_ORDER.
# Exits 1 on a malformed spec (no '=', or an empty symbol/address side).
add_token() {
  local spec="$1" symbol address
  symbol="${spec%%=*}"
  address="${spec#*=}"
  # A spec without '=' leaves symbol == address == spec; reject that too.
  if [[ -z "$symbol" || -z "$address" || "$symbol" == "$address" ]]; then
    echo "ERROR: invalid token spec '$spec' (expected SYMBOL=ADDRESS)" >&2
    exit 1
  fi
  TOKENS["$symbol"]="$address"
  TOKEN_ORDER+=("$symbol")
}
# Parse args: --dry-run, --token SYMBOL=ADDRESS (repeatable, also --token=SPEC);
# first bare argument is the RPC URL, further bare arguments are token specs.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run)
      DRY_RUN=1
      shift
      ;;
    --token)
      [[ $# -ge 2 ]] || { echo "ERROR: --token requires SYMBOL=ADDRESS" >&2; exit 1; }
      add_token "$2"
      shift 2
      ;;
    --token=*)
      add_token "${1#--token=}"
      shift
      ;;
    *)
      # First positional is the RPC URL; extras are treated as token specs.
      if [[ -z "$RPC_ARG" ]]; then
        RPC_ARG="$1"
      else
        add_token "$1"
      fi
      shift
      ;;
  esac
done
# RPC precedence: CLI arg > RPC_URL_138 > CHAIN_138_RPC_URL > public default.
RPC="${RPC_ARG:-${RPC_URL_138:-${CHAIN_138_RPC_URL:-https://rpc-core.d-bis.org}}}"
# Token name, address (from CHAIN138_TOKEN_ADDRESSES.md)
declare -A TOKENS
TOKENS[cUSDT]="0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
TOKENS[cUSDC]="0xf22258f57794CC8E06237084b353Ab30fFfa640b"
# Default canonical inventory (from CHAIN138_TOKEN_ADDRESSES.md) when no --token given.
if [[ ${#TOKEN_ORDER[@]} -eq 0 ]]; then
  add_token "cUSDT=0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
  add_token "cUSDC=0xf22258f57794CC8E06237084b353Ab30fFfa640b"
fi
# Test holder for nonces(address) call (any address is fine)
HOLDER="0x0000000000000000000000000000000000000001"
@@ -34,7 +71,7 @@ HOLDER="0x0000000000000000000000000000000000000001"
# Dry run: print the resolved RPC and token inventory, making no RPC calls.
if [[ -n "$DRY_RUN" ]]; then
  echo "=== Chain 138 token permit support check (--dry-run) ==="
  echo "RPC: $RPC"
  for sym in "${TOKEN_ORDER[@]}"; do echo " $sym: ${TOKENS[$sym]}"; done
  exit 0
fi
fi
@@ -48,7 +85,7 @@ if ! command -v cast &>/dev/null; then
exit 1
fi
for sym in cUSDT cUSDC; do
for sym in "${TOKEN_ORDER[@]}"; do
addr="${TOKENS[$sym]}"
echo "--- $sym ($addr) ---"

View File

@@ -0,0 +1,203 @@
#!/usr/bin/env bash
# Check whether Chain 138 is operationally ready for x402 and whether its payment tokens are x402-capable.
#
# Usage:
# ./scripts/verify/check-chain138-x402-readiness.sh [CORE_RPC] [PUBLIC_RPC] [EXPLORER_STATS] [--token SYMBOL=ADDRESS]...
# ./scripts/verify/check-chain138-x402-readiness.sh --strict
#
# Exit codes:
# 0 when the script runs successfully
# 1 when --strict is used and x402 is not fully ready
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
# CLI state: --strict exit gating, positional URLs, and the token inventory.
STRICT=0
POSITIONAL=()
declare -A TOKENS=()
TOKEN_ORDER=()
# Register a SYMBOL=ADDRESS token spec into TOKENS / TOKEN_ORDER.
# Exits 1 on a malformed spec (no '=', or an empty symbol/address side).
add_token() {
  local spec="$1" symbol address
  symbol="${spec%%=*}"
  address="${spec#*=}"
  # A spec without '=' leaves symbol == address == spec; reject that too.
  if [[ -z "$symbol" || -z "$address" || "$symbol" == "$address" ]]; then
    echo "ERROR: invalid token spec '$spec' (expected SYMBOL=ADDRESS)" >&2
    exit 1
  fi
  TOKENS["$symbol"]="$address"
  TOKEN_ORDER+=("$symbol")
}
# Parse args: --strict, --token SPEC (repeatable); anything else is positional.
while [[ $# -gt 0 ]]; do
  case "$1" in
    --strict)
      STRICT=1
      shift
      ;;
    --token)
      [[ $# -ge 2 ]] || { echo "ERROR: --token requires SYMBOL=ADDRESS" >&2; exit 1; }
      add_token "$2"
      shift 2
      ;;
    --token=*)
      add_token "${1#--token=}"
      shift
      ;;
    *)
      POSITIONAL+=("$1")
      shift
      ;;
  esac
done
# Positional order: core RPC, public RPC, explorer stats URL (env fallbacks then defaults).
CORE_RPC="${POSITIONAL[0]:-${RPC_URL_138:-${CHAIN_138_RPC_URL:-http://192.168.11.211:8545}}}"
PUBLIC_RPC="${POSITIONAL[1]:-${PUBLIC_RPC_URL_138:-https://rpc.public-0138.defi-oracle.io}}"
EXPLORER_STATS="${POSITIONAL[2]:-${EXPLORER_STATS_URL_138:-https://explorer.d-bis.org/api/v2/stats}}"
# Default canonical payment tokens when none were supplied on the CLI.
if [[ ${#TOKEN_ORDER[@]} -eq 0 ]]; then
  add_token "cUSDT=0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
  add_token "cUSDC=0xf22258f57794CC8E06237084b353Ab30fFfa640b"
fi
# Probe holder address for nonces()/authorizationState() calls; any address works.
HOLDER="0x0000000000000000000000000000000000000001"
ZERO_BYTES32="0x0000000000000000000000000000000000000000000000000000000000000000"
# POST a JSON-RPC 2.0 request to $1 with method $2 and params $3 (default []).
rpc_call() {
  local url="$1" method="$2" params="${3:-[]}"
  local payload
  payload=$(printf '{"jsonrpc":"2.0","method":"%s","params":%s,"id":1}' "$method" "$params")
  curl -sS --max-time 15 \
    -H 'Content-Type: application/json' \
    --data "$payload" \
    "$url"
}
# Extract a jq expression ($2) from a JSON string ($1); empty output on failure.
json_field() {
  local payload="$1"
  local expr="$2"
  printf '%s\n' "$payload" | jq -r "$expr" 2>/dev/null || true
}
# GET $1, write the response body to $2, and print the HTTP status code.
http_status() {
  local url="$1" body_file="$2"
  curl -k -sS --max-time 15 -o "$body_file" -w '%{http_code}' "$url"
}
echo "=== Chain 138 x402 readiness ==="
echo "Core RPC: $CORE_RPC"
echo "Public RPC: $PUBLIC_RPC"
echo "Explorer: $EXPLORER_STATS"
echo ""
# Readiness flags and display values, filled in by the probes below.
core_ok=0
public_ok=0
explorer_ok=0
token_ready=0
core_block="n/a"
core_peers="n/a"
core_syncing="n/a"
public_client="n/a"
explorer_blocks="n/a"
# Core RPC probe: a usable eth_blockNumber result marks the core healthy.
if core_block_json="$(rpc_call "$CORE_RPC" "eth_blockNumber")"; then
  core_block="$(json_field "$core_block_json" '.result // "n/a"')"
  if [[ "$core_block" != "n/a" && "$core_block" != "null" ]]; then
    core_ok=1
  fi
fi
# Secondary core diagnostics only when the core responded.
if [[ "$core_ok" -eq 1 ]]; then
  core_peers="$(json_field "$(rpc_call "$CORE_RPC" "net_peerCount")" '.result // "n/a"')"
  core_syncing="$(json_field "$(rpc_call "$CORE_RPC" "eth_syncing")" '.result')"
fi
# Response bodies land in temp files cleaned up on any exit path.
public_body_file="$(mktemp)"
explorer_body_file="$(mktemp)"
trap 'rm -f "$public_body_file" "$explorer_body_file"' EXIT
# Public RPC probe: needs HTTP 200 plus a non-empty web3_clientVersion result.
public_status="$(curl -k -sS --max-time 15 -o "$public_body_file" -w "%{http_code}" \
  -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":1}' \
  "$PUBLIC_RPC" || true)"
public_result="$(cat "$public_body_file" 2>/dev/null || true)"
public_client="$(json_field "$public_result" '.result // empty')"
if [[ "$public_status" == "200" && -n "$public_client" ]]; then
  public_ok=1
fi
# Explorer probe: needs HTTP 200 and a usable total_blocks in the stats payload.
explorer_status="$(http_status "$EXPLORER_STATS" "$explorer_body_file" || true)"
explorer_result="$(cat "$explorer_body_file" 2>/dev/null || true)"
explorer_blocks="$(json_field "$explorer_result" '.total_blocks // "n/a"')"
if [[ "$explorer_status" == "200" && "$explorer_blocks" != "n/a" && "$explorer_blocks" != "null" ]]; then
  explorer_ok=1
fi
echo "Operational readiness"
echo " core_rpc_ok: $core_ok"
echo " core_block: $core_block"
echo " core_peer_count: $core_peers"
echo " core_syncing: $core_syncing"
echo " public_rpc_ok: $public_ok"
echo " public_rpc_http: ${public_status:-n/a}"
echo " public_client: ${public_client:-n/a}"
echo " explorer_ok: $explorer_ok"
echo " explorer_http: ${explorer_status:-n/a}"
echo " explorer_blocks: $explorer_blocks"
echo ""
echo "Token compatibility"
# On-chain permit probes need foundry's cast; report-only when missing.
if ! command -v cast >/dev/null 2>&1; then
  echo " cast_available: 0"
  echo " note: install foundry/cast to perform on-chain permit checks"
else
  echo " cast_available: 1"
  for sym in "${TOKEN_ORDER[@]}"; do
    addr="${TOKENS[$sym]}"
    permit_supported=0
    auth_supported=0
    # ERC-2612 probe: nonces(address) must be callable on the token.
    if cast call "$addr" "nonces(address)(uint256)" "$HOLDER" --rpc-url "$CORE_RPC" >/dev/null 2>&1; then
      permit_supported=1
    fi
    # ERC-3009 probe: authorizationState(address,bytes32) must be callable.
    if cast call "$addr" "authorizationState(address,bytes32)(bool)" "$HOLDER" "$ZERO_BYTES32" --rpc-url "$CORE_RPC" >/dev/null 2>&1; then
      auth_supported=1
    fi
    # One capable token is enough for x402.
    if [[ "$permit_supported" -eq 1 || "$auth_supported" -eq 1 ]]; then
      token_ready=1
    fi
    echo " ${sym}_address: $addr"
    echo " ${sym}_erc2612: $permit_supported"
    echo " ${sym}_erc3009: $auth_supported"
  done
fi
echo ""
# Consolidated readiness flags drive both the verdict output and strict mode.
ops_ok=0
if [[ "$core_ok" -eq 1 && "$public_ok" -eq 1 && "$explorer_ok" -eq 1 ]]; then
  ops_ok=1
fi
if [[ "$ops_ok" -eq 1 ]]; then
  echo "Operational verdict: Chain 138 edge services are healthy."
else
  echo "Operational verdict: Chain 138 edge services are not fully healthy."
fi
if [[ "$token_ready" -eq 1 ]]; then
  echo "Token verdict: At least one canonical Chain 138 payment token is x402-capable."
else
  echo "Token verdict: Canonical Chain 138 payment tokens are still not x402-capable."
fi
x402_ok=0
if [[ "$ops_ok" -eq 1 && "$token_ready" -eq 1 ]]; then
  x402_ok=1
fi
if [[ "$x402_ok" -eq 1 ]]; then
  echo "x402 verdict: READY"
else
  echo "x402 verdict: BLOCKED"
  echo "  note: thirdweb x402 still needs an ERC-2612 or ERC-3009 payment token on Chain 138."
fi
# Strict mode turns any remaining gap into a non-zero exit for CI gating.
if [[ "$STRICT" -eq 1 && "$x402_ok" -ne 1 ]]; then
  exit 1
fi

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Summarize repo-completable vs operator/external completion state in one place.
# Usage: bash scripts/verify/check-completion-status.sh
# Exit codes:
#   0 = all repo-completable checks passed and public API looks healthy
#   1 = one or more checks reported issues
# Set SKIP_EXIT=1 to always exit 0 after printing the summary.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# All child checks below use repo-relative paths, so run from the repo root.
cd "$PROJECT_ROOT"
SKIP_EXIT="${SKIP_EXIT:-0}"
# Number of checks that reported a problem; drives the summary and exit code.
FAILURES=0
# Print a section banner.
section() {
  printf '\n=== %s ===\n' "$1"
}
# run_check LABEL CMD [ARGS...]: run CMD, print [OK]/[WARN] for LABEL, and
# bump FAILURES on non-zero status. The `if "$@"` condition keeps set -e from
# aborting the whole script when an individual check fails.
run_check() {
  local label="$1"
  shift
  printf -- '- %s\n' "$label"
  if "$@"; then
    printf '  [OK] %s\n' "$label"
  else
    printf '  [WARN] %s\n' "$label"
    FAILURES=$((FAILURES + 1))
  fi
}
section "Repo-Completable Checks"
run_check "Config validation" bash scripts/validation/validate-config-files.sh
run_check "All validation (--skip-genesis)" bash scripts/verify/run-all-validation.sh --skip-genesis
# SKIP_EXIT=0 forces the child to fail loudly even if the caller exported SKIP_EXIT=1.
run_check "Submodule working trees" env SKIP_EXIT=0 bash scripts/verify/submodules-clean.sh
section "Public API Health"
# KEEP_GOING=1 so the child reports every endpoint before returning non-zero.
run_check "Public report API" env SKIP_EXIT=0 KEEP_GOING=1 bash scripts/verify/check-public-report-api.sh
section "Status Interpretation"
cat <<'EOF'
- Repo-local validation is complete when the config, validation, and submodule checks pass.
- Public report API problems are usually operator-side nginx/proxy deployment issues, not repo code issues.
- Remaining non-local work is tracked in:
  - docs/00-meta/STILL_NOT_DONE_EXECUTION_CHECKLIST.md
  - docs/00-meta/OPERATOR_AND_EXTERNAL_COMPLETION_CHECKLIST.md
  - docs/00-meta/COMPLETE_REQUIRED_OPTIONAL_RECOMMENDED_INDEX.md
EOF
section "Summary"
if (( FAILURES == 0 )); then
  echo "- All repo-completable checks passed."
  echo "- Public report API looks healthy."
else
  echo "- Checks with warnings: $FAILURES"
  echo "- Review the warnings above to distinguish repo-local cleanup from operator-side work."
fi
# Honor SKIP_EXIT=1: print everything above but never fail the caller.
if (( FAILURES > 0 )) && [[ "$SKIP_EXIT" != "1" ]]; then
  exit 1
fi

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env bash
# Check that Chain 138 deployed contracts have bytecode on-chain.
# Address list: 61 (core, CCIP, PMM, vault/reserve, oracle keeper path, CompliantFiatTokens). Source: CONTRACT_ADDRESSES_REFERENCE, ADDRESS_MATRIX.
# Address list: 64 (core, CCIP canonical+legacy routers, WETH9 canonical+legacy bridges, PMM, vault/reserve, oracle keeper path, CompliantFiatTokens, ISO20022Router). Aligns with smom-dbis-138/.env and ADDRESS_MATRIX.
# Usage: ./scripts/verify/check-contracts-on-chain-138.sh [RPC_URL] [--dry-run]
# Default RPC: from env RPC_URL_138 (Chain 138 Core standard) or config/ip-addresses.conf, else https://rpc-core.d-bis.org
# Optional: SKIP_EXIT=1 to exit 0 even when some addresses MISS (e.g. when RPC unreachable from this host).
@@ -14,9 +14,17 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
export PROJECT_ROOT
# Load project env so RPC_URL_138 (Chain 138 Core) from config/ip-addresses.conf or smom-dbis-138/.env is used
[[ -f "${SCRIPT_DIR}/../lib/load-project-env.sh" ]] && source "${SCRIPT_DIR}/../lib/load-project-env.sh" 2>/dev/null || true
# Load project env so RPC_URL_138 (Chain 138 Core) from config/ip-addresses.conf or smom-dbis-138/.env is used.
# export PROJECT_ROOT so load-project-env does not re-derive a wrong path from BASH_SOURCE and hit err_exit.
# Temporarily relax -e/-u: nested dotenv may invoke helpers not on PATH or reference unset vars (exit 127 / set -u).
if [[ -f "${SCRIPT_DIR}/../lib/load-project-env.sh" ]]; then
set +eu
# shellcheck source=../lib/load-project-env.sh
source "${SCRIPT_DIR}/../lib/load-project-env.sh" 2>/dev/null || true
set -euo pipefail
fi
# Parse args: first non-option is RPC_URL; --dry-run = print only, no cast calls
DRY_RUN=""
@@ -49,9 +57,11 @@ else
"0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f" # WETH10
"0x99b3511a2d315a497c8112c1fdd8d508d4b1e506" # Multicall / Oracle Aggregator
"0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6" # Oracle Proxy
"0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e" # CCIP Router
"0x42DAb7b888Dd382bD5Adcf9E038dBF1fD03b4817" # CCIP Router (canonical; CCIP_ROUTER / relay path)
"0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e" # CCIP Router direct legacy (CCIP_ROUTER_DIRECT_LEGACY)
"0x105F8A15b819948a89153505762444Ee9f324684" # CCIP Sender
"0x971cD9D156f193df8051E48043C476e53ECd4693" # CCIPWETH9Bridge
"0xcacfd227A040002e49e2e01626363071324f820a" # CCIPWETH9Bridge (canonical sendCrossChain)
"0x971cD9D156f193df8051E48043C476e53ECd4693" # CCIPWETH9Bridge direct legacy (CCIPWETH9_BRIDGE_DIRECT_LEGACY)
"0xe0E93247376aa097dB308B92e6Ba36bA015535D0" # CCIPWETH10Bridge
"0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03" # LINK
"0x93E66202A11B1772E55407B32B44e5Cd8eda7f22" # cUSDT
@@ -106,6 +116,7 @@ else
"0x54dBd40cF05e15906A2C21f600937e96787f5679" # cCADC
"0x290E52a8819A4fbD0714E517225429aA2B70EC6b" # cXAUC
"0x94e408E26c6FD8F4ee00b54dF19082FDA07dC96E" # cXAUT
"0xBf1BB3E73C2DB7c4aebCd7bf757cdD1C12dE9074" # ISO20022Router (explorer address-inventory ISO20022_ROUTER)
)
fi

View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
# Verify that the c* V2 token, reserve, and c* <-> cW* transport stack are green before deploy.
# Usage: bash scripts/verify/check-cstar-v2-transport-stack.sh
#
# Env:
#   DRY_RUN=1    Print the commands without executing them
set -euo pipefail
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
SMOM_ROOT="${PROJECT_ROOT}/smom-dbis-138"
DRY_RUN="${DRY_RUN:-0}"
# Small output helpers; hard failures go to stderr below.
log() { printf '%s\n' "$*"; }
ok() { printf '[OK] %s\n' "$*"; }
# Execute the given command, or only print it when DRY_RUN=1.
run() {
  if [[ "$DRY_RUN" == "1" ]]; then
    printf '[DRY_RUN] %s\n' "$*"
    return 0
  fi
  "$@"
}
if ! command -v forge >/dev/null 2>&1; then
  printf '[FAIL] forge is required but not installed or not on PATH.\n' >&2
  exit 1
fi
# Fail early with a clear message when the submodule checkout is missing
# (otherwise pushd dies under set -e with a less helpful error).
if [[ ! -d "$SMOM_ROOT" ]]; then
  printf '[FAIL] expected checkout not found: %s\n' "$SMOM_ROOT" >&2
  exit 1
fi
log "=== c* V2 transport stack verifier ==="
log "Repo: ${SMOM_ROOT}"
log ""
pushd "$SMOM_ROOT" >/dev/null
# Foundry's JSON cache occasionally drifts when toolchain output shapes change.
# Removing the generated cache keeps these focused suites reliable in CI and local runs.
# Bug fix: route through run() so DRY_RUN=1 truly has no side effects (the rm
# previously executed even in dry-run mode).
run rm -f cache/solidity-files-cache.json
run forge test --match-path "test/compliance/CompliantFiatTokenV2.t.sol"
ok "CompliantFiatTokenV2 base token suite passed."
run forge test --match-path "test/bridge/CWReserveVerifierVaultIntegration.t.sol"
ok "Legacy reserve-verifier bridge compatibility suite passed."
run forge test --match-path "test/bridge/CWReserveVerifierVaultV2Integration.t.sol"
ok "V2 reserve-verifier + full L1/L2 transport suite passed."
run forge test --match-path "test/bridge/CWMultiTokenBridge.t.sol"
ok "Core CWMultiTokenBridge round-trip suite passed."
popd >/dev/null
log ""
ok "c* V2 bridge and transport stack is green."

View File

@@ -0,0 +1,103 @@
#!/usr/bin/env bash
# Verify GRU Monetary Transport Layer runtime readiness via /api/v1/bridge/preflight.
# Usage: bash scripts/verify/check-gru-transport-preflight.sh [base_url]
#   base_url: Optional API base, defaults to https://explorer.d-bis.org
#
# Exit codes:
#   0 = endpoint healthy and, unless ALLOW_BLOCKED=1, no blocked pairs remain
#   1 = endpoint unreachable, wrong payload, or blocked pairs remain
#
# Env:
#   SKIP_EXIT=1      Print diagnostics but always exit 0
#   ALLOW_BLOCKED=1  Treat blocked pairs as warnings instead of failures
set -euo pipefail
BASE_URL="${1:-${BASE_URL:-https://explorer.d-bis.org}}"
BASE_URL="${BASE_URL%/}"
SKIP_EXIT="${SKIP_EXIT:-0}"
ALLOW_BLOCKED="${ALLOW_BLOCKED:-0}"
HAD_FAILURE=0
log() { printf '%s\n' "$*"; }
ok() { printf '[OK] %s\n' "$*"; }
warn() { printf '[WARN] %s\n' "$*"; }
# fail() records the failure and exits immediately unless SKIP_EXIT=1.
fail() {
  printf '[FAIL] %s\n' "$*"
  HAD_FAILURE=1
  if [[ "$SKIP_EXIT" != "1" ]]; then
    exit 1
  fi
}
# Hard dependencies. Bug fix: without this guard a missing jq made every
# payload-shape check (whose stderr is suppressed) fail with a misleading
# "Unexpected payload shape" message instead of the real cause.
for tool in curl jq; do
  if ! command -v "$tool" >/dev/null 2>&1; then
    printf '[FAIL] required tool not found on PATH: %s\n' "$tool" >&2
    exit 1
  fi
done
tmp_body="$(mktemp)"
trap 'rm -f "$tmp_body"' EXIT
# Try the endpoint under both known nginx layouts; print the matching prefix.
fetch_preflight() {
  local prefix
  for prefix in "" "/token-aggregation"; do
    local url="${BASE_URL}${prefix}/api/v1/bridge/preflight"
    local code
    code="$(curl -sS -o "$tmp_body" -w "%{http_code}" -m 25 "$url" 2>/dev/null || echo "000")"
    if [[ "$code" == "200" ]]; then
      printf '%s\n' "$prefix"
      return 0
    fi
  done
  return 1
}
log "=== GRU Transport preflight ==="
log "Base URL: $BASE_URL"
log ""
if ! prefix="$(fetch_preflight)"; then
  fail "Could not fetch /api/v1/bridge/preflight on either /api/v1 or /token-aggregation/api/v1."
fi
# Assert the payload shape before reading any fields from it.
if ! jq -e '
  type == "object" and
  (.gruTransport | type == "object") and
  (.gruTransport.summary.transportPairs | type == "number") and
  (.gruTransport.blockedPairs | type == "array") and
  (.gruTransport.readyPairs | type == "array")
' "$tmp_body" >/dev/null 2>&1; then
  summary="$(head -c 300 "$tmp_body" | tr '\n' ' ')"
  fail "Unexpected /api/v1/bridge/preflight payload shape. Sample: $summary"
fi
transport_pairs="$(jq -r '.gruTransport.summary.transportPairs // 0' "$tmp_body")"
runtime_ready_pairs="$(jq -r '.gruTransport.summary.runtimeReadyTransportPairs // 0' "$tmp_body")"
blocked_pairs="$(jq -r '.gruTransport.blockedPairs | length' "$tmp_body")"
ready_pairs="$(jq -r '.gruTransport.readyPairs | length' "$tmp_body")"
display_path="${prefix}/api/v1/bridge/preflight"
if [[ -z "$prefix" ]]; then
  display_path="/api/v1/bridge/preflight"
fi
ok "Preflight endpoint reachable at ${display_path}"
log "Transport pairs: $transport_pairs"
log "Runtime-ready pairs: $runtime_ready_pairs"
log "Ready pairs returned: $ready_pairs"
log "Blocked pairs returned: $blocked_pairs"
if (( blocked_pairs > 0 )); then
  log ""
  warn "Blocked GRU transport pairs:"
  jq -r '
    .gruTransport.blockedPairs[]
    | "- \(.key): eligibilityBlockers=\(((.eligibilityBlockers // []) | join(",")) // "") runtimeMissingRequirements=\(((.runtimeMissingRequirements // []) | join(",")) // "")"
  ' "$tmp_body"
  if [[ "$ALLOW_BLOCKED" != "1" ]]; then
    fail "GRU transport preflight has blocked pairs. Set ALLOW_BLOCKED=1 for diagnostic-only mode."
  else
    warn "ALLOW_BLOCKED=1 set: blocked pairs reported without failing."
  fi
else
  ok "All active GRU transport pairs are runtime-ready."
fi
if [[ "$SKIP_EXIT" == "1" ]]; then
  warn "SKIP_EXIT=1 set: diagnostic mode."
fi

View File

@@ -0,0 +1,128 @@
#!/usr/bin/env bash
# Verify that the public token-aggregation/report API is reachable and not misrouted to Blockscout.
# Usage: bash scripts/verify/check-public-report-api.sh [base_url]
#   base_url: Optional API base, defaults to https://explorer.d-bis.org
#
# Exit codes:
#   0 = all expected endpoints returned token-aggregation-style JSON
#   1 = one or more endpoints returned the wrong shape or were unreachable
# Set SKIP_EXIT=1 to print diagnostics but exit 0.
# Set KEEP_GOING=1 to keep checking every endpoint before exiting non-zero.
# Set SKIP_BRIDGE_ROUTES=0 to assert /api/v1/bridge/routes payload shape.
# Set SKIP_BRIDGE_PREFLIGHT=0 to assert /api/v1/bridge/preflight payload shape.
set -euo pipefail
BASE_URL="${1:-https://explorer.d-bis.org}"
SKIP_EXIT="${SKIP_EXIT:-0}"
KEEP_GOING="${KEEP_GOING:-0}"
HAD_FAILURE=0
log() { printf '%s\n' "$*"; }
ok() { printf '[OK] %s\n' "$*"; }
warn() { printf '[WARN] %s\n' "$*"; }
# fail() records the failure; it exits immediately only when neither
# SKIP_EXIT=1 nor KEEP_GOING=1 is set.
fail() {
  printf '[FAIL] %s\n' "$*"
  HAD_FAILURE=1
  if [[ "$SKIP_EXIT" != "1" && "$KEEP_GOING" != "1" ]]; then
    exit 1
  fi
}
# Hard dependencies. Bug fix: without this guard a missing jq made every
# shape check (stderr suppressed) report "unexpected payload" instead of the
# real problem.
for tool in curl jq; do
  if ! command -v "$tool" >/dev/null 2>&1; then
    printf '[FAIL] required tool not found on PATH: %s\n' "$tool" >&2
    exit 1
  fi
done
# check_json_shape NAME URL JQ_EXPR EXPECTED_DESC
# Fetches URL, rejects Blockscout-style and token-aggregation error payloads,
# then asserts JQ_EXPR against the body. Always returns 0 so the script
# continues under set -e; failures are routed through fail().
check_json_shape() {
  local name="$1"
  local url="$2"
  local jq_expr="$3"
  local expected_desc="$4"
  local response
  local body
  local status
  if ! response="$(curl -sSL --max-time 20 -w $'\n%{http_code}' "$url" 2>/dev/null)"; then
    fail "$name request failed: $url"
    return 0
  fi
  # Last line is the HTTP status appended by -w; everything before is the body.
  status="$(printf '%s' "$response" | tail -n 1)"
  body="$(printf '%s' "$response" | sed '$d')"
  if printf '%s' "$body" | jq -e 'type == "object" and has("message") and has("result") and has("status")' >/dev/null 2>&1; then
    fail "$name is returning Blockscout-style JSON (HTTP $status) instead of token-aggregation JSON. See docs/04-configuration/TOKEN_AGGREGATION_REPORT_API_RUNBOOK.md"
    return 0
  fi
  if printf '%s' "$body" | jq -e 'type == "object" and has("error")' >/dev/null 2>&1; then
    local api_error
    api_error="$(printf '%s' "$body" | jq -r '.error' 2>/dev/null || echo 'unknown error')"
    fail "$name returned token-aggregation error payload (HTTP $status): $api_error"
    return 0
  fi
  if printf '%s' "$body" | jq -e "$jq_expr" >/dev/null 2>&1; then
    ok "$name healthy ($expected_desc, HTTP $status)"
    return 0
  fi
  local summary
  summary="$(printf '%s' "$body" | head -c 240 | tr '\n' ' ')"
  fail "$name returned unexpected payload (HTTP $status). Expected $expected_desc. Sample: $summary"
}
log "=== Public report API check ==="
log "Base URL: $BASE_URL"
log ""
check_json_shape \
  "token-list" \
  "$BASE_URL/api/v1/report/token-list?chainId=138" \
  'type == "object" and (.tokens | type == "array")' \
  'object with .tokens[]'
check_json_shape \
  "coingecko report" \
  "$BASE_URL/api/v1/report/coingecko?chainId=138" \
  'type == "object"' \
  'token-aggregation report JSON object'
check_json_shape \
  "cmc report" \
  "$BASE_URL/api/v1/report/cmc?chainId=138" \
  'type == "object"' \
  'token-aggregation report JSON object'
check_json_shape \
  "networks" \
  "$BASE_URL/api/v1/networks" \
  'type == "object" and (.networks | type == "array")' \
  'object with .networks[]'
# Bridge routes (requires token-aggregation build with GET /api/v1/bridge/routes). Off by default until edge is deployed.
if [[ "${SKIP_BRIDGE_ROUTES:-1}" != "1" ]]; then
  check_json_shape \
    "bridge-routes" \
    "$BASE_URL/api/v1/bridge/routes" \
    'type == "object" and (.chain138Bridges | type == "object") and (.routes | type == "object")' \
    'object with .chain138Bridges and .routes'
fi
# GRU preflight (shape only; does not require all pairs to be runtime-ready). Off by default until edge is deployed.
if [[ "${SKIP_BRIDGE_PREFLIGHT:-1}" != "1" ]]; then
  check_json_shape \
    "bridge-preflight" \
    "$BASE_URL/api/v1/bridge/preflight" \
    'type == "object" and (.gruTransport | type == "object") and (.gruTransport.summary.transportPairs | type == "number") and (.gruTransport.blockedPairs | type == "array")' \
    'object with .gruTransport.summary and .gruTransport.blockedPairs[]'
fi
log ""
if (( HAD_FAILURE > 0 )); then
  if [[ "$SKIP_EXIT" == "1" ]]; then
    warn "SKIP_EXIT=1 set: non-healthy endpoints were reported without failing."
  elif [[ "$KEEP_GOING" == "1" ]]; then
    exit 1
  fi
elif [[ "$SKIP_EXIT" == "1" ]]; then
  warn "SKIP_EXIT=1 set: non-healthy endpoints were reported without failing."
else
  ok "Public report API endpoints look healthy."
fi

View File

@@ -0,0 +1,121 @@
#!/usr/bin/env bash
# E2E: every public RPC FQDN — HTTP JSON-RPC eth_chainId (+ WSS where listed).
# Exit 0 only if all HTTP checks pass; WSS failures warn unless STRICT_WSS=1 (then exit 1).
#
# Usage: bash scripts/verify/check-rpc-fqdns-e2e.sh
# Env: RPC_TIMEOUT_SEC (default 25), STRICT_WSS=1 to fail on wscat errors
set -euo pipefail
TO="${RPC_TIMEOUT_SEC:-25}"
BODY='{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}'
# Chain 138
EXPECT='0x8a'
# HTTP JSON-RPC hostnames (inventory: verify-end-to-end-routing.sh + RPC_ENDPOINTS_MASTER + NPM tw-core / core-2)
HTTP_FQDNS=(
  rpc-http-pub.d-bis.org
  rpc.d-bis.org
  rpc2.d-bis.org
  rpc-http-prv.d-bis.org
  rpc-fireblocks.d-bis.org
  rpc.public-0138.defi-oracle.io
  rpc.defi-oracle.io
  rpc-alltra.d-bis.org
  rpc-alltra-2.d-bis.org
  rpc-alltra-3.d-bis.org
  rpc-hybx.d-bis.org
  rpc-hybx-2.d-bis.org
  rpc-hybx-3.d-bis.org
  rpc.tw-core.d-bis.org
  rpc-core-2.d-bis.org
  rpc-core.d-bis.org
)
# WebSocket RPC hostnames (wss://)
WS_FQDNS=(
  rpc-ws-pub.d-bis.org
  ws.rpc.d-bis.org
  ws.rpc2.d-bis.org
  rpc-ws-prv.d-bis.org
  ws.rpc-fireblocks.d-bis.org
  wss.defi-oracle.io
  wss.tw-core.d-bis.org
)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# host_resolves HOST: DNS pre-check so non-resolving names SKIP instead of FAIL.
# Bug fix: getent is Linux-only; previously `! getent ...` was true for every
# host when getent was absent (macOS/BSD), so every FQDN was skipped and the
# run passed vacuously. Fall back to `host`, and with neither tool assume the
# name resolves and let curl report real failures.
host_resolves() {
  if command -v getent >/dev/null 2>&1; then
    getent ahosts "$1" >/dev/null 2>&1
  elif command -v host >/dev/null 2>&1; then
    host "$1" >/dev/null 2>&1
  else
    return 0
  fi
}
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "RPC FQDN E2E — eth_chainId (HTTP) + WSS smoke"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
http_fail=0
echo -e "${BLUE}--- HTTP (POST / JSON-RPC) ---${NC}"
for host in "${HTTP_FQDNS[@]}"; do
  url="https://${host}"
  if ! host_resolves "$host"; then
    echo -e "${YELLOW}SKIP${NC} $url — hostname does not resolve (add DNS or use Core IP :8545)"
    continue
  fi
  resp=$(mktemp)
  code=$(curl -sS -m "$TO" -X POST "$url" \
    -H 'Content-Type: application/json' \
    -d "$BODY" \
    -k -w '%{http_code}' -o "$resp" 2>/dev/null || echo "000")
  # jq is best-effort here: with jq missing or a non-JSON body, cid stays
  # empty and the check falls through to FAIL with the raw body sample.
  cid=$(jq -r '.result // empty' "$resp" 2>/dev/null || true)
  err=$(head -c 120 "$resp" 2>/dev/null | tr -d '\r\n')
  rm -f "$resp"
  if [[ "$code" == "200" && "$cid" == "$EXPECT" ]]; then
    echo -e "${GREEN}OK${NC} $url chainId=$cid"
  elif [[ "$code" == "200" && -n "$cid" ]]; then
    echo -e "${YELLOW}WARN${NC} $url HTTP $code chainId=$cid (expected $EXPECT)"
    ((http_fail++)) || true
  else
    echo -e "${RED}FAIL${NC} $url HTTP $code ${err}"
    ((http_fail++)) || true
  fi
done
echo ""
echo -e "${BLUE}--- WebSocket (wscat eth_chainId) ---${NC}"
ws_fail=0
if ! command -v wscat >/dev/null 2>&1; then
  echo -e "${YELLOW}SKIP${NC} wscat not installed (npm i -g wscat)"
  ws_fail=0
else
  for host in "${WS_FQDNS[@]}"; do
    # Extra 5s over the JSON-RPC timeout so wscat can finish its handshake.
    if out=$(timeout "$((TO + 5))" wscat -n -c "wss://${host}" -x "$BODY" -w 8 2>&1); then
      if echo "$out" | grep -q '"result"'; then
        echo -e "${GREEN}OK${NC} wss://${host}"
      else
        echo -e "${YELLOW}OK*${NC} wss://${host} (connected, no JSON line)"
      fi
    else
      echo -e "${RED}FAIL${NC} wss://${host} $(echo "$out" | tail -1)"
      ((ws_fail++)) || true
    fi
  done
fi
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
if [[ "$http_fail" -eq 0 ]]; then
  echo -e "${GREEN}HTTP: all passed ($EXPECT)${NC}"
else
  echo -e "${RED}HTTP: $http_fail failure(s)${NC}"
fi
if [[ "${STRICT_WSS:-0}" == "1" ]] && [[ "$ws_fail" -gt 0 ]]; then
  echo -e "${RED}WSS: $ws_fail failure(s) (STRICT_WSS=1)${NC}"
  exit 1
fi
if [[ "$http_fail" -gt 0 ]]; then
  exit 1
fi
echo -e "${GREEN}Done.${NC}"
exit 0

View File

@@ -0,0 +1,68 @@
#!/usr/bin/env bash
# Verify token-aggregation HTTP surface for Chain 138 (pools, quotes, bridge routes, and GRU preflight).
# Usage: BASE_URL=https://explorer.d-bis.org bash scripts/verify/check-token-aggregation-chain138-api.sh
# Tries both /api/v1/* and /token-aggregation/api/v1/* (explorer nginx layouts differ).
set -euo pipefail
BASE_URL="${BASE_URL:-https://explorer.d-bis.org}"
BASE_URL="${BASE_URL%/}"
CUSDT="0x93E66202A11B1772E55407B32B44e5Cd8eda7f22"
CUSDC="0xf22258f57794CC8E06237084b353Ab30fFfa640b"
# Bug fix: use a private mktemp scratch dir instead of fixed /tmp/ta-check.json,
# /tmp/br.json and /tmp/gru-preflight.json — predictable names clash between
# concurrent runs/users and are a symlink risk on shared hosts.
TMP_DIR="$(mktemp -d)"
trap 'rm -rf "$TMP_DIR"' EXIT
TA_BODY="$TMP_DIR/ta-check.json"
BR_BODY="$TMP_DIR/br.json"
PREFLIGHT_BODY="$TMP_DIR/gru-preflight.json"
# Fetch BASE_URL+prefix+path, print "  <code> <prefix><path>", and dump a
# short body sample on HTTP 200.
try_path() {
  local prefix="$1"
  local path="$2"
  local url="${BASE_URL}${prefix}${path}"
  local code
  code=$(curl -sS -o "$TA_BODY" -w "%{http_code}" -m 25 "$url" || echo "000")
  echo "  $code ${prefix}${path}"
  if [[ "$code" == 200 ]]; then
    head -c 220 "$TA_BODY"
    echo
  fi
}
echo "== Token-aggregation checks against ${BASE_URL} =="
for prefix in "" "/token-aggregation"; do
  echo ""
  echo "-- prefix: ${prefix:-/} (root /api/v1) --"
  try_path "${prefix}" "/api/v1/tokens?chainId=138&limit=3&includeDodoPool=true"
  try_path "${prefix}" "/api/v1/tokens/${CUSDT}/pools?chainId=138"
  try_path "${prefix}" "/api/v1/quote?chainId=138&tokenIn=${CUSDT}&tokenOut=${CUSDC}&amountIn=1000000"
  try_path "${prefix}" "/api/v1/bridge/routes"
  try_path "${prefix}" "/api/v1/bridge/status"
  try_path "${prefix}" "/api/v1/bridge/preflight"
  try_path "${prefix}" "/api/v1/networks"
done
echo ""
echo ""
echo "== bridge summary =="
for prefix in "" "/token-aggregation"; do
  code=$(curl -sS -o "$BR_BODY" -w "%{http_code}" -m 20 "${BASE_URL}${prefix}/api/v1/bridge/routes" 2>/dev/null || echo 000)
  echo "${prefix:-/} -> HTTP $code"
  if [[ "$code" == "200" ]] && command -v jq >/dev/null 2>&1; then
    jq '{weth9: .chain138Bridges.weth9, weth10: .chain138Bridges.weth10}' "$BR_BODY" 2>/dev/null || head -c 200 "$BR_BODY"
    echo
  fi
done
echo ""
echo "== bridge/preflight summary =="
for prefix in "" "/token-aggregation"; do
  code=$(curl -sS -o "$PREFLIGHT_BODY" -w "%{http_code}" -m 20 "${BASE_URL}${prefix}/api/v1/bridge/preflight" 2>/dev/null || echo 000)
  echo "${prefix:-/} -> HTTP $code"
  if [[ "$code" == "200" ]] && command -v jq >/dev/null 2>&1; then
    jq '{transportPairs: .gruTransport.summary.transportPairs, runtimeReadyTransportPairs: .gruTransport.summary.runtimeReadyTransportPairs, blockedPairs: (.gruTransport.blockedPairs | length)}' "$PREFLIGHT_BODY" 2>/dev/null || head -c 200 "$PREFLIGHT_BODY"
    echo
  fi
done
echo ""
echo "Notes:"
echo "  - Empty tokens/pools: set DATABASE_URL + migrations; RPC to 138; PMM integration now defaults on-chain if env unset."
echo "  - bridge/routes 404: redeploy token-aggregation from repo (implements GET /api/v1/bridge/routes)."
echo "  - bridge/preflight blocked pairs: run bash scripts/verify/check-gru-transport-preflight.sh [BASE_URL] for exact missing refs."
echo "  - Health: curl -s http://127.0.0.1:3001/health on explorer VM (not always proxied as /health)."

View File

@@ -41,6 +41,7 @@ declare -A DOMAIN_ZONES=(
["rpc-http-pub.d-bis.org"]="d-bis.org"
["rpc-ws-pub.d-bis.org"]="d-bis.org"
["rpc-http-prv.d-bis.org"]="d-bis.org"
["rpc-core.d-bis.org"]="d-bis.org"
["rpc-ws-prv.d-bis.org"]="d-bis.org"
["dbis-admin.d-bis.org"]="d-bis.org"
["dbis-api.d-bis.org"]="d-bis.org"

View File

@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# P1 local verification — no LAN deploys, no Proxmox SSH, no on-chain txs.
# Completes automatable slices documented in docs/00-meta/TODOS_CONSOLIDATED.md (P1-F*).
#
# Usage:
#   ./scripts/verify/run-p1-local-verification.sh                   # config + completable
#   ./scripts/verify/run-p1-local-verification.sh --with-iru-tests  # + dbis_core pnpm test:iru-marketplace
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$ROOT"
# The only recognized flag enables the optional IRU test leg.
WITH_IRU=0
for arg in "$@"; do
  case "$arg" in
    --with-iru-tests) WITH_IRU=1 ;;
  esac
done
echo "== P1 local verification (repo root: $ROOT) =="
echo ""
echo "[1/3] validate-config-files.sh"
bash scripts/validation/validate-config-files.sh
echo ""
echo "[2/3] run-completable-tasks-from-anywhere.sh"
./scripts/run-completable-tasks-from-anywhere.sh
echo ""
if (( WITH_IRU )); then
  echo "[3/3] dbis_core pnpm test:iru-marketplace"
  if command -v pnpm &>/dev/null; then
    # Subshell keeps the cwd change local to the test run.
    (cd dbis_core && pnpm test:iru-marketplace)
  else
    echo "SKIP: pnpm not installed; run: cd dbis_core && pnpm test:iru-marketplace"
  fi
else
  echo "[3/3] SKIP IRU tests (pass --with-iru-tests to run)"
fi
echo ""
echo "[OK] P1 local verification finished."

View File

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Run the repo's local deterministic green-path tests for Chain 138 / GRU transport.
# Usage: bash scripts/verify/run-repo-green-test-path.sh
set -euo pipefail
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Output helpers shared in style with the sibling verify scripts.
log() { printf '%s\n' "$*"; }
ok() { printf '[OK] %s\n' "$*"; }
# Each step prints a banner, then runs its command; any failure aborts (set -e).
log ""
log "=== Config validation ==="
bash "$PROJECT_ROOT/scripts/validation/validate-config-files.sh"
log ""
log "=== Chain 138 package CI targets ==="
pnpm --dir "$PROJECT_ROOT/smom-dbis-138" run test:ci
log ""
ok "Repo green-path tests passed."

View File

@@ -1,9 +1,11 @@
#!/usr/bin/env bash
# Exit 0 if every submodule has a clean working tree (no modified/untracked files).
# Use in CI or after merges: bash scripts/verify/submodules-clean.sh
# Set SKIP_EXIT=1 to report dirty submodules without failing.
set -euo pipefail
ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
cd "$ROOT"
SKIP_EXIT="${SKIP_EXIT:-0}"
tmp="$(mktemp)"
trap 'rm -f "$tmp"' EXIT
@@ -25,7 +27,10 @@ done < <(git config --file .gitmodules --get-regexp '^submodule\..*\.path$' | aw
if (( dirty )); then
echo "submodules-clean: dirty submodule working trees:" >&2
cat "$tmp" >&2
exit 1
if [[ "$SKIP_EXIT" != "1" ]]; then
exit 1
fi
exit 0
fi
echo "submodules-clean: OK (all submodules clean)"

View File

@@ -0,0 +1,284 @@
#!/usr/bin/env bash
# Sync address labels from DBIS institutional registry JSON into Blockscout.
# Default: print the planned action only. Use --apply to write.
#
# Supported modes:
# http - POST JSON to a Blockscout-compatible label endpoint
# db - write primary labels directly into Blockscout Postgres address_names
# auto - prefer HTTP if the route exists; otherwise fall back to DB sync
#
# Registry shape: config/dbis-institutional/schemas/address-registry-entry.schema.json
#
# Env (HTTP mode):
# BLOCKSCOUT_BASE_URL default https://explorer.d-bis.org
# BLOCKSCOUT_LABEL_PATH default /api/v1/labels
# BLOCKSCOUT_API_KEY optional Bearer token if the endpoint requires it
#
# Env (DB mode):
# BLOCKSCOUT_DB_SSH_HOST default root@192.168.11.12
# BLOCKSCOUT_DB_CT_VMID default 5000
# BLOCKSCOUT_DB_CONTAINER default blockscout-postgres
# BLOCKSCOUT_DB_USER default blockscout
# BLOCKSCOUT_DB_NAME default blockscout
#
# Usage:
# bash scripts/verify/sync-blockscout-address-labels-from-registry.sh file1.json [file2.json ...]
# bash scripts/verify/sync-blockscout-address-labels-from-registry.sh --from-dir config/dbis-institutional/registry
# bash scripts/verify/sync-blockscout-address-labels-from-registry.sh --apply --mode=db --from-dir config/dbis-institutional/registry
#
set -euo pipefail
# CLI state. APPLY=0 means dry-run (plan only). SYNC_MODE may also come from
# the BLOCKSCOUT_SYNC_MODE env var; it is validated after parsing.
APPLY=0
FROM_DIR=""
SYNC_MODE="${BLOCKSCOUT_SYNC_MODE:-auto}"
while [[ $# -gt 0 ]]; do
  case "$1" in
    --apply) APPLY=1; shift ;;
    --from-dir=*)
      FROM_DIR="${1#*=}"
      shift
      ;;
    --from-dir)
      # ${2:?} aborts with an error if the option value is missing.
      FROM_DIR="${2:?}"
      shift 2
      ;;
    --mode=*)
      SYNC_MODE="${1#*=}"
      shift
      ;;
    --mode)
      SYNC_MODE="${2:?}"
      shift 2
      ;;
    -h|--help)
      # Print this script's header comment block (lines 2-42) as help text.
      sed -n '2,42p' "$0" | sed 's/^# \{0,1\}//'
      exit 0
      ;;
    *) break ;;
  esac
done
case "$SYNC_MODE" in
  auto|http|db) ;;
  *)
    echo "error: --mode must be one of: auto, http, db" >&2
    exit 1
    ;;
esac
# HTTP-mode target endpoint.
BASE_URL="${BLOCKSCOUT_BASE_URL:-https://explorer.d-bis.org}"
LABEL_PATH="${BLOCKSCOUT_LABEL_PATH:-/api/v1/labels}"
URL="${BASE_URL%/}${LABEL_PATH}"
# DB-mode target: SSH host -> Proxmox CT -> docker container -> psql credentials.
BLOCKSCOUT_DB_SSH_HOST="${BLOCKSCOUT_DB_SSH_HOST:-root@192.168.11.12}"
BLOCKSCOUT_DB_CT_VMID="${BLOCKSCOUT_DB_CT_VMID:-5000}"
BLOCKSCOUT_DB_CONTAINER="${BLOCKSCOUT_DB_CONTAINER:-blockscout-postgres}"
BLOCKSCOUT_DB_USER="${BLOCKSCOUT_DB_USER:-blockscout}"
BLOCKSCOUT_DB_NAME="${BLOCKSCOUT_DB_NAME:-blockscout}"
# Input selection: either every top-level *.json in --from-dir, or the
# remaining positional arguments.
files=()
if [[ -n "$FROM_DIR" ]]; then
  if [[ ! -d "$FROM_DIR" ]]; then
    echo "error: --from-dir not a directory: $FROM_DIR" >&2
    exit 1
  fi
  # NUL-delimited read handles filenames containing spaces.
  while IFS= read -r -d '' f; do
    files+=("$f")
  done < <(find "$FROM_DIR" -maxdepth 1 -name '*.json' -print0 2>/dev/null || true)
else
  files=("$@")
fi
if [[ ${#files[@]} -eq 0 ]]; then
  echo "usage: $0 [--apply] [--mode auto|http|db] [--from-dir DIR] <registry.json> [...]" >&2
  echo "   or: REGISTRY_DIR=... $0 --from-dir \"\$REGISTRY_DIR\"" >&2
  exit 1
fi
# jq is required on every code path (payload building and registry parsing).
if ! command -v jq &>/dev/null; then
  echo "error: jq is required" >&2
  exit 1
fi
# Escape a value for embedding in a single-quoted SQL literal by doubling
# every single quote (standard SQL escaping). Output has no trailing newline.
sql_quote() {
  printf '%s' "${1//"'"/"''"}"
}
# Probe whether a Blockscout-compatible label endpoint exists at $URL.
# Sends an empty JSON POST; returns 0 when the response suggests a real
# handler behind the route, 1 otherwise (auto mode then falls back to DB sync).
probe_http_sync() {
  local tmp status
  tmp=$(mktemp)
  # `|| true` keeps set -e from aborting when curl cannot connect; status is
  # then empty and treated as "route not available" by the checks below.
  status=$(curl -sS -o "$tmp" -w '%{http_code}' -X POST "$URL" -H 'Content-Type: application/json' --data '{}' || true)
  local body
  body=$(cat "$tmp")
  rm -f "$tmp"
  # 2xx/4xx except 404 means the route exists and reached a handler.
  if [[ "$status" =~ ^(200|201|202|204|400|401|403|405|409|415|422)$ ]]; then
    return 0
  fi
  # NOTE(review): this branch returns 1 just like the fallthrough below; it
  # exists only to document the explicit "route not found" body on this stack.
  if [[ "$status" == "404" && "$body" == *'"error":"Not found"'* ]]; then
    return 1
  fi
  return 1
}
# Execute a SQL script against the Blockscout Postgres instance.
# Path: SSH to the Proxmox host -> pct exec into the CT -> docker exec psql.
# The SQL arrives via stdin (here-string through ssh) and runs with
# ON_ERROR_STOP=1 so a failed statement propagates a non-zero exit status.
run_db_sql() {
  local sql="$1"
  ssh "$BLOCKSCOUT_DB_SSH_HOST" \
    "pct exec ${BLOCKSCOUT_DB_CT_VMID} -- docker exec -i ${BLOCKSCOUT_DB_CONTAINER} psql -U ${BLOCKSCOUT_DB_USER} -d ${BLOCKSCOUT_DB_NAME} -v ON_ERROR_STOP=1 -f -" \
    <<<"$sql"
}
# Plan or apply one label via the HTTP label endpoint.
# $1 display name (for logs), $2 address, $3 label text, $4 label type.
# Without --apply (APPLY != 1) this only prints the request that would be sent.
emit_http() {
  local display="$1"
  local address="$2"
  local label="$3"
  local ltype="$4"
  local body
  # jq -nc builds the JSON payload with proper escaping of the label text.
  body=$(jq -nc --arg a "$address" --arg l "$label" --arg t "$ltype" '{address:$a,label:$l,type:$t}')
  if [[ "$APPLY" -ne 1 ]]; then
    echo "PLAN mode=http file=$display"
    echo "  POST $URL"
    echo "  $body"
    return 0
  fi
  # Bearer auth header only when BLOCKSCOUT_API_KEY is provided.
  local hdr=()
  if [[ -n "${BLOCKSCOUT_API_KEY:-}" ]]; then
    hdr=(-H "Authorization: Bearer ${BLOCKSCOUT_API_KEY}" -H "Content-Type: application/json")
  else
    hdr=(-H "Content-Type: application/json")
  fi
  echo "POST $display -> $URL"
  # curl -f makes HTTP errors fail the script under set -e.
  curl -fsS "${hdr[@]}" -X POST "$URL" -d "$body" >/dev/null
  echo "ok http $address"
}
# Plan or apply one label by upserting the primary row in Blockscout's
# public.address_names table (used when the HTTP label route is unavailable).
# $1 display name (for logs), $2 address (0x-hex), $3 label text, $4 label type.
emit_db() {
  local display="$1"
  local address="$2"
  local label="$3"
  local ltype="$4"
  # Normalize to bare lowercase hex so decode(..., 'hex') gets canonical input.
  local normalized_address="${address#0x}"
  normalized_address="${normalized_address#0X}"
  normalized_address=$(printf '%s' "$normalized_address" | tr '[:upper:]' '[:lower:]')
  if [[ ! "$normalized_address" =~ ^[0-9a-f]{40}$ ]]; then
    echo "skip (invalid address): $display" >&2
    return 0
  fi
  # Provenance metadata stored alongside the name for later auditing.
  local metadata
  metadata=$(jq -nc \
    --arg source "registry" \
    --arg registryFile "$display" \
    --arg labelType "$ltype" \
    '{source:$source,registryFile:$registryFile,labelType:$labelType}')
  # Upsert keyed on the partial-unique index over "primary" = true rows.
  # All interpolated values are escaped via sql_quote (the address is also
  # regex-validated above); existing metadata is merged, not replaced.
  local sql
  sql=$(cat <<SQL
INSERT INTO public.address_names (
  address_hash,
  name,
  "primary",
  inserted_at,
  updated_at,
  metadata
)
VALUES (
  decode('$(sql_quote "$normalized_address")', 'hex'),
  '$(sql_quote "$label")',
  true,
  NOW(),
  NOW(),
  '$(sql_quote "$metadata")'::jsonb
)
ON CONFLICT (address_hash) WHERE "primary" = true
DO UPDATE SET
  name = EXCLUDED.name,
  updated_at = EXCLUDED.updated_at,
  metadata = COALESCE(public.address_names.metadata, '{}'::jsonb) || COALESCE(EXCLUDED.metadata, '{}'::jsonb);
SQL
)
  if [[ "$APPLY" -ne 1 ]]; then
    echo "PLAN mode=db file=$display"
    echo "  SSH $BLOCKSCOUT_DB_SSH_HOST -> CT $BLOCKSCOUT_DB_CT_VMID -> ${BLOCKSCOUT_DB_NAME}.public.address_names"
    echo "  address=$address label=$label type=$ltype"
    return 0
  fi
  echo "UPSERT $display -> ${BLOCKSCOUT_DB_NAME}.public.address_names"
  run_db_sql "$sql" >/dev/null
  echo "ok db $address"
}
# Process one registry entry file (single-object JSON) in the given mode.
# $1 file path, $2 display name for logs (defaults to the path), $3 http|db.
# Skips (returns 0) on invalid JSON, non-active status, or missing fields so
# one bad entry does not abort the whole batch; unknown modes return 1.
emit_one() {
  local file="$1"
  local display="${2:-$file}"
  local mode="$3"
  local blob
  blob=$(jq -e . "$file" 2>/dev/null) || { echo "skip (invalid JSON): $display" >&2; return 0; }
  local status address label ltype
  # Entries default to active; anything else is intentionally not synced.
  status=$(echo "$blob" | jq -r '.status // "active"')
  [[ "$status" == "active" ]] || { echo "skip (status=$status): $display" >&2; return 0; }
  address=$(echo "$blob" | jq -r '.address // empty')
  label=$(echo "$blob" | jq -r '.blockscout.label // empty')
  ltype=$(echo "$blob" | jq -r '.blockscout.labelType // "contract"')
  if [[ -z "$address" || -z "$label" ]]; then
    echo "skip (missing address or blockscout.label): $display" >&2
    return 0
  fi
  case "$mode" in
    http) emit_http "$display" "$address" "$label" "$ltype" ;;
    db) emit_db "$display" "$address" "$label" "$ltype" ;;
    *)
      echo "error: unsupported mode: $mode" >&2
      return 1
      ;;
  esac
}
# Resolve "auto" into a concrete sync mode.
# Bug fix: "auto" was previously resolved only under --apply, so the default
# dry-run invocation passed mode=auto into emit_one and failed with
# "unsupported mode: auto". Dry runs must not probe the endpoint (the probe
# POSTs to it), so plan in http mode; the real probe still decides http vs db
# when --apply is set.
SELECTED_MODE="$SYNC_MODE"
if [[ "$SYNC_MODE" == "auto" ]]; then
  if [[ "$APPLY" -eq 1 ]]; then
    if probe_http_sync; then
      SELECTED_MODE="http"
    else
      SELECTED_MODE="db"
    fi
  else
    SELECTED_MODE="http"
  fi
fi
for f in "${files[@]}"; do
  [[ -f "$f" ]] || { echo "skip (not a file): $f" >&2; continue; }
  # A registry file is either a single entry object or an array of entries.
  if jq -e 'type == "object" and (.address|type=="string")' "$f" &>/dev/null; then
    emit_one "$f" "$f" "$SELECTED_MODE" || exit 1
  elif jq -e 'type == "array"' "$f" &>/dev/null; then
    # Materialize each array element as its own file so emit_one can reuse the
    # single-entry path; the display name keeps the item index for the logs.
    tmpdir=$(mktemp -d)
    len=$(jq 'length' "$f")
    for ((i = 0; i < len; i++)); do
      jq ".[$i]" "$f" >"$tmpdir/single.json"
      emit_one "$tmpdir/single.json" "$f (item $i)" "$SELECTED_MODE" || { rm -rf "$tmpdir"; exit 1; }
    done
    rm -rf "$tmpdir"
  else
    echo "skip (not object or array of objects): $f" >&2
  fi
done
if [[ "$APPLY" -ne 1 ]]; then
  echo ""
  echo "Dry run only. Re-run with --apply. Use --mode=db for this self-hosted Blockscout when /api/v1 labels is not available."
else
  echo ""
  echo "Completed in mode=$SELECTED_MODE."
fi

View File

@@ -1,5 +1,5 @@
#!/usr/bin/env bash
# Troubleshoot the 6 E2E RPC HTTP failures (405 at edge).
# Troubleshoot E2E RPC HTTP failures (405 at edge); tests 7 primary FQDNs (+ optional --lan NPM Host checks).
# Usage: bash scripts/verify/troubleshoot-rpc-failures.sh [--lan]
# --lan Also test NPMplus direct (192.168.11.167) with Host header; requires LAN access.
@@ -17,6 +17,7 @@ RPC_DOMAINS=(
"rpc.d-bis.org"
"rpc2.d-bis.org"
"rpc-http-prv.d-bis.org"
"rpc-core.d-bis.org"
"rpc.defi-oracle.io"
)
RPC_BODY='{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}'
@@ -35,7 +36,7 @@ info() { echo -e "${BLUE}[INFO]${NC} $1"; }
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Troubleshoot 6 RPC E2E failures (POST → public IP)"
echo "Troubleshoot RPC E2E failures (POST → public IP)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

View File

@@ -65,10 +65,13 @@ declare -A DOMAIN_TYPES_ALL=(
["ws.rpc.d-bis.org"]="rpc-ws"
["ws.rpc2.d-bis.org"]="rpc-ws"
["rpc-http-prv.d-bis.org"]="rpc-http"
["rpc-core.d-bis.org"]="rpc-http"
["rpc-ws-prv.d-bis.org"]="rpc-ws"
["rpc-fireblocks.d-bis.org"]="rpc-http"
["ws.rpc-fireblocks.d-bis.org"]="rpc-ws"
["admin.d-bis.org"]="web"
["dbis-admin.d-bis.org"]="web"
["core.d-bis.org"]="web"
["dbis-api.d-bis.org"]="api"
["dbis-api-2.d-bis.org"]="api"
["secure.d-bis.org"]="web"
@@ -111,6 +114,19 @@ declare -A DOMAIN_TYPES_ALL=(
["gitea.d-bis.org"]="web"
["dev.d-bis.org"]="web"
["codespaces.d-bis.org"]="web"
# DBIS institutional multi-portal program (optional-when-fail until provisioned)
["d-bis.org"]="web"
["www.d-bis.org"]="web"
["members.d-bis.org"]="web"
["developers.d-bis.org"]="web"
["data.d-bis.org"]="api"
["research.d-bis.org"]="web"
["policy.d-bis.org"]="web"
["ops.d-bis.org"]="web"
["identity.d-bis.org"]="web"
["status.d-bis.org"]="web"
["sandbox.d-bis.org"]="web"
["interop.d-bis.org"]="web"
)
# Private/admin profile domains (private RPC + Fireblocks RPC only).
declare -a PRIVATE_PROFILE_DOMAINS=(
@@ -174,7 +190,7 @@ else
fi
# Domains that are optional when any test fails (off-LAN, 502, unreachable); fail → skip so run passes.
_PUB_OPTIONAL_WHEN_FAIL="dapp.d-bis.org mifos.d-bis.org explorer.d-bis.org dbis-admin.d-bis.org dbis-api.d-bis.org dbis-api-2.d-bis.org secure.d-bis.org sankofa.nexus www.sankofa.nexus phoenix.sankofa.nexus www.phoenix.sankofa.nexus the-order.sankofa.nexus www.the-order.sankofa.nexus studio.sankofa.nexus keycloak.sankofa.nexus admin.sankofa.nexus portal.sankofa.nexus dash.sankofa.nexus docs.d-bis.org blockscout.defi-oracle.io mim4u.org www.mim4u.org secure.mim4u.org training.mim4u.org rpc-http-pub.d-bis.org rpc.d-bis.org rpc2.d-bis.org rpc.public-0138.defi-oracle.io rpc.defi-oracle.io ws.rpc.d-bis.org ws.rpc2.d-bis.org"
_PUB_OPTIONAL_WHEN_FAIL="dapp.d-bis.org mifos.d-bis.org explorer.d-bis.org admin.d-bis.org dbis-admin.d-bis.org core.d-bis.org dbis-api.d-bis.org dbis-api-2.d-bis.org secure.d-bis.org d-bis.org www.d-bis.org members.d-bis.org developers.d-bis.org data.d-bis.org research.d-bis.org policy.d-bis.org ops.d-bis.org identity.d-bis.org status.d-bis.org sandbox.d-bis.org interop.d-bis.org sankofa.nexus www.sankofa.nexus phoenix.sankofa.nexus www.phoenix.sankofa.nexus the-order.sankofa.nexus www.the-order.sankofa.nexus studio.sankofa.nexus keycloak.sankofa.nexus admin.sankofa.nexus portal.sankofa.nexus dash.sankofa.nexus docs.d-bis.org blockscout.defi-oracle.io mim4u.org www.mim4u.org secure.mim4u.org training.mim4u.org rpc-http-pub.d-bis.org rpc.d-bis.org rpc2.d-bis.org rpc-core.d-bis.org rpc.public-0138.defi-oracle.io rpc.defi-oracle.io ws.rpc.d-bis.org ws.rpc2.d-bis.org"
_PRIV_OPTIONAL_WHEN_FAIL="rpc-http-prv.d-bis.org rpc-ws-prv.d-bis.org rpc-fireblocks.d-bis.org ws.rpc-fireblocks.d-bis.org"
if [[ -z "${E2E_OPTIONAL_WHEN_FAIL:-}" ]]; then
if [[ "$PROFILE" == "private" ]]; then
@@ -199,6 +215,7 @@ declare -A E2E_HTTPS_PATH=(
["phoenix.sankofa.nexus"]="/health"
["www.phoenix.sankofa.nexus"]="/health"
["studio.sankofa.nexus"]="/studio/"
["data.d-bis.org"]="/v1/health"
)
# Expected apex URL for NPM www → canonical 301/308 (Location must use this host; path from E2E_HTTPS_PATH must appear when set)