docs: Ledger Live integration, contract deploy learnings, NEXT_STEPS updates
Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled

- ADD_CHAIN138_TO_LEDGER_LIVE: Ledger form done; public code review repo bis-innovations/LedgerLive; init/push commands
- CONTRACT_DEPLOYMENT_RUNBOOK: Chain 138 gas price 1 gwei, 36-addr check, TransactionMirror workaround
- CONTRACT_*: AddressMapper, MirrorManager deployed 2026-02-12; 36-address on-chain check
- NEXT_STEPS_FOR_YOU: Ledger done; steps completable now (no LAN); run-completable-tasks-from-anywhere
- MASTER_INDEX, OPERATOR_OPTIONAL, SMART_CONTRACTS_INVENTORY_SIMPLE: updates
- LEDGER_BLOCKCHAIN_INTEGRATION_COMPLETE: bis-innovations/LedgerLive reference

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
defiQUG
2026-02-12 15:46:57 -08:00
parent cc8dcaf356
commit fbda1b4beb
5114 changed files with 498901 additions and 4567 deletions

84
scripts/.env.r630-01 Normal file
View File

@@ -0,0 +1,84 @@
# Sankofa/Phoenix Deployment Configuration for r630-01
# Copy this file to .env.r630-01 and update with your values
# SECURITY: once real values are filled in, keep this file out of version
# control and restrict its permissions (chmod 600). Every CHANGE_THIS_*
# placeholder below MUST be replaced before production use.
# Proxmox Configuration (target hypervisor for all Sankofa guests)
PROXMOX_HOST=192.168.11.11
PROXMOX_NODE=r630-01
PROXMOX_STORAGE=thin1
PROXMOX_USER=root@pam
# Network Configuration (Sankofa services live on VLAN 160)
SANKOFA_VLAN=160
SANKOFA_SUBNET=10.160.0.0/22
SANKOFA_GATEWAY=10.160.0.1
# Service IPs (VLAN 160) — referenced again by the DB/Keycloak/API/Portal
# sections below; keep them in sync if you change one.
SANKOFA_POSTGRES_IP=10.160.0.13
SANKOFA_API_IP=10.160.0.10
SANKOFA_PORTAL_IP=10.160.0.11
SANKOFA_KEYCLOAK_IP=10.160.0.12
# VMIDs (Proxmox guest IDs, 7800-7803 block)
VMID_SANKOFA_POSTGRES=7803
VMID_SANKOFA_API=7800
VMID_SANKOFA_PORTAL=7801
VMID_SANKOFA_KEYCLOAK=7802
# Database Configuration (DB_HOST matches SANKOFA_POSTGRES_IP above)
DB_HOST=10.160.0.13
DB_PORT=5432
DB_NAME=sankofa
DB_USER=sankofa
DB_PASSWORD=CHANGE_THIS_PASSWORD_IN_PRODUCTION
POSTGRES_SUPERUSER_PASSWORD=CHANGE_THIS_PASSWORD_IN_PRODUCTION
# Keycloak Configuration (URLs point at SANKOFA_KEYCLOAK_IP above)
KEYCLOAK_URL=http://10.160.0.12:8080
KEYCLOAK_ADMIN_URL=http://10.160.0.12:8080/admin
KEYCLOAK_REALM=master
KEYCLOAK_ADMIN_USERNAME=admin
KEYCLOAK_ADMIN_PASSWORD=CHANGE_THIS_PASSWORD_IN_PRODUCTION
KEYCLOAK_CLIENT_ID_API=sankofa-api
KEYCLOAK_CLIENT_ID_PORTAL=portal-client
KEYCLOAK_CLIENT_SECRET_API=CHANGE_THIS_SECRET_IN_PRODUCTION
KEYCLOAK_CLIENT_SECRET_PORTAL=CHANGE_THIS_SECRET_IN_PRODUCTION
KEYCLOAK_MULTI_REALM=false
# API Configuration (GraphQL endpoints point at SANKOFA_API_IP above)
API_HOST=10.160.0.10
API_PORT=4000
NEXT_PUBLIC_GRAPHQL_ENDPOINT=http://10.160.0.10:4000/graphql
NEXT_PUBLIC_GRAPHQL_WS_ENDPOINT=ws://10.160.0.10:4000/graphql-ws
JWT_SECRET=CHANGE_THIS_JWT_SECRET_IN_PRODUCTION
NODE_ENV=production
# Portal Configuration (app URL points at SANKOFA_PORTAL_IP above)
PORTAL_HOST=10.160.0.11
PORTAL_PORT=3000
NEXT_PUBLIC_APP_URL=http://10.160.0.11:3000
NEXT_PUBLIC_CROSSPLANE_API=http://crossplane.sankofa.nexus
NEXT_PUBLIC_ARGOCD_URL=http://argocd.sankofa.nexus
NEXT_PUBLIC_GRAFANA_URL=http://grafana.sankofa.nexus
NEXT_PUBLIC_LOKI_URL=http://loki.sankofa.nexus:3100
NEXTAUTH_URL=http://10.160.0.11:3000
NEXTAUTH_SECRET=CHANGE_THIS_NEXTAUTH_SECRET_IN_PRODUCTION
# Multi-Tenancy
ENABLE_MULTI_TENANT=true
# DEFAULT_TENANT_ID intentionally blank — presumably means "no default
# tenant"; NOTE(review): confirm against the application's tenant config.
DEFAULT_TENANT_ID=
# Billing Configuration
BILLING_GRANULARITY=SECOND
BLOCKCHAIN_BILLING_ENABLED=false
BLOCKCHAIN_IDENTITY_ENABLED=false
# Blockchain (Optional) — leave blank to disable blockchain features
BLOCKCHAIN_RPC_URL=
RESOURCE_PROVISIONING_CONTRACT_ADDRESS=
# Monitoring (Optional)
NEXT_PUBLIC_SENTRY_DSN=
SENTRY_AUTH_TOKEN=
# Analytics (Optional)
NEXT_PUBLIC_ANALYTICS_ID=

View File

@@ -1,4 +1,12 @@
#!/bin/bash
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Quick script to install Cloudflare Tunnel service
# Usage: ./INSTALL_TUNNEL.sh <TUNNEL_TOKEN>

46
scripts/INSTALL_TUNNEL.sh.bak Executable file
View File

@@ -0,0 +1,46 @@
#!/bin/bash
set -euo pipefail
# Install the Cloudflare Tunnel service inside an LXC container on a Proxmox
# host: stops any existing cloudflared unit, installs the tunnel service with
# the given token, enables + starts it, and prints its status.
#
# Usage: ./INSTALL_TUNNEL.sh <TUNNEL_TOKEN>
#
# Env overrides:
#   PROXMOX_HOST     - Proxmox host to SSH into (default: 192.168.11.10)
#   CLOUDFLARED_VMID - LXC container ID running cloudflared (default: 102)

# BUGFIX: use "${1:-}" — under `set -u` a bare "$1" aborts with an
# "unbound variable" error before the usage message can be printed.
if [ -z "${1:-}" ]; then
  echo "Error: Tunnel token required!"
  echo ""
  echo "Usage: $0 <TUNNEL_TOKEN>"
  echo ""
  echo "Get your token from Cloudflare Dashboard:"
  echo " Zero Trust → Networks → Tunnels → Create tunnel → Copy token"
  exit 1
fi
TUNNEL_TOKEN="$1"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
CLOUDFLARED_VMID="${CLOUDFLARED_VMID:-102}"
echo "Installing Cloudflare Tunnel service..."
echo "Container: VMID $CLOUDFLARED_VMID"
# Stop existing DoH service if running (ignore failure: unit may not exist yet)
ssh "root@${PROXMOX_HOST}" "pct exec $CLOUDFLARED_VMID -- systemctl stop cloudflared 2>/dev/null || true"
# Install tunnel service with the provided token
ssh "root@${PROXMOX_HOST}" "pct exec $CLOUDFLARED_VMID -- cloudflared service install $TUNNEL_TOKEN"
# Enable and start
ssh "root@${PROXMOX_HOST}" "pct exec $CLOUDFLARED_VMID -- systemctl enable cloudflared"
ssh "root@${PROXMOX_HOST}" "pct exec $CLOUDFLARED_VMID -- systemctl start cloudflared"
# Check status
echo ""
echo "Checking tunnel status..."
ssh "root@${PROXMOX_HOST}" "pct exec $CLOUDFLARED_VMID -- systemctl status cloudflared --no-pager | head -10"
echo ""
echo "✅ Tunnel service installed!"
echo ""
echo "Next steps:"
echo "1. Configure routes in Cloudflare Dashboard"
echo "2. Update DNS records to CNAME pointing to tunnel"
echo "3. See: docs/04-configuration/CLOUDFLARE_TUNNEL_QUICK_SETUP.md"

View File

@@ -1,4 +1,12 @@
#!/bin/bash
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Quick SSH key setup for Proxmox deployment
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"

28
scripts/QUICK_SSH_SETUP.sh.bak Executable file
View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Quick SSH key setup for Proxmox deployment: generate an ed25519 key if one
# is missing, copy the public key to the Proxmox host, then verify that
# password-less login works.
set -euo pipefail

PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
key_file="$HOME/.ssh/id_ed25519"

echo "Setting up SSH key for Proxmox host..."

# Only generate a key on first run; an existing key is reused as-is.
if [[ ! -f "$key_file" ]]; then
  echo "Generating SSH key..."
  ssh-keygen -t ed25519 -f "$key_file" -N "" -C "proxmox-deployment"
fi

echo "Copying SSH key to Proxmox host..."
echo "You will be prompted for the root password:"
ssh-copy-id -i "${key_file}.pub" root@"${PROXMOX_HOST}"

echo ""
echo "Testing SSH connection..."
# BatchMode disables password prompts, so a successful command run proves
# that key-based authentication is working.
if ssh -o BatchMode=yes -o ConnectTimeout=5 root@"${PROXMOX_HOST}" "echo 'SSH key working'" 2>/dev/null; then
  echo "✅ SSH key setup successful!"
  echo "You can now run deployment without password prompts:"
  echo " ./scripts/deploy-to-proxmox-host.sh"
else
  echo "⚠️ SSH key may not be working. You'll need to enter password during deployment."
fi

View File

@@ -1,118 +1,297 @@
# Project Root Scripts
# Scripts Directory
This directory contains utility scripts for managing the Proxmox workspace project.
**Last Updated:** 2026-01-31
## Setup Scripts
---
### `setup.sh`
Initial setup script that creates `.env` file and Claude Desktop configuration.
## Overview
This directory contains automation scripts for Proxmox VE management. Scripts have been consolidated into unified frameworks for better maintainability.
**Current Count:** 381 scripts (down from 759 - 50% reduction)
---
## Unified Frameworks
### 1. verify-all.sh
Verification framework consolidating all check/verify/validate scripts.
**Usage:**
```bash
./scripts/setup.sh
./scripts/verify-all.sh [component] [type] [host]
```
### `complete-setup.sh`
Complete setup script that performs all setup steps including dependency installation.
**Examples:**
```bash
./scripts/verify-all.sh all
./scripts/verify-all.sh service status
./scripts/verify-all.sh network connectivity
```
**See:** `docs/00-meta/FRAMEWORK_USAGE_GUIDE.md` for complete documentation.
---
### 2. list.sh
Listing framework consolidating all list/show/get scripts.
**Usage:**
```bash
./scripts/complete-setup.sh
./scripts/list.sh [type] [filter] [host]
```
### `verify-setup.sh`
Verifies that the workspace is properly configured and all prerequisites are met.
**Examples:**
```bash
./scripts/list.sh containers
./scripts/list.sh containers running
./scripts/list.sh vms r630-01
```
---
### 3. fix-all.sh
Fix framework consolidating all fix-*.sh scripts.
**Usage:**
```bash
./scripts/verify-setup.sh
./scripts/fix-all.sh [issue-type] [component] [host] [--dry-run]
```
## Environment Configuration
**Examples:**
```bash
./scripts/fix-all.sh all
./scripts/fix-all.sh service postgresql 10100
./scripts/fix-all.sh network all --dry-run
```
### `configure-env.sh`
Quick configuration script to update `.env` with Proxmox credentials.
---
### 4. configure.sh
Configuration framework consolidating all configure/config scripts.
**Usage:**
```bash
./scripts/configure-env.sh
./scripts/configure.sh [component] [action] [host]
```
### `load-env.sh`
Standardized `.env` loader function. Can be sourced by other scripts.
**Examples:**
```bash
./scripts/configure.sh all setup
./scripts/configure.sh network update
./scripts/configure.sh ssl validate
```
---
### 5. deploy.sh
Deployment framework consolidating all deploy/setup/install scripts.
**Usage:**
```bash
source scripts/load-env.sh
load_env_file
./scripts/deploy.sh [component] [options] [host]
```
Or run directly:
**Examples:**
```bash
./scripts/load-env.sh
./scripts/deploy.sh all
./scripts/deploy.sh service postgresql
./scripts/deploy.sh all --phase=1
```
## Token Management
---
### `create-proxmox-token.sh`
Creates a Proxmox API token programmatically.
### 6. CCIP WETH9 Bridge (Chain 138) router mismatch fix
Deploy and configure a new WETH9 bridge using the **working** CCIP router (fixes router mismatch where the old bridge pointed to an address with no code).
**Usage:**
```bash
./scripts/create-proxmox-token.sh <host> <user> <password> [token-name]
# Dry-run (no PRIVATE_KEY): simulate deploy and config
./scripts/deploy-and-configure-weth9-bridge-chain138.sh --dry-run
# Real run
export PRIVATE_KEY=0x... # required
export CHAIN138_RPC_URL=http://192.168.11.211:8545 # admin/deployment (RPC_CORE_1)
./scripts/deploy-and-configure-weth9-bridge-chain138.sh
# Then: export CCIPWETH9_BRIDGE_CHAIN138=<printed address>
```
**Example:**
All bridge scripts use `CCIPWETH9_BRIDGE_CHAIN138` when set; otherwise they fall back to the previous bridge address. See `COMPREHENSIVE_STATUS_BRIDGE_READY.md` and `.env.example` (CCIP section).
---
### 7. Contract Verification (Blockscout)
Verify deployed contracts on Blockscout (Chain 138) using the **Forge Verification Proxy** (required for Forge/Blockscout API compatibility).
**Preferred: orchestrated script (starts proxy if needed):**
```bash
./scripts/create-proxmox-token.sh 192.168.11.10 root@pam mypassword mcp-server
source smom-dbis-138/.env 2>/dev/null
./scripts/verify/run-contract-verification-with-proxy.sh
```
### `update-token.sh`
Interactively updates the `PROXMOX_TOKEN_VALUE` in `~/.env`.
**Manual (proxy + verify):**
```bash
# 1. Start proxy (separate terminal)
BLOCKSCOUT_URL=http://192.168.11.140:4000 node forge-verification-proxy/server.js
# 2. Run verification
./scripts/verify-contracts-blockscout.sh
```
**Env:** `FORGE_VERIFY_TIMEOUT=600` (default; set to `0` for no limit). Uses `scripts/lib/load-project-env.sh` for config.
**See:** `forge-verification-proxy/README.md`, `docs/03-deployment/BLOCKSCOUT_FIX_RUNBOOK.md`
### 8. CCIP WETH9 Bridge — send ETH (WETH) to mainnet
Send WETH cross-chain via CCIP (Chain 138 → Ethereum mainnet or other destination). Uses `PRIVATE_KEY` and `CCIPWETH9_BRIDGE_CHAIN138` from env (load-project-env).
**Send to mainnet (exact command):**
```bash
cd /home/intlc/projects/proxmox
source smom-dbis-138/.env
export CCIP_DEST_CHAIN_SELECTOR=5009297550715157269 # Ethereum mainnet
./scripts/bridge/run-send-cross-chain.sh <amount_eth> [recipient]
# Example: ./scripts/bridge/run-send-cross-chain.sh 0.005
# With recipient: ./scripts/bridge/run-send-cross-chain.sh 0.005 0xYourMainnetAddress
```
**Dry-run (simulate only):**
```bash
./scripts/bridge/run-send-cross-chain.sh <amount_eth> [recipient] --dry-run
```
Default bridge in `.env` is the **LINK-fee** bridge (pay fee in Chain 138 LINK). To pay fee in **native ETH**, set `CCIPWETH9_BRIDGE_CHAIN138=0x63cbeE010D64ab7F1760ad84482D6cC380435ab5`.
**Requirements:** Sender must have (1) WETH on Chain 138 (balance ≥ amount), (2) for LINK-fee bridge: LINK on Chain 138 approved for the bridge; for native-ETH bridge: sufficient ETH for fee. When using a **new** bridge address, approve both WETH and LINK to that bridge. Recipient defaults to sender address if omitted.
**If send reverts** (e.g. `0x9996b315` with fee-token address): the CCIP router on Chain 138 may not accept the bridge's fee token (LINK at `0xb772...`). See [docs/07-ccip/SEND_ETH_TO_MAINNET_REVERT_TRACE.md](../docs/07-ccip/SEND_ETH_TO_MAINNET_REVERT_TRACE.md) for the revert trace and fix options.
**Env:** `CCIP_DEST_CHAIN_SELECTOR` (default: 5009297550715157269 = Ethereum mainnet); `GAS_PRICE` (default: 1000000000); `CONFIRM_ABOVE_ETH` (optional; prompt for confirmation above this amount).
### 9. DBIS Frontend Deploy to Container
Deploy dbis-frontend build to Proxmox container VMID 10130. Builds locally, pushes dist, reloads nginx.
**Usage:**
```bash
./scripts/update-token.sh
./scripts/dbis/deploy-dbis-frontend-to-container.sh
```
## Testing & Validation
**Env:** Uses `load-project-env.sh` and `get_host_for_vmid()`. `DBIS_FRONTEND_DEPLOY_PATH` overrides container deploy path (e.g. `/opt/dbis-core/frontend/dist`).
### `test-connection.sh`
Tests the connection to the Proxmox API using credentials from `~/.env`.
### 10. CT 2301 Corrupted Rootfs Recovery
CT 2301 (besu-rpc-private-1) may fail to start with `lxc.hook.pre-start` due to corrupted rootfs.
**Scripts:**
- `./scripts/fix-ct-2301-corrupted-rootfs.sh` — documents recovery options
- `./scripts/recreate-ct-2301.sh` — destroys and recreates CT 2301 (data loss; use after corrupted rootfs). Uses `load-project-env.sh` for config.
### 11. Backup and Security
- **Config backup:** `./scripts/backup-proxmox-configs.sh [--dry-run]` — backs up local config and .env
- **NPMplus backup:** `./scripts/verify/backup-npmplus.sh [--dry-run]` — requires NPM_PASSWORD in .env
- **Wave 0 from LAN:** `./scripts/run-wave0-from-lan.sh [--dry-run] [--skip-backup] [--skip-rpc-fix]` — runs NPMplus RPC fix (W0-1) and NPMplus backup (W0-3); W0-2 (sendCrossChain) run separately without `--dry-run`.
- **All waves (max parallel):** `./scripts/run-all-waves-parallel.sh [--dry-run] [--skip-wave0] [--skip-wave2] [--host HOST]` — Wave 0 via SSH, Wave 1 parallel (env, cron, SSH/firewall dry-run, shellcheck, validate), Wave 2 W2-6 (create 2506/2507/2508). See `docs/00-meta/FULL_PARALLEL_EXECUTION_ORDER.md` and `FULL_PARALLEL_RUN_LOG.md`.
- **NPMplus backup cron:** `./scripts/maintenance/schedule-npmplus-backup-cron.sh [--install|--show]` — add or print daily 03:00 cron for backup-npmplus.sh.
- **Security:** `./scripts/security/secure-env-permissions.sh [--dry-run]` or `chmod 600 .env smom-dbis-138/.env dbis_core/.env` — secure env files. **Validator keys (W1-19):** On Proxmox host as root: `./scripts/secure-validator-keys.sh [--dry-run]` (VMIDs 1000–1004).
### 12. Maintenance (135–139)
- **Daily/weekly checks:** `./scripts/maintenance/daily-weekly-checks.sh [daily|weekly|all]` — explorer sync (135), RPC health (136), config API (137). **Cron:** `./scripts/maintenance/schedule-daily-weekly-cron.sh [--install|--show]` (daily 08:00, weekly Sun 09:00). See [OPERATIONAL_RUNBOOKS.md](../docs/03-deployment/OPERATIONAL_RUNBOOKS.md) § Maintenance.
- **Start firefly-ali-1 (6201):** `./scripts/maintenance/start-firefly-6201.sh [--dry-run] [--host HOST]` — start CT 6201 on r630-02 when needed (optional ongoing).
- **Config validation (pre-deploy):** `./scripts/validation/validate-config-files.sh` — set `VALIDATE_REQUIRED_FILES` for required paths. **CI / all validation:** `./scripts/verify/run-all-validation.sh [--skip-genesis]` — dependencies + config + optional genesis (no LAN/SSH).
### 13. Phase 2, 3 & 4 Deployment Scripts
- **Monitoring (Phase 2):** `./scripts/deployment/phase2-observability.sh [--config-only]` — writes `config/monitoring/` (prometheus.yml, alertmanager.yml).
- **Security (Phase 2):** `./scripts/security/setup-ssh-key-auth.sh [--dry-run|--apply]`, `./scripts/security/firewall-proxmox-8006.sh [--dry-run|--apply] [CIDR]`.
- **Backup (Phase 2):** `./scripts/backup/automated-backup.sh [--dry-run] [--with-npmplus]` — config + optional NPMplus; cron in header.
- **CCIP (Phase 3):** `./scripts/ccip/ccip-deploy-checklist.sh` — env check and deployment order from spec.
- **Sovereign tenants (Phase 4):** `./scripts/deployment/phase4-sovereign-tenants.sh [--show-steps|--dry-run]` — checklist; full runbook in OPERATIONAL_RUNBOOKS § Phase 4.
- **Full verification (6 steps):** `./scripts/verify/run-full-verification.sh` — Step 0: config validation; Steps 15: DNS, UDM Pro, NPMplus, backend VMs, E2E routing; Step 6: source-of-truth JSON. Run from project root.
---
## Utility Modules
Shared utility functions are available in `scripts/utils/`:
- `container-utils.sh` - Container operations
- `network-utils.sh` - Network operations
- `service-utils.sh` - Service operations
- `config-utils.sh` - Configuration operations
- `proxmox-utils.sh` - Proxmox operations
**Usage:**
```bash
./scripts/test-connection.sh
source "$(dirname "${BASH_SOURCE[0]}")/../utils/container-utils.sh"
container_status 5000
container_restart 5000
```
### `validate-ml110-deployment.sh`
Comprehensive validation script for deployment to ml110-01.
---
**Usage:**
```bash
./scripts/validate-ml110-deployment.sh
## Shared Libraries
Core shared modules in `scripts/lib/`:
- **`load-project-env.sh`** — Load project environment (.env, config/ip-addresses.conf, smom-dbis-138/.env). **Use this** instead of hardcoding IPs or sourcing multiple files. Scripts that need config should `source "${SCRIPT_DIR}/lib/load-project-env.sh"`.
- `ip-config.sh` - Centralized IP address configuration
- `logging.sh` - Consistent logging functions
- `proxmox-api.sh` - Proxmox API helpers
- `ssh-helpers.sh` - SSH utility functions
---
## Migration
Old scripts have been archived to `scripts/archive/consolidated/`. Use the frameworks instead.
**Migration Guide:** `docs/00-meta/FRAMEWORK_MIGRATION_GUIDES.md`
**Migration Examples:** `docs/00-meta/MIGRATION_EXAMPLES.md`
**Migration Checklist:** `docs/00-meta/MIGRATION_CHECKLIST.md`
---
## Directory Structure
```
scripts/
├── lib/ # Shared libraries (load-project-env.sh, etc.)
├── bridge/ # CCIP bridge scripts
│ └── run-send-cross-chain.sh
├── dbis/ # DBIS Core deployment scripts
│ └── deploy-dbis-frontend-to-container.sh
├── verify/ # Verification scripts
│ ├── check-contracts-on-chain-138.sh # On-chain bytecode check (Chain 138)
│ ├── run-contract-verification-with-proxy.sh
│ └── ... # Other verify scripts
├── utils/ # Utility modules
├── archive/ # Archived scripts
│ ├── consolidated/ # Migrated scripts
│ ├── small-scripts/# Merged small scripts
│ ├── test/ # Test scripts
│ └── backups/ # Backup scripts
├── verify-all.sh # Verification framework
├── list.sh # Listing framework
├── fix-all.sh # Fix framework
├── configure.sh # Configuration framework
└── deploy.sh # Deployment framework
```
This script validates:
- Prerequisites
- Proxmox connection
- Storage availability
- Template availability
- Configuration files
- Deployment scripts
- Resource requirements
---
## Script Dependencies
## Documentation
All scripts use the standardized `~/.env` file for configuration. See [docs/ENV_STANDARDIZATION.md](/docs/04-configuration/ENV_STANDARDIZATION.md) for details.
- **Framework Usage:** `docs/00-meta/FRAMEWORK_USAGE_GUIDE.md`
- **Migration Guides:** `docs/00-meta/FRAMEWORK_MIGRATION_GUIDES.md`
- **Final Report:** `docs/00-meta/FINAL_REDUCTION_REPORT.md`
- **Script Inventory:** `docs/00-meta/SCRIPT_INVENTORY.md`
## Environment Variables
All scripts expect these variables in `~/.env`:
- `PROXMOX_HOST` - Proxmox host IP or hostname
- `PROXMOX_PORT` - Proxmox API port (default: 8006)
- `PROXMOX_USER` - Proxmox API user (e.g., root@pam)
- `PROXMOX_TOKEN_NAME` - API token name
- `PROXMOX_TOKEN_VALUE` - API token secret value
---
**Status:** ✅ Scripts consolidated and documented

View File

@@ -4,13 +4,19 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
source "$SOURCE_PROJECT/.env" 2>/dev/null || true
RPC_URL="${RPC_URL_138:-http://192.168.11.250:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
RPC_URL="${RPC_URL_138:-http://${RPC_ALLTRA_1:-${RPC_ALLTRA_1:-192.168.11.250}}:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x971cD9D156f193df8051E48043C476e53ECd4693}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
echo "=== Access Control Audit ==="

View File

@@ -0,0 +1,82 @@
#!/usr/bin/env bash
# Access control audit and improvements
# Usage: ./access-control-audit.sh
#
# Pulls RPC endpoint and bridge addresses from the project .env (when
# present), queries each bridge's owner() and paused() state via `cast`,
# and prints hardening recommendations plus emergency pause commands.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
# Best-effort: overrides are optional, so a missing .env is not fatal.
source "$SOURCE_PROJECT/.env" 2>/dev/null || true

RPC_URL="${RPC_URL_138:-http://192.168.11.250:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"

echo "=== Access Control Audit ==="
echo ""

# query_bridge CONTRACT SIGNATURE — read-only eth_call via cast; prints
# "N/A" on any failure (unsupported function, unreachable RPC, no cast).
query_bridge() {
  cast call "$1" "$2" --rpc-url "$RPC_URL" 2>/dev/null || echo "N/A"
}

# Print the owner of each bridge plus general admin-hardening advice.
check_admin_roles() {
  local owner9 owner10
  owner9=$(query_bridge "$WETH9_BRIDGE" "owner()")
  owner10=$(query_bridge "$WETH10_BRIDGE" "owner()")
  echo "## Admin Roles"
  echo ""
  echo "WETH9 Bridge Admin: $owner9"
  echo "WETH10 Bridge Admin: $owner10"
  echo ""
  echo "## Recommendations"
  echo ""
  echo "1. ✅ Use multi-sig wallet for admin operations"
  echo "2. ✅ Implement role-based access control"
  echo "3. ✅ Regular review of admin addresses"
  echo "4. ✅ Use hardware wallets for key management"
  echo "5. ✅ Implement rate limiting on bridge operations"
  echo ""
}

# Report paused() state and the exact cast commands to pause/unpause.
check_pause_functionality() {
  local paused9 paused10
  paused9=$(query_bridge "$WETH9_BRIDGE" "paused()")
  paused10=$(query_bridge "$WETH10_BRIDGE" "paused()")
  echo "## Pause Functionality"
  echo ""
  echo "WETH9 Bridge Paused: $paused9"
  echo "WETH10 Bridge Paused: $paused10"
  echo ""
  echo "## Emergency Procedures"
  echo ""
  echo "To pause bridge:"
  echo " cast send $WETH9_BRIDGE 'pause()' --rpc-url $RPC_URL --private-key \$PRIVATE_KEY"
  echo ""
  echo "To unpause bridge:"
  echo " cast send $WETH9_BRIDGE 'unpause()' --rpc-url $RPC_URL --private-key \$PRIVATE_KEY"
  echo ""
}

# Static checklist of operational security practices for the bridges.
security_recommendations() {
  echo "## Security Recommendations"
  echo ""
  echo "1. **Multi-Signature Wallet**: Upgrade admin to multi-sig for critical operations"
  echo "2. **Role-Based Access**: Implement granular role-based access control"
  echo "3. **Key Management**: Use hardware wallets or secure key management systems"
  echo "4. **Rate Limiting**: Implement rate limiting on bridge operations"
  echo "5. **Monitoring**: Set up alerts for admin operations"
  echo "6. **Audit Trail**: Maintain comprehensive audit logs"
  echo "7. **Regular Reviews**: Conduct regular access control reviews"
  echo ""
}

check_admin_roles
check_pause_functionality
security_recommendations

View File

@@ -4,6 +4,12 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Load environment variables
ENV_FILE="${HOME}/.env"
if [ ! -f "$ENV_FILE" ]; then
@@ -85,7 +91,7 @@ echo " - Click 'Launch' on your Omada Controller"
echo " - Navigate to: Settings → Firewall → Firewall Rules"
echo ""
echo "4. Check for firewall rules blocking Blockscout:"
echo " - Destination IP: 192.168.11.140"
echo " - Destination IP: ${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}"
echo " - Destination Port: 80"
echo " - Action: Deny or Reject"
echo ""
@@ -95,8 +101,8 @@ echo " Enable: Yes"
echo " Action: Allow"
echo " Direction: Forward"
echo " Protocol: TCP"
echo " Source IP: 192.168.11.0/24"
echo " Destination IP: 192.168.11.140"
echo " Source IP: ${NETWORK_192_168_11_0:-192.168.11.0}/24"
echo " Destination IP: ${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}"
echo " Destination Port: 80"
echo " Priority: High (above deny rules)"
echo ""

View File

@@ -0,0 +1,128 @@
#!/bin/bash
# Access Omada Cloud Controller and check firewall rules for Blockscout
# This script helps automate access to the cloud controller web interface
#
# Reads TP-Link / Omada cloud credentials from ~/.env, prints step-by-step
# instructions for reviewing firewall rules that may block Blockscout
# (192.168.11.140:80), and optionally opens the cloud controller URL in a
# browser when a graphical environment is available.
set -euo pipefail

# Load environment variables
ENV_FILE="${HOME}/.env"
if [ ! -f "$ENV_FILE" ]; then
  echo "Error: .env file not found at $ENV_FILE"
  exit 1
fi
# Load environment variables manually to avoid issues with special characters.
# Only OMADA/TP_LINK/TPLINK entries are imported; surrounding quotes stripped.
while IFS='=' read -r key value || [ -n "$key" ]; do
  # Skip comments and empty lines
  [[ "$key" =~ ^[[:space:]]*# ]] && continue
  [[ -z "$key" ]] && continue
  # Remove quotes if present
  value=$(echo "$value" | sed -e 's/^"//' -e 's/"$//' -e "s/^'//" -e "s/'$//")
  # Export variable (NOTE(review): assumes keys contain no whitespace)
  export "$key=$value"
done < <(grep -v '^#' "$ENV_FILE" | grep -v '^$' | grep -iE "OMADA|TP_LINK|TPLINK")

# Omada Cloud Controller URL
CLOUD_CONTROLLER_URL="https://omada.tplinkcloud.com"

# Try to detect cloud controller credentials.
# Common variable names for TP-Link/Omada cloud credentials
TP_LINK_USERNAME="${TP_LINK_USERNAME:-${OMADA_CLOUD_USERNAME:-${OMADA_TP_LINK_ID:-}}}"
TP_LINK_PASSWORD="${TP_LINK_PASSWORD:-${OMADA_CLOUD_PASSWORD:-${OMADA_TP_LINK_PASSWORD:-}}}"
# Fallback to admin credentials if cloud-specific ones aren't found
if [ -z "$TP_LINK_USERNAME" ]; then
  TP_LINK_USERNAME="${OMADA_ADMIN_USERNAME:-${OMADA_API_KEY:-}}"
fi
if [ -z "$TP_LINK_PASSWORD" ]; then
  TP_LINK_PASSWORD="${OMADA_ADMIN_PASSWORD:-${OMADA_API_SECRET:-}}"
fi

echo "════════════════════════════════════════"
echo "Omada Cloud Controller Access Helper"
echo "════════════════════════════════════════"
echo ""
echo "Cloud Controller URL: $CLOUD_CONTROLLER_URL"
echo ""
if [ -z "$TP_LINK_USERNAME" ] || [ -z "$TP_LINK_PASSWORD" ]; then
  echo "❌ Error: Cloud Controller credentials not found in .env file"
  echo ""
  echo "Required environment variables (one of these combinations):"
  echo " Option 1 (TP-Link ID):"
  echo " TP_LINK_USERNAME=your-tp-link-id"
  echo " TP_LINK_PASSWORD=your-tp-link-password"
  echo ""
  echo " Option 2 (Omada Cloud):"
  echo " OMADA_CLOUD_USERNAME=your-cloud-username"
  echo " OMADA_CLOUD_PASSWORD=your-cloud-password"
  echo ""
  echo " Option 3 (Omada TP-Link ID):"
  echo " OMADA_TP_LINK_ID=your-tp-link-id"
  echo " OMADA_TP_LINK_PASSWORD=your-tp-link-password"
  echo ""
  echo "Available Omada-related variables in .env:"
  # BUGFIX: read from "$ENV_FILE" (the file actually loaded above) rather
  # than a literal ./.env in the current directory; also drops the useless
  # `cat`. Secrets stay hidden via the sed substitution.
  grep -i "OMADA\|TP" "$ENV_FILE" | grep -v "^#" | sed 's/=.*/=<hidden>/' || echo " (none found)"
  exit 1
fi
echo "✓ Credentials found in .env file"
echo ""
echo "To access Omada Cloud Controller:"
echo ""
echo "1. Open browser and navigate to:"
echo " $CLOUD_CONTROLLER_URL"
echo ""
echo "2. Login with credentials:"
echo " Username: $TP_LINK_USERNAME"
echo " Password: [hidden - check .env file]"
echo ""
echo "3. After logging in:"
echo " - Click 'Launch' on your Omada Controller"
echo " - Navigate to: Settings → Firewall → Firewall Rules"
echo ""
echo "4. Check for firewall rules blocking Blockscout:"
echo " - Destination IP: 192.168.11.140"
echo " - Destination Port: 80"
echo " - Action: Deny or Reject"
echo ""
echo "5. Create allow rule if needed:"
echo " Name: Allow Internal to Blockscout HTTP"
echo " Enable: Yes"
echo " Action: Allow"
echo " Direction: Forward"
echo " Protocol: TCP"
echo " Source IP: 192.168.11.0/24"
echo " Destination IP: 192.168.11.140"
echo " Destination Port: 80"
echo " Priority: High (above deny rules)"
echo ""
# Check if we're in a graphical environment and can open browser
if command -v xdg-open &> /dev/null; then
  read -p "Open Omada Cloud Controller in browser? (y/n) " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "Opening $CLOUD_CONTROLLER_URL..."
    xdg-open "$CLOUD_CONTROLLER_URL" 2>/dev/null || echo "Could not open browser automatically. Please open manually."
  fi
# BUGFIX: "${DISPLAY:-}" — a bare "$DISPLAY" aborts under `set -u` on
# headless systems where DISPLAY is unset, skipping the manual-open hint.
elif [ -n "${DISPLAY:-}" ] && command -v open &> /dev/null; then
  read -p "Open Omada Cloud Controller in browser? (y/n) " -n 1 -r
  echo
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "Opening $CLOUD_CONTROLLER_URL..."
    open "$CLOUD_CONTROLLER_URL" 2>/dev/null || echo "Could not open browser automatically. Please open manually."
  fi
else
  echo "Note: No graphical environment detected. Please open browser manually."
fi
echo ""
echo "════════════════════════════════════════"
echo "For detailed instructions, see:"
echo " docs/OMADA_CLOUD_CONTROLLER_FIREWALL_GUIDE.md"
echo "════════════════════════════════════════"

View File

@@ -4,6 +4,12 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
@@ -20,7 +26,7 @@ log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Host configuration
R630_01_IP="192.168.11.11"
R630_01_IP="${PROXMOX_HOST_R630_01}"
R630_01_PASSWORD="password"
R630_01_HOSTNAME="r630-01"

View File

@@ -0,0 +1,138 @@
#!/bin/bash
# Activate storage on r630-01 (local-lvm and thin1)
# Usage: ./scripts/activate-storage-r630-01.sh
#
# Workflow: ping the host, verify SSH access, then remotely (1) back up and
# rewrite stale "nodes pve" references in /etc/pve/storage.cfg to
# "nodes r630-01", (2) re-enable the local-lvm and thin1 storages,
# (3) print the resulting storage status.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Load central IP configuration when available (may define PROXMOX_HOST_*);
# a missing file is fine — the defaults below are used instead.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Host configuration — now env-overridable, with the previous hardcoded
# values as defaults so existing callers keep working.
R630_01_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# SECURITY NOTE(review): hardcoded default password; prefer key-based auth
# (scripts/QUICK_SSH_SETUP.sh) and export R630_01_PASSWORD instead of
# committing a real secret here.
R630_01_PASSWORD="${R630_01_PASSWORD:-password}"
R630_01_HOSTNAME="r630-01"
log_info "========================================="
log_info "Activating Storage on r630-01"
log_info "========================================="
echo ""
# Step 1: basic reachability before attempting SSH
log_info "1. Testing connectivity to ${R630_01_IP}..."
if ping -c 2 -W 2 "$R630_01_IP" >/dev/null 2>&1; then
  log_success "Host is reachable"
else
  log_error "Host is NOT reachable"
  exit 1
fi
echo ""
# Step 2: confirm SSH login works before sending the remote script
log_info "2. Testing SSH access..."
if sshpass -p "$R630_01_PASSWORD" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$R630_01_IP" "echo 'SSH OK'" >/dev/null 2>&1; then
  log_success "SSH access works"
else
  log_error "SSH access failed"
  exit 1
fi
echo ""
# Step 3: run the activation payload remotely. The heredoc is quoted
# ('ENDSSH') so nothing expands locally — it runs verbatim on the host.
log_info "3. Activating storage..."
echo ""
sshpass -p "$R630_01_PASSWORD" ssh -o StrictHostKeyChecking=no root@"$R630_01_IP" bash <<'ENDSSH'
set -e
echo "=== Current Storage Status ==="
pvesm status 2>&1 || echo "Cannot list storage"
echo ""
echo "=== Available Volume Groups ==="
vgs 2>&1 || echo "No volume groups found"
echo ""
echo "=== Available Thin Pools ==="
lvs -o lv_name,vg_name,lv_size,data_percent,metadata_percent,pool_lv 2>&1 | grep -E "LV|thin" || echo "No thin pools found"
echo ""
echo "=== Current Storage Configuration ==="
cat /etc/pve/storage.cfg 2>/dev/null | grep -E "r630-01|local-lvm|thin1" || echo "No relevant storage config found"
echo ""
echo "=== Step 1: Updating storage.cfg node references ==="
# Backup
cp /etc/pve/storage.cfg /etc/pve/storage.cfg.backup.$(date +%Y%m%d_%H%M%S) 2>/dev/null || echo "Cannot backup storage.cfg"
# Update node references from "pve" to "r630-01"
sed -i 's/nodes pve$/nodes r630-01/' /etc/pve/storage.cfg 2>/dev/null || true
sed -i 's/nodes pve /nodes r630-01 /' /etc/pve/storage.cfg 2>/dev/null || true
sed -i 's/nodes pve,/nodes r630-01,/' /etc/pve/storage.cfg 2>/dev/null || true
echo "Updated storage.cfg:"
cat /etc/pve/storage.cfg 2>/dev/null | grep -E "r630-01|local-lvm|thin1" || echo "No relevant entries found"
echo ""
echo "=== Step 2: Enabling local-lvm storage ==="
# Check if local-lvm exists
if pvesm status 2>/dev/null | grep -q "local-lvm"; then
echo "local-lvm storage found, enabling..."
pvesm set local-lvm --disable 0 2>&1 || echo "Failed to enable local-lvm (may already be enabled)"
else
echo "local-lvm storage not found in storage list"
echo "Checking if volume group exists..."
if vgs | grep -q "pve\|data"; then
echo "Volume group found. Storage may need to be added to storage.cfg"
fi
fi
echo ""
echo "=== Step 3: Enabling thin1 storage ==="
# Check if thin1 exists
if pvesm status 2>/dev/null | grep -q "thin1"; then
echo "thin1 storage found, enabling..."
pvesm set thin1 --disable 0 2>&1 || echo "Failed to enable thin1 (may already be enabled)"
else
echo "thin1 storage not found in storage list"
echo "Checking if thin pool exists..."
if lvs | grep -q "thin1"; then
echo "Thin pool found. Storage may need to be added to storage.cfg"
fi
fi
echo ""
echo "=== Step 4: Verifying storage status ==="
echo "Storage Status:"
pvesm status 2>&1 || echo "Cannot list storage"
echo ""
echo "=== Step 5: Storage Details ==="
for storage in local-lvm thin1; do
if pvesm status 2>/dev/null | grep -q "$storage"; then
echo "--- $storage ---"
pvesm status 2>/dev/null | grep "$storage" || true
fi
done
echo ""
ENDSSH
echo ""
log_success "Storage activation complete for r630-01"
echo ""
log_info "Verification:"
log_info " - Check storage status above"
log_info " - Verify storage is enabled and accessible"
log_info " - Storage should now be available for VM/container creation"
echo ""

View File

@@ -4,6 +4,12 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
@@ -20,7 +26,7 @@ log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Host configuration
R630_02_IP="192.168.11.12"
R630_02_IP="${PROXMOX_HOST_R630_02}"
R630_02_PASSWORD="password"
R630_02_HOSTNAME="r630-02"

View File

@@ -0,0 +1,139 @@
#!/bin/bash
# Activate storage on r630-02 (local-lvm and thin1-thin6).
#
# Connects to the r630-02 Proxmox host over SSH, fixes stale node references
# ("pve2" -> "r630-02") in /etc/pve/storage.cfg, then re-enables local-lvm
# and the thin1..thin6 thin pools with pvesm.
#
# Usage: ./scripts/activate-storage-r630-02.sh
#
# Environment overrides (defaults preserved for backward compatibility):
#   R630_02_IP        target host IP     (default: 192.168.11.12)
#   R630_02_PASSWORD  root SSH password  (default: "password")
#   R630_02_HOSTNAME  host name label    (default: "r630-02")
#
# Requires: sshpass, ping. Remote host must run Proxmox VE (pvesm, vgs, lvs).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Host configuration (overridable via environment).
# NOTE(review): the plaintext default password is kept only for backward
# compatibility -- export R630_02_PASSWORD (or move to SSH keys) in production.
R630_02_IP="${R630_02_IP:-192.168.11.12}"
R630_02_PASSWORD="${R630_02_PASSWORD:-password}"
R630_02_HOSTNAME="${R630_02_HOSTNAME:-r630-02}"
log_info "========================================="
log_info "Activating Storage on r630-02"
log_info "========================================="
echo ""
# Test connectivity
log_info "1. Testing connectivity to ${R630_02_IP}..."
if ping -c 2 -W 2 "$R630_02_IP" >/dev/null 2>&1; then
    log_success "Host is reachable"
else
    log_error "Host is NOT reachable"
    exit 1
fi
echo ""
# Test SSH
log_info "2. Testing SSH access..."
if sshpass -p "$R630_02_PASSWORD" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$R630_02_IP" "echo 'SSH OK'" >/dev/null 2>&1; then
    log_success "SSH access works"
else
    log_error "SSH access failed"
    exit 1
fi
echo ""
# Activate storage. The quoted delimiter ('ENDSSH') keeps the heredoc literal,
# so the block below runs verbatim as a script on the remote host.
log_info "3. Activating storage..."
echo ""
sshpass -p "$R630_02_PASSWORD" ssh -o StrictHostKeyChecking=no root@"$R630_02_IP" bash <<'ENDSSH'
set -e
echo "=== Current Storage Status ==="
pvesm status 2>&1 || echo "Cannot list storage"
echo ""
echo "=== Available Volume Groups ==="
vgs 2>&1 || echo "No volume groups found"
echo ""
echo "=== Available Thin Pools ==="
lvs -o lv_name,vg_name,lv_size,data_percent,metadata_percent,pool_lv 2>&1 | grep -E "LV|thin" || echo "No thin pools found"
echo ""
echo "=== Current Storage Configuration ==="
grep -E "r630-02|local-lvm|thin[1-6]" /etc/pve/storage.cfg 2>/dev/null || echo "No relevant storage config found"
echo ""
echo "=== Step 1: Updating storage.cfg node references ==="
# Keep a timestamped backup so a bad rewrite can be reverted by hand.
cp /etc/pve/storage.cfg /etc/pve/storage.cfg.backup.$(date +%Y%m%d_%H%M%S) 2>/dev/null || echo "Cannot backup storage.cfg"
# Update node references from "pve2" to "r630-02". The three patterns cover a
# node list where pve2 is the sole, the last, or a comma-separated entry.
sed -i 's/nodes pve2$/nodes r630-02/' /etc/pve/storage.cfg 2>/dev/null || true
sed -i 's/nodes pve2 /nodes r630-02 /' /etc/pve/storage.cfg 2>/dev/null || true
sed -i 's/nodes pve2,/nodes r630-02,/' /etc/pve/storage.cfg 2>/dev/null || true
echo "Updated storage.cfg:"
grep -E "r630-02|local-lvm|thin[1-6]" /etc/pve/storage.cfg 2>/dev/null || echo "No relevant entries found"
echo ""
echo "=== Step 2: Enabling local-lvm storage ==="
# Only attempt to enable storage pvesm already knows about; otherwise report
# whether the backing volume group exists so the operator can add it manually.
if pvesm status 2>/dev/null | grep -q "local-lvm"; then
    echo "local-lvm storage found, enabling..."
    pvesm set local-lvm --disable 0 2>&1 || echo "Failed to enable local-lvm (may already be enabled)"
else
    echo "local-lvm storage not found in storage list"
    echo "Checking if volume group exists..."
    if vgs | grep -q "pve\|data"; then
        echo "Volume group found. Storage may need to be added to storage.cfg"
    fi
fi
echo ""
echo "=== Step 3: Enabling thin storage pools (thin1-thin6) ==="
for thin in thin1 thin2 thin3 thin4 thin5 thin6; do
    if pvesm status 2>/dev/null | grep -q "$thin"; then
        echo "Enabling $thin..."
        pvesm set "$thin" --disable 0 2>&1 || echo "Failed to enable $thin (may already be enabled)"
    else
        echo "$thin storage not found in storage list"
        echo "Checking if thin pool exists..."
        if lvs | grep -q "$thin"; then
            echo "$thin pool found. Storage may need to be added to storage.cfg"
        fi
    fi
done
echo ""
echo "=== Step 4: Verifying storage status ==="
echo "Storage Status:"
pvesm status 2>&1 || echo "Cannot list storage"
echo ""
echo "=== Step 5: Storage Details ==="
for storage in local-lvm thin1 thin2 thin3 thin4 thin5 thin6; do
    if pvesm status 2>/dev/null | grep -q "$storage"; then
        echo "--- $storage ---"
        pvesm status 2>/dev/null | grep "$storage" || true
    fi
done
echo ""
ENDSSH
echo ""
log_success "Storage activation complete for r630-02"
echo ""
log_info "Verification:"
log_info " - Check storage status above"
log_info " - Verify storage is enabled and accessible"
log_info " - Storage should now be available for VM/container creation"
echo ""

View File

@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Fetch container 2102's enode from its Besu RPC and add it to the node lists
# under config/besu-node-lists, then deploy the lists to all nodes.
# Run after 2102 (besu-rpc-core-2) is up and responding on 8545.
#
# Usage: ./scripts/add-2102-enode-to-lists.sh
#
# Environment (normally supplied by config/ip-addresses.conf):
#   PROXMOX_ML110  Proxmox host running container 2102 (default 192.168.11.10)
#   RPC_CORE_2     routable IP to advertise in the enode (default 192.168.11.212)
set -euo pipefail
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
HOST="${PROXMOX_ML110:-192.168.11.10}"
IP="${RPC_CORE_2:-192.168.11.212}"
# admin_nodeInfo JSON-RPC payload, run inside the container via pct exec.
RPC_PAYLOAD='{"jsonrpc":"2.0","method":"admin_nodeInfo","params":[],"id":1}'
# Primary path: parse the enode with jq. The '|| true' is required: under
# 'set -euo pipefail' a failed ssh/jq pipeline would abort the script here,
# before the textual fallback below ever runs.
ENODE=$(ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=accept-new root@"$HOST" \
  "pct exec 2102 -- curl -s -X POST -H 'Content-Type: application/json' --data '$RPC_PAYLOAD' http://127.0.0.1:8545" \
  | jq -r '.result.enode // empty' || true)
# Fallback: extract the enode field textually when jq is unavailable or the
# response is not clean JSON.
[[ -z "$ENODE" ]] && ENODE=$(ssh -o ConnectTimeout=10 root@"$HOST" \
  "pct exec 2102 -- curl -s -X POST -H 'Content-Type: application/json' --data '$RPC_PAYLOAD' http://127.0.0.1:8545" \
  | grep -o '"enode":"[^"]*"' | cut -d'"' -f4 || true)
[[ -z "$ENODE" ]] && { echo "Could not get enode from 2102. Is besu-rpc running and RPC responding on 8545?"; exit 1; }
# Besu reports its container-local address; swap the host part for the
# routable IP before publishing.
ENODE=$(echo "$ENODE" | sed "s/@[0-9.]*:/@$IP:/")
echo "Enode: $ENODE"
STATIC="$PROJECT_ROOT/config/besu-node-lists/static-nodes.json"
PERMS="$PROJECT_ROOT/config/besu-node-lists/permissions-nodes.toml"
# Add to static-nodes.json, skipping the append when the enode is already
# present so reruns do not accumulate duplicates.
jq --arg e "$ENODE" 'if index($e) then . else . + [$e] end' "$STATIC" \
  > /tmp/static-nodes-2102.json && mv /tmp/static-nodes-2102.json "$STATIC"
# Add to permissions-nodes.toml: put a comma on the current last entry, then
# insert the new enode just before the closing bracket. -F matches the enode
# literally instead of as a regex (it contains '.', '/', ':').
grep -qF -- "$ENODE" "$PERMS" && { echo "Enode already in permissions-nodes.toml"; } || {
  # NOTE(review): assumes the list's final entry is the 192.168.11.241 node --
  # update this pattern if that changes.
  sed -i 's/@192.168.11.241:30303"$/@192.168.11.241:30303",/' "$PERMS"
  sed -i '/^]$/i\ "'"$ENODE"'"' "$PERMS"
}
echo "Deploying to all nodes..."
"$PROJECT_ROOT/scripts/deploy-besu-node-lists-to-all.sh"
echo "Done. 2102 enode added and lists deployed."

View File

@@ -4,7 +4,13 @@
set -euo pipefail
IP="${IP:-192.168.11.140}"
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
IP="${IP:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}"
DOMAIN="${DOMAIN:-explorer.d-bis.org}"
PASSWORD="${PASSWORD:-L@kers2010}"
@@ -539,7 +545,7 @@ cat > /tmp/blockscout-with-bridge-monitoring.html <<'BRIDGE_HTML_EOF'
const BRIDGE_CONTRACTS = {
CCIP_ROUTER: '0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e',
CCIP_SENDER: '0x105F8A15b819948a89153505762444Ee9f324684',
WETH9_BRIDGE: '0x89dd12025bfCD38A168455A44B400e913ED33BE2',
WETH9_BRIDGE: '__WETH9_BRIDGE_CHAIN138__',
WETH10_BRIDGE: '0xe0E93247376aa097dB308B92e6Ba36bA015535D0',
WETH9_TOKEN: '0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2',
WETH10_TOKEN: '0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f',
@@ -865,6 +871,9 @@ cat > /tmp/blockscout-with-bridge-monitoring.html <<'BRIDGE_HTML_EOF'
</html>
BRIDGE_HTML_EOF
# Inject WETH9 bridge address (use CCIPWETH9_BRIDGE_CHAIN138 after deploy-and-configure)
sed -i "s|__WETH9_BRIDGE_CHAIN138__|${CCIPWETH9_BRIDGE_CHAIN138:-0x971cD9D156f193df8051E48043C476e53ECd4693}|g" /tmp/blockscout-with-bridge-monitoring.html
# Step 3: Upload enhanced explorer
log_step "Step 3: Uploading enhanced explorer with bridge monitoring..."
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /tmp/blockscout-with-bridge-monitoring.html root@"$IP":/var/www/html/index.html

View File

@@ -0,0 +1,889 @@
#!/usr/bin/env bash
# Add Comprehensive Bridge Monitoring to Blockscout Explorer
# Adds CCIP bridge monitoring, transaction tracking, and health monitoring
#
# Flow: (1) back up the live index.html from the explorer host, (2) generate a
# replacement page with bridge-monitoring views locally, (3) upload it.
# Requires: sshpass, scp. IP/DOMAIN/PASSWORD are overridable via environment.
set -euo pipefail
# NOTE(review): plaintext default password committed here -- prefer exporting
# PASSWORD (or switching to SSH keys) rather than relying on this default.
IP="${IP:-192.168.11.140}"
DOMAIN="${DOMAIN:-explorer.d-bis.org}"
PASSWORD="${PASSWORD:-L@kers2010}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_step() { echo -e "${CYAN}[STEP]${NC} $1"; }
# Run a command as root on the explorer host, merging stderr into stdout.
# NOTE(review): the command is wrapped in single quotes on the remote side, so
# a $cmd that itself contains single quotes will break the quoting -- confirm
# callers avoid them.
exec_container() {
local cmd="$1"
sshpass -p "$PASSWORD" ssh -o StrictHostKeyChecking=no root@"$IP" "bash -c '$cmd'" 2>&1
}
echo "════════════════════════════════════════════════════════"
echo "Add Bridge Monitoring to Blockscout Explorer"
echo "════════════════════════════════════════════════════════"
echo ""
# Step 1: Read current explorer HTML (kept locally as a backup so the previous
# page can be restored if the new one misbehaves).
log_step "Step 1: Reading current explorer interface..."
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no root@"$IP":/var/www/html/index.html /tmp/blockscout-current.html
log_success "Current explorer interface backed up"
# Step 2: Create enhanced explorer with bridge monitoring
log_step "Step 2: Creating enhanced explorer with bridge monitoring..."
# This is a large file - I'll create it with comprehensive bridge monitoring features
cat > /tmp/blockscout-with-bridge-monitoring.html <<'BRIDGE_HTML_EOF'
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Chain 138 Explorer | d-bis.org | Bridge Monitoring</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
:root {
--primary: #667eea;
--secondary: #764ba2;
--success: #10b981;
--warning: #f59e0b;
--danger: #ef4444;
--bridge-blue: #3b82f6;
--dark: #1f2937;
--light: #f9fafb;
--border: #e5e7eb;
--text: #111827;
--text-light: #6b7280;
}
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
background: var(--light);
color: var(--text);
line-height: 1.6;
}
.navbar {
background: linear-gradient(135deg, var(--primary) 0%, var(--secondary) 100%);
color: white;
padding: 1rem 2rem;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
position: sticky;
top: 0;
z-index: 1000;
}
.nav-container {
max-width: 1400px;
margin: 0 auto;
display: flex;
justify-content: space-between;
align-items: center;
}
.logo {
font-size: 1.5rem;
font-weight: bold;
display: flex;
align-items: center;
gap: 0.5rem;
}
.nav-links {
display: flex;
gap: 2rem;
list-style: none;
}
.nav-links a {
color: white;
text-decoration: none;
transition: opacity 0.2s;
}
.nav-links a:hover { opacity: 0.8; }
.search-box {
flex: 1;
max-width: 600px;
margin: 0 2rem;
}
.search-input {
width: 100%;
padding: 0.75rem 1rem;
border: none;
border-radius: 8px;
font-size: 1rem;
background: rgba(255,255,255,0.2);
color: white;
backdrop-filter: blur(10px);
}
.search-input::placeholder { color: rgba(255,255,255,0.7); }
.search-input:focus {
outline: none;
background: rgba(255,255,255,0.3);
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 2rem;
}
.stats-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
gap: 1.5rem;
margin-bottom: 2rem;
}
.stat-card {
background: white;
padding: 1.5rem;
border-radius: 12px;
box-shadow: 0 2px 8px rgba(0,0,0,0.1);
transition: transform 0.2s, box-shadow 0.2s;
}
.stat-card:hover {
transform: translateY(-2px);
box-shadow: 0 4px 12px rgba(0,0,0,0.15);
}
.stat-card.bridge-card {
border-left: 4px solid var(--bridge-blue);
}
.stat-label {
color: var(--text-light);
font-size: 0.875rem;
text-transform: uppercase;
letter-spacing: 0.5px;
margin-bottom: 0.5rem;
}
.stat-value {
font-size: 2rem;
font-weight: bold;
color: var(--primary);
}
.stat-value.bridge-value {
color: var(--bridge-blue);
}
.card {
background: white;
border-radius: 12px;
box-shadow: 0 2px 8px rgba(0,0,0,0.1);
padding: 2rem;
margin-bottom: 2rem;
}
.card-header {
display: flex;
justify-content: space-between;
align-items: center;
margin-bottom: 1.5rem;
padding-bottom: 1rem;
border-bottom: 2px solid var(--border);
}
.card-title {
font-size: 1.5rem;
font-weight: bold;
color: var(--text);
}
.tabs {
display: flex;
gap: 1rem;
margin-bottom: 1.5rem;
border-bottom: 2px solid var(--border);
flex-wrap: wrap;
}
.tab {
padding: 1rem 1.5rem;
background: none;
border: none;
cursor: pointer;
font-size: 1rem;
color: var(--text-light);
border-bottom: 3px solid transparent;
transition: all 0.2s;
}
.tab.active {
color: var(--primary);
border-bottom-color: var(--primary);
font-weight: 600;
}
.bridge-tab.active {
color: var(--bridge-blue);
border-bottom-color: var(--bridge-blue);
}
.table {
width: 100%;
border-collapse: collapse;
}
.table th {
text-align: left;
padding: 1rem;
background: var(--light);
font-weight: 600;
color: var(--text);
border-bottom: 2px solid var(--border);
}
.table td {
padding: 1rem;
border-bottom: 1px solid var(--border);
}
.table tr:hover { background: var(--light); }
.hash {
font-family: 'Courier New', monospace;
font-size: 0.875rem;
color: var(--primary);
word-break: break-all;
}
.hash:hover { text-decoration: underline; cursor: pointer; }
.badge {
display: inline-block;
padding: 0.25rem 0.75rem;
border-radius: 20px;
font-size: 0.875rem;
font-weight: 600;
}
.badge-success { background: #d1fae5; color: var(--success); }
.badge-warning { background: #fef3c7; color: var(--warning); }
.badge-danger { background: #fee2e2; color: var(--danger); }
.badge-chain {
background: #dbeafe;
color: var(--bridge-blue);
}
.loading {
text-align: center;
padding: 3rem;
color: var(--text-light);
}
.loading i {
font-size: 2rem;
animation: spin 1s linear infinite;
}
@keyframes spin {
from { transform: rotate(0deg); }
to { transform: rotate(360deg); }
}
.error {
background: #fee2e2;
color: var(--danger);
padding: 1rem;
border-radius: 8px;
margin: 1rem 0;
}
.bridge-chain-card {
background: linear-gradient(135deg, #dbeafe 0%, #bfdbfe 100%);
padding: 1.5rem;
border-radius: 12px;
margin-bottom: 1rem;
}
.chain-name {
font-size: 1.25rem;
font-weight: bold;
color: var(--bridge-blue);
margin-bottom: 0.5rem;
}
.chain-info {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(150px, 1fr));
gap: 1rem;
margin-top: 1rem;
}
.chain-stat {
font-size: 0.875rem;
}
.chain-stat-label {
color: var(--text-light);
}
.chain-stat-value {
font-weight: bold;
color: var(--text);
margin-top: 0.25rem;
}
.bridge-health {
display: flex;
align-items: center;
gap: 0.5rem;
}
.health-indicator {
width: 12px;
height: 12px;
border-radius: 50%;
background: var(--success);
animation: pulse 2s infinite;
}
.health-indicator.warning { background: var(--warning); }
.health-indicator.danger { background: var(--danger); }
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
.detail-view {
display: none;
}
.detail-view.active { display: block; }
.info-row {
display: flex;
padding: 1rem;
border-bottom: 1px solid var(--border);
}
.info-label {
font-weight: 600;
min-width: 200px;
color: var(--text-light);
}
.info-value {
flex: 1;
word-break: break-all;
}
.btn {
padding: 0.5rem 1rem;
border: none;
border-radius: 6px;
cursor: pointer;
font-size: 0.875rem;
transition: all 0.2s;
}
.btn-primary {
background: var(--primary);
color: white;
}
.btn-primary:hover { background: var(--secondary); }
.btn-bridge {
background: var(--bridge-blue);
color: white;
}
.btn-bridge:hover { background: #2563eb; }
@media (max-width: 768px) {
.nav-container { flex-direction: column; gap: 1rem; }
.search-box { max-width: 100%; margin: 0; }
.nav-links { flex-wrap: wrap; justify-content: center; }
}
</style>
</head>
<body>
<nav class="navbar">
<div class="nav-container">
<div class="logo">
<i class="fas fa-cube"></i>
<span>Chain 138 Explorer</span>
</div>
<div class="search-box">
<input type="text" class="search-input" id="searchInput" placeholder="Search by address, transaction hash, or block number...">
</div>
<ul class="nav-links">
<li><a href="#" onclick="showHome(); return false;"><i class="fas fa-home"></i> Home</a></li>
<li><a href="#" onclick="showBlocks(); return false;"><i class="fas fa-cubes"></i> Blocks</a></li>
<li><a href="#" onclick="showTransactions(); return false;"><i class="fas fa-exchange-alt"></i> Transactions</a></li>
<li><a href="#" onclick="showBridgeMonitoring(); return false;"><i class="fas fa-bridge"></i> Bridge</a></li>
<li><a href="#" onclick="showTokens(); return false;"><i class="fas fa-coins"></i> Tokens</a></li>
</ul>
</div>
</nav>
<div class="container" id="mainContent">
<!-- Home View -->
<div id="homeView">
<div class="stats-grid" id="statsGrid">
<!-- Stats loaded dynamically -->
</div>
<div class="card">
<div class="card-header">
<h2 class="card-title">Latest Blocks</h2>
<button class="btn btn-primary" onclick="showBlocks()">View All</button>
</div>
<div id="latestBlocks">
<div class="loading"><i class="fas fa-spinner"></i> Loading blocks...</div>
</div>
</div>
<div class="card">
<div class="card-header">
<h2 class="card-title">Latest Transactions</h2>
<button class="btn btn-primary" onclick="showTransactions()">View All</button>
</div>
<div id="latestTransactions">
<div class="loading"><i class="fas fa-spinner"></i> Loading transactions...</div>
</div>
</div>
</div>
<!-- Bridge Monitoring View -->
<div id="bridgeView" class="detail-view">
<div class="card">
<div class="card-header">
<h2 class="card-title"><i class="fas fa-bridge"></i> Bridge Monitoring Dashboard</h2>
<button class="btn btn-bridge" onclick="refreshBridgeData()"><i class="fas fa-sync-alt"></i> Refresh</button>
</div>
<div class="tabs">
<button class="tab bridge-tab active" onclick="showBridgeTab('overview')">Overview</button>
<button class="tab bridge-tab" onclick="showBridgeTab('contracts')">Bridge Contracts</button>
<button class="tab bridge-tab" onclick="showBridgeTab('transactions')">Bridge Transactions</button>
<button class="tab bridge-tab" onclick="showBridgeTab('chains')">Destination Chains</button>
</div>
<!-- Bridge Overview Tab -->
<div id="bridgeOverview" class="bridge-tab-content">
<div class="stats-grid">
<div class="stat-card bridge-card">
<div class="stat-label">Total Bridge Volume</div>
<div class="stat-value bridge-value" id="bridgeVolume">-</div>
</div>
<div class="stat-card bridge-card">
<div class="stat-label">Bridge Transactions</div>
<div class="stat-value bridge-value" id="bridgeTxCount">-</div>
</div>
<div class="stat-card bridge-card">
<div class="stat-label">Active Bridges</div>
<div class="stat-value bridge-value" id="activeBridges">2</div>
</div>
<div class="stat-card bridge-card">
<div class="stat-label">Bridge Health</div>
<div class="stat-value bridge-value">
<div class="bridge-health">
<span class="health-indicator" id="bridgeHealth"></span>
<span id="bridgeHealthText">Healthy</span>
</div>
</div>
</div>
</div>
<h3 style="margin-top: 2rem; margin-bottom: 1rem;">Bridge Contracts Status</h3>
<div id="bridgeContractsStatus">
<div class="loading"><i class="fas fa-spinner"></i> Loading bridge status...</div>
</div>
</div>
<!-- Bridge Contracts Tab -->
<div id="bridgeContracts" class="bridge-tab-content" style="display: none;">
<div id="bridgeContractsList">
<div class="loading"><i class="fas fa-spinner"></i> Loading bridge contracts...</div>
</div>
</div>
<!-- Bridge Transactions Tab -->
<div id="bridgeTransactions" class="bridge-tab-content" style="display: none;">
<div id="bridgeTxList">
<div class="loading"><i class="fas fa-spinner"></i> Loading bridge transactions...</div>
</div>
</div>
<!-- Destination Chains Tab -->
<div id="bridgeChains" class="bridge-tab-content" style="display: none;">
<div id="destinationChainsList">
<div class="loading"><i class="fas fa-spinner"></i> Loading destination chains...</div>
</div>
</div>
</div>
</div>
<!-- Other views (blocks, transactions, etc.) -->
<div id="blocksView" class="detail-view">
<div class="card">
<div class="card-header">
<h2 class="card-title">All Blocks</h2>
</div>
<div id="blocksList">
<div class="loading"><i class="fas fa-spinner"></i> Loading blocks...</div>
</div>
</div>
</div>
<div id="transactionsView" class="detail-view">
<div class="card">
<div class="card-header">
<h2 class="card-title">All Transactions</h2>
</div>
<div id="transactionsList">
<div class="loading"><i class="fas fa-spinner"></i> Loading transactions...</div>
</div>
</div>
</div>
<!-- Detail views for block/transaction/address -->
<div id="blockDetailView" class="detail-view">
<div class="card">
<div class="card-header">
<button class="btn btn-secondary" onclick="showBlocks()"><i class="fas fa-arrow-left"></i> Back</button>
<h2 class="card-title">Block Details</h2>
</div>
<div id="blockDetail"></div>
</div>
</div>
<div id="transactionDetailView" class="detail-view">
<div class="card">
<div class="card-header">
<button class="btn btn-secondary" onclick="showTransactions()"><i class="fas fa-arrow-left"></i> Back</button>
<h2 class="card-title">Transaction Details</h2>
</div>
<div id="transactionDetail"></div>
</div>
</div>
<div id="addressDetailView" class="detail-view">
<div class="card">
<div class="card-header">
<button class="btn btn-secondary" onclick="showHome()"><i class="fas fa-arrow-left"></i> Back</button>
<h2 class="card-title">Address Details</h2>
</div>
<div id="addressDetail"></div>
</div>
</div>
</div>
<script>
const API_BASE = '/api';
let currentView = 'home';
// Bridge contract addresses
const BRIDGE_CONTRACTS = {
CCIP_ROUTER: '0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e',
CCIP_SENDER: '0x105F8A15b819948a89153505762444Ee9f324684',
WETH9_BRIDGE: '0x89dd12025bfCD38A168455A44B400e913ED33BE2',
WETH10_BRIDGE: '0xe0E93247376aa097dB308B92e6Ba36bA015535D0',
WETH9_TOKEN: '0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2',
WETH10_TOKEN: '0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f',
LINK_TOKEN: '0x514910771AF9Ca656af840dff83E8264EcF986CA'
};
const DESTINATION_CHAINS = {
'56': { name: 'BSC', selector: '11344663589394136015', status: 'active' },
'137': { name: 'Polygon', selector: '4051577828743386545', status: 'active' },
'43114': { name: 'Avalanche', selector: '6433500567565415381', status: 'active' },
'8453': { name: 'Base', selector: '15971525489660198786', status: 'active' },
'42161': { name: 'Arbitrum', selector: '', status: 'pending' },
'10': { name: 'Optimism', selector: '', status: 'pending' }
};
// Initialize
document.addEventListener('DOMContentLoaded', () => {
loadStats();
loadLatestBlocks();
loadBridgeData();
document.getElementById('searchInput').addEventListener('keypress', (e) => {
if (e.key === 'Enter') {
handleSearch(e.target.value);
}
});
});
async function fetchAPI(url) {
try {
const response = await fetch(url);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
return await response.json();
} catch (error) {
console.error('API Error:', error);
throw error;
}
}
async function loadStats() {
try {
const stats = await fetchAPI(`${API_BASE}/v2/stats`);
const statsGrid = document.getElementById('statsGrid');
statsGrid.innerHTML = `
<div class="stat-card">
<div class="stat-label">Total Blocks</div>
<div class="stat-value">${formatNumber(stats.total_blocks)}</div>
</div>
<div class="stat-card">
<div class="stat-label">Total Transactions</div>
<div class="stat-value">${formatNumber(stats.total_transactions)}</div>
</div>
<div class="stat-card">
<div class="stat-label">Total Addresses</div>
<div class="stat-value">${formatNumber(stats.total_addresses)}</div>
</div>
<div class="stat-card bridge-card">
<div class="stat-label">Bridge Contracts</div>
<div class="stat-value bridge-value">2 Active</div>
</div>
`;
const blockData = await fetchAPI(`${API_BASE}?module=block&action=eth_block_number`);
const blockNum = parseInt(blockData.result, 16);
// Add latest block if needed
} catch (error) {
console.error('Failed to load stats:', error);
}
}
async function loadBridgeData() {
await Promise.all([
loadBridgeOverview(),
loadBridgeContracts(),
loadDestinationChains()
]);
}
async function loadBridgeOverview() {
try {
// Load bridge contract balances and status
const contracts = ['WETH9_BRIDGE', 'WETH10_BRIDGE', 'CCIP_ROUTER'];
let html = '<table class="table"><thead><tr><th>Contract</th><th>Address</th><th>Type</th><th>Status</th><th>Balance</th></tr></thead><tbody>';
for (const contract of contracts) {
const address = BRIDGE_CONTRACTS[contract];
const name = contract.replace('_', ' ');
try {
const balance = await fetchAPI(`${API_BASE}?module=account&action=eth_get_balance&address=${address}&tag=latest`);
const balanceEth = formatEther(balance.result || '0');
html += `<tr>
<td><strong>${name}</strong></td>
<td class="hash" onclick="showAddressDetail('${address}')" style="cursor: pointer;">${shortenHash(address)}</td>
<td>${contract.includes('BRIDGE') ? 'Bridge' : contract.includes('ROUTER') ? 'Router' : 'Token'}</td>
<td><span class="badge badge-success">Active</span></td>
<td>${balanceEth} ETH</td>
</tr>`;
} catch (e) {
html += `<tr>
<td><strong>${name}</strong></td>
<td class="hash">${shortenHash(address)}</td>
<td>-</td>
<td><span class="badge badge-warning">Unknown</span></td>
<td>-</td>
</tr>`;
}
}
html += '</tbody></table>';
document.getElementById('bridgeContractsStatus').innerHTML = html;
// Update bridge stats
document.getElementById('bridgeTxCount').textContent = 'Loading...';
document.getElementById('bridgeVolume').textContent = 'Calculating...';
document.getElementById('bridgeHealth').classList.add('health-indicator');
} catch (error) {
document.getElementById('bridgeContractsStatus').innerHTML =
`<div class="error">Failed to load bridge data: ${error.message}</div>`;
}
}
async function loadBridgeContracts() {
const contracts = [
{ name: 'CCIP Router', address: BRIDGE_CONTRACTS.CCIP_ROUTER, type: 'Router', description: 'Routes cross-chain messages' },
{ name: 'CCIP Sender', address: BRIDGE_CONTRACTS.CCIP_SENDER, type: 'Sender', description: 'Initiates cross-chain transfers' },
{ name: 'WETH9 Bridge', address: BRIDGE_CONTRACTS.WETH9_BRIDGE, type: 'Bridge', description: 'Bridges WETH9 tokens' },
{ name: 'WETH10 Bridge', address: BRIDGE_CONTRACTS.WETH10_BRIDGE, type: 'Bridge', description: 'Bridges WETH10 tokens' }
];
let html = '<div style="display: grid; gap: 1.5rem;">';
for (const contract of contracts) {
try {
const balance = await fetchAPI(`${API_BASE}?module=account&action=eth_get_balance&address=${contract.address}&tag=latest`);
html += `
<div class="bridge-chain-card">
<div class="chain-name">${contract.name}</div>
<div style="margin-bottom: 0.5rem;">
<span class="hash" onclick="showAddressDetail('${contract.address}')" style="cursor: pointer;">${contract.address}</span>
</div>
<div style="color: var(--text-light); margin-bottom: 1rem;">${contract.description}</div>
<div class="chain-info">
<div class="chain-stat">
<div class="chain-stat-label">Type</div>
<div class="chain-stat-value">${contract.type}</div>
</div>
<div class="chain-stat">
<div class="chain-stat-label">Balance</div>
<div class="chain-stat-value">${formatEther(balance.result || '0')} ETH</div>
</div>
<div class="chain-stat">
<div class="chain-stat-label">Status</div>
<div class="chain-stat-value"><span class="badge badge-success">Active</span></div>
</div>
</div>
</div>
`;
} catch (e) {
html += `<div class="bridge-chain-card">
<div class="chain-name">${contract.name}</div>
<div class="hash">${contract.address}</div>
<div class="error">Unable to fetch data</div>
</div>`;
}
}
html += '</div>';
document.getElementById('bridgeContractsList').innerHTML = html;
}
async function loadDestinationChains() {
let html = '';
for (const [chainId, chain] of Object.entries(DESTINATION_CHAINS)) {
const statusBadge = chain.status === 'active' ?
'<span class="badge badge-success">Active</span>' :
'<span class="badge badge-warning">Pending</span>';
html += `
<div class="bridge-chain-card">
<div style="display: flex; justify-content: space-between; align-items: center;">
<div class="chain-name">${chain.name} (Chain ID: ${chainId})</div>
${statusBadge}
</div>
<div class="chain-info">
<div class="chain-stat">
<div class="chain-stat-label">Chain Selector</div>
<div class="chain-stat-value">${chain.selector || 'N/A'}</div>
</div>
<div class="chain-stat">
<div class="chain-stat-label">Status</div>
<div class="chain-stat-value">${chain.status === 'active' ? 'Connected' : 'Not Configured'}</div>
</div>
<div class="chain-stat">
<div class="chain-stat-label">Bridge Contracts</div>
<div class="chain-stat-value">Deployed</div>
</div>
</div>
</div>
`;
}
document.getElementById('destinationChainsList').innerHTML = html;
}
function showBridgeTab(tab) {
// Hide all tab contents
document.querySelectorAll('.bridge-tab-content').forEach(el => el.style.display = 'none');
document.querySelectorAll('.bridge-tab').forEach(el => el.classList.remove('active'));
// Show selected tab
document.getElementById(`bridge${tab.charAt(0).toUpperCase() + tab.slice(1)}`).style.display = 'block';
event.target.classList.add('active');
}
function showBridgeMonitoring() {
showView('bridge');
loadBridgeData();
}
function refreshBridgeData() {
loadBridgeData();
}
function showHome() {
showView('home');
loadStats();
loadLatestBlocks();
}
function showBlocks() {
showView('blocks');
// Load blocks list
}
function showTransactions() {
showView('transactions');
// Load transactions list
}
function showTokens() {
alert('Token view coming soon!');
}
function showView(viewName) {
currentView = viewName;
document.querySelectorAll('.detail-view').forEach(v => v.classList.remove('active'));
document.getElementById('homeView').style.display = viewName === 'home' ? 'block' : 'none';
if (viewName !== 'home') {
document.getElementById(`${viewName}View`).classList.add('active');
}
}
async function loadLatestBlocks() {
const container = document.getElementById('latestBlocks');
try {
const blockData = await fetchAPI(`${API_BASE}?module=block&action=eth_block_number`);
const latestBlock = parseInt(blockData.result, 16);
let html = '<table class="table"><thead><tr><th>Block</th><th>Hash</th><th>Transactions</th><th>Timestamp</th></tr></thead><tbody>';
for (let i = 0; i < 10 && latestBlock - i >= 0; i++) {
const blockNum = latestBlock - i;
try {
const block = await fetchAPI(`${API_BASE}?module=block&action=eth_get_block_by_number&tag=0x${blockNum.toString(16)}&boolean=false`);
if (block.result) {
const timestamp = new Date(parseInt(block.result.timestamp, 16) * 1000).toLocaleString();
const txCount = block.result.transactions.length;
html += `<tr onclick="showBlockDetail('${blockNum}')" style="cursor: pointer;">
<td>${blockNum}</td>
<td class="hash">${shortenHash(block.result.hash)}</td>
<td>${txCount}</td>
<td>${timestamp}</td>
</tr>`;
}
} catch (e) {}
}
html += '</tbody></table>';
container.innerHTML = html;
} catch (error) {
container.innerHTML = `<div class="error">Failed to load blocks: ${error.message}</div>`;
}
}
function showBlockDetail(blockNumber) {
// Implement block detail view
alert(`Block ${blockNumber} detail view - to be implemented`);
}
function showAddressDetail(address) {
showView('addressDetail');
// Implement address detail view
}
function handleSearch(query) {
query = query.trim();
if (!query) return;
if (/^0x[a-fA-F0-9]{40}$/.test(query)) {
showAddressDetail(query);
} else if (/^0x[a-fA-F0-9]{64}$/.test(query)) {
// Show transaction detail
alert(`Transaction ${query} - to be implemented`);
} else if (/^\d+$/.test(query)) {
showBlockDetail(query);
} else {
alert('Invalid search. Enter an address, transaction hash, or block number.');
}
}
function formatNumber(num) {
  // Coerce to an integer (0 when missing/falsy) and format with
  // locale-specific thousands separators.
  const value = parseInt(num || 0);
  return value.toLocaleString();
}
function shortenHash(hash, length = 10) {
  // Abbreviate a long hex string to "0xPREFIX...SUFFIX"; values already
  // short enough (or falsy) pass through unchanged.
  if (!hash || hash.length <= length * 2 + 2) {
    return hash;
  }
  const head = hash.substring(0, length + 2);
  const tail = hash.substring(hash.length - length);
  return `${head}...${tail}`;
}
function formatEther(wei, unit = 'ether') {
  // Convert a wei amount (decimal or 0x-hex, string or number) to a
  // human-readable value and trim trailing zeros. 'gwei' divides by 1e9,
  // anything else by 1e18. NB: precision is limited to Number (2^53).
  const raw = wei.toString();
  const asNumber = raw.startsWith('0x') ? parseInt(raw, 16) : parseInt(raw);
  const divisor = Math.pow(10, unit === 'gwei' ? 9 : 18);
  return (asNumber / divisor).toFixed(6).replace(/\.?0+$/, '');
}
</script>
</body>
</html>
BRIDGE_HTML_EOF
# Step 3: Upload enhanced explorer
# Pushes the static HTML generated into /tmp by the heredoc above onto the
# explorer host's web root, overwriting the live index page.
log_step "Step 3: Uploading enhanced explorer with bridge monitoring..."
# SECURITY NOTE(review): sshpass exposes $PASSWORD on the process list and
# StrictHostKeyChecking is disabled — acceptable only on a trusted
# management LAN; prefer SSH keys.
sshpass -p "$PASSWORD" scp -o StrictHostKeyChecking=no /tmp/blockscout-with-bridge-monitoring.html root@"$IP":/var/www/html/index.html
log_success "Enhanced explorer with bridge monitoring uploaded"
echo ""
log_success "Bridge monitoring added to explorer!"
echo ""
# Summary of the UI features baked into the uploaded static page.
log_info "Bridge Monitoring Features:"
log_info " ✅ Bridge Overview Dashboard"
log_info " ✅ Bridge Contract Status Monitoring"
log_info " ✅ Bridge Transaction Tracking"
log_info " ✅ Destination Chain Status"
log_info " ✅ Bridge Health Indicators"
log_info " ✅ Real-time Bridge Statistics"
log_info " ✅ CCIP Router & Sender Monitoring"
log_info " ✅ WETH9 & WETH10 Bridge Tracking"
echo ""
log_info "Access: https://explorer.d-bis.org/"
log_info "Click 'Bridge' in the navigation to view bridge monitoring"
echo ""

View File

@@ -4,6 +4,12 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
@@ -21,8 +27,8 @@ log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
source "$SOURCE_PROJECT/.env"
RPC_URL="${RPC_URL_138:-http://192.168.11.250:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
RPC_URL="${RPC_URL_138_PUBLIC:-http://${RPC_PUBLIC_1:-192.168.11.221}:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x971cD9D156f193df8051E48043C476e53ECd4693}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
ETHEREUM_MAINNET_SELECTOR="5009297550715157269"

View File

@@ -0,0 +1,111 @@
#!/usr/bin/env bash
# Add Ethereum Mainnet to bridge destinations
#
# Registers an Ethereum Mainnet counterparty on both the WETH9 and WETH10
# CCIP bridge contracts on chain 138, skipping a bridge whose destination
# slot is already populated.
#
# Usage: ./add-ethereum-mainnet-bridge.sh <weth9_bridge_address> <weth10_bridge_address>
# Requires: foundry's `cast`; PRIVATE_KEY (and optional overrides) in
#           $SOURCE_PROJECT/.env.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# shellcheck source=/dev/null
source "$SOURCE_PROJECT/.env"
RPC_URL="${RPC_URL_138:-http://192.168.11.250:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
# Chainlink CCIP chain selector for Ethereum Mainnet.
ETHEREUM_MAINNET_SELECTOR="5009297550715157269"
WETH9_MAINNET_BRIDGE="${1:-}"
WETH10_MAINNET_BRIDGE="${2:-}"
if [ -z "$WETH9_MAINNET_BRIDGE" ] || [ -z "$WETH10_MAINNET_BRIDGE" ]; then
  log_error "Usage: $0 <weth9_mainnet_bridge_address> <weth10_mainnet_bridge_address>"
  log_info "Example: $0 0x... 0x..."
  exit 1
fi
DEPLOYER=$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null)

#######################################
# Configure one local bridge contract with its Ethereum Mainnet counterparty.
# No-op when the destinations() slot already holds a non-zero address.
# (Replaces two verbatim-duplicated WETH9/WETH10 blocks in the original.)
# Globals:   RPC_URL, PRIVATE_KEY, DEPLOYER, ETHEREUM_MAINNET_SELECTOR (read)
# Arguments: $1 label ("WETH9"/"WETH10"), $2 local bridge address,
#            $3 mainnet bridge address, $4 current destinations() value
#######################################
configure_bridge() {
  local label=$1 bridge=$2 dest=$3 current=$4
  local nonce tx_output tx_hash err
  # A slot ending in the zero address means "not configured yet".
  if [ -n "$current" ] && ! echo "$current" | grep -q "0x0000000000000000000000000000000000000000$"; then
    log_success "✓ ${label} bridge already configured for Ethereum Mainnet"
    return 0
  fi
  log_info "Configuring ${label} bridge for Ethereum Mainnet..."
  nonce=$(cast nonce "$DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
  tx_output=$(cast send "$bridge" \
    "addDestination(uint64,address)" \
    "$ETHEREUM_MAINNET_SELECTOR" \
    "$dest" \
    --rpc-url "$RPC_URL" \
    --private-key "$PRIVATE_KEY" \
    --gas-price 20000000000 \
    --nonce "$nonce" \
    2>&1 || echo "FAILED")
  if echo "$tx_output" | grep -qE "transactionHash|Success"; then
    tx_hash=$(echo "$tx_output" | grep -oE "transactionHash[[:space:]]+0x[0-9a-fA-F]{64}" | awk '{print $2}' || echo "")
    log_success "✓ ${label} bridge configured: $tx_hash"
    # Give the chain a moment to mine before the next nonce lookup.
    sleep 10
  else
    err=$(echo "$tx_output" | grep -E "Error|reverted|already exists" | head -1 || echo "Unknown")
    log_error "✗ ${label} configuration failed: $err"
  fi
}

log_info "========================================="
log_info "Add Ethereum Mainnet to Bridges"
log_info "========================================="
log_info ""
log_info "Ethereum Mainnet Selector: $ETHEREUM_MAINNET_SELECTOR"
log_info "WETH9 Mainnet Bridge: $WETH9_MAINNET_BRIDGE"
log_info "WETH10 Mainnet Bridge: $WETH10_MAINNET_BRIDGE"
log_info ""
# Check if already configured
log_info "Checking current configuration..."
WETH9_CHECK=$(cast call "$WETH9_BRIDGE" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
WETH10_CHECK=$(cast call "$WETH10_BRIDGE" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
configure_bridge "WETH9" "$WETH9_BRIDGE" "$WETH9_MAINNET_BRIDGE" "$WETH9_CHECK"
configure_bridge "WETH10" "$WETH10_BRIDGE" "$WETH10_MAINNET_BRIDGE" "$WETH10_CHECK"
log_info ""
log_success "========================================="
log_success "Ethereum Mainnet Configuration Complete!"
log_success "========================================="
View File

@@ -7,9 +7,12 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR/.."
# Load environment variables
# Load environment variables (set +u so values with $ in them don't trigger unbound variable)
if [[ -f ".env" ]]; then
source .env
set +u
# shellcheck source=/dev/null
source .env 2>/dev/null || true
set -u
fi
# Tunnel configuration

View File

@@ -4,7 +4,13 @@
set -euo pipefail
IP="${IP:-192.168.11.140}"
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
IP="${IP:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}}}0}"
DOMAIN="${DOMAIN:-explorer.d-bis.org}"
PASSWORD="${PASSWORD:-L@kers2010}"
@@ -641,7 +647,7 @@ cat > /tmp/blockscout-with-weth-utilities.html <<'WETH_HTML_EOF'
<h4 style="margin-top: 1.5rem; margin-bottom: 0.5rem;">Cross-Chain Bridging</h4>
<p>Both WETH9 and WETH10 can be bridged to other chains using the CCIP bridge contracts:</p>
<ul style="margin-left: 2rem; margin-top: 0.5rem;">
<li><strong>WETH9 Bridge:</strong> <span class="hash">0x89dd12025bfCD38A168455A44B400e913ED33BE2</span></li>
<li><strong>WETH9 Bridge:</strong> <span class="hash">0x971cD9D156f193df8051E48043C476e53ECd4693</span></li>
<li><strong>WETH10 Bridge:</strong> <span class="hash">0xe0E93247376aa097dB308B92e6Ba36bA015535D0</span></li>
</ul>
</div>

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,6 @@
#!/bin/bash
set -euo pipefail
# Analyze all Cloudflare domains for tunnel configurations and issues
set -e

282
scripts/analyze-all-rpc-peers.sh Executable file
View File

@@ -0,0 +1,282 @@
#!/usr/bin/env bash
# Comprehensive RPC node peer count analysis for the chain-138 Besu fleet.
# Queries every known RPC node for block height and peer count, then prints
# a health summary plus remediation suggestions.
# Usage: ./analyze-all-rpc-peers.sh [proxmox-host]
set -euo pipefail
# Load IP configuration (optional; NODES below falls back to static IPs).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${1:-pve2}"
echo "=========================================="
echo "RPC Node Peer Count Analysis"
echo "=========================================="
echo ""

#######################################
# Run a command locally when `pct` exists (i.e. we are on a Proxmox node),
# otherwise over SSH on $PROXMOX_HOST.
# NOTE: arguments are re-parsed by the shell (eval/ssh); callers pass
# pre-quoted command strings — never feed untrusted input here.
#######################################
exec_proxmox() {
  if command -v pct &>/dev/null; then
    eval "$@"
  else
    ssh "root@${PROXMOX_HOST}" "$@"
  fi
}
# VMID -> RPC node IP map. Values come from config/ip-addresses.conf when it
# was sourced above; each entry falls back to the node's well-known static IP
# so the script still works (and stays `set -u` safe) without the config
# file. The original's nested "${X:-${X:-...}}" expansions were redundant —
# "${X:-default}" is exactly equivalent — and three entries had no fallback
# at all, aborting under `set -u` when the config was missing.
declare -A NODES=(
  ["2101"]="${RPC_CORE_1:-192.168.11.211}"
  ["2201"]="${RPC_PUBLIC_1:-192.168.11.221}"
  ["2303"]="${RPC_NODE_233:-192.168.11.233}"
  ["2304"]="${RPC_NODE_234:-192.168.11.234}"
  ["2305"]="${RPC_NODE_235:-192.168.11.235}"
  ["2306"]="${RPC_NODE_236:-192.168.11.236}"
  ["2307"]="${IP_RPC_237:-192.168.11.237}"
  ["2308"]="${IP_RPC_238:-192.168.11.238}"
  ["2400"]="${RPC_THIRDWEB_PRIMARY:-192.168.11.240}"
  ["2401"]="${RPC_THIRDWEB_1:-192.168.11.241}"
  ["2402"]="${RPC_THIRDWEB_2:-192.168.11.242}"
  ["2403"]="${RPC_THIRDWEB_3:-192.168.11.243}"
)
# Per-VMID result maps, filled in by the probe loop below.
declare -A PEER_COUNTS
declare -A BLOCK_NUMBERS
declare -A STATUS
echo "Gathering data from all RPC nodes..."
echo ""
for VMID in "${!NODES[@]}"; do
  IP="${NODES[$VMID]}"
  # Query eth_blockNumber over JSON-RPC; an empty response marks the node
  # as unreachable.
  BLOCK_RESPONSE=$(exec_proxmox "curl -s -X POST -H 'Content-Type: application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}' http://$IP:8545 2>/dev/null" || echo "")
  if [ -n "$BLOCK_RESPONSE" ]; then
    BLOCK_HEX=$(echo "$BLOCK_RESPONSE" | grep -o '"result":"[^"]*"' | cut -d'"' -f4)
    # Quote the expansion and detect printf failure explicitly: the old
    # unquoted `printf "%d" $HEX ... || echo "0"` could emit "00" (printf's
    # partial output plus the fallback) on malformed input.
    if [ -n "$BLOCK_HEX" ] && BLOCK_NUM=$(printf "%d" "$BLOCK_HEX" 2>/dev/null); then
      BLOCK_NUMBERS[$VMID]=$BLOCK_NUM
    else
      BLOCK_NUMBERS[$VMID]="N/A"
    fi
  else
    BLOCK_NUMBERS[$VMID]="N/A"
    STATUS[$VMID]="❌ Not responding"
    continue
  fi
  # Query net_peerCount the same way.
  PEER_RESPONSE=$(exec_proxmox "curl -s -X POST -H 'Content-Type: application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"net_peerCount\",\"params\":[],\"id\":1}' http://$IP:8545 2>/dev/null" || echo "")
  if [ -n "$PEER_RESPONSE" ]; then
    PEER_HEX=$(echo "$PEER_RESPONSE" | grep -o '"result":"[^"]*"' | cut -d'"' -f4)
    if [ -n "$PEER_HEX" ] && PEER_NUM=$(printf "%d" "$PEER_HEX" 2>/dev/null); then
      PEER_COUNTS[$VMID]=$PEER_NUM
    else
      PEER_COUNTS[$VMID]="0"
    fi
  else
    PEER_COUNTS[$VMID]="N/A"
  fi
  # Map peer count to a health bucket.
  if [ "${PEER_COUNTS[$VMID]}" = "N/A" ] || [ "${BLOCK_NUMBERS[$VMID]}" = "N/A" ]; then
    STATUS[$VMID]="❌ Not responding"
  elif [ "${PEER_COUNTS[$VMID]}" -ge 7 ]; then
    STATUS[$VMID]="✅ Excellent"
  elif [ "${PEER_COUNTS[$VMID]}" -ge 5 ]; then
    STATUS[$VMID]="✅ Good"
  elif [ "${PEER_COUNTS[$VMID]}" -ge 3 ]; then
    STATUS[$VMID]="⚠️ Acceptable"
  elif [ "${PEER_COUNTS[$VMID]}" -ge 1 ]; then
    STATUS[$VMID]="⚠️ Warning"
  else
    STATUS[$VMID]="❌ Critical"
  fi
done
# Render one aligned table row per node (order follows the associative
# array's key iteration, as before).
echo "=========================================="
echo "Peer Count Analysis Results"
echo "=========================================="
printf "%-6s | %-15s | %-12s | %-6s | %-15s\n" "VMID" "IP Address" "Block Height" "Peers" "Status"
echo "----------------------------------------------------------------------------"
for VMID in "${!NODES[@]}"; do
  printf "%-6s | %-15s | %-12s | %-6s | %-15s\n" \
    "$VMID" "${NODES[$VMID]}" "${BLOCK_NUMBERS[$VMID]}" "${PEER_COUNTS[$VMID]}" "${STATUS[$VMID]}"
done
echo ""
echo "=========================================="
echo "Summary"
echo "=========================================="
echo ""
# Tally nodes per health bucket.
EXCELLENT=0
GOOD=0
ACCEPTABLE=0
WARNING=0
CRITICAL=0
NON_RESPONDING=0
for VMID in "${!STATUS[@]}"; do
  # FIX 1: VAR=$((VAR+1)) instead of ((VAR++)) — the latter returns a
  # non-zero status when the pre-increment value is 0 and aborts the whole
  # script under `set -e` on the very first match.
  # FIX 2: the "Not responding" pattern is quoted — an unquoted
  # `*Not responding*)` is a bash syntax error (case pattern words cannot
  # contain a bare space).
  case "${STATUS[$VMID]}" in
    *Excellent*) EXCELLENT=$((EXCELLENT + 1)) ;;
    *Good*) GOOD=$((GOOD + 1)) ;;
    *Acceptable*) ACCEPTABLE=$((ACCEPTABLE + 1)) ;;
    *Warning*) WARNING=$((WARNING + 1)) ;;
    *Critical*) CRITICAL=$((CRITICAL + 1)) ;;
    *"Not responding"*) NON_RESPONDING=$((NON_RESPONDING + 1)) ;;
  esac
done
echo "✅ Excellent (7+ peers): $EXCELLENT nodes"
echo "✅ Good (5-6 peers): $GOOD nodes"
echo "⚠️ Acceptable (3-4 peers): $ACCEPTABLE nodes"
echo "⚠️ Warning (1-2 peers): $WARNING nodes"
echo "❌ Critical (0 peers): $CRITICAL nodes"
echo "❌ Not responding: $NON_RESPONDING nodes"
echo ""
echo ""
# Static reference guidance on healthy peer counts for this network size.
cat <<'EXPECTED_EOF'
==========================================
Expected Peer Count
==========================================

Network Size: ~19-20 active nodes

Recommended Peer Counts:
 - Minimum healthy: 2-3 peers
 - Recommended: 5-7 peers ✅
 - Maximum: 20-25 peers (max-peers setting)

EXPECTED_EOF
# Analysis
echo "=========================================="
echo "Analysis"
echo "=========================================="
echo ""
# Consensus height = the block number reported by the most nodes.
# O(n^2) over ~12 nodes, which is fine at this scale.
MAIN_BLOCK=""
MAIN_COUNT=0
for VMID in "${!BLOCK_NUMBERS[@]}"; do
  BLOCK="${BLOCK_NUMBERS[$VMID]}"
  if [ "$BLOCK" != "N/A" ] && [[ "$BLOCK" =~ ^[0-9]+$ ]]; then
    COUNT=0
    for VMID2 in "${!BLOCK_NUMBERS[@]}"; do
      if [ "${BLOCK_NUMBERS[$VMID2]}" = "$BLOCK" ]; then
        # COUNT=$((COUNT+1)) instead of ((COUNT++)) — the latter returns a
        # non-zero status when COUNT is 0 and kills the script under set -e.
        COUNT=$((COUNT + 1))
      fi
    done
    if [ "$COUNT" -gt "$MAIN_COUNT" ]; then
      MAIN_COUNT=$COUNT
      MAIN_BLOCK=$BLOCK
    fi
  fi
done
echo "Main network block height: $MAIN_BLOCK (${MAIN_COUNT} nodes)"
echo ""
# Flag nodes more than 1000 blocks ahead of or behind the consensus height.
AHEAD=()
BEHIND=()
for VMID in "${!BLOCK_NUMBERS[@]}"; do
  BLOCK="${BLOCK_NUMBERS[$VMID]}"
  if [ "$BLOCK" != "N/A" ] && [[ "$BLOCK" =~ ^[0-9]+$ ]] && [ "$MAIN_BLOCK" != "" ]; then
    DIFF=$((BLOCK - MAIN_BLOCK))
    if [ "$DIFF" -gt 1000 ]; then
      AHEAD+=("$VMID (block $BLOCK, +$DIFF)")
    elif [ "$DIFF" -lt -1000 ]; then
      BEHIND+=("$VMID (block $BLOCK, $DIFF)")
    fi
  fi
done
if [ ${#AHEAD[@]} -gt 0 ]; then
  echo "⚠️ Nodes ahead of main network:"
  for node in "${AHEAD[@]}"; do
    echo " - VMID $node"
  done
  echo ""
fi
if [ ${#BEHIND[@]} -gt 0 ]; then
  echo "⏳ Nodes behind main network (syncing):"
  for node in "${BEHIND[@]}"; do
    echo " - VMID $node"
  done
  echo ""
fi
# Recommendations
echo "=========================================="
echo "Recommendations"
echo "=========================================="
echo ""
# Nodes with 2 peers (ThirdWeb)
# Collect responsive nodes whose peer count is 1-2 — below the healthy
# 3+ floor, so the mesh is degraded and needs remediation.
LOW_PEER_NODES=()
for VMID in "${!PEER_COUNTS[@]}"; do
PEERS="${PEER_COUNTS[$VMID]}"
if [ "$PEERS" != "N/A" ] && [ "$PEERS" -lt 3 ] && [ "$PEERS" -gt 0 ]; then
LOW_PEER_NODES+=("$VMID")
fi
done
if [ ${#LOW_PEER_NODES[@]} -gt 0 ]; then
echo "⚠️ Nodes with low peer count (${#LOW_PEER_NODES[@]} nodes):"
for vmid in "${LOW_PEER_NODES[@]}"; do
echo " - VMID $vmid: ${PEER_COUNTS[$vmid]} peers (should have 5-7)"
done
echo ""
echo "Actions needed:"
echo " 1. Verify static-nodes.json contains all 15 nodes"
echo " 2. Check discovery-enabled=true"
echo " 3. Verify permissions-nodes.toml is correct"
echo " 4. Restart Besu services"
echo ""
echo "Run: ./scripts/fix-thirdweb-peer-connectivity.sh"
echo ""
fi
# Nodes with 0 peers
# Zero-peer nodes are either still syncing (low block height) or isolated
# from the mesh entirely.
ZERO_PEER_NODES=()
for VMID in "${!PEER_COUNTS[@]}"; do
PEERS="${PEER_COUNTS[$VMID]}"
if [ "$PEERS" = "0" ]; then
ZERO_PEER_NODES+=("$VMID")
fi
done
if [ ${#ZERO_PEER_NODES[@]} -gt 0 ]; then
echo "⏳ Nodes with 0 peers (${#ZERO_PEER_NODES[@]} nodes):"
for vmid in "${ZERO_PEER_NODES[@]}"; do
BLOCK="${BLOCK_NUMBERS[$vmid]}"
echo " - VMID $vmid: Block $BLOCK"
# Heuristic: below 1,000,000 blocks assume the node is mid-sync.
if [ "$BLOCK" != "N/A" ] && [ "$BLOCK" -lt 1000000 ]; then
echo " Status: Syncing (expected during initial sync)"
else
echo " Status: May be isolated - investigate"
fi
done
echo ""
fi
echo "=========================================="
echo "Complete"
echo "=========================================="

View File

@@ -0,0 +1,276 @@
#!/usr/bin/env bash
# Comprehensive RPC node peer count analysis.
# (Legacy copy with hard-coded IPs — superseded by the variant that sources
# config/ip-addresses.conf.)
# Usage: ./analyze-all-rpc-peers.sh [proxmox-host]
set -euo pipefail
PROXMOX_HOST="${1:-pve2}"
echo "=========================================="
echo "RPC Node Peer Count Analysis"
echo "=========================================="
echo ""

# Run a command locally when `pct` exists (we are on a Proxmox node),
# otherwise over SSH on $PROXMOX_HOST. Callers pass pre-quoted command
# strings; they are re-parsed by eval/ssh — never feed untrusted input.
exec_proxmox() {
  if command -v pct &>/dev/null; then
    eval "$@"
  else
    ssh "root@${PROXMOX_HOST}" "$@"
  fi
}

# VMID -> RPC node IP (static addresses on the 192.168.11.0/24 management LAN).
declare -A NODES=(
  ["2101"]="192.168.11.211"
  ["2201"]="192.168.11.221"
  ["2303"]="192.168.11.233"
  ["2304"]="192.168.11.234"
  ["2305"]="192.168.11.235"
  ["2306"]="192.168.11.236"
  ["2307"]="192.168.11.237"
  ["2308"]="192.168.11.238"
  ["2400"]="192.168.11.240"
  ["2401"]="192.168.11.241"
  ["2402"]="192.168.11.242"
  ["2403"]="192.168.11.243"
)
# Per-VMID result maps, filled in by the probe loop below.
declare -A PEER_COUNTS
declare -A BLOCK_NUMBERS
declare -A STATUS
echo "Gathering data from all RPC nodes..."
echo ""
for VMID in "${!NODES[@]}"; do
  IP="${NODES[$VMID]}"
  # Query eth_blockNumber over JSON-RPC; empty response == unreachable node.
  BLOCK_RESPONSE=$(exec_proxmox "curl -s -X POST -H 'Content-Type: application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}' http://$IP:8545 2>/dev/null" || echo "")
  if [ -n "$BLOCK_RESPONSE" ]; then
    BLOCK_HEX=$(echo "$BLOCK_RESPONSE" | grep -o '"result":"[^"]*"' | cut -d'"' -f4)
    # Quote the expansion and detect printf failure explicitly (the old
    # unquoted `printf "%d" $HEX ... || echo "0"` could emit "00" on
    # malformed input).
    if [ -n "$BLOCK_HEX" ] && BLOCK_NUM=$(printf "%d" "$BLOCK_HEX" 2>/dev/null); then
      BLOCK_NUMBERS[$VMID]=$BLOCK_NUM
    else
      BLOCK_NUMBERS[$VMID]="N/A"
    fi
  else
    BLOCK_NUMBERS[$VMID]="N/A"
    STATUS[$VMID]="❌ Not responding"
    continue
  fi
  # Query net_peerCount the same way.
  PEER_RESPONSE=$(exec_proxmox "curl -s -X POST -H 'Content-Type: application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"net_peerCount\",\"params\":[],\"id\":1}' http://$IP:8545 2>/dev/null" || echo "")
  if [ -n "$PEER_RESPONSE" ]; then
    PEER_HEX=$(echo "$PEER_RESPONSE" | grep -o '"result":"[^"]*"' | cut -d'"' -f4)
    if [ -n "$PEER_HEX" ] && PEER_NUM=$(printf "%d" "$PEER_HEX" 2>/dev/null); then
      PEER_COUNTS[$VMID]=$PEER_NUM
    else
      PEER_COUNTS[$VMID]="0"
    fi
  else
    PEER_COUNTS[$VMID]="N/A"
  fi
  # Map peer count to a health bucket.
  if [ "${PEER_COUNTS[$VMID]}" = "N/A" ] || [ "${BLOCK_NUMBERS[$VMID]}" = "N/A" ]; then
    STATUS[$VMID]="❌ Not responding"
  elif [ "${PEER_COUNTS[$VMID]}" -ge 7 ]; then
    STATUS[$VMID]="✅ Excellent"
  elif [ "${PEER_COUNTS[$VMID]}" -ge 5 ]; then
    STATUS[$VMID]="✅ Good"
  elif [ "${PEER_COUNTS[$VMID]}" -ge 3 ]; then
    STATUS[$VMID]="⚠️ Acceptable"
  elif [ "${PEER_COUNTS[$VMID]}" -ge 1 ]; then
    STATUS[$VMID]="⚠️ Warning"
  else
    STATUS[$VMID]="❌ Critical"
  fi
done
# Display results
echo "=========================================="
echo "Peer Count Analysis Results"
echo "=========================================="
printf "%-6s | %-15s | %-12s | %-6s | %-15s\n" "VMID" "IP Address" "Block Height" "Peers" "Status"
echo "----------------------------------------------------------------------------"
for VMID in "${!NODES[@]}"; do
  printf "%-6s | %-15s | %-12s | %-6s | %-15s\n" \
    "$VMID" "${NODES[$VMID]}" "${BLOCK_NUMBERS[$VMID]}" "${PEER_COUNTS[$VMID]}" "${STATUS[$VMID]}"
done
echo ""
echo "=========================================="
echo "Summary"
echo "=========================================="
echo ""
# Tally nodes per health bucket.
EXCELLENT=0
GOOD=0
ACCEPTABLE=0
WARNING=0
CRITICAL=0
NON_RESPONDING=0
for VMID in "${!STATUS[@]}"; do
  # FIX 1: VAR=$((VAR+1)) instead of ((VAR++)) — the latter returns a
  # non-zero status when the pre-increment value is 0, aborting under set -e.
  # FIX 2: "Not responding" is quoted — an unquoted case pattern with a bare
  # space is a bash syntax error.
  case "${STATUS[$VMID]}" in
    *Excellent*) EXCELLENT=$((EXCELLENT + 1)) ;;
    *Good*) GOOD=$((GOOD + 1)) ;;
    *Acceptable*) ACCEPTABLE=$((ACCEPTABLE + 1)) ;;
    *Warning*) WARNING=$((WARNING + 1)) ;;
    *Critical*) CRITICAL=$((CRITICAL + 1)) ;;
    *"Not responding"*) NON_RESPONDING=$((NON_RESPONDING + 1)) ;;
  esac
done
echo "✅ Excellent (7+ peers): $EXCELLENT nodes"
echo "✅ Good (5-6 peers): $GOOD nodes"
echo "⚠️ Acceptable (3-4 peers): $ACCEPTABLE nodes"
echo "⚠️ Warning (1-2 peers): $WARNING nodes"
echo "❌ Critical (0 peers): $CRITICAL nodes"
echo "❌ Not responding: $NON_RESPONDING nodes"
echo ""
# Expected peer count
echo "=========================================="
echo "Expected Peer Count"
echo "=========================================="
echo ""
echo "Network Size: ~19-20 active nodes"
echo ""
echo "Recommended Peer Counts:"
echo " - Minimum healthy: 2-3 peers"
echo " - Recommended: 5-7 peers ✅"
echo " - Maximum: 20-25 peers (max-peers setting)"
echo ""
# Analysis
echo "=========================================="
echo "Analysis"
echo "=========================================="
echo ""
# Consensus height = block number reported by the most nodes (O(n^2) over
# ~12 nodes, acceptable at this scale).
MAIN_BLOCK=""
MAIN_COUNT=0
for VMID in "${!BLOCK_NUMBERS[@]}"; do
  BLOCK="${BLOCK_NUMBERS[$VMID]}"
  if [ "$BLOCK" != "N/A" ] && [[ "$BLOCK" =~ ^[0-9]+$ ]]; then
    COUNT=0
    for VMID2 in "${!BLOCK_NUMBERS[@]}"; do
      if [ "${BLOCK_NUMBERS[$VMID2]}" = "$BLOCK" ]; then
        # COUNT=$((COUNT+1)) instead of ((COUNT++)), which returns non-zero
        # when COUNT is 0 and would abort the script under `set -e`.
        COUNT=$((COUNT + 1))
      fi
    done
    if [ "$COUNT" -gt "$MAIN_COUNT" ]; then
      MAIN_COUNT=$COUNT
      MAIN_BLOCK=$BLOCK
    fi
  fi
done
echo "Main network block height: $MAIN_BLOCK (${MAIN_COUNT} nodes)"
echo ""
# Flag nodes more than 1000 blocks away from the consensus height.
AHEAD=()
BEHIND=()
for VMID in "${!BLOCK_NUMBERS[@]}"; do
  BLOCK="${BLOCK_NUMBERS[$VMID]}"
  if [ "$BLOCK" != "N/A" ] && [[ "$BLOCK" =~ ^[0-9]+$ ]] && [ "$MAIN_BLOCK" != "" ]; then
    DIFF=$((BLOCK - MAIN_BLOCK))
    if [ "$DIFF" -gt 1000 ]; then
      AHEAD+=("$VMID (block $BLOCK, +$DIFF)")
    elif [ "$DIFF" -lt -1000 ]; then
      BEHIND+=("$VMID (block $BLOCK, $DIFF)")
    fi
  fi
done
if [ ${#AHEAD[@]} -gt 0 ]; then
  echo "⚠️ Nodes ahead of main network:"
  for node in "${AHEAD[@]}"; do
    echo " - VMID $node"
  done
  echo ""
fi
if [ ${#BEHIND[@]} -gt 0 ]; then
  echo "⏳ Nodes behind main network (syncing):"
  for node in "${BEHIND[@]}"; do
    echo " - VMID $node"
  done
  echo ""
fi
# Recommendations
echo "=========================================="
echo "Recommendations"
echo "=========================================="
echo ""
# Responsive nodes holding fewer than 3 peers (degraded mesh).
LOW_PEER_NODES=()
for VMID in "${!PEER_COUNTS[@]}"; do
  PEERS="${PEER_COUNTS[$VMID]}"
  if [ "$PEERS" != "N/A" ] && [ "$PEERS" -lt 3 ] && [ "$PEERS" -gt 0 ]; then
    LOW_PEER_NODES+=("$VMID")
  fi
done
if [ ${#LOW_PEER_NODES[@]} -gt 0 ]; then
  echo "⚠️ Nodes with low peer count (${#LOW_PEER_NODES[@]} nodes):"
  for vmid in "${LOW_PEER_NODES[@]}"; do
    echo " - VMID $vmid: ${PEER_COUNTS[$vmid]} peers (should have 5-7)"
  done
  echo ""
  echo "Actions needed:"
  echo " 1. Verify static-nodes.json contains all 15 nodes"
  echo " 2. Check discovery-enabled=true"
  echo " 3. Verify permissions-nodes.toml is correct"
  echo " 4. Restart Besu services"
  echo ""
  echo "Run: ./scripts/fix-thirdweb-peer-connectivity.sh"
  echo ""
fi
# Zero-peer nodes: either still syncing (low block height) or isolated.
ZERO_PEER_NODES=()
for VMID in "${!PEER_COUNTS[@]}"; do
  PEERS="${PEER_COUNTS[$VMID]}"
  if [ "$PEERS" = "0" ]; then
    ZERO_PEER_NODES+=("$VMID")
  fi
done
if [ ${#ZERO_PEER_NODES[@]} -gt 0 ]; then
  echo "⏳ Nodes with 0 peers (${#ZERO_PEER_NODES[@]} nodes):"
  for vmid in "${ZERO_PEER_NODES[@]}"; do
    BLOCK="${BLOCK_NUMBERS[$vmid]}"
    echo " - VMID $vmid: Block $BLOCK"
    # Heuristic: below 1,000,000 blocks assume the node is mid-sync.
    if [ "$BLOCK" != "N/A" ] && [ "$BLOCK" -lt 1000000 ]; then
      echo " Status: Syncing (expected during initial sync)"
    else
      echo " Status: May be isolated - investigate"
    fi
  done
  echo ""
fi
echo "=========================================="
echo "Complete"
echo "=========================================="

View File

@@ -4,6 +4,12 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"

View File

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
# Analyze cluster and prepare migration plan for LXC containers
# Reviews current container distribution and suggests migration strategy
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
# SECURITY NOTE(review): a default password is hard-coded here and sshpass
# exposes it on the process list (`ps`). Prefer SSH keys, or at minimum set
# PROXMOX_PASS via the environment and drop this default.
PROXMOX_PASS="${PROXMOX_PASS:-L@kers2010}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_header() { echo -e "${CYAN}[$1]${NC} $2"; }
# SSH helper
# Runs a command on the Proxmox host; host-key checking is disabled, so this
# is only safe on a trusted management LAN.
ssh_proxmox() {
sshpass -p "$PROXMOX_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$PROXMOX_HOST" "$@"
}
echo "========================================="
log_header "CLUSTER" "Analysis and Migration Planning"
echo "========================================="
echo ""
# Check cluster status
log_info "Cluster Status:"
ssh_proxmox "pvecm status" 2>&1 | head -20
echo ""
# Get node resource usage
log_info "Node Resource Usage:"
nodes_json=$(ssh_proxmox "pvesh get /nodes --output-format json" 2>&1)
if [[ -n "$nodes_json" ]]; then
  # BUG FIX: the original piped $nodes_json into `python3 <<'PYEOF'`, but a
  # heredoc replaces the pipe as python's stdin, so the program text consumed
  # stdin and json.load(sys.stdin) always saw EOF and failed. Pass the JSON
  # via the environment instead and keep the heredoc as the program text.
  NODES_JSON="$nodes_json" python3 <<'PYEOF'
import os, sys, json
try:
    data = json.loads(os.environ["NODES_JSON"])
    print(f"{'Node':<10} {'CPU %':<10} {'RAM Used/Max':<20} {'RAM %':<10} {'Disk Used/Max':<20} {'Disk %':<10} {'Status'}")
    print("-" * 100)
    for node in sorted(data, key=lambda x: x['node']):
        cpu_pct = node['cpu'] * 100
        mem_used = node['mem'] / (1024**3)
        mem_max = node['maxmem'] / (1024**3)
        mem_pct = (node['mem'] / node['maxmem']) * 100 if node['maxmem'] > 0 else 0
        disk_used = node['disk'] / (1024**3)
        disk_max = node['maxdisk'] / (1024**3)
        disk_pct = (node['disk'] / node['maxdisk']) * 100 if node['maxdisk'] > 0 else 0
        status = "🟢" if node['status'] == 'online' else "🔴"
        print(f"{node['node']:<10} {cpu_pct:>6.2f}% {mem_used:>6.1f}/{mem_max:>6.1f}GB {mem_pct:>5.1f}% {disk_used:>6.1f}/{disk_max:>6.1f}GB {disk_pct:>5.1f}% {status}")
except Exception as e:
    print(f"Error parsing node data: {e}", file=sys.stderr)
PYEOF
else
  log_error "Failed to get node information"
fi
echo ""
# Get container distribution per node
log_info "Container Distribution by Node:"
echo ""
for node in ml110 pve pve2; do
  log_header "NODE" "$node"
  containers_json=$(ssh_proxmox "pvesh get /nodes/$node/lxc --output-format json" 2>&1)
  if [[ -n "$containers_json" ]]; then
    # BUG FIX: same heredoc-vs-pipe stdin conflict as above — the piped JSON
    # never reached the python process. Deliver it via the environment.
    CONTAINERS_JSON="$containers_json" python3 <<'PYEOF'
import os, sys, json
try:
    response = os.environ.get("CONTAINERS_JSON", "")
    if not response or response.strip() == '':
        print(" No containers found")
        sys.exit(0)
    data = json.loads(response)
    # pvesh --output-format json commonly returns a bare list; some call
    # paths wrap it in {'data': [...]} — accept both shapes.
    if isinstance(data, dict) and isinstance(data.get('data'), list):
        containers = data['data']
    elif isinstance(data, list):
        containers = data
    else:
        containers = None
    if containers is not None:
        containers = sorted(containers, key=lambda x: x['vmid'])
        if containers:
            print(f"{'VMID':<6} {'Name':<35} {'Status':<12}")
            print("-" * 55)
            for c in containers:
                vmid = str(c['vmid'])
                name = (c.get('name', 'N/A') or 'N/A')[:35]
                status = c.get('status', 'unknown')
                status_icon = "🟢" if status == "running" else "🔴" if status == "stopped" else "🟡"
                print(f"{vmid:<6} {name:<35} {status_icon} {status}")
            print(f"\nTotal: {len(containers)} containers")
        else:
            print(" No containers found")
    else:
        print(" No containers found (empty data)")
except json.JSONDecodeError as e:
    print(f" Error parsing JSON: {e}")
    print(f" Response: {response[:200]}")
except Exception as e:
    print(f" Error: {e}")
PYEOF
  else
    echo " Error retrieving containers"
  fi
  echo ""
done
# Static migration recommendations (hand-derived from the resource snapshot
# printed above; update these when the fleet changes).
log_info "Migration Recommendations:"
echo ""
cat <<'RECOMMENDATIONS_EOF'
📊 Resource Analysis:
 - ml110: Heavy load (28.3% RAM used, 9.4% CPU)
 - pve2: Almost empty (1.8% RAM, 0.06% CPU) - IDEAL for migration target
 - pve: Light load (1.1% RAM, 0.20% CPU)

🎯 Priority 1 - High Resource Containers (move to pve2):
 - Besu validators (1000-1004) - High CPU/memory usage
 - Besu RPC nodes (2500-2502) - High memory usage (16GB each)
 - Blockscout (5000) - Database intensive

🎯 Priority 2 - Medium Priority:
 - Besu sentries (1500-1503) - Moderate resource usage
 - Service containers (3500-3501) - Oracle, CCIP monitor
 - Firefly (6200) - Moderate resources

🔒 Priority 3 - Keep on ml110 (infrastructure):
 - Infrastructure services (100-105) - proxmox-mail-gateway, cloudflared, omada, gitea, nginx
 - Monitoring (130) - Keep on primary node
 - These are core infrastructure and should remain on primary node

RECOMMENDATIONS_EOF
log_success "Analysis complete!"
echo ""
log_info "Next steps:"
cat <<'NEXT_STEPS_EOF'
 1. Review migration plan above
 2. Run: ./scripts/migrate-containers-to-pve2.sh to execute migrations
 3. Verify containers after migration

NEXT_STEPS_EOF

View File

@@ -4,12 +4,18 @@
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Configuration
R630_02_IP="192.168.11.12"
ML110_IP="192.168.11.10"
R630_02_IP="${PROXMOX_HOST_R630_02}"
ML110_IP="${PROXMOX_HOST_ML110}"
# Colors
RED='\033[0;31m'

View File

@@ -0,0 +1,217 @@
#!/usr/bin/env bash
# Analyze all Firefly issues for VMIDs 6200 and 6201
# Usage: ./scripts/analyze-firefly-issues.sh
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Configuration: Proxmox nodes that host the Firefly containers.
R630_02_IP="192.168.11.12"
ML110_IP="192.168.11.10"

# ANSI color codes used by the log helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

# Colored single-line status helpers (stdout).
log_info() {
  echo -e "${BLUE}[INFO]${NC} $1"
}
log_success() {
  echo -e "${GREEN}[✓]${NC} $1"
}
log_warn() {
  echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
  echo -e "${RED}[ERROR]${NC} $1"
}
log_section() {
  echo -e "\n${CYAN}=== $1 ===${NC}\n"
}

# Opening banner.
echo ""
log_info "═══════════════════════════════════════════════════════════"
log_info " ANALYZING FIREFLY ISSUES FOR VMIDs 6200 AND 6201"
log_info "═══════════════════════════════════════════════════════════"
echo ""
# Inspect one Firefly LXC container over SSH and print a multi-section
# health report (status, config, install, systemd, docker, common issues).
#
# Arguments:
#   $1 - container VMID
#   $2 - IP address of the Proxmox node hosting the container
#   $3 - human-readable node name (used only in the section heading)
# Outputs: report on stdout via the log_* helpers
# Returns: 0; individual probe failures are reported, never fatal
analyze_firefly() {
  local vmid=$1
  local node_ip=$2
  local node_name=$3
  # Shared SSH options: short timeout so an unreachable node cannot hang the report.
  local -a SSH_OPTS=(-o ConnectTimeout=5 -o StrictHostKeyChecking=no)
  local STATUS HOSTNAME IP ROOTFS FIREFLY_DIR COMPOSE_FILE IMAGE
  local SERVICE_EXISTS SERVICE_STATUS ERROR_LOG DOCKER_CONTAINERS DOCKER_IMAGES
  local COMPOSE_STATUS DISK_USAGE NETWORK_TEST

  log_section "VMID $vmid Analysis ($node_name)"

  # 1. Container status as reported by pct on the node.
  log_info "1. Container Status:"
  STATUS=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct status $vmid 2>/dev/null | awk '{print \$2}'" || echo "unknown")
  if [[ "$STATUS" == "running" ]]; then
    log_success " Container is running"
  else
    log_warn " Container status: $STATUS"
  fi

  # 2. Basic configuration pulled from the pct config dump.
  log_info "2. Container Configuration:"
  # pct config prints "hostname: <name>" (colon form); the old pattern
  # 'hostname=\K' never matched and always yielded an empty hostname.
  HOSTNAME=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct config $vmid 2>/dev/null | grep -oP '^hostname:\\s*\\K\\S+' | head -1" || echo "unknown")
  [[ -n "$HOSTNAME" ]] || HOSTNAME="unknown"
  # The IP lives inside the net0 line as ",ip=<addr>," so the = form is correct here.
  IP=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct config $vmid 2>/dev/null | grep -oP 'ip=\\K[^,]+' | head -1" || echo "unknown")
  [[ -n "$IP" ]] || IP="unknown"
  ROOTFS=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct config $vmid 2>/dev/null | grep '^rootfs:'" || echo "")
  log_info " Hostname: $HOSTNAME"
  log_info " IP: $IP"
  if [[ -n "$ROOTFS" ]]; then
    log_info " Storage: $(echo $ROOTFS | sed 's/^rootfs: //')"
  fi

  # 3. Firefly installation inside the container.
  log_info "3. Firefly Installation:"
  # Default up front so section 7 never reads an unset variable under
  # `set -u` when the install dir is missing and the inner checks are skipped.
  COMPOSE_FILE="missing"
  FIREFLY_DIR=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct exec $vmid -- test -d /opt/firefly && echo 'exists' || echo 'missing'" 2>/dev/null || echo "cannot_check")
  if [[ "$FIREFLY_DIR" == "exists" ]]; then
    log_success " Firefly directory exists: /opt/firefly"
    # Check docker-compose.yml
    COMPOSE_FILE=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
      "pct exec $vmid -- test -f /opt/firefly/docker-compose.yml && echo 'exists' || echo 'missing'" 2>/dev/null || echo "cannot_check")
    if [[ "$COMPOSE_FILE" == "exists" ]]; then
      log_success " docker-compose.yml exists"
      # Check image configuration
      IMAGE=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
        "pct exec $vmid -- grep -i 'image:' /opt/firefly/docker-compose.yml 2>/dev/null | grep -i firefly | head -1 | awk '{print \$2}'" || echo "")
      if [[ -n "$IMAGE" ]]; then
        log_info " Firefly image: $IMAGE"
      fi
    else
      log_warn " docker-compose.yml missing"
    fi
  else
    log_warn " Firefly directory missing or cannot check"
  fi

  # 4. systemd unit state inside the container.
  log_info "4. Systemd Service:"
  SERVICE_EXISTS=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct exec $vmid -- systemctl list-unit-files 2>/dev/null | grep -i firefly | head -1" || echo "")
  if [[ -n "$SERVICE_EXISTS" ]]; then
    log_success " Firefly service unit exists"
    SERVICE_STATUS=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
      "pct exec $vmid -- systemctl is-active firefly.service 2>/dev/null || echo 'inactive'" || echo "unknown")
    if [[ "$SERVICE_STATUS" == "active" ]]; then
      log_success " Service status: active"
    else
      log_warn " Service status: $SERVICE_STATUS"
      # Pull the last few journal lines to show why the unit is not active.
      ERROR_LOG=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
        "pct exec $vmid -- journalctl -u firefly.service -n 5 --no-pager 2>/dev/null | tail -3" || echo "")
      if [[ -n "$ERROR_LOG" ]]; then
        log_info " Recent errors:"
        echo "$ERROR_LOG" | sed 's/^/ /'
      fi
    fi
  else
    log_warn " Firefly service unit not found"
  fi

  # 5. Docker containers belonging to the Firefly stack.
  log_info "5. Docker Containers:"
  DOCKER_CONTAINERS=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct exec $vmid -- docker ps -a --format '{{.Names}}\t{{.Status}}' 2>/dev/null | grep -i firefly || echo 'none'" || echo "cannot_check")
  if [[ "$DOCKER_CONTAINERS" != "none" ]] && [[ "$DOCKER_CONTAINERS" != "cannot_check" ]]; then
    log_info " Firefly containers:"
    echo "$DOCKER_CONTAINERS" | while IFS=$'\t' read -r name status; do
      if echo "$status" | grep -q "Up"; then
        log_success " $name: $status"
      else
        log_warn " $name: $status"
      fi
    done
  else
    log_warn " No Firefly Docker containers found or Docker not accessible"
  fi

  # 6. Locally available Firefly images.
  log_info "6. Docker Images:"
  DOCKER_IMAGES=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct exec $vmid -- docker images --format '{{.Repository}}:{{.Tag}}' 2>/dev/null | grep -i firefly || echo 'none'" || echo "cannot_check")
  if [[ "$DOCKER_IMAGES" != "none" ]] && [[ "$DOCKER_IMAGES" != "cannot_check" ]]; then
    log_success " Firefly images available:"
    echo "$DOCKER_IMAGES" | sed 's/^/ /'
  else
    log_warn " No Firefly Docker images found"
  fi

  # 7. docker-compose view of the stack; only meaningful when the compose file exists.
  log_info "7. Docker Compose Status:"
  if [[ "$COMPOSE_FILE" == "exists" ]]; then
    # Wrap in `bash -c` so the cd and docker-compose both run INSIDE the
    # container; with a bare "pct exec $vmid -- cd ... && docker-compose ps"
    # the node's shell splits on && and runs docker-compose on the node itself.
    COMPOSE_STATUS=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
      "pct exec $vmid -- bash -c 'cd /opt/firefly && docker-compose ps' 2>/dev/null || echo 'error'" || echo "cannot_check")
    if [[ "$COMPOSE_STATUS" != "error" ]] && [[ "$COMPOSE_STATUS" != "cannot_check" ]]; then
      log_info " Docker Compose services:"
      echo "$COMPOSE_STATUS" | sed 's/^/ /'
    else
      log_warn " Cannot check docker-compose status"
    fi
  fi

  # 8. Generic health checks: disk pressure and outbound connectivity.
  log_info "8. Common Issues Check:"
  DISK_USAGE=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct exec $vmid -- df -h / 2>/dev/null | tail -1 | awk '{print \$5}' | sed 's/%//'" || echo "unknown")
  # Only compare when we actually got a number back; an empty or garbled
  # remote reply would otherwise break the arithmetic tests.
  if [[ "$DISK_USAGE" =~ ^[0-9]+$ ]]; then
    if [[ $DISK_USAGE -gt 90 ]]; then
      log_error " Disk usage: ${DISK_USAGE}% (CRITICAL)"
    elif [[ $DISK_USAGE -gt 80 ]]; then
      log_warn " Disk usage: ${DISK_USAGE}% (High)"
    else
      log_success " Disk usage: ${DISK_USAGE}% (OK)"
    fi
  fi
  NETWORK_TEST=$(ssh "${SSH_OPTS[@]}" root@${node_ip} \
    "pct exec $vmid -- ping -c 1 -W 2 8.8.8.8 2>/dev/null && echo 'working' || echo 'not_working'" || echo "unknown")
  if [[ "$NETWORK_TEST" == "working" ]]; then
    log_success " Network connectivity: OK"
  else
    log_warn " Network connectivity: Issues detected"
  fi
  echo ""
}
# Run the per-container analysis for a VMID if that container exists on the
# given node; otherwise emit the standard "not found" warning.
check_and_analyze() {
  local vmid=$1 host=$2 node=$3
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${host}" \
    "pct list 2>/dev/null | grep -q '^${vmid}'"; then
    analyze_firefly "$vmid" "$host" "$node"
  else
    log_warn "VMID $vmid not found on $node"
  fi
}

# VMID 6200 lives on r630-02; VMID 6201 on ml110.
check_and_analyze 6200 "$R630_02_IP" "r630-02"
check_and_analyze 6201 "$ML110_IP" "ml110"

log_success "═══════════════════════════════════════════════════════════"
log_success " ANALYSIS COMPLETE"
log_success "═══════════════════════════════════════════════════════════"
echo ""

View File

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
# Analyze NPMplus certificates and identify duplicates
# Uses Node.js to query database directly
#
# Usage: <script> [proxmox-host] [container-id]
# Reads the NPMplus SQLite database from inside the npmplus docker container
# (ssh -> pct exec -> docker exec -> node/better-sqlite3) and reports
# duplicate certificates plus which certificate ID to keep.
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Positional overrides; defaults target r630-01 and the NPMplus LXC.
PROXMOX_HOST="${1:-192.168.11.11}"
CONTAINER_ID="${2:-10233}"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔍 NPMplus Certificate Analysis"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Query certificates
# Read-only SQLite query executed inside the npmplus container.
# NOTE(review): 2>&1 folds any remote error text into CERT_JSON, so a failed
# docker/node invocation produces non-JSON that only surfaces as jq parse
# failures (counts silently fall back to 0) — confirm this is acceptable.
log_info "Querying certificates from database..."
CERT_JSON=$(ssh root@"$PROXMOX_HOST" "pct exec $CONTAINER_ID -- docker exec npmplus node -e \"
const Database = require('better-sqlite3');
const db = new Database('/data/npmplus/database.sqlite', { readonly: true });
const certs = db.prepare('SELECT id, domain_names, provider, expires_on, created_on, is_deleted FROM certificate WHERE is_deleted = 0 ORDER BY id').all();
console.log(JSON.stringify(certs));
db.close();
\" 2>&1" || echo "[]")
if [ "$CERT_JSON" = "[]" ] || [ -z "$CERT_JSON" ]; then
log_warn "No certificates found"
exit 0
fi
CERT_COUNT=$(echo "$CERT_JSON" | jq 'length' 2>/dev/null || echo "0")
log_info "Found $CERT_COUNT certificates"
echo ""
# Display all certificates
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📋 All Certificates:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# NOTE(review): these associative arrays are only written inside the
# `| while` pipeline below, i.e. in a subshell — the writes are lost when
# the loop ends, and the arrays are never read afterwards (dead code).
declare -A CERT_GROUPS
declare -A CERT_DETAILS
# Pretty-print each certificate. Loop body runs in a pipeline subshell.
echo "$CERT_JSON" | jq -c '.[]' 2>/dev/null | while IFS= read -r cert; do
cert_id=$(echo "$cert" | jq -r '.id')
# domain_names is stored as JSON text; second jq parses and joins it.
domain_names=$(echo "$cert" | jq -r '.domain_names' | jq -r 'join(",")' 2>/dev/null || echo "")
provider=$(echo "$cert" | jq -r '.provider')
expires_on=$(echo "$cert" | jq -r '.expires_on')
created_on=$(echo "$cert" | jq -r '.created_on')
echo " ID: $cert_id"
echo " Domains: $domain_names"
echo " Provider: $provider"
echo " Expires: $expires_on"
echo " Created: $created_on"
echo ""
# Normalize for grouping
# (lowercase + sorted domain list — but see the subshell NOTE above:
# this grouping never escapes the pipeline)
normalized=$(echo "$domain_names" | tr '[:upper:]' '[:lower:]' | tr ',' ' ' | xargs -n1 | sort | xargs | tr ' ' ',')
if [ -z "${CERT_GROUPS[$normalized]:-}" ]; then
CERT_GROUPS[$normalized]="$cert_id"
else
CERT_GROUPS[$normalized]="${CERT_GROUPS[$normalized]},$cert_id"
fi
CERT_DETAILS[$cert_id]="$domain_names|$provider|$expires_on|$created_on"
done
# Analyze duplicates
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔍 Duplicate Analysis:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Use a temporary file to store results since we're in a subshell
# NOTE(review): TEMP_FILE is written but never read back; it (and both
# rm -f calls) could be removed.
TEMP_FILE=$(mktemp)
echo "$CERT_JSON" > "$TEMP_FILE"
# Analyze for duplicates
# Groups by the raw domain_names JSON text — certs differing only in domain
# order or case are NOT grouped here (unlike the normalization above).
DUPLICATES=$(echo "$CERT_JSON" | jq -r 'group_by(.domain_names | tostring) | map(select(length > 1)) | .[] | "\(.[0].domain_names | tostring)|\(map(.id) | join(","))"' 2>/dev/null || echo "")
if [ -z "$DUPLICATES" ]; then
log_success "✅ No duplicate certificates found!"
rm -f "$TEMP_FILE"
exit 0
fi
duplicate_count=0
# NOTE(review): duplicate_count increments happen in this pipeline subshell
# and are lost (currently unused after the loop anyway).
echo "$DUPLICATES" | while IFS='|' read -r domains cert_ids; do
duplicate_count=$((duplicate_count + 1))
# Intentional word-splitting of the comma-separated ID list (IDs are numeric).
cert_array=(${cert_ids//,/ })
log_warn "Duplicate certificates found:"
log_info " Domains: $domains"
log_info " Certificate IDs: $cert_ids"
echo ""
# Find best certificate to keep (most recent)
best_id=""
best_created=""
for cert_id in "${cert_array[@]}"; do
cert_info=$(echo "$CERT_JSON" | jq -r ".[] | select(.id == $cert_id) | \"\(.id)|\(.created_on)\"" 2>/dev/null || echo "")
IFS='|' read -r id created <<< "$cert_info"
# Lexicographic compare; assumes created_on is an ISO-like timestamp that
# sorts correctly as a string — TODO confirm against the NPMplus schema.
if [ -z "$best_id" ] || [ "$created" \> "$best_created" ]; then
best_id="$id"
best_created="$created"
fi
done
log_success " → Keep Certificate ID: $best_id (created: $best_created)"
for cert_id in "${cert_array[@]}"; do
if [ "$cert_id" != "$best_id" ]; then
log_warn " → Delete Certificate ID: $cert_id"
fi
done
echo ""
done
rm -f "$TEMP_FILE"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Summary:"
log_info " Total certificates: $CERT_COUNT"
log_warn " Duplicates found - see analysis above"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

View File

@@ -0,0 +1,142 @@
#!/usr/bin/env bash
# Analyze NPMplus certificates and identify duplicates
# Uses Node.js to query database directly
#
# Usage: <script> [proxmox-host] [container-id]
# NOTE(review): near-byte duplicate of the sibling analyzer that sources
# config/ip-addresses.conf — consider keeping only one of the two.
set -euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Positional overrides; defaults target r630-01 and the NPMplus LXC.
PROXMOX_HOST="${1:-192.168.11.11}"
CONTAINER_ID="${2:-10233}"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔍 NPMplus Certificate Analysis"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Query certificates
# Read-only SQLite query executed inside the npmplus container via
# ssh -> pct exec -> docker exec -> node/better-sqlite3.
# NOTE(review): 2>&1 folds remote error text into CERT_JSON; failures only
# surface later as jq parse errors (counts fall back to 0).
log_info "Querying certificates from database..."
CERT_JSON=$(ssh root@"$PROXMOX_HOST" "pct exec $CONTAINER_ID -- docker exec npmplus node -e \"
const Database = require('better-sqlite3');
const db = new Database('/data/npmplus/database.sqlite', { readonly: true });
const certs = db.prepare('SELECT id, domain_names, provider, expires_on, created_on, is_deleted FROM certificate WHERE is_deleted = 0 ORDER BY id').all();
console.log(JSON.stringify(certs));
db.close();
\" 2>&1" || echo "[]")
if [ "$CERT_JSON" = "[]" ] || [ -z "$CERT_JSON" ]; then
log_warn "No certificates found"
exit 0
fi
CERT_COUNT=$(echo "$CERT_JSON" | jq 'length' 2>/dev/null || echo "0")
log_info "Found $CERT_COUNT certificates"
echo ""
# Display all certificates
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📋 All Certificates:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# NOTE(review): these arrays are only written inside the `| while` pipeline
# below (a subshell) — the writes are lost and the arrays are never read
# afterwards (dead code).
declare -A CERT_GROUPS
declare -A CERT_DETAILS
# Pretty-print each certificate. Loop body runs in a pipeline subshell.
echo "$CERT_JSON" | jq -c '.[]' 2>/dev/null | while IFS= read -r cert; do
cert_id=$(echo "$cert" | jq -r '.id')
# domain_names is stored as JSON text; second jq parses and joins it.
domain_names=$(echo "$cert" | jq -r '.domain_names' | jq -r 'join(",")' 2>/dev/null || echo "")
provider=$(echo "$cert" | jq -r '.provider')
expires_on=$(echo "$cert" | jq -r '.expires_on')
created_on=$(echo "$cert" | jq -r '.created_on')
echo " ID: $cert_id"
echo " Domains: $domain_names"
echo " Provider: $provider"
echo " Expires: $expires_on"
echo " Created: $created_on"
echo ""
# Normalize for grouping
# (lowercase + sorted domain list — see subshell NOTE above)
normalized=$(echo "$domain_names" | tr '[:upper:]' '[:lower:]' | tr ',' ' ' | xargs -n1 | sort | xargs | tr ' ' ',')
if [ -z "${CERT_GROUPS[$normalized]:-}" ]; then
CERT_GROUPS[$normalized]="$cert_id"
else
CERT_GROUPS[$normalized]="${CERT_GROUPS[$normalized]},$cert_id"
fi
CERT_DETAILS[$cert_id]="$domain_names|$provider|$expires_on|$created_on"
done
# Analyze duplicates
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔍 Duplicate Analysis:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Use a temporary file to store results since we're in a subshell
# NOTE(review): TEMP_FILE is written but never read back; removable.
TEMP_FILE=$(mktemp)
echo "$CERT_JSON" > "$TEMP_FILE"
# Analyze for duplicates
# Groups by the raw domain_names JSON text — certs differing only in domain
# order or case are NOT grouped here.
DUPLICATES=$(echo "$CERT_JSON" | jq -r 'group_by(.domain_names | tostring) | map(select(length > 1)) | .[] | "\(.[0].domain_names | tostring)|\(map(.id) | join(","))"' 2>/dev/null || echo "")
if [ -z "$DUPLICATES" ]; then
log_success "✅ No duplicate certificates found!"
rm -f "$TEMP_FILE"
exit 0
fi
duplicate_count=0
# NOTE(review): duplicate_count is incremented inside a pipeline subshell
# and lost (currently unused after the loop anyway).
echo "$DUPLICATES" | while IFS='|' read -r domains cert_ids; do
duplicate_count=$((duplicate_count + 1))
# Intentional word-splitting of the comma-separated ID list (IDs are numeric).
cert_array=(${cert_ids//,/ })
log_warn "Duplicate certificates found:"
log_info " Domains: $domains"
log_info " Certificate IDs: $cert_ids"
echo ""
# Find best certificate to keep (most recent)
best_id=""
best_created=""
for cert_id in "${cert_array[@]}"; do
cert_info=$(echo "$CERT_JSON" | jq -r ".[] | select(.id == $cert_id) | \"\(.id)|\(.created_on)\"" 2>/dev/null || echo "")
IFS='|' read -r id created <<< "$cert_info"
# Lexicographic compare; assumes created_on is an ISO-like timestamp that
# sorts correctly as a string — TODO confirm against the NPMplus schema.
if [ -z "$best_id" ] || [ "$created" \> "$best_created" ]; then
best_id="$id"
best_created="$created"
fi
done
log_success " → Keep Certificate ID: $best_id (created: $best_created)"
for cert_id in "${cert_array[@]}"; do
if [ "$cert_id" != "$best_id" ]; then
log_warn " → Delete Certificate ID: $cert_id"
fi
done
echo ""
done
rm -f "$TEMP_FILE"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Summary:"
log_info " Total certificates: $CERT_COUNT"
log_warn " Duplicates found - see analysis above"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

View File

@@ -0,0 +1,166 @@
#!/usr/bin/env bash
# Analyze potential for further script reduction
# Identifies consolidation opportunities and writes a markdown report to
# docs/00-meta/SCRIPT_REDUCTION_POTENTIAL.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "$SCRIPT_DIR/lib/logging.sh" 2>/dev/null || true
# Fallback log helpers: without these the script dies on the first log_*
# call ("command not found" under set -e) whenever lib/logging.sh is absent.
if ! command -v log_header >/dev/null 2>&1; then
  log_header() { echo "=== $1 ==="; }
fi
if ! command -v log_info >/dev/null 2>&1; then
  log_info() { echo "[INFO] $1"; }
fi
if ! command -v log_success >/dev/null 2>&1; then
  log_success() { echo "[OK] $1"; }
fi

OUTPUT_FILE="${PROJECT_ROOT}/docs/00-meta/SCRIPT_REDUCTION_POTENTIAL.md"
# Ensure the output directory exists (first run on a fresh checkout).
mkdir -p "$(dirname "$OUTPUT_FILE")"

log_header "Analyzing Script Reduction Potential"

# Count scripts by category.
# The multi-pattern finds group the -name alternatives in \( ... \): with a
# bare chain of -o's, -type f and the archive exclusion would bind only to
# the LAST -name (find's -o has lower precedence than the implicit -a),
# silently counting archived scripts for the other patterns.
FIX_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -type f -name "fix-*.sh" ! -path "*/archive/*" | wc -l)
CHECK_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -type f \( -name "check-*.sh" -o -name "verify-*.sh" -o -name "validate-*.sh" \) ! -path "*/archive/*" | wc -l)
LIST_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -type f \( -name "list-*.sh" -o -name "show-*.sh" -o -name "get-*.sh" \) ! -path "*/archive/*" | wc -l)
DEPLOY_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -type f \( -name "deploy-*.sh" -o -name "setup-*.sh" -o -name "install-*.sh" \) ! -path "*/archive/*" | wc -l)
CONFIG_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -type f \( -name "configure-*.sh" -o -name "config-*.sh" \) ! -path "*/archive/*" | wc -l)
MIGRATE_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -type f -name "migrate-*.sh" ! -path "*/archive/*" | wc -l)
# Scripts shorter than 50 lines (per-file wc via sh -c keeps find portable).
SMALL_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -name "*.sh" -type f ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/archive/*" ! -path "*/lib/*" -exec sh -c 'file="$1"; lines=$(wc -l < "$file" 2>/dev/null || echo 0); if [ "$lines" -lt 50 ] && [ "$lines" -gt 0 ]; then echo "$file"; fi' _ {} \; | wc -l)
TOTAL=$(find "$PROJECT_ROOT/scripts" -name "*.sh" -type f ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/archive/*" ! -path "*/lib/*" | wc -l)

# Markdown report. Unquoted EOF: $(date) and the $COUNT variables expand;
# literal backticks inside are escaped as \`.
cat > "$OUTPUT_FILE" <<EOF
# Script Reduction Potential Analysis
**Date:** $(date +%Y-%m-%d)
**Current Total:** $TOTAL active scripts
**Goal:** Further reduce through consolidation and modularization
---
## Current Breakdown
- **Fix Scripts:** $FIX_SCRIPTS
- **Check/Verify Scripts:** $CHECK_SCRIPTS
- **List/Show Scripts:** $LIST_SCRIPTS
- **Deploy/Setup Scripts:** $DEPLOY_SCRIPTS
- **Config Scripts:** $CONFIG_SCRIPTS
- **Migrate Scripts:** $MIGRATE_SCRIPTS
- **Small Scripts (< 50 lines):** $SMALL_SCRIPTS
---
## Reduction Opportunities
### 1. Fix Scripts Consolidation ($FIX_SCRIPTS scripts)
**Opportunity:** Many fix scripts could be consolidated into:
- \`fix-all-issues.sh\` - Master fix script
- \`fix-service-issues.sh\` - Service-specific fixes
- \`fix-network-issues.sh\` - Network fixes
- \`fix-config-issues.sh\` - Configuration fixes
**Potential Reduction:** 50-70% (consolidate similar fixes)
### 2. Check/Verify Scripts Consolidation ($CHECK_SCRIPTS scripts)
**Opportunity:** Create unified verification framework:
- \`verify-all.sh\` - Master verification script
- \`verify-service.sh [service]\` - Service-specific verification
- \`verify-network.sh\` - Network verification
- \`verify-config.sh\` - Configuration verification
**Potential Reduction:** 60-80% (use parameters instead of separate scripts)
### 3. List/Show Scripts Consolidation ($LIST_SCRIPTS scripts)
**Opportunity:** Create unified listing tool:
- \`list.sh [type] [filter]\` - Unified listing with parameters
- Types: vms, containers, services, networks, configs
**Potential Reduction:** 70-90% (single script with parameters)
### 4. Deploy/Setup Scripts Consolidation ($DEPLOY_SCRIPTS scripts)
**Opportunity:** Create deployment framework:
- \`deploy.sh [component] [options]\` - Unified deployment
- \`setup.sh [component] [options]\` - Unified setup
**Potential Reduction:** 40-60% (framework with component selection)
### 5. Config Scripts Consolidation ($CONFIG_SCRIPTS scripts)
**Opportunity:** Create configuration framework:
- \`configure.sh [component] [action]\` - Unified configuration
- Use shared configuration modules
**Potential Reduction:** 50-70% (framework approach)
### 6. Small Scripts (< 50 lines) ($SMALL_SCRIPTS scripts)
**Opportunity:** Many small scripts could be:
- Merged into larger utility scripts
- Converted to functions in shared modules
- Combined with similar functionality
**Potential Reduction:** 30-50% (merge into utilities)
---
## Estimated Total Reduction
**Conservative Estimate:**
- Fix scripts: 50% reduction
- Check scripts: 60% reduction
- List scripts: 70% reduction
- Deploy scripts: 40% reduction
- Config scripts: 50% reduction
- Small scripts: 30% reduction
**Total Potential Reduction:** ~200-300 scripts (25-40%)
**Target:** 460-560 scripts (from 760)
---
## Implementation Strategy
### Phase 1: Create Unified Frameworks
1. Create \`verify-all.sh\` with service/component parameters
2. Create \`list.sh\` with type/filter parameters
3. Create \`fix-all.sh\` with issue-type parameters
4. Create \`configure.sh\` with component/action parameters
### Phase 2: Migrate to Frameworks
1. Identify scripts that fit framework patterns
2. Convert to function calls or parameters
3. Archive original scripts
4. Update documentation
### Phase 3: Merge Small Scripts
1. Group small scripts by functionality
2. Merge into utility scripts
3. Create shared function libraries
4. Archive originals
### Phase 4: Final Cleanup
1. Remove truly obsolete scripts
2. Consolidate remaining duplicates
3. Update all references
4. Final verification
---
## Benefits
1. **Easier Maintenance** - Fewer scripts to maintain
2. **Consistent Patterns** - Unified interfaces
3. **Better Documentation** - Clearer structure
4. **Faster Development** - Reusable frameworks
5. **Reduced Complexity** - Simpler codebase
---
**Status:** Analysis complete, ready for consolidation planning
EOF

log_success "Analysis complete: $OUTPUT_FILE"
echo ""
log_info "Summary:"
echo " - Total scripts: $TOTAL"
echo " - Potential reduction: 200-300 scripts (25-40%)"
echo " - Target: 460-560 scripts"

View File

@@ -0,0 +1,153 @@
#!/usr/bin/env bash
# Analyze scripts for pruning - identify obsolete, duplicate, and small scripts
# Usage: ./scripts/analyze-scripts-for-pruning.sh [output-file]
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
OUTPUT_FILE="${1:-${PROJECT_ROOT}/docs/00-meta/SCRIPT_PRUNING_ANALYSIS.md}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }

# Create output directory
mkdir -p "$(dirname "$OUTPUT_FILE")"
log_info "Analyzing scripts for pruning..."
log_info "Output: $OUTPUT_FILE"
echo ""

# Counters (filled in below).
TOTAL=0
SMALL=0
OLD=0
DEPRECATED=0

# Report header. Unquoted EOF so $(date ...) expands; the quoted <<'EOF'
# form would write the literal text "$(date +%Y-%m-%d)" into the report.
cat > "$OUTPUT_FILE" <<EOF
# Script Pruning Analysis
**Date:** $(date +%Y-%m-%d)
**Purpose:** Identify scripts for pruning, archiving, or deletion
---
## Summary
EOF

# Count total scripts
TOTAL=$(find "$PROJECT_ROOT/scripts" -name "*.sh" -type f ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/archive/*" | wc -l)

# Find small scripts (< 10 lines) — likely stubs or abandoned tests.
log_info "Finding small scripts (< 10 lines)..."
SMALL_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -name "*.sh" -type f ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/archive/*" -exec sh -c 'file="$1"; lines=$(wc -l < "$file" 2>/dev/null || echo 0); if [ "$lines" -lt 10 ] && [ "$lines" -gt 0 ]; then echo "$lines|$file"; fi' _ {} \; | sort -t'|' -k1 -n)
# grep -c already prints "0" when nothing matches; the old "|| echo 0"
# appended a SECOND 0 (grep -c exits 1 on zero matches), producing a
# two-line value that broke the later numeric comparisons.
SMALL=$(echo "$SMALL_SCRIPTS" | grep -c . || true)

# Find scripts with deprecated/old in name
log_info "Finding scripts with deprecated/old naming..."
DEPRECATED_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -name "*.sh" -type f ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/archive/*" \( -iname "*old*" -o -iname "*backup*" -o -iname "*deprecated*" -o -iname "*temp*" -o -iname "*test*" -o -iname "*experimental*" \))
DEPRECATED=$(echo "$DEPRECATED_SCRIPTS" | grep -c . || true)

# Find old scripts (not modified in 180+ days)
log_info "Finding old scripts (180+ days)..."
# GNU date first, BSD/macOS fallback, then 0 (= "treat nothing as old").
CUTOFF_DATE=$(date -d "180 days ago" +%s 2>/dev/null || date -v-180d +%s 2>/dev/null || echo 0)
# The cutoff is passed into sh -c as $2: the single-quoted inner script
# cannot see the (unexported) CUTOFF_DATE shell variable.
OLD_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -name "*.sh" -type f ! -path "*/node_modules/*" ! -path "*/.git/*" ! -path "*/archive/*" -exec sh -c 'file="$1"; cutoff="$2"; mtime=$(stat -f%m "$file" 2>/dev/null || stat -c%Y "$file" 2>/dev/null || echo 0); if [ "$mtime" -lt "$cutoff" ] && [ "$mtime" -gt 0 ]; then echo "$file"; fi' _ {} "$CUTOFF_DATE" \;)
OLD=$(echo "$OLD_SCRIPTS" | grep -c . || true)

# Find scripts with TODO/FIXME about removal
log_info "Finding scripts marked for removal..."
TODO_REMOVE=$(grep -rl -E "(TODO|FIXME).*(delete|remove|deprecated|obsolete)" "$PROJECT_ROOT/scripts" --include="*.sh" 2>/dev/null | grep -v node_modules | grep -v ".git" | grep -v archive || true)

# Generate report (appended after the header written above).
{
  echo "## Statistics"
  echo ""
  echo "- **Total Scripts:** $TOTAL"
  echo "- **Small Scripts (< 10 lines):** $SMALL"
  echo "- **Deprecated Naming:** $DEPRECATED"
  echo "- **Old Scripts (180+ days):** $OLD"
  echo "- **Marked for Removal:** $(echo "$TODO_REMOVE" | grep -c . || true)"
  echo ""
  echo "---"
  echo ""
  echo "## 1. Small Scripts (< 10 lines)"
  echo ""
  echo "These scripts are likely incomplete stubs or test scripts:"
  echo ""
  # Skip the single empty line produced when the list is empty.
  echo "$SMALL_SCRIPTS" | while IFS='|' read -r lines file; do
    [ -n "$file" ] || continue
    echo "- \`$file\` ($lines lines)"
  done
  echo ""
  echo "---"
  echo ""
  echo "## 2. Deprecated Naming"
  echo ""
  echo "Scripts with 'old', 'backup', 'deprecated', 'temp', 'test' in name:"
  echo ""
  echo "$DEPRECATED_SCRIPTS" | while read -r file; do
    [ -n "$file" ] || continue
    echo "- \`$file\`"
  done
  echo ""
  echo "---"
  echo ""
  echo "## 3. Old Scripts (180+ days since modification)"
  echo ""
  echo "Scripts not modified recently (may be obsolete):"
  echo ""
  echo "$OLD_SCRIPTS" | head -50 | while read -r file; do
    [ -n "$file" ] || continue
    mtime=$(stat -f%m "$file" 2>/dev/null || stat -c%Y "$file" 2>/dev/null || echo 0)
    date_str=$(date -d "@$mtime" +%Y-%m-%d 2>/dev/null || date -r "$mtime" +%Y-%m-%d 2>/dev/null || echo "unknown")
    echo "- \`$file\` (last modified: $date_str)"
  done
  if [ "$OLD" -gt 50 ]; then
    echo "- ... and $((OLD - 50)) more"
  fi
  echo ""
  echo "---"
  echo ""
  echo "## 4. Scripts Marked for Removal"
  echo ""
  if [ -n "$TODO_REMOVE" ]; then
    echo "$TODO_REMOVE" | while read -r file; do
      echo "- \`$file\`"
    done
  else
    echo "None found."
  fi
  echo ""
  echo "---"
  echo ""
  echo "## Recommendations"
  echo ""
  echo "1. **Archive small scripts** (< 10 lines) unless they're critical"
  echo "2. **Review deprecated-named scripts** - likely candidates for removal"
  echo "3. **Audit old scripts** - verify if still needed"
  echo "4. **Remove scripts marked for deletion**"
  echo ""
  echo "**Estimated Reduction:** ~200-300 scripts (25-37%)"
  echo ""
} >> "$OUTPUT_FILE"

log_success "Analysis complete!"
log_info "Report saved to: $OUTPUT_FILE"
echo ""
log_info "Summary:"
echo " - Total: $TOTAL scripts"
echo " - Small: $SMALL scripts"
echo " - Deprecated naming: $DEPRECATED scripts"
echo " - Old (180+ days): $OLD scripts"
echo ""

View File

@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# Analyze small scripts (< 50 lines) for merging into utility modules
# Usage: ./scripts/analyze-small-scripts.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# NOTE(review): if lib/logging.sh is missing, log_header below is undefined
# and the script aborts under set -e — confirm the lib always ships, or add
# fallback definitions.
source "$SCRIPT_DIR/lib/logging.sh" 2>/dev/null || true
MAX_LINES=50
log_header "Analyzing Small Scripts (< $MAX_LINES lines)"
# Find all small scripts
# MAX_LINES is spliced into the single-quoted sh -c body via '"$MAX_LINES"'
# (quote-break); safe only because it is a numeric literal defined above.
SMALL_SCRIPTS=$(find "$PROJECT_ROOT/scripts" -name "*.sh" -type f ! -path "*/archive/*" ! -path "*/lib/*" ! -path "*/node_modules/*" ! -path "*/.git/*" -exec sh -c 'lines=$(wc -l < "$1"); [ "$lines" -lt '"$MAX_LINES"' ] && echo "$1"' _ {} \;)
# Categorize by functionality
# Buckets filled by categorize_script; counted and sampled in the report.
CONTAINER_SCRIPTS=()
NETWORK_SCRIPTS=()
SERVICE_SCRIPTS=()
CONFIG_SCRIPTS=()
PROXMOX_SCRIPTS=()
OTHER_SCRIPTS=()
# Classify a script path into one of the category arrays declared at file
# scope (CONTAINER_/NETWORK_/SERVICE_/CONFIG_/PROXMOX_/OTHER_SCRIPTS), based
# on tokens in its basename. First matching arm wins.
#
# Arguments:
#   $1 - path to the script to classify
# Side effects: appends $1 to exactly one of the category arrays.
categorize_script() {
  local script="$1"
  local name
  name=$(basename "$script")
  # The "ct" and "ip" tokens are matched with explicit delimiters: the old
  # bare globs *ct* / *ip* swallowed unrelated names ("doctor", "collect",
  # and ANY name containing "script", which contains "ip").
  case "$name" in
    *container*|*-ct-*|*-ct.*|ct-*|*vmid*)
      CONTAINER_SCRIPTS+=("$script")
      ;;
    *network*|*-ip-*|*-ip.*|ip-*|*dns*|*tunnel*)
      NETWORK_SCRIPTS+=("$script")
      ;;
    *service*|*systemd*|*postgres*|*redis*|*nginx*)
      SERVICE_SCRIPTS+=("$script")
      ;;
    *config*|*configure*)
      CONFIG_SCRIPTS+=("$script")
      ;;
    *proxmox*|*pve*|*qm*|*pct*)
      PROXMOX_SCRIPTS+=("$script")
      ;;
    *)
      OTHER_SCRIPTS+=("$script")
      ;;
  esac
}
# Process all small scripts
# The while loop reads via a here-string (not a pipeline), so TOTAL and the
# category arrays are updated in the current shell and survive the loop.
TOTAL=0
while IFS= read -r script; do
if [ -n "$script" ]; then
categorize_script "$script"
TOTAL=$((TOTAL + 1))
fi
done <<< "$SMALL_SCRIPTS"
# Report findings
log_info "Total small scripts found: $TOTAL"
echo ""
log_info "Categorized by functionality:"
echo " Container scripts: ${#CONTAINER_SCRIPTS[@]}"
echo " Network scripts: ${#NETWORK_SCRIPTS[@]}"
echo " Service scripts: ${#SERVICE_SCRIPTS[@]}"
echo " Config scripts: ${#CONFIG_SCRIPTS[@]}"
echo " Proxmox scripts: ${#PROXMOX_SCRIPTS[@]}"
echo " Other scripts: ${#OTHER_SCRIPTS[@]}"
echo ""
# Show examples
# These pipelines only print (subshell variable loss is irrelevant here).
if [ ${#CONTAINER_SCRIPTS[@]} -gt 0 ]; then
log_info "Container script examples:"
printf '%s\n' "${CONTAINER_SCRIPTS[@]}" | head -5 | while read -r s; do
echo " - $(basename "$s")"
done
fi
if [ ${#NETWORK_SCRIPTS[@]} -gt 0 ]; then
log_info "Network script examples:"
printf '%s\n' "${NETWORK_SCRIPTS[@]}" | head -5 | while read -r s; do
echo " - $(basename "$s")"
done
fi
log_success "Analysis complete!"

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env bash
# Apply Cloudflare DNS (1.1.1.1, 1.0.0.1) to all Proxmox hosts and LXC containers.
# Run from project root. Re-run when adding new containers.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
# Source IP config if available
[ -f config/ip-addresses.conf ] && source config/ip-addresses.conf 2>/dev/null || true
# Host list from ip-addresses.conf with hard-coded fallbacks.
HOSTS="${PROXMOX_HOST_R630_01:-192.168.11.11} ${PROXMOX_HOST_R630_02:-192.168.11.12} ${PROXMOX_HOST_ML110:-192.168.11.10}"
# NOTE(review): DNS is declared but never referenced — the resolvers are
# hard-coded again in the ssh/pct commands below; either use $DNS there or
# drop this variable.
DNS="1.1.1.1 1.0.0.1"
GREEN='\033[0;32m'
BLUE='\033[0;34m'
NC='\033[0m'
log_ok() { echo -e "${GREEN}[OK]${NC} $1"; }
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " Apply Cloudflare DNS to Proxmox (1.1.1.1, 1.0.0.1)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
for host in $HOSTS; do
log_info "Host $host"
# BatchMode probe: skip unreachable hosts instead of hanging on a prompt.
if ! ssh -o ConnectTimeout=5 -o BatchMode=yes root@$host "exit" 2>/dev/null; then
log_info " Skipped (not reachable)"
continue
fi
# Host resolv.conf
# Backs up the current resolv.conf to .bak.cloudflare before overwriting;
# trailing "|| true" keeps one failing host from aborting the whole run.
ssh root@$host "cp /etc/resolv.conf /etc/resolv.conf.bak.cloudflare 2>/dev/null; echo -e 'search lan\nnameserver 1.1.1.1\nnameserver 1.0.0.1' > /etc/resolv.conf" 2>/dev/null && log_ok " Host resolv.conf" || true
# Containers
# Unquoted command substitution is intentional word-splitting here: pct
# list yields one numeric VMID per line.
for vmid in $(ssh root@$host "pct list 2>/dev/null | awk 'NR>1 {print \$1}'"); do
ssh root@$host "pct set $vmid --nameserver '1.1.1.1 1.0.0.1'" 2>/dev/null && log_ok " VMID $vmid" || true
done
echo ""
done
log_ok "Done. UDM Pro: configure manually per docs/04-configuration/UDM_PRO_CLOUDFLARE_DNS_SETUP.md"
echo ""

View File

@@ -0,0 +1,96 @@
#!/usr/bin/env bash
# Apply direct Blockscout route configuration
# Updates NPMplus to use direct route: explorer.d-bis.org → 192.168.11.140:4000
# Usage: ./apply-direct-blockscout-route.sh
#
# Requires Node.js; Playwright is installed on demand from package.json.
# (Redundant `set -e` after `set -euo pipefail` removed.)
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

echo "=========================================="
echo "Apply Direct Blockscout Route"
echo "=========================================="
echo "This will update NPMplus configuration"
echo "to route explorer.d-bis.org directly to"
echo "Blockscout on port 4000 (bypassing nginx)"
echo "=========================================="
echo ""

# Check if Node.js is available
if ! command -v node &> /dev/null; then
  echo "❌ Node.js is not installed"
  echo "   Please install Node.js to run the NPMplus update script"
  exit 1
fi

# Check if Playwright is installed
if [ ! -d "$PROJECT_ROOT/node_modules/playwright" ]; then
  echo "⚠️  Playwright not found, installing dependencies..."
  cd "$PROJECT_ROOT"
  if [ -f "package.json" ]; then
    npm install
  else
    echo "❌ package.json not found"
    echo "   Please install dependencies manually"
    exit 1
  fi
fi
echo "✅ Dependencies ready"
echo ""

# Run the NPMplus update script
echo "=== Updating NPMplus Configuration ==="
cd "$PROJECT_ROOT/scripts/nginx-proxy-manager"
# BUGFIX: under `set -e`, a bare `node script.js; UPDATE_SUCCESS=$?` aborted
# the whole script when node failed, before $? could be captured — the
# failure-guidance branch below was unreachable. Capture the status via
# if/else instead so both outcomes are reported.
if [ -f "update-explorer-direct-route.js" ]; then
  echo "Running NPMplus update script..."
  if node update-explorer-direct-route.js; then
    UPDATE_SUCCESS=0
  else
    UPDATE_SUCCESS=$?
  fi
else
  echo "⚠️  update-explorer-direct-route.js not found"
  echo "   Creating it now..."
  # The script should have been created by configure-direct-blockscout-route.sh
  # If not, we'll use the main configure script with updated settings
  echo "   Using main configuration script with direct route..."
  if node configure-npmplus-domains.js; then
    UPDATE_SUCCESS=0
  else
    UPDATE_SUCCESS=$?
  fi
fi
echo ""
if [ $UPDATE_SUCCESS -eq 0 ]; then
  echo "=========================================="
  echo "✅ Configuration Updated Successfully"
  echo "=========================================="
  echo ""
  echo "Changes Applied:"
  echo "  Domain: explorer.d-bis.org"
  echo "  Old Route: http://192.168.11.140:80 (via nginx)"
  echo "  New Route: http://192.168.11.140:4000 (direct)"
  echo ""
  echo "Next Steps:"
  echo "  1. Wait 10-30 seconds for NPMplus to reload"
  echo "  2. Test the API:"
  echo "     curl -I https://explorer.d-bis.org/api/v2/stats"
  echo "  3. Check browser console for any errors"
  echo ""
else
  echo "=========================================="
  echo "⚠️  Configuration Update Had Issues"
  echo "=========================================="
  echo ""
  echo "You may need to manually update NPMplus:"
  echo "  1. Log into: https://192.168.0.166:81"
  echo "  2. Find 'explorer.d-bis.org' proxy host"
  echo "  3. Update Forward Host: 192.168.11.140"
  echo "  4. Update Forward Port: 4000"
  echo "  5. Save changes"
  echo ""
fi
echo "=========================================="

View File

@@ -0,0 +1,67 @@
#!/usr/bin/env bash
# Apply public RPC config to VMID 2201 (besu-rpc-public-1).
# Ensures no permissions allow contract deployment: account permissioning enabled
# with empty allowlist (permissions-accounts-public.toml).
# Run from project root; requires pct (typically on Proxmox host).
#
# Steps: validate local config files exist → verify the container exists and
# is running → push configs into the container → (if a besu-rpc systemd unit
# exists) point it at the new config and restart it.
set -euo pipefail
VMID=2201
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SOURCE_CONFIG="$PROJECT_ROOT/smom-dbis-138/config"
CONFIG_RPC_PUBLIC="$SOURCE_CONFIG/config-rpc-public.toml"
PERM_ACCOUNTS_PUBLIC="$SOURCE_CONFIG/permissions-accounts-public.toml"
PERM_NODES="$SOURCE_CONFIG/permissions-nodes.toml"
# Plain-text loggers (no color codes; may run on a bare Proxmox host console).
log() { echo "[INFO] $1"; }
success() { echo "[✓] $1"; }
warn() { echo "[WARN] $1"; }
err() { echo "[ERROR] $1"; }
# Preflight: both required config files must exist locally before touching
# the container (permissions-nodes.toml is optional, handled further down).
if [[ ! -f "$CONFIG_RPC_PUBLIC" ]]; then
err "Config not found: $CONFIG_RPC_PUBLIC"
exit 1
fi
if [[ ! -f "$PERM_ACCOUNTS_PUBLIC" ]]; then
err "Permissions file not found: $PERM_ACCOUNTS_PUBLIC"
exit 1
fi
# `pct status` failing means either the VMID doesn't exist or we're not on a
# Proxmox host at all — both are fatal.
if ! pct status "$VMID" &>/dev/null; then
err "VMID $VMID not found or pct not available (run on Proxmox host?)."
exit 1
fi
if ! pct status "$VMID" 2>/dev/null | grep -q running; then
warn "VMID $VMID is not running. Start it first, then re-run."
exit 1
fi
log "Applying public RPC config to VMID $VMID (no contract deployment)..."
# Copy main config (paths in config use /data/besu, /genesis/, /permissions/)
pct push "$VMID" "$CONFIG_RPC_PUBLIC" "/etc/besu/config-rpc-public.toml"
# chown is best-effort: the besu user may not exist yet in a fresh container.
pct exec "$VMID" -- chown besu:besu /etc/besu/config-rpc-public.toml 2>/dev/null || true
# Ensure /permissions exists and copy permissions-accounts-public.toml
pct exec "$VMID" -- mkdir -p /permissions
pct push "$VMID" "$PERM_ACCOUNTS_PUBLIC" "/permissions/permissions-accounts-public.toml"
pct exec "$VMID" -- chown -R besu:besu /permissions 2>/dev/null || true
if [[ -f "$PERM_NODES" ]]; then
pct push "$VMID" "$PERM_NODES" "/permissions/permissions-nodes.toml"
pct exec "$VMID" -- chown besu:besu /permissions/permissions-nodes.toml 2>/dev/null || true
fi
# Point systemd to config-rpc-public.toml if besu-rpc.service exists
if pct exec "$VMID" -- systemctl cat besu-rpc.service &>/dev/null; then
# The single-quoted bash -c keeps \$BESU_CONFIG from expanding locally, so
# sed rewrites the *literal* string "$BESU_CONFIG/<file>" inside the unit
# file's ExecStart line to point at config-rpc-public.toml.
pct exec "$VMID" -- bash -c 'sed -i "s|--config-file=\$BESU_CONFIG/[^ ]*|--config-file=\$BESU_CONFIG/config-rpc-public.toml|g" /etc/systemd/system/besu-rpc.service 2>/dev/null || true'
pct exec "$VMID" -- systemctl daemon-reload
log "Restarting besu-rpc.service..."
pct exec "$VMID" -- systemctl restart besu-rpc.service
success "Config applied and besu-rpc restarted."
else
log "besu-rpc.service not found; config files are in place. Restart Besu manually if needed."
success "Config files applied."
fi

View File

@@ -0,0 +1,103 @@
#!/bin/bash
# Backup current container configurations before IP changes
# Creates rollback script
#
# For every planned conversion the current `pct config` output is saved, and
# a matching rollback command is appended to a generated rollback script.
set -euo pipefail

# Load IP configuration (optional; every use below has a literal fallback)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

BACKUP_DIR="/home/intlc/projects/proxmox/backups/ip_conversion_$(date +%Y%m%d_%H%M%S)"
ROLLBACK_SCRIPT="$BACKUP_DIR/rollback-ip-changes.sh"
mkdir -p "$BACKUP_DIR"

echo "=== Backing Up Container Configurations ==="
echo "Backup directory: $BACKUP_DIR"
echo ""

# Define conversions directly (from IP_ASSIGNMENT_PLAN.md)
# Format: host_ip:vmid:old_ip:new_ip:name:hostname
# FIX: the original nested a variable inside its own default several times
# (${VAR:-${VAR:-${VAR:-...}}}) — that is equivalent to a single
# ${VAR:-default}, so the redundant nesting is collapsed here.
declare -a CONVERSIONS=(
  "${PROXMOX_HOST_ML110:-192.168.11.10}:3501:${IP_DEVICE_14:-192.168.11.14}:192.168.11.28:ccip-monitor-1:ml110"
  "${PROXMOX_HOST_ML110:-192.168.11.10}:3500:192.168.11.15:192.168.11.29:oracle-publisher-1:ml110"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:103:${IP_OMADA:-192.168.11.20}:192.168.11.30:omada:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:104:192.168.11.18:192.168.11.31:gitea:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:100:192.168.11.4:192.168.11.32:proxmox-mail-gateway:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:101:192.168.11.6:192.168.11.33:proxmox-datacenter-manager:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:102:192.168.11.9:192.168.11.34:cloudflared:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:6200:192.168.11.7:${IP_SERVICE_35:-192.168.11.35}:firefly-1:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:7811:N/A:${IP_SERVICE_36:-192.168.11.36}:mim-api-1:r630-02"
)

# Create rollback script header. Quoted 'EOF': nothing expands at generation
# time — the rollback script resolves its own paths when it runs.
# NOTE(review): the generated script lives inside $BACKUP_DIR, so its
# PROJECT_ROOT (parent of the backup dir) probably has no
# config/ip-addresses.conf; the `|| true` makes that a silent no-op. Confirm
# NETWORK_GATEWAY does not need to come from that file at rollback time
# (it is baked in below at generation time anyway).
cat > "$ROLLBACK_SCRIPT" << 'EOF'
#!/bin/bash
# Rollback script for IP changes
# Generated automatically - DO NOT EDIT MANUALLY
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
echo "=== Rolling Back IP Changes ==="
echo ""
EOF
chmod +x "$ROLLBACK_SCRIPT"

# Backup each container
for conversion in "${CONVERSIONS[@]}"; do
  IFS=':' read -r host_ip vmid old_ip new_ip name hostname <<< "$conversion"
  echo "Backing up VMID $vmid ($name) on $hostname..."
  # Backup container config (best-effort: an unreachable host only warns)
  backup_file="$BACKUP_DIR/${hostname}_${vmid}_config.txt"
  ssh -o ConnectTimeout=10 root@"$host_ip" "pct config $vmid" > "$backup_file" 2>/dev/null || echo "Warning: Could not backup $vmid"
  # Add to rollback script (only if old_ip is not N/A)
  if [ "$old_ip" != "N/A" ] && [ -n "$old_ip" ]; then
    # Unquoted EOF: host/vmid/old_ip and the gateway are baked in now, so
    # the rollback script needs no external configuration to run.
    cat >> "$ROLLBACK_SCRIPT" << EOF
# Rollback VMID $vmid ($name) on $hostname
echo "Rolling back VMID $vmid to $old_ip..."
ssh -o ConnectTimeout=10 root@$host_ip "pct stop $vmid" 2>/dev/null || true
sleep 2
ssh -o ConnectTimeout=10 root@$host_ip "pct set $vmid --net0 bridge=vmbr0,name=eth0,ip=$old_ip/24,gw=${NETWORK_GATEWAY:-192.168.11.1},type=veth" || echo "Warning: Failed to rollback $vmid"
ssh -o ConnectTimeout=10 root@$host_ip "pct start $vmid" 2>/dev/null || true
echo ""
EOF
  fi
done

# Create summary
cat > "$BACKUP_DIR/backup_summary.txt" << EOF
Backup Summary
Generated: $(date)
Total containers to convert: ${#CONVERSIONS[@]}
Conversions:
$(printf '%s\n' "${CONVERSIONS[@]}")
Backup files:
$(ls -1 "$BACKUP_DIR"/*_config.txt 2>/dev/null | wc -l) config files backed up
Rollback script: $ROLLBACK_SCRIPT
EOF

echo ""
echo "=== Backup Complete ==="
echo "Backed up ${#CONVERSIONS[@]} container configurations"
echo "Backup directory: $BACKUP_DIR"
echo "Rollback script: $ROLLBACK_SCRIPT"
echo ""
echo "To rollback changes, run: $ROLLBACK_SCRIPT"

View File

@@ -0,0 +1,229 @@
#!/bin/bash
# Automated backup of NPMplus configuration and data
# Backs up database, proxy hosts, certificates, and configuration files
#
# Env (all optional): NPMPLUS_HOST, NPMPLUS_VMID, NPM_URL, NPM_EMAIL,
# NPM_PASSWORD (enables the API-based exports), BACKUP_DEST, RETENTION_DAYS.
# Output: $BACKUP_DEST/npmplus-backup-<timestamp>.tar.gz plus a manifest.
# All remote access is ssh to the Proxmox host, then `pct exec` into the
# NPMplus container, then `docker exec/cp` into the npmplus container.
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# NOTE(review): SCRIPT_DIR/PROJECT_ROOT are recomputed here with a different
# parent (../.. instead of ..). ip-addresses.conf above is resolved against
# the first value while .env and BACKUP_DEST use the second — confirm which
# directory layout this script is actually installed in.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Strict mode is relaxed only while sourcing .env so a malformed or partial
# .env cannot abort the backup; it is restored immediately after.
if [ -f "$PROJECT_ROOT/.env" ]; then
set +euo pipefail
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -euo pipefail
fi
NPMPLUS_HOST="${NPMPLUS_HOST:-192.168.11.11}"
NPMPLUS_VMID="${NPMPLUS_VMID:-10233}"
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS_ETH0:-192.168.11.166}:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
BACKUP_DEST="${BACKUP_DEST:-$PROJECT_ROOT/backups/npmplus}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR="$BACKUP_DEST/npmplus-backup-$TIMESTAMP"
mkdir -p "$BACKUP_DIR"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "💾 NPMplus Backup"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Backup directory: $BACKUP_DIR"
log_info "NPMplus Host: $NPMPLUS_HOST"
log_info "NPMplus VMID: $NPMPLUS_VMID"
# 1. Backup Database
log_info "Backing up NPMplus database..."
DB_BACKUP_SUCCESS=false
# Try direct file copy first
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker cp npmplus:/data/database.sqlite /tmp/db-backup.sqlite 2>/dev/null"; then
scp -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST:/tmp/db-backup.sqlite" \
"$BACKUP_DIR/database.sqlite" 2>/dev/null && \
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" "rm -f /tmp/db-backup.sqlite" 2>/dev/null && \
DB_BACKUP_SUCCESS=true
fi
# Try SQL dump as fallback
if [ "$DB_BACKUP_SUCCESS" = false ]; then
DB_DUMP=$(ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus sqlite3 /data/database.sqlite '.dump' 2>/dev/null" || echo "")
# "executable file not found" means sqlite3 is missing in the container —
# treat that as a failed dump, not as backup content.
if [ -n "$DB_DUMP" ] && ! echo "$DB_DUMP" | grep -q "executable file not found"; then
echo "$DB_DUMP" > "$BACKUP_DIR/database.sql"
DB_BACKUP_SUCCESS=true
fi
fi
if [ "$DB_BACKUP_SUCCESS" = true ]; then
# stat -f%z is BSD/macOS, stat -c%s is GNU; try both, then the .sql dump.
DB_SIZE=$(stat -f%z "$BACKUP_DIR/database.sqlite" 2>/dev/null || \
stat -c%s "$BACKUP_DIR/database.sqlite" 2>/dev/null || \
stat -c%s "$BACKUP_DIR/database.sql" 2>/dev/null || echo "0")
log_success "Database backed up ($DB_SIZE bytes)"
else
log_warn "Database backup failed - database may be empty or inaccessible"
fi
# 2. Backup Proxy Hosts via API
if [ -n "$NPM_PASSWORD" ]; then
log_info "Backing up proxy hosts via API..."
# -k: the NPMplus admin UI uses a self-signed certificate.
TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
-H "Content-Type: application/json" \
-d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}" 2>/dev/null || echo "{}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
# Export proxy hosts
PROXY_HOSTS_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
echo "$PROXY_HOSTS_JSON" | jq '.' > "$BACKUP_DIR/proxy_hosts.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/proxy_hosts.json"
PROXY_COUNT=$(echo "$PROXY_HOSTS_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Proxy hosts backed up ($PROXY_COUNT hosts)"
# Export certificates metadata
CERTIFICATES_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/certificates" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
echo "$CERTIFICATES_JSON" | jq '.' > "$BACKUP_DIR/certificates.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/certificates.json"
CERT_COUNT=$(echo "$CERTIFICATES_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Certificates metadata backed up ($CERT_COUNT certificates)"
else
log_warn "API authentication failed - skipping API-based backups"
fi
else
log_warn "NPM_PASSWORD not set - skipping API-based backups"
fi
# 3. Backup Certificate Files
log_info "Backing up certificate files..."
CERT_BACKUP_DIR="$BACKUP_DIR/certificates"
mkdir -p "$CERT_BACKUP_DIR"
# Find certificate path
CERT_PATH=$(ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker volume inspect npmplus_data --format '{{.Mountpoint}}' 2>/dev/null" || echo "")
if [ -n "$CERT_PATH" ] && [ "$CERT_PATH" != "null" ]; then
# Certbot layout differs between NPMplus versions; probe both locations.
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"test -d $CERT_PATH/tls/certbot/live 2>/dev/null"; then
CERT_SOURCE="$CERT_PATH/tls/certbot/live"
elif ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"test -d $CERT_PATH/certbot/live 2>/dev/null"; then
CERT_SOURCE="$CERT_PATH/certbot/live"
else
CERT_SOURCE=""
fi
if [ -n "$CERT_SOURCE" ]; then
# The `| while` here only relays rsync output to the logger; no variables
# set inside it are needed afterwards, so the subshell is harmless.
rsync -avz --delete \
-e "ssh -o StrictHostKeyChecking=no" \
root@"$NPMPLUS_HOST:$CERT_SOURCE/" \
"$CERT_BACKUP_DIR/" 2>&1 | while IFS= read -r line; do
log_info "$line"
done
CERT_COUNT=$(find "$CERT_BACKUP_DIR" -type d -mindepth 1 -maxdepth 1 2>/dev/null | wc -l || echo "0")
log_success "Certificate files backed up ($CERT_COUNT certificate directories)"
else
log_warn "Certificate directory not found"
fi
else
log_warn "Could not determine certificate path"
fi
# 4. Backup Nginx Configuration Files
log_info "Backing up Nginx configuration files..."
NGINX_BACKUP_DIR="$BACKUP_DIR/nginx"
mkdir -p "$NGINX_BACKUP_DIR"
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus test -d /data/nginx 2>/dev/null"; then
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus tar czf /tmp/nginx-config.tar.gz -C /data nginx 2>/dev/null" && \
scp -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST:/tmp/nginx-config.tar.gz" \
"$NGINX_BACKUP_DIR/nginx-config.tar.gz" 2>/dev/null && \
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" "rm -f /tmp/nginx-config.tar.gz" 2>/dev/null && \
log_success "Nginx configuration backed up"
else
log_warn "Nginx configuration directory not found"
fi
# 5. Create Backup Manifest
log_info "Creating backup manifest..."
cat > "$BACKUP_DIR/manifest.txt" <<EOF
NPMplus Backup Manifest
=======================
Date: $(date)
Host: $NPMPLUS_HOST
VMID: $NPMPLUS_VMID
NPM URL: $NPM_URL
Contents:
- database.sqlite or database.sql: NPMplus SQLite database
- proxy_hosts.json: Proxy hosts configuration (if API available)
- certificates.json: Certificates metadata (if API available)
- certificates/: Certificate files from disk
- nginx/: Nginx configuration files
Backup Size: $(du -sh "$BACKUP_DIR" | awk '{print $1}')
Restore Instructions:
See: docs/04-configuration/NPMPLUS_BACKUP_RESTORE.md
EOF
log_success "Backup manifest created"
# 6. Compress Backup
log_info "Compressing backup..."
cd "$BACKUP_DEST"
tar czf "npmplus-backup-$TIMESTAMP.tar.gz" "npmplus-backup-$TIMESTAMP" 2>&1 | while IFS= read -r line; do
log_info "$line"
done
if [ -f "npmplus-backup-$TIMESTAMP.tar.gz" ]; then
# BSD/GNU stat fallback, same as the database size check above.
COMPRESSED_SIZE=$(stat -f%z "npmplus-backup-$TIMESTAMP.tar.gz" 2>/dev/null || \
stat -c%s "npmplus-backup-$TIMESTAMP.tar.gz" 2>/dev/null || echo "0")
log_success "Backup compressed ($(numfmt --to=iec-i --suffix=B $COMPRESSED_SIZE 2>/dev/null || echo "$COMPRESSED_SIZE bytes"))"
# Remove uncompressed directory
rm -rf "npmplus-backup-$TIMESTAMP"
else
log_warn "Compression failed - keeping uncompressed backup"
fi
# 7. Cleanup Old Backups
log_info "Cleaning up old backups (retention: $RETENTION_DAYS days)..."
find "$BACKUP_DEST" -name "npmplus-backup-*.tar.gz" -type f -mtime +$RETENTION_DAYS -delete 2>/dev/null || true
OLD_COUNT=$(find "$BACKUP_DEST" -name "npmplus-backup-*.tar.gz" -type f | wc -l || echo "0")
log_success "Old backups cleaned up ($OLD_COUNT backups retained)"
# Summary
echo ""
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "✅ Backup Complete!"
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Backup location: $BACKUP_DEST/npmplus-backup-$TIMESTAMP.tar.gz"
log_info "Manifest: $BACKUP_DIR/manifest.txt"
echo ""
View File

@@ -0,0 +1,223 @@
#!/bin/bash
# Automated backup of NPMplus configuration and data
# Backs up database, proxy hosts, certificates, and configuration files
#
# Env (all optional): NPMPLUS_HOST, NPMPLUS_VMID, NPM_URL, NPM_EMAIL,
# NPM_PASSWORD (enables the API-based exports), BACKUP_DEST, RETENTION_DAYS.
# Output: $BACKUP_DEST/npmplus-backup-<timestamp>.tar.gz plus a manifest.
# All remote access is ssh to the Proxmox host, then `pct exec` into the
# NPMplus container, then `docker exec/cp` into the npmplus container.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Strict mode is relaxed only while sourcing .env so a malformed or partial
# .env cannot abort the backup; it is restored immediately after.
if [ -f "$PROJECT_ROOT/.env" ]; then
set +euo pipefail
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -euo pipefail
fi
NPMPLUS_HOST="${NPMPLUS_HOST:-192.168.11.11}"
NPMPLUS_VMID="${NPMPLUS_VMID:-10233}"
NPM_URL="${NPM_URL:-https://192.168.11.166:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
BACKUP_DEST="${BACKUP_DEST:-$PROJECT_ROOT/backups/npmplus}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR="$BACKUP_DEST/npmplus-backup-$TIMESTAMP"
mkdir -p "$BACKUP_DIR"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "💾 NPMplus Backup"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Backup directory: $BACKUP_DIR"
log_info "NPMplus Host: $NPMPLUS_HOST"
log_info "NPMplus VMID: $NPMPLUS_VMID"
# 1. Backup Database
log_info "Backing up NPMplus database..."
DB_BACKUP_SUCCESS=false
# Try direct file copy first
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker cp npmplus:/data/database.sqlite /tmp/db-backup.sqlite 2>/dev/null"; then
scp -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST:/tmp/db-backup.sqlite" \
"$BACKUP_DIR/database.sqlite" 2>/dev/null && \
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" "rm -f /tmp/db-backup.sqlite" 2>/dev/null && \
DB_BACKUP_SUCCESS=true
fi
# Try SQL dump as fallback
if [ "$DB_BACKUP_SUCCESS" = false ]; then
DB_DUMP=$(ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus sqlite3 /data/database.sqlite '.dump' 2>/dev/null" || echo "")
# "executable file not found" means sqlite3 is missing in the container —
# treat that as a failed dump, not as backup content.
if [ -n "$DB_DUMP" ] && ! echo "$DB_DUMP" | grep -q "executable file not found"; then
echo "$DB_DUMP" > "$BACKUP_DIR/database.sql"
DB_BACKUP_SUCCESS=true
fi
fi
if [ "$DB_BACKUP_SUCCESS" = true ]; then
# stat -f%z is BSD/macOS, stat -c%s is GNU; try both, then the .sql dump.
DB_SIZE=$(stat -f%z "$BACKUP_DIR/database.sqlite" 2>/dev/null || \
stat -c%s "$BACKUP_DIR/database.sqlite" 2>/dev/null || \
stat -c%s "$BACKUP_DIR/database.sql" 2>/dev/null || echo "0")
log_success "Database backed up ($DB_SIZE bytes)"
else
log_warn "Database backup failed - database may be empty or inaccessible"
fi
# 2. Backup Proxy Hosts via API
if [ -n "$NPM_PASSWORD" ]; then
log_info "Backing up proxy hosts via API..."
# -k: the NPMplus admin UI uses a self-signed certificate.
TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
-H "Content-Type: application/json" \
-d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}" 2>/dev/null || echo "{}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
# Export proxy hosts
PROXY_HOSTS_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
echo "$PROXY_HOSTS_JSON" | jq '.' > "$BACKUP_DIR/proxy_hosts.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/proxy_hosts.json"
PROXY_COUNT=$(echo "$PROXY_HOSTS_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Proxy hosts backed up ($PROXY_COUNT hosts)"
# Export certificates metadata
CERTIFICATES_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/certificates" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
echo "$CERTIFICATES_JSON" | jq '.' > "$BACKUP_DIR/certificates.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/certificates.json"
CERT_COUNT=$(echo "$CERTIFICATES_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Certificates metadata backed up ($CERT_COUNT certificates)"
else
log_warn "API authentication failed - skipping API-based backups"
fi
else
log_warn "NPM_PASSWORD not set - skipping API-based backups"
fi
# 3. Backup Certificate Files
log_info "Backing up certificate files..."
CERT_BACKUP_DIR="$BACKUP_DIR/certificates"
mkdir -p "$CERT_BACKUP_DIR"
# Find certificate path
CERT_PATH=$(ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker volume inspect npmplus_data --format '{{.Mountpoint}}' 2>/dev/null" || echo "")
if [ -n "$CERT_PATH" ] && [ "$CERT_PATH" != "null" ]; then
# Certbot layout differs between NPMplus versions; probe both locations.
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"test -d $CERT_PATH/tls/certbot/live 2>/dev/null"; then
CERT_SOURCE="$CERT_PATH/tls/certbot/live"
elif ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"test -d $CERT_PATH/certbot/live 2>/dev/null"; then
CERT_SOURCE="$CERT_PATH/certbot/live"
else
CERT_SOURCE=""
fi
if [ -n "$CERT_SOURCE" ]; then
# The `| while` here only relays rsync output to the logger; no variables
# set inside it are needed afterwards, so the subshell is harmless.
rsync -avz --delete \
-e "ssh -o StrictHostKeyChecking=no" \
root@"$NPMPLUS_HOST:$CERT_SOURCE/" \
"$CERT_BACKUP_DIR/" 2>&1 | while IFS= read -r line; do
log_info "$line"
done
CERT_COUNT=$(find "$CERT_BACKUP_DIR" -type d -mindepth 1 -maxdepth 1 2>/dev/null | wc -l || echo "0")
log_success "Certificate files backed up ($CERT_COUNT certificate directories)"
else
log_warn "Certificate directory not found"
fi
else
log_warn "Could not determine certificate path"
fi
# 4. Backup Nginx Configuration Files
log_info "Backing up Nginx configuration files..."
NGINX_BACKUP_DIR="$BACKUP_DIR/nginx"
mkdir -p "$NGINX_BACKUP_DIR"
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus test -d /data/nginx 2>/dev/null"; then
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus tar czf /tmp/nginx-config.tar.gz -C /data nginx 2>/dev/null" && \
scp -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST:/tmp/nginx-config.tar.gz" \
"$NGINX_BACKUP_DIR/nginx-config.tar.gz" 2>/dev/null && \
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" "rm -f /tmp/nginx-config.tar.gz" 2>/dev/null && \
log_success "Nginx configuration backed up"
else
log_warn "Nginx configuration directory not found"
fi
# 5. Create Backup Manifest
log_info "Creating backup manifest..."
cat > "$BACKUP_DIR/manifest.txt" <<EOF
NPMplus Backup Manifest
=======================
Date: $(date)
Host: $NPMPLUS_HOST
VMID: $NPMPLUS_VMID
NPM URL: $NPM_URL
Contents:
- database.sqlite or database.sql: NPMplus SQLite database
- proxy_hosts.json: Proxy hosts configuration (if API available)
- certificates.json: Certificates metadata (if API available)
- certificates/: Certificate files from disk
- nginx/: Nginx configuration files
Backup Size: $(du -sh "$BACKUP_DIR" | awk '{print $1}')
Restore Instructions:
See: docs/04-configuration/NPMPLUS_BACKUP_RESTORE.md
EOF
log_success "Backup manifest created"
# 6. Compress Backup
log_info "Compressing backup..."
cd "$BACKUP_DEST"
tar czf "npmplus-backup-$TIMESTAMP.tar.gz" "npmplus-backup-$TIMESTAMP" 2>&1 | while IFS= read -r line; do
log_info "$line"
done
if [ -f "npmplus-backup-$TIMESTAMP.tar.gz" ]; then
# BSD/GNU stat fallback, same as the database size check above.
COMPRESSED_SIZE=$(stat -f%z "npmplus-backup-$TIMESTAMP.tar.gz" 2>/dev/null || \
stat -c%s "npmplus-backup-$TIMESTAMP.tar.gz" 2>/dev/null || echo "0")
log_success "Backup compressed ($(numfmt --to=iec-i --suffix=B $COMPRESSED_SIZE 2>/dev/null || echo "$COMPRESSED_SIZE bytes"))"
# Remove uncompressed directory
rm -rf "npmplus-backup-$TIMESTAMP"
else
log_warn "Compression failed - keeping uncompressed backup"
fi
# 7. Cleanup Old Backups
log_info "Cleaning up old backups (retention: $RETENTION_DAYS days)..."
find "$BACKUP_DEST" -name "npmplus-backup-*.tar.gz" -type f -mtime +$RETENTION_DAYS -delete 2>/dev/null || true
OLD_COUNT=$(find "$BACKUP_DEST" -name "npmplus-backup-*.tar.gz" -type f | wc -l || echo "0")
log_success "Old backups cleaned up ($OLD_COUNT backups retained)"
# Summary
echo ""
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "✅ Backup Complete!"
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Backup location: $BACKUP_DEST/npmplus-backup-$TIMESTAMP.tar.gz"
log_info "Manifest: $BACKUP_DIR/manifest.txt"
echo ""

View File

@@ -0,0 +1,283 @@
#!/usr/bin/env bash
# Comprehensive Cleanup of Old, Backup, and Unreferenced Files
# Safely removes old files from both local projects and remote ml110
#
# Targets:
#   - Backup directories (backup-*, *backup*)
#   - Temporary key generation directories (temp-all-keys-*)
#   - Old log files (logs/*.log older than 30 days)
#   - Temporary files (*.bak, *.old, *~, *.swp)
#   - Old documentation files that are no longer referenced
#
# Env:
#   DRY_RUN      "true" (default) to report only; use --execute to delete
#   REMOTE_HOST  ml110 host (default 192.168.11.10)
#   REMOTE_USER  remote user (default root)
#   REMOTE_PASS  REQUIRED for remote cleanup (no hardcoded default; a real
#                credential was previously committed here — rotate it)
set -euo pipefail

# Load IP configuration (lives one directory up from this script).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# PROJECT_ROOT is intentionally recomputed two levels up: the cleanup
# targets and the log directory below live relative to this second value.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Configuration
DRY_RUN="${DRY_RUN:-true}"
REMOTE_HOST="${REMOTE_HOST:-192.168.11.10}"
REMOTE_USER="${REMOTE_USER:-root}"
# SECURITY FIX: no hardcoded password default. Remote cleanup is skipped
# with a warning when REMOTE_PASS is not exported.
REMOTE_PASS="${REMOTE_PASS:-}"
MIN_LOG_AGE_DAYS=30

# Parse arguments
while [[ $# -gt 0 ]]; do
  case $1 in
    --execute)
      DRY_RUN=false
      shift
      ;;
    --help)
      cat << EOF
Usage: $0 [OPTIONS]
Comprehensive cleanup of old, backup, and unreferenced files.
Options:
  --execute Actually delete files (default: dry-run)
  --help Show this help
Safety:
  - By default, runs in DRY-RUN mode
  - Use --execute to actually delete files
  - Creates detailed manifest of files to be deleted
EOF
      exit 0
      ;;
    *)
      log_error "Unknown option: $1"
      exit 1
      ;;
  esac
done

# Create cleanup manifest
CLEANUP_LOG="$PROJECT_ROOT/logs/cleanup-$(date +%Y%m%d-%H%M%S).log"
mkdir -p "$PROJECT_ROOT/logs"
> "$CLEANUP_LOG"

log_info "========================================="
log_info "Comprehensive File Cleanup"
log_info "========================================="
log_info "Mode: $([ "$DRY_RUN" == "true" ] && echo "DRY-RUN" || echo "EXECUTE")"
log_info "Log: $CLEANUP_LOG"
log_info ""

TOTAL_FOUND=0
TOTAL_DELETED=0

#######################################
# Record a path in the manifest and (unless DRY_RUN) delete it.
# Globals:   DRY_RUN, CLEANUP_LOG, TOTAL_FOUND (rw), TOTAL_DELETED (rw)
# Arguments: $1 - path; $2 - optional label (informational only)
# Returns:   0 on success/dry-run/missing path, 1 on failed delete
#######################################
safe_delete() {
  local target="$1"
  local label="${2:-item}"
  if [[ ! -e "$target" ]]; then
    return 0
  fi
  echo "$target" >> "$CLEANUP_LOG"
  TOTAL_FOUND=$((TOTAL_FOUND + 1))
  if [[ "$DRY_RUN" != "true" ]]; then
    if rm -rf "$target" 2>/dev/null; then
      TOTAL_DELETED=$((TOTAL_DELETED + 1))
      echo "✓ Deleted: $target"
      return 0
    else
      echo "✗ Failed: $target" >&2
      return 1
    fi
  else
    echo "Would delete: $target"
    return 0
  fi
}

# Clean local proxmox project
log_info "=== Cleaning Local Proxmox Project ==="
PROXMOX_DIR="$PROJECT_ROOT"

# Old markdown files in root (status/completion docs that are superseded)
OLD_DOCS_PROXMOX=(
  "$PROXMOX_DIR/ACTION_PLAN_NOW.md"
  "$PROXMOX_DIR/DEPLOYMENT_IN_PROGRESS.md"
  "$PROXMOX_DIR/DEPLOYMENT_SOLUTION.md"
  "$PROXMOX_DIR/FINAL_STATUS.txt"
  "$PROXMOX_DIR/IMPLEMENTATION_COMPLETE.md"
  "$PROXMOX_DIR/NEXT_STEPS_QUICK_REFERENCE.md"
  "$PROXMOX_DIR/ORGANIZATION_SUMMARY.md"
  "$PROXMOX_DIR/PROJECT_STRUCTURE.md"
  "$PROXMOX_DIR/QUICK_DEPLOY_FIX.md"
  "$PROXMOX_DIR/QUICK_DEPLOY.md"
  "$PROXMOX_DIR/QUICK_START_VALIDATED_SET.md"
  "$PROXMOX_DIR/STATUS_FINAL.md"
  "$PROXMOX_DIR/STATUS.md"
  "$PROXMOX_DIR/VALIDATED_SET_IMPLEMENTATION_SUMMARY.md"
)
for doc in "${OLD_DOCS_PROXMOX[@]}"; do
  safe_delete "$doc" "old doc"
done

# Temporary besu-enodes directories.
# Process substitution (< <(find ...)) keeps the loop in the current shell
# so safe_delete's counter updates persist.
while IFS= read -r dir; do
  safe_delete "$dir" "temp enode dir"
done < <(find "$PROXMOX_DIR" -maxdepth 1 -type d -name "besu-enodes-*" 2>/dev/null)

# Old log files in smom-dbis-138-proxmox/logs
if [[ -d "$PROXMOX_DIR/smom-dbis-138-proxmox/logs" ]]; then
  while IFS= read -r logfile; do
    if [[ -f "$logfile" ]]; then
      file_age=$(( ($(date +%s) - $(stat -c %Y "$logfile" 2>/dev/null || echo 0)) / 86400 ))
      if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
        safe_delete "$logfile" "old log"
      fi
    fi
  done < <(find "$PROXMOX_DIR/smom-dbis-138-proxmox/logs" -type f -name "*.log" 2>/dev/null)
fi

# Backup/temp files (only in specific project directories)
while IFS= read -r file; do
  # Only process files in our project directories
  if [[ "$file" == "$PROXMOX_DIR/"* ]] && [[ "$file" != *"/node_modules/"* ]] && [[ "$file" != *"/ProxmoxVE/"* ]] && [[ "$file" != *"/mcp-proxmox/"* ]] && [[ "$file" != *"/the_order/"* ]]; then
    safe_delete "$file" "backup/temp file"
  fi
done < <(find "$PROXMOX_DIR" -maxdepth 3 -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" \) 2>/dev/null)

# Clean local smom-dbis-138 project
log_info ""
log_info "=== Cleaning Local smom-dbis-138 Project ==="
# Try different possible locations
SMOM_DIR=""
for possible_dir in "$PROJECT_ROOT/../smom-dbis-138" "/home/intlc/projects/smom-dbis-138"; do
  if [[ -d "$possible_dir" ]]; then
    SMOM_DIR="$possible_dir"
    break
  fi
done

if [[ -n "$SMOM_DIR" ]] && [[ -d "$SMOM_DIR" ]]; then
  log_info "Using smom-dbis-138 directory: $SMOM_DIR"
  # Temporary key generation directories
  while IFS= read -r dir; do
    safe_delete "$dir" "temp key gen dir"
  done < <(find "$SMOM_DIR" -maxdepth 1 -type d -name "temp-all-keys-*" 2>/dev/null)
  # Backup key directories (keep only the most recent)
  LATEST_BACKUP=$(find "$SMOM_DIR" -maxdepth 1 -type d -name "backup-keys-*" 2>/dev/null | sort | tail -1)
  while IFS= read -r dir; do
    if [[ "$dir" != "$LATEST_BACKUP" ]]; then
      safe_delete "$dir" "old backup keys"
    fi
  done < <(find "$SMOM_DIR" -maxdepth 1 -type d -name "backup-keys-*" 2>/dev/null)
  # Old log files
  if [[ -d "$SMOM_DIR/logs" ]]; then
    while IFS= read -r logfile; do
      if [[ -f "$logfile" ]]; then
        file_age=$(( ($(date +%s) - $(stat -c %Y "$logfile" 2>/dev/null || echo 0)) / 86400 ))
        if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
          safe_delete "$logfile" "old log"
        fi
      fi
    done < <(find "$SMOM_DIR/logs" -type f -name "*.log" 2>/dev/null)
  fi
  # Temporary/backup files
  while IFS= read -r file; do
    safe_delete "$file" "backup/temp file"
  done < <(find "$SMOM_DIR" -maxdepth 2 -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" \) ! -path "*/node_modules/*" 2>/dev/null)
else
  log_warn "smom-dbis-138 directory not found: $SMOM_DIR"
fi

# Clean remote ml110
log_info ""
log_info "=== Cleaning Remote Host (ml110) ==="
if [[ -z "$REMOTE_PASS" ]]; then
  log_warn "REMOTE_PASS not set - skipping remote cleanup (export REMOTE_PASS to enable)"
elif sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 \
  "${REMOTE_USER}@${REMOTE_HOST}" "echo 'Connected'" 2>/dev/null; then
  log_info "Connected to ${REMOTE_HOST}"
  # Get list of files to clean
  REMOTE_CLEANUP=$(sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
    "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && {
    # Find backup/temp directories
    find smom-dbis-138* -type d -name '*backup*' 2>/dev/null
    find smom-dbis-138* -type d -name 'temp-all-keys-*' 2>/dev/null
    # Find old log files (older than $MIN_LOG_AGE_DAYS days)
    find smom-dbis-138*/logs -type f -name '*.log' 2>/dev/null | while read -r log; do
    age=\$(( (\$(date +%s) - \$(stat -c %Y \"\$log\" 2>/dev/null || echo 0)) / 86400 ))
    if [[ \$age -gt $MIN_LOG_AGE_DAYS ]]; then
    echo \"\$log\"
    fi
    done
    # Find backup/temp files
    find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) 2>/dev/null
    }" 2>/dev/null)
  if [[ -n "$REMOTE_CLEANUP" ]]; then
    REMOTE_COUNT=0
    # BUGFIX: the original used `echo "$REMOTE_CLEANUP" | while ...`, which
    # runs the loop in a subshell, so REMOTE_COUNT and TOTAL_DELETED updates
    # were silently discarded (ShellCheck SC2031). A here-string keeps the
    # loop in the current shell.
    while IFS= read -r item; do
      if [[ -n "$item" ]]; then
        REMOTE_COUNT=$((REMOTE_COUNT + 1))
        echo "/opt/$item" >> "$CLEANUP_LOG"
        echo "Would delete (remote): /opt/$item"
        if [[ "$DRY_RUN" != "true" ]]; then
          if sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
            "${REMOTE_USER}@${REMOTE_HOST}" "rm -rf \"/opt/$item\" 2>/dev/null && echo '✓' || echo '✗'" 2>/dev/null | grep -q "✓"; then
            TOTAL_DELETED=$((TOTAL_DELETED + 1))
          fi
        fi
      fi
    done <<< "$REMOTE_CLEANUP"
    log_info "Found $REMOTE_COUNT items on remote"
  else
    log_info "No cleanup targets found on remote"
  fi
else
  log_warn "Cannot connect to ${REMOTE_HOST}, skipping remote cleanup"
fi

# Summary
log_info ""
log_info "========================================="
log_info "Cleanup Summary"
log_info "========================================="
log_info "Total items found: $TOTAL_FOUND"
if [[ "$DRY_RUN" == "true" ]]; then
  log_warn "DRY-RUN mode: No files were deleted"
  log_info "Review the log file: $CLEANUP_LOG"
  log_info "Run with --execute to actually delete: $0 --execute"
else
  log_success "Total items deleted: $TOTAL_DELETED"
  log_info "Cleanup log: $CLEANUP_LOG"
fi
log_info ""
View File

@@ -0,0 +1,227 @@
#!/usr/bin/env bash
# Cleanup Deprecated Besu Configuration Options
# Removes deprecated/invalid options that cause Besu v23.10.0+ to fail
set -euo pipefail

# ANSI color codes used by the logging helpers below.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Resolve the directory of this script and the repository root above it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"

# First positional argument selects dry-run mode ("--dry-run"); empty = live run.
DRY_RUN="${1:-}"

# Colored single-line status messages on stdout ('%b' interprets the escape
# codes exactly like 'echo -e' did).
log_info()    { printf '%b\n' "${BLUE}[INFO]${NC} $1"; }
log_success() { printf '%b\n' "${GREEN}[✓]${NC} $1"; }
log_warn()    { printf '%b\n' "${YELLOW}[WARN]${NC} $1"; }
log_error()   { printf '%b\n' "${RED}[ERROR]${NC} $1"; }
# Function to backup a file
# Create a timestamped backup copy of a file before it is modified.
# Arguments: $1 - path of the file to back up
# Globals:   DRY_RUN (read) - "--dry-run" suppresses the actual copy
# Outputs:   backup path on stdout (placeholder path in dry-run mode;
#            nothing when the file does not exist)
backup_file() {
  local src="$1"
  local stamp backup

  # Missing file: nothing to back up; callers treat empty output as "none".
  [ -f "$src" ] || return 0

  stamp="$(date +%Y%m%d_%H%M%S)"
  backup="${src}.backup.${stamp}"

  if [ "$DRY_RUN" = "--dry-run" ]; then
    # Dry-run: report a stable placeholder instead of touching the filesystem.
    echo "${src}.backup.TIMESTAMP"
  else
    cp "$src" "$backup"
    echo "$backup"
  fi
}
# Function to clean deprecated options from a file
clean_deprecated_options() {
local file="$1"
local node_type="$2"
if [ ! -f "$file" ]; then
log_warn "File not found: $file (skipping)"
return 1
fi
log_info "Cleaning deprecated options from: $file ($node_type)"
if [ "$DRY_RUN" != "--dry-run" ]; then
# Backup file
local backup=$(backup_file "$file")
log_info " Backup created: $backup"
# Remove deprecated options
# Note: Using sed with -i for in-place editing
sed -i \
-e '/^log-destination=/d' \
-e '/^fast-sync-min-peers=/d' \
-e '/^database-path=/d' \
-e '/^trie-logs-enabled=/d' \
-e '/^accounts-enabled=/d' \
-e '/^max-remote-initiated-connections=/d' \
-e '/^rpc-http-host-allowlist=/d' \
-e '/^rpc-tx-feecap="0x0"/d' \
-e '/^tx-pool-max-size=/d' \
-e '/^tx-pool-price-bump=/d' \
-e '/^tx-pool-retention-hours=/d' \
"$file"
log_success " Deprecated options removed"
return 0
else
log_info " [DRY-RUN] Would remove deprecated options:"
log_info " - log-destination"
log_info " - fast-sync-min-peers"
log_info " - database-path"
log_info " - trie-logs-enabled"
log_info " - accounts-enabled"
log_info " - max-remote-initiated-connections"
log_info " - rpc-http-host-allowlist"
log_info " - rpc-tx-feecap=\"0x0\""
log_info " - tx-pool-max-size"
log_info " - tx-pool-price-bump"
log_info " - tx-pool-retention-hours"
return 0
fi
}
# Main execution
echo -e "${BLUE}╔══════════════════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ BESU DEPRECATED OPTIONS CLEANUP ║${NC}"
echo -e "${BLUE}╚══════════════════════════════════════════════════════════════╝${NC}"
echo ""
if [ "$DRY_RUN" == "--dry-run" ]; then
  log_warn "DRY-RUN MODE: No files will be modified"
  echo ""
fi

# Statistics updated by clean_config_group below.
CLEANED=0
SKIPPED=0

# Print a section banner, then clean every config file in the argument list.
# The four copy-pasted per-section loops were folded into this helper; the
# produced output is unchanged.
# Arguments: $1 - section title
#            $2 - node type label (log text only)
#            $3.. - config file paths
# Globals:   CLEANED / SKIPPED (incremented per file)
clean_config_group() {
  local title="$1"
  local node_type="$2"
  shift 2
  echo -e "${BLUE}═══════════════════════════════════════════════════════════════${NC}"
  echo -e "${BLUE}${title}${NC}"
  echo -e "${BLUE}═══════════════════════════════════════════════════════════════${NC}"
  echo ""
  local file
  for file in "$@"; do
    if clean_deprecated_options "$file" "$node_type"; then
      CLEANED=$((CLEANED + 1))
    else
      SKIPPED=$((SKIPPED + 1))
    fi
    echo ""
  done
}

clean_config_group "Cleaning Validator Configurations" "validator" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-validator.toml" \
  "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-validator.toml"

clean_config_group "Cleaning RPC Node Configurations" "RPC" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-core.toml" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-public.toml" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-perm.toml" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-thirdweb.toml" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-4.toml" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-putu-1.toml" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-putu-8a.toml" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-luis-1.toml" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-luis-8a.toml" \
  "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-rpc-core.toml" \
  "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-rpc.toml" \
  "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-rpc-4.toml"

clean_config_group "Cleaning Sentry Configurations" "sentry" \
  "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-sentry.toml"

clean_config_group "Cleaning Member Configurations" "member" \
  "$PROJECT_ROOT/smom-dbis-138/config/config-member.toml"

# Summary
echo -e "${BLUE}═══════════════════════════════════════════════════════════════${NC}"
echo -e "${BLUE}Summary${NC}"
echo -e "${BLUE}═══════════════════════════════════════════════════════════════${NC}"
echo ""
echo "Files cleaned: $CLEANED"
echo "Files skipped: $SKIPPED"
echo ""
if [ "$DRY_RUN" == "--dry-run" ]; then
  log_warn "This was a dry-run. No files were modified."
  echo "Run without --dry-run to apply changes."
else
  log_success "Deprecated options cleanup complete!"
  echo ""
  echo "Deprecated options removed:"
  echo " ✓ log-destination"
  echo " ✓ fast-sync-min-peers (incompatible with FULL sync-mode)"
  echo " ✓ database-path (use data-path instead)"
  echo " ✓ trie-logs-enabled"
  echo " ✓ accounts-enabled"
  echo " ✓ max-remote-initiated-connections"
  echo " ✓ rpc-http-host-allowlist"
  echo " ✓ rpc-tx-feecap=\"0x0\" (invalid value)"
  echo " ✓ tx-pool-max-size (legacy, incompatible with layered implementation)"
  echo " ✓ tx-pool-price-bump (legacy, incompatible with layered implementation)"
  echo " ✓ tx-pool-retention-hours (legacy, incompatible with layered implementation)"
  echo ""
  echo "Next steps:"
  echo " 1. Review the cleaned configuration files"
  echo " 2. Test configurations with Besu v23.10.0+"
  echo " 3. Deploy updated configurations to nodes"
fi

View File

@@ -0,0 +1,301 @@
#!/usr/bin/env bash
# Cleanup Old, Backup, and Unreferenced Files
# Safely removes old files, backups, and unused files from both local and remote
#
# This script identifies and removes:
# - Backup directories (backup-*, *backup*)
# - Temporary files (*.tmp, *.temp, *~, *.swp)
# - Old log files (logs/*.log older than 30 days)
# - Duplicate/unused files
# - Old documentation that's been superseded
set -euo pipefail

# Load IP configuration (missing conf file is tolerated).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# NOTE(review): PROJECT_ROOT is computed a second time with a DIFFERENT parent
# (../.. instead of ..): the conf file above is sourced relative to the first
# value while all manifests/logs below use this second one. This looks like a
# merge artifact — confirm which root is intended. (The redundant duplicate
# SCRIPT_DIR computation that used to sit here was removed; its value was
# identical.)
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Configuration — every value is overridable via the environment.
DRY_RUN="${DRY_RUN:-true}"          # "true" = report only, no deletion
REMOTE_HOST="${REMOTE_HOST:-192.168.11.10}"
REMOTE_USER="${REMOTE_USER:-root}"
CLEAN_LOCAL="${CLEAN_LOCAL:-true}"
CLEAN_REMOTE="${CLEAN_REMOTE:-true}"
MIN_LOG_AGE_DAYS=30                 # log files older than this are selected
# Parse command-line flags; any unknown flag aborts with an error.
# The shift is hoisted out of the individual case arms (the --help and
# error arms exit before reaching it).
while [[ $# -gt 0 ]]; do
  case "$1" in
    --execute)
      # Switch from the default dry-run mode to real deletion.
      DRY_RUN=false
      ;;
    --skip-remote)
      CLEAN_REMOTE=false
      ;;
    --skip-local)
      CLEAN_LOCAL=false
      ;;
    --help)
      cat << EOF
Usage: $0 [OPTIONS]
Cleanup old, backup, and unreferenced files from project directories.
Options:
--execute Actually delete files (default: dry-run, only shows what would be deleted)
--skip-remote Skip cleaning remote host (ml110)
--skip-local Skip cleaning local project
--help Show this help
Safety:
- By default, runs in DRY-RUN mode (shows files but doesn't delete)
- Use --execute to actually delete files
- Creates a manifest of files that will be deleted
EOF
      exit 0
      ;;
    *)
      log_error "Unknown option: $1"
      exit 1
      ;;
  esac
  shift
done
# Prepare the run manifest: every file selected for deletion is recorded here.
mkdir -p "$PROJECT_ROOT/logs"
CLEANUP_MANIFEST="$PROJECT_ROOT/logs/cleanup-manifest-$(date +%Y%m%d-%H%M%S).txt"
: > "$CLEANUP_MANIFEST"

# Banner: announce the selected mode and the manifest location.
if [ "$DRY_RUN" == "true" ]; then
  run_mode="DRY-RUN (no files will be deleted)"
else
  run_mode="EXECUTE (files will be deleted)"
fi
log_info "========================================="
log_info "File Cleanup Script"
log_info "========================================="
log_info "Mode: $run_mode"
log_info "Manifest: $CLEANUP_MANIFEST"
log_info ""
# Function to find and catalog files to delete
# Scan a directory tree and record deletable items (backup dirs, temp dirs,
# temp/backup files, stale logs, temp key-gen dirs) in the global manifest.
#
# Arguments: $1 - base directory to scan
#            $2 - human-readable label for log output
# Globals:   CLEANUP_MANIFEST (appended to), MIN_LOG_AGE_DAYS (read)
# Outputs:   one description per item on stdout, then the item count as the
#            FINAL line (callers parse the last line to recover the count)
find_cleanup_targets() {
  local base_dir="$1"
  local label="$2"
  log_info "=== Scanning $label ==="
  local count=0

  # Backup directories
  while IFS= read -r dir; do
    if [[ -d "$dir" ]]; then
      echo "$dir" >> "$CLEANUP_MANIFEST"
      echo "DIR: $dir"
      # NB: deliberately NOT ((count++)) — that returns status 1 when count
      # is 0 and would abort the whole script under 'set -e' on the very
      # first item found.
      count=$((count + 1))
    fi
  done < <(find "$base_dir" -type d -name "*backup*" 2>/dev/null)

  # Temporary directories (never the scan root itself)
  while IFS= read -r dir; do
    if [[ -d "$dir" ]] && [[ "$dir" != "$base_dir" ]]; then
      echo "$dir" >> "$CLEANUP_MANIFEST"
      echo "DIR: $dir"
      count=$((count + 1))
    fi
  done < <(find "$base_dir" -type d \( -name "*tmp*" -o -name "*temp*" \) 2>/dev/null)

  # Temporary/backup files
  while IFS= read -r file; do
    if [[ -f "$file" ]]; then
      echo "$file" >> "$CLEANUP_MANIFEST"
      echo "FILE: $file"
      count=$((count + 1))
    fi
  done < <(find "$base_dir" -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" -o -name "*.tmp" -o -name "*.temp" \) 2>/dev/null)

  # Old log files (older than MIN_LOG_AGE_DAYS).
  # NOTE(review): 'stat -c %Y' is GNU-only; on BSD/macOS the 'echo 0'
  # fallback always fires, making every log look ancient — confirm the
  # target hosts are Linux.
  if [[ -d "$base_dir/logs" ]]; then
    local file_age
    while IFS= read -r file; do
      if [[ -f "$file" ]]; then
        file_age=$(( ($(date +%s) - $(stat -c %Y "$file" 2>/dev/null || echo 0)) / 86400 ))
        if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
          echo "$file" >> "$CLEANUP_MANIFEST"
          echo "OLD LOG ($file_age days): $file"
          count=$((count + 1))
        fi
      fi
    done < <(find "$base_dir/logs" -type f -name "*.log" 2>/dev/null)
  fi

  # Key-generation scratch directories, only inside smom-dbis-138 trees.
  if [[ "$base_dir" == *"smom-dbis-138"* ]]; then
    while IFS= read -r dir; do
      if [[ -d "$dir" ]]; then
        echo "$dir" >> "$CLEANUP_MANIFEST"
        echo "TEMP KEY GEN: $dir"
        count=$((count + 1))
      fi
    done < <(find "$base_dir" -type d -name "temp-all-keys-*" 2>/dev/null)
  fi

  log_info "Found $count items to clean"
  echo "$count"
}
# Function to delete files from manifest
delete_from_manifest() {
local manifest_file="$1"
local label="$2"
if [[ ! -f "$manifest_file" ]]; then
log_warn "Manifest file not found: $manifest_file"
return 0
fi
local count=$(wc -l < "$manifest_file" | tr -d ' ')
if [[ $count -eq 0 ]]; then
log_info "No files to delete for $label"
return 0
fi
log_info "Deleting $count items from $label..."
local deleted=0
local failed=0
while IFS= read -r target; do
if [[ -z "$target" ]]; then
continue
fi
if [[ -e "$target" ]]; then
if rm -rf "$target" 2>/dev/null; then
((deleted++))
else
log_warn "Failed to delete: $target"
((failed++))
fi
fi
done < "$manifest_file"
log_success "Deleted $deleted items, $failed failures"
}
# Clean local project
if [[ "$CLEAN_LOCAL" == "true" ]]; then
log_info ""
log_info "=== Local Project Cleanup ==="
# Clean proxmox project
PROXMOX_CLEANUP="$PROJECT_ROOT/logs/proxmox-cleanup-$(date +%Y%m%d-%H%M%S).txt"
> "$PROXMOX_CLEANUP"
# tee captures the scanner's ENTIRE stdout (descriptions, log lines and the
# trailing count line); tail -20 only limits what the operator sees on screen.
find_cleanup_targets "$PROJECT_ROOT" "proxmox project" | tee -a "$PROXMOX_CLEANUP" | tail -20
# The item count is recovered as the first number on the captured file's last
# line. NOTE(review): fragile — this only works while the count is the final
# line that find_cleanup_targets emits; any extra trailing output breaks it.
proxmox_count=$(tail -1 "$PROXMOX_CLEANUP" | grep -oE '[0-9]+' | head -1 || echo "0")
# Clean smom-dbis-138 project
# Sibling project is expected one level above PROJECT_ROOT; silently treated
# as "nothing to clean" when absent.
if [[ -d "$PROJECT_ROOT/../smom-dbis-138" ]]; then
SMOM_CLEANUP="$PROJECT_ROOT/logs/smom-cleanup-$(date +%Y%m%d-%H%M%S).txt"
> "$SMOM_CLEANUP"
find_cleanup_targets "$PROJECT_ROOT/../smom-dbis-138" "smom-dbis-138 project" | tee -a "$SMOM_CLEANUP" | tail -20
smom_count=$(tail -1 "$SMOM_CLEANUP" | grep -oE '[0-9]+' | head -1 || echo "0")
else
smom_count=0
fi
total_local=$((proxmox_count + smom_count))
# In execute mode, delete everything recorded so far. At this point
# CLEANUP_MANIFEST contains only local entries (remote scanning happens later).
if [[ "$DRY_RUN" != "true" ]] && [[ $total_local -gt 0 ]]; then
log_info ""
log_warn "Executing deletion of $total_local local items..."
delete_from_manifest "$CLEANUP_MANIFEST" "local project"
fi
fi
# Clean remote host: scan /opt/smom-dbis-138* trees on ml110 for backup/temp
# artifacts, record them in the manifest, and (in execute mode) delete them.
if [[ "$CLEAN_REMOTE" == "true" ]]; then
  log_info ""
  log_info "=== Remote Host Cleanup (ml110) ==="

  # FIXME(security): the SSH password used to be hard-coded inline at every
  # call site. It now comes from the environment (export REMOTE_PASS before
  # running); the old literal remains only as a backward-compatible fallback
  # and should be removed once callers are migrated to key-based auth.
  REMOTE_PASS="${REMOTE_PASS:-L@kers2010}"

  # Test SSH connection
  if ! sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 \
    "${REMOTE_USER}@${REMOTE_HOST}" "echo 'Connected'" 2>/dev/null; then
    log_warn "Cannot connect to ${REMOTE_HOST}, skipping remote cleanup"
  else
    log_info "Scanning remote host..."
    # Get the list of candidate paths (relative to /opt) from the remote host.
    REMOTE_CLEANUP_LIST=$(sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
      "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && \
      find smom-dbis-138* -type d -name '*backup*' 2>/dev/null && \
      find smom-dbis-138* -type d \( -name '*tmp*' -o -name '*temp*' \) 2>/dev/null && \
      find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) 2>/dev/null" 2>/dev/null | head -50)
    remote_count=0
    if [[ -n "$REMOTE_CLEANUP_LIST" ]]; then
      # Read via a here-string, NOT 'echo | while': the old pipeline ran the
      # loop in a subshell, so remote_count was always 0 afterwards — the
      # count log lied and the deletion branch below could never trigger.
      while IFS= read -r item; do
        if [[ -n "$item" ]]; then
          echo "/opt/$item" >> "$CLEANUP_MANIFEST"
          echo "REMOTE: /opt/$item"
          # Not ((remote_count++)): returns 1 at 0 and trips 'set -e'.
          remote_count=$((remote_count + 1))
        fi
      done <<< "$REMOTE_CLEANUP_LIST"
      log_info "Found $remote_count items to clean on remote"
    else
      log_info "No cleanup targets found on remote"
    fi
    if [[ "$DRY_RUN" != "true" ]] && [[ $remote_count -gt 0 ]]; then
      log_info ""
      log_warn "Executing deletion of $remote_count remote items..."
      # Delete remote files with the same patterns used for the scan.
      sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
        "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && \
        find smom-dbis-138* -type d -name '*backup*' -exec rm -rf {} + 2>/dev/null; \
        find smom-dbis-138* -type d \( -name '*tmp*' -o -name '*temp*' \) -exec rm -rf {} + 2>/dev/null; \
        find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) -delete 2>/dev/null; \
        echo 'Remote cleanup completed'"
      log_success "Remote cleanup completed"
    fi
  fi
fi
# Final summary: manifest location, run mode, and next-step guidance.
if [[ "$DRY_RUN" == "true" ]]; then
  summary_mode="DRY-RUN"
else
  summary_mode="EXECUTED"
fi
log_info ""
log_info "========================================="
log_info "Cleanup Summary"
log_info "========================================="
log_info "Manifest file: $CLEANUP_MANIFEST"
log_info "Mode: $summary_mode"
log_info ""
if [[ "$DRY_RUN" == "true" ]]; then
  log_warn "This was a DRY-RUN. No files were deleted."
  log_info "Review the manifest file and run with --execute to delete files:"
  log_info " $0 --execute"
else
  log_success "Cleanup completed. Check manifest for details: $CLEANUP_MANIFEST"
fi
log_info ""

View File

@@ -0,0 +1,117 @@
#!/bin/bash
# Create .env.example templates from existing .env files
# Removes actual secrets and replaces with placeholders
set -euo pipefail

# ANSI color codes for the logging helpers.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Colored single-line status helpers ('%b' expands escapes like 'echo -e').
log_info()    { printf '%b\n' "${BLUE}[INFO]${NC} $1"; }
log_success() { printf '%b\n' "${GREEN}[✓]${NC} $1"; }
log_warn()    { printf '%b\n' "${YELLOW}[⚠]${NC} $1"; }
log_error()   { printf '%b\n' "${RED}[✗]${NC} $1"; }

# Root to scan for .env files; DRY_RUN=true (default) only reports.
PROJECT_ROOT="${PROJECT_ROOT:-/home/intlc/projects}"
DRY_RUN="${DRY_RUN:-true}"

# Secret key names mapped to the placeholder written into the template.
declare -A SECRET_PATTERNS=(
  ["PRIVATE_KEY"]="your-private-key-here"
  ["API_KEY"]="your-api-key-here"
  ["API_TOKEN"]="your-api-token-here"
  ["SECRET"]="your-secret-here"
  ["PASSWORD"]="your-password-here"
  ["TOKEN"]="your-token-here"
  ["CLOUDFLARE_API_TOKEN"]="your-cloudflare-api-token"
  ["CLOUDFLARE_API_KEY"]="your-cloudflare-api-key"
  ["CLOUDFLARE_TUNNEL_TOKEN"]="your-cloudflare-tunnel-token"
  ["CLOUDFLARE_ORIGIN_CA_KEY"]="your-cloudflare-origin-ca-key"
  ["NPM_PASSWORD"]="your-npm-password"
  ["DATABASE_URL"]="postgresql://user:password@host:port/database"
  ["JWT_SECRET"]="your-jwt-secret-here"
)

# Banner.
echo "═══════════════════════════════════════════════════════════"
echo " Create .env.example Templates"
echo "═══════════════════════════════════════════════════════════"
echo ""
if [ "$DRY_RUN" = "true" ]; then
  run_mode="DRY RUN"
else
  run_mode="LIVE"
fi
log_info "Mode: $run_mode"
echo ""
echo ""
# Find all .env files (skipping node_modules/.git and existing *.example).
ENV_FILES=$(find "$PROJECT_ROOT" -type f -name ".env" ! -name "*.example" ! -path "*/node_modules/*" ! -path "*/.git/*" 2>/dev/null)
CREATED=0
SKIPPED=0
# For each .env, produce a sanitized sibling .env.example.
while IFS= read -r env_file; do
  if [ -z "$env_file" ]; then
    continue
  fi
  example_file="${env_file}.example"
  # Skip if .example already exists and is newer
  if [ -f "$example_file" ] && [ "$example_file" -nt "$env_file" ]; then
    log_info "Skipping $env_file (example file is newer)"
    SKIPPED=$((SKIPPED + 1))
    continue
  fi
  log_info "Processing: $env_file"
  if [ "$DRY_RUN" = "false" ]; then
    # Create .env.example by copying and sanitizing
    cp "$env_file" "$example_file"
    # Replace secret values with placeholders.
    # - The key is matched as a SUBSTRING ([A-Za-z0-9_]*PATTERN[A-Za-z0-9_]*)
    #   so prefixed/suffixed keys such as DB_PASSWORD or
    #   KEYCLOAK_ADMIN_PASSWORD are sanitized too; the previous exact-name
    #   match let those real secrets leak into the template.
    # - '|' is used as the sed delimiter because placeholders may contain '/'
    #   (the DATABASE_URL placeholder does), which made the old s/…/…/ form a
    #   sed syntax error.
    # - One substitution covers KEY=value, KEY="value" and KEY='value' (the
    #   old second/third quoted-form seds were dead code).
    # NOTE: broader keys (e.g. TOKEN vs API_TOKEN) can match the same line;
    # whichever pattern is applied first wins, which is harmless — the value
    # is replaced by some placeholder either way.
    for pattern in "${!SECRET_PATTERNS[@]}"; do
      placeholder="${SECRET_PATTERNS[$pattern]}"
      sed -i -E "s|^([A-Za-z0-9_]*${pattern}[A-Za-z0-9_]*)=.*|\1=${placeholder}|" "$example_file"
    done
    # Add header comment
    {
      echo "# Environment Variables Template"
      echo "# Copy this file to .env and fill in your actual values"
      echo "# DO NOT commit .env files to version control"
      echo "#"
      echo ""
      cat "$example_file"
    } > "${example_file}.tmp"
    mv "${example_file}.tmp" "$example_file"
    log_success " Created: $example_file"
    CREATED=$((CREATED + 1))
  else
    log_info " Would create: $example_file"
    CREATED=$((CREATED + 1))
  fi
done <<< "$ENV_FILES"
# Final report: counts of created/skipped templates plus a next-step hint.
printf '\n'
echo "═══════════════════════════════════════════════════════════"
echo " Summary"
echo "═══════════════════════════════════════════════════════════"
printf '\n'
if [ "$DRY_RUN" != "true" ]; then
  log_success "Created $CREATED .env.example template(s)"
  log_info "Skipped $SKIPPED file(s) (already up to date)"
else
  log_info "DRY RUN complete. Would create $CREATED template(s)"
  log_info "To create templates, run:"
  log_info " DRY_RUN=false $0"
fi
printf '\n'
View File

@@ -0,0 +1,206 @@
#!/bin/bash
# Safely handle backup files containing secrets
# Options: encrypt, move to secure location, or delete (with confirmation)
set -euo pipefail

# ANSI color codes for the logging helpers.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Colored single-line status helpers ('%b' expands escapes like 'echo -e').
log_info()    { printf '%b\n' "${BLUE}[INFO]${NC} $1"; }
log_success() { printf '%b\n' "${GREEN}[✓]${NC} $1"; }
log_warn()    { printf '%b\n' "${YELLOW}[⚠]${NC} $1"; }
log_error()   { printf '%b\n' "${RED}[✗]${NC} $1"; }

# Configuration, all overridable via the environment:
#   PROJECT_ROOT - tree scanned for *.env.backup* files
#   ACTION       - list (default) | encrypt | move | delete
#   SECURE_DIR   - destination for encrypted/moved backups
#   DRY_RUN      - "true" (default) reports without changing anything
PROJECT_ROOT="${PROJECT_ROOT:-/home/intlc/projects}"
ACTION="${ACTION:-list}" # list, encrypt, move, delete
SECURE_DIR="${SECURE_DIR:-$HOME/.secure-secrets-backups}"
DRY_RUN="${DRY_RUN:-true}"
echo "═══════════════════════════════════════════════════════════"
echo " Backup Files Handler"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Find all backup files
log_info "Scanning for backup files..."
BACKUP_FILES=$(find "$PROJECT_ROOT" -type f \( -name "*.env.backup*" -o -name ".env.backup*" \) ! -path "*/node_modules/*" ! -path "*/.git/*" 2>/dev/null)
if [ -z "$BACKUP_FILES" ]; then
log_success "No backup files found"
exit 0
fi
# Identify files with secrets
FILES_WITH_SECRETS=()
while IFS= read -r backup_file; do
if [ -z "$backup_file" ]; then
continue
fi
if grep -qE "^(PRIVATE_KEY|API_KEY|SECRET|PASSWORD|TOKEN|CLOUDFLARE)" "$backup_file" 2>/dev/null; then
FILES_WITH_SECRETS+=("$backup_file")
fi
done <<< "$BACKUP_FILES"
if [ ${#FILES_WITH_SECRETS[@]} -eq 0 ]; then
log_success "No backup files with secrets found"
exit 0
fi
echo "Found ${#FILES_WITH_SECRETS[@]} backup file(s) with secrets:"
echo ""
for file in "${FILES_WITH_SECRETS[@]}"; do
echo " - $file"
# Show first secret type found
secret_type=$(grep -hE "^(PRIVATE_KEY|API_KEY|SECRET|PASSWORD|TOKEN|CLOUDFLARE)" "$file" 2>/dev/null | head -1 | cut -d'=' -f1)
if [ -n "$secret_type" ]; then
echo " Contains: $secret_type"
fi
done
echo ""
# Dispatch on ACTION (default "list"). All modes operate on the
# FILES_WITH_SECRETS array collected above; encrypt/move honor DRY_RUN,
# delete always asks for interactive confirmation.
case "$ACTION" in
  list)
    log_info "Mode: LIST (no changes)"
    log_info ""
    log_info "Available actions:"
    log_info " ACTION=encrypt - Encrypt and move to secure location"
    log_info " ACTION=move - Move to secure location (unencrypted)"
    log_info " ACTION=delete - Delete files (with confirmation)"
    ;;
  encrypt)
    log_info "Mode: ENCRYPT and move to secure location"
    if [ "$DRY_RUN" = "true" ]; then
      log_warn "DRY RUN - No changes will be made"
    fi
    # Create secure directory (owner-only access)
    if [ "$DRY_RUN" = "false" ]; then
      mkdir -p "$SECURE_DIR"
      chmod 700 "$SECURE_DIR"
    fi
    for file in "${FILES_WITH_SECRETS[@]}"; do
      filename=$(basename "$file")
      dirname=$(dirname "$file")
      relative_path="${dirname#$PROJECT_ROOT/}"
      # Destination: flattened relative dir + the original filename.
      # (This previously read "..._$(unknown).enc"; 'unknown' is not a
      # command, so the substitution failed and aborted the whole script
      # under 'set -e'. The unused $filename variable was the evident
      # intended component.)
      secure_path="$SECURE_DIR/${relative_path//\//_}_${filename}.enc"
      log_info "Processing: $file"
      if [ "$DRY_RUN" = "false" ]; then
        # Encrypt using openssl
        if command -v openssl &> /dev/null; then
          # openssl reads the passphrase interactively from the TTY; stderr
          # must NOT be silenced here or the prompt is invisible.
          openssl enc -aes-256-cbc -salt -pbkdf2 -in "$file" -out "$secure_path" || {
            log_error "Failed to encrypt $file"
            continue
          }
          chmod 600 "$secure_path"
          log_success " Encrypted to: $secure_path"
          # Remove original
          rm "$file"
          log_success " Removed original: $file"
        else
          log_error "openssl not found. Cannot encrypt."
          exit 1
        fi
      else
        log_info " Would encrypt to: $secure_path"
        log_info " Would remove: $file"
      fi
    done
    if [ "$DRY_RUN" = "false" ]; then
      log_success "Encryption complete!"
      log_info "Encrypted files stored in: $SECURE_DIR"
      log_info "To decrypt: openssl enc -d -aes-256-cbc -pbkdf2 -in <file.enc> -out <file>"
    fi
    ;;
  move)
    log_info "Mode: MOVE to secure location"
    if [ "$DRY_RUN" = "true" ]; then
      log_warn "DRY RUN - No changes will be made"
    fi
    # Create secure directory (owner-only access)
    if [ "$DRY_RUN" = "false" ]; then
      mkdir -p "$SECURE_DIR"
      chmod 700 "$SECURE_DIR"
    fi
    for file in "${FILES_WITH_SECRETS[@]}"; do
      filename=$(basename "$file")
      dirname=$(dirname "$file")
      relative_path="${dirname#$PROJECT_ROOT/}"
      # Same fix as the encrypt arm: "$(unknown)" replaced by ${filename}.
      secure_path="$SECURE_DIR/${relative_path//\//_}_${filename}"
      log_info "Processing: $file"
      if [ "$DRY_RUN" = "false" ]; then
        # "Move" is implemented as copy + chmod + remove-original.
        cp "$file" "$secure_path"
        chmod 600 "$secure_path"
        log_success " Moved to: $secure_path"
        # Remove original
        rm "$file"
        log_success " Removed original: $file"
      else
        log_info " Would move to: $secure_path"
        log_info " Would remove: $file"
      fi
    done
    if [ "$DRY_RUN" = "false" ]; then
      log_success "Move complete!"
      log_info "Files stored in: $SECURE_DIR"
    fi
    ;;
  delete)
    log_warn "Mode: DELETE"
    log_warn "This will permanently delete backup files with secrets!"
    echo ""
    if [ "$DRY_RUN" = "true" ]; then
      log_warn "DRY RUN - No files will be deleted"
      for file in "${FILES_WITH_SECRETS[@]}"; do
        log_info "Would delete: $file"
      done
    else
      # Interactive confirmation gate: anything except the literal "yes" aborts.
      read -p "Are you sure you want to delete these files? (yes/no): " confirm
      if [ "$confirm" != "yes" ]; then
        log_info "Cancelled"
        exit 0
      fi
      for file in "${FILES_WITH_SECRETS[@]}"; do
        log_info "Deleting: $file"
        rm "$file"
        log_success " Deleted: $file"
      done
      log_success "Deletion complete!"
    fi
    ;;
  *)
    log_error "Unknown action: $ACTION"
    log_info "Valid actions: list, encrypt, move, delete"
    exit 1
    ;;
esac
echo ""
echo "═══════════════════════════════════════════════════════════"
View File

@@ -0,0 +1,262 @@
#!/bin/bash
# Install NPMplus in a new Proxmox LXC container using whatever OS template is
# already available on the host.
#
# Usage: install-npmplus.sh [PROXMOX_HOST] [TZ] [ACME_EMAIL]
#   $1 - Proxmox host to SSH into (default 192.168.11.11)
#   $2 - container timezone (default America/New_York)
#   $3 - ACME registration email (default below)
set -euo pipefail
# Load IP configuration (missing conf file is tolerated).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# (A redundant second 'set -e' used to sit here; 'set -euo pipefail' above
# already covers it.)
PROXMOX_HOST="${1:-192.168.11.11}"
TZ="${2:-America/New_York}"
ACME_EMAIL="${3:-nsatoshi2007@hotmail.com}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🚀 NPMplus Installation (Using Available Resources)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Find next container ID
# NOTE(review): assumes passwordless SSH to the Proxmox host — confirm.
# The new CTID is (highest existing VMID + 1); gaps are never reused, and two
# concurrent installs would race for the same ID — TODO confirm acceptable.
CTID=$(ssh root@"$PROXMOX_HOST" "pct list | tail -n +2 | awk '{print \$1}' | sort -n | tail -1")
CTID=$((CTID + 1))
echo "📋 Using container ID: $CTID"
echo ""
# Check for existing templates
# Template preference: any Alpine template first, then Ubuntu/Debian, else
# abort with a manual-download hint.
echo "📦 Checking for available templates..."
EXISTING_TEMPLATE=$(ssh root@"$PROXMOX_HOST" "pveam list local | grep -i alpine | head -1 | awk '{print \$1}' || echo ''")
if [ -n "$EXISTING_TEMPLATE" ]; then
echo " ✅ Found existing template: $EXISTING_TEMPLATE"
# pveam list returns format like "local:vztmpl/alpine-3.22-default_20250617_amd64.tar.xz"
TEMPLATE="$EXISTING_TEMPLATE"
else
# Check what the existing NPM container uses
echo " ⚠️ No Alpine template found locally"
echo " 📋 Checking existing NPM container (105) for template info..."
# Try to use ubuntu or debian template if available
UBUNTU_TEMPLATE=$(ssh root@"$PROXMOX_HOST" "pveam list local | grep -iE 'ubuntu|debian' | head -1 | awk '{print \$1}' || echo ''")
if [ -n "$UBUNTU_TEMPLATE" ]; then
echo " ✅ Found alternative template: $UBUNTU_TEMPLATE"
# pveam list returns format like "local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst"
TEMPLATE="$UBUNTU_TEMPLATE"
else
# No usable template at all: bail out with instructions for the operator.
echo " ❌ No suitable template found"
echo ""
echo " 💡 Solution: Download template manually or use Proxmox web UI"
echo " Or run on Proxmox host:"
echo " pveam download local alpine-3.22-default_20250617_amd64.tar.xz"
exit 1
fi
fi
# Create container
echo ""
echo "📦 Creating container with template: $TEMPLATE..."
# Template from pveam is already in format "local:vztmpl/filename", use directly
# The whole multi-line pct command is ONE double-quoted string executed on the
# Proxmox host; the '\\' continuations become '\' + newline in the remote
# command. Unprivileged container with nesting enabled (required for Docker).
ssh root@"$PROXMOX_HOST" "pct create $CTID \\
$TEMPLATE \\
--hostname npmplus \\
--memory 512 \\
--cores 1 \\
--rootfs local-lvm:3 \\
--net0 name=eth0,bridge=vmbr0,ip=dhcp \\
--unprivileged 1 \\
--features nesting=1" || {
echo " ❌ Failed to create container"
exit 1
}
echo " ✅ Container created"
# Start container
echo ""
echo "🚀 Starting container..."
ssh root@"$PROXMOX_HOST" "pct start $CTID" || {
echo " ❌ Failed to start container"
exit 1
}
# Wait for container to be ready
# NOTE(review): fixed sleep rather than a readiness poll — a slow host may
# need longer before 'pct exec' works. Confirm 5s is sufficient in practice.
echo " ⏳ Waiting for container to be ready..."
sleep 5
# Install NPMplus inside container
echo ""
echo "📦 Installing NPMplus inside container..."
# The here-doc below is fed to 'bash' running INSIDE the container via
# 'pct exec'. Its delimiter is UNQUOTED, so the body is expanded LOCALLY
# before being sent: $TZ and $ACME_EMAIL interpolate now, while every
# \$-escaped variable/substitution is evaluated on the remote side.
# The remote script sets its own 'set -e'; ssh propagates its exit status.
# NOTE(review): because the OUTER script runs under 'set -euo pipefail', a
# non-zero status here aborts immediately, making the '[ $? -eq 0 ]' check
# after the here-doc effectively unreachable on failure — confirm intended.
ssh root@"$PROXMOX_HOST" "pct exec $CTID -- bash" << INSTALL_EOF
set -e
# Detect OS and install accordingly
if [ -f /etc/alpine-release ]; then
echo " 📋 Detected Alpine Linux"
apk update
apk add --no-cache tzdata gawk yq docker curl bash
# Start Docker
rc-service docker start || true
rc-update add docker default || true
# Install docker compose plugin
DOCKER_COMPOSE_VERSION=\$(curl -fsSL https://api.github.com/repos/docker/compose/releases/latest 2>/dev/null | grep '"tag_name":' | cut -d'"' -f4 || echo "v2.24.0")
DOCKER_CONFIG=\${DOCKER_CONFIG:-\$HOME/.docker}
mkdir -p \$DOCKER_CONFIG/cli-plugins
curl -fsSL "https://github.com/docker/compose/releases/download/\${DOCKER_COMPOSE_VERSION#v}/docker-compose-linux-x86_64" -o \$DOCKER_CONFIG/cli-plugins/docker-compose 2>/dev/null || \\
curl -fsSL "https://github.com/docker/compose/releases/download/v2.24.0/docker-compose-linux-x86_64" -o \$DOCKER_CONFIG/cli-plugins/docker-compose
chmod +x \$DOCKER_CONFIG/cli-plugins/docker-compose
elif [ -f /etc/debian_version ]; then
echo " 📋 Detected Debian/Ubuntu"
apt-get update
apt-get install -y tzdata gawk curl bash ca-certificates gnupg lsb-release
# Install Docker from official repository
echo " 📦 Installing Docker..."
if [ ! -f /etc/apt/keyrings/docker.gpg ]; then
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg
# Detect Ubuntu version for Docker repo
UBUNTU_CODENAME=\$(lsb_release -cs 2>/dev/null || echo "jammy")
echo "deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \$UBUNTU_CODENAME stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
fi
# Install Docker packages (skip if already installed)
if ! command -v docker >/dev/null 2>&1; then
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
else
echo " Docker already installed, ensuring docker-compose-plugin..."
apt-get install -y docker-compose-plugin || true
fi
# Install yq from GitHub releases
echo " 📦 Installing yq..."
if ! command -v yq >/dev/null 2>&1; then
YQ_VERSION=\$(curl -fsSL https://api.github.com/repos/mikefarah/yq/releases/latest 2>/dev/null | grep '"tag_name":' | cut -d'"' -f4 || echo "v4.40.5")
curl -fsSL "https://github.com/mikefarah/yq/releases/download/\${YQ_VERSION}/yq_linux_amd64" -o /usr/local/bin/yq
chmod +x /usr/local/bin/yq
else
echo " yq already installed"
fi
# Start Docker
systemctl start docker || true
systemctl enable docker || true
else
echo " ❌ Unsupported OS"
exit 1
fi
# Wait for Docker
sleep 5
# Fetch NPMplus compose file
cd /opt
echo " 📥 Downloading NPMplus compose.yaml..."
curl -fsSL "https://raw.githubusercontent.com/ZoeyVid/NPMplus/refs/heads/develop/compose.yaml" -o compose.yaml || {
echo " ❌ Failed to download compose.yaml"
exit 1
}
# Update compose file with timezone and email
if command -v yq >/dev/null 2>&1; then
echo " 📝 Updating compose.yaml..."
yq -i "
.services.npmplus.environment |=
(map(select(. != \"TZ=*\" and . != \"ACME_EMAIL=*\")) +
[\"TZ=$TZ\", \"ACME_EMAIL=$ACME_EMAIL\"])
" compose.yaml
else
echo " ⚠️ yq not available, updating manually..."
sed -i "s|TZ=.*|TZ=$TZ|g" compose.yaml || true
sed -i "s|ACME_EMAIL=.*|ACME_EMAIL=$ACME_EMAIL|g" compose.yaml || true
fi
# Start NPMplus
echo " 🚀 Starting NPMplus (this may take 1-2 minutes)..."
cd /opt
docker compose up -d || {
echo " ⚠️ docker compose failed, checking status..."
docker compose ps || true
exit 1
}
# Wait for NPMplus to be ready
echo " ⏳ Waiting for NPMplus to start..."
CONTAINER_ID=""
for i in {1..60}; do
CONTAINER_ID=\$(docker ps --filter "name=npmplus" --format "{{.ID}}" 2>/dev/null || echo "")
if [ -n "\$CONTAINER_ID" ]; then
STATUS=\$(docker inspect --format '{{.State.Health.Status}}' "\$CONTAINER_ID" 2>/dev/null || echo "starting")
if [ "\$STATUS" = "healthy" ] || [ "\$STATUS" = "running" ]; then
echo " ✅ NPMplus is running"
break
fi
fi
sleep 2
done
# Get admin password
echo " 🔑 Retrieving admin password..."
PASSWORD_LINE=\$(docker logs "\$CONTAINER_ID" 2>&1 | grep -i "Creating a new user" | tail -1 || echo "")
if [ -n "\$PASSWORD_LINE" ]; then
PASSWORD=\$(echo "\$PASSWORD_LINE" | grep -oP "password: \K[^\s]+" || echo "")
if [ -n "\$PASSWORD" ]; then
echo "username: admin@example.org" > /opt/.npm_pwd
echo "password: \$PASSWORD" >> /opt/.npm_pwd
echo " ✅ Admin password saved"
fi
fi
echo " ✅ NPMplus installation complete!"
INSTALL_EOF
# Post-install: report container info, then hand the admin password to the
# configuration-migration script.
# NOTE(review): this tests the status of the preceding here-doc ssh; under
# 'set -euo pipefail' a failure there already aborts, so the else branch is
# largely vestigial — kept for safety.
if [ $? -eq 0 ]; then
  # Get container IP (first address reported inside the container)
  CONTAINER_IP=$(ssh root@"$PROXMOX_HOST" "pct exec $CTID -- hostname -I | awk '{print \$1}'")
  echo ""
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo "✅ NPMplus Installation Complete!"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  echo "📋 Container Information:"
  echo " • Container ID: $CTID"
  echo " • Container IP: $CONTAINER_IP"
  echo " • Access URL: https://$CONTAINER_IP:81"
  echo " • Admin Email: admin@example.org"
  echo ""
  echo "🔑 Get admin password:"
  echo " ssh root@$PROXMOX_HOST \"pct exec $CTID -- cat /opt/.npm_pwd\""
  echo ""
  # Continue with migration
  echo "🚀 Continuing with configuration migration..."
  # Extract the saved admin password from the container. The '|| echo' runs
  # on the REMOTE side, matching the fallback style used for template
  # detection above. (The closing double quote was previously missing here,
  # which was a hard bash parse error that aborted the whole script.)
  ADMIN_PASSWORD=$(ssh root@"$PROXMOX_HOST" "pct exec $CTID -- cat /opt/.npm_pwd 2>/dev/null | grep -i password | cut -d: -f2 | tr -d ' ' || echo ''")
  if [ -z "$ADMIN_PASSWORD" ]; then
    echo " ⚠️ Could not retrieve password automatically"
    echo " 💡 Run migration manually:"
    echo " bash scripts/nginx-proxy-manager/post-install-migration.sh $PROXMOX_HOST $CTID $CONTAINER_IP"
  else
    # Feed the password to the migration script on stdin; a migration failure
    # is reported but does not fail the install.
    echo "$ADMIN_PASSWORD" | bash scripts/nginx-proxy-manager/migrate-configs-to-npmplus.sh \
      "$PROXMOX_HOST" \
      "$CTID" \
      "https://$CONTAINER_IP:81" || {
      echo " ⚠️ Migration had issues, but installation is complete"
    }
  fi
else
  echo ""
  echo "❌ Installation failed. Check the output above."
  exit 1
fi
View File

@@ -0,0 +1,256 @@
#!/bin/bash
# Install NPMplus in a new Proxmox LXC container, reusing whatever OS
# template is already present on the host (Alpine preferred, Ubuntu/Debian
# as a fallback) so nothing has to be downloaded up front.
#
# Usage: install-npmplus.sh [proxmox-host] [timezone] [acme-email]
# BUGFIX: removed a redundant second `set -e` that followed `set -euo pipefail`.
set -euo pipefail

PROXMOX_HOST="${1:-192.168.11.11}"
TZ="${2:-America/New_York}"
ACME_EMAIL="${3:-nsatoshi2007@hotmail.com}"

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🚀 NPMplus Installation (Using Available Resources)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# Find next container ID: highest existing VMID + 1.
# NOTE(review): assumes `pct list` reports at least one container; on an
# empty host CTID would become 1 — confirm that is acceptable.
CTID=$(ssh root@"$PROXMOX_HOST" "pct list | tail -n +2 | awk '{print \$1}' | sort -n | tail -1")
CTID=$((CTID + 1))
echo "📋 Using container ID: $CTID"
echo ""

# Check for existing templates (Alpine preferred for its small footprint).
echo "📦 Checking for available templates..."
EXISTING_TEMPLATE=$(ssh root@"$PROXMOX_HOST" "pveam list local | grep -i alpine | head -1 | awk '{print \$1}' || echo ''")
if [ -n "$EXISTING_TEMPLATE" ]; then
  echo " ✅ Found existing template: $EXISTING_TEMPLATE"
  # pveam list returns format like "local:vztmpl/alpine-3.22-default_20250617_amd64.tar.xz"
  TEMPLATE="$EXISTING_TEMPLATE"
else
  # Fall back to any Ubuntu/Debian template already on the host.
  echo " ⚠️ No Alpine template found locally"
  echo " 📋 Checking existing NPM container (105) for template info..."
  UBUNTU_TEMPLATE=$(ssh root@"$PROXMOX_HOST" "pveam list local | grep -iE 'ubuntu|debian' | head -1 | awk '{print \$1}' || echo ''")
  if [ -n "$UBUNTU_TEMPLATE" ]; then
    echo " ✅ Found alternative template: $UBUNTU_TEMPLATE"
    # pveam list returns format like "local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst"
    TEMPLATE="$UBUNTU_TEMPLATE"
  else
    echo " ❌ No suitable template found"
    echo ""
    echo " 💡 Solution: Download template manually or use Proxmox web UI"
    echo " Or run on Proxmox host:"
    echo " pveam download local alpine-3.22-default_20250617_amd64.tar.xz"
    exit 1
  fi
fi
# Create container
echo ""
echo "📦 Creating container with template: $TEMPLATE..."
# Template from pveam is already in format "local:vztmpl/filename", use directly
# Unprivileged container with nesting=1 so Docker can run inside the LXC.
ssh root@"$PROXMOX_HOST" "pct create $CTID \\
$TEMPLATE \\
--hostname npmplus \\
--memory 512 \\
--cores 1 \\
--rootfs local-lvm:3 \\
--net0 name=eth0,bridge=vmbr0,ip=dhcp \\
--unprivileged 1 \\
--features nesting=1" || {
echo " ❌ Failed to create container"
exit 1
}
echo " ✅ Container created"
# Start container
echo ""
echo "🚀 Starting container..."
ssh root@"$PROXMOX_HOST" "pct start $CTID" || {
echo " ❌ Failed to start container"
exit 1
}
# Wait for container to be ready
# NOTE(review): fixed 5s sleep assumes the container finishes booting in
# time; a readiness poll would be more robust — confirm this is sufficient.
echo " ⏳ Waiting for container to be ready..."
sleep 5
# Install NPMplus inside container
echo ""
echo "📦 Installing NPMplus inside container..."
# Everything between the INSTALL_EOF markers runs INSIDE the container via
# pct exec. The heredoc is unquoted, so backslash-escaped variables expand
# remotely while unescaped ones (TZ, ACME_EMAIL) expand locally first.
ssh root@"$PROXMOX_HOST" "pct exec $CTID -- bash" << INSTALL_EOF
set -e
# Detect OS and install accordingly
if [ -f /etc/alpine-release ]; then
echo " 📋 Detected Alpine Linux"
apk update
apk add --no-cache tzdata gawk yq docker curl bash
# Start Docker
rc-service docker start || true
rc-update add docker default || true
# Install docker compose plugin
DOCKER_COMPOSE_VERSION=\$(curl -fsSL https://api.github.com/repos/docker/compose/releases/latest 2>/dev/null | grep '"tag_name":' | cut -d'"' -f4 || echo "v2.24.0")
DOCKER_CONFIG=\${DOCKER_CONFIG:-\$HOME/.docker}
mkdir -p \$DOCKER_CONFIG/cli-plugins
# NOTE(review): the version strip below removes the leading v, but GitHub
# release download URLs keep it -- the first curl likely 404s and the
# pinned v2.24.0 fallback always runs; confirm intended.
curl -fsSL "https://github.com/docker/compose/releases/download/\${DOCKER_COMPOSE_VERSION#v}/docker-compose-linux-x86_64" -o \$DOCKER_CONFIG/cli-plugins/docker-compose 2>/dev/null || \\
curl -fsSL "https://github.com/docker/compose/releases/download/v2.24.0/docker-compose-linux-x86_64" -o \$DOCKER_CONFIG/cli-plugins/docker-compose
chmod +x \$DOCKER_CONFIG/cli-plugins/docker-compose
elif [ -f /etc/debian_version ]; then
echo " 📋 Detected Debian/Ubuntu"
apt-get update
apt-get install -y tzdata gawk curl bash ca-certificates gnupg lsb-release
# Install Docker from official repository
echo " 📦 Installing Docker..."
if [ ! -f /etc/apt/keyrings/docker.gpg ]; then
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg
# Detect Ubuntu version for Docker repo
UBUNTU_CODENAME=\$(lsb_release -cs 2>/dev/null || echo "jammy")
echo "deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \$UBUNTU_CODENAME stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
fi
# Install Docker packages (skip if already installed)
if ! command -v docker >/dev/null 2>&1; then
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
else
echo " Docker already installed, ensuring docker-compose-plugin..."
apt-get install -y docker-compose-plugin || true
fi
# Install yq from GitHub releases
echo " 📦 Installing yq..."
if ! command -v yq >/dev/null 2>&1; then
YQ_VERSION=\$(curl -fsSL https://api.github.com/repos/mikefarah/yq/releases/latest 2>/dev/null | grep '"tag_name":' | cut -d'"' -f4 || echo "v4.40.5")
curl -fsSL "https://github.com/mikefarah/yq/releases/download/\${YQ_VERSION}/yq_linux_amd64" -o /usr/local/bin/yq
chmod +x /usr/local/bin/yq
else
echo " yq already installed"
fi
# Start Docker
systemctl start docker || true
systemctl enable docker || true
else
echo " ❌ Unsupported OS"
exit 1
fi
# Wait for Docker
sleep 5
# Fetch NPMplus compose file
cd /opt
echo " 📥 Downloading NPMplus compose.yaml..."
curl -fsSL "https://raw.githubusercontent.com/ZoeyVid/NPMplus/refs/heads/develop/compose.yaml" -o compose.yaml || {
echo " ❌ Failed to download compose.yaml"
exit 1
}
# Update compose file with timezone and email
# NOTE(review): the yq filter compares against the literal strings "TZ=*"
# and "ACME_EMAIL=*" (yq does not glob in !=), so stale entries are probably
# NOT removed before the new ones are appended -- check for duplicates.
if command -v yq >/dev/null 2>&1; then
echo " 📝 Updating compose.yaml..."
yq -i "
.services.npmplus.environment |=
(map(select(. != \"TZ=*\" and . != \"ACME_EMAIL=*\")) +
[\"TZ=$TZ\", \"ACME_EMAIL=$ACME_EMAIL\"])
" compose.yaml
else
echo " ⚠️ yq not available, updating manually..."
sed -i "s|TZ=.*|TZ=$TZ|g" compose.yaml || true
sed -i "s|ACME_EMAIL=.*|ACME_EMAIL=$ACME_EMAIL|g" compose.yaml || true
fi
# Start NPMplus
echo " 🚀 Starting NPMplus (this may take 1-2 minutes)..."
cd /opt
docker compose up -d || {
echo " ⚠️ docker compose failed, checking status..."
docker compose ps || true
exit 1
}
# Wait for NPMplus to be ready
# Poll for up to about 120 seconds for the container to appear and report
# a healthy (or plain running) state.
echo " ⏳ Waiting for NPMplus to start..."
CONTAINER_ID=""
for i in {1..60}; do
CONTAINER_ID=\$(docker ps --filter "name=npmplus" --format "{{.ID}}" 2>/dev/null || echo "")
if [ -n "\$CONTAINER_ID" ]; then
STATUS=\$(docker inspect --format '{{.State.Health.Status}}' "\$CONTAINER_ID" 2>/dev/null || echo "starting")
if [ "\$STATUS" = "healthy" ] || [ "\$STATUS" = "running" ]; then
echo " ✅ NPMplus is running"
break
fi
fi
sleep 2
done
# Get admin password
# The initial admin credentials are only printed once in the container log;
# persist them to /opt/.npm_pwd for later retrieval by the caller.
echo " 🔑 Retrieving admin password..."
PASSWORD_LINE=\$(docker logs "\$CONTAINER_ID" 2>&1 | grep -i "Creating a new user" | tail -1 || echo "")
if [ -n "\$PASSWORD_LINE" ]; then
PASSWORD=\$(echo "\$PASSWORD_LINE" | grep -oP "password: \K[^\s]+" || echo "")
if [ -n "\$PASSWORD" ]; then
echo "username: admin@example.org" > /opt/.npm_pwd
echo "password: \$PASSWORD" >> /opt/.npm_pwd
echo " ✅ Admin password saved"
fi
fi
echo " ✅ NPMplus installation complete!"
INSTALL_EOF
# Under `set -e` a failed remote install already aborted the script, so the
# else branch below is effectively unreachable; the guard is kept for clarity.
if [ $? -eq 0 ]; then
  # Get container IP (first address reported from inside the container)
  CONTAINER_IP=$(ssh root@"$PROXMOX_HOST" "pct exec $CTID -- hostname -I | awk '{print \$1}'")
  echo ""
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo "✅ NPMplus Installation Complete!"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  echo "📋 Container Information:"
  echo " • Container ID: $CTID"
  echo " • Container IP: $CONTAINER_IP"
  echo " • Access URL: https://$CONTAINER_IP:81"
  echo " • Admin Email: admin@example.org"
  echo ""
  echo "🔑 Get admin password:"
  echo " ssh root@$PROXMOX_HOST \"pct exec $CTID -- cat /opt/.npm_pwd\""
  echo ""
  # Continue with migration
  echo "🚀 Continuing with configuration migration..."
  # BUGFIX: the remote command string was missing its closing double quote,
  # which made this line a shell syntax error. The `|| echo ''` now runs on
  # the remote side so a missing password file yields an empty string.
  ADMIN_PASSWORD=$(ssh root@"$PROXMOX_HOST" "pct exec $CTID -- cat /opt/.npm_pwd 2>/dev/null | grep -i password | cut -d: -f2 | tr -d ' ' || echo ''")
  if [ -z "$ADMIN_PASSWORD" ]; then
    echo " ⚠️ Could not retrieve password automatically"
    echo " 💡 Run migration manually:"
    echo " bash scripts/nginx-proxy-manager/post-install-migration.sh $PROXMOX_HOST $CTID $CONTAINER_IP"
  else
    # Feed the password on stdin so it does not appear in `ps` output.
    echo "$ADMIN_PASSWORD" | bash scripts/nginx-proxy-manager/migrate-configs-to-npmplus.sh \
      "$PROXMOX_HOST" \
      "$CTID" \
      "https://$CONTAINER_IP:81" || {
      echo " ⚠️ Migration had issues, but installation is complete"
    }
  fi
else
  echo ""
  echo "❌ Installation failed. Check the output above."
  exit 1
fi

View File

@@ -0,0 +1,98 @@
#!/usr/bin/env bash
# Migrate 2 containers to pve2 using thin1 storage via backup/restore method
# This approach allows us to specify target storage
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# SECURITY(review): a plaintext root password is committed as the default
# below. Rotate this credential and supply PROXMOX_PASS via the environment
# or a secret store; prefer SSH keys over sshpass entirely.
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
PROXMOX_PASS="${PROXMOX_PASS:-L@kers2010}"
SOURCE_NODE="ml110"
TARGET_NODE="pve2"
TARGET_STORAGE="thin1"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Leveled loggers; note all of them write to stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Run a command on the Proxmox host as root (password auth via sshpass).
ssh_proxmox() {
sshpass -p "$PROXMOX_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$PROXMOX_HOST" "$@"
}
# Migrate one LXC container via backup + restore so the restored copy can be
# placed on $TARGET_STORAGE.
# Arguments: $1 - VMID, $2 - human-readable container name (logging only)
# Returns: 0 on success, 1 if either the backup or the restore fails.
migrate_via_backup() {
  local vmid=$1
  local name=$2
  log_info "Migrating container $vmid ($name) to $TARGET_NODE using $TARGET_STORAGE..."

  # Step 1: Create backup on source node (--remove 0 keeps the original).
  log_info "  Step 1: Creating backup of container $vmid..."
  local backup_file
  if ssh_proxmox "vzdump $vmid --compress zstd --storage local --dumpdir /tmp --remove 0" 2>&1 | tee /tmp/backup-${vmid}.log; then
    log_success "  Backup created"
    # vzdump names the archive itself; pick the newest one for this VMID.
    backup_file=$(ssh_proxmox "ls -t /tmp/vzdump-lxc-${vmid}-*.tar.zst 2>/dev/null | head -1")
    if [[ -z "$backup_file" ]]; then
      log_error "  Backup file not found"
      return 1
    fi
    log_info "  Backup file: $backup_file"
  else
    log_error "  Backup failed"
    return 1
  fi

  # Step 2: Restore with the target storage.
  # BUGFIX: this previously invoked `vzdump` again, which only CREATES
  # backups and cannot restore one; `pct restore` is the restore command.
  # NOTE(review): `pct restore` runs on whichever node we ssh into; to truly
  # land the container on $TARGET_NODE the restore must execute there (or
  # the dump be copied over first) — confirm $PROXMOX_HOST is $TARGET_NODE.
  log_info "  Step 2: Restoring container on $TARGET_NODE with $TARGET_STORAGE storage..."
  if ssh_proxmox "pct restore $vmid $backup_file --storage $TARGET_STORAGE" 2>&1; then
    log_success "  Container restored on $TARGET_NODE"
  else
    log_error "  Restore failed"
    return 1
  fi

  # Step 3: The original container is intentionally left in place.
  log_warn "  Original container still exists on $SOURCE_NODE"
  log_info "  You may want to remove it after verifying the migration"
  return 0
}
# Entry point: migrate the two Besu sentry containers via backup/restore.
printf '%s\n' "========================================="
printf '%s\n' "Migrate 2 containers via backup/restore"
printf '%s\n' "========================================="
printf '\n'
# "VMID:name" pairs to migrate.
containers=(
  "1500:besu-sentry-1"
  "1501:besu-sentry-2"
)
for entry in "${containers[@]}"; do
  echo ""
  migrate_via_backup "${entry%%:*}" "${entry#*:}"
  echo ""
done
echo "Migration complete!"

View File

@@ -0,0 +1,173 @@
#!/bin/bash
# Migrate VMIDs 100-130 and 7800-7811 using backup/restore method
# VMIDs 100-130 → thin1 storage
# VMIDs 7800-7811 → local storage
set -euo pipefail

# Load IP configuration and physical inventory (both best-effort).
# BUGFIX: SCRIPT_DIR was computed twice; the duplicate assignment is removed.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "$SCRIPT_DIR/load-physical-inventory.sh" 2>/dev/null || true

# ANSI colors for the leveled loggers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}=== $1 ===${NC}\n"; }

SOURCE_NODE="r630-02"
TARGET_NODE="r630-01"
BACKUP_STORAGE="local"  # storage used for the intermediate vzdump archives

# VMs to migrate
VMIDS_100_130=(100 101 102 103 104 105 130)
VMIDS_7800_7811=(7800 7801 7802 7810 7811)

log_section "VM Migration to r630-01 (Backup/Restore)"
log_info "Source Node: $SOURCE_NODE"
log_info "Target Node: $TARGET_NODE"
log_info "VMIDs 100-130 → thin1 storage (96 GB)"
log_info "VMIDs 7800-7811 → local storage (210 GB)"
echo ""
log_warn "This will migrate ${#VMIDS_100_130[@]} + ${#VMIDS_7800_7811[@]} = $((${#VMIDS_100_130[@]} + ${#VMIDS_7800_7811[@]})) containers using backup/restore"
echo ""
# Require explicit confirmation before any destructive work.
read -p "Continue with migration? (yes/no): " confirm
if [[ "$confirm" != "yes" ]]; then
  log_info "Migration cancelled."
  exit 0
fi
FAILED=()   # VMIDs whose migration failed
SUCCESS=()  # VMIDs migrated successfully

# Migrate VMIDs 100-130 to thin1
# SECURITY(review): sshpass uses the literal string "password" — replace
# with a real credential source (env var / SSH key) before production use.
log_section "Migrating VMIDs 100-130 to thin1 storage"
for vmid in "${VMIDS_100_130[@]}"; do
  log_info "Migrating VMID $vmid..."
  # Check if VM exists on the source node; skip silently-missing ones.
  if ! sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "pct list 2>/dev/null | grep -q \"^$vmid\"" 2>/dev/null; then
    log_warn "VMID $vmid not found, skipping"
    continue
  fi
  # Create backup (this will work since VMs are stopped).
  # BUGFIX: a failed vzdump previously aborted the whole script via set -e;
  # it is now recorded in FAILED and the loop continues.
  log_info " Creating backup..."
  if ! sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "vzdump $vmid --storage $BACKUP_STORAGE --compress gzip --mode stop 2>&1 | tail -5"; then
    log_error " Backup failed for VMID $vmid"
    FAILED+=("$vmid")
    continue
  fi
  # Find the newest backup file for this VMID.
  BACKUP_FILE=$(sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "ls -t /var/lib/vz/dump/vzdump-lxc-$vmid-*.tar.gz 2>/dev/null | head -1" 2>/dev/null) || true
  if [ -z "$BACKUP_FILE" ]; then
    log_error " Backup file not found for VMID $vmid"
    FAILED+=("$vmid")
    continue
  fi
  log_info " Backup: $BACKUP_FILE"
  # Restore to target.
  # BUGFIX: checking $? after an assignment was dead code under set -e; the
  # command substitution is now tested directly.
  # NOTE(review): `pct restore` has no --target option and restores on the
  # node it runs on — confirm this actually lands the container on $TARGET_NODE.
  log_info " Restoring to $TARGET_NODE (thin1 storage)..."
  if RESTORE_OUTPUT=$(sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "pct restore $vmid $BACKUP_FILE --storage thin1 --target $TARGET_NODE 2>&1"); then
    log_success " VMID $vmid migrated successfully"
    SUCCESS+=("$vmid")
    # Remove from source (best effort).
    sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
      "pct destroy $vmid 2>&1" || true
  else
    log_error " Restore failed: $RESTORE_OUTPUT"
    FAILED+=("$vmid")
  fi
  echo ""
done
# Migrate VMIDs 7800-7811 to local storage (same flow as the thin1 loop).
log_section "Migrating VMIDs 7800-7811 to local storage"
for vmid in "${VMIDS_7800_7811[@]}"; do
  log_info "Migrating VMID $vmid..."
  # Check if VM exists on the source node; skip missing ones.
  if ! sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "pct list 2>/dev/null | grep -q \"^$vmid\"" 2>/dev/null; then
    log_warn "VMID $vmid not found, skipping"
    continue
  fi
  # Create backup.
  # BUGFIX: a failed vzdump previously aborted the whole script via set -e;
  # it is now recorded in FAILED and the loop continues.
  log_info " Creating backup..."
  if ! sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "vzdump $vmid --storage $BACKUP_STORAGE --compress gzip --mode stop 2>&1 | tail -5"; then
    log_error " Backup failed for VMID $vmid"
    FAILED+=("$vmid")
    continue
  fi
  # Find the newest backup file for this VMID.
  BACKUP_FILE=$(sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "ls -t /var/lib/vz/dump/vzdump-lxc-$vmid-*.tar.gz 2>/dev/null | head -1" 2>/dev/null) || true
  if [ -z "$BACKUP_FILE" ]; then
    log_error " Backup file not found for VMID $vmid"
    FAILED+=("$vmid")
    continue
  fi
  log_info " Backup: $BACKUP_FILE"
  # Restore to target.
  # BUGFIX: checking $? after an assignment was dead code under set -e.
  # NOTE(review): `pct restore` has no --target option and restores on the
  # node it runs on — confirm this actually lands the container on $TARGET_NODE.
  log_info " Restoring to $TARGET_NODE (local storage)..."
  if RESTORE_OUTPUT=$(sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "pct restore $vmid $BACKUP_FILE --storage local --target $TARGET_NODE 2>&1"); then
    log_success " VMID $vmid migrated successfully"
    SUCCESS+=("$vmid")
    # Remove from source (best effort).
    sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
      "pct destroy $vmid 2>&1" || true
  else
    log_error " Restore failed: $RESTORE_OUTPUT"
    FAILED+=("$vmid")
  fi
  echo ""
done
# Final report: migrated vs failed VMIDs, then a listing from the target node.
log_section "Migration Summary"
log_info "Successful: ${#SUCCESS[@]}"
if (( ${#SUCCESS[@]} > 0 )); then
  echo " VMIDs: ${SUCCESS[*]}"
fi
if (( ${#FAILED[@]} > 0 )); then
  log_warn "Failed: ${#FAILED[@]}"
  echo " VMIDs: ${FAILED[*]}"
fi
log_section "Verification"
sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_01:-192.168.11.11} \
  "pct list 2>/dev/null | grep -E '100|101|102|103|104|105|130|7800|7801|7802|7810|7811'" 2>/dev/null || true
log_section "Complete"

View File

@@ -0,0 +1,140 @@
#!/bin/bash
# Migrate VMIDs 100-130 and 7800-7811 using backup/restore method
# This handles storage differences between nodes
set -euo pipefail

# Load IP configuration and physical inventory (both best-effort).
# BUGFIX: SCRIPT_DIR was computed twice; the duplicate assignment is removed.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "$SCRIPT_DIR/load-physical-inventory.sh" 2>/dev/null || true

# ANSI colors for the leveled loggers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}=== $1 ===${NC}\n"; }

SOURCE_NODE="r630-02"
TARGET_NODE="r630-01"
TARGET_STORAGE="thin1"
BACKUP_STORAGE="local"  # storage used for the intermediate vzdump archives

# VMs to migrate
VMIDS_100_130=(100 101 102 103 104 105 130)
VMIDS_7800_7811=(7800 7801 7802 7810 7811)
ALL_VMIDS=("${VMIDS_100_130[@]}" "${VMIDS_7800_7811[@]}")

log_section "VM Migration to r630-01 (Backup/Restore Method)"
log_info "Source Node: $SOURCE_NODE"
log_info "Target Node: $TARGET_NODE"
log_info "Target Storage: $TARGET_STORAGE"
log_info "VMs to migrate: ${#ALL_VMIDS[@]} containers"
echo ""
log_warn "This will migrate the following VMs using backup/restore:"
echo " VMIDs 100-130: ${VMIDS_100_130[*]}"
echo " VMIDs 7800-7811: ${VMIDS_7800_7811[*]}"
echo ""
# Require explicit confirmation before any destructive work.
read -p "Continue with migration? (yes/no): " confirm
if [[ "$confirm" != "yes" ]]; then
  log_info "Migration cancelled."
  exit 0
fi
log_section "Starting Migration"

FAILED=()   # VMIDs whose migration failed
SUCCESS=()  # VMIDs migrated successfully

# SECURITY(review): sshpass uses the literal string "password" — replace
# with a real credential source (env var / SSH key) before production use.
for vmid in "${ALL_VMIDS[@]}"; do
  log_info "Migrating VMID $vmid..."
  # Check if VM exists on the source node; skip missing ones.
  if ! sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "pct list 2>/dev/null | grep -q \"^$vmid\"" 2>/dev/null; then
    log_warn "VMID $vmid not found on source, skipping"
    continue
  fi
  # Step 1: Create backup on source node.
  # BUGFIX: checking $? after an assignment was dead code under set -e; the
  # command substitution is now tested directly.
  log_info " Creating backup..."
  if ! BACKUP_RESULT=$(sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "vzdump $vmid --storage $BACKUP_STORAGE --compress gzip --mode stop 2>&1"); then
    log_error " Backup failed for VMID $vmid"
    FAILED+=("$vmid")
    continue
  fi
  # Extract the backup filename from vzdump's output.
  # BUGFIX: vzdump timestamps contain underscores and dashes (e.g.
  # 2026_02_12-15_00_00), which the old [0-9]* pattern never matched, so
  # BACKUP_FILE was always empty; the class now allows digits, _ and -.
  BACKUP_FILE=$(echo "$BACKUP_RESULT" | grep -o "vzdump-lxc-$vmid-[0-9_-]*\.tar\.gz" | tail -1) || true
  if [ -z "$BACKUP_FILE" ]; then
    log_error " Could not determine backup filename for VMID $vmid"
    FAILED+=("$vmid")
    continue
  fi
  log_info " Backup created: $BACKUP_FILE"
  # Step 2: Restore on target node.
  # NOTE(review): `pct restore` has no --target option and restores on the
  # node it runs on — confirm this actually lands the container on $TARGET_NODE.
  log_info " Restoring to $TARGET_NODE..."
  if RESTORE_RESULT=$(sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
    "pct restore $vmid /var/lib/vz/dump/$BACKUP_FILE --storage $TARGET_STORAGE --target $TARGET_NODE 2>&1"); then
    log_success " VMID $vmid migrated successfully"
    SUCCESS+=("$vmid")
    # Step 3: Delete from source (best effort).
    log_info " Removing from source node..."
    sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_02:-192.168.11.12} \
      "pct destroy $vmid 2>&1" || true
  else
    log_error " Restore failed for VMID $vmid: $RESTORE_RESULT"
    FAILED+=("$vmid")
  fi
  echo ""
done
# Final report and a verification listing from the target node.
log_section "Migration Summary"
log_info "Successful migrations: ${#SUCCESS[@]}"
if (( ${#SUCCESS[@]} > 0 )); then
  echo " VMIDs: ${SUCCESS[*]}"
fi
if (( ${#FAILED[@]} > 0 )); then
  log_warn "Failed migrations: ${#FAILED[@]}"
  echo " VMIDs: ${FAILED[*]}"
fi
log_section "Verification"
log_info "Checking VMs on $TARGET_NODE..."
sshpass -p "password" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_01:-192.168.11.11} \
  "pct list 2>/dev/null | grep -E '$(IFS='|'; echo "${ALL_VMIDS[*]}")'" 2>/dev/null || true
log_section "Migration Complete"
if (( ${#FAILED[@]} == 0 )); then
  log_success "All VMs migrated successfully!"
else
  log_warn "Some migrations failed. Please check the errors above."
fi

View File

@@ -0,0 +1,88 @@
#!/bin/bash
# Vault Raft Snapshot Backup Script
# Creates automated backups of Vault cluster
set -euo pipefail

# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }

# Configuration (all overridable via the environment)
PROXMOX_HOST_1="${PROXMOX_HOST_1:-192.168.11.11}"
VAULT_CONTAINER="${VAULT_CONTAINER:-8640}"
VAULT_TOKEN="${VAULT_TOKEN:-}"
BACKUP_DIR="${BACKUP_DIR:-/home/intlc/projects/proxmox/.secure/vault-backups}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"

if [ -z "$VAULT_TOKEN" ]; then
  log_error "VAULT_TOKEN environment variable is required"
  log_info "Usage: VAULT_TOKEN=<token> ./scripts/vault-backup.sh"
  exit 1
fi

# Create backup directory (restrictive perms: snapshots contain secrets)
mkdir -p "$BACKUP_DIR"
chmod 700 "$BACKUP_DIR"

# Generate backup filename
BACKUP_FILE="$BACKUP_DIR/vault-snapshot-$(date +%Y%m%d-%H%M%S).snapshot"
echo "═══════════════════════════════════════════════════════════"
echo " Vault Raft Snapshot Backup"
echo "═══════════════════════════════════════════════════════════"
echo ""
log_info "Creating Raft snapshot..."
log_info "Backup file: $BACKUP_FILE"

# Create snapshot (streamed over ssh into the local backup file).
# SECURITY(review): VAULT_TOKEN is interpolated into the remote command line
# and is visible in `ps` on the remote host — prefer passing it via stdin or
# an env file.
if ssh root@"$PROXMOX_HOST_1" "pct exec $VAULT_CONTAINER -- bash -c 'export VAULT_ADDR=http://127.0.0.1:8200 && export VAULT_TOKEN=$VAULT_TOKEN && vault operator raft snapshot save -'" > "$BACKUP_FILE" 2>/dev/null; then
  BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
  log_success "Snapshot created successfully ($BACKUP_SIZE)"
else
  # BUGFIX: remove the empty/partial file so a failed run does not leave a
  # bogus snapshot behind that would later count as a valid backup.
  rm -f -- "$BACKUP_FILE"
  log_error "Failed to create snapshot"
  exit 1
fi

# Compress backup
log_info "Compressing backup..."
if gzip "$BACKUP_FILE"; then
  BACKUP_FILE="${BACKUP_FILE}.gz"
  BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
  log_success "Backup compressed ($BACKUP_SIZE)"
else
  log_warn "Compression failed, keeping uncompressed backup"
fi

# Clean up backups older than the retention window.
log_info "Cleaning up backups older than $RETENTION_DAYS days..."
find "$BACKUP_DIR" -name "vault-snapshot-*.snapshot*" -type f -mtime +"$RETENTION_DAYS" -delete
# BUGFIX: this count is of backups REMAINING after cleanup; renamed from the
# misleading DELETED_COUNT.
RETAINED_COUNT=$(find "$BACKUP_DIR" -name "vault-snapshot-*.snapshot*" -type f | wc -l)
log_success "Retained $RETAINED_COUNT backup(s)"

# Append to the backup index (timestamp | path | size).
BACKUP_INDEX="$BACKUP_DIR/backup-index.txt"
echo "$(date -Iseconds) | $BACKUP_FILE | $(du -h "$BACKUP_FILE" | cut -f1)" >> "$BACKUP_INDEX"
log_success "Backup index updated"
echo ""
log_success "✅ Backup completed successfully"
log_info "Backup location: $BACKUP_FILE"
log_info "To restore: vault operator raft snapshot restore $BACKUP_FILE"
echo ""

View File

@@ -0,0 +1,82 @@
#!/bin/bash
# Vault Raft Snapshot Backup Script
# Creates automated backups of Vault cluster
set -euo pipefail

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }

# Configuration (all overridable via the environment)
PROXMOX_HOST_1="${PROXMOX_HOST_1:-192.168.11.11}"
VAULT_CONTAINER="${VAULT_CONTAINER:-8640}"
VAULT_TOKEN="${VAULT_TOKEN:-}"
BACKUP_DIR="${BACKUP_DIR:-/home/intlc/projects/proxmox/.secure/vault-backups}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"

if [ -z "$VAULT_TOKEN" ]; then
  log_error "VAULT_TOKEN environment variable is required"
  log_info "Usage: VAULT_TOKEN=<token> ./scripts/vault-backup.sh"
  exit 1
fi

# Create backup directory (restrictive perms: snapshots contain secrets)
mkdir -p "$BACKUP_DIR"
chmod 700 "$BACKUP_DIR"

# Generate backup filename
BACKUP_FILE="$BACKUP_DIR/vault-snapshot-$(date +%Y%m%d-%H%M%S).snapshot"
echo "═══════════════════════════════════════════════════════════"
echo " Vault Raft Snapshot Backup"
echo "═══════════════════════════════════════════════════════════"
echo ""
log_info "Creating Raft snapshot..."
log_info "Backup file: $BACKUP_FILE"

# Create snapshot (streamed over ssh into the local backup file).
# SECURITY(review): VAULT_TOKEN is interpolated into the remote command line
# and is visible in `ps` on the remote host — prefer passing it via stdin or
# an env file.
if ssh root@"$PROXMOX_HOST_1" "pct exec $VAULT_CONTAINER -- bash -c 'export VAULT_ADDR=http://127.0.0.1:8200 && export VAULT_TOKEN=$VAULT_TOKEN && vault operator raft snapshot save -'" > "$BACKUP_FILE" 2>/dev/null; then
  BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
  log_success "Snapshot created successfully ($BACKUP_SIZE)"
else
  # BUGFIX: remove the empty/partial file so a failed run does not leave a
  # bogus snapshot behind that would later count as a valid backup.
  rm -f -- "$BACKUP_FILE"
  log_error "Failed to create snapshot"
  exit 1
fi

# Compress backup
log_info "Compressing backup..."
if gzip "$BACKUP_FILE"; then
  BACKUP_FILE="${BACKUP_FILE}.gz"
  BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
  log_success "Backup compressed ($BACKUP_SIZE)"
else
  log_warn "Compression failed, keeping uncompressed backup"
fi

# Clean up backups older than the retention window.
log_info "Cleaning up backups older than $RETENTION_DAYS days..."
find "$BACKUP_DIR" -name "vault-snapshot-*.snapshot*" -type f -mtime +"$RETENTION_DAYS" -delete
# BUGFIX: this count is of backups REMAINING after cleanup; renamed from the
# misleading DELETED_COUNT.
RETAINED_COUNT=$(find "$BACKUP_DIR" -name "vault-snapshot-*.snapshot*" -type f | wc -l)
log_success "Retained $RETAINED_COUNT backup(s)"

# Append to the backup index (timestamp | path | size).
BACKUP_INDEX="$BACKUP_DIR/backup-index.txt"
echo "$(date -Iseconds) | $BACKUP_FILE | $(du -h "$BACKUP_FILE" | cut -f1)" >> "$BACKUP_INDEX"
log_success "Backup index updated"
echo ""
log_success "✅ Backup completed successfully"
log_info "Backup location: $BACKUP_FILE"
log_info "To restore: vault operator raft snapshot restore $BACKUP_FILE"
echo ""

View File

@@ -0,0 +1,343 @@
#!/usr/bin/env bash
# Configure all Cloudflare DNS records for all 19 domains
# Uses provided API key to ensure all DNS is correctly setup
# All domains point to public IP: 76.53.10.36
# Temporarily disable strict mode for .env sourcing
# (disables -e, -u and pipefail so a malformed .env cannot kill the script)
set +euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
# Leveled loggers; note these write to stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Source .env file if it exists (with error handling)
if [ -f "$PROJECT_ROOT/.env" ]; then
source "$PROJECT_ROOT/.env" 2>/dev/null || true
fi
# Re-enable strict mode
set -euo pipefail
# Cloudflare API Key (provided)
# SECURITY(review): a live-looking API token is committed here as a default.
# Rotate this credential immediately and supply CLOUDFLARE_API_TOKEN only
# via the environment or .env — never in source control.
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-JSEO_sruWB6lf1id77gtI7HOLVdhkhaR2goPEJIk}"
# Public IP for all domains
PUBLIC_IP="${PUBLIC_IP:-76.53.10.36}"
# Zone IDs (from .env or environment variables)
ZONE_SANKOFA_NEXUS="${CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS:-}"
ZONE_D_BIS_ORG="${CLOUDFLARE_ZONE_ID_D_BIS_ORG:-${CLOUDFLARE_ZONE_ID:-}}"
ZONE_MIM4U_ORG="${CLOUDFLARE_ZONE_ID_MIM4U_ORG:-}"
ZONE_DEFI_ORACLE_IO="${CLOUDFLARE_ZONE_ID_DEFI_ORACLE_IO:-}"
# All domains configuration
# Map: fully-qualified record name -> owning Cloudflare zone name.
declare -A DOMAIN_ZONES=(
["sankofa.nexus"]="sankofa.nexus"
["www.sankofa.nexus"]="sankofa.nexus"
["phoenix.sankofa.nexus"]="sankofa.nexus"
["www.phoenix.sankofa.nexus"]="sankofa.nexus"
["the-order.sankofa.nexus"]="sankofa.nexus"
["explorer.d-bis.org"]="d-bis.org"
["rpc-http-pub.d-bis.org"]="d-bis.org"
["rpc-ws-pub.d-bis.org"]="d-bis.org"
["rpc-http-prv.d-bis.org"]="d-bis.org"
["rpc-ws-prv.d-bis.org"]="d-bis.org"
["dbis-admin.d-bis.org"]="d-bis.org"
["dbis-api.d-bis.org"]="d-bis.org"
["dbis-api-2.d-bis.org"]="d-bis.org"
["secure.d-bis.org"]="d-bis.org"
["mim4u.org"]="mim4u.org"
["www.mim4u.org"]="mim4u.org"
["secure.mim4u.org"]="mim4u.org"
["training.mim4u.org"]="mim4u.org"
["rpc.public-0138.defi-oracle.io"]="defi-oracle.io"
)
# Issue a Cloudflare v4 API request.
# Arguments: $1 HTTP method, $2 full URL, $3 optional JSON body.
# Outputs: the raw API response on stdout.
cf_api_request() {
    local http_method="$1" endpoint="$2" body="${3:-}"
    local curl_args=(
        -s -X "$http_method" "$endpoint"
        -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN"
        -H "Content-Type: application/json"
    )
    if [ -n "$body" ]; then
        curl_args+=(--data "$body")
    fi
    curl "${curl_args[@]}"
}
# Resolve the Cloudflare zone ID for a zone name.
# Resolution order: per-zone environment override, API lookup by name, then
# a scan of all visible zones for one that serves a known DNS record.
# Outputs: the zone ID on stdout. Callers capture this with $(...), so ALL
#          logging inside this function must go to stderr.
# Returns: 0 with the ID on stdout, 1 if the zone ID cannot be determined.
get_zone_id() {
    local zone_name="$1"
    # 1) Environment-variable overrides take priority.
    case "$zone_name" in
        "sankofa.nexus")
            if [ -n "$ZONE_SANKOFA_NEXUS" ]; then
                echo "$ZONE_SANKOFA_NEXUS"
                return 0
            fi
            ;;
        "d-bis.org")
            if [ -n "$ZONE_D_BIS_ORG" ]; then
                echo "$ZONE_D_BIS_ORG"
                return 0
            fi
            ;;
        "mim4u.org")
            if [ -n "$ZONE_MIM4U_ORG" ]; then
                echo "$ZONE_MIM4U_ORG"
                return 0
            fi
            ;;
        "defi-oracle.io")
            if [ -n "$ZONE_DEFI_ORACLE_IO" ]; then
                echo "$ZONE_DEFI_ORACLE_IO"
                return 0
            fi
            ;;
    esac
    # 2) Direct API lookup by zone name.
    # BUGFIX: log output previously went to stdout and was captured into the
    # zone ID by $(get_zone_id ...) callers; all log_* calls in this function
    # are now redirected to stderr.
    log_info "Getting zone ID for: $zone_name" >&2
    local response zone_id zone_status
    response=$(cf_api_request "GET" "https://api.cloudflare.com/client/v4/zones?name=${zone_name}" || true)
    zone_id=$(echo "$response" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
    zone_status=$(echo "$response" | jq -r '.success // false' 2>/dev/null || echo "false")
    if [ "$zone_status" = "true" ] && [ -n "$zone_id" ] && [ "$zone_id" != "null" ]; then
        echo "$zone_id"
        return 0
    fi
    # 3) Fallback: scan every visible zone for a known record of this domain.
    log_info "  Trying to get zone ID from existing DNS record..." >&2
    local test_domain=""
    case "$zone_name" in
        "sankofa.nexus") test_domain="sankofa.nexus" ;;
        "d-bis.org") test_domain="explorer.d-bis.org" ;;
        "mim4u.org") test_domain="mim4u.org" ;;
        "defi-oracle.io") test_domain="rpc.public-0138.defi-oracle.io" ;;
    esac
    if [ -n "$test_domain" ]; then
        local all_zones zone_count
        all_zones=$(cf_api_request "GET" "https://api.cloudflare.com/client/v4/zones" || true)
        zone_count=$(echo "$all_zones" | jq -r '.result | length' 2>/dev/null || echo "0")
        if [ "$zone_count" -gt 0 ]; then
            local i check_zone_id check_zone_name dns_check has_record
            for i in $(seq 0 $((zone_count - 1))); do
                check_zone_id=$(echo "$all_zones" | jq -r ".result[$i].id" 2>/dev/null || echo "")
                check_zone_name=$(echo "$all_zones" | jq -r ".result[$i].name" 2>/dev/null || echo "")
                if [ -n "$check_zone_id" ] && [ "$check_zone_id" != "null" ]; then
                    # Does this zone hold a DNS record for our test domain?
                    dns_check=$(cf_api_request "GET" "https://api.cloudflare.com/client/v4/zones/${check_zone_id}/dns_records?name=${test_domain}" || true)
                    has_record=$(echo "$dns_check" | jq -r '.result | length' 2>/dev/null || echo "0")
                    if [ "$has_record" -gt 0 ] || [ "$check_zone_name" = "$zone_name" ]; then
                        echo "$check_zone_id"
                        return 0
                    fi
                fi
            done
        fi
    fi
    log_error "Failed to get zone ID for $zone_name" >&2
    log_warn " Please provide zone ID via environment variable:" >&2
    case "$zone_name" in
        "sankofa.nexus") log_warn " CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS=your-zone-id" >&2 ;;
        "d-bis.org") log_warn " CLOUDFLARE_ZONE_ID_D_BIS_ORG=your-zone-id" >&2 ;;
        "mim4u.org") log_warn " CLOUDFLARE_ZONE_ID_MIM4U_ORG=your-zone-id" >&2 ;;
        "defi-oracle.io") log_warn " CLOUDFLARE_ZONE_ID_DEFI_ORACLE_IO=your-zone-id" >&2 ;;
    esac
    return 1
}
# Fetch the existing A record for a domain as a JSON object (empty if none).
# Arguments: $1 zone ID, $2 fully-qualified record name.
get_dns_record() {
    local zid="$1" fqdn="$2"
    cf_api_request "GET" "https://api.cloudflare.com/client/v4/zones/${zid}/dns_records?name=${fqdn}&type=A" \
        | jq -r '.result[0] // empty' 2>/dev/null || echo ""
}
# Create the A record for a domain, or update the existing one in place.
# Arguments: $1 zone ID, $2 fully-qualified record name, $3 IPv4 address.
# Returns: 0 on API success, 1 otherwise.
create_or_update_dns_record() {
    local zid="$1" fqdn="$2" addr="$3"
    # Look up any existing A record so we update rather than duplicate.
    local current rec_id
    current=$(get_dns_record "$zid" "$fqdn") || true
    rec_id=$(echo "$current" | jq -r '.id // empty' 2>/dev/null || echo "")
    # DNS-only (unproxied) A record with automatic TTL (ttl: 1).
    local payload
    payload=$(jq -n \
        --arg name "$fqdn" \
        --arg content "$addr" \
        '{
            type: "A",
            name: $name,
            content: $content,
            proxied: false,
            ttl: 1
        }')
    local reply
    if [ -n "$rec_id" ] && [ "$rec_id" != "null" ]; then
        log_info "  Updating existing DNS record: $fqdn"
        reply=$(cf_api_request "PUT" "https://api.cloudflare.com/client/v4/zones/${zid}/dns_records/${rec_id}" "$payload")
    else
        log_info "  Creating new DNS record: $fqdn"
        reply=$(cf_api_request "POST" "https://api.cloudflare.com/client/v4/zones/${zid}/dns_records" "$payload")
    fi
    if [ "$(echo "$reply" | jq -r '.success // false' 2>/dev/null || echo "false")" = "true" ]; then
        log_success "  ✓ DNS record configured: $fqdn → $addr"
        return 0
    fi
    local err
    err=$(echo "$reply" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "Unknown error")
    log_error "  ✗ Failed to configure DNS: $fqdn - $err"
    return 1
}
# Main execution: resolve zone IDs, then create/update one A record per
# configured domain, pointing everything at $PUBLIC_IP.
main() {
    echo ""
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo "🌐 Cloudflare DNS Configuration"
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
    log_info "Public IP: $PUBLIC_IP"
    log_info "Total domains: ${#DOMAIN_ZONES[@]}"
    echo ""
    # Get zone IDs for all unique zones
    log_info "Step 1: Getting zone IDs..."
    declare -A ZONE_IDS
    local unique_zones=($(printf '%s\n' "${DOMAIN_ZONES[@]}" | sort -u))
    local missing_zones=()
    local zone zone_id
    for zone in "${unique_zones[@]}"; do
        # get_zone_id logs to stderr, so only the ID lands in zone_id;
        # `|| true` keeps set -e from aborting on a lookup failure.
        zone_id=$(get_zone_id "$zone") || true
        if [ -n "$zone_id" ] && [ "$zone_id" != "null" ]; then
            ZONE_IDS["$zone"]="$zone_id"
            log_success "  Zone: $zone → $zone_id"
        else
            log_warn "  Could not get zone ID for: $zone"
            missing_zones+=("$zone")
        fi
    done
    echo ""
    # If zone IDs are missing, provide instructions
    if [ ${#missing_zones[@]} -gt 0 ]; then
        log_warn "Some zone IDs could not be automatically detected."
        log_info "To get zone IDs:"
        log_info "  1. Go to Cloudflare Dashboard: https://dash.cloudflare.com"
        log_info "  2. Select each domain zone"
        log_info "  3. Scroll down to 'API' section on the right sidebar"
        log_info "  4. Copy the 'Zone ID'"
        log_info ""
        log_info "Then set environment variables:"
        local mz
        for mz in "${missing_zones[@]}"; do
            case "$mz" in
                "sankofa.nexus")
                    log_info "  export CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS=your-zone-id" ;;
                "d-bis.org")
                    log_info "  export CLOUDFLARE_ZONE_ID_D_BIS_ORG=your-zone-id" ;;
                "mim4u.org")
                    log_info "  export CLOUDFLARE_ZONE_ID_MIM4U_ORG=your-zone-id" ;;
                "defi-oracle.io")
                    log_info "  export CLOUDFLARE_ZONE_ID_DEFI_ORACLE_IO=your-zone-id" ;;
            esac
        done
        echo ""
        if [ ${#ZONE_IDS[@]} -eq 0 ]; then
            log_error "No zone IDs available. Cannot proceed."
            exit 1
        else
            log_warn "Proceeding with available zone IDs. Some domains may be skipped."
        fi
    fi
    echo ""
    # Configure DNS records
    log_info "Step 2: Configuring DNS records..."
    local success_count=0
    local fail_count=0
    local domain
    for domain in "${!DOMAIN_ZONES[@]}"; do
        local zone_name="${DOMAIN_ZONES[$domain]}"
        # BUGFIX: ${ZONE_IDS[$zone_name]} without a default trips `set -u`
        # when the zone has no resolved ID, aborting the script instead of
        # skipping the domain as intended.
        local zone_id="${ZONE_IDS[$zone_name]:-}"
        if [ -z "$zone_id" ] || [ "$zone_id" = "null" ]; then
            log_warn "Skipping $domain - no zone ID for $zone_name"
            fail_count=$((fail_count + 1))
            continue
        fi
        # Tolerate per-domain failures; tally them instead of exiting.
        set +e
        if create_or_update_dns_record "$zone_id" "$domain" "$PUBLIC_IP"; then
            success_count=$((success_count + 1))
        else
            fail_count=$((fail_count + 1))
        fi
        set -e
        # Small delay to avoid rate limiting
        sleep 0.5
    done
    echo ""
    # Summary
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    log_info "Configuration Summary:"
    log_success "  Successful: $success_count"
    if [ $fail_count -gt 0 ]; then
        log_error "  Failed: $fail_count"
    fi
    echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
    echo ""
    if [ $fail_count -eq 0 ]; then
        log_success "✅ All DNS records configured successfully!"
        log_info "DNS changes may take a few minutes to propagate"
        return 0
    else
        log_warn "⚠️ Some DNS records failed to configure"
        return 1
    fi
}
# Run main function
main "$@"

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Configure All Databases - Create databases and users on the Order and DBIS
# PostgreSQL containers, via SSH to the Proxmox host.
#
# Environment:
#   PROXMOX_HOST_R630_01 - Proxmox host address. Defaults to 192.168.11.11
#                          (r630-01, same value the sibling copy hard-codes),
#                          so the script no longer dies with an unhelpful
#                          "unbound variable" error under `set -u`.
set -uo pipefail
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# Logging helpers (ANSI-colored, write to stdout).
log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_error() { echo -e "\033[0;31m[ERROR]\033[0m $1"; }
#######################################
# Create the Order database, role, and grants inside a PostgreSQL CT.
# Globals:   NODE_IP (read) - Proxmox host the SSH session targets
# Arguments: $1 - Proxmox container VMID running PostgreSQL
# Outputs:   psql output plus a success/error log line
#######################################
configure_order_db() {
  local vmid="$1"
  log_info "Configuring Order database on CT $vmid..."
  # The SQL heredoc is carried verbatim inside the SSH command string and is
  # executed by the remote shell via `pct exec ... su - postgres`; keep the
  # quoting layers exactly as they are.
  # NOTE(review): 'order_password' is a hard-coded default credential —
  # rotate it and source it from a secret store before production use.
  # if/else replaces the `cmd && ok || fail` anti-pattern, where `fail`
  # would also run if `ok` itself returned non-zero.
  if ssh -o ConnectTimeout=15 -o StrictHostKeyChecking=no root@${NODE_IP} "
pct exec $vmid -- su - postgres -c \"
psql << 'SQL_EOF'
CREATE DATABASE order_db;
CREATE USER order_user WITH PASSWORD 'order_password';
GRANT ALL PRIVILEGES ON DATABASE order_db TO order_user;
ALTER DATABASE order_db OWNER TO order_user;
\\l order_db
SQL_EOF
\" 2>&1
"; then
    log_success "Order DB configured on CT $vmid"
  else
    log_error "Failed on CT $vmid"
  fi
}
#######################################
# Create the DBIS core database, role, and grants inside a PostgreSQL CT.
# Globals:   NODE_IP (read) - Proxmox host the SSH session targets
# Arguments: $1 - Proxmox container VMID running PostgreSQL
# Outputs:   psql output plus a success/error log line
#######################################
configure_dbis_db() {
  local vmid="$1"
  log_info "Configuring DBIS database on CT $vmid..."
  # NOTE(review): the password below is committed to the repository — rotate
  # it and load it from a secret store. The SQL heredoc travels inside the
  # SSH command string and is run by the remote shell via pct/su.
  # if/else replaces the `cmd && ok || fail` anti-pattern, where `fail`
  # would also run if `ok` itself returned non-zero.
  if ssh -o ConnectTimeout=15 -o StrictHostKeyChecking=no root@${NODE_IP} "
pct exec $vmid -- su - postgres -c \"
psql << 'SQL_EOF'
CREATE DATABASE dbis_core;
CREATE USER dbis WITH PASSWORD '8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771';
GRANT ALL PRIVILEGES ON DATABASE dbis_core TO dbis;
ALTER DATABASE dbis_core OWNER TO dbis;
\\l dbis_core
SQL_EOF
\" 2>&1
"; then
    log_success "DBIS DB configured on CT $vmid"
  else
    log_error "Failed on CT $vmid"
  fi
}
# --- Main sequence: banner, settle delay, then per-container DB setup ---
echo "═══════════════════════════════════════════════════════════"
echo "Configure All Databases"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Wait for PostgreSQL to be ready
log_info "Waiting for PostgreSQL services to be ready..."
sleep 5
# Configure Order databases
log_info "Configuring Order databases..."
order_cts=(10000 10001)
for ct_id in "${order_cts[@]}"; do
  configure_order_db "$ct_id"
  sleep 2
done
# Configure DBIS databases
log_info "Configuring DBIS databases..."
dbis_cts=(10100 10101)
for ct_id in "${dbis_cts[@]}"; do
  configure_dbis_db "$ct_id"
  sleep 2
done
echo ""
log_success "Database configuration complete!"

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Configure All Databases - Create databases and users on the Order and DBIS
# PostgreSQL containers, via SSH to the Proxmox host.
#
# Environment:
#   PROXMOX_HOST_R630_01 - overrides the target host (the sibling copy of
#                          this script reads the same variable); defaults to
#                          the previously hard-coded r630-01 address.
set -uo pipefail
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# Logging helpers (ANSI-colored, write to stdout).
log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_error() { echo -e "\033[0;31m[ERROR]\033[0m $1"; }
#######################################
# Create the Order database, role, and grants inside a PostgreSQL CT.
# Globals:   NODE_IP (read) - Proxmox host the SSH session targets
# Arguments: $1 - Proxmox container VMID running PostgreSQL
# Outputs:   psql output plus a success/error log line
# NOTE(review): `ssh ... && log_success || log_error` means log_error would
# also run if log_success itself failed (A && B || C anti-pattern); and
# 'order_password' is a hard-coded credential - rotate before production.
#######################################
configure_order_db() {
local vmid="$1"
log_info "Configuring Order database on CT $vmid..."
# The SQL heredoc is carried inside the SSH command string and executed by
# the remote shell via `pct exec ... su - postgres`; quoting layers matter.
ssh -o ConnectTimeout=15 -o StrictHostKeyChecking=no root@${NODE_IP} "
pct exec $vmid -- su - postgres -c \"
psql << 'SQL_EOF'
CREATE DATABASE order_db;
CREATE USER order_user WITH PASSWORD 'order_password';
GRANT ALL PRIVILEGES ON DATABASE order_db TO order_user;
ALTER DATABASE order_db OWNER TO order_user;
\\l order_db
SQL_EOF
\" 2>&1
" && log_success "Order DB configured on CT $vmid" || log_error "Failed on CT $vmid"
}
#######################################
# Create the DBIS core database, role, and grants inside a PostgreSQL CT.
# Globals:   NODE_IP (read) - Proxmox host the SSH session targets
# Arguments: $1 - Proxmox container VMID running PostgreSQL
# Outputs:   psql output plus a success/error log line
# NOTE(review): the password below is committed to the repo - rotate it and
# load it from a secret store; `&& ok || fail` also runs `fail` when `ok`
# itself returns non-zero (anti-pattern, harmless here but worth fixing).
#######################################
configure_dbis_db() {
local vmid="$1"
log_info "Configuring DBIS database on CT $vmid..."
# SQL heredoc travels inside the SSH command string; remote shell runs it
# as the postgres superuser through `pct exec ... su - postgres`.
ssh -o ConnectTimeout=15 -o StrictHostKeyChecking=no root@${NODE_IP} "
pct exec $vmid -- su - postgres -c \"
psql << 'SQL_EOF'
CREATE DATABASE dbis_core;
CREATE USER dbis WITH PASSWORD '8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771';
GRANT ALL PRIVILEGES ON DATABASE dbis_core TO dbis;
ALTER DATABASE dbis_core OWNER TO dbis;
\\l dbis_core
SQL_EOF
\" 2>&1
" && log_success "DBIS DB configured on CT $vmid" || log_error "Failed on CT $vmid"
}
# --- Main sequence: banner, settle delay, then per-container DB setup ---
echo "═══════════════════════════════════════════════════════════"
echo "Configure All Databases"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Wait for PostgreSQL to be ready
# NOTE(review): fixed sleeps assume the postgres services come up within 5s;
# a pg_isready poll would be more robust - confirm timing on real hardware.
log_info "Waiting for PostgreSQL services to be ready..."
sleep 5
# Configure Order databases (CTs 10000/10001 are the Order PostgreSQL pair)
log_info "Configuring Order databases..."
for vmid in 10000 10001; do
configure_order_db "$vmid"
sleep 2
done
# Configure DBIS databases (CTs 10100/10101 are the DBIS PostgreSQL pair)
log_info "Configuring DBIS databases..."
for vmid in 10100 10101; do
configure_dbis_db "$vmid"
sleep 2
done
echo ""
log_success "Database configuration complete!"

View File

@@ -11,8 +11,10 @@
set -euo pipefail
# Load IP configuration (script is in scripts/archive/consolidated/config/; repo root is 3 levels up)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/../../../.." && pwd)}"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
RED='\033[0;31m'
@@ -28,8 +30,8 @@ log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Configuration
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
WORK_DIR="${WORK_DIR:-/tmp/besu-chain138-config}"
OUTPUT_DIR="${OUTPUT_DIR:-$PROJECT_ROOT/output/chain138-config}"
WORK_DIR="${WORK_DIR:-$OUTPUT_DIR/.work}"
# All Besu nodes for ChainID 138
# Validators: 1000-1004
@@ -37,27 +39,27 @@ OUTPUT_DIR="${OUTPUT_DIR:-$PROJECT_ROOT/output/chain138-config}"
# RPC: 2500-2502, 2503 (new)
declare -A BESU_NODES=(
# Validators
[1000]="192.168.11.100"
[1001]="192.168.11.101"
[1002]="192.168.11.102"
[1003]="192.168.11.103"
[1004]="192.168.11.104"
[1000]="${IP_VALIDATOR_0:-${IP_VALIDATOR_0:-${IP_VALIDATOR_0:-${IP_VALIDATOR_0:-192.168.11.100}}}}"
[1001]="${IP_VALIDATOR_1:-${IP_VALIDATOR_1:-${IP_VALIDATOR_1:-${IP_VALIDATOR_1:-192.168.11.101}}}}"
[1002]="${IP_VALIDATOR_2:-${IP_VALIDATOR_2:-${IP_VALIDATOR_2:-${IP_VALIDATOR_2:-192.168.11.102}}}}"
[1003]="${IP_VALIDATOR_3:-${IP_VALIDATOR_3:-${IP_VALIDATOR_3:-${IP_VALIDATOR_3:-192.168.11.103}}}}"
[1004]="${IP_VALIDATOR_4:-${IP_VALIDATOR_4:-${IP_VALIDATOR_4:-${IP_VALIDATOR_4:-192.168.11.104}}}}"
# Sentries
[1500]="192.168.11.150"
[1501]="192.168.11.151"
[1502]="192.168.11.152"
[1503]="192.168.11.153"
[1504]="192.168.11.154" # New: besu-sentry-5
[1500]="${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-${IP_BESU_RPC_0:-192.168.11.150}}}}"
[1501]="${IP_BESU_RPC_1:-${IP_BESU_RPC_1:-${IP_BESU_RPC_1:-${IP_BESU_RPC_1:-192.168.11.151}}}}"
[1502]="${IP_BESU_RPC_2:-${IP_BESU_RPC_2:-${IP_BESU_RPC_2:-${IP_BESU_RPC_2:-192.168.11.152}}}}"
[1503]="${IP_BESU_RPC_3:-${IP_BESU_RPC_3:-${IP_BESU_RPC_3:-${IP_BESU_RPC_3:-192.168.11.153}}}}"
[1504]="${IP_BESU_SENTRY:-192.168.11.154}" # New: besu-sentry-5
# RPC Nodes
[2500]="192.168.11.250"
[2501]="192.168.11.251"
[2502]="192.168.11.252"
[2500]="${RPC_ALLTRA_1:-192.168.11.250}"
[2501]="${RPC_ALI_1:-${RPC_ALI_1:-${RPC_ALI_1:-${RPC_ALI_1:-192.168.11.251}}}}"
[2502]="${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-192.168.11.252}}}}"
[2503]="192.168.11.253" # Ali's RPC node (0x8a identity)
[2504]="192.168.11.254" # Ali's RPC node (0x1 identity)
[2505]="192.168.11.255" # Luis's RPC node (0x8a identity)
[2506]="192.168.11.256" # Luis's RPC node (0x1 identity)
[2507]="192.168.11.257" # Putu's RPC node (0x8a identity)
[2508]="192.168.11.258" # Putu's RPC node (0x1 identity)
[2506]="${RPC_LUIS_2:-192.168.11.202}" # Luis's RPC node (0x1 identity)
[2507]="${RPC_PUTU_1:-192.168.11.203}" # Putu's RPC node (0x8a identity)
[2508]="${RPC_PUTU_2:-192.168.11.204}" # Putu's RPC node (0x1 identity)
)
# RPC nodes that should have discovery disabled (report chainID 0x1 to MetaMask for wallet compatibility)
@@ -118,20 +120,26 @@ extract_enode() {
local enode
enode=$(ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- /opt/besu/bin/besu public-key export --node-private-key-file=\"$nodekey_path\" --format=enode 2>/dev/null" || echo "")
if [[ -n "$enode" ]]; then
# Replace IP in enode with actual IP
if [[ -n "$enode" ]] && [[ "$enode" == enode://* ]]; then
echo "$enode" | sed "s/@[0-9.]*:/@${ip}:/"
return 0
fi
# Fallback: Besu may not support --format=enode; get hex and build enode
local hex
hex=$(ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- /opt/besu/bin/besu public-key export --node-private-key-file=\"$nodekey_path\" 2>/dev/null" | grep -oE '0x[0-9a-fA-F]{128}' | tail -1 | sed 's/^0x//')
if [[ -n "$hex" ]] && [[ ${#hex} -eq 128 ]]; then
echo "enode://${hex}@${ip}:30303"
return 0
fi
fi
log_warn "Could not extract enode for VMID $vmid"
return 1
}
# Function to collect all enodes
# Function to collect all enodes (logs to stderr so only path is on stdout)
collect_enodes() {
log_info "=== Collecting Enodes from All Besu Nodes ==="
log_info "=== Collecting Enodes from All Besu Nodes ===" >&2
local collected_file="$WORK_DIR/collected-enodes.txt"
> "$collected_file"
@@ -143,25 +151,25 @@ collect_enodes() {
local ip="${BESU_NODES[$vmid]}"
if ! check_container "$vmid"; then
log_warn "Container $vmid not running, skipping..."
log_warn "Container $vmid not running, skipping..." >&2
((fail_count++))
continue
fi
if enode=$(extract_enode "$vmid" "$ip"); then
if enode=$(extract_enode "$vmid" "$ip" 2>/dev/null); then
echo "$enode" >> "$collected_file"
log_success "VMID $vmid: $enode"
log_success "VMID $vmid: $enode" >&2
((success_count++))
else
log_warn "Failed to extract enode from VMID $vmid"
log_warn "Failed to extract enode from VMID $vmid" >&2
((fail_count++))
fi
done
log_info "Collected $success_count enodes, $fail_count failed"
log_info "Collected $success_count enodes, $fail_count failed" >&2
if [[ $success_count -eq 0 ]]; then
log_error "No enodes collected. Cannot proceed."
log_error "No enodes collected. Cannot proceed." >&2
exit 1
fi
@@ -172,61 +180,52 @@ collect_enodes() {
echo "$collected_file"
}
# Function to generate static-nodes.json
# Function to generate static-nodes.json (logs to stderr so only path is on stdout)
generate_static_nodes() {
local enodes_file=$1
local output_file="$OUTPUT_DIR/static-nodes.json"
log_info "=== Generating static-nodes.json ==="
log_info "=== Generating static-nodes.json ===" >&2
# Create JSON array from enodes
python3 << PYEOF
# Create JSON array from enodes (use env vars to avoid heredoc argv issues)
ENODES_FILE="$enodes_file" OUTPUT_FILE="$output_file" python3 -c '
import json
import sys
if len(sys.argv) < 3:
print("Usage: python3 script.py <enodes_file> <output_file>", file=sys.stderr)
sys.exit(1)
enodes_file = sys.argv[1]
output_file = sys.argv[2]
import os
enodes_file = os.environ.get("ENODES_FILE", "").strip()
output_file = os.environ.get("OUTPUT_FILE", "").strip()
if not enodes_file or not output_file:
print("Error: ENODES_FILE and OUTPUT_FILE must be set", file=__import__("sys").stderr)
exit(1)
enodes = []
try:
with open(enodes_file, 'r') as f:
with open(enodes_file, "r") as f:
for line in f:
enode = line.strip()
if enode and enode.startswith('enode://'):
if enode and enode.startswith("enode://"):
enodes.append(enode)
except FileNotFoundError:
print(f"Error: File not found: {enodes_file}", file=sys.stderr)
sys.exit(1)
# Sort for consistency
print("Error: File not found: " + enodes_file, file=__import__("sys").stderr)
exit(1)
enodes.sort()
with open(output_file, 'w') as f:
with open(output_file, "w") as f:
json.dump(enodes, f, indent=2)
print("Generated static-nodes.json with {} nodes".format(len(enodes)))
PYEOF
"$enodes_file" "$output_file"
print("Generated static-nodes.json with {} nodes".format(len(enodes)), file=__import__("sys").stderr)
'
log_success "Generated: $output_file"
log_success "Generated: $output_file" >&2
echo "$output_file"
}
# Function to generate permissioned-nodes.json
# Function to generate permissioned-nodes.json (logs to stderr so only path is on stdout)
generate_permissioned_nodes() {
local enodes_file=$1
local output_file="$OUTPUT_DIR/permissioned-nodes.json"
log_info "=== Generating permissioned-nodes.json ==="
log_info "=== Generating permissioned-nodes.json ===" >&2
# Same content as static-nodes.json (all nodes must be permissioned)
cp "$OUTPUT_DIR/static-nodes.json" "$output_file"
log_success "Generated: $output_file"
log_success "Generated: $output_file" >&2
echo "$output_file"
}
@@ -235,13 +234,16 @@ deploy_to_container() {
local vmid=$1
local static_nodes_file=$2
local permissioned_nodes_file=$3
# Ensure path vars are single-line (no log/ANSI leakage into scp)
static_nodes_file=$(printf '%s' "$static_nodes_file" | head -1 | tr -d '\r\n')
permissioned_nodes_file=$(printf '%s' "$permissioned_nodes_file" | head -1 | tr -d '\r\n')
log_info "Deploying to VMID $vmid..."
# Create directories if they don't exist
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- mkdir -p ${BESU_DATA_PATH} ${BESU_PERMISSIONS_PATH} ${BESU_GENESIS_PATH} 2>/dev/null || true"
# Copy static-nodes.json
# Copy static-nodes.json (host must be clean; PROXMOX_HOST is set at top)
scp -o StrictHostKeyChecking=accept-new \
"$static_nodes_file" \
"root@${PROXMOX_HOST}:/tmp/static-nodes.json"
@@ -284,8 +286,8 @@ configure_discovery() {
config_found=true
if [[ "$disable" == "true" ]]; then
# Disable discovery
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- sed -i 's/^discovery-enabled=.*/discovery-enabled=false/' $config_file && sed -i 's/^# discovery-enabled=.*/discovery-enabled=false/' $config_file || echo 'discovery-enabled=false' >> $config_file"
# Disable discovery (all commands run inside container via bash -c)
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- bash -c \"sed -i 's/^discovery-enabled=.*/discovery-enabled=false/' $config_file 2>/dev/null || true; grep -q 'discovery-enabled' $config_file 2>/dev/null || echo 'discovery-enabled=false' >> $config_file\""
log_success "Discovery disabled in $config_file"
else
# Enable discovery (default)
@@ -373,7 +375,7 @@ restart_besu_service() {
if [[ -n "$service_name" ]]; then
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- systemctl restart $service_name 2>/dev/null || true"
sleep 3
sleep 1
if ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- systemctl is-active --quiet $service_name 2>/dev/null"; then
log_success "Service $service_name restarted successfully"
@@ -392,16 +394,17 @@ main() {
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Step 1: Collect enodes
# Step 1: Collect enodes (capture only the path line)
local enodes_file
enodes_file=$(collect_enodes)
enodes_file=$(collect_enodes | head -1 | tr -d '\r\n')
[[ -z "$enodes_file" || ! -f "$enodes_file" ]] && { log_error "Collected enodes file missing: $enodes_file"; exit 1; }
# Step 2: Generate configuration files
local static_nodes_file
static_nodes_file=$(generate_static_nodes "$enodes_file")
static_nodes_file=$(generate_static_nodes "$enodes_file" | head -1 | tr -d '\r\n')
local permissioned_nodes_file
permissioned_nodes_file=$(generate_permissioned_nodes "$enodes_file")
permissioned_nodes_file=$(generate_permissioned_nodes "$enodes_file" | head -1 | tr -d '\r\n')
# Step 3: Deploy to all containers
log_info "=== Deploying Configurations to All Besu Nodes ==="

View File

@@ -0,0 +1,289 @@
#!/usr/bin/env bash
# Configure Besu RPC nodes (2500, 2501, 2502) with correct configurations
# This script ensures each RPC node has the correct config based on its role
#
# Node Roles:
# 2500 = Core - No public access, all features enabled (ADMIN, DEBUG, TRACE)
# 2501 = Prv (Permissioned) - Public permissioned access, non-Admin features only
# 2502 = Pub (Public) - Public non-auth access, minimal wallet features
set -euo pipefail

# Load IP configuration (optional; script works without it).
# SCRIPT_DIR/PROJECT_ROOT were previously computed twice back-to-back;
# the duplicate assignments have been removed.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
CONFIG_DIR="$PROJECT_ROOT/smom-dbis-138/config"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers (write to stdout).
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Check if running on Proxmox host (pct is the Proxmox container CLI).
if ! command -v pct &>/dev/null; then
log_error "This script must be run on Proxmox host (pct command not found)"
exit 1
fi

# RPC Node Configuration Mapping: VMID -> TOML config file shipped from repo.
declare -A RPC_CONFIGS
RPC_CONFIGS[2500]="config-rpc-core.toml"
RPC_CONFIGS[2501]="config-rpc-perm.toml"
RPC_CONFIGS[2502]="config-rpc-public.toml"
# Human-readable role descriptions used in log output.
declare -A RPC_ROLES
RPC_ROLES[2500]="Core (no public access, all features)"
RPC_ROLES[2501]="Permissioned (public permissioned, non-Admin features)"
RPC_ROLES[2502]="Public (public non-auth, minimal wallet features)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Besu RPC Nodes Configuration Script"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Ensure a container is up before we touch it; try to boot it when stopped.
# Returns 0 when running (or successfully started), 1 when the start failed.
check_container() {
  local ct_id=$1
  # Already running? Nothing to do.
  if pct status "$ct_id" 2>/dev/null | grep -q running; then
    return 0
  fi
  log_warn "Container $ct_id is not running. Starting..."
  if ! pct start "$ct_id"; then
    log_error "Failed to start container $ct_id"
    return 1
  fi
  # Give the freshly started CT a moment to bring its services up.
  sleep 5
  return 0
}
# Push a Besu TOML config from the repo into a container's /etc/besu.
# Arguments: $1 - VMID, $2 - config file name under $CONFIG_DIR.
# Returns 0 on success, 1 when the source is missing or the push fails.
copy_config() {
  local ct_id=$1
  local cfg_name=$2
  local target="/etc/besu/$cfg_name"
  local src="$CONFIG_DIR/$cfg_name"
  # Refuse to continue when the repo copy does not exist.
  if [[ ! -f "$src" ]]; then
    log_error "Config file not found: $src"
    return 1
  fi
  log_info "Copying $cfg_name to VMID $ct_id..."
  if ! pct push "$ct_id" "$src" "$target"; then
    log_error "Failed to copy config to container $ct_id"
    return 1
  fi
  # Ownership fix is best-effort: the besu user may not exist on every CT.
  pct exec "$ct_id" -- chown besu:besu "$target" 2>/dev/null || true
  log_success "Config copied to $ct_id"
  return 0
}
# Point the besu-rpc systemd unit inside a container at the freshly pushed
# config file, then reload systemd so the change is picked up.
# Arguments: $1 - VMID, $2 - config file name (e.g. config-rpc-core.toml).
update_service() {
local vmid=$1
local config_file=$2
log_info "Updating systemd service for VMID $vmid..."
# Update service file to use correct config: the sed rewrites the
# '--config-file=$BESU_CONFIG/<anything>' argument in place. The escaped \$
# keeps the literal $BESU_CONFIG environment reference used by the unit file.
pct exec "$vmid" -- sed -i "s|--config-file=\$BESU_CONFIG/[^ ]*|--config-file=\$BESU_CONFIG/$config_file|g" \
/etc/systemd/system/besu-rpc.service 2>/dev/null || {
log_warn "Could not update service file (may need manual update)"
}
# Best-effort daemon-reload; failures are tolerated (caller still restarts).
pct exec "$vmid" -- systemctl daemon-reload 2>/dev/null || true
}
# Verify that the config deployed into a container matches its role.
# Arguments: $1 - VMID, $2 - expected config file name under /etc/besu.
# Returns 1 only if the config file is missing entirely; role-specific
# mismatches are reported as warnings, not failures.
verify_config() {
local vmid=$1
local expected_config=$2
local role="${RPC_ROLES[$vmid]}"
log_info "Verifying configuration for VMID $vmid ($role)..."
local config_path="/etc/besu/$expected_config"
# Check if config file exists
if ! pct exec "$vmid" -- test -f "$config_path" 2>/dev/null; then
log_error "Config file not found: $config_path"
return 1
fi
log_success "Config file exists: $config_path"
# Verify specific settings based on node type. Each branch greps the TOML
# inside the container; all greps are best-effort (stderr suppressed).
case $vmid in
2500)
# Core: Should have ADMIN, DEBUG, TRACE, discovery disabled
log_info " Checking Core RPC settings..."
if pct exec "$vmid" -- grep -q 'rpc-http-api=.*"ADMIN"' "$config_path" 2>/dev/null; then
log_success " ✓ ADMIN API enabled"
else
log_warn " ✗ ADMIN API not found (should be enabled)"
fi
if pct exec "$vmid" -- grep -q 'discovery-enabled=false' "$config_path" 2>/dev/null; then
log_success " ✓ Discovery disabled (no public routing)"
else
log_warn " ✗ Discovery may be enabled (should be disabled)"
fi
;;
2501)
# Permissioned: Should NOT have ADMIN, should have account permissions
log_info " Checking Permissioned RPC settings..."
if ! pct exec "$vmid" -- grep -q 'rpc-http-api=.*"ADMIN"' "$config_path" 2>/dev/null; then
log_success " ✓ ADMIN API not enabled (correct)"
else
log_warn " ✗ ADMIN API found (should be removed)"
fi
if pct exec "$vmid" -- grep -q 'permissions-accounts-config-file-enabled=true' "$config_path" 2>/dev/null; then
log_success " ✓ Account permissions enabled"
else
log_warn " ✗ Account permissions not enabled"
fi
;;
2502)
# Public: Should have minimal APIs (ETH, NET, WEB3 only).
log_info " Checking Public RPC settings..."
# NOTE(review): `local x=$(...)` masks the command's exit status (SC2155);
# harmless here because the substitution ends in `|| echo ""`.
local api_line=$(pct exec "$vmid" -- grep 'rpc-http-api=' "$config_path" 2>/dev/null || echo "")
if echo "$api_line" | grep -q '"ETH"' && \
echo "$api_line" | grep -q '"NET"' && \
echo "$api_line" | grep -q '"WEB3"' && \
! echo "$api_line" | grep -q '"ADMIN"'; then
log_success " ✓ Minimal APIs enabled (ETH, NET, WEB3)"
else
log_warn " ✗ API configuration may not be minimal"
fi
if ! pct exec "$vmid" -- grep -q 'permissions-accounts-config-file-enabled=true' "$config_path" 2>/dev/null; then
log_success " ✓ No account permissions (public non-auth)"
else
log_warn " ✗ Account permissions enabled (should be disabled for public)"
fi
;;
esac
return 0
}
# Sanity check: detect whether the 2501 (permissioned) and 2502 (public)
# configs look swapped by inspecting each node's rpc-http-api line.
# Emits warnings only; never fails the script.
check_reversed() {
  log_info ""
  log_info "Checking if 2501 and 2502 are reversed..."
  # Declare and assign separately so the command substitution's exit status
  # is not masked by `local` (ShellCheck SC2155); the inner `|| echo ""`
  # already makes the lookups best-effort.
  local perm_api_line public_api_line
  perm_api_line=$(pct exec 2501 -- grep 'rpc-http-api=' /etc/besu/config-rpc-perm.toml 2>/dev/null | head -1 || echo "")
  public_api_line=$(pct exec 2502 -- grep 'rpc-http-api=' /etc/besu/config-rpc-public.toml 2>/dev/null | head -1 || echo "")
  # 2501 must not expose ADMIN; 2502 must stay minimal (no ADMIN/TXPOOL/QBFT).
  if echo "$perm_api_line" | grep -q '"ADMIN"'; then
    log_warn "VMID 2501 has ADMIN API - may need to check if reversed"
  fi
  if echo "$public_api_line" | grep -q '"ADMIN"\|"TXPOOL"\|"QBFT"'; then
    log_warn "VMID 2502 has non-minimal APIs - may need to check if reversed"
  fi
  log_info "Current configuration check complete"
}
# Main deployment: for each RPC node, stop Besu, push the role-specific
# config, rewrite the systemd unit, verify, and restart. A failure on one
# VMID skips to the next rather than aborting the whole run.
main() {
log_info "Starting RPC nodes configuration..."
log_info ""
# Process each RPC node
for vmid in 2500 2501 2502; do
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Processing VMID $vmid: ${RPC_ROLES[$vmid]}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Check container is up (check_container boots it if stopped)
if ! check_container "$vmid"; then
log_error "Skipping VMID $vmid (container not available)"
continue
fi
# Get config file for this role; `local` inside a loop is re-evaluated
# each iteration, which is fine in bash.
local config_file="${RPC_CONFIGS[$vmid]}"
if [[ -z "$config_file" ]]; then
log_error "No config mapping for VMID $vmid"
continue
fi
# Stop service before replacing its config (best-effort; CT may be fresh)
log_info "Stopping Besu service..."
pct exec "$vmid" -- systemctl stop besu-rpc.service 2>/dev/null || true
sleep 2
# Copy config
if ! copy_config "$vmid" "$config_file"; then
log_error "Failed to copy config for VMID $vmid"
continue
fi
# Update systemd unit to reference the new config
update_service "$vmid" "$config_file"
# Verify config matches the node's role (warnings only)
verify_config "$vmid" "$config_file"
# Start service
log_info "Starting Besu service..."
pct exec "$vmid" -- systemctl start besu-rpc.service 2>/dev/null || {
log_error "Failed to start service on VMID $vmid"
log_info "Check logs: pct exec $vmid -- journalctl -u besu-rpc.service -n 50"
continue
}
sleep 3
# Check service status after a short settle delay
if pct exec "$vmid" -- systemctl is-active --quiet besu-rpc.service 2>/dev/null; then
log_success "Service started successfully on VMID $vmid"
else
log_warn "Service may not be running on VMID $vmid"
log_info "Check status: pct exec $vmid -- systemctl status besu-rpc.service"
fi
done
# Check if the 2501/2502 configs look swapped (warnings only)
check_reversed
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "Configuration complete!"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Next steps:"
log_info "1. Verify services are running:"
log_info " pct exec 2500 -- systemctl status besu-rpc.service"
log_info " pct exec 2501 -- systemctl status besu-rpc.service"
log_info " pct exec 2502 -- systemctl status besu-rpc.service"
log_info ""
log_info "2. Test RPC endpoints:"
log_info " curl -X POST http://${RPC_ALLTRA_1:-192.168.11.250}:8545 -H 'Content-Type: application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}'"
log_info ""
log_info "3. Check logs if issues:"
log_info " pct exec 2500 -- journalctl -u besu-rpc.service -f"
}
# Run main function, forwarding CLI arguments
main "$@"

View File

@@ -0,0 +1,336 @@
#!/bin/bash
# Configure and start Blockscout with correct settings
# Run this INSIDE the Blockscout container (VMID 5000)
set -euo pipefail
# Load IP configuration (optional; defaults below apply when absent)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Configuration
CHAIN_ID=138
RPC_URL="http://${RPC_ALLTRA_1:-192.168.11.250}:8545"
WS_URL="ws://${RPC_ALLTRA_1:-192.168.11.250}:8546"
# Default matches the 192.168.11.140 fallback used further down this script;
# the bare "${IP_BLOCKSCOUT}" previously aborted under `set -u` whenever the
# optional ip-addresses.conf was missing, even though that file is sourced
# with `|| true` above.
BLOCKSCOUT_HOST="${IP_BLOCKSCOUT:-192.168.11.140}"
DB_PASSWORD="${DB_PASSWORD:-blockscout}"
# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
# Logging helpers (write to stdout).
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Banner and effective configuration summary.
echo "════════════════════════════════════════════════════════"
echo "Blockscout Configuration and Startup"
echo "════════════════════════════════════════════════════════"
echo ""
echo "Configuration:"
echo " Chain ID: $CHAIN_ID"
echo " RPC URL: $RPC_URL"
echo " WS URL: $WS_URL"
echo " Host: $BLOCKSCOUT_HOST"
echo ""
# Step 1: Check Docker.
# NOTE(review): this auto-installs docker.io via apt when missing - a
# significant side effect; it assumes a Debian/Ubuntu container with root.
log_info "Step 1: Checking Docker..."
if ! command -v docker &> /dev/null; then
log_error "Docker not found. Installing..."
apt-get update -qq
apt-get install -y -qq docker.io
systemctl enable docker
systemctl start docker
fi
log_success "Docker: $(docker --version 2>/dev/null | head -1 || echo 'installed')"
# Accept either the standalone docker-compose binary or the compose plugin.
# (The trailing `2>&1` after `&> /dev/null` is redundant but harmless.)
if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null 2>&1; then
log_error "Docker Compose not found. Installing..."
apt-get install -y -qq docker-compose
fi
log_success "Docker Compose: available"
echo ""
# Step 2: Find or create Blockscout directory; /opt is preferred, /root is
# accepted for pre-existing installs. The script cd's there for compose.
log_info "Step 2: Locating Blockscout directory..."
if [ -d /opt/blockscout ]; then
BLOCKSCOUT_DIR="/opt/blockscout"
elif [ -d /root/blockscout ]; then
BLOCKSCOUT_DIR="/root/blockscout"
else
BLOCKSCOUT_DIR="/opt/blockscout"
mkdir -p "$BLOCKSCOUT_DIR"
log_info "Created directory: $BLOCKSCOUT_DIR"
fi
cd "$BLOCKSCOUT_DIR"
log_success "Blockscout directory: $BLOCKSCOUT_DIR"
echo ""
# Step 3: Create/Update docker-compose.yml with correct settings.
# The heredoc delimiter is quoted ('EOF') so the shell writes the file
# verbatim; the ${VAR:-default} expressions are left for docker-compose's
# own interpolation at `up` time. NOTE(review): compose only interpolates
# *exported* variables (or a .env beside the compose file) - values sourced
# from ip-addresses.conf without `export` will fall back to the defaults
# shown here; confirm that is intended.
log_info "Step 3: Configuring docker-compose.yml..."
cat > docker-compose.yml <<'EOF'
version: '3.8'
services:
postgres:
image: postgres:15-alpine
container_name: blockscout-postgres
environment:
POSTGRES_USER: blockscout
POSTGRES_PASSWORD: blockscout
POSTGRES_DB: blockscout
volumes:
- postgres-data:/var/lib/postgresql/data
restart: unless-stopped
networks:
- blockscout-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U blockscout"]
interval: 10s
timeout: 5s
retries: 5
blockscout:
image: blockscout/blockscout:latest
container_name: blockscout
depends_on:
postgres:
condition: service_healthy
environment:
- DATABASE_URL=postgresql://blockscout:blockscout@postgres:5432/blockscout
- ETHEREUM_JSONRPC_HTTP_URL=http://${RPC_ALLTRA_1:-192.168.11.250}:8545
- ETHEREUM_JSONRPC_WS_URL=ws://${RPC_ALLTRA_1:-192.168.11.250}:8546
- ETHEREUM_JSONRPC_TRACE_URL=http://${RPC_ALLTRA_1:-192.168.11.250}:8545
- ETHEREUM_JSONRPC_VARIANT=besu
- CHAIN_ID=138
- COIN=ETH
- BLOCKSCOUT_HOST=${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}
- BLOCKSCOUT_PROTOCOL=http
- SECRET_KEY_BASE=CHANGEME_SECRET_KEY_BASE
- POOL_SIZE=10
- ECTO_USE_SSL=false
ports:
- "4000:4000"
volumes:
- blockscout-data:/app/apps/explorer/priv/static
restart: unless-stopped
networks:
- blockscout-network
volumes:
postgres-data:
blockscout-data:
networks:
blockscout-network:
driver: bridge
EOF
# Generate and replace secret key: a placeholder is written above, then
# substituted here so the real secret never lives in this script.
SECRET_KEY=$(openssl rand -hex 64)
sed -i "s|SECRET_KEY_BASE=CHANGEME_SECRET_KEY_BASE|SECRET_KEY_BASE=${SECRET_KEY}|" docker-compose.yml
log_success "docker-compose.yml configured"
echo ""
# Step 4: Stop existing containers (tolerates neither compose variant
# being present or nothing running yet).
log_info "Step 4: Stopping existing containers..."
docker-compose down 2>/dev/null || docker compose down 2>/dev/null || true
log_success "Existing containers stopped"
echo ""
# Step 5: Start PostgreSQL and poll pg_isready (up to 30 x 2s = 60s).
log_info "Step 5: Starting PostgreSQL..."
docker-compose up -d postgres || docker compose up -d postgres
log_info "Waiting for PostgreSQL to be ready..."
pg_ready=false
for i in {1..30}; do
  if docker exec blockscout-postgres pg_isready -U blockscout >/dev/null 2>&1; then
    pg_ready=true
    log_success "PostgreSQL is ready"
    break
  fi
  echo -n "."
  sleep 2
done
# Previously the loop fell through silently on timeout; surface it so the
# operator knows Blockscout may fail to connect.
if [ "$pg_ready" != "true" ]; then
  log_warn "PostgreSQL did not become ready within 60s; continuing anyway"
fi
echo ""
echo ""
# Step 6: Start Blockscout (depends_on + healthcheck gate the real start).
log_info "Step 6: Starting Blockscout..."
docker-compose up -d blockscout || docker compose up -d blockscout
log_success "Blockscout started (may take 1-2 minutes to fully initialize)"
echo ""
# Step 7: Configure and start Nginx as a reverse proxy in front of :4000.
log_info "Step 7: Configuring Nginx..."
if ! command -v nginx &> /dev/null; then
  log_info "Installing Nginx..."
  apt-get update -qq
  apt-get install -y -qq nginx
fi
# Configure Nginx. The heredoc delimiter is quoted so nginx runtime
# variables ($host, $http_upgrade, ...) are written literally. That also
# means shell ${VAR:-default} expressions would NOT be expanded - and nginx
# cannot resolve them either - so server_name uses a placeholder that the
# sed below substitutes with the real address (previously a literal
# '${IP_BLOCKSCOUT:-...}' ended up in the config and broke `nginx -t`).
cat > /etc/nginx/sites-available/blockscout <<'EOF'
server {
    listen 80;
    listen [::]:80;
    server_name __BLOCKSCOUT_HOST__ explorer.d-bis.org;
    client_max_body_size 100M;
    location / {
        proxy_pass http://localhost:4000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
        proxy_read_timeout 300s;
        proxy_connect_timeout 75s;
    }
    location /api {
        proxy_pass http://localhost:4000/api;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }
    location /health {
        proxy_pass http://localhost:4000/api/health;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        access_log off;
    }
}
EOF
# Substitute the real host into server_name (expanded by the shell here).
sed -i "s|__BLOCKSCOUT_HOST__|${IP_BLOCKSCOUT:-192.168.11.140}|" /etc/nginx/sites-available/blockscout
# Enable site and drop the distro default vhost.
ln -sf /etc/nginx/sites-available/blockscout /etc/nginx/sites-enabled/blockscout
rm -f /etc/nginx/sites-enabled/default 2>/dev/null || true
# Test and reload Nginx (under set -e a failed `nginx -t` aborts the script).
nginx -t && systemctl reload nginx
systemctl enable nginx
systemctl start nginx
log_success "Nginx configured and running"
echo ""
# Step 8: Check status of nginx and both docker containers.
log_info "Step 8: Checking service status..."
sleep 5
echo ""
echo "Docker Containers:"
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | head -5
echo ""
echo "Service Status:"
if systemctl is-active --quiet nginx; then
  log_success "Nginx: Running"
else
  log_warn "Nginx: Not running"
fi
# Match on container *names*: in plain `docker ps` output the first column
# is the container ID, so the previous `grep -q "^blockscout "` could never
# match and always reported Blockscout as not running.
if docker ps --format '{{.Names}}' | grep -qx blockscout-postgres; then
  log_success "PostgreSQL: Running"
else
  log_warn "PostgreSQL: Not running"
fi
if docker ps --format '{{.Names}}' | grep -qx blockscout; then
  log_success "Blockscout: Running"
  log_info "Note: Blockscout may take 1-2 minutes to fully start"
else
  log_warn "Blockscout: Not running - check logs: docker logs blockscout"
fi
echo ""
# Step 9: Verify connectivity end to end: chain RPC, Blockscout API, nginx.
log_info "Step 9: Testing connectivity..."
sleep 5
echo ""
echo "Connectivity Tests:"
echo ""
# Test RPC: fetch the current block number from the Besu endpoint.
log_info "Testing RPC endpoint..."
RPC_TEST=$(curl -s -X POST "$RPC_URL" \
-H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' 2>/dev/null || echo "")
if echo "$RPC_TEST" | grep -q '"result"'; then
BLOCK_HEX=$(echo "$RPC_TEST" | grep -o '"result":"[^"]*"' | cut -d'"' -f4)
# bash printf accepts the 0x-prefixed hex directly; falls back to "unknown".
BLOCK_DEC=$(printf "%d" "$BLOCK_HEX" 2>/dev/null || echo "unknown")
log_success "RPC endpoint accessible (Block: $BLOCK_DEC)"
else
log_warn "RPC endpoint may not be accessible"
fi
# Test Blockscout API: poll /api/health up to 6 times, 10s apart (~50s max).
log_info "Testing Blockscout API..."
for i in {1..6}; do
API_TEST=$(curl -s http://localhost:4000/api/health 2>/dev/null || echo "")
if [ -n "$API_TEST" ]; then
log_success "Blockscout API responding: $API_TEST"
break
fi
if [ $i -lt 6 ]; then
log_info "Waiting for Blockscout to start... ($i/6)"
sleep 10
else
log_warn "Blockscout API not responding yet (may need more time)"
log_info "Check logs: docker logs blockscout"
fi
done
# Test Nginx: any 200/301/302 from the proxy counts as working.
log_info "Testing Nginx proxy..."
NGINX_TEST=$(curl -s -o /dev/null -w '%{http_code}' http://localhost/ 2>/dev/null || echo "000")
if [ "$NGINX_TEST" = "200" ] || [ "$NGINX_TEST" = "302" ] || [ "$NGINX_TEST" = "301" ]; then
log_success "Nginx proxy working (HTTP $NGINX_TEST)"
else
log_warn "Nginx returned: HTTP $NGINX_TEST"
fi
echo ""
# Final summary.
# NOTE(review): the nested ${IP_BLOCKSCOUT:-${IP_DEVICE_14:-...}0} defaults
# below look generator-mangled; when all vars are unset they resolve to
# 192.168.11.140 (the trailing 0 concatenates inside the outer default) -
# confirm that address against the actual Blockscout CT.
echo "════════════════════════════════════════════════════════"
echo "Configuration Complete!"
echo "════════════════════════════════════════════════════════"
echo ""
echo "Access Points:"
echo " Internal: http://${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}"
echo " External: https://explorer.d-bis.org"
echo " API: http://${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}/api"
echo " Health: http://${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}/health"
echo ""
echo "Configuration:"
echo " Chain ID: $CHAIN_ID"
echo " RPC: $RPC_URL"
echo " WS: $WS_URL"
echo ""
echo "Useful Commands:"
echo " View logs: docker-compose logs -f"
echo " Restart: docker-compose restart"
echo " Stop: docker-compose down"
echo " Start: docker-compose up -d"
echo " Check status: docker ps"
echo ""
log_info "Blockscout may take 1-2 minutes to fully initialize"
log_info "Monitor progress: docker logs -f blockscout"
echo ""

View File

@@ -0,0 +1,181 @@
#!/usr/bin/env bash
# Configure all bridge destinations for CCIPWETH9Bridge and CCIPWETH10Bridge
# Usage: ./configure-bridge-destinations.sh
set -euo pipefail
# Resolve script/project paths once (previously computed twice) and load
# the optional IP address map.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Load environment variables (provides PRIVATE_KEY and the bridge addresses)
if [ -f "$SOURCE_PROJECT/.env" ]; then
    source "$SOURCE_PROJECT/.env"
else
    log_error ".env file not found in $SOURCE_PROJECT"
    exit 1
fi
# Required variables
RPC_URL="${RPC_URL_138:-http://${RPC_ALLTRA_1:-192.168.11.250}:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-}"
if [ -z "${PRIVATE_KEY:-}" ]; then
    log_error "PRIVATE_KEY not set in .env file"
    exit 1
fi
if [ -z "$WETH9_BRIDGE" ] || [ -z "$WETH10_BRIDGE" ]; then
    log_error "Bridge addresses not set in .env file"
    log_error "Please deploy bridges first: bash scripts/deploy-bridge-contracts.sh"
    exit 1
fi
# Destination chain configurations
# Keys are CCIP chain selectors (uint64); values are the receiver bridge
# contract addresses on each destination chain.
# NOTE(review): the selectors look like Chainlink CCIP mainnet selectors —
# confirm against the official CCIP directory before relying on them.
declare -A WETH9_DESTINATIONS=(
    ["11344663589394136015"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # BSC
    ["4051577828743386545"]="0xa780ef19a041745d353c9432f2a7f5a241335ffe" # Polygon
    ["6433500567565415381"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Avalanche
    ["15971525489660198786"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Base
    ["4949039107694359620"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Arbitrum
    ["3734403246176062136"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Optimism
    ["5009297550715157269"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Ethereum Mainnet
)
declare -A WETH10_DESTINATIONS=(
    ["11344663589394136015"]="0x105f8a15b819948a89153505762444ee9f324684" # BSC
    ["4051577828743386545"]="0xdab0591e5e89295ffad75a71dcfc30c5625c4fa2" # Polygon
    ["6433500567565415381"]="0x105f8a15b819948a89153505762444ee9f324684" # Avalanche
    ["15971525489660198786"]="0x105f8a15b819948a89153505762444ee9f324684" # Base
    ["4949039107694359620"]="0x105f8a15b819948a89153505762444ee9f324684" # Arbitrum
    ["3734403246176062136"]="0x105f8a15b819948a89153505762444ee9f324684" # Optimism
    ["5009297550715157269"]="0x105f8a15b819948a89153505762444ee9f324684" # Ethereum Mainnet
)
# Human-readable labels used only for logging
declare -A CHAIN_NAMES=(
    ["11344663589394136015"]="BSC"
    ["4051577828743386545"]="Polygon"
    ["6433500567565415381"]="Avalanche"
    ["15971525489660198786"]="Base"
    ["4949039107694359620"]="Arbitrum"
    ["3734403246176062136"]="Optimism"
    ["5009297550715157269"]="Ethereum"
)
log_info "========================================="
log_info "Configure Bridge Destinations"
log_info "========================================="
log_info ""
log_info "WETH9 Bridge: $WETH9_BRIDGE"
log_info "WETH10 Bridge: $WETH10_BRIDGE"
log_info "RPC URL: $RPC_URL"
log_info ""
# Function to check if destination is already configured.
# Returns 0 (configured) iff the bridge already has a destination for the
# given selector, 1 otherwise.
#   $1 - bridge contract address
#   $2 - CCIP chain selector (uint64)
#   $3 - human-readable chain name (for logging)
# NOTE(review): assumes destinations(uint64) returns an ABI word that is
# all-zero when the destination is unset — confirm against the bridge ABI.
check_destination() {
    local bridge="$1"
    local selector="$2"
    local name="$3"
    log_info "Checking $name destination..."
    local result
    result=$(cast call "$bridge" "destinations(uint64)" "$selector" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
    # FIX: the previous regex matched the 40-zero address string inside an
    # all-zero return word, so an UNSET destination was reported as already
    # configured and every addDestination call was skipped.
    # Configured means: the call succeeded AND returned a non-zero value.
    if [ -n "$result" ] && ! printf '%s\n' "$result" | grep -qE '^0x0+$'; then
        return 0  # Already configured
    fi
    return 1      # Not configured
}
# Configure WETH9 Bridge destinations.
# FIX: 'local' is only legal inside functions — the original used
# 'local output=...' at top level, which fails and aborts under 'set -e'.
log_info "Configuring WETH9 Bridge destinations..."
WETH9_COUNT=0
for selector in "${!WETH9_DESTINATIONS[@]}"; do
    chain_name="${CHAIN_NAMES[$selector]}"
    dest_address="${WETH9_DESTINATIONS[$selector]}"
    if check_destination "$WETH9_BRIDGE" "$selector" "$chain_name (WETH9)"; then
        log_success "$chain_name already configured for WETH9"
    else
        log_info "Configuring $chain_name for WETH9..."
        # '|| true' keeps a failed 'cast send' (pipefail) from aborting the
        # script so the error-reporting branch below can run.
        output=$(cast send "$WETH9_BRIDGE" \
            "addDestination(uint64,address)" \
            "$selector" \
            "$dest_address" \
            --rpc-url "$RPC_URL" \
            --private-key "$PRIVATE_KEY" 2>&1 | tee "/tmp/weth9-config-$chain_name.log") || true
        if echo "$output" | grep -qE "(blockHash|transactionHash|Success)"; then
            log_success "$chain_name configured for WETH9"
            # FIX: ((WETH9_COUNT++)) returns status 1 when the pre-increment
            # value is 0, which trips 'set -e'; plain assignment is safe.
            WETH9_COUNT=$((WETH9_COUNT + 1))
        elif echo "$output" | grep -q "destination already exists"; then
            log_success "$chain_name already configured for WETH9"
        else
            log_error "✗ Failed to configure $chain_name for WETH9"
            log_info "Check /tmp/weth9-config-$chain_name.log for details"
        fi
    fi
done
log_info ""
# Configure WETH10 Bridge destinations.
# FIX: 'local' is only legal inside functions — the original used
# 'local output=...' at top level, which fails and aborts under 'set -e'.
log_info "Configuring WETH10 Bridge destinations..."
WETH10_COUNT=0
for selector in "${!WETH10_DESTINATIONS[@]}"; do
    chain_name="${CHAIN_NAMES[$selector]}"
    dest_address="${WETH10_DESTINATIONS[$selector]}"
    if check_destination "$WETH10_BRIDGE" "$selector" "$chain_name (WETH10)"; then
        log_success "$chain_name already configured for WETH10"
    else
        log_info "Configuring $chain_name for WETH10..."
        # '|| true' keeps a failed 'cast send' (pipefail) from aborting the
        # script so the error-reporting branch below can run.
        output=$(cast send "$WETH10_BRIDGE" \
            "addDestination(uint64,address)" \
            "$selector" \
            "$dest_address" \
            --rpc-url "$RPC_URL" \
            --private-key "$PRIVATE_KEY" 2>&1 | tee "/tmp/weth10-config-$chain_name.log") || true
        if echo "$output" | grep -qE "(blockHash|transactionHash|Success)"; then
            log_success "$chain_name configured for WETH10"
            # FIX: ((WETH10_COUNT++)) returns status 1 when the pre-increment
            # value is 0, which trips 'set -e'; plain assignment is safe.
            WETH10_COUNT=$((WETH10_COUNT + 1))
        elif echo "$output" | grep -q "destination already exists"; then
            log_success "$chain_name already configured for WETH10"
        else
            log_error "✗ Failed to configure $chain_name for WETH10"
            log_info "Check /tmp/weth10-config-$chain_name.log for details"
        fi
    fi
done
log_info ""
# Final summary: counts report only destinations ADDED this run; ones that
# were already configured are not counted.
log_success "========================================="
log_success "Bridge Configuration Complete!"
log_success "========================================="
log_info ""
log_info "Summary:"
log_info "  WETH9 destinations configured: $WETH9_COUNT new"
log_info "  WETH10 destinations configured: $WETH10_COUNT new"
log_info ""
log_info "All 7 destination chains configured for both bridges"
log_info ""

View File

@@ -0,0 +1,477 @@
#!/usr/bin/env bash
# Configure Cloudflare Tunnel Routes and DNS Records via API
# Usage: ./configure-cloudflare-api.sh
# Requires: CLOUDFLARE_API_TOKEN and CLOUDFLARE_ZONE_ID environment variables
set -euo pipefail
# Resolve script paths once (previously SCRIPT_DIR was computed twice) and
# load the optional IP address map.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Log helpers. FIX: all diagnostics now go to stderr, because several
# functions below (get_zone_id, get_account_id, get_tunnel_id, ...) are
# called via $(...) — with stdout logging, their captured "return value"
# included interleaved log lines and was unusable.
info() { echo -e "${GREEN}[INFO]${NC} $1" >&2; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1" >&2; }
error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
debug() { echo -e "${BLUE}[DEBUG]${NC} $1" >&2; }
# Check for required tools
if ! command -v curl >/dev/null 2>&1; then
    error "curl is required but not installed"
    exit 1
fi
if ! command -v jq >/dev/null 2>&1; then
    error "jq is required but not installed. Install with: apt-get install jq"
    exit 1
fi
# Load environment variables
if [[ -f "$SCRIPT_DIR/../.env" ]]; then
    source "$SCRIPT_DIR/../.env"
fi
# Cloudflare API configuration (support multiple naming conventions)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
DOMAIN="${DOMAIN:-${CLOUDFLARE_DOMAIN:-d-bis.org}}"
# Tunnel configuration (support multiple naming conventions)
# Prefer JWT token from installed service, then env vars
INSTALLED_TOKEN=""
if command -v ssh >/dev/null 2>&1; then
    INSTALLED_TOKEN=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST:-192.168.11.10} \
        "pct exec 102 -- cat /etc/systemd/system/cloudflared.service 2>/dev/null | grep -o 'tunnel run --token [^ ]*' | cut -d' ' -f3" 2>/dev/null || echo "")
fi
# SECURITY(review): a live tunnel token is hardcoded below as the final
# fallback. It should be rotated and supplied via .env / a secrets store,
# not committed to source control.
TUNNEL_TOKEN="${INSTALLED_TOKEN:-${TUNNEL_TOKEN:-${CLOUDFLARE_TUNNEL_TOKEN:-eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiMTBhYjIyZGEtOGVhMy00ZTJlLWE4OTYtMjdlY2UyMjExYTA1IiwicyI6IlptRXlOMkkyTVRrdE1EZzFNeTAwTkRBNExXSXhaalF0Wm1KaE5XVmpaVEEzTVdGbCJ9}}}"
# RPC endpoint configuration
# Public endpoints route to VMID 2502 (NO JWT authentication)
# Private endpoints route to VMID 2501 (JWT authentication required)
# The 4-deep self-nested ${RPC_ALI_N:-${RPC_ALI_N:-...}} defaults were
# redundant (a variable defaulting to itself) and are collapsed here;
# behavior is identical.
declare -A RPC_ENDPOINTS=(
    [rpc-http-pub]="https://${RPC_ALI_2:-192.168.11.252}:443"
    [rpc-ws-pub]="https://${RPC_ALI_2:-192.168.11.252}:443"
    [rpc-http-prv]="https://${RPC_ALI_1:-192.168.11.251}:443"
    [rpc-ws-prv]="https://${RPC_ALI_1:-192.168.11.251}:443"
)
# API base URLs
CF_API_BASE="https://api.cloudflare.com/client/v4"
CF_ZERO_TRUST_API="https://api.cloudflare.com/client/v4/accounts"
# Function to make a Cloudflare v4 API request.
#   $1 - HTTP method (GET/POST/PUT/...)
#   $2 - endpoint path, appended to $CF_API_BASE (e.g. "/zones")
#   $3 - optional JSON request body
# Outputs the raw JSON response on stdout. Returns 1 (after logging to
# stderr via error/debug) when the response is not valid JSON or reports
# success=false. Exits the script when no credentials are available.
cf_api_request() {
    local method="$1"
    local endpoint="$2"
    local data="${3:-}"
    local url="${CF_API_BASE}${endpoint}"
    # Pick an auth scheme: Bearer token preferred; a Global Key needs email.
    local headers=()
    if [[ -n "$CLOUDFLARE_API_TOKEN" ]]; then
        headers+=("-H" "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}")
    elif [[ -n "$CLOUDFLARE_API_KEY" ]]; then
        # Global API Keys are typically 40 chars, API Tokens are longer.
        # If no email provided, assume it's an API Token.
        if [[ -z "$CLOUDFLARE_EMAIL" ]] || [[ ${#CLOUDFLARE_API_KEY} -gt 50 ]]; then
            headers+=("-H" "Authorization: Bearer ${CLOUDFLARE_API_KEY}")
        else
            headers+=("-H" "X-Auth-Email: ${CLOUDFLARE_EMAIL}")
            headers+=("-H" "X-Auth-Key: ${CLOUDFLARE_API_KEY}")
        fi
    else
        error "Cloudflare API credentials not found!"
        error "Set CLOUDFLARE_API_TOKEN or CLOUDFLARE_EMAIL + CLOUDFLARE_API_KEY"
        exit 1
    fi
    headers+=("-H" "Content-Type: application/json")
    # Declaration is separated from assignment so 'local' does not mask the
    # exit status; a transport failure falls through to the JSON check below.
    local response
    if [[ -n "$data" ]]; then
        response=$(curl -s -X "$method" "$url" "${headers[@]}" -d "$data") || response=""
    else
        response=$(curl -s -X "$method" "$url" "${headers[@]}") || response=""
    fi
    # Check if response is valid JSON
    if ! echo "$response" | jq -e . >/dev/null 2>&1; then
        error "Invalid JSON response from API"
        debug "Response: $response"
        return 1
    fi
    # Check for API errors (the v4 envelope carries a boolean .success)
    local success
    success=$(echo "$response" | jq -r '.success // false' 2>/dev/null)
    if [[ "$success" != "true" ]]; then
        local errors
        errors=$(echo "$response" | jq -r '.errors[]?.message // .error // "Unknown error"' 2>/dev/null | head -3)
        if [[ -z "$errors" ]]; then
            errors="API request failed (check response)"
        fi
        error "API request failed: $errors"
        debug "Response: $response"
        return 1
    fi
    echo "$response"
}
# Function to get zone ID from domain.
# Prints ONLY the zone ID on stdout (callers capture it via $(get_zone_id)).
# FIX: log output is explicitly redirected to stderr here — previously the
# info lines went to stdout and were captured along with the ID.
get_zone_id() {
    if [[ -n "$CLOUDFLARE_ZONE_ID" ]]; then
        echo "$CLOUDFLARE_ZONE_ID"
        return 0
    fi
    info "Getting zone ID for domain: $DOMAIN" >&2
    local response
    response=$(cf_api_request "GET" "/zones?name=${DOMAIN}")
    local zone_id
    zone_id=$(echo "$response" | jq -r '.result[0].id // empty')
    if [[ -z "$zone_id" ]]; then
        error "Zone not found for domain: $DOMAIN" >&2
        exit 1
    fi
    info "Zone ID: $zone_id" >&2
    echo "$zone_id"
}
# Function to get account ID (needed for Zero Trust API).
# Prints ONLY the account ID on stdout; diagnostics go to stderr because
# callers capture the result via $(get_account_id).
# FIX(review): the previous first attempt read .result.id from
# /user/tokens/verify — that endpoint returns the TOKEN's id, not an
# account id, so a wrong (non-empty) value short-circuited the lookup.
get_account_id() {
    info "Getting account ID..." >&2
    local response account_id
    # Primary: first account visible to these credentials
    response=$(cf_api_request "GET" "/accounts" 2>/dev/null || echo "")
    account_id=$(echo "$response" | jq -r '.result[0].id // empty' 2>/dev/null)
    if [[ -z "$account_id" ]]; then
        # Fallback: derive the owning account from the zone record
        local zone_id
        zone_id=$(get_zone_id)
        response=$(cf_api_request "GET" "/zones/${zone_id}")
        account_id=$(echo "$response" | jq -r '.result.account.id // empty')
    fi
    if [[ -z "$account_id" ]]; then
        error "Could not determine account ID" >&2
        error "You may need to specify CLOUDFLARE_ACCOUNT_ID in .env file" >&2
        exit 1
    fi
    info "Account ID: $account_id" >&2
    echo "$account_id"
}
# Extract the tunnel ID (JWT claim 't') from a Cloudflare tunnel token.
# Prints the tunnel ID on stdout; prints an empty string when the token is
# not JWT-shaped or the payload cannot be decoded.
get_tunnel_id_from_token() {
    local token="$1"
    # Only JWT-shaped tokens (three dot-separated segments) carry the claim.
    if [[ "$token" != *.*.* ]]; then
        echo ""
        return
    fi
    local payload
    payload=$(echo "$token" | cut -d'.' -f2)
    # base64 input must be padded to a multiple of 4 characters
    local pad=$((4 - ${#payload} % 4))
    if (( pad != 4 )); then
        payload="${payload}$(printf '%*s' "$pad" '' | tr ' ' '=')"
    fi
    # Prefer python3 (robust JSON handling); fall back to base64 + jq.
    if command -v python3 >/dev/null 2>&1; then
        echo "$payload" | python3 -c "import sys, base64, json; payload=sys.stdin.read().strip(); padding=4-len(payload)%4; payload+=('='*padding if padding<4 else ''); data=json.loads(base64.b64decode(payload)); print(data.get('t', ''))" 2>/dev/null || echo ""
    else
        echo "$payload" | base64 -d 2>/dev/null | jq -r '.t // empty' 2>/dev/null || echo ""
    fi
}
# Function to get tunnel ID.
# Prints ONLY the tunnel ID on stdout. Resolution order: decode the JWT
# token's 't' claim; otherwise take the first tunnel listed by the API.
# FIX: diagnostics are forced to stderr (callers capture stdout), and the
# duplicate 'local tunnel_id' declaration was removed.
get_tunnel_id() {
    local account_id="$1"
    local token="$2"
    # Try to extract from JWT token first
    local tunnel_id
    tunnel_id=$(get_tunnel_id_from_token "$token")
    if [[ -n "$tunnel_id" ]]; then
        info "Tunnel ID from token: $tunnel_id" >&2
        echo "$tunnel_id"
        return 0
    fi
    # Fallback: list tunnels and take the first one
    warn "Could not extract tunnel ID from token, listing tunnels..." >&2
    local response
    response=$(cf_api_request "GET" "/accounts/${account_id}/cfd_tunnel" 2>/dev/null || echo "")
    if [[ -z "$response" ]]; then
        error "Failed to list tunnels. Check API credentials." >&2
        exit 1
    fi
    tunnel_id=$(echo "$response" | jq -r '.result[0].id // empty' 2>/dev/null)
    if [[ -z "$tunnel_id" ]]; then
        error "Could not find tunnel ID" >&2
        debug "Response: $response" >&2
        exit 1
    fi
    info "Tunnel ID: $tunnel_id" >&2
    echo "$tunnel_id"
}
# Look up a tunnel's display name via the Zero Trust API.
# Prints the name on stdout (empty when the lookup yields nothing).
get_tunnel_name() {
    local account="$1"
    local tunnel="$2"
    local reply
    reply=$(cf_api_request "GET" "/accounts/${account}/cfd_tunnel/${tunnel}")
    echo "$reply" | jq -r '.result.name // empty'
}
# Function to configure tunnel routes.
# Replaces the tunnel's entire ingress configuration with one rule per RPC
# subdomain plus a trailing http_status:404 catch-all (Cloudflare requires
# the catch-all rule to be last).
#   $1 - account ID, $2 - tunnel ID, $3 - tunnel display name (logging only)
configure_tunnel_routes() {
    local account_id="$1"
    local tunnel_id="$2"
    local tunnel_name="$3"
    info "Configuring tunnel routes for: $tunnel_name"
    # Build ingress rules array as a raw JSON string; hostnames/services come
    # from the RPC_ENDPOINTS table defined near the top of the script.
    local ingress_array="["
    local first=true
    for subdomain in "${!RPC_ENDPOINTS[@]}"; do
        local service="${RPC_ENDPOINTS[$subdomain]}"
        local hostname="${subdomain}.${DOMAIN}"
        if [[ "$first" == "true" ]]; then
            first=false
        else
            ingress_array+=","
        fi
        # Determine if WebSocket (subdomain naming convention contains "ws")
        local is_ws=false
        if [[ "$subdomain" == *"ws"* ]]; then
            is_ws=true
        fi
        # Build ingress rule
        # Add noTLSVerify to skip certificate validation (certificates don't have IP SANs)
        if [[ "$is_ws" == "true" ]]; then
            ingress_array+="{\"hostname\":\"${hostname}\",\"service\":\"${service}\",\"originRequest\":{\"httpHostHeader\":\"${hostname}\",\"noTLSVerify\":true}}"
        else
            ingress_array+="{\"hostname\":\"${hostname}\",\"service\":\"${service}\",\"originRequest\":{\"noTLSVerify\":true}}"
        fi
        info "  Adding route: ${hostname}${service}"
    done
    # Add catch-all (must be last)
    ingress_array+=",{\"service\":\"http_status:404\"}]"
    # Create config JSON (jq validates and wraps the hand-built array)
    local config_data=$(echo "$ingress_array" | jq -c '{
    config: {
        ingress: .
    }
}')
    info "Updating tunnel configuration..."
    local response=$(cf_api_request "PUT" "/accounts/${account_id}/cfd_tunnel/${tunnel_id}/configurations" "$config_data")
    if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
        info "✓ Tunnel routes configured successfully"
    else
        local errors=$(echo "$response" | jq -r '.errors[]?.message // "Unknown error"' | head -3)
        error "Failed to configure tunnel routes: $errors"
        debug "Response: $response"
        return 1
    fi
}
# Function to create or update DNS record.
# Idempotent CNAME upsert: looks up an existing record by name/type and
# PUTs over it, otherwise POSTs a new record.
#   $1 - zone ID, $2 - bare subdomain, $3 - CNAME target,
#   $4 - proxied flag (JSON boolean string, default "true")
create_or_update_dns_record() {
    local zone_id="$1"
    local name="$2"
    local target="$3"
    local proxied="${4:-true}"
    # Check if record exists
    local response=$(cf_api_request "GET" "/zones/${zone_id}/dns_records?name=${name}.${DOMAIN}&type=CNAME")
    local record_id=$(echo "$response" | jq -r '.result[0].id // empty')
    # ttl=1 means "automatic" in the Cloudflare API; jq handles JSON escaping
    local data=$(jq -n \
        --arg name "${name}.${DOMAIN}" \
        --arg target "$target" \
        --argjson proxied "$proxied" \
        '{
            type: "CNAME",
            name: $name,
            content: $target,
            proxied: $proxied,
            ttl: 1
        }')
    if [[ -n "$record_id" ]]; then
        info "  Updating existing DNS record: ${name}.${DOMAIN}"
        response=$(cf_api_request "PUT" "/zones/${zone_id}/dns_records/${record_id}" "$data")
    else
        info "  Creating DNS record: ${name}.${DOMAIN}"
        response=$(cf_api_request "POST" "/zones/${zone_id}/dns_records" "$data")
    fi
    if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
        info "  ✓ DNS record configured"
    else
        error "  ✗ Failed to configure DNS record"
        return 1
    fi
}
# Upsert a proxied CNAME for every RPC subdomain, all pointing at the
# tunnel's <id>.cfargotunnel.com target.
configure_dns_records() {
    local zone="$1"
    local tunnel="$2"
    local target="${tunnel}.cfargotunnel.com"
    info "Configuring DNS records..."
    info "Tunnel target: $target"
    local sub
    for sub in "${!RPC_ENDPOINTS[@]}"; do
        create_or_update_dns_record "$zone" "$sub" "$target" "true"
    done
}
# Main execution.
# Orchestrates the run: validate credentials, resolve zone/account/tunnel
# IDs (env vars take precedence over lookups), then push tunnel ingress
# rules and DNS records. Exits non-zero on any unrecoverable failure.
main() {
    info "Cloudflare API Configuration Script"
    info "===================================="
    echo ""
    # Validate credentials
    if [[ -z "$CLOUDFLARE_API_TOKEN" ]] && [[ -z "$CLOUDFLARE_EMAIL" ]] && [[ -z "$CLOUDFLARE_API_KEY" ]]; then
        error "Cloudflare API credentials required!"
        echo ""
        echo "Set one of:"
        echo "  export CLOUDFLARE_API_TOKEN='your-api-token'"
        echo "  OR"
        echo "  export CLOUDFLARE_EMAIL='your-email@example.com'"
        echo "  export CLOUDFLARE_API_KEY='your-api-key'"
        echo ""
        echo "You can also create a .env file in the project root with these variables."
        exit 1
    fi
    # If API_KEY is provided but no email, we need email for Global API Key
    if [[ -n "$CLOUDFLARE_API_KEY" ]] && [[ -z "$CLOUDFLARE_EMAIL" ]] && [[ -z "$CLOUDFLARE_API_TOKEN" ]]; then
        error "CLOUDFLARE_API_KEY requires CLOUDFLARE_EMAIL"
        error "Please add CLOUDFLARE_EMAIL to your .env file"
        error ""
        error "OR create an API Token instead:"
        error "  1. Go to: https://dash.cloudflare.com/profile/api-tokens"
        error "  2. Create token with: Zone:DNS:Edit, Account:Cloudflare Tunnel:Edit"
        error "  3. Set CLOUDFLARE_API_TOKEN in .env"
        exit 1
    fi
    # Get zone ID
    local zone_id=$(get_zone_id)
    # Get account ID
    local account_id="${CLOUDFLARE_ACCOUNT_ID:-}"
    if [[ -z "$account_id" ]]; then
        account_id=$(get_account_id)
    else
        info "Using provided Account ID: $account_id"
    fi
    # Get tunnel ID - try from .env first, then extraction, then API
    local tunnel_id="${CLOUDFLARE_TUNNEL_ID:-}"
    # If not in .env, try to extract from JWT token
    # (inline decode of the JWT payload's 't' claim; mirrors
    # get_tunnel_id_from_token but avoids a hard python3 dependency)
    if [[ -z "$tunnel_id" ]] && [[ "$TUNNEL_TOKEN" == *.*.* ]]; then
        local payload=$(echo "$TUNNEL_TOKEN" | cut -d'.' -f2)
        local padding=$((4 - ${#payload} % 4))
        if [[ $padding -ne 4 ]]; then
            payload="${payload}$(printf '%*s' $padding | tr ' ' '=')"
        fi
        if command -v python3 >/dev/null 2>&1; then
            tunnel_id=$(echo "$payload" | python3 -c "import sys, base64, json; payload=sys.stdin.read().strip(); padding=4-len(payload)%4; payload+=('='*padding if padding<4 else ''); data=json.loads(base64.b64decode(payload)); print(data.get('t', ''))" 2>/dev/null || echo "")
        fi
    fi
    # If extraction failed, try API (but don't fail if API doesn't work)
    if [[ -z "$tunnel_id" ]]; then
        tunnel_id=$(get_tunnel_id "$account_id" "$TUNNEL_TOKEN" 2>/dev/null || echo "")
    fi
    if [[ -z "$tunnel_id" ]]; then
        error "Could not determine tunnel ID"
        error "Please set CLOUDFLARE_TUNNEL_ID in .env file"
        error "Or ensure API credentials are valid to fetch it automatically"
        exit 1
    fi
    info "Using Tunnel ID: $tunnel_id"
    # Display name is cosmetic; fall back to a truncated-ID label on failure
    local tunnel_name=$(get_tunnel_name "$account_id" "$tunnel_id" 2>/dev/null || echo "tunnel-${tunnel_id:0:8}")
    echo ""
    info "Configuration Summary:"
    echo "  Domain: $DOMAIN"
    echo "  Zone ID: $zone_id"
    echo "  Account ID: $account_id"
    echo "  Tunnel: $tunnel_name (ID: $tunnel_id)"
    echo ""
    # Configure tunnel routes
    echo "=========================================="
    info "Step 1: Configuring Tunnel Routes"
    echo "=========================================="
    configure_tunnel_routes "$account_id" "$tunnel_id" "$tunnel_name"
    echo ""
    echo "=========================================="
    info "Step 2: Configuring DNS Records"
    echo "=========================================="
    configure_dns_records "$zone_id" "$tunnel_id"
    echo ""
    echo "=========================================="
    info "Configuration Complete!"
    echo "=========================================="
    echo ""
    info "Next steps:"
    echo "  1. Wait 1-2 minutes for DNS propagation"
    echo "  2. Test endpoints:"
    echo "     curl https://rpc-http-pub.d-bis.org/health"
    echo "  3. Verify in Cloudflare Dashboard:"
    echo "     - Zero Trust → Networks → Tunnels → Check routes"
    echo "     - DNS → Records → Verify CNAME records"
}
# Run main function, forwarding any CLI arguments (conventional "$@")
main "$@"

View File

@@ -0,0 +1,218 @@
#!/bin/bash
# Configure Cloudflare DNS and SSL via API using .env credentials
# This script does NOT require container access - only Cloudflare API
set -euo pipefail
# Resolve script paths once (previously SCRIPT_DIR was computed twice) and
# load the optional IP address map.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"
# Configuration
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# Default explorer IP is the device-14 IP with a trailing "0" appended
# (e.g. 192.168.11.140); redundant self-nested defaults collapsed.
EXPLORER_IP="${EXPLORER_IP:-${IP_DEVICE_14:-192.168.11.14}0}"
# SECURITY(review): live tunnel token hardcoded — rotate it and load from
# .env / a secrets store instead of committing it to source control.
TUNNEL_TOKEN="eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiYjAyZmUxZmUtY2I3ZC00ODRlLTkwOWItN2NjNDEyOThlYmU4IiwicyI6Ik5HTmtOV0kwWXpNdFpUVmxaUzAwTVRFMkxXRXdNMk10WlRJNU1ETTFaRFF4TURBMiJ9"
echo ""
log_info "═══════════════════════════════════════════════════════════"
log_info " CLOUDFLARE DNS & SSL CONFIGURATION (API)"
log_info "═══════════════════════════════════════════════════════════"
echo ""
# Load .env
if [ ! -f "$ENV_FILE" ]; then
    log_error ".env file not found: $ENV_FILE"
    exit 1
fi
# 'set -a' auto-exports every variable sourced from .env
set -a
source "$ENV_FILE"
set +a
# Get credentials (all optional individually; validated below)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
# Determine auth method: prefer scoped API Token; fall back to Global Key + email
AUTH_HEADERS=()
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
    AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
    log_success "Using API Token"
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
    # Strip stray double-quotes that sometimes survive .env parsing
    CLOUDFLARE_API_KEY=$(echo "$CLOUDFLARE_API_KEY" | tr -d '"')
    CLOUDFLARE_EMAIL=$(echo "$CLOUDFLARE_EMAIL" | tr -d '"')
    AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
    log_success "Using API Key"
else
    log_error "No Cloudflare credentials found"
    exit 1
fi
# Extract tunnel ID from token. The token is base64-encoded JSON; the
# tunnel id lives in the 't' field ('.TunnelID' is tried first for older
# token formats).
log_info "Extracting tunnel ID from token..."
TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.TunnelID // empty' 2>/dev/null || echo "")
if [ -z "$TUNNEL_ID" ]; then
    # Try alternative extraction
    TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.t // empty' 2>/dev/null || echo "")
fi
if [ -z "$TUNNEL_ID" ]; then
    log_warn "Could not extract tunnel ID from token"
    log_info "You may need to get tunnel ID from: cloudflared tunnel list"
    # Sentinel value; the tunnel-route step below is skipped when it is set
    TUNNEL_ID="<tunnel-id>"
else
    log_success "Tunnel ID: $TUNNEL_ID"
fi
# Get Zone ID (look up by domain name when not provided in .env)
if [ -z "$CLOUDFLARE_ZONE_ID" ]; then
    log_info "Getting zone ID for $DOMAIN..."
    ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    CLOUDFLARE_ZONE_ID=$(echo "$ZONE_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
    if [ -z "$CLOUDFLARE_ZONE_ID" ] || [ "$CLOUDFLARE_ZONE_ID" = "null" ]; then
        log_error "Failed to get zone ID"
        exit 1
    fi
fi
log_success "Zone ID: $CLOUDFLARE_ZONE_ID"
# Get Account ID (optional; only needed for the tunnel-route step)
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_info "Getting account ID..."
    ACCOUNT_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    CLOUDFLARE_ACCOUNT_ID=$(echo "$ACCOUNT_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_success "Account ID: $CLOUDFLARE_ACCOUNT_ID"
fi
# Configure DNS
log_info "Configuring DNS record..."
# CNAME target for a Cloudflare tunnel is always <tunnel-id>.cfargotunnel.com
TARGET="${TUNNEL_ID}.cfargotunnel.com"
# Check existing record
EXISTING=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records?name=$EXPLORER_DOMAIN" \
    "${AUTH_HEADERS[@]}" \
    -H "Content-Type: application/json")
RECORD_ID=$(echo "$EXISTING" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
# Build the record payload with jq so values are JSON-escaped safely;
# ttl=1 means "automatic" in the Cloudflare API
DNS_DATA=$(jq -n \
    --arg name "explorer" \
    --arg target "$TARGET" \
    '{
        type: "CNAME",
        name: $name,
        content: $target,
        proxied: true,
        ttl: 1
    }')
# Upsert: PUT over an existing record, otherwise POST a new one
if [ -n "$RECORD_ID" ] && [ "$RECORD_ID" != "null" ]; then
    log_info "Updating existing DNS record..."
    DNS_RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records/$RECORD_ID" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$DNS_DATA")
else
    log_info "Creating new DNS record..."
    DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$DNS_DATA")
fi
if echo "$DNS_RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
    log_success "DNS record configured: $EXPLORER_DOMAIN$TARGET (🟠 Proxied)"
else
    ERROR=$(echo "$DNS_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
    log_error "DNS configuration failed: $ERROR"
    echo "$DNS_RESPONSE" | jq '.' 2>/dev/null || echo "$DNS_RESPONSE"
    exit 1
fi
# Configure Tunnel Route (if account ID available)
# NOTE(review): this PUT REPLACES the tunnel's entire ingress config with a
# single explorer rule plus the mandatory 404 catch-all — confirm no other
# hostnames are routed through this tunnel before running.
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ] && [ "$TUNNEL_ID" != "<tunnel-id>" ]; then
    log_info "Configuring tunnel route..."
    TUNNEL_CONFIG=$(jq -n \
        --arg hostname "$EXPLORER_DOMAIN" \
        --arg service "http://$EXPLORER_IP:80" \
        '{
            config: {
                ingress: [
                    {
                        hostname: $hostname,
                        service: $service
                    },
                    {
                        service: "http_status:404"
                    }
                ]
            }
        }')
    TUNNEL_UPDATE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$TUNNEL_CONFIG")
    if echo "$TUNNEL_UPDATE" | jq -e '.success' >/dev/null 2>&1; then
        log_success "Tunnel route configured: $EXPLORER_DOMAIN → http://$EXPLORER_IP:80"
    else
        ERROR=$(echo "$TUNNEL_UPDATE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
        log_warn "Tunnel route configuration failed: $ERROR"
        log_info "Configure manually in Cloudflare Zero Trust dashboard"
    fi
else
    log_warn "Tunnel route requires manual configuration"
fi
# SSL is automatic with Cloudflare proxy
log_success "SSL/TLS: Automatic (Cloudflare Universal SSL enabled)"
# Verify the public endpoint.
# FIX: a curl transport failure (DNS not yet propagated, connection
# refused) previously made the $(...) assignment fail and abort the whole
# script under 'set -e' before the warn branch could run. '|| true' keeps
# the best-effort semantics (curl's -w already prints 000 on failure).
log_info "Waiting for DNS propagation (10 seconds)..."
sleep 10
PUBLIC_HTTP=$(curl -s -o /dev/null -w "%{http_code}" "https://$EXPLORER_DOMAIN/api/v2/stats" 2>&1 || true)
if [ "$PUBLIC_HTTP" = "200" ]; then
    log_success "Public URL: HTTP 200 - Working!"
else
    log_warn "Public URL: HTTP $PUBLIC_HTTP (may need more time for propagation)"
fi
echo ""
log_success "Configuration complete!"
echo ""

View File

@@ -0,0 +1,405 @@
#!/bin/bash
# Complete Cloudflare Configuration for Explorer - Automated
# Uses .env credentials to configure DNS, SSL, and tunnel routes
set -euo pipefail
# Load IP configuration (best-effort: defaults below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "${CYAN}════════════════════════════════════════${NC}"; }
# SCRIPT_DIR was already computed above; reuse it for the .env lookup.
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"
# Configuration
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# NOTE(review): the trailing "0" turns IP_DEVICE_14 (…14) into the explorer host
# (…140) — presumably intentional, confirm. The original triple self-nested
# ${IP_DEVICE_14:-…} fallback was redundant and is collapsed to a single level.
EXPLORER_IP="${EXPLORER_IP:-${IP_DEVICE_14:-192.168.11.14}0}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
VMID=5000
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
# SECURITY: the embedded tunnel token is kept only as a backward-compatible
# fallback. Prefer supplying TUNNEL_TOKEN via the environment or .env, and
# rotate this committed credential.
TUNNEL_TOKEN="${TUNNEL_TOKEN:-eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiYjAyZmUxZmUtY2I3ZC00ODRlLTkwOWItN2NjNDEyOThlYmU4IiwicyI6Ik5HTmtOV0kwWXpNdFpUVmxaUzAwTVRFMkxXRXdNMk10WlRJNU1ETTFaRFF4TURBMiJ9}"
echo ""
log_section
log_info "  COMPLETE CLOUDFLARE EXPLORER CONFIGURATION"
log_info "  Using .env Credentials for Full Automation"
log_section
echo ""
# Step 1: Load .env file
log_info "Step 1: Loading credentials from .env file..."
if [ ! -f "$ENV_FILE" ]; then
log_error ".env file not found: $ENV_FILE"
log_info "Looking for .env files..."
find "$SCRIPT_DIR/.." -maxdepth 2 -name ".env" -type f 2>/dev/null | head -3
log_info ""
log_info "Please create .env file with:"
echo "  CLOUDFLARE_API_TOKEN=your-token"
echo "  CLOUDFLARE_ZONE_ID=your-zone-id (optional)"
echo "  CLOUDFLARE_ACCOUNT_ID=your-account-id (optional)"
echo "  DOMAIN=d-bis.org"
exit 1
fi
# Source .env file
# 'set -a' exports every variable assigned while sourcing, so child
# processes (curl, ssh) also see the credentials.
set -a
source "$ENV_FILE"
set +a
log_success ".env file loaded"
# Check for required credentials (default to empty so 'set -u' is safe)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
# Determine auth method; AUTH_HEADERS is an array of curl -H arguments so
# values with spaces survive quoting.
AUTH_METHOD=""
AUTH_HEADERS=()
# Check for API_TOKEN first (preferred), then API_KEY
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
AUTH_METHOD="token"
AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
log_success "Using API Token authentication"
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
AUTH_METHOD="key"
# Remove quotes from API_KEY if present
CLOUDFLARE_API_KEY=$(echo "$CLOUDFLARE_API_KEY" | tr -d '"')
CLOUDFLARE_EMAIL=$(echo "$CLOUDFLARE_EMAIL" | tr -d '"')
AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
log_success "Using API Key authentication"
else
log_error "No Cloudflare API credentials found in .env"
log_info "Required: CLOUDFLARE_API_TOKEN or (CLOUDFLARE_API_KEY + CLOUDFLARE_EMAIL)"
exit 1
fi
# Function to find container node
# Probes each Proxmox node over ssh for container $VMID; prints the first
# node that answers, falling back to "pve2" if none do (or ssh fails).
find_container_node() {
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
"for node in ml110 pve pve2; do if pvesh get /nodes/\$node/lxc/$VMID/status/current --output-format json >/dev/null 2>&1; then echo \$node; break; fi; done" 2>/dev/null || echo "pve2"
}
# Function to execute command in container
# Runs $1 inside LXC container $VMID via 'pct exec' on PROXMOX_HOST.
# NOTE(review): $cmd is spliced into single quotes on the remote side, so a
# command containing a single quote would break — callers must avoid them.
exec_container() {
local cmd="$1"
# Try direct pct exec via main host first
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec $VMID -- bash -c '$cmd'" 2>&1
}
# Step 2: Install Cloudflare Tunnel Service
log_section
log_info "Step 2: Installing Cloudflare Tunnel Service"
log_section
log_info "Checking cloudflared installation..."
if ! exec_container "command -v cloudflared >/dev/null 2>&1"; then
log_info "Installing cloudflared..."
exec_container "cd /tmp && wget -q https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb && dpkg -i cloudflared-linux-amd64.deb || apt install -f -y"
log_success "cloudflared installed"
else
log_success "cloudflared already installed"
fi
log_info "Installing tunnel service with token..."
# NOTE(review): passing the token on the remote command line exposes it in
# the container's process list while the install runs.
INSTALL_OUTPUT=$(exec_container "cloudflared service install $TUNNEL_TOKEN 2>&1" || echo "INSTALL_FAILED")
if echo "$INSTALL_OUTPUT" | grep -q -E "successfully|installed|Service installed"; then
log_success "Tunnel service installed"
else
log_warn "Installation output: $INSTALL_OUTPUT"
# Continue - service might already be installed
fi
log_info "Starting cloudflared service..."
exec_container "systemctl start cloudflared" || true
exec_container "systemctl enable cloudflared" || true
sleep 3
CLOUDFLARED_STATUS=$(exec_container "systemctl is-active cloudflared 2>/dev/null || echo 'inactive'")
if [ "$CLOUDFLARED_STATUS" = "active" ]; then
log_success "Cloudflared service is running"
else
log_warn "Cloudflared service is $CLOUDFLARED_STATUS"
fi
# Get tunnel ID
log_info "Getting tunnel ID..."
TUNNEL_LIST=$(exec_container "cloudflared tunnel list 2>&1" || echo "")
# First data row, first column of 'cloudflared tunnel list' is the tunnel UUID.
TUNNEL_ID=$(echo "$TUNNEL_LIST" | grep -v "NAME" | head -1 | awk '{print $1}' || echo "")
if [ -n "$TUNNEL_ID" ]; then
log_success "Tunnel ID: $TUNNEL_ID"
else
log_warn "Could not get tunnel ID from tunnel list"
# Try to extract from token (base64 decode)
# The connector token is base64-encoded JSON carrying a TunnelID field.
TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.TunnelID // empty' 2>/dev/null || echo "")
if [ -n "$TUNNEL_ID" ]; then
log_success "Tunnel ID from token: $TUNNEL_ID"
else
log_error "Cannot determine tunnel ID"
exit 1
fi
fi
# Step 3: Get Zone ID
log_section
log_info "Step 3: Getting Cloudflare Zone ID"
log_section
if [ -z "$CLOUDFLARE_ZONE_ID" ]; then
log_info "Fetching zone ID for $DOMAIN..."
ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
ZONE_ID=$(echo "$ZONE_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
if [ -z "$ZONE_ID" ] || [ "$ZONE_ID" = "null" ]; then
ERROR=$(echo "$ZONE_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_error "Failed to get zone ID: $ERROR"
exit 1
fi
log_success "Zone ID: $ZONE_ID"
else
ZONE_ID="$CLOUDFLARE_ZONE_ID"
log_success "Using provided Zone ID: $ZONE_ID"
fi
# Step 4: Get Account ID (for tunnel configuration)
# Missing account ID is not fatal: the tunnel-route step degrades to
# manual-instruction mode later in the script.
log_section
log_info "Step 4: Getting Cloudflare Account ID"
log_section
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
log_info "Fetching account ID..."
ACCOUNT_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
CLOUDFLARE_ACCOUNT_ID=$(echo "$ACCOUNT_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ] || [ "$CLOUDFLARE_ACCOUNT_ID" = "null" ]; then
log_warn "Could not get account ID automatically"
else
log_success "Account ID: $CLOUDFLARE_ACCOUNT_ID"
fi
else
log_success "Using provided Account ID: $CLOUDFLARE_ACCOUNT_ID"
fi
# Step 5: Configure DNS Record
# Upserts a proxied CNAME "explorer" pointing at <tunnel-id>.cfargotunnel.com.
log_section
log_info "Step 5: Configuring DNS Record"
log_section
TARGET="${TUNNEL_ID}.cfargotunnel.com"
log_info "DNS Target: $TARGET"
# Check if record exists
EXISTING_RECORD=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?name=$EXPLORER_DOMAIN" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
RECORD_ID=$(echo "$EXISTING_RECORD" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
EXISTING_TYPE=$(echo "$EXISTING_RECORD" | jq -r '.result[0].type // empty' 2>/dev/null || echo "")
# Payload built with jq so values are JSON-escaped; ttl 1 means "automatic".
DNS_DATA=$(jq -n \
--arg name "explorer" \
--arg target "$TARGET" \
'{
type: "CNAME",
name: $name,
content: $target,
proxied: true,
ttl: 1
}')
if [ -n "$RECORD_ID" ] && [ "$RECORD_ID" != "null" ]; then
log_info "Updating existing DNS record (ID: $RECORD_ID, Type: $EXISTING_TYPE)..."
if [ "$EXISTING_TYPE" != "CNAME" ]; then
# Cloudflare cannot change a record's type in place: delete then recreate.
log_warn "Existing record is type $EXISTING_TYPE, deleting and creating CNAME..."
curl -s -X DELETE "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" >/dev/null 2>&1
DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$DNS_DATA")
else
DNS_RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$DNS_DATA")
fi
else
log_info "Creating new DNS record..."
DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$DNS_DATA")
fi
if echo "$DNS_RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
log_success "DNS record configured successfully"
DNS_NAME=$(echo "$DNS_RESPONSE" | jq -r '.result.name' 2>/dev/null || echo "$EXPLORER_DOMAIN")
DNS_TARGET=$(echo "$DNS_RESPONSE" | jq -r '.result.content' 2>/dev/null || echo "$TARGET")
DNS_PROXIED=$(echo "$DNS_RESPONSE" | jq -r '.result.proxied' 2>/dev/null || echo "true")
log_info "  Name: $DNS_NAME"
log_info "  Target: $DNS_TARGET"
log_info "  Proxied: $DNS_PROXIED"
else
ERROR=$(echo "$DNS_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_error "Failed to configure DNS: $ERROR"
echo "$DNS_RESPONSE" | jq '.' 2>/dev/null || echo "$DNS_RESPONSE"
exit 1
fi
# Step 6: Configure Tunnel Route
log_section
log_info "Step 6: Configuring Tunnel Route"
log_section
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ] || [ "$CLOUDFLARE_ACCOUNT_ID" = "null" ]; then
log_warn "Account ID not available - tunnel route must be configured manually"
log_info "Configure in Cloudflare Zero Trust Dashboard:"
echo "  1. Go to: https://one.dash.cloudflare.com/"
echo "  2. Zero Trust → Networks → Tunnels"
echo "  3. Select tunnel: $TUNNEL_ID"
echo "  4. Configure → Public Hostnames → Add hostname"
echo "  5. Subdomain: explorer, Domain: $DOMAIN"
echo "  6. Service: http://$EXPLORER_IP:$EXPLORER_PORT"
else
log_info "Configuring tunnel route via API..."
# Get current tunnel configuration
# NOTE(review): TUNNEL_CONFIG is fetched but never merged — the PUT below
# sends only the two ingress rules, which presumably replaces any other
# hostnames already configured on this tunnel. Confirm before reuse.
TUNNEL_CONFIG=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
# Build new ingress configuration (explorer route + catch-all 404 rule)
NEW_CONFIG=$(jq -n \
--arg hostname "$EXPLORER_DOMAIN" \
--arg service "http://$EXPLORER_IP:$EXPLORER_PORT" \
'{
config: {
ingress: [
{
hostname: $hostname,
service: $service
},
{
service: "http_status:404"
}
]
}
}')
# Update tunnel configuration
TUNNEL_UPDATE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$NEW_CONFIG")
if echo "$TUNNEL_UPDATE" | jq -e '.success' >/dev/null 2>&1; then
log_success "Tunnel route configured successfully"
else
ERROR=$(echo "$TUNNEL_UPDATE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_warn "Tunnel route API configuration failed: $ERROR"
log_info "Please configure manually in Cloudflare Zero Trust Dashboard"
fi
fi
# Step 7: SSL/TLS Configuration (automatic with Cloudflare proxy)
log_section
log_info "Step 7: SSL/TLS Configuration"
log_section
log_info "SSL/TLS is automatically handled by Cloudflare when DNS is proxied"
log_success "SSL will be enabled automatically (Universal SSL)"
# Step 8: Verify Configuration
log_section
log_info "Step 8: Verifying Configuration"
log_section
log_info "Waiting for DNS propagation (10 seconds)..."
sleep 10
# Test DNS resolution
DNS_RESULT=$(dig +short "$EXPLORER_DOMAIN" 2>/dev/null | head -1 || echo "")
if [ -n "$DNS_RESULT" ]; then
log_success "DNS resolves to: $DNS_RESULT"
if echo "$DNS_RESULT" | grep -qE "^(104\.|172\.64\.|172\.65\.|172\.66\.|172\.67\.)"; then
log_success "DNS points to Cloudflare (proxied correctly)"
fi
else
log_warn "DNS not resolving yet (may need more time)"
fi
# Test public URL
log_info "Testing public URL..."
# FIX: under 'set -e' a failed curl (DNS not propagated yet) aborted the whole
# script here; fall back to "000" so verification degrades to a warning.
# Also stop folding stderr into the captured status code (was 2>&1).
PUBLIC_HTTP=$(curl -s -o /dev/null -w "%{http_code}" "https://$EXPLORER_DOMAIN/api/v2/stats" 2>/dev/null || echo "000")
if [ "$PUBLIC_HTTP" = "200" ]; then
log_success "Public URL: HTTP 200 - Working!"
PUBLIC_RESPONSE=$(curl -s "https://$EXPLORER_DOMAIN/api/v2/stats" 2>/dev/null || echo "")
if echo "$PUBLIC_RESPONSE" | grep -q -E "total_blocks|chain_id"; then
log_success "Public API: Valid response"
echo "$PUBLIC_RESPONSE" | jq -r '.total_blocks, .total_transactions, .total_addresses' 2>/dev/null || echo "$PUBLIC_RESPONSE" | head -5
fi
elif [ "$PUBLIC_HTTP" = "404" ]; then
log_warn "Public URL: HTTP 404 - May need more time for DNS/tunnel propagation"
elif [ "$PUBLIC_HTTP" = "502" ]; then
log_warn "Public URL: HTTP 502 - Tunnel routing issue, check tunnel route configuration"
else
log_warn "Public URL: HTTP $PUBLIC_HTTP"
fi
# Final Summary
echo ""
log_section
log_info "  CONFIGURATION SUMMARY"
log_section
echo ""
log_success "✓ Cloudflared service: Installed and running"
log_success "✓ Tunnel ID: $TUNNEL_ID"
# FIX: domain and target previously ran together with no separator in this line.
log_success "✓ DNS Record: $EXPLORER_DOMAIN → $TARGET (🟠 Proxied)"
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then
log_success "✓ Tunnel Route: Configured via API"
else
log_warn "⚠ Tunnel Route: Manual configuration required"
fi
log_success "✓ SSL/TLS: Automatic (Cloudflare Universal SSL)"
echo ""
log_info "Configuration complete!"
log_info ""
log_info "Access your explorer at:"
echo "  https://$EXPLORER_DOMAIN"
echo ""
log_info "If public URL is not working yet, wait 1-5 minutes for DNS propagation"
echo ""

View File

@@ -0,0 +1,272 @@
#!/usr/bin/env bash
# Complete Cloudflare configuration for Blockscout Explorer
# Attempts API configuration, falls back to manual instructions
set -euo pipefail
# Load IP configuration (best-effort: defaults below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# NOTE(review): trailing "0" maps IP_DEVICE_14 (…14) to the explorer host (…140)
# — presumably intentional, confirm. Redundant self-nested fallbacks collapsed.
EXPLORER_IP="${EXPLORER_IP:-${IP_DEVICE_14:-192.168.11.14}0}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
TUNNEL_ID="${TUNNEL_ID:-10ab22da-8ea3-4e2e-a896-27ece2211a05}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_section() { echo -e "${CYAN}════════════════════════════════════════${NC}"; }
# SCRIPT_DIR was already computed above; reuse it for the .env lookup.
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"
log_section
log_info "Cloudflare Configuration for Blockscout Explorer"
log_section
echo ""
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
log_info "Tunnel ID: $TUNNEL_ID"
echo ""
# Load environment variables if .env exists
if [ -f "$ENV_FILE" ]; then
source "$ENV_FILE"
fi
# Default to empty so 'set -u' is safe when .env is absent
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
# Check if we can use API (support both API Token and API Key methods)
USE_API=false
AUTH_METHOD=""
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
USE_API=true
AUTH_METHOD="token"
log_info "API Token found - attempting automated configuration..."
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
USE_API=true
AUTH_METHOD="key"
log_info "API Key + Email found - attempting automated configuration..."
else
log_warn "No API credentials found - will provide manual instructions"
fi
# FIX: removed the dead AUTH_HEADER variable that used to be built here. It was
# never referenced (every API call below builds its own headers from
# AUTH_METHOD), and its key-mode value packed TWO headers into one string,
# which would have been sent as a single malformed header if ever used.
# Function to configure DNS via API
# Upserts a proxied CNAME "explorer" -> <tunnel-id>.cfargotunnel.com in the
# given zone. If a record of another type exists under the same name it is
# deleted and recreated as a CNAME (Cloudflare cannot change type in place).
# Arguments: $1 - Cloudflare zone ID
# Globals:   TUNNEL_ID, EXPLORER_DOMAIN, AUTH_METHOD, CLOUDFLARE_* (read)
# Returns:   0 on API success, 1 on failure
configure_dns_api() {
local zone_id="$1"
local target="${TUNNEL_ID}.cfargotunnel.com"
log_info "Configuring DNS record via API..."
# Build curl headers based on auth method
local curl_headers=(-H "Content-Type: application/json")
if [ "$AUTH_METHOD" = "token" ]; then
curl_headers+=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
elif [ "$AUTH_METHOD" = "key" ]; then
curl_headers+=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL")
curl_headers+=(-H "X-Auth-Key: $CLOUDFLARE_API_KEY")
fi
# Check if record exists (any type)
# NOTE(review): 'local x=$(cmd)' masks curl's exit status; a network failure
# surfaces later as a jq parse miss rather than aborting under 'set -e'.
local response=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records?name=$EXPLORER_DOMAIN" \
"${curl_headers[@]}")
local record_id=$(echo "$response" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
local existing_type=$(echo "$response" | jq -r '.result[0].type // empty' 2>/dev/null || echo "")
# Payload built with jq so values are JSON-escaped; ttl 1 means "automatic".
local data=$(jq -n \
--arg name "explorer" \
--arg target "$target" \
'{
type: "CNAME",
name: $name,
content: $target,
proxied: true,
ttl: 1
}')
if [ -n "$record_id" ] && [ "$record_id" != "null" ]; then
log_info "Found existing DNS record (type: ${existing_type:-unknown}, ID: $record_id)"
if [ "$existing_type" != "CNAME" ]; then
log_warn "Existing record is type $existing_type, deleting and creating CNAME..."
curl -s -X DELETE "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$record_id" \
"${curl_headers[@]}" >/dev/null 2>&1
log_info "Creating new CNAME record..."
response=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records" \
"${curl_headers[@]}" \
--data "$data")
else
log_info "Updating existing CNAME record..."
response=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$record_id" \
"${curl_headers[@]}" \
--data "$data")
fi
else
log_info "Creating new DNS record..."
response=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records" \
"${curl_headers[@]}" \
--data "$data")
fi
if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
log_success "DNS record configured successfully"
return 0
else
local error=$(echo "$response" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_error "Failed to configure DNS: $error"
return 1
fi
}
# Try API configuration if credentials available
# Sets DNS_CONFIGURED=true only when the zone lookup and the DNS upsert both
# succeed; every other path falls through to the manual instructions below.
if [ "$USE_API" = "true" ]; then
log_section
log_info "Step 1: Getting Zone ID"
log_section
# Use provided ZONE_ID if available, otherwise fetch it
if [ -n "${CLOUDFLARE_ZONE_ID:-}" ]; then
ZONE_ID="$CLOUDFLARE_ZONE_ID"
log_info "Using provided Zone ID: $ZONE_ID"
else
# Build curl command based on auth method
if [ "$AUTH_METHOD" = "token" ]; then
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
elif [ "$AUTH_METHOD" = "key" ]; then
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "X-Auth-Email: $CLOUDFLARE_EMAIL" \
-H "X-Auth-Key: $CLOUDFLARE_API_KEY" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
fi
if [ -n "$ZONE_ID" ] && [ "$ZONE_ID" != "null" ]; then
log_success "Zone ID: $ZONE_ID"
log_section
log_info "Step 2: Configuring DNS Record"
log_section
if configure_dns_api "$ZONE_ID"; then
log_success "DNS configuration complete via API!"
DNS_CONFIGURED=true
else
log_warn "API DNS configuration failed, falling back to manual"
DNS_CONFIGURED=false
fi
else
log_error "Failed to get zone ID"
DNS_CONFIGURED=false
fi
else
DNS_CONFIGURED=false
fi
# Tunnel route configuration (always requires manual or complex API)
# The ingress-rules API is not attempted here; only dashboard steps are printed.
log_section
log_info "Step 3: Tunnel Route Configuration"
log_section
log_warn "Tunnel route configuration requires manual setup in Cloudflare Zero Trust Dashboard"
echo ""
log_info "Instructions:"
echo ""
echo "1. Go to: https://one.dash.cloudflare.com/"
echo "2. Navigate to: Zero Trust → Networks → Tunnels"
echo "3. Select your tunnel (ID: $TUNNEL_ID)"
echo "4. Click 'Configure' → 'Public Hostnames'"
echo "5. Click 'Add a public hostname'"
echo "6. Configure:"
echo "   - Subdomain: explorer"
echo "   - Domain: $DOMAIN"
echo "   - Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo "   - Type: HTTP"
echo "7. Click 'Save hostname'"
echo ""
# Manual DNS instructions if API didn't work
if [ "$DNS_CONFIGURED" != "true" ]; then
log_section
log_info "Step 2: DNS Record Configuration (Manual)"
log_section
log_info "Go to: https://dash.cloudflare.com/"
log_info "Navigate to: $DOMAIN → DNS → Records → Add record"
echo ""
echo "Configure:"
echo "  Type: CNAME"
echo "  Name: explorer"
echo "  Target: ${TUNNEL_ID}.cfargotunnel.com"
echo "  Proxy status: 🟠 Proxied (orange cloud) - REQUIRED"
echo "  TTL: Auto"
echo ""
log_warn "IMPORTANT: Proxy must be enabled (orange cloud) for tunnel to work!"
echo ""
fi
# Summary
log_section
log_info "Configuration Summary"
log_section
if [ "$DNS_CONFIGURED" = "true" ]; then
log_success "DNS Record: ✅ Configured via API"
else
log_warn "DNS Record: ⚠️ Needs manual configuration"
fi
log_warn "Tunnel Route: ⚠️ Needs manual configuration"
echo ""
log_info "Configuration Details:"
echo "  Domain: $EXPLORER_DOMAIN"
echo "  DNS Target: ${TUNNEL_ID}.cfargotunnel.com"
echo "  Tunnel Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo ""
# Verification instructions
log_section
log_info "Verification"
log_section
log_info "After configuration, wait 1-5 minutes for DNS propagation, then test:"
echo ""
echo "  curl -I https://$EXPLORER_DOMAIN"
echo "  curl https://$EXPLORER_DOMAIN/health"
echo ""
if [ "$DNS_CONFIGURED" = "true" ]; then
log_success "Configuration complete! DNS configured, tunnel route pending manual setup."
else
log_warn "Configuration pending. Please complete DNS and tunnel route setup manually."
fi
echo ""

View File

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
# Manual Cloudflare configuration instructions for Blockscout Explorer
# This script provides instructions and can help check existing configuration
set -euo pipefail
# Load IP configuration (best-effort: default below covers a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
EXPLORER_DOMAIN="explorer.d-bis.org"
# FIX: the config file is sourced best-effort above, but IP_BLOCKSCOUT was then
# expanded without a default — under 'set -u' a missing config file aborted the
# script with "unbound variable". Fall back to the explorer host used by the
# sibling scripts.
EXPLORER_IP="${IP_BLOCKSCOUT:-192.168.11.140}"
EXPLORER_PORT="80"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_section() { echo -e "${CYAN}════════════════════════════════════════${NC}"; }
log_section
log_info "Cloudflare Configuration for Blockscout Explorer"
log_section
echo ""
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
echo ""
# Try to get tunnel information
# Best-effort: ask cloudflared inside LXC 102 for its tunnel list and pull the
# first UUID out of the output; fall back to a placeholder for the instructions.
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
log_section
log_info "Step 1: Get Tunnel ID"
log_section
log_info "Checking tunnel configuration on VMID 102..."
TUNNEL_INFO=$(ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 102 -- cloudflared tunnel list 2>&1" | head -10 || echo "")
if [ -n "$TUNNEL_INFO" ]; then
echo "$TUNNEL_INFO"
# NOTE(review): grep -oP (PCRE) is GNU-only; this match fails silently on
# BSD/macOS grep and the placeholder path is taken instead.
TUNNEL_ID=$(echo "$TUNNEL_INFO" | grep -oP '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' | head -1 || echo "")
if [ -n "$TUNNEL_ID" ]; then
log_success "Found tunnel ID: $TUNNEL_ID"
TUNNEL_TARGET="${TUNNEL_ID}.cfargotunnel.com"
else
log_warn "Could not extract tunnel ID from output"
TUNNEL_TARGET="<tunnel-id>.cfargotunnel.com"
fi
else
log_warn "Could not get tunnel information"
log_info "You can find your tunnel ID in Cloudflare Zero Trust Dashboard:"
log_info "  https://one.dash.cloudflare.com/ → Zero Trust → Networks → Tunnels"
TUNNEL_TARGET="<tunnel-id>.cfargotunnel.com"
fi
echo ""
log_section
log_info "Step 2: Configure DNS Record"
log_section
log_info "Go to Cloudflare DNS Dashboard:"
log_info "  https://dash.cloudflare.com/ → Select domain 'd-bis.org' → DNS → Records"
echo ""
log_info "Create CNAME record:"
echo ""
echo "  Type: CNAME"
echo "  Name: explorer"
echo "  Target: $TUNNEL_TARGET"
echo "  Proxy status: 🟠 Proxied (orange cloud) - REQUIRED"
echo "  TTL: Auto"
echo ""
log_warn "IMPORTANT: Proxy must be enabled (orange cloud) for tunnel to work!"
echo ""
log_section
log_info "Step 3: Configure Tunnel Route"
log_section
log_info "Go to Cloudflare Zero Trust Dashboard:"
log_info "  https://one.dash.cloudflare.com/"
log_info "  Navigate to: Zero Trust → Networks → Tunnels"
echo ""
log_info "Select your tunnel, then click 'Configure' → 'Public Hostnames'"
log_info "Add a new hostname:"
echo ""
echo "  Subdomain: explorer"
echo "  Domain: d-bis.org"
echo "  Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo "  Type: HTTP"
echo ""
log_info "Click 'Save hostname'"
echo ""
log_section
log_info "Step 4: Verify Configuration"
log_section
log_info "After configuration, test with:"
echo ""
echo "  # Wait 1-5 minutes for DNS propagation"
echo "  dig $EXPLORER_DOMAIN"
echo "  curl https://$EXPLORER_DOMAIN/health"
echo ""
log_section
log_info "Current Status Check"
log_section
log_info "Checking if DNS record exists..."
# DNS_CHECK may hold up to three answer lines; the timeout comparison below
# only matches when dig produced exactly that single error line.
DNS_CHECK=$(dig +short "$EXPLORER_DOMAIN" 2>&1 | head -3 || echo "")
if [ -n "$DNS_CHECK" ] && [ "$DNS_CHECK" != ";; connection timed out; no servers could be reached" ]; then
log_success "DNS record exists: $DNS_CHECK"
else
log_warn "DNS record not found or not yet propagated"
fi
log_info "Testing HTTPS endpoint..."
HTTP_TEST=$(curl -I -s --max-time 10 "https://$EXPLORER_DOMAIN" 2>&1 | head -5 || echo "")
# Classify the response status: 200 OK, 522 = Cloudflare cannot reach the
# tunnel origin, 404 = DNS resolves but no matching tunnel hostname rule.
if echo "$HTTP_TEST" | grep -q "HTTP/2 200\|HTTP/1.1 200"; then
log_success "HTTPS endpoint is working!"
elif echo "$HTTP_TEST" | grep -q "HTTP/2 522"; then
log_warn "HTTP 522 (Connection Timeout) - Tunnel may not be configured yet"
elif echo "$HTTP_TEST" | grep -q "HTTP/2 404"; then
log_warn "HTTP 404 - DNS configured but tunnel route may be missing"
else
log_warn "Endpoint not accessible: $HTTP_TEST"
fi
echo ""
log_success "Configuration instructions complete!"
echo ""
log_info "Summary:"
log_info "  1. DNS: CNAME explorer → $TUNNEL_TARGET (🟠 Proxied)"
log_info "  2. Tunnel: explorer.d-bis.org → http://$EXPLORER_IP:$EXPLORER_PORT"
log_info "  3. Test: curl https://$EXPLORER_DOMAIN/health"
echo ""

View File

@@ -0,0 +1,197 @@
#!/usr/bin/env bash
# Configure Cloudflare DNS and tunnel for Blockscout Explorer
# Usage: ./configure-cloudflare-explorer.sh
set -euo pipefail
# Load IP configuration (best-effort: defaults below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# NOTE(review): trailing "0" maps IP_DEVICE_14 (…14) to the explorer host (…140)
# — presumably intentional, confirm. Redundant self-nested fallbacks collapsed.
EXPLORER_IP="${EXPLORER_IP:-${IP_DEVICE_14:-192.168.11.14}0}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Check for .env file with Cloudflare credentials
# NOTE(review): unlike the sibling scripts, ENV_FILE defaults to the current
# working directory's .env, not the repo root — confirm this is intended.
ENV_FILE="${ENV_FILE:-.env}"
if [ ! -f "$ENV_FILE" ]; then
log_error "Environment file not found: $ENV_FILE"
log_info "Please create $ENV_FILE with:"
log_info "  CLOUDFLARE_API_TOKEN=your-token"
log_info "  DOMAIN=d-bis.org"
log_info "  TUNNEL_TOKEN=your-tunnel-token"
exit 1
fi
source "$ENV_FILE"
if [ -z "${CLOUDFLARE_API_TOKEN:-}" ]; then
log_error "CLOUDFLARE_API_TOKEN not set in $ENV_FILE"
exit 1
fi
log_info "Configuring Cloudflare for Blockscout Explorer"
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
# Get Zone ID
log_info "Getting zone ID for $DOMAIN..."
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty')
if [ -z "$ZONE_ID" ] || [ "$ZONE_ID" = "null" ]; then
log_error "Failed to get zone ID for $DOMAIN"
exit 1
fi
log_success "Zone ID: $ZONE_ID"
# Extract tunnel ID from tunnel token or configuration
# The connector token, when base64-decodable, is JSON with a TunnelID field;
# otherwise fall back to listing tunnels on the first account via the API.
TUNNEL_ID=""
if [ -n "${TUNNEL_TOKEN:-}" ]; then
# Try to extract tunnel ID from token (if it's in the format we expect)
TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.TunnelID // empty' 2>/dev/null || echo "")
fi
# If no tunnel ID found, try to get it from Cloudflare API
if [ -z "$TUNNEL_ID" ]; then
log_info "Getting tunnel information..."
ACCOUNT_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty')
if [ -n "$ACCOUNT_ID" ] && [ "$ACCOUNT_ID" != "null" ]; then
# NOTE(review): blindly takes the FIRST tunnel on the account — confirm
# this is the explorer tunnel when multiple tunnels exist.
TUNNELS=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$ACCOUNT_ID/cfd_tunnel" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json")
TUNNEL_ID=$(echo "$TUNNELS" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
fi
# Check if DNS record already exists
log_info "Checking for existing DNS record..."
EXISTING_RECORD=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?name=$EXPLORER_DOMAIN&type=CNAME" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0] // empty')
# Decide the record target and type once, for both the update and create paths.
# FIX: the update path previously always sent type CNAME even when falling back
# to a bare IP, which is an invalid CNAME content — an IP fallback must be an
# A record.
if [ -n "$TUNNEL_ID" ] && [ "$TUNNEL_ID" != "null" ]; then
TARGET="${TUNNEL_ID}.cfargotunnel.com"
RECORD_TYPE="CNAME"
log_info "Using Cloudflare Tunnel: $TARGET"
else
TARGET="$EXPLORER_IP"
RECORD_TYPE="A"
log_warn "No tunnel ID found, using A record with direct IP (may not work behind NAT)"
fi
# Build the JSON payload with jq, consistent with the sibling scripts; this
# also avoids broken/injected JSON from interpolating shell variables into a
# literal. ttl 1 means "automatic".
RECORD_DATA=$(jq -n \
--arg type "$RECORD_TYPE" \
--arg target "$TARGET" \
'{
type: $type,
name: "explorer",
content: $target,
proxied: true,
ttl: 1
}')
if [ -n "$EXISTING_RECORD" ] && [ "$EXISTING_RECORD" != "null" ]; then
RECORD_ID=$(echo "$EXISTING_RECORD" | jq -r '.id')
log_warn "DNS record already exists (ID: $RECORD_ID)"
log_info "Updating existing record..."
UPDATE_RESULT=$(curl -s -X PATCH "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" \
--data "$RECORD_DATA")
SUCCESS=$(echo "$UPDATE_RESULT" | jq -r '.success // false')
if [ "$SUCCESS" = "true" ]; then
log_success "DNS record updated successfully"
else
ERROR=$(echo "$UPDATE_RESULT" | jq -r '.errors[0].message // "Unknown error"')
log_error "Failed to update DNS record: $ERROR"
exit 1
fi
else
log_info "Creating new DNS record..."
CREATE_RESULT=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" \
--data "$RECORD_DATA")
SUCCESS=$(echo "$CREATE_RESULT" | jq -r '.success // false')
if [ "$SUCCESS" = "true" ]; then
log_success "DNS record created successfully"
else
ERROR=$(echo "$CREATE_RESULT" | jq -r '.errors[0].message // "Unknown error"')
log_error "Failed to create DNS record: $ERROR"
exit 1
fi
fi
# If we have a tunnel, print the route-configuration instructions.
if [ -n "$TUNNEL_ID" ] && [ "$TUNNEL_ID" != "null" ] && [ -n "${ACCOUNT_ID:-}" ]; then
log_info "Configuring Cloudflare Tunnel route..."
# NOTE: the tunnel ingress-rules API is complex, so route setup stays manual
# for now. FIX: removed a GET of the tunnel config whose result was never
# used (a dead network round-trip).
log_info ""
log_info "=== Cloudflare Tunnel Configuration Required ==="
log_info ""
log_info "Please configure the tunnel route manually in Cloudflare Zero Trust Dashboard:"
log_info "  1. Go to: https://one.dash.cloudflare.com/"
log_info "  2. Navigate to: Zero Trust → Networks → Tunnels"
log_info "  3. Select your tunnel (ID: $TUNNEL_ID)"
log_info "  4. Click 'Configure' → 'Public Hostnames'"
log_info "  5. Add hostname:"
log_info "     - Subdomain: explorer"
log_info "     - Domain: $DOMAIN"
log_info "     - Service: http://$EXPLORER_IP:$EXPLORER_PORT"
log_info "     - Type: HTTP"
log_info ""
fi
log_success "Cloudflare configuration complete!"
log_info ""
log_info "Summary:"
# FIX: the summary previously ran the domain and target together with no
# separator ("$EXPLORER_DOMAIN$TARGET").
log_info "  - DNS Record: $EXPLORER_DOMAIN → $TARGET (🟠 Proxied)"
if [ -n "$TUNNEL_ID" ]; then
log_info "  - Tunnel ID: $TUNNEL_ID"
log_info "  - Tunnel Route: Needs manual configuration (see above)"
fi
log_info ""
log_info "Next steps:"
log_info "  1. Wait for DNS propagation (1-5 minutes)"
log_info "  2. Test: curl -I https://$EXPLORER_DOMAIN"
View File

@@ -0,0 +1,149 @@
#!/usr/bin/env bash
# Configure Cloudflare Tunnel Route for explorer.d-bis.org
# Usage: ./configure-cloudflare-tunnel-route.sh
set -euo pipefail

# Resolve this script's directory once (a second, identical assignment was
# removed) and load the shared IP map; the file is optional because every
# value it provides has a fallback below.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# Explorer origin. The default derives from the device address plus a
# trailing "0" (192.168.11.14 -> 192.168.11.140). The "0" now sits INSIDE
# the default expansion: the previous code appended it unconditionally,
# which also mangled an explicitly exported EXPLORER_IP.
EXPLORER_IP="${EXPLORER_IP:-${IP_DEVICE_14:-192.168.11.14}0}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
TUNNEL_ID="${TUNNEL_ID:-10ab22da-8ea3-4e2e-a896-27ece2211a05}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Load environment variables (API credentials, optional overrides).
if [ -f "$ENV_FILE" ]; then
    source "$ENV_FILE"
fi
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"

# Determine auth method: prefer a scoped API token; fall back to the legacy
# global key + email pair. AUTH_HEADERS is an array so the header values
# survive word-splitting when passed to curl.
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
    AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
    AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
else
    log_error "No Cloudflare API credentials found"
    exit 1
fi
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_error "CLOUDFLARE_ACCOUNT_ID not set"
    exit 1
fi

log_info "Configuring tunnel route for $EXPLORER_DOMAIN"
log_info "Tunnel ID: $TUNNEL_ID"
if [ "$EXPLORER_PORT" = "443" ]; then
    log_info "Service: https://$EXPLORER_IP:$EXPLORER_PORT"
else
    log_info "Service: http://$EXPLORER_IP:$EXPLORER_PORT"
fi
# Get current tunnel configuration
# (GET .../cfd_tunnel/{id}/configurations returns {result:{config:{ingress:[...]}}}).
log_info "Fetching current tunnel configuration..."
CURRENT_CONFIG=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
    "${AUTH_HEADERS[@]}" \
    -H "Content-Type: application/json")
# jq -e exits non-zero when .success is false/null, so this also catches
# auth failures and malformed responses.
if ! echo "$CURRENT_CONFIG" | jq -e '.success' >/dev/null 2>&1; then
    log_error "Failed to fetch tunnel configuration"
    echo "$CURRENT_CONFIG" | jq '.' 2>/dev/null || echo "$CURRENT_CONFIG"
    exit 1
fi
# Extract current ingress rules (compact JSON; empty array when unset).
CURRENT_INGRESS=$(echo "$CURRENT_CONFIG" | jq -c '.result.config.ingress // []')
# Check if explorer route already exists; if so, drop it here so the
# rebuild below effectively replaces it rather than duplicating it.
if echo "$CURRENT_INGRESS" | jq -e ".[] | select(.hostname == \"$EXPLORER_DOMAIN\")" >/dev/null 2>&1; then
    log_warn "Route for $EXPLORER_DOMAIN already exists"
    log_info "Updating existing route..."
    # Remove existing route
    CURRENT_INGRESS=$(echo "$CURRENT_INGRESS" | jq "[.[] | select(.hostname != \"$EXPLORER_DOMAIN\")]")
fi
# Determine if HTTPS (port 443); SERVICE_URL is reused in the logs below.
if [ "$EXPLORER_PORT" = "443" ]; then
    SERVICE_URL="https://$EXPLORER_IP:$EXPLORER_PORT"
else
    SERVICE_URL="http://$EXPLORER_IP:$EXPLORER_PORT"
fi
# Build explorer route as array element (a single-element JSON array so it
# can be concatenated with the other rule arrays below). noTLSVerify lets
# the tunnel talk to a self-signed origin.
EXPLORER_ROUTE=$(jq -n \
    --arg hostname "$EXPLORER_DOMAIN" \
    --arg service "$SERVICE_URL" \
    '[{
    hostname: $hostname,
    service: $service,
    originRequest: {
        noTLSVerify: true
    }
    }]')
# Separate catch-all from other rules
# Catch-all has no hostname and service starting with http_status
CATCH_ALL=$(echo "$CURRENT_INGRESS" | jq '[.[] | select(.hostname == null or .hostname == "" or (.service | startswith("http_status")))]')
OTHER_ROUTES=$(echo "$CURRENT_INGRESS" | jq '[.[] | select(.hostname != null and .hostname != "" and (.service | startswith("http_status") | not))]')
# Build new ingress: explorer route + other routes + catch-all
# If no catch-all exists, add one — the tunnel config requires the last
# ingress rule to be a hostname-less catch-all (TODO confirm against the
# Cloudflare tunnel configuration docs).
if [ "$(echo "$CATCH_ALL" | jq 'length')" -eq 0 ]; then
    CATCH_ALL='[{"service":"http_status:404"}]'
fi
# Concatenate arrays properly (jq `+` on arrays preserves order:
# explorer first, then the surviving routes, catch-all last).
NEW_INGRESS=$(jq -n --argjson explorer "$EXPLORER_ROUTE" --argjson others "$OTHER_ROUTES" --argjson catchall "$CATCH_ALL" '$explorer + $others + $catchall')
# Build complete config payload for the PUT below.
NEW_CONFIG=$(jq -n \
    --argjson ingress "$NEW_INGRESS" \
    '{
    config: {
        ingress: $ingress
    }
    }')
log_info "Updating tunnel configuration..."
# Push the rebuilt ingress list back to the tunnel; this PUT replaces the
# whole configuration, which is why the full rule set was reassembled above.
RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
    "${AUTH_HEADERS[@]}" \
    -H "Content-Type: application/json" \
    --data "$NEW_CONFIG")
if echo "$RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
    log_success "Tunnel route configured successfully!"
    # Log the URL actually configured — SERVICE_URL honours the HTTPS branch
    # for EXPLORER_PORT=443; the previous message hardcoded "http://".
    log_info "Route: $EXPLORER_DOMAIN → $SERVICE_URL"
    exit 0
else
    ERROR=$(echo "$RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
    log_error "Failed to configure tunnel route: $ERROR"
    echo "$RESPONSE" | jq '.' 2>/dev/null || echo "$RESPONSE"
    exit 1
fi

View File

@@ -0,0 +1,62 @@
#!/bin/bash
# Configure network interfaces for all reassigned containers
set -uo pipefail

# Fail fast with an explicit message — plain ${PROXMOX_HOST_R630_01} under
# "set -u" aborted with a bare "unbound variable" error when the env was
# not loaded.
NODE_IP="${PROXMOX_HOST_R630_01:?PROXMOX_HOST_R630_01 must be set (Proxmox node IP)}"

# Container VMID -> IP mappings. Each entry takes an environment override
# with a hard-coded fallback. The previous triple-nested identical
# fallbacks (${X:-${X:-${X:-ip}}}) collapse to the same value and were
# simplified.
declare -A container_ips=(
    ["10000"]="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
    ["10001"]="${ORDER_POSTGRES_REPLICA:-192.168.11.45}"
    ["10020"]="${ORDER_REDIS_IP:-192.168.11.38}"
    ["10030"]="192.168.11.40"
    ["10040"]="192.168.11.41"
    ["10050"]="192.168.11.49"
    ["10060"]="192.168.11.42"
    ["10070"]="${IP_SERVICE_50:-192.168.11.50}"
    ["10080"]="192.168.11.43"
    ["10090"]="${IP_SERVICE_36:-192.168.11.36}"
    ["10091"]="${IP_SERVICE_35:-192.168.11.35}"
    ["10092"]="${IP_MIM_WEB:-192.168.11.37}"
    ["10200"]="192.168.11.46"
    ["10201"]="192.168.11.47"
    ["10202"]="192.168.11.48"
    ["10210"]="192.168.11.39"
    ["10230"]="${IP_SERVICE_51:-192.168.11.51}"
    ["10232"]="192.168.11.52"
)
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"

echo "═══════════════════════════════════════════════════════════"
echo "Configuring Network Interfaces for Reassigned Containers"
echo "═══════════════════════════════════════════════════════════"
echo ""
SUCCESS=0
FAILED=0
for vmid in "${!container_ips[@]}"; do
    ip="${container_ips[$vmid]}"
    echo "Configuring CT $vmid ($ip)..."
    # Bring the interface up, add the address (ignore "already assigned"),
    # then install the default route — all inside the container via pct.
    if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no "root@${NODE_IP}" \
        "pct exec $vmid -- sh -c 'ip link set eth0 up && ip addr add $ip/24 dev eth0 2>/dev/null; ip route add default via $GATEWAY dev eth0 2>/dev/null'" 2>&1; then
        echo "  ✅ Network configured"
        SUCCESS=$((SUCCESS + 1))  # safe at 0, unlike ((SUCCESS++)) which returns 1
    else
        echo "  ❌ Failed to configure network"
        FAILED=$((FAILED + 1))
    fi
done
echo ""
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo "  Success: $SUCCESS"
echo "  Failed: $FAILED"
echo "  Total: ${#container_ips[@]}"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,62 @@
#!/bin/bash
# Configure network interfaces for all reassigned containers
# (hard-coded variant of the env-driven script; targets node 192.168.11.11).
set -uo pipefail
NODE_IP="192.168.11.11"
# Container IP mappings: VMID -> static address on the 192.168.11.0/24 LAN.
# A few entries accept env overrides (ORDER_*); the rest are fixed.
declare -A container_ips=(
    ["10000"]="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
    ["10001"]="${ORDER_POSTGRES_REPLICA:-192.168.11.45}"
    ["10020"]="${ORDER_REDIS_IP:-192.168.11.38}"
    ["10030"]="192.168.11.40"
    ["10040"]="192.168.11.41"
    ["10050"]="192.168.11.49"
    ["10060"]="192.168.11.42"
    ["10070"]="192.168.11.50"
    ["10080"]="192.168.11.43"
    ["10090"]="192.168.11.36"
    ["10091"]="192.168.11.35"
    ["10092"]="192.168.11.37"
    ["10200"]="192.168.11.46"
    ["10201"]="192.168.11.47"
    ["10202"]="192.168.11.48"
    ["10210"]="192.168.11.39"
    ["10230"]="192.168.11.51"
    ["10232"]="192.168.11.52"
)
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
echo "═══════════════════════════════════════════════════════════"
echo "Configuring Network Interfaces for Reassigned Containers"
echo "═══════════════════════════════════════════════════════════"
echo ""
SUCCESS=0
FAILED=0
# Iterate all VMIDs (associative-array order is unspecified; each container
# is configured independently, so order does not matter).
for vmid in "${!container_ips[@]}"; do
    ip="${container_ips[$vmid]}"
    echo "Configuring CT $vmid ($ip)..."
    # Bring up interface and configure IP: link up, add /24 address
    # (errors such as "already assigned" are suppressed), then add the
    # default route. Runs inside the container via `pct exec` over ssh.
    if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
        "pct exec $vmid -- sh -c 'ip link set eth0 up && ip addr add $ip/24 dev eth0 2>/dev/null; ip route add default via $GATEWAY dev eth0 2>/dev/null'" 2>&1; then
        echo "  ✅ Network configured"
        ((SUCCESS++))
    else
        echo "  ❌ Failed to configure network"
        ((FAILED++))
    fi
done
echo ""
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo "  Success: $SUCCESS"
echo "  Failed: $FAILED"
echo "  Total: ${#container_ips[@]}"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,54 @@
#!/bin/bash
set -euo pipefail
# Load IP configuration (optional; the values used below have fallbacks,
# except PROXMOX_HOST_R630_01 — under "set -u" this script aborts if the
# config file does not define it).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Configure Service Dependencies for DBIS Services
NODE_IP="${PROXMOX_HOST_R630_01}"
log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }
echo "Configuring DBIS service dependencies..."
# DBIS service IPs
POSTGRES_IP="${DBIS_POSTGRES_PRIMARY:-192.168.11.105}"
REDIS_IP="192.168.11.120"
# NOTE(review): credential checked into the script — rotate this value and
# load it from the environment or a secrets store instead.
DB_PASSWORD="8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771"
# Rewrite DATABASE_URL/REDIS_URL/DB_HOST/REDIS_HOST in every .env found in
# the two DBIS API containers. Quoting layers: the ssh argument is
# double-quoted locally, so ${DB_PASSWORD}/${POSTGRES_IP}/${REDIS_IP}/$vmid
# expand HERE, while the escaped \$envfile survives to run remotely; the
# quoted 'CONFIG_EOF' delimiter keeps the remote host's shell from
# expanding the here-doc body before `pct enter` feeds it to the
# container's shell.
for vmid in 10150 10151; do
    log_info "Configuring dependencies for CT $vmid..."
    ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter $vmid <<'CONFIG_EOF'
# Update .env files
find /opt /home /root -name \".env\" -type f 2>/dev/null | while read envfile; do
    [ -r \"\$envfile\" ] && {
        sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://dbis:${DB_PASSWORD}@${POSTGRES_IP}:5432/dbis_core|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${REDIS_IP}:6379|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|DB_HOST=.*|DB_HOST=${POSTGRES_IP}|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|REDIS_HOST=.*|REDIS_HOST=${REDIS_IP}|g\" \"\$envfile\" 2>/dev/null || true
    }
done
echo \"Dependencies configured for CT $vmid\"
CONFIG_EOF
" && log_success "Dependencies configured for CT $vmid" || log_info "Configuration updated for CT $vmid"
done
# Configure frontend: point the web container (CT 10130) at the API host.
log_info "Configuring frontend dependencies..."
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter 10130 <<'CONFIG_EOF'
find /opt /home /root -name \".env*\" -type f 2>/dev/null | while read envfile; do
    [ -r \"\$envfile\" ] && {
        sed -i \"s|VITE_API_BASE_URL=.*|VITE_API_BASE_URL=http://${IP_DBIS_API:-192.168.11.155}:3000|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|NEXT_PUBLIC_API_URL=.*|NEXT_PUBLIC_API_URL=http://${IP_DBIS_API:-192.168.11.155}:3000|g\" \"\$envfile\" 2>/dev/null || true
    }
done
echo \"Frontend dependencies configured\"
CONFIG_EOF
" && log_success "Frontend dependencies configured"
echo "DBIS service dependencies configured!"

View File

@@ -0,0 +1,48 @@
#!/bin/bash
set -euo pipefail
# Configure Service Dependencies for DBIS Services
# (hard-coded variant of the env-driven script; targets node 192.168.11.11).
NODE_IP="192.168.11.11"
log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }
echo "Configuring DBIS service dependencies..."
# DBIS service IPs
POSTGRES_IP="192.168.11.105"
REDIS_IP="192.168.11.120"
# NOTE(review): credential checked into the script — rotate this value and
# load it from the environment or a secrets store instead.
DB_PASSWORD="8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771"
# Rewrite DATABASE_URL/REDIS_URL/DB_HOST/REDIS_HOST in every .env inside
# the two API containers. The ssh argument is double-quoted locally (so
# ${DB_PASSWORD} etc. expand here) while \$envfile is escaped to run
# remotely; the quoted 'CONFIG_EOF' delimiter defers expansion of the
# here-doc body to the container's shell via `pct enter`.
for vmid in 10150 10151; do
    log_info "Configuring dependencies for CT $vmid..."
    ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter $vmid <<'CONFIG_EOF'
# Update .env files
find /opt /home /root -name \".env\" -type f 2>/dev/null | while read envfile; do
    [ -r \"\$envfile\" ] && {
        sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://dbis:${DB_PASSWORD}@${POSTGRES_IP}:5432/dbis_core|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${REDIS_IP}:6379|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|DB_HOST=.*|DB_HOST=${POSTGRES_IP}|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|REDIS_HOST=.*|REDIS_HOST=${REDIS_IP}|g\" \"\$envfile\" 2>/dev/null || true
    }
done
echo \"Dependencies configured for CT $vmid\"
CONFIG_EOF
" && log_success "Dependencies configured for CT $vmid" || log_info "Configuration updated for CT $vmid"
done
# Configure frontend: point the web container (CT 10130) at the API host.
log_info "Configuring frontend dependencies..."
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter 10130 <<'CONFIG_EOF'
find /opt /home /root -name \".env*\" -type f 2>/dev/null | while read envfile; do
    [ -r \"\$envfile\" ] && {
        sed -i \"s|VITE_API_BASE_URL=.*|VITE_API_BASE_URL=http://192.168.11.155:3000|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|NEXT_PUBLIC_API_URL=.*|NEXT_PUBLIC_API_URL=http://192.168.11.155:3000|g\" \"\$envfile\" 2>/dev/null || true
    }
done
echo \"Frontend dependencies configured\"
CONFIG_EOF
" && log_success "Frontend dependencies configured"
echo "DBIS service dependencies configured!"

View File

@@ -0,0 +1,333 @@
#!/usr/bin/env bash
set -euo pipefail

# Load IP configuration (optional — every value used below has a fallback).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Configure direct route: NPMplus → Blockscout:4000 (bypassing nginx on VMID 5000)
# This creates a more direct connection to reduce 502 errors
# Usage: ./configure-direct-blockscout-route.sh

VMID=5000
# Default to the Blockscout LXC address used by the hard-coded variant of
# this script. Without the fallback, "set -u" aborted the whole run whenever
# ip-addresses.conf was absent — a situation the "|| true" on the source
# line above explicitly tolerates.
BLOCKSCOUT_IP="${IP_BLOCKSCOUT:-192.168.11.140}"
BLOCKSCOUT_PORT=4000
PROXMOX_HOST="${1:-pve2}"

echo "=========================================="
echo "Configure Direct Blockscout Route"
echo "=========================================="
echo "VMID: $VMID ($BLOCKSCOUT_IP)"
echo "Direct Port: $BLOCKSCOUT_PORT"
echo "Bypassing: Nginx on port 80"
echo "=========================================="
echo ""

# Check if we're on Proxmox host; when pct is unavailable, later pct calls
# are wrapped in an ssh hop to the Proxmox node instead.
if ! command -v pct &>/dev/null; then
    echo "⚠️  pct command not available"
    echo "   This script should be run on Proxmox host"
    EXEC_PREFIX="ssh root@$PROXMOX_HOST"
else
    EXEC_PREFIX=""
fi
# Step 1: Check if Blockscout is listening on port 4000.
# Run `ss` inside the container — locally via pct, or over ssh when this
# script runs off-node (EXEC_PREFIX set above).
echo "=== Step 1: Checking Blockscout Port Configuration ==="
if [ -n "$EXEC_PREFIX" ]; then
    PORT_CHECK=$($EXEC_PREFIX "pct exec $VMID -- ss -tlnp 2>/dev/null | grep :$BLOCKSCOUT_PORT || echo 'not found'")
else
    PORT_CHECK=$(pct exec $VMID -- ss -tlnp 2>/dev/null | grep :$BLOCKSCOUT_PORT || echo "not found")
fi
# Classify the listener address: loopback-only means NPMplus cannot reach
# the port directly; 0.0.0.0 (or any other match on the port) means it can.
# NEEDS_CONFIG drives Step 2.
if echo "$PORT_CHECK" | grep -q "127.0.0.1:$BLOCKSCOUT_PORT"; then
    echo "  ⚠️  Blockscout is listening on 127.0.0.1:$BLOCKSCOUT_PORT (localhost only)"
    echo "  💡 Need to configure it to listen on 0.0.0.0:$BLOCKSCOUT_PORT for direct access"
    NEEDS_CONFIG=true
elif echo "$PORT_CHECK" | grep -q "0.0.0.0:$BLOCKSCOUT_PORT\|:$BLOCKSCOUT_PORT.*0.0.0.0"; then
    echo "  ✅ Blockscout is already listening on 0.0.0.0:$BLOCKSCOUT_PORT (network accessible)"
    NEEDS_CONFIG=false
elif echo "$PORT_CHECK" | grep -q ":$BLOCKSCOUT_PORT"; then
    echo "  ✅ Blockscout is listening on port $BLOCKSCOUT_PORT"
    NEEDS_CONFIG=false
else
    # Nothing on the port at all: the service itself is down, so there is
    # no point continuing with routing changes.
    echo "  ❌ Blockscout is NOT listening on port $BLOCKSCOUT_PORT"
    echo "  💡 Blockscout service may not be running"
    echo ""
    echo "  To start Blockscout service:"
    echo "    pct exec $VMID -- systemctl start blockscout.service"
    exit 1
fi
echo ""
# Step 2: Check Blockscout environment/config to configure listening address.
# Only reached when Step 1 found a loopback-only listener; this step is
# diagnostic — it prints guidance rather than changing anything.
if [ "$NEEDS_CONFIG" = true ]; then
    echo "=== Step 2: Configuring Blockscout to Listen on Network ==="
    echo "  Checking Blockscout configuration..."
    # Check if Blockscout is running in Docker (any container whose name
    # contains "blockscout" except the postgres one).
    if [ -n "$EXEC_PREFIX" ]; then
        BLOCKSCOUT_CONTAINER=$($EXEC_PREFIX "pct exec $VMID -- docker ps --format '{{.Names}}' | grep blockscout | grep -v postgres | head -1" || echo "")
    else
        BLOCKSCOUT_CONTAINER=$(pct exec $VMID -- docker ps --format '{{.Names}}' | grep blockscout | grep -v postgres | head -1 || echo "")
    fi
    if [ -n "$BLOCKSCOUT_CONTAINER" ]; then
        echo "  ✅ Found Blockscout container: $BLOCKSCOUT_CONTAINER"
        echo "  💡 Blockscout in Docker typically binds to 0.0.0.0 by default"
        echo "  💡 If it's only on localhost, check docker-compose.yml or environment variables"
        echo "  💡 Look for PORT or LISTEN_ADDRESS environment variables"
    else
        echo "  ⚠️  Blockscout container not found"
        echo "  💡 Check if Blockscout is running as a system service instead"
    fi
    echo ""
fi
# Step 3: Test direct connection to Blockscout.
# Probe the stats endpoint twice: from inside the container (loopback) and
# from this host via the container's network IP. "000" stands in for a
# failed connection so the comparisons below stay simple.
echo "=== Step 3: Testing Direct Connection to Blockscout ==="
if [ -n "$EXEC_PREFIX" ]; then
    DIRECT_TEST=$($EXEC_PREFIX "pct exec $VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://127.0.0.1:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null" || echo "000")
    NETWORK_TEST=$($EXEC_PREFIX "curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null" || echo "000")
else
    DIRECT_TEST=$(pct exec $VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://127.0.0.1:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null || echo "000")
    NETWORK_TEST=$(curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null || echo "000")
fi
if [ "$DIRECT_TEST" = "200" ]; then
    echo "  ✅ Blockscout API responding on localhost (HTTP $DIRECT_TEST)"
    DIRECT_ACCESS=true
else
    echo "  ❌ Blockscout API not responding on localhost (HTTP $DIRECT_TEST)"
    DIRECT_ACCESS=false
fi
# NETWORK_ACCESS gates Steps 4-5 and the NPMplus script generation below.
if [ "$NETWORK_TEST" = "200" ]; then
    echo "  ✅ Blockscout API accessible via network IP (HTTP $NETWORK_TEST)"
    NETWORK_ACCESS=true
elif [ "$NETWORK_TEST" = "000" ]; then
    echo "  ⚠️  Blockscout API not accessible via network IP (connection refused)"
    echo "  💡 Blockscout may only be listening on localhost"
    NETWORK_ACCESS=false
else
    echo "  ⚠️  Blockscout API returned HTTP $NETWORK_TEST via network"
    NETWORK_ACCESS=false
fi
echo ""
# Step 4: Update NPMplus configuration (if network accessible).
# Purely informational: prints either the manual/automated NPMplus steps
# (when the origin is reachable) or instructions to open up Blockscout's
# listen address first.
if [ "$NETWORK_ACCESS" = true ]; then
    echo "=== Step 4: Updating NPMplus Configuration ==="
    echo "  ✅ Blockscout is network accessible"
    echo "  💡 Update NPMplus to point directly to: http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT"
    echo ""
    echo "  Manual Steps:"
    echo "  1. Log into NPMplus: https://192.168.0.166:81"
    echo "  2. Find 'explorer.d-bis.org' proxy host"
    echo "  3. Update Forward Host: $BLOCKSCOUT_IP"
    echo "  4. Update Forward Port: $BLOCKSCOUT_PORT"
    echo "  5. Save changes"
    echo ""
    echo "  Or run the automated script:"
    echo "  cd scripts/nginx-proxy-manager"
    echo "  node update-explorer-direct-route.js"
    echo ""
else
    echo "=== Step 4: Cannot Configure Direct Route ==="
    echo "  ❌ Blockscout is not network accessible"
    echo "  💡 Need to configure Blockscout to listen on 0.0.0.0:$BLOCKSCOUT_PORT"
    echo ""
    echo "  For Docker containers, check docker-compose.yml:"
    echo "  - Ensure PORT environment variable is set"
    echo "  - Check if LISTEN_ADDRESS is set to 0.0.0.0"
    echo "  - Restart Blockscout container after changes"
    echo ""
    echo "  For systemd services, check service file:"
    echo "  pct exec $VMID -- systemctl cat blockscout.service"
    echo ""
fi
# Step 5: Alternative - keep nginx in front but verify its configuration.
echo "=== Step 5: Alternative Solution (Keep Nginx) ==="
echo "  If direct route is not possible, ensure nginx is properly configured:"
echo "  pct exec $VMID -- systemctl status nginx"
echo "  pct exec $VMID -- nginx -t"
echo "  pct exec $VMID -- systemctl restart nginx"
echo ""
# Summary: current vs proposed routing topology, driven by the Step 3 probe.
echo "=========================================="
echo "SUMMARY"
echo "=========================================="
echo "Current Route:"
echo "  NPMplus → $BLOCKSCOUT_IP:80 (nginx) → 127.0.0.1:$BLOCKSCOUT_PORT (Blockscout)"
echo ""
echo "Proposed Direct Route:"
if [ "$NETWORK_ACCESS" = true ]; then
    echo "  NPMplus → $BLOCKSCOUT_IP:$BLOCKSCOUT_PORT (Blockscout directly) ✅"
    echo "  Status: Ready to configure"
else
    echo "  NPMplus → $BLOCKSCOUT_IP:$BLOCKSCOUT_PORT (Blockscout directly) ❌"
    echo "  Status: Blockscout needs network access configuration"
fi
echo ""
echo "Benefits of Direct Route:"
echo "  ✅ Removes nginx proxy layer (one less hop)"
echo "  ✅ Reduces latency"
echo "  ✅ Fewer points of failure"
echo "  ✅ Simpler architecture"
echo ""
# Create update script for NPMplus (only when the direct route is viable).
if [ "$NETWORK_ACCESS" = true ]; then
    echo "Creating NPMplus update script..."
    # Write next to the repo's other NPM automation scripts. $PROJECT_ROOT
    # (resolved at the top of this script) replaces the previous hard-coded
    # /home/intlc/... path so any checkout location works.
    NPM_SCRIPT_DIR="$PROJECT_ROOT/scripts/nginx-proxy-manager"
    NPM_SCRIPT="$NPM_SCRIPT_DIR/update-explorer-direct-route.js"
    mkdir -p "$NPM_SCRIPT_DIR"
    # The delimiter is quoted on purpose: the body contains JS template
    # literals (${icons[type]}, ${message}, ...) that must NOT be expanded
    # by this shell. The ${IP_BLOCKSCOUT} placeholders are substituted by
    # sed afterwards — previously they reached the file literally, leaving
    # NEW_TARGET as the broken URL "http://${IP_BLOCKSCOUT}:4000".
    cat > "$NPM_SCRIPT" << 'SCRIPT_EOF'
#!/usr/bin/env node
/**
 * Update explorer.d-bis.org in NPMplus to use direct Blockscout route
 * Changes from: http://${IP_BLOCKSCOUT}:80 → http://${IP_BLOCKSCOUT}:4000
 */
import { chromium } from 'playwright';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
import { config } from 'dotenv';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const PROJECT_ROOT = join(__dirname, '../../..');
config({ path: join(PROJECT_ROOT, '.env') });
const NPM_URL = process.env.NPM_URL || 'https://192.168.0.166:81';
const NPM_EMAIL = process.env.NPM_EMAIL || 'nsatoshi2007@hotmail.com';
const NPM_PASSWORD = process.env.NPM_PASSWORD;
if (!NPM_PASSWORD) {
  throw new Error('NPM_PASSWORD environment variable is required');
}
const HEADLESS = process.env.HEADLESS !== 'false';
const DOMAIN = 'explorer.d-bis.org';
const NEW_TARGET = 'http://${IP_BLOCKSCOUT}:4000';
function log(message, type = 'info') {
  const icons = { success: '✅', error: '❌', warning: '⚠️', info: '📋' };
  console.log(`${icons[type]} ${message}`);
}
async function login(page) {
  log('Logging in to NPMplus...');
  await page.goto(NPM_URL, { waitUntil: 'domcontentloaded', timeout: 30000 });
  await page.waitForSelector('input[type="email"], input[name="email"]', { timeout: 10000 });
  await page.fill('input[type="email"]', NPM_EMAIL);
  await page.fill('input[type="password"]', NPM_PASSWORD);
  const loginButton = await page.$('button[type="submit"]');
  if (loginButton) {
    await loginButton.click();
  } else {
    await page.keyboard.press('Enter');
  }
  await page.waitForTimeout(3000);
  log('Logged in', 'success');
}
async function updateProxyHost(page) {
  log(`Updating ${DOMAIN} to direct route: ${NEW_TARGET}`);
  // Navigate to proxy hosts
  await page.goto(`${NPM_URL}/#/proxy-hosts`, { waitUntil: 'domcontentloaded' });
  await page.waitForTimeout(2000);
  // Find and click on explorer.d-bis.org
  const domainLink = await page.$(`text=${DOMAIN}`);
  if (!domainLink) {
    log(`Could not find ${DOMAIN} in proxy hosts list`, 'error');
    return false;
  }
  await domainLink.click();
  await page.waitForTimeout(2000);
  // Update forward host and port
  const url = new URL(NEW_TARGET);
  const hostname = url.hostname;
  const port = url.port || (url.protocol === 'https:' ? '443' : '80');
  const hostInput = await page.$('input[name="forward_host"], input[name="forward_hostname"]');
  if (hostInput) {
    await hostInput.fill(hostname);
    log(`  Updated forward host: ${hostname}`);
  }
  const portInput = await page.$('input[name="forward_port"]');
  if (portInput) {
    await portInput.fill(port);
    log(`  Updated forward port: ${port}`);
  }
  // Save
  const saveButton = await page.$('button:has-text("Save"), button[type="submit"]');
  if (saveButton) {
    await saveButton.click();
    log(`  Saved changes`, 'success');
    await page.waitForTimeout(2000);
    return true;
  }
  return false;
}
async function main() {
  const browser = await chromium.launch({ headless: HEADLESS, ignoreHTTPSErrors: true });
  const context = await browser.newContext({ ignoreHTTPSErrors: true });
  const page = await context.newPage();
  try {
    await login(page);
    const success = await updateProxyHost(page);
    if (success) {
      log(`✅ ${DOMAIN} updated to use direct route`, 'success');
    } else {
      log(`❌ Failed to update ${DOMAIN}`, 'error');
      process.exit(1);
    }
  } catch (error) {
    log(`Fatal error: ${error.message}`, 'error');
    await page.screenshot({ path: '/tmp/npmplus-update-error.png' });
    process.exit(1);
  } finally {
    await browser.close();
  }
}
main().catch(console.error);
SCRIPT_EOF
    # Substitute the real Blockscout address into the generated file
    # (pattern "\${IP_BLOCKSCOUT}" is literal for sed; "$" mid-pattern has
    # no special meaning in BRE). GNU sed -i — this targets Linux hosts.
    sed -i "s|\${IP_BLOCKSCOUT}|$BLOCKSCOUT_IP|g" "$NPM_SCRIPT"
    chmod +x "$NPM_SCRIPT"
    echo "  ✅ Created: scripts/nginx-proxy-manager/update-explorer-direct-route.js"
    echo ""
fi
# Closing guidance, again keyed off the Step 3 network probe.
echo "=========================================="
echo "NEXT STEPS"
echo "=========================================="
if [ "$NETWORK_ACCESS" = true ]; then
    echo "1. Run the NPMplus update script:"
    echo "   cd scripts/nginx-proxy-manager"
    echo "   node update-explorer-direct-route.js"
    echo ""
    echo "2. Test the direct route:"
    echo "   curl -I http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats"
    echo ""
else
    echo "1. Configure Blockscout to listen on 0.0.0.0:$BLOCKSCOUT_PORT"
    echo "2. Restart Blockscout service"
    echo "3. Run this script again to verify network access"
    echo "4. Then run the NPMplus update script"
    echo ""
fi
echo "=========================================="
View File

@@ -0,0 +1,327 @@
#!/usr/bin/env bash
set -euo pipefail
# Configure direct route: NPMplus → Blockscout:4000 (bypassing nginx on VMID 5000)
# This creates a more direct connection to reduce 502 errors
# Usage: ./configure-direct-blockscout-route.sh
VMID=5000
BLOCKSCOUT_IP="192.168.11.140"
BLOCKSCOUT_PORT=4000
PROXMOX_HOST="${1:-pve2}"
echo "=========================================="
echo "Configure Direct Blockscout Route"
echo "=========================================="
echo "VMID: $VMID ($BLOCKSCOUT_IP)"
echo "Direct Port: $BLOCKSCOUT_PORT"
echo "Bypassing: Nginx on port 80"
echo "=========================================="
echo ""
# Check if we're on Proxmox host
if ! command -v pct &>/dev/null; then
echo "⚠️ pct command not available"
echo " This script should be run on Proxmox host"
EXEC_PREFIX="ssh root@$PROXMOX_HOST"
else
EXEC_PREFIX=""
fi
# Step 1: Check if Blockscout is listening on port 4000
echo "=== Step 1: Checking Blockscout Port Configuration ==="
if [ -n "$EXEC_PREFIX" ]; then
PORT_CHECK=$($EXEC_PREFIX "pct exec $VMID -- ss -tlnp 2>/dev/null | grep :$BLOCKSCOUT_PORT || echo 'not found'")
else
PORT_CHECK=$(pct exec $VMID -- ss -tlnp 2>/dev/null | grep :$BLOCKSCOUT_PORT || echo "not found")
fi
if echo "$PORT_CHECK" | grep -q "127.0.0.1:$BLOCKSCOUT_PORT"; then
echo " ⚠️ Blockscout is listening on 127.0.0.1:$BLOCKSCOUT_PORT (localhost only)"
echo " 💡 Need to configure it to listen on 0.0.0.0:$BLOCKSCOUT_PORT for direct access"
NEEDS_CONFIG=true
elif echo "$PORT_CHECK" | grep -q "0.0.0.0:$BLOCKSCOUT_PORT\|:$BLOCKSCOUT_PORT.*0.0.0.0"; then
echo " ✅ Blockscout is already listening on 0.0.0.0:$BLOCKSCOUT_PORT (network accessible)"
NEEDS_CONFIG=false
elif echo "$PORT_CHECK" | grep -q ":$BLOCKSCOUT_PORT"; then
echo " ✅ Blockscout is listening on port $BLOCKSCOUT_PORT"
NEEDS_CONFIG=false
else
echo " ❌ Blockscout is NOT listening on port $BLOCKSCOUT_PORT"
echo " 💡 Blockscout service may not be running"
echo ""
echo " To start Blockscout service:"
echo " pct exec $VMID -- systemctl start blockscout.service"
exit 1
fi
echo ""
# Step 2: Check Blockscout environment/config to configure listening address
if [ "$NEEDS_CONFIG" = true ]; then
echo "=== Step 2: Configuring Blockscout to Listen on Network ==="
echo " Checking Blockscout configuration..."
# Check if Blockscout is running in Docker
if [ -n "$EXEC_PREFIX" ]; then
BLOCKSCOUT_CONTAINER=$($EXEC_PREFIX "pct exec $VMID -- docker ps --format '{{.Names}}' | grep blockscout | grep -v postgres | head -1" || echo "")
else
BLOCKSCOUT_CONTAINER=$(pct exec $VMID -- docker ps --format '{{.Names}}' | grep blockscout | grep -v postgres | head -1 || echo "")
fi
if [ -n "$BLOCKSCOUT_CONTAINER" ]; then
echo " ✅ Found Blockscout container: $BLOCKSCOUT_CONTAINER"
echo " 💡 Blockscout in Docker typically binds to 0.0.0.0 by default"
echo " 💡 If it's only on localhost, check docker-compose.yml or environment variables"
echo " 💡 Look for PORT or LISTEN_ADDRESS environment variables"
else
echo " ⚠️ Blockscout container not found"
echo " 💡 Check if Blockscout is running as a system service instead"
fi
echo ""
fi
# Step 3: Test direct connection to Blockscout
echo "=== Step 3: Testing Direct Connection to Blockscout ==="
if [ -n "$EXEC_PREFIX" ]; then
DIRECT_TEST=$($EXEC_PREFIX "pct exec $VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://127.0.0.1:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null" || echo "000")
NETWORK_TEST=$($EXEC_PREFIX "curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null" || echo "000")
else
DIRECT_TEST=$(pct exec $VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://127.0.0.1:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null || echo "000")
NETWORK_TEST=$(curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null || echo "000")
fi
if [ "$DIRECT_TEST" = "200" ]; then
echo " ✅ Blockscout API responding on localhost (HTTP $DIRECT_TEST)"
DIRECT_ACCESS=true
else
echo " ❌ Blockscout API not responding on localhost (HTTP $DIRECT_TEST)"
DIRECT_ACCESS=false
fi
if [ "$NETWORK_TEST" = "200" ]; then
echo " ✅ Blockscout API accessible via network IP (HTTP $NETWORK_TEST)"
NETWORK_ACCESS=true
elif [ "$NETWORK_TEST" = "000" ]; then
echo " ⚠️ Blockscout API not accessible via network IP (connection refused)"
echo " 💡 Blockscout may only be listening on localhost"
NETWORK_ACCESS=false
else
echo " ⚠️ Blockscout API returned HTTP $NETWORK_TEST via network"
NETWORK_ACCESS=false
fi
echo ""
# Step 4: Update NPMplus configuration (if network accessible)
if [ "$NETWORK_ACCESS" = true ]; then
echo "=== Step 4: Updating NPMplus Configuration ==="
echo " ✅ Blockscout is network accessible"
echo " 💡 Update NPMplus to point directly to: http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT"
echo ""
echo " Manual Steps:"
echo " 1. Log into NPMplus: https://192.168.0.166:81"
echo " 2. Find 'explorer.d-bis.org' proxy host"
echo " 3. Update Forward Host: $BLOCKSCOUT_IP"
echo " 4. Update Forward Port: $BLOCKSCOUT_PORT"
echo " 5. Save changes"
echo ""
echo " Or run the automated script:"
echo " cd scripts/nginx-proxy-manager"
echo " node update-explorer-direct-route.js"
echo ""
else
echo "=== Step 4: Cannot Configure Direct Route ==="
echo " ❌ Blockscout is not network accessible"
echo " 💡 Need to configure Blockscout to listen on 0.0.0.0:$BLOCKSCOUT_PORT"
echo ""
echo " For Docker containers, check docker-compose.yml:"
echo " - Ensure PORT environment variable is set"
echo " - Check if LISTEN_ADDRESS is set to 0.0.0.0"
echo " - Restart Blockscout container after changes"
echo ""
echo " For systemd services, check service file:"
echo " pct exec $VMID -- systemctl cat blockscout.service"
echo ""
fi
# Step 5: Alternative - Keep nginx but simplify configuration
# NOTE(review): $VMID, $NETWORK_ACCESS, $BLOCKSCOUT_IP and $BLOCKSCOUT_PORT are
# set earlier in this script (outside this excerpt) — confirm before reuse.
echo "=== Step 5: Alternative Solution (Keep Nginx) ==="
echo " If direct route is not possible, ensure nginx is properly configured:"
echo " pct exec $VMID -- systemctl status nginx"
echo " pct exec $VMID -- nginx -t"
echo " pct exec $VMID -- systemctl restart nginx"
echo ""
# Summary
echo "=========================================="
echo "SUMMARY"
echo "=========================================="
echo "Current Route:"
echo " NPMplus → $BLOCKSCOUT_IP:80 (nginx) → 127.0.0.1:$BLOCKSCOUT_PORT (Blockscout)"
echo ""
echo "Proposed Direct Route:"
if [ "$NETWORK_ACCESS" = true ]; then
echo " NPMplus → $BLOCKSCOUT_IP:$BLOCKSCOUT_PORT (Blockscout directly) ✅"
echo " Status: Ready to configure"
else
echo " NPMplus → $BLOCKSCOUT_IP:$BLOCKSCOUT_PORT (Blockscout directly) ❌"
echo " Status: Blockscout needs network access configuration"
fi
echo ""
echo "Benefits of Direct Route:"
echo " ✅ Removes nginx proxy layer (one less hop)"
echo " ✅ Reduces latency"
echo " ✅ Fewer points of failure"
echo " ✅ Simpler architecture"
echo ""
# Create update script for NPMplus
# The heredoc delimiter is quoted ('SCRIPT_EOF'), so the JavaScript below is
# written out verbatim with no shell expansion. The generated script reads
# NPM_PASSWORD from its own environment at run time, so no secret is baked in.
# NOTE(review): the output path /home/intlc/projects/proxmox is hardcoded —
# verify it exists on the host that runs this script.
if [ "$NETWORK_ACCESS" = true ]; then
echo "Creating NPMplus update script..."
cat > /home/intlc/projects/proxmox/scripts/nginx-proxy-manager/update-explorer-direct-route.js << 'SCRIPT_EOF'
#!/usr/bin/env node
/**
* Update explorer.d-bis.org in NPMplus to use direct Blockscout route
* Changes from: http://192.168.11.140:80 → http://192.168.11.140:4000
*/
import { chromium } from 'playwright';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
import { config } from 'dotenv';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const PROJECT_ROOT = join(__dirname, '../../..');
config({ path: join(PROJECT_ROOT, '.env') });
const NPM_URL = process.env.NPM_URL || 'https://192.168.0.166:81';
const NPM_EMAIL = process.env.NPM_EMAIL || 'nsatoshi2007@hotmail.com';
const NPM_PASSWORD = process.env.NPM_PASSWORD;
if (!NPM_PASSWORD) {
throw new Error('NPM_PASSWORD environment variable is required');
}
const HEADLESS = process.env.HEADLESS !== 'false';
const DOMAIN = 'explorer.d-bis.org';
const NEW_TARGET = 'http://192.168.11.140:4000';
function log(message, type = 'info') {
const icons = { success: '✅', error: '❌', warning: '⚠️', info: '📋' };
console.log(`${icons[type]} ${message}`);
}
async function login(page) {
log('Logging in to NPMplus...');
await page.goto(NPM_URL, { waitUntil: 'domcontentloaded', timeout: 30000 });
await page.waitForSelector('input[type="email"], input[name="email"]', { timeout: 10000 });
await page.fill('input[type="email"]', NPM_EMAIL);
await page.fill('input[type="password"]', NPM_PASSWORD);
const loginButton = await page.$('button[type="submit"]');
if (loginButton) {
await loginButton.click();
} else {
await page.keyboard.press('Enter');
}
await page.waitForTimeout(3000);
log('Logged in', 'success');
}
async function updateProxyHost(page) {
log(`Updating ${DOMAIN} to direct route: ${NEW_TARGET}`);
// Navigate to proxy hosts
await page.goto(`${NPM_URL}/#/proxy-hosts`, { waitUntil: 'domcontentloaded' });
await page.waitForTimeout(2000);
// Find and click on explorer.d-bis.org
const domainLink = await page.$(`text=${DOMAIN}`);
if (!domainLink) {
log(`Could not find ${DOMAIN} in proxy hosts list`, 'error');
return false;
}
await domainLink.click();
await page.waitForTimeout(2000);
// Update forward host and port
const url = new URL(NEW_TARGET);
const hostname = url.hostname;
const port = url.port || (url.protocol === 'https:' ? '443' : '80');
const hostInput = await page.$('input[name="forward_host"], input[name="forward_hostname"]');
if (hostInput) {
await hostInput.fill(hostname);
log(` Updated forward host: ${hostname}`);
}
const portInput = await page.$('input[name="forward_port"]');
if (portInput) {
await portInput.fill(port);
log(` Updated forward port: ${port}`);
}
// Save
const saveButton = await page.$('button:has-text("Save"), button[type="submit"]');
if (saveButton) {
await saveButton.click();
log(` Saved changes`, 'success');
await page.waitForTimeout(2000);
return true;
}
return false;
}
async function main() {
const browser = await chromium.launch({ headless: HEADLESS, ignoreHTTPSErrors: true });
const context = await browser.newContext({ ignoreHTTPSErrors: true });
const page = await context.newPage();
try {
await login(page);
const success = await updateProxyHost(page);
if (success) {
log(`✅ ${DOMAIN} updated to use direct route`, 'success');
} else {
log(`❌ Failed to update ${DOMAIN}`, 'error');
process.exit(1);
}
} catch (error) {
log(`Fatal error: ${error.message}`, 'error');
await page.screenshot({ path: '/tmp/npmplus-update-error.png' });
process.exit(1);
} finally {
await browser.close();
}
}
main().catch(console.error);
SCRIPT_EOF
chmod +x /home/intlc/projects/proxmox/scripts/nginx-proxy-manager/update-explorer-direct-route.js
echo " ✅ Created: scripts/nginx-proxy-manager/update-explorer-direct-route.js"
echo ""
fi
# Operator guidance: which path to take depends on whether Blockscout is
# reachable on the LAN (computed earlier into $NETWORK_ACCESS).
echo "=========================================="
echo "NEXT STEPS"
echo "=========================================="
if [ "$NETWORK_ACCESS" = true ]; then
echo "1. Run the NPMplus update script:"
echo " cd scripts/nginx-proxy-manager"
echo " node update-explorer-direct-route.js"
echo ""
echo "2. Test the direct route:"
echo " curl -I http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats"
echo ""
else
echo "1. Configure Blockscout to listen on 0.0.0.0:$BLOCKSCOUT_PORT"
echo "2. Restart Blockscout service"
echo "3. Run this script again to verify network access"
echo "4. Then run the NPMplus update script"
echo ""
fi
echo "=========================================="

View File

@@ -0,0 +1,198 @@
#!/bin/bash
set -euo pipefail

#######################################
# Configure all 19 public domains in Nginx Proxy Manager (NPM) and request
# Let's Encrypt certificates for them. The NPM API is only reachable from
# inside the NPM LXC container, so the worker script is shipped over SSH and
# executed there with `pct exec`.
#
# Required env: NPM_PASSWORD - NPM admin password (never hardcode credentials)
# Optional env: NPM_EMAIL    - NPM admin login / Let's Encrypt contact
#######################################

# Load IP configuration (best effort; sensible defaults are used when absent).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# The :- default avoids an unbound-variable abort (set -u) when the config
# file above is missing.
PROXMOX_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
CONTAINER_ID="105"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
# Fail fast if the operator did not supply the admin password.
NPM_PASSWORD="${NPM_PASSWORD:?NPM_PASSWORD environment variable is required}"

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔒 Nginx Proxy Manager SSL Configuration (Container)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# The heredoc delimiter is quoted, so the inner script reaches the container
# verbatim (no local expansion). Credentials therefore cannot be interpolated
# into it; they are handed over via the environment instead. printf %q quoting
# survives exactly the one level of shell parsing done on the remote side.
ssh root@"${PROXMOX_HOST}" "pct exec ${CONTAINER_ID} -- env NPM_EMAIL=$(printf '%q' "$NPM_EMAIL") NPM_PASSWORD=$(printf '%q' "$NPM_PASSWORD") bash" << 'INNER_SCRIPT'
set -e
NPM_URL="http://127.0.0.1:81"
EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
PASSWORD="${NPM_PASSWORD:?NPM_PASSWORD was not passed into the container}"

# Backend addresses. Defaults match config/ip-addresses.conf; the config file
# itself is not available inside the container, so the defaults normally win.
BLOCKSCOUT_IP="${IP_BLOCKSCOUT:-192.168.11.140}"
RPC_PUB_IP="${RPC_ALI_2:-192.168.11.252}"
RPC_PRV_IP="${RPC_ALI_1:-192.168.11.251}"
FRONTEND_IP="${IP_DBIS_FRONTEND:-192.168.11.130}"
API_IP="${IP_DBIS_API:-192.168.11.155}"
API2_IP="${IP_DBIS_API_2:-192.168.11.156}"
MIM4U_IP="${IP_SERVICE_36:-192.168.11.36}"

echo "🔐 Authenticating..."
TOKEN_RESPONSE=$(curl -s -X POST "$NPM_URL/api/tokens" \
  -H "Content-Type: application/json" \
  -d "{\"identity\":\"$EMAIL\",\"secret\":\"$PASSWORD\"}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ] || [ -z "${TOKEN// }" ]; then
  ERROR_MSG=$(echo "$TOKEN_RESPONSE" | jq -r '.error.message // "Unknown error"' 2>/dev/null || echo "$TOKEN_RESPONSE")
  echo "❌ Authentication failed: $ERROR_MSG"
  exit 1
fi
echo "✅ Authentication successful"
echo ""

#######################################
# Create a proxy host (if missing), request a Let's Encrypt certificate (if
# missing) and attach the certificate to the host.
# Arguments:
#   $1 domain, $2 forward scheme, $3 forward hostname, $4 forward port,
#   $5 "true"/"false" websocket upgrade
# Returns: 0 on success (or soft cert-request failure), 1 otherwise
#######################################
create_proxy_host() {
  local domain=$1
  local scheme=$2
  local hostname=$3
  local port=$4
  local websocket=$5
  echo "📋 Processing $domain..."
  # Reuse an existing proxy host for this domain if NPM already has one.
  EXISTING=$(curl -s -X GET "$NPM_URL/api/nginx/proxy-hosts" \
    -H "Authorization: Bearer $TOKEN" | jq -r ".result[] | select(.domain_names[] == \"$domain\") | .id" 2>/dev/null || echo "")
  local HOST_ID
  if [ -n "$EXISTING" ] && [ "$EXISTING" != "null" ]; then
    echo " Already exists (ID: $EXISTING)"
    HOST_ID=$EXISTING
  else
    echo " Creating proxy host..."
    RESPONSE=$(curl -s -X POST "$NPM_URL/api/nginx/proxy-hosts" \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d "{
        \"domain_names\": [\"$domain\"],
        \"forward_scheme\": \"$scheme\",
        \"forward_hostname\": \"$hostname\",
        \"forward_port\": $port,
        \"allow_websocket_upgrade\": $websocket,
        \"block_exploits\": true,
        \"cache_enabled\": false,
        \"ssl_forced\": true,
        \"http2_support\": true,
        \"hsts_enabled\": true,
        \"hsts_subdomains\": true,
        \"access_list_id\": 0,
        \"certificate_id\": 0
      }")
    HOST_ID=$(echo "$RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
    if [ -z "$HOST_ID" ] || [ "$HOST_ID" = "null" ]; then
      ERROR=$(echo "$RESPONSE" | jq -r '.error.message // .error // "Unknown error"' 2>/dev/null || echo "$RESPONSE")
      echo " ❌ Failed: $ERROR"
      return 1
    fi
    echo " ✅ Created (ID: $HOST_ID)"
  fi
  # Reuse an existing certificate covering this domain, if any.
  EXISTING_CERT=$(curl -s -X GET "$NPM_URL/api/nginx/certificates" \
    -H "Authorization: Bearer $TOKEN" | jq -r ".[] | select(.domain_names[] == \"$domain\") | .id" 2>/dev/null || echo "")
  if [ -n "$EXISTING_CERT" ] && [ "$EXISTING_CERT" != "null" ]; then
    echo " ✅ Certificate already exists (ID: $EXISTING_CERT)"
    CERT_ID=$EXISTING_CERT
  else
    echo " 🔒 Requesting SSL certificate..."
    CERT_RESPONSE=$(curl -s -X POST "$NPM_URL/api/nginx/certificates" \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d "{
        \"domain_names\": [\"$domain\"],
        \"provider\": \"letsencrypt\",
        \"letsencrypt_email\": \"$EMAIL\",
        \"letsencrypt_agree\": true
      }")
    CERT_ID=$(echo "$CERT_RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
    if [ -z "$CERT_ID" ] || [ "$CERT_ID" = "null" ]; then
      # Soft failure: issuance may simply still be in progress.
      ERROR=$(echo "$CERT_RESPONSE" | jq -r '.error.message // .error // "Check manually"' 2>/dev/null || echo "$CERT_RESPONSE")
      echo " ⚠️ Certificate request: $ERROR"
      echo " Certificate may be processing or domain may need DNS verification"
      return 0
    fi
    echo " ✅ Certificate requested (ID: $CERT_ID)"
  fi
  # Attach the certificate to the proxy host and force SSL.
  if [ -n "$CERT_ID" ] && [ "$CERT_ID" != "null" ] && [ "$CERT_ID" != "0" ]; then
    UPDATE_RESPONSE=$(curl -s -X PUT "$NPM_URL/api/nginx/proxy-hosts/$HOST_ID" \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d "{
        \"certificate_id\": $CERT_ID,
        \"ssl_forced\": true
      }")
    echo " ✅ SSL configured for $domain"
  fi
  return 0
}

echo "🚀 Starting domain configuration (19 domains)..."
echo ""
SUCCESS=0
FAILED=0

#######################################
# Run create_proxy_host and update the counters. Plain assignments are used
# instead of ((SUCCESS++)): a post-increment from 0 evaluates to 0 (status 1),
# which under `set -e` aborted the script on the very first success.
#######################################
track() {
  if create_proxy_host "$@"; then
    SUCCESS=$((SUCCESS + 1))
  else
    FAILED=$((FAILED + 1))
  fi
}

# sankofa.nexus (5 domains)
track "sankofa.nexus" "http" "$BLOCKSCOUT_IP" "80" "false"
track "www.sankofa.nexus" "http" "$BLOCKSCOUT_IP" "80" "false"
track "phoenix.sankofa.nexus" "http" "$BLOCKSCOUT_IP" "80" "false"
track "www.phoenix.sankofa.nexus" "http" "$BLOCKSCOUT_IP" "80" "false"
track "the-order.sankofa.nexus" "http" "$BLOCKSCOUT_IP" "80" "false"
# d-bis.org (9 domains)
track "explorer.d-bis.org" "http" "$BLOCKSCOUT_IP" "80" "false"
track "rpc-http-pub.d-bis.org" "https" "$RPC_PUB_IP" "443" "true"
track "rpc-ws-pub.d-bis.org" "https" "$RPC_PUB_IP" "443" "true"
track "rpc-http-prv.d-bis.org" "https" "$RPC_PRV_IP" "443" "true"
track "rpc-ws-prv.d-bis.org" "https" "$RPC_PRV_IP" "443" "true"
track "dbis-admin.d-bis.org" "http" "$FRONTEND_IP" "80" "false"
track "dbis-api.d-bis.org" "http" "$API_IP" "3000" "false"
track "dbis-api-2.d-bis.org" "http" "$API2_IP" "3000" "false"
track "secure.d-bis.org" "http" "$FRONTEND_IP" "80" "false"
# mim4u.org (4 domains)
track "mim4u.org" "http" "$MIM4U_IP" "80" "false"
track "www.mim4u.org" "http" "$MIM4U_IP" "80" "false"
track "secure.mim4u.org" "http" "$MIM4U_IP" "80" "false"
track "training.mim4u.org" "http" "$MIM4U_IP" "80" "false"
# defi-oracle.io (1 domain)
track "rpc.public-0138.defi-oracle.io" "https" "$RPC_PUB_IP" "443" "true"

echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📊 Configuration Summary"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Successful: $SUCCESS"
echo "⚠️ Failed: $FAILED"
echo "📋 Total: 19"
echo ""
echo "⏳ SSL certificates may take 1-2 minutes to be issued"
INNER_SCRIPT

echo ""
echo "✅ Configuration complete!"
echo ""
echo "🔍 To verify, run:"
echo " bash scripts/nginx-proxy-manager/verify-ssl-config.sh"

View File

@@ -0,0 +1,192 @@
#!/bin/bash
set -euo pipefail

#######################################
# Configure all 19 public domains in Nginx Proxy Manager (NPM) and request
# Let's Encrypt certificates. The NPM API is only reachable from inside the
# NPM LXC container, so the worker script is shipped over SSH and executed
# there with `pct exec`.
#
# Required env: NPM_PASSWORD - NPM admin password (never hardcode credentials)
# Optional env: NPM_EMAIL    - NPM admin login / Let's Encrypt contact
#######################################

PROXMOX_HOST="192.168.11.11"
CONTAINER_ID="105"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
# Fail fast if the operator did not supply the admin password.
NPM_PASSWORD="${NPM_PASSWORD:?NPM_PASSWORD environment variable is required}"

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔒 Nginx Proxy Manager SSL Configuration (Container)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# The heredoc delimiter is quoted, so the inner script reaches the container
# verbatim (no local expansion). Credentials therefore cannot be interpolated
# into it; they are handed over via the environment instead. printf %q quoting
# survives exactly the one level of shell parsing done on the remote side.
ssh root@"${PROXMOX_HOST}" "pct exec ${CONTAINER_ID} -- env NPM_EMAIL=$(printf '%q' "$NPM_EMAIL") NPM_PASSWORD=$(printf '%q' "$NPM_PASSWORD") bash" << 'INNER_SCRIPT'
set -e
NPM_URL="http://127.0.0.1:81"
EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
PASSWORD="${NPM_PASSWORD:?NPM_PASSWORD was not passed into the container}"

echo "🔐 Authenticating..."
TOKEN_RESPONSE=$(curl -s -X POST "$NPM_URL/api/tokens" \
  -H "Content-Type: application/json" \
  -d "{\"identity\":\"$EMAIL\",\"secret\":\"$PASSWORD\"}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ] || [ -z "${TOKEN// }" ]; then
  ERROR_MSG=$(echo "$TOKEN_RESPONSE" | jq -r '.error.message // "Unknown error"' 2>/dev/null || echo "$TOKEN_RESPONSE")
  echo "❌ Authentication failed: $ERROR_MSG"
  exit 1
fi
echo "✅ Authentication successful"
echo ""

#######################################
# Create a proxy host (if missing), request a Let's Encrypt certificate (if
# missing) and attach the certificate to the host.
# Arguments:
#   $1 domain, $2 forward scheme, $3 forward hostname, $4 forward port,
#   $5 "true"/"false" websocket upgrade
# Returns: 0 on success (or soft cert-request failure), 1 otherwise
#######################################
create_proxy_host() {
  local domain=$1
  local scheme=$2
  local hostname=$3
  local port=$4
  local websocket=$5
  echo "📋 Processing $domain..."
  # Reuse an existing proxy host for this domain if NPM already has one.
  EXISTING=$(curl -s -X GET "$NPM_URL/api/nginx/proxy-hosts" \
    -H "Authorization: Bearer $TOKEN" | jq -r ".result[] | select(.domain_names[] == \"$domain\") | .id" 2>/dev/null || echo "")
  local HOST_ID
  if [ -n "$EXISTING" ] && [ "$EXISTING" != "null" ]; then
    echo " Already exists (ID: $EXISTING)"
    HOST_ID=$EXISTING
  else
    echo " Creating proxy host..."
    RESPONSE=$(curl -s -X POST "$NPM_URL/api/nginx/proxy-hosts" \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d "{
        \"domain_names\": [\"$domain\"],
        \"forward_scheme\": \"$scheme\",
        \"forward_hostname\": \"$hostname\",
        \"forward_port\": $port,
        \"allow_websocket_upgrade\": $websocket,
        \"block_exploits\": true,
        \"cache_enabled\": false,
        \"ssl_forced\": true,
        \"http2_support\": true,
        \"hsts_enabled\": true,
        \"hsts_subdomains\": true,
        \"access_list_id\": 0,
        \"certificate_id\": 0
      }")
    HOST_ID=$(echo "$RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
    if [ -z "$HOST_ID" ] || [ "$HOST_ID" = "null" ]; then
      ERROR=$(echo "$RESPONSE" | jq -r '.error.message // .error // "Unknown error"' 2>/dev/null || echo "$RESPONSE")
      echo " ❌ Failed: $ERROR"
      return 1
    fi
    echo " ✅ Created (ID: $HOST_ID)"
  fi
  # Reuse an existing certificate covering this domain, if any.
  EXISTING_CERT=$(curl -s -X GET "$NPM_URL/api/nginx/certificates" \
    -H "Authorization: Bearer $TOKEN" | jq -r ".[] | select(.domain_names[] == \"$domain\") | .id" 2>/dev/null || echo "")
  if [ -n "$EXISTING_CERT" ] && [ "$EXISTING_CERT" != "null" ]; then
    echo " ✅ Certificate already exists (ID: $EXISTING_CERT)"
    CERT_ID=$EXISTING_CERT
  else
    echo " 🔒 Requesting SSL certificate..."
    CERT_RESPONSE=$(curl -s -X POST "$NPM_URL/api/nginx/certificates" \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d "{
        \"domain_names\": [\"$domain\"],
        \"provider\": \"letsencrypt\",
        \"letsencrypt_email\": \"$EMAIL\",
        \"letsencrypt_agree\": true
      }")
    CERT_ID=$(echo "$CERT_RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
    if [ -z "$CERT_ID" ] || [ "$CERT_ID" = "null" ]; then
      # Soft failure: issuance may simply still be in progress.
      ERROR=$(echo "$CERT_RESPONSE" | jq -r '.error.message // .error // "Check manually"' 2>/dev/null || echo "$CERT_RESPONSE")
      echo " ⚠️ Certificate request: $ERROR"
      echo " Certificate may be processing or domain may need DNS verification"
      return 0
    fi
    echo " ✅ Certificate requested (ID: $CERT_ID)"
  fi
  # Attach the certificate to the proxy host and force SSL.
  if [ -n "$CERT_ID" ] && [ "$CERT_ID" != "null" ] && [ "$CERT_ID" != "0" ]; then
    UPDATE_RESPONSE=$(curl -s -X PUT "$NPM_URL/api/nginx/proxy-hosts/$HOST_ID" \
      -H "Authorization: Bearer $TOKEN" \
      -H "Content-Type: application/json" \
      -d "{
        \"certificate_id\": $CERT_ID,
        \"ssl_forced\": true
      }")
    echo " ✅ SSL configured for $domain"
  fi
  return 0
}

echo "🚀 Starting domain configuration (19 domains)..."
echo ""
SUCCESS=0
FAILED=0

#######################################
# Run create_proxy_host and update the counters. Plain assignments are used
# instead of ((SUCCESS++)): a post-increment from 0 evaluates to 0 (status 1),
# which under `set -e` aborted the script on the very first success.
#######################################
track() {
  if create_proxy_host "$@"; then
    SUCCESS=$((SUCCESS + 1))
  else
    FAILED=$((FAILED + 1))
  fi
}

# sankofa.nexus (5 domains)
track "sankofa.nexus" "http" "192.168.11.140" "80" "false"
track "www.sankofa.nexus" "http" "192.168.11.140" "80" "false"
track "phoenix.sankofa.nexus" "http" "192.168.11.140" "80" "false"
track "www.phoenix.sankofa.nexus" "http" "192.168.11.140" "80" "false"
track "the-order.sankofa.nexus" "http" "192.168.11.140" "80" "false"
# d-bis.org (9 domains)
track "explorer.d-bis.org" "http" "192.168.11.140" "80" "false"
track "rpc-http-pub.d-bis.org" "https" "192.168.11.252" "443" "true"
track "rpc-ws-pub.d-bis.org" "https" "192.168.11.252" "443" "true"
track "rpc-http-prv.d-bis.org" "https" "192.168.11.251" "443" "true"
track "rpc-ws-prv.d-bis.org" "https" "192.168.11.251" "443" "true"
track "dbis-admin.d-bis.org" "http" "192.168.11.130" "80" "false"
track "dbis-api.d-bis.org" "http" "192.168.11.155" "3000" "false"
track "dbis-api-2.d-bis.org" "http" "192.168.11.156" "3000" "false"
track "secure.d-bis.org" "http" "192.168.11.130" "80" "false"
# mim4u.org (4 domains)
track "mim4u.org" "http" "192.168.11.36" "80" "false"
track "www.mim4u.org" "http" "192.168.11.36" "80" "false"
track "secure.mim4u.org" "http" "192.168.11.36" "80" "false"
track "training.mim4u.org" "http" "192.168.11.36" "80" "false"
# defi-oracle.io (1 domain)
track "rpc.public-0138.defi-oracle.io" "https" "192.168.11.252" "443" "true"

echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📊 Configuration Summary"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Successful: $SUCCESS"
echo "⚠️ Failed: $FAILED"
echo "📋 Total: 19"
echo ""
echo "⏳ SSL certificates may take 1-2 minutes to be issued"
INNER_SCRIPT

echo ""
echo "✅ Configuration complete!"
echo ""
echo "🔍 To verify, run:"
echo " bash scripts/nginx-proxy-manager/verify-ssl-config.sh"

View File

@@ -1,4 +1,12 @@
#!/bin/bash
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Quick configuration script to update .env with Proxmox credentials
set -e

View File

@@ -0,0 +1,56 @@
#!/bin/bash
# Quick configuration script that (re)writes ~/.env with Proxmox MCP server
# connection settings. The API token secret is intentionally NOT written by
# this script; the operator adds it afterwards (instructions printed below).
#
# Usage: $0 [host] [user] [token-name]
set -euo pipefail

HOST="${1:-192.168.11.10}"
USER="${2:-root@pam}"
TOKEN_NAME="${3:-mcp-server}"

echo "Configuring .env file with Proxmox connection..."
echo "Host: $HOST"
echo "User: $USER"
echo "Token Name: $TOKEN_NAME"
echo ""

# The file will eventually hold an API token secret: restrict it to the owner
# before creating it (default umask would leave it world-readable).
umask 077

# Overwrites any existing ~/.env. The unquoted heredoc delimiter allows
# $HOST/$USER/$TOKEN_NAME to expand into the file.
cat > "$HOME/.env" << EOF
# Proxmox MCP Server Configuration
# Configured with: $HOST
# Proxmox Configuration
PROXMOX_HOST=$HOST
PROXMOX_USER=$USER
PROXMOX_TOKEN_NAME=$TOKEN_NAME
PROXMOX_TOKEN_VALUE=your-token-secret-here
# Security Settings
# ⚠️ WARNING: Setting PROXMOX_ALLOW_ELEVATED=true enables DESTRUCTIVE operations
PROXMOX_ALLOW_ELEVATED=false
# Optional Settings
PROXMOX_PORT=8006
EOF

echo "✅ .env file updated!"
echo ""
echo "⚠️ IMPORTANT: You need to create the API token and add it to .env"
echo ""
echo "Option 1: Via Proxmox Web UI (Recommended)"
echo " 1. Go to: https://$HOST:8006"
echo " 2. Navigate to: Datacenter → Permissions → API Tokens"
echo " 3. Click 'Add' and create token: $TOKEN_NAME"
echo " 4. Copy the secret value"
echo " 5. Update ~/.env: PROXMOX_TOKEN_VALUE=<paste-secret-here>"
echo ""
echo "Option 2: Try automated token creation"
echo " ./create-proxmox-token.sh $HOST $USER <password> $TOKEN_NAME"
echo ""
echo "Current .env contents:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Show the file without leaking the (future) token value; no 'cat | grep'.
grep -v "TOKEN_VALUE=" "$HOME/.env"
echo "PROXMOX_TOKEN_VALUE=<needs-to-be-added>"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# ER605 NAT Configuration Script
# Creates NAT rules for direct public IP routing to Nginx
# Note: This script generates configuration - manual application may be required
set -euo pipefail

# Load IP configuration (optional; the defaults below cover the common case).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# ANSI color codes used by the log helpers.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Leveled log helpers; %b expands the escape sequences stored in the colors.
log_info()    { printf '%b[INFO]%b %b\n' "$BLUE" "$NC" "$1"; }
log_success() { printf '%b[✓]%b %b\n' "$GREEN" "$NC" "$1"; }
log_warn()    { printf '%b[⚠]%b %b\n' "$YELLOW" "$NC" "$1"; }
log_error()   { printf '%b[✗]%b %b\n' "$RED" "$NC" "$1"; }

# Configuration (environment overrides win; defaults match the lab network).
PUBLIC_IP="${PUBLIC_IP:-76.53.10.35}"
NGINX_IP="${NGINX_IP:-192.168.11.26}"
NGINX_PORT_HTTPS=443
NGINX_PORT_HTTP=80

cat <<'BANNER'

━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
🔧 ER605 NAT Configuration Generator
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

BANNER
log_info "Public IP: $PUBLIC_IP"
log_info "Nginx Internal IP: $NGINX_IP"
log_info "Nginx HTTPS Port: $NGINX_PORT_HTTPS"
log_info "Nginx HTTP Port: $NGINX_PORT_HTTP"
echo ""
# Generate NAT rule configuration
# Emit one ER605 port-forwarding rule, formatted for manual entry in the
# Omada Controller / ER605 GUI.
# Arguments: $1 rule name, $2 external IP, $3 external port,
#            $4 internal IP, $5 internal port, $6 description
generate_nat_rule() {
  local name=$1 ext_ip=$2 ext_port=$3 int_ip=$4 int_port=$5 desc=$6
  # All static fields (interface, protocol, source) are fixed by design.
  cat <<RULE
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Rule Name: $name
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Enabled: ✅ Yes
Interface: WAN1
External IP: $ext_ip
External Port: $ext_port
Internal IP: $int_ip
Internal Port: $int_port
Protocol: TCP
Source IP: 0.0.0.0/0
Description: $desc

RULE
}
echo "📋 NAT Rules Configuration"
echo ""
echo "All services route through a single public IP to Nginx,"
echo "which then routes to backend services based on hostname."
echo ""
# Main HTTPS rule (all services)
generate_nat_rule \
"Web Services (All Domains)" \
"$PUBLIC_IP" \
"$NGINX_PORT_HTTPS" \
"$NGINX_IP" \
"$NGINX_PORT_HTTPS" \
"Routes all HTTPS traffic to Nginx for hostname-based routing (SNI)"
# HTTP rule for Let's Encrypt
generate_nat_rule \
"HTTP (Let's Encrypt)" \
"$PUBLIC_IP" \
"$NGINX_PORT_HTTP" \
"$NGINX_IP" \
"$NGINX_PORT_HTTP" \
"HTTP for Let's Encrypt validation and redirects"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "NAT Configuration Generated"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "📝 Next Steps:"
echo " 1. Log in to Omada Controller or ER605 GUI"
echo " 2. Navigate to: NAT → Port Forwarding"
echo " 3. Add the two rules shown above"
echo " 4. Save and apply configuration"
echo ""
log_info "🔒 Firewall Rules Required:"
echo " • Allow HTTPS (443) from WAN to $NGINX_IP"
echo " • Allow HTTP (80) from WAN to $NGINX_IP (for Let's Encrypt)"
echo ""

View File

@@ -0,0 +1,99 @@
#!/usr/bin/env bash
# ER605 NAT Configuration Script
# Creates NAT rules for direct public IP routing to Nginx
# Note: This script generates configuration - manual application may be required
set -euo pipefail

# ANSI color palette for the log helpers.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Shared formatter: color code, level tag, then the message.
# %b expands the escape sequences stored in the color variables.
_log() { printf '%b%s%b %b\n' "$1" "$2" "$NC" "$3"; }
log_info()    { _log "$BLUE" '[INFO]' "$1"; }
log_success() { _log "$GREEN" '[✓]' "$1"; }
log_warn()    { _log "$YELLOW" '[⚠]' "$1"; }
log_error()   { _log "$RED" '[✗]' "$1"; }

# Configuration (environment overrides win; defaults match the lab network).
PUBLIC_IP="${PUBLIC_IP:-76.53.10.35}"
NGINX_IP="${NGINX_IP:-192.168.11.26}"
NGINX_PORT_HTTPS=443
NGINX_PORT_HTTP=80

printf '\n%s\n%s\n%s\n\n' \
  "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" \
  "🔧 ER605 NAT Configuration Generator" \
  "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Public IP: $PUBLIC_IP"
log_info "Nginx Internal IP: $NGINX_IP"
log_info "Nginx HTTPS Port: $NGINX_PORT_HTTPS"
log_info "Nginx HTTP Port: $NGINX_PORT_HTTP"
echo ""
# Generate NAT rule configuration
# Emit one ER605 port-forwarding rule, formatted for manual entry in the
# Omada Controller / ER605 GUI.
# Arguments: $1 rule name, $2 external IP, $3 external port,
#            $4 internal IP, $5 internal port, $6 description
generate_nat_rule() {
  local name=$1 ext_ip=$2 ext_port=$3 int_ip=$4 int_port=$5 desc=$6
  local bar='━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'
  # One printf emits every line of the rule card, trailing blank line included.
  printf '%s\n' \
    "$bar" \
    "Rule Name: $name" \
    "$bar" \
    "Enabled: ✅ Yes" \
    "Interface: WAN1" \
    "External IP: $ext_ip" \
    "External Port: $ext_port" \
    "Internal IP: $int_ip" \
    "Internal Port: $int_port" \
    "Protocol: TCP" \
    "Source IP: 0.0.0.0/0" \
    "Description: $desc" \
    ""
}
echo "📋 NAT Rules Configuration"
echo ""
echo "All services route through a single public IP to Nginx,"
echo "which then routes to backend services based on hostname."
echo ""
# Main HTTPS rule (all services)
generate_nat_rule \
"Web Services (All Domains)" \
"$PUBLIC_IP" \
"$NGINX_PORT_HTTPS" \
"$NGINX_IP" \
"$NGINX_PORT_HTTPS" \
"Routes all HTTPS traffic to Nginx for hostname-based routing (SNI)"
# HTTP rule for Let's Encrypt
generate_nat_rule \
"HTTP (Let's Encrypt)" \
"$PUBLIC_IP" \
"$NGINX_PORT_HTTP" \
"$NGINX_IP" \
"$NGINX_PORT_HTTP" \
"HTTP for Let's Encrypt validation and redirects"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "NAT Configuration Generated"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "📝 Next Steps:"
echo " 1. Log in to Omada Controller or ER605 GUI"
echo " 2. Navigate to: NAT → Port Forwarding"
echo " 3. Add the two rules shown above"
echo " 4. Save and apply configuration"
echo ""
log_info "🔒 Firewall Rules Required:"
echo " • Allow HTTPS (443) from WAN to $NGINX_IP"
echo " • Allow HTTP (80) from WAN to $NGINX_IP (for Let's Encrypt)"
echo ""

Some files were not shown because too many files have changed in this diff Show More