docs: Ledger Live integration, contract deploy learnings, NEXT_STEPS updates
Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled

- ADD_CHAIN138_TO_LEDGER_LIVE: Ledger form done; public code review repo bis-innovations/LedgerLive; init/push commands
- CONTRACT_DEPLOYMENT_RUNBOOK: Chain 138 gas price 1 gwei, 36-addr check, TransactionMirror workaround
- CONTRACT_*: AddressMapper, MirrorManager deployed 2026-02-12; 36-address on-chain check
- NEXT_STEPS_FOR_YOU: Ledger done; steps completable now (no LAN); run-completable-tasks-from-anywhere
- MASTER_INDEX, OPERATOR_OPTIONAL, SMART_CONTRACTS_INVENTORY_SIMPLE: updates
- LEDGER_BLOCKCHAIN_INTEGRATION_COMPLETE: bis-innovations/LedgerLive reference

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
defiQUG
2026-02-12 15:46:57 -08:00
parent cc8dcaf356
commit fbda1b4beb
5114 changed files with 498901 additions and 4567 deletions

View File

@@ -0,0 +1,30 @@
#!/usr/bin/env bash
set -euo pipefail
# Add a reverse-proxy vhost for blockscout.defi-oracle.io to the Nginx
# container (VMID 105) on the r630-02 Proxmox host, then reload Nginx.
# Load IP configuration (optional; every value used below has a default)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Defaults guard against 'set -u' aborting when the conf file is absent.
PROXMOX_HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
NGINX_VMID=105
BLOCKSCOUT_IP="${IP_BLOCKSCOUT:-192.168.11.140}"
# The outer here-doc delimiter is intentionally UNquoted so ${BLOCKSCOUT_IP}
# expands locally — with a quoted delimiter it was written literally into
# the Nginx config and the upstream address never resolved. Nginx runtime
# variables are escaped (\$host, ...) so they reach the config file intact.
# NOTE(review): 'cat >>' is not idempotent — re-running appends a duplicate
# server block; confirm whether a guard is needed.
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} "pct exec $NGINX_VMID -- bash" << NGINX_EOF
cat >> /data/nginx/custom/http.conf << 'CONFIG_EOF'
# Blockscout (defi-oracle.io domain)
server {
listen 80;
server_name blockscout.defi-oracle.io;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
location / {
proxy_pass http://${BLOCKSCOUT_IP}:80;
}
}
CONFIG_EOF
nginx -t && systemctl restart npm && echo "✓ Nginx configuration updated"
NGINX_EOF

View File

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
set -euo pipefail
# Add blockscout.defi-oracle.io to Nginx configuration on VMID 105
# Appends a reverse-proxy vhost for Blockscout to the Nginx container on
# the r630-02 Proxmox host, then validates and restarts Nginx.
# NOTE(review): 'cat >>' is not idempotent — a second run appends a
# duplicate server block; confirm whether a guard is needed.
PROXMOX_HOST="192.168.11.12"
NGINX_VMID=105
# Both here-doc delimiters are quoted, so the whole payload (including the
# Nginx runtime variables $host, $scheme, ...) is transmitted and written
# literally — nothing expands locally. The hardcoded upstream IP below is
# therefore exactly what lands in the config file.
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} "pct exec $NGINX_VMID -- bash" << 'NGINX_EOF'
cat >> /data/nginx/custom/http.conf << 'CONFIG_EOF'
# Blockscout (defi-oracle.io domain)
server {
listen 80;
server_name blockscout.defi-oracle.io;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
location / {
proxy_pass http://192.168.11.140:80;
}
}
CONFIG_EOF
nginx -t && systemctl restart npm && echo "✓ Nginx configuration updated"
NGINX_EOF

View File

@@ -0,0 +1,45 @@
#!/bin/bash
set -euo pipefail
# Add VLAN 11 IP configuration to ~/.bashrc for WSL2 auto-configuration.
# The appended snippet re-adds the secondary IP on each login.
# Load IP configuration (optional; the snippet below is self-contained)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# NOTE: keep these in sync with the literal here-doc below — the delimiter
# is quoted, so these variables do NOT expand into the snippet.
VLAN11_IP="192.168.11.23"
PRIMARY_IF="eth0"
BASHRC="$HOME/.bashrc"
echo "🔧 Adding VLAN 11 IP Auto-Configuration to ~/.bashrc"
echo ""
# Idempotency check: look for the "add-vlan11-ip" marker that the snippet
# below writes.
if grep -q "add-vlan11-ip" "$BASHRC" 2>/dev/null; then
echo "✅ VLAN 11 IP configuration already in ~/.bashrc"
exit 0
fi
# Append the snippet. Its first line carries the "add-vlan11-ip" marker the
# guard above greps for — previously the snippet never contained that
# string, so every run appended another duplicate copy.
cat >> "$BASHRC" << 'EOFBASHRC'
# add-vlan11-ip: Auto-configure VLAN 11 secondary IP (added automatically)
if [ -n "$(ip link show eth0 2>/dev/null)" ] && ! ip addr show eth0 | grep -q "192.168.11.23"; then
sudo ip addr add 192.168.11.23/24 dev eth0 2>/dev/null || true
sudo ip route add ${NETWORK_192_168_11_0:-192.168.11.0}/24 dev eth0 src 192.168.11.23 2>/dev/null || true
fi
EOFBASHRC
echo "✅ Configuration added to ~/.bashrc"
echo ""
echo "💡 VLAN 11 IP will be added automatically on each login"
echo " (Requires sudo password on first command after login)"
echo ""
echo "📋 To test immediately, run:"
echo " source ~/.bashrc"
echo ""

View File

@@ -0,0 +1,39 @@
#!/bin/bash
set -euo pipefail
# Add VLAN 11 IP configuration to ~/.bashrc for WSL2 auto-configuration.
# The appended snippet re-adds the secondary IP on each login.
# NOTE: keep these in sync with the literal here-doc below — the delimiter
# is quoted, so these variables do NOT expand into the snippet.
VLAN11_IP="192.168.11.23"
PRIMARY_IF="eth0"
BASHRC="$HOME/.bashrc"
echo "🔧 Adding VLAN 11 IP Auto-Configuration to ~/.bashrc"
echo ""
# Idempotency check: look for the "add-vlan11-ip" marker that the snippet
# below writes.
if grep -q "add-vlan11-ip" "$BASHRC" 2>/dev/null; then
echo "✅ VLAN 11 IP configuration already in ~/.bashrc"
exit 0
fi
# Append the snippet. Its first line carries the "add-vlan11-ip" marker the
# guard above greps for — previously the snippet never contained that
# string, so every run appended another duplicate copy.
cat >> "$BASHRC" << 'EOFBASHRC'
# add-vlan11-ip: Auto-configure VLAN 11 secondary IP (added automatically)
if [ -n "$(ip link show eth0 2>/dev/null)" ] && ! ip addr show eth0 | grep -q "192.168.11.23"; then
sudo ip addr add 192.168.11.23/24 dev eth0 2>/dev/null || true
sudo ip route add 192.168.11.0/24 dev eth0 src 192.168.11.23 2>/dev/null || true
fi
EOFBASHRC
echo "✅ Configuration added to ~/.bashrc"
echo ""
echo "💡 VLAN 11 IP will be added automatically on each login"
echo " (Requires sudo password on first command after login)"
echo ""
echo "📋 To test immediately, run:"
echo " source ~/.bashrc"
echo ""

View File

@@ -0,0 +1,51 @@
#!/bin/bash
set -euo pipefail
# Analyze running services on r630-01 and check for port conflicts;
# list the services that should be reachable through NPMplus.
# Load IP configuration (optional; every value below has a default)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Defaults guard against 'set -u' aborting when the conf file is absent
# (previously an unguarded ${PROXMOX_HOST_R630_01} killed the script).
PROXMOX_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# Service IPs, one defaulted variable each (replaces the generated
# triple-nested ${A:-${B:-${B:-...}}} expansions, which resolved to the
# same values).
DBIS_FRONTEND_IP="${IP_DBIS_FRONTEND:-192.168.11.130}"
DBIS_API_IP="${IP_DBIS_API:-192.168.11.155}"
DBIS_API2_IP="${IP_DBIS_API_2:-192.168.11.156}"
MIM4U_IP="${IP_SERVICE_36:-192.168.11.36}"
BLOCKSCOUT_IP="${IP_BLOCKSCOUT:-192.168.11.140}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📊 Service Analysis for r630-01"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "=== Running Containers with IPs ==="
echo ""
# List running containers remotely; \$-escapes run on the Proxmox host.
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "
pct list 2>/dev/null | grep running | while read vmid status lock name; do
echo \"VMID: \$vmid - \$name\"
ip=\$(pct config \$vmid 2>/dev/null | grep '^ip=' | head -1 | cut -d: -f2 | awk '{print \$1}')
if [ -n \"\$ip\" ]; then
echo \" IP: \$ip\"
fi
echo \"\"
done
"
echo ""
echo "=== Services That Should Be in NPMplus ==="
echo ""
echo "Based on documentation, the following services should be accessible via NPMplus:"
echo ""
echo "1. DBIS Services (if running on r630-01):"
echo " - dbis-admin.d-bis.org → ${DBIS_FRONTEND_IP}:80 (VMID 10130)"
echo " - dbis-api.d-bis.org → ${DBIS_API_IP}:3000 (VMID 10150)"
echo " - dbis-api-2.d-bis.org → ${DBIS_API2_IP}:3000 (VMID 10151)"
echo " - secure.d-bis.org → ${DBIS_FRONTEND_IP}:80 (VMID 10130)"
echo ""
echo "2. MIM4U Services (documented on r630-02, not r630-01):"
echo " - mim4u.org → ${MIM4U_IP}:80 (VMID 7811 - on r630-02)"
echo ""
echo "3. Blockchain Explorer (on different host):"
echo " - explorer.d-bis.org → ${BLOCKSCOUT_IP}:4000 (VMID 5000 - on r630-02)"
echo ""

View File

@@ -0,0 +1,45 @@
#!/bin/bash
set -euo pipefail
# Analyze running services on r630-01 and check for port conflicts
# Identify services that should be in NPMplus
PROXMOX_HOST="192.168.11.11"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📊 Service Analysis for r630-01"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "=== Running Containers with IPs ==="
echo ""
# Get running containers with their IPs. The \$-escapes inside the quoted
# command string are deliberate: those expansions happen on the Proxmox
# host, not locally.
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "
pct list 2>/dev/null | grep running | while read vmid status lock name; do
echo \"VMID: \$vmid - \$name\"
ip=\$(pct config \$vmid 2>/dev/null | grep '^ip=' | head -1 | cut -d: -f2 | awk '{print \$1}')
if [ -n \"\$ip\" ]; then
echo \" IP: \$ip\"
fi
echo \"\"
done
"
echo ""
echo "=== Services That Should Be in NPMplus ==="
echo ""
# The mapping below is informational only; it mirrors what the project
# documentation says, it is not queried from the hosts.
echo "Based on documentation, the following services should be accessible via NPMplus:"
echo ""
echo "1. DBIS Services (if running on r630-01):"
echo " - dbis-admin.d-bis.org → 192.168.11.130:80 (VMID 10130)"
echo " - dbis-api.d-bis.org → 192.168.11.155:3000 (VMID 10150)"
echo " - dbis-api-2.d-bis.org → 192.168.11.156:3000 (VMID 10151)"
echo " - secure.d-bis.org → 192.168.11.130:80 (VMID 10130)"
echo ""
echo "2. MIM4U Services (documented on r630-02, not r630-01):"
echo " - mim4u.org → 192.168.11.36:80 (VMID 7811 - on r630-02)"
echo ""
echo "3. Blockchain Explorer (on different host):"
echo " - explorer.d-bis.org → 192.168.11.140:4000 (VMID 5000 - on r630-02)"
echo ""

View File

@@ -0,0 +1,46 @@
#!/bin/bash
# Phase 4: Sync configuration to secondary
# Exports the primary NPMplus configuration, imports it on the secondary,
# then syncs TLS certificates. Each step is best-effort: failures only
# warn, so a partial sync still completes the phase.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Drop strict mode while sourcing .env: the file may reference unset
# variables or end with a non-zero status.
if [ -f "$PROJECT_ROOT/.env" ]; then
set +euo pipefail
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -euo pipefail
fi
# Colored log helpers writing to stdout.
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_warn() { echo -e "\033[1;33m[⚠]\033[0m $1"; }
log_info "Syncing configuration to secondary..."
# Export from primary
log_info "Exporting primary configuration..."
# Scrape the export path from the helper's output line containing
# "exported to" — brittle, depends on that exact wording.
# TODO(review): have export-primary-config.sh print just the path.
EXPORT_DIR=$(bash "$SCRIPT_DIR/export-primary-config.sh" 2>&1 | grep "exported to" | awk '{print $NF}' || echo "")
if [ -z "$EXPORT_DIR" ] || [ ! -d "$EXPORT_DIR" ]; then
log_warn "Could not determine export directory, trying default location..."
# Fall back to the newest backup directory in /tmp.
EXPORT_DIR=$(ls -td /tmp/npmplus-config-backup-* 2>/dev/null | head -1 || echo "")
fi
if [ -n "$EXPORT_DIR" ] && [ -d "$EXPORT_DIR" ]; then
log_info "Importing to secondary..."
bash "$SCRIPT_DIR/import-secondary-config.sh" "$EXPORT_DIR" || {
log_warn "Import failed, but continuing..."
}
log_success "Configuration sync attempted"
else
log_warn "Export directory not found, skipping import"
fi
# Sync certificates
log_info "Syncing certificates..."
bash "$SCRIPT_DIR/sync-certificates.sh" || {
log_warn "Certificate sync failed"
}
log_success "Phase 4 complete: Configuration sync attempted"

View File

@@ -0,0 +1,31 @@
#!/bin/bash
set -euo pipefail
# Hop through R630-03 (known-good) to reach R630-04, to rule out local
# network / SSH-client issues.
# Load IP configuration (optional; defaults below)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
R630_03_IP="${IP_SERVICE_13:-192.168.11.13}"
R630_04_IP="${IP_DEVICE_14:-192.168.11.14}"
# SECURITY: password should come from the environment, not source control;
# the literal is only a fallback until callers export R630_03_PASS.
R630_03_PASS="${R630_03_PASS:-L@kers2010}"
echo "Connecting to R630-03 first..."
# The outer here-doc delimiter is UNquoted so ${R630_04_IP} expands locally
# (previously it was resolved on R630-03, where the conf is not loaded);
# remote command substitutions are escaped (\$(hostname)).
sshpass -p "$R630_03_PASS" ssh -o StrictHostKeyChecking=no root@"$R630_03_IP" << EOF
echo "=== Connected to R630-03 (\$(hostname)) ==="
echo ""
echo "Now attempting to connect to R630-04..."
echo ""
# Try verbose SSH to see what's happening
ssh -v root@${R630_04_IP} << 'R63004'
echo "=== Successfully connected to R630-04 ==="
hostname
pveversion
systemctl status pveproxy --no-pager | head -20
R63004
echo ""
echo "=== Connection attempt complete ==="
EOF

View File

@@ -0,0 +1,25 @@
#!/bin/bash
set -euo pipefail
# Connect to R630-04 from R630-03 (which we know works)
# This helps rule out network/SSH client issues
# SECURITY: the password is hardcoded in source control — move it to an
# environment variable or a secrets store.
echo "Connecting to R630-03 first..."
# The outer here-doc delimiter is quoted, so $(hostname) and the nested
# 'ssh -v' here-doc execute on R630-03, not locally.
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@192.168.11.13 << 'EOF'
echo "=== Connected to R630-03 ($(hostname)) ==="
echo ""
echo "Now attempting to connect to R630-04..."
echo ""
# Try verbose SSH to see what's happening
ssh -v root@192.168.11.14 << 'R63004'
echo "=== Successfully connected to R630-04 ==="
hostname
pveversion
systemctl status pveproxy --no-pager | head -20
R63004
echo ""
echo "=== Connection attempt complete ==="
EOF

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env bash
set -euo pipefail
# Copy the entire smom-dbis-138-proxmox directory to a Proxmox host.
# Usage: copy-project.sh [host] [user] [remote_dir]
# Load IP configuration (optional)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Suppress locale warnings
export LC_ALL=C
export LANG=C
HOST="${1:-192.168.11.10}"
# Renamed from USER so the login environment variable is not clobbered.
REMOTE_USER="${2:-root}"
REMOTE_DIR="${3:-/opt/smom-dbis-138-proxmox}"
echo "Copying entire project to $REMOTE_USER@$HOST:$REMOTE_DIR"
# Test connection (suppress locale warnings)
if ! ssh -o BatchMode=yes -o ConnectTimeout=5 "$REMOTE_USER@$HOST" "export LC_ALL=C; export LANG=C; exit" 2>/dev/null; then
  echo "❌ Cannot connect to $HOST"
  echo " Ensure SSH key is set up: ssh-copy-id $REMOTE_USER@$HOST"
  exit 1
fi
# Create remote directory
ssh "$REMOTE_USER@$HOST" "mkdir -p $REMOTE_DIR"
echo "Copying files (this may take a few minutes)..."
# Fall back to scp only when rsync is genuinely unavailable. Previously ANY
# rsync failure (e.g. a dropped connection mid-transfer) silently re-ran
# the whole copy via scp, hiding the real error.
if command -v rsync >/dev/null 2>&1; then
  rsync -avz --exclude='.git' --exclude='*.log' \
    smom-dbis-138-proxmox/ \
    "$REMOTE_USER@$HOST:$REMOTE_DIR/"
else
  echo "⚠ rsync not available, using scp..."
  # NOTE: 'dir/*' skips dotfiles; .git would have been excluded either way.
  scp -r smom-dbis-138-proxmox/* "$REMOTE_USER@$HOST:$REMOTE_DIR/"
fi
# Make all scripts executable
ssh "$REMOTE_USER@$HOST" "find $REMOTE_DIR -name '*.sh' -exec chmod +x {} \;"
echo "✅ All files copied to $REMOTE_DIR"
echo ""
echo "SSH to Proxmox host and run:"
echo " ssh $REMOTE_USER@$HOST"
echo " cd $REMOTE_DIR"
echo " sudo ./scripts/deployment/deploy-phased.sh --source-project /path/to/smom-dbis-138"

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env bash
set -euo pipefail
# Copy entire smom-dbis-138-proxmox directory to Proxmox host
# This copies everything needed for deployment
# Usage: script [host] [user] [remote_dir]
# Suppress locale warnings
export LC_ALL=C
export LANG=C
HOST="${1:-192.168.11.10}"
# NOTE(review): this clobbers the login $USER environment variable for the
# rest of the script; rename if anything downstream depends on it.
USER="${2:-root}"
REMOTE_DIR="${3:-/opt/smom-dbis-138-proxmox}"
echo "Copying entire project to $USER@$HOST:$REMOTE_DIR"
# Test connection (suppress locale warnings)
if ! ssh -o BatchMode=yes -o ConnectTimeout=5 "$USER@$HOST" "export LC_ALL=C; export LANG=C; exit" 2>/dev/null; then
echo "❌ Cannot connect to $HOST"
echo " Ensure SSH key is set up: ssh-copy-id $USER@$HOST"
exit 1
fi
# Create remote directory
ssh "$USER@$HOST" "mkdir -p $REMOTE_DIR"
# Copy entire smom-dbis-138-proxmox directory
echo "Copying files (this may take a few minutes)..."
# NOTE(review): any rsync failure — not just "rsync missing" — falls
# through to scp, which can re-copy after a partial transfer; 'dir/*'
# also skips dotfiles.
rsync -avz --exclude='.git' --exclude='*.log' \
smom-dbis-138-proxmox/ \
"$USER@$HOST:$REMOTE_DIR/" || {
echo "⚠ rsync not available, using scp..."
scp -r smom-dbis-138-proxmox/* "$USER@$HOST:$REMOTE_DIR/"
}
# Make all scripts executable
ssh "$USER@$HOST" "find $REMOTE_DIR -name '*.sh' -exec chmod +x {} \;"
echo "✅ All files copied to $REMOTE_DIR"
echo ""
echo "SSH to Proxmox host and run:"
echo " ssh $USER@$HOST"
echo " cd $REMOTE_DIR"
echo " sudo ./scripts/deployment/deploy-phased.sh --source-project /path/to/smom-dbis-138"

View File

@@ -0,0 +1,22 @@
#!/usr/bin/env bash
set -euo pipefail
# Temporary script to migrate remaining containers, skipping 6000.
# For each VMID: 'pct migrate' from ml110 to r630-01, wait for the
# migration to settle, then move the rootfs onto the thin1 pool
# (pct migrate cannot target a storage directly).
# Load IP configuration (optional; defaults below)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VMIDS=(6400 10100 10101 10120 10130 10150 10151)
SOURCE_NODE="ml110"
TARGET_NODE="r630-01"
TARGET_STORAGE="thin1"
# SECURITY: take passwords from the environment; the literals are only
# fallbacks and should be removed once callers export these variables.
ML110_PASS="${ML110_PASS:-L@kers2010}"
R630_01_PASS="${R630_01_PASS:-password}"
for vmid in "${VMIDS[@]}"; do
  echo "=== Migrating $vmid ==="
  # '&&' chain with a trailing '||' reports per-VMID success/failure
  # without tripping 'set -e'; the sleep gives the migration time to settle
  # before the volume move.
  sshpass -p "$ML110_PASS" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_ML110:-192.168.11.10} "pct migrate $vmid $TARGET_NODE" && \
  sleep 60 && \
  sshpass -p "$R630_01_PASS" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST_R630_01:-192.168.11.11} "pct move-volume $vmid rootfs $TARGET_STORAGE" && \
  echo "$vmid migrated" || echo "$vmid failed"
done

View File

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -euo pipefail
# Temporary script to migrate remaining containers, skipping 6000
# For each VMID: 'pct migrate' from ml110 to r630-01, sleep to let the
# migration settle, then move the rootfs onto the thin1 pool
# (pct migrate cannot target a storage directly).
# SECURITY: plaintext passwords in source control — move to env vars.
VMIDS=(6400 10100 10101 10120 10130 10150 10151)
SOURCE_NODE="ml110"
TARGET_NODE="r630-01"
TARGET_STORAGE="thin1"
for vmid in "${VMIDS[@]}"; do
echo "=== Migrating $vmid ==="
# '&&' chain with trailing '||' reports per-VMID success/failure without
# tripping 'set -e'.
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@192.168.11.10 "pct migrate $vmid $TARGET_NODE" && \
sleep 60 && \
sshpass -p 'password' ssh -o StrictHostKeyChecking=no root@192.168.11.11 "pct move-volume $vmid rootfs $TARGET_STORAGE" && \
echo "$vmid migrated" || echo "$vmid failed"
done

View File

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Simple approach: Migrate 2 containers, then move storage to thin1
# This works around the pct migrate storage limitation
# NOTE(review): this is an exploratory/diagnostic script — it mostly prints
# a plan and inspects container 1500's config; it performs no migration.
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# SECURITY: plaintext fallback password — prefer exporting PROXMOX_PASS.
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
PROXMOX_PASS="${PROXMOX_PASS:-L@kers2010}"
# Run a command on the Proxmox host via password auth.
ssh_proxmox() {
sshpass -p "$PROXMOX_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$PROXMOX_HOST" "$@"
}
echo "========================================="
echo "Migrate 2 containers to pve2 thin1"
echo "========================================="
echo ""
echo "Note: pct migrate doesn't support --storage parameter"
echo "We'll need to:"
echo "1. Migrate containers to pve2 (they'll try to use local-lvm which fails)"
echo "2. OR: Change storage approach"
echo ""
echo "Actually, let me check if we can use the API with storage parameter..."
echo ""
# Try API approach with storage
echo "Testing API migration with storage parameter..."
VMID=1500
# Get current container info. Requires python3 locally to extract the
# rootfs entry from the JSON config.
echo "Container 1500 current config:"
ssh_proxmox "pvesh get /nodes/ml110/lxc/$VMID/config --output-format json" 2>&1 | python3 -c "import sys, json; d=json.load(sys.stdin); print(f\"rootfs: {d.get('rootfs', 'N/A')}\")" 2>&1
echo ""
echo "Let's try migrating container 1500 using a two-step process:"
echo ""

View File

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
# Simple approach: Migrate 2 containers, then move storage to thin1
# This works around the pct migrate storage limitation
# NOTE(review): this is an exploratory/diagnostic script — it mostly prints
# a plan and inspects container 1500's config; it performs no migration.
set -euo pipefail
# SECURITY: plaintext fallback password — prefer exporting PROXMOX_PASS.
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
PROXMOX_PASS="${PROXMOX_PASS:-L@kers2010}"
# Run a command on the Proxmox host via password auth.
ssh_proxmox() {
sshpass -p "$PROXMOX_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$PROXMOX_HOST" "$@"
}
echo "========================================="
echo "Migrate 2 containers to pve2 thin1"
echo "========================================="
echo ""
echo "Note: pct migrate doesn't support --storage parameter"
echo "We'll need to:"
echo "1. Migrate containers to pve2 (they'll try to use local-lvm which fails)"
echo "2. OR: Change storage approach"
echo ""
echo "Actually, let me check if we can use the API with storage parameter..."
echo ""
# Try API approach with storage
echo "Testing API migration with storage parameter..."
VMID=1500
# Get current container info. Requires python3 locally to extract the
# rootfs entry from the JSON config.
echo "Container 1500 current config:"
ssh_proxmox "pvesh get /nodes/ml110/lxc/$VMID/config --output-format json" 2>&1 | python3 -c "import sys, json; d=json.load(sys.stdin); print(f\"rootfs: {d.get('rootfs', 'N/A')}\")" 2>&1
echo ""
echo "Let's try migrating container 1500 using a two-step process:"
echo ""

View File

@@ -0,0 +1,37 @@
#!/bin/bash
set -euo pipefail
# Quick status report of key container services on r630-02.
# Load IP configuration (optional; PROXMOX_HOST has a default)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.12}"
echo "=== Container Status Summary ==="
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct list" 2>&1
echo ""
echo "=== Key Services ==="
# Each check below ends in '|| true': 'systemctl is-active' exits non-zero
# for an inactive service, which propagates through ssh and previously
# aborted the whole report under 'set -e' at the first inactive service.
echo "VMID 5000 (Blockscout):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 5000 -- systemctl is-active blockscout nginx docker 2>&1 | grep -v 'inactive'" || true
echo "VMID 102 (Cloudflare):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 102 -- systemctl is-active cloudflared 2>&1" || true
echo "VMID 104 (Gitea):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 104 -- systemctl is-active gitea 2>&1" || true
echo "VMID 6200 (Firefly-1):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 6200 -- docker ps --format '{{.Names}}' 2>&1 | head -3" || true
echo "VMID 6201 (Firefly-ali-1):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 6201 -- docker ps --format '{{.Names}}' 2>&1 | head -3" || true

View File

@@ -0,0 +1,31 @@
#!/bin/bash
set -euo pipefail
# Quick status report of key container services on r630-02.
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.12}"
echo "=== Container Status Summary ==="
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct list" 2>&1
echo ""
echo "=== Key Services ==="
# Each check below ends in '|| true': 'systemctl is-active' exits non-zero
# for an inactive service, which propagates through ssh and previously
# aborted the whole report under 'set -e' at the first inactive service.
echo "VMID 5000 (Blockscout):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 5000 -- systemctl is-active blockscout nginx docker 2>&1 | grep -v 'inactive'" || true
echo "VMID 102 (Cloudflare):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 102 -- systemctl is-active cloudflared 2>&1" || true
echo "VMID 104 (Gitea):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 104 -- systemctl is-active gitea 2>&1" || true
echo "VMID 6200 (Firefly-1):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 6200 -- docker ps --format '{{.Names}}' 2>&1 | head -3" || true
echo "VMID 6201 (Firefly-ali-1):"
ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 6201 -- docker ps --format '{{.Names}}' 2>&1 | head -3" || true

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env bash
# Retry contract verification after Blockscout is started
# Usage: ./retry-contract-verification.sh
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Run the verifier from the computed project root. Previously this cd'd to
# a hardcoded /home/intlc/projects/proxmox, which broke on any other
# checkout location and ignored the $PROJECT_ROOT computed above.
run_verification() {
  cd "$PROJECT_ROOT"
  ./scripts/verify-all-contracts.sh 0.8.20
}
echo "========================================="
echo "Contract Verification Retry"
echo "========================================="
echo ""
# Check Blockscout API first; "000" covers curl failure / no connection.
echo "1. Checking Blockscout API accessibility..."
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" --connect-timeout 10 "https://explorer.d-bis.org/api" 2>/dev/null || echo "000")
if [[ "$HTTP_CODE" == "200" ]]; then
  echo " ✅ Blockscout API is accessible (HTTP $HTTP_CODE)"
  echo ""
  echo "2. Starting contract verification..."
  echo ""
  run_verification
elif [[ "$HTTP_CODE" == "502" || "$HTTP_CODE" == "000" ]]; then
  echo " ⚠️ Blockscout API is not accessible (HTTP $HTTP_CODE)"
  echo " 💡 Start Blockscout service first:"
  echo " ./scripts/start-blockscout.sh"
  echo ""
  echo " Or manually on pve2:"
  echo " ssh root@pve2 'pct exec 5000 -- systemctl start blockscout'"
  exit 1
else
  # Any other status (e.g. 301/403): try anyway — the API may still work.
  echo " ⚠️ Blockscout API returned HTTP $HTTP_CODE"
  echo " Proceeding with verification attempt anyway..."
  echo ""
  run_verification
fi

View File

@@ -0,0 +1,49 @@
#!/bin/bash
# Attempt to set container password via Proxmox API/config
# This script tries multiple methods to set the password
set -euo pipefail
# Load IP configuration (optional; defaults below)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
VMID=5000
# SECURITY: plaintext password in source control and echoed to the terminal
# below — supply CONTAINER_PASSWORD via the environment instead.
PASSWORD="${CONTAINER_PASSWORD:-L@kers2010}"
# Default prevents 'set -u' from aborting when the conf file is missing
# (previously the unguarded ${PROXMOX_HOST_ML110} killed the script).
PROXMOX_HOST="${PROXMOX_HOST_ML110:-192.168.11.10}"
echo "Attempting to set password for container $VMID..."
echo ""
# Method 1: Try via container config (if supported)
echo "Method 1: Attempting via container config..."
# Note: Proxmox doesn't support password in config, but we can document it
# Method 2: Create a script and execute it in container
echo "Method 2: Creating password script in container..."
# Locate which cluster node actually hosts the container; \$node expands on
# the Proxmox host.
CONTAINER_NODE=$(ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
"for node in ml110 pve pve2; do \
if pvesh get /nodes/\$node/lxc/$VMID/status/current 2>/dev/null | grep -q status; then \
echo \$node; break; \
fi; \
done" 2>/dev/null || echo "")
if [ -n "$CONTAINER_NODE" ]; then
  echo "Container found on node: $CONTAINER_NODE"
  echo ""
  echo "Password must be set manually via Proxmox Web UI:"
  echo " 1. Navigate to Container $VMID → Options → Password"
  echo " 2. Enter password: $PASSWORD"
  echo " 3. Click OK"
  echo ""
  echo "Or via container console:"
  echo " ssh $PROXMOX_HOST"
  echo " pct enter $VMID"
  echo " passwd root"
  echo " # Enter: $PASSWORD (twice)"
fi

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Attempt to set container password via Proxmox API/config
# This script tries multiple methods to set the password
set -euo pipefail
VMID=5000
PASSWORD="L@kers2010"
PROXMOX_HOST="192.168.11.10"
echo "Attempting to set password for container $VMID..."
echo ""
# Method 1: Try via container config (if supported)
echo "Method 1: Attempting via container config..."
# Note: Proxmox doesn't support password in config, but we can document it
# Method 2: Create a script and execute it in container
echo "Method 2: Creating password script in container..."
# Get container node
CONTAINER_NODE=$(ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
"for node in ml110 pve pve2; do \
if pvesh get /nodes/\$node/lxc/$VMID/status/current 2>/dev/null | grep -q status; then \
echo \$node; break; \
fi; \
done" 2>/dev/null || echo "")
if [ -n "$CONTAINER_NODE" ]; then
echo "Container found on node: $CONTAINER_NODE"
echo ""
echo "Password must be set manually via Proxmox Web UI:"
echo " 1. Navigate to Container $VMID → Options → Password"
echo " 2. Enter password: $PASSWORD"
echo " 3. Click OK"
echo ""
echo "Or via container console:"
echo " ssh $PROXMOX_HOST"
echo " pct enter $VMID"
echo " passwd root"
echo " # Enter: $PASSWORD (twice)"
fi

View File

@@ -0,0 +1,27 @@
#!/usr/bin/env bash
set -euo pipefail
# SSH to the Proxmox host with locale warnings suppressed.
# Usage: ./scripts/ssh-proxmox.sh [command...]
# Load IP configuration (optional; defaults below)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
HOST="${PROXMOX_HOST:-192.168.11.10}"
USER="${PROXMOX_USER:-root}"
# Suppress locale warnings
export LC_ALL=C
export LANG=C
if [[ $# -eq 0 ]]; then
  # Interactive SSH (suppress locale warnings)
  ssh "$USER@$HOST" "export LC_ALL=C; export LANG=C; bash"
else
  # "$*" joins all arguments into one remote command string; the previous
  # "$@" inside a double-quoted string splits into separate words and
  # mangles arguments that contain spaces.
  ssh "$USER@$HOST" "export LC_ALL=C; export LANG=C; $*"
fi

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
set -euo pipefail
# SSH to Proxmox host with locale warnings suppressed
# Usage: ./scripts/ssh-proxmox.sh [command...]
HOST="${PROXMOX_HOST:-192.168.11.10}"
USER="${PROXMOX_USER:-root}"
# Suppress locale warnings
export LC_ALL=C
export LANG=C
if [[ $# -eq 0 ]]; then
  # Interactive SSH (suppress locale warnings)
  ssh "$USER@$HOST" "export LC_ALL=C; export LANG=C; bash"
else
  # "$*" joins all arguments into one remote command string; the previous
  # "$@" inside a double-quoted string splits into separate words and
  # mangles arguments that contain spaces.
  ssh "$USER@$HOST" "export LC_ALL=C; export LANG=C; $*"
fi

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Start all stopped containers on pve2 - simplified version
# Usage: ./scripts/start-containers-on-pve2-simple.sh
set -euo pipefail
# Load IP configuration (optional; default below)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Default prevents 'set -u' aborting when the conf file is missing.
PVE2_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# All containers that failed on r630-02 (but exist on pve2)
ALL_CONTAINERS=(3000 3001 3002 3003 3500 3501 5200 6000 6400 10000 10001 10020 10030 10040 10050 10060 10070 10080 10090 10091 10092 10100 10101 10120 10130 10150 10151 10200 10201 10202 10210 10230 10232)
echo "Starting containers on pve2 (${PROXMOX_HOST_R630_01:-192.168.11.11})..."
echo ""
SUCCESS=0
FAILED=0
# Clear lock for CT 10232 first
# NOTE(review): these paths look QEMU-specific; LXC lock files may live
# elsewhere — confirm on the host.
echo "Clearing lock for CT 10232..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PVE2_IP} \
  "rm -f /var/lock/qemu-server/lock-10232 /var/lock/qemu-server/lxc-10232 2>/dev/null" || true
sleep 2
# Start containers
for vmid in "${ALL_CONTAINERS[@]}"; do
  echo -n "Starting CT $vmid... "
  # '>/dev/null 2>&1' — the previous '2>&1 >/dev/null' order still leaked
  # stderr to the terminal.
  if ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${PVE2_IP} \
    "pct start $vmid" >/dev/null 2>&1; then
    echo "✓"
    # $((...)) instead of ((SUCCESS++)): the post-increment of 0 evaluates
    # to 0, giving exit status 1, which killed the script under 'set -e'
    # on the very first successful start.
    SUCCESS=$((SUCCESS + 1))
    sleep 1
  else
    echo "✗"
    FAILED=$((FAILED + 1))
  fi
done
echo ""
echo "Summary:"
echo " Successfully started: $SUCCESS"
echo " Failed: $FAILED"
echo ""
if [[ $SUCCESS -gt 0 ]]; then
  echo "✓ Started $SUCCESS container(s) on pve2"
fi

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env bash
# Start all stopped containers on pve2 - simplified version
# Usage: ./scripts/start-containers-on-pve2-simple.sh
set -euo pipefail
PVE2_IP="192.168.11.11"
# All containers that failed on r630-02 (but exist on pve2)
ALL_CONTAINERS=(3000 3001 3002 3003 3500 3501 5200 6000 6400 10000 10001 10020 10030 10040 10050 10060 10070 10080 10090 10091 10092 10100 10101 10120 10130 10150 10151 10200 10201 10202 10210 10230 10232)
echo "Starting containers on pve2 (192.168.11.11)..."
echo ""
SUCCESS=0
FAILED=0
# Clear lock for CT 10232 first
# NOTE(review): these paths look QEMU-specific; LXC lock files may live
# elsewhere — confirm on the host.
echo "Clearing lock for CT 10232..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PVE2_IP} \
  "rm -f /var/lock/qemu-server/lock-10232 /var/lock/qemu-server/lxc-10232 2>/dev/null" || true
sleep 2
# Start containers
for vmid in "${ALL_CONTAINERS[@]}"; do
  echo -n "Starting CT $vmid... "
  # '>/dev/null 2>&1' — the previous '2>&1 >/dev/null' order still leaked
  # stderr to the terminal.
  if ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${PVE2_IP} \
    "pct start $vmid" >/dev/null 2>&1; then
    echo "✓"
    # $((...)) instead of ((SUCCESS++)): the post-increment of 0 evaluates
    # to 0, giving exit status 1, which killed the script under 'set -e'
    # on the very first successful start.
    SUCCESS=$((SUCCESS + 1))
    sleep 1
  else
    echo "✗"
    FAILED=$((FAILED + 1))
  fi
done
echo ""
echo "Summary:"
echo " Successfully started: $SUCCESS"
echo " Failed: $FAILED"
echo ""
if [[ $SUCCESS -gt 0 ]]; then
  echo "✓ Started $SUCCESS container(s) on pve2"
fi

View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Smoke-test every deployed contract: fetch its bytecode over JSON-RPC via
# 'cast' and report whether anything is actually deployed at each address.
# Usage: ./test-all-contracts.sh
RPC_URL="${RPC_URL:-http://${RPC_ALLTRA_1:-192.168.11.250}:8545}"
declare -A CONTRACTS=(
  ["Oracle Proxy"]="0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6"
  ["Oracle Aggregator"]="0x99b3511a2d315a497c8112c1fdd8d508d4b1e506"
  ["CCIP Router"]="0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e"
  ["CCIP Sender"]="0x105F8A15b819948a89153505762444Ee9f324684"
  ["CCIPWETH9Bridge"]="0x89dd12025bfCD38A168455A44B400e913ED33BE2"
  ["CCIPWETH10Bridge"]="0xe0E93247376aa097dB308B92e6Ba36bA015535D0"
  ["Price Feed Keeper"]="0xD3AD6831aacB5386B8A25BB8D8176a6C8a026f04"
)
echo "========================================="
echo "Contract Functionality Test"
echo "RPC: $RPC_URL"
echo "========================================="
echo ""
PASSED=0
FAILED=0
# Print the per-contract report and return 0 when the address holds
# bytecode, 1 otherwise. An unreachable RPC or missing 'cast' binary is
# treated as "no bytecode".
has_bytecode() {
  local label=$1 address=$2 code
  echo "Testing $label ($address)..."
  code=$(cast code "$address" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
  if [ -n "$code" ] && [ "$code" != "0x" ]; then
    echo " ✅ Has bytecode"
    return 0
  fi
  echo " ❌ No bytecode"
  return 1
}
for name in "${!CONTRACTS[@]}"; do
  if has_bytecode "$name" "${CONTRACTS[$name]}"; then
    PASSED=$((PASSED + 1))
  else
    FAILED=$((FAILED + 1))
  fi
  echo ""
done
echo "========================================="
echo "Summary: $PASSED passed, $FAILED failed"
echo "========================================="

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env bash
set -euo pipefail
# Test all deployed contracts functionality
# Usage: ./test-all-contracts.sh
RPC_URL="${RPC_URL:-http://192.168.11.250:8545}"
declare -A CONTRACTS=(
["Oracle Proxy"]="0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6"
["Oracle Aggregator"]="0x99b3511a2d315a497c8112c1fdd8d508d4b1e506"
["CCIP Router"]="0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e"
["CCIP Sender"]="0x105F8A15b819948a89153505762444Ee9f324684"
["CCIPWETH9Bridge"]="0x89dd12025bfCD38A168455A44B400e913ED33BE2"
["CCIPWETH10Bridge"]="0xe0E93247376aa097dB308B92e6Ba36bA015535D0"
["Price Feed Keeper"]="0xD3AD6831aacB5386B8A25BB8D8176a6C8a026f04"
)
echo "========================================="
echo "Contract Functionality Test"
echo "RPC: $RPC_URL"
echo "========================================="
echo ""
PASSED=0
FAILED=0
for name in "${!CONTRACTS[@]}"; do
addr="${CONTRACTS[$name]}"
echo "Testing $name ($addr)..."
BYTECODE=$(cast code "$addr" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
if [ -z "$BYTECODE" ] || [ "$BYTECODE" = "0x" ]; then
echo " ❌ No bytecode"
FAILED=$((FAILED + 1))
else
echo " ✅ Has bytecode"
PASSED=$((PASSED + 1))
fi
echo ""
done
echo "========================================="
echo "Summary: $PASSED passed, $FAILED failed"
echo "========================================="

View File

@@ -0,0 +1,47 @@
#!/bin/bash
set -euo pipefail
# Instructions for updating the Cloudflare Tunnel to route to central Nginx.
# The tunnel uses token-based configuration managed in the Cloudflare
# dashboard, so this script only prints the manual procedure.

# Load IP configuration (best-effort: defaults below apply if absent).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# shellcheck source=/dev/null
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Resolve the Nginx target once — the original repeated the
# ${IP_NGINX_LEGACY:-…} default inline for every hostname, so a single
# typo could silently diverge the printed instructions.
NGINX_TARGET="http://${IP_NGINX_LEGACY:-192.168.11.26}:80"

# All public hostnames that must be repointed at the central Nginx.
HOSTNAMES=(
  explorer.d-bis.org
  rpc-http-pub.d-bis.org
  rpc-ws-pub.d-bis.org
  rpc-http-prv.d-bis.org
  rpc-ws-prv.d-bis.org
  dbis-admin.d-bis.org
  dbis-api.d-bis.org
  dbis-api-2.d-bis.org
  mim4u.org
  www.mim4u.org
)

echo "═══════════════════════════════════════════════════════════"
echo " CLOUDFLARE TUNNEL UPDATE INSTRUCTIONS"
echo "═══════════════════════════════════════════════════════════"
echo ""
echo "The Cloudflare tunnel uses token-based configuration managed"
echo "in the Cloudflare dashboard. You need to update it manually."
echo ""
echo "Steps:"
echo "1. Go to: https://one.dash.cloudflare.com/"
echo "2. Navigate to: Zero Trust → Networks → Tunnels"
echo "3. Select tunnel: b02fe1fe-cb7d-484e-909b-7cc41298ebe8"
echo "4. Click 'Configure' → 'Public Hostnames'"
echo "5. Update ALL hostnames to route to: ${NGINX_TARGET}"
echo ""
echo "Required Updates:"
for host in "${HOSTNAMES[@]}"; do
  echo " - ${host} → ${NGINX_TARGET}"
done
echo ""
echo "After updating, the tunnel will automatically reload within 1-2 minutes."
echo ""
echo "Test after update:"
echo " curl https://explorer.d-bis.org/api/v2/stats"
echo " curl -X POST https://rpc-http-pub.d-bis.org \\"
echo " -H 'Content-Type: application/json' \\"
echo " -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}'"
echo ""

View File

@@ -0,0 +1,41 @@
#!/bin/bash
set -euo pipefail
# Prints the manual procedure for repointing the Cloudflare Tunnel at the
# central Nginx proxy. The tunnel configuration is token-based and lives in
# the Cloudflare dashboard, so nothing here touches the tunnel directly.

readonly target="http://192.168.11.26:80"
readonly tunnel_id="b02fe1fe-cb7d-484e-909b-7cc41298ebe8"
readonly rule="═══════════════════════════════════════════════════════════"

echo "$rule"
echo " CLOUDFLARE TUNNEL UPDATE INSTRUCTIONS"
echo "$rule"
echo ""
echo "The Cloudflare tunnel uses token-based configuration managed"
echo "in the Cloudflare dashboard. You need to update it manually."
echo ""
echo "Steps:"
echo "1. Go to: https://one.dash.cloudflare.com/"
echo "2. Navigate to: Zero Trust → Networks → Tunnels"
echo "3. Select tunnel: ${tunnel_id}"
echo "4. Click 'Configure' → 'Public Hostnames'"
echo "5. Update ALL hostnames to route to: ${target}"
echo ""
echo "Required Updates:"
# One line per public hostname, all routed to the same Nginx target.
for host in \
  explorer.d-bis.org \
  rpc-http-pub.d-bis.org \
  rpc-ws-pub.d-bis.org \
  rpc-http-prv.d-bis.org \
  rpc-ws-prv.d-bis.org \
  dbis-admin.d-bis.org \
  dbis-api.d-bis.org \
  dbis-api-2.d-bis.org \
  mim4u.org \
  www.mim4u.org; do
  echo " - ${host} → ${target}"
done
echo ""
echo "After updating, the tunnel will automatically reload within 1-2 minutes."
echo ""
echo "Test after update:"
echo " curl https://explorer.d-bis.org/api/v2/stats"
echo " curl -X POST https://rpc-http-pub.d-bis.org \\"
echo " -H 'Content-Type: application/json' \\"
echo " -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}'"
echo ""