docs: Ledger Live integration, contract deploy learnings, NEXT_STEPS updates
Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled

- ADD_CHAIN138_TO_LEDGER_LIVE: Ledger form done; public code review repo bis-innovations/LedgerLive; init/push commands
- CONTRACT_DEPLOYMENT_RUNBOOK: Chain 138 gas price 1 gwei, 36-addr check, TransactionMirror workaround
- CONTRACT_*: AddressMapper, MirrorManager deployed 2026-02-12; 36-address on-chain check
- NEXT_STEPS_FOR_YOU: Ledger done; steps completable now (no LAN); run-completable-tasks-from-anywhere
- MASTER_INDEX, OPERATOR_OPTIONAL, SMART_CONTRACTS_INVENTORY_SIMPLE: updates
- LEDGER_BLOCKCHAIN_INTEGRATION_COMPLETE: bis-innovations/LedgerLive reference

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
defiQUG
2026-02-12 15:46:57 -08:00
parent cc8dcaf356
commit fbda1b4beb
5114 changed files with 498901 additions and 4567 deletions

View File

@@ -0,0 +1,96 @@
#!/bin/bash
# Backup Configuration Files and Validator Keys
# Creates encrypted backups of critical files.
#
# Environment overrides:
#   BACKUP_BASE        - backup destination root (default: /backup/smom-dbis-138)
#   SOURCE_PROJECT     - source project to back up (default: /home/intlc/projects/smom-dbis-138)
#   BACKUP_PASSPHRASE  - passphrase for validator-key encryption; when empty or
#                        unset, keys are stored WITHOUT encryption (with a warning).
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARNING]${NC} $1"; }

# Backup configuration: one timestamped directory per run.
BACKUP_BASE="${BACKUP_BASE:-/backup/smom-dbis-138}"
BACKUP_DIR="$BACKUP_BASE/$(date +%Y%m%d-%H%M%S)"
mkdir -p "$BACKUP_DIR"
log_info "Creating backup in: $BACKUP_DIR"

# Backup deployment configs (if on Proxmox host)
if [[ -d "$PROJECT_ROOT/config" ]]; then
  log_info "Backing up deployment configuration files..."
  tar -czf "$BACKUP_DIR/deployment-configs.tar.gz" -C "$PROJECT_ROOT" config/ || {
    log_warn "Failed to backup deployment configs (may not be on Proxmox host)"
  }
fi

# Backup source project configs (if accessible)
SOURCE_PROJECT="${SOURCE_PROJECT:-/home/intlc/projects/smom-dbis-138}"
if [[ -d "$SOURCE_PROJECT/config" ]]; then
  log_info "Backing up source project configuration files..."
  tar -czf "$BACKUP_DIR/source-configs.tar.gz" -C "$SOURCE_PROJECT" config/ || {
    log_warn "Failed to backup source configs"
  }

  # Backup validator keys. BUGFIX: encrypt only when gpg is available AND a
  # non-empty passphrase was provided — the previous version invoked gpg with
  # an empty --passphrase default, producing an archive anyone could decrypt.
  if [[ -d "$SOURCE_PROJECT/keys/validators" ]]; then
    log_info "Backing up validator keys..."
    if command -v gpg >/dev/null 2>&1 && [[ -n "${BACKUP_PASSPHRASE:-}" ]]; then
      tar -czf - -C "$SOURCE_PROJECT" keys/validators/ | \
        gpg -c --cipher-algo AES256 --batch --yes \
          --passphrase "$BACKUP_PASSPHRASE" \
          > "$BACKUP_DIR/validator-keys.tar.gz.gpg" 2>/dev/null || {
        log_warn "GPG encryption failed, backing up without encryption"
        rm -f "$BACKUP_DIR/validator-keys.tar.gz.gpg"  # drop partial/empty output
        tar -czf "$BACKUP_DIR/validator-keys.tar.gz" -C "$SOURCE_PROJECT" keys/validators/
      }
    else
      log_warn "GPG or BACKUP_PASSPHRASE not available, backing up without encryption"
      tar -czf "$BACKUP_DIR/validator-keys.tar.gz" -C "$SOURCE_PROJECT" keys/validators/
    fi
  fi
fi

# Backup container configurations (if pct available, i.e. on a Proxmox host)
if command -v pct >/dev/null 2>&1; then
  log_info "Backing up container configurations..."
  mkdir -p "$BACKUP_DIR/containers"
  for vmid in 1000 1001 1002 1003 1004 1500 1501 1502 1503 2500 2501 2502; do
    if pct config "$vmid" >/dev/null 2>&1; then
      pct config "$vmid" > "$BACKUP_DIR/containers/container-$vmid.conf" 2>/dev/null || true
    fi
  done
  log_success "Container configs backed up"
fi

# Create backup manifest describing contents and how to restore them.
cat > "$BACKUP_DIR/manifest.txt" <<MANIFEST
Backup created: $(date)
Backup location: $BACKUP_DIR
Contents:
- deployment-configs.tar.gz
- source-configs.tar.gz
- validator-keys.tar.gz[.gpg]
- containers/ (container configurations)
Restore instructions:
1. Extract configs: tar -xzf deployment-configs.tar.gz
2. Extract source configs: tar -xzf source-configs.tar.gz
3. Decrypt and extract keys: gpg -d validator-keys.tar.gz.gpg | tar -xzf -
4. Restore container configs from containers/ directory
MANIFEST
log_success "Backup complete: $BACKUP_DIR"

# Retention policy: Keep backups for 30 days
log_info "Cleaning up old backups (retention: 30 days)..."
find "$BACKUP_BASE" -mindepth 1 -maxdepth 1 -type d -mtime +30 -exec rm -rf {} \; 2>/dev/null || true
log_success "Backup process complete!"

View File

@@ -0,0 +1,103 @@
#!/bin/bash
# Backup current container configurations before IP changes
# Creates rollback script
set -euo pipefail

# Load IP configuration (optional; the ${VAR:-default} fallbacks below cover
# the case where the conf file is absent).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

BACKUP_DIR="/home/intlc/projects/proxmox/backups/ip_conversion_$(date +%Y%m%d_%H%M%S)"
ROLLBACK_SCRIPT="$BACKUP_DIR/rollback-ip-changes.sh"
mkdir -p "$BACKUP_DIR"

echo "=== Backing Up Container Configurations ==="
echo "Backup directory: $BACKUP_DIR"
echo ""

# Define conversions directly (from IP_ASSIGNMENT_PLAN.md).
# Colon-separated fields: host_ip:vmid:old_ip:new_ip:name:hostname
# BUGFIX: the generator had emitted redundantly nested defaults such as
# ${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-...}}}; collapsed each to a
# single ${VAR:-default} (identical expansion, readable).
declare -a CONVERSIONS=(
  "${PROXMOX_HOST_ML110:-192.168.11.10}:3501:${IP_DEVICE_14:-192.168.11.14}:192.168.11.28:ccip-monitor-1:ml110"
  "${PROXMOX_HOST_ML110:-192.168.11.10}:3500:192.168.11.15:192.168.11.29:oracle-publisher-1:ml110"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:103:${IP_OMADA:-192.168.11.20}:192.168.11.30:omada:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:104:192.168.11.18:192.168.11.31:gitea:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:100:192.168.11.4:192.168.11.32:proxmox-mail-gateway:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:101:192.168.11.6:192.168.11.33:proxmox-datacenter-manager:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:102:192.168.11.9:192.168.11.34:cloudflared:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:6200:192.168.11.7:${IP_SERVICE_35:-192.168.11.35}:firefly-1:r630-02"
  "${PROXMOX_HOST_R630_02:-192.168.11.12}:7811:N/A:${IP_SERVICE_36:-192.168.11.36}:mim-api-1:r630-02"
)

# Create rollback script header (quoted delimiter: no expansion here).
cat > "$ROLLBACK_SCRIPT" << 'EOF'
#!/bin/bash
# Rollback script for IP changes
# Generated automatically - DO NOT EDIT MANUALLY
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
echo "=== Rolling Back IP Changes ==="
echo ""
EOF
chmod +x "$ROLLBACK_SCRIPT"

# Backup each container's config, and append a rollback step per container.
for conversion in "${CONVERSIONS[@]}"; do
  IFS=':' read -r host_ip vmid old_ip new_ip name hostname <<< "$conversion"
  echo "Backing up VMID $vmid ($name) on $hostname..."
  # Backup container config
  backup_file="$BACKUP_DIR/${hostname}_${vmid}_config.txt"
  ssh -o ConnectTimeout=10 root@"$host_ip" "pct config $vmid" > "$backup_file" 2>/dev/null || echo "Warning: Could not backup $vmid"
  # Add to rollback script (only if old_ip is not N/A; unquoted delimiter so
  # $vmid/$old_ip/... are expanded NOW, baking concrete values into the script)
  if [ "$old_ip" != "N/A" ] && [ -n "$old_ip" ]; then
    cat >> "$ROLLBACK_SCRIPT" << EOF
# Rollback VMID $vmid ($name) on $hostname
echo "Rolling back VMID $vmid to $old_ip..."
ssh -o ConnectTimeout=10 root@$host_ip "pct stop $vmid" 2>/dev/null || true
sleep 2
ssh -o ConnectTimeout=10 root@$host_ip "pct set $vmid --net0 bridge=vmbr0,name=eth0,ip=$old_ip/24,gw=${NETWORK_GATEWAY:-192.168.11.1},type=veth" || echo "Warning: Failed to rollback $vmid"
ssh -o ConnectTimeout=10 root@$host_ip "pct start $vmid" 2>/dev/null || true
echo ""
EOF
  fi
done

# Create summary of what was backed up and where the rollback script lives.
cat > "$BACKUP_DIR/backup_summary.txt" << EOF
Backup Summary
Generated: $(date)
Total containers to convert: ${#CONVERSIONS[@]}
Conversions:
$(printf '%s\n' "${CONVERSIONS[@]}")
Backup files:
$(ls -1 "$BACKUP_DIR"/*_config.txt 2>/dev/null | wc -l) config files backed up
Rollback script: $ROLLBACK_SCRIPT
EOF

echo ""
echo "=== Backup Complete ==="
echo "Backed up ${#CONVERSIONS[@]} container configurations"
echo "Backup directory: $BACKUP_DIR"
echo "Rollback script: $ROLLBACK_SCRIPT"
echo ""
echo "To rollback changes, run: $ROLLBACK_SCRIPT"

View File

@@ -0,0 +1,91 @@
#!/bin/bash
# Backup current container configurations before IP changes
# Creates rollback script
# Backs up `pct config` for each container listed in CONVERSIONS, and emits an
# executable rollback script that restores the OLD IPs via `pct set --net0`.
set -euo pipefail
# Timestamped backup directory; the generated rollback script lives inside it.
BACKUP_DIR="/home/intlc/projects/proxmox/backups/ip_conversion_$(date +%Y%m%d_%H%M%S)"
ROLLBACK_SCRIPT="$BACKUP_DIR/rollback-ip-changes.sh"
mkdir -p "$BACKUP_DIR"
echo "=== Backing Up Container Configurations ==="
echo "Backup directory: $BACKUP_DIR"
echo ""
# Define conversions directly (from IP_ASSIGNMENT_PLAN.md)
# Colon-separated fields: host_ip:vmid:old_ip:new_ip:name:hostname
# old_ip may be "N/A" for containers that had no prior static address.
declare -a CONVERSIONS=(
"192.168.11.10:3501:192.168.11.14:192.168.11.28:ccip-monitor-1:ml110"
"192.168.11.10:3500:192.168.11.15:192.168.11.29:oracle-publisher-1:ml110"
"192.168.11.12:103:192.168.11.20:192.168.11.30:omada:r630-02"
"192.168.11.12:104:192.168.11.18:192.168.11.31:gitea:r630-02"
"192.168.11.12:100:192.168.11.4:192.168.11.32:proxmox-mail-gateway:r630-02"
"192.168.11.12:101:192.168.11.6:192.168.11.33:proxmox-datacenter-manager:r630-02"
"192.168.11.12:102:192.168.11.9:192.168.11.34:cloudflared:r630-02"
"192.168.11.12:6200:192.168.11.7:192.168.11.35:firefly-1:r630-02"
"192.168.11.12:7811:N/A:192.168.11.36:mim-api-1:r630-02"
)
# Create rollback script header (quoted 'EOF': the header is written verbatim,
# nothing is expanded here)
cat > "$ROLLBACK_SCRIPT" << 'EOF'
#!/bin/bash
# Rollback script for IP changes
# Generated automatically - DO NOT EDIT MANUALLY
set -euo pipefail
echo "=== Rolling Back IP Changes ==="
echo ""
EOF
chmod +x "$ROLLBACK_SCRIPT"
# Backup each container
for conversion in "${CONVERSIONS[@]}"; do
# Split the colon-delimited record into its six fields.
IFS=':' read -r host_ip vmid old_ip new_ip name hostname <<< "$conversion"
echo "Backing up VMID $vmid ($name) on $hostname..."
# Backup container config
backup_file="$BACKUP_DIR/${hostname}_${vmid}_config.txt"
ssh -o ConnectTimeout=10 root@"$host_ip" "pct config $vmid" > "$backup_file" 2>/dev/null || echo "Warning: Could not backup $vmid"
# Add to rollback script (only if old_ip is not N/A)
# Unquoted EOF: $vmid/$old_ip/etc. expand NOW, baking concrete values in.
if [ "$old_ip" != "N/A" ] && [ -n "$old_ip" ]; then
cat >> "$ROLLBACK_SCRIPT" << EOF
# Rollback VMID $vmid ($name) on $hostname
echo "Rolling back VMID $vmid to $old_ip..."
ssh -o ConnectTimeout=10 root@$host_ip "pct stop $vmid" 2>/dev/null || true
sleep 2
ssh -o ConnectTimeout=10 root@$host_ip "pct set $vmid --net0 bridge=vmbr0,name=eth0,ip=$old_ip/24,gw=192.168.11.1,type=veth" || echo "Warning: Failed to rollback $vmid"
ssh -o ConnectTimeout=10 root@$host_ip "pct start $vmid" 2>/dev/null || true
echo ""
EOF
fi
done
# Create summary
cat > "$BACKUP_DIR/backup_summary.txt" << EOF
Backup Summary
Generated: $(date)
Total containers to convert: ${#CONVERSIONS[@]}
Conversions:
$(printf '%s\n' "${CONVERSIONS[@]}")
Backup files:
$(ls -1 "$BACKUP_DIR"/*_config.txt 2>/dev/null | wc -l) config files backed up
Rollback script: $ROLLBACK_SCRIPT
EOF
echo ""
echo "=== Backup Complete ==="
echo "Backed up ${#CONVERSIONS[@]} container configurations"
echo "Backup directory: $BACKUP_DIR"
echo "Rollback script: $ROLLBACK_SCRIPT"
echo ""
echo "To rollback changes, run: $ROLLBACK_SCRIPT"

View File

@@ -0,0 +1,229 @@
#!/bin/bash
# Automated backup of NPMplus configuration and data
# Backs up database, proxy hosts, certificates, and configuration files
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Load IP configuration (lives one level above this script).
# BUGFIX: the previous version assigned PROJECT_ROOT twice with different
# values ("$SCRIPT_DIR/.." for this conf, then "$SCRIPT_DIR/../.." for
# everything else); a dedicated variable keeps the same effective paths while
# giving PROJECT_ROOT a single meaning.
IP_CONF_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${IP_CONF_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Optionally load project-level overrides; strict mode is relaxed while
# sourcing because .env may reference unset variables or fail harmlessly.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

# Connection / API settings (all overridable via environment or .env)
NPMPLUS_HOST="${NPMPLUS_HOST:-192.168.11.11}"
NPMPLUS_VMID="${NPMPLUS_VMID:-10233}"
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS_ETH0:-192.168.11.166}:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
BACKUP_DEST="${BACKUP_DEST:-$PROJECT_ROOT/backups/npmplus}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"
# ANSI color codes shared by the log helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Generic helper: colored "[TAG]" prefix, reset, then the message (echo -e so
# escape sequences are rendered, exactly as before).
_log_tag() { echo -e "${1}[${2}]${NC} ${3}"; }
log_info() { _log_tag "$BLUE" "INFO" "$1"; }
log_success() { _log_tag "$GREEN" "✓" "$1"; }
log_warn() { _log_tag "$YELLOW" "⚠" "$1"; }
log_error() { _log_tag "$RED" "✗" "$1"; }
# One timestamped working directory per run; compressed to a tarball at the end.
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR="$BACKUP_DEST/npmplus-backup-$TIMESTAMP"
mkdir -p "$BACKUP_DIR"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "💾 NPMplus Backup"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Backup directory: $BACKUP_DIR"
log_info "NPMplus Host: $NPMPLUS_HOST"
log_info "NPMplus VMID: $NPMPLUS_VMID"
# 1. Backup Database
# The container runs inside an LXC guest, hence the ssh -> pct exec -> docker
# chain for every remote command below.
log_info "Backing up NPMplus database..."
DB_BACKUP_SUCCESS=false
# Try direct file copy first
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker cp npmplus:/data/database.sqlite /tmp/db-backup.sqlite 2>/dev/null"; then
scp -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST:/tmp/db-backup.sqlite" \
"$BACKUP_DIR/database.sqlite" 2>/dev/null && \
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" "rm -f /tmp/db-backup.sqlite" 2>/dev/null && \
DB_BACKUP_SUCCESS=true
fi
# Try SQL dump as fallback
if [ "$DB_BACKUP_SUCCESS" = false ]; then
DB_DUMP=$(ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus sqlite3 /data/database.sqlite '.dump' 2>/dev/null" || echo "")
# "executable file not found" means sqlite3 is absent in the container image.
if [ -n "$DB_DUMP" ] && ! echo "$DB_DUMP" | grep -q "executable file not found"; then
echo "$DB_DUMP" > "$BACKUP_DIR/database.sql"
DB_BACKUP_SUCCESS=true
fi
fi
if [ "$DB_BACKUP_SUCCESS" = true ]; then
# stat -f%z is BSD/macOS, -c%s is GNU; the chain tries both, then the .sql dump.
DB_SIZE=$(stat -f%z "$BACKUP_DIR/database.sqlite" 2>/dev/null || \
stat -c%s "$BACKUP_DIR/database.sqlite" 2>/dev/null || \
stat -c%s "$BACKUP_DIR/database.sql" 2>/dev/null || echo "0")
log_success "Database backed up ($DB_SIZE bytes)"
else
log_warn "Database backup failed - database may be empty or inaccessible"
fi
# 2. Backup Proxy Hosts via API
# Requires NPM_PASSWORD; curl -k because the admin UI uses a self-signed cert.
if [ -n "$NPM_PASSWORD" ]; then
log_info "Backing up proxy hosts via API..."
TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
-H "Content-Type: application/json" \
-d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}" 2>/dev/null || echo "{}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
# Export proxy hosts
PROXY_HOSTS_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
echo "$PROXY_HOSTS_JSON" | jq '.' > "$BACKUP_DIR/proxy_hosts.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/proxy_hosts.json"
PROXY_COUNT=$(echo "$PROXY_HOSTS_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Proxy hosts backed up ($PROXY_COUNT hosts)"
# Export certificates metadata
CERTIFICATES_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/certificates" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
echo "$CERTIFICATES_JSON" | jq '.' > "$BACKUP_DIR/certificates.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/certificates.json"
CERT_COUNT=$(echo "$CERTIFICATES_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Certificates metadata backed up ($CERT_COUNT certificates)"
else
log_warn "API authentication failed - skipping API-based backups"
fi
else
log_warn "NPM_PASSWORD not set - skipping API-based backups"
fi
# 3. Backup Certificate Files
log_info "Backing up certificate files..."
CERT_BACKUP_DIR="$BACKUP_DIR/certificates"
mkdir -p "$CERT_BACKUP_DIR"
# Find certificate path via the docker volume mountpoint on the LXC guest.
CERT_PATH=$(ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker volume inspect npmplus_data --format '{{.Mountpoint}}' 2>/dev/null" || echo "")
if [ -n "$CERT_PATH" ] && [ "$CERT_PATH" != "null" ]; then
# NPMplus layout varies between releases; probe both known certbot locations.
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"test -d $CERT_PATH/tls/certbot/live 2>/dev/null"; then
CERT_SOURCE="$CERT_PATH/tls/certbot/live"
elif ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"test -d $CERT_PATH/certbot/live 2>/dev/null"; then
CERT_SOURCE="$CERT_PATH/certbot/live"
else
CERT_SOURCE=""
fi
if [ -n "$CERT_SOURCE" ]; then
rsync -avz --delete \
-e "ssh -o StrictHostKeyChecking=no" \
root@"$NPMPLUS_HOST:$CERT_SOURCE/" \
"$CERT_BACKUP_DIR/" 2>&1 | while IFS= read -r line; do
log_info "$line"
done
# NOTE(review): GNU find warns when -mindepth follows -type; harmless here.
CERT_COUNT=$(find "$CERT_BACKUP_DIR" -type d -mindepth 1 -maxdepth 1 2>/dev/null | wc -l || echo "0")
log_success "Certificate files backed up ($CERT_COUNT certificate directories)"
else
log_warn "Certificate directory not found"
fi
else
log_warn "Could not determine certificate path"
fi
# 4. Backup Nginx Configuration Files
log_info "Backing up Nginx configuration files..."
NGINX_BACKUP_DIR="$BACKUP_DIR/nginx"
mkdir -p "$NGINX_BACKUP_DIR"
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus test -d /data/nginx 2>/dev/null"; then
# Tar inside the container, copy the tarball out, then clean up the remote tmp.
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus tar czf /tmp/nginx-config.tar.gz -C /data nginx 2>/dev/null" && \
scp -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST:/tmp/nginx-config.tar.gz" \
"$NGINX_BACKUP_DIR/nginx-config.tar.gz" 2>/dev/null && \
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" "rm -f /tmp/nginx-config.tar.gz" 2>/dev/null && \
log_success "Nginx configuration backed up"
else
log_warn "Nginx configuration directory not found"
fi
# 5. Create Backup Manifest
log_info "Creating backup manifest..."
cat > "$BACKUP_DIR/manifest.txt" <<EOF
NPMplus Backup Manifest
=======================
Date: $(date)
Host: $NPMPLUS_HOST
VMID: $NPMPLUS_VMID
NPM URL: $NPM_URL
Contents:
- database.sqlite or database.sql: NPMplus SQLite database
- proxy_hosts.json: Proxy hosts configuration (if API available)
- certificates.json: Certificates metadata (if API available)
- certificates/: Certificate files from disk
- nginx/: Nginx configuration files
Backup Size: $(du -sh "$BACKUP_DIR" | awk '{print $1}')
Restore Instructions:
See: docs/04-configuration/NPMPLUS_BACKUP_RESTORE.md
EOF
log_success "Backup manifest created"
# 6. Compress Backup
log_info "Compressing backup..."
cd "$BACKUP_DEST"
# NOTE(review): with `set -o pipefail` a tar failure aborts the script here,
# so the "Compression failed" fallback below may be unreachable — confirm.
tar czf "npmplus-backup-$TIMESTAMP.tar.gz" "npmplus-backup-$TIMESTAMP" 2>&1 | while IFS= read -r line; do
log_info "$line"
done
if [ -f "npmplus-backup-$TIMESTAMP.tar.gz" ]; then
COMPRESSED_SIZE=$(stat -f%z "npmplus-backup-$TIMESTAMP.tar.gz" 2>/dev/null || \
stat -c%s "npmplus-backup-$TIMESTAMP.tar.gz" 2>/dev/null || echo "0")
log_success "Backup compressed ($(numfmt --to=iec-i --suffix=B $COMPRESSED_SIZE 2>/dev/null || echo "$COMPRESSED_SIZE bytes"))"
# Remove uncompressed directory
rm -rf "npmplus-backup-$TIMESTAMP"
else
log_warn "Compression failed - keeping uncompressed backup"
fi
# 7. Cleanup Old Backups
log_info "Cleaning up old backups (retention: $RETENTION_DAYS days)..."
find "$BACKUP_DEST" -name "npmplus-backup-*.tar.gz" -type f -mtime +$RETENTION_DAYS -delete 2>/dev/null || true
OLD_COUNT=$(find "$BACKUP_DEST" -name "npmplus-backup-*.tar.gz" -type f | wc -l || echo "0")
log_success "Old backups cleaned up ($OLD_COUNT backups retained)"
# Summary
echo ""
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "✅ Backup Complete!"
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Backup location: $BACKUP_DEST/npmplus-backup-$TIMESTAMP.tar.gz"
log_info "Manifest: $BACKUP_DIR/manifest.txt"
echo ""

View File

@@ -0,0 +1,223 @@
#!/bin/bash
# Automated backup of NPMplus configuration and data
# Backs up database, proxy hosts, certificates, and configuration files
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Optionally load project overrides from .env; strict mode is relaxed while
# sourcing because .env may reference unset variables or fail harmlessly.
if [ -f "$PROJECT_ROOT/.env" ]; then
set +euo pipefail
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -euo pipefail
fi
# Connection / API settings (all overridable via environment or .env).
# NPM_PASSWORD intentionally defaults to empty: API-based backups are skipped
# unless the caller provides it.
NPMPLUS_HOST="${NPMPLUS_HOST:-192.168.11.11}"
NPMPLUS_VMID="${NPMPLUS_VMID:-10233}"
NPM_URL="${NPM_URL:-https://192.168.11.166:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
BACKUP_DEST="${BACKUP_DEST:-$PROJECT_ROOT/backups/npmplus}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"
# ANSI color codes shared by the log helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Generic helper: colored "[TAG]" prefix, reset, then the message (echo -e so
# escape sequences are rendered, exactly as before).
_log_tag() { echo -e "${1}[${2}]${NC} ${3}"; }
log_info() { _log_tag "$BLUE" "INFO" "$1"; }
log_success() { _log_tag "$GREEN" "✓" "$1"; }
log_warn() { _log_tag "$YELLOW" "⚠" "$1"; }
log_error() { _log_tag "$RED" "✗" "$1"; }
# One timestamped working directory per run; compressed to a tarball at the end.
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR="$BACKUP_DEST/npmplus-backup-$TIMESTAMP"
mkdir -p "$BACKUP_DIR"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "💾 NPMplus Backup"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Backup directory: $BACKUP_DIR"
log_info "NPMplus Host: $NPMPLUS_HOST"
log_info "NPMplus VMID: $NPMPLUS_VMID"
# 1. Backup Database
# The container runs inside an LXC guest, hence the ssh -> pct exec -> docker
# chain for every remote command below.
log_info "Backing up NPMplus database..."
DB_BACKUP_SUCCESS=false
# Try direct file copy first
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker cp npmplus:/data/database.sqlite /tmp/db-backup.sqlite 2>/dev/null"; then
scp -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST:/tmp/db-backup.sqlite" \
"$BACKUP_DIR/database.sqlite" 2>/dev/null && \
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" "rm -f /tmp/db-backup.sqlite" 2>/dev/null && \
DB_BACKUP_SUCCESS=true
fi
# Try SQL dump as fallback
if [ "$DB_BACKUP_SUCCESS" = false ]; then
DB_DUMP=$(ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus sqlite3 /data/database.sqlite '.dump' 2>/dev/null" || echo "")
# "executable file not found" means sqlite3 is absent in the container image.
if [ -n "$DB_DUMP" ] && ! echo "$DB_DUMP" | grep -q "executable file not found"; then
echo "$DB_DUMP" > "$BACKUP_DIR/database.sql"
DB_BACKUP_SUCCESS=true
fi
fi
if [ "$DB_BACKUP_SUCCESS" = true ]; then
# stat -f%z is BSD/macOS, -c%s is GNU; the chain tries both, then the .sql dump.
DB_SIZE=$(stat -f%z "$BACKUP_DIR/database.sqlite" 2>/dev/null || \
stat -c%s "$BACKUP_DIR/database.sqlite" 2>/dev/null || \
stat -c%s "$BACKUP_DIR/database.sql" 2>/dev/null || echo "0")
log_success "Database backed up ($DB_SIZE bytes)"
else
log_warn "Database backup failed - database may be empty or inaccessible"
fi
# 2. Backup Proxy Hosts via API
# Requires NPM_PASSWORD; curl -k because the admin UI uses a self-signed cert.
if [ -n "$NPM_PASSWORD" ]; then
log_info "Backing up proxy hosts via API..."
TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
-H "Content-Type: application/json" \
-d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}" 2>/dev/null || echo "{}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
# Export proxy hosts
PROXY_HOSTS_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
echo "$PROXY_HOSTS_JSON" | jq '.' > "$BACKUP_DIR/proxy_hosts.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/proxy_hosts.json"
PROXY_COUNT=$(echo "$PROXY_HOSTS_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Proxy hosts backed up ($PROXY_COUNT hosts)"
# Export certificates metadata
CERTIFICATES_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/certificates" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
echo "$CERTIFICATES_JSON" | jq '.' > "$BACKUP_DIR/certificates.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/certificates.json"
CERT_COUNT=$(echo "$CERTIFICATES_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Certificates metadata backed up ($CERT_COUNT certificates)"
else
log_warn "API authentication failed - skipping API-based backups"
fi
else
log_warn "NPM_PASSWORD not set - skipping API-based backups"
fi
# 3. Backup Certificate Files
log_info "Backing up certificate files..."
CERT_BACKUP_DIR="$BACKUP_DIR/certificates"
mkdir -p "$CERT_BACKUP_DIR"
# Find certificate path via the docker volume mountpoint on the LXC guest.
CERT_PATH=$(ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker volume inspect npmplus_data --format '{{.Mountpoint}}' 2>/dev/null" || echo "")
if [ -n "$CERT_PATH" ] && [ "$CERT_PATH" != "null" ]; then
# NPMplus layout varies between releases; probe both known certbot locations.
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"test -d $CERT_PATH/tls/certbot/live 2>/dev/null"; then
CERT_SOURCE="$CERT_PATH/tls/certbot/live"
elif ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"test -d $CERT_PATH/certbot/live 2>/dev/null"; then
CERT_SOURCE="$CERT_PATH/certbot/live"
else
CERT_SOURCE=""
fi
if [ -n "$CERT_SOURCE" ]; then
rsync -avz --delete \
-e "ssh -o StrictHostKeyChecking=no" \
root@"$NPMPLUS_HOST:$CERT_SOURCE/" \
"$CERT_BACKUP_DIR/" 2>&1 | while IFS= read -r line; do
log_info "$line"
done
# NOTE(review): GNU find warns when -mindepth follows -type; harmless here.
CERT_COUNT=$(find "$CERT_BACKUP_DIR" -type d -mindepth 1 -maxdepth 1 2>/dev/null | wc -l || echo "0")
log_success "Certificate files backed up ($CERT_COUNT certificate directories)"
else
log_warn "Certificate directory not found"
fi
else
log_warn "Could not determine certificate path"
fi
# 4. Backup Nginx Configuration Files
log_info "Backing up Nginx configuration files..."
NGINX_BACKUP_DIR="$BACKUP_DIR/nginx"
mkdir -p "$NGINX_BACKUP_DIR"
if ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus test -d /data/nginx 2>/dev/null"; then
# Tar inside the container, copy the tarball out, then clean up the remote tmp.
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" \
"pct exec $NPMPLUS_VMID -- docker exec npmplus tar czf /tmp/nginx-config.tar.gz -C /data nginx 2>/dev/null" && \
scp -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST:/tmp/nginx-config.tar.gz" \
"$NGINX_BACKUP_DIR/nginx-config.tar.gz" 2>/dev/null && \
ssh -o StrictHostKeyChecking=no root@"$NPMPLUS_HOST" "rm -f /tmp/nginx-config.tar.gz" 2>/dev/null && \
log_success "Nginx configuration backed up"
else
log_warn "Nginx configuration directory not found"
fi
# 5. Create Backup Manifest
log_info "Creating backup manifest..."
cat > "$BACKUP_DIR/manifest.txt" <<EOF
NPMplus Backup Manifest
=======================
Date: $(date)
Host: $NPMPLUS_HOST
VMID: $NPMPLUS_VMID
NPM URL: $NPM_URL
Contents:
- database.sqlite or database.sql: NPMplus SQLite database
- proxy_hosts.json: Proxy hosts configuration (if API available)
- certificates.json: Certificates metadata (if API available)
- certificates/: Certificate files from disk
- nginx/: Nginx configuration files
Backup Size: $(du -sh "$BACKUP_DIR" | awk '{print $1}')
Restore Instructions:
See: docs/04-configuration/NPMPLUS_BACKUP_RESTORE.md
EOF
log_success "Backup manifest created"
# 6. Compress Backup
log_info "Compressing backup..."
cd "$BACKUP_DEST"
# NOTE(review): with `set -o pipefail` a tar failure aborts the script here,
# so the "Compression failed" fallback below may be unreachable — confirm.
tar czf "npmplus-backup-$TIMESTAMP.tar.gz" "npmplus-backup-$TIMESTAMP" 2>&1 | while IFS= read -r line; do
log_info "$line"
done
if [ -f "npmplus-backup-$TIMESTAMP.tar.gz" ]; then
COMPRESSED_SIZE=$(stat -f%z "npmplus-backup-$TIMESTAMP.tar.gz" 2>/dev/null || \
stat -c%s "npmplus-backup-$TIMESTAMP.tar.gz" 2>/dev/null || echo "0")
log_success "Backup compressed ($(numfmt --to=iec-i --suffix=B $COMPRESSED_SIZE 2>/dev/null || echo "$COMPRESSED_SIZE bytes"))"
# Remove uncompressed directory
rm -rf "npmplus-backup-$TIMESTAMP"
else
log_warn "Compression failed - keeping uncompressed backup"
fi
# 7. Cleanup Old Backups
log_info "Cleaning up old backups (retention: $RETENTION_DAYS days)..."
find "$BACKUP_DEST" -name "npmplus-backup-*.tar.gz" -type f -mtime +$RETENTION_DAYS -delete 2>/dev/null || true
OLD_COUNT=$(find "$BACKUP_DEST" -name "npmplus-backup-*.tar.gz" -type f | wc -l || echo "0")
log_success "Old backups cleaned up ($OLD_COUNT backups retained)"
# Summary
echo ""
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "✅ Backup Complete!"
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Backup location: $BACKUP_DEST/npmplus-backup-$TIMESTAMP.tar.gz"
log_info "Manifest: $BACKUP_DIR/manifest.txt"
echo ""

View File

@@ -0,0 +1,283 @@
#!/usr/bin/env bash
# Comprehensive Cleanup of Old, Backup, and Unreferenced Files
# Safely removes old files from both local projects and remote ml110
#
# Targets:
# - Backup directories (backup-*, *backup*)
# - Temporary key generation directories (temp-all-keys-*)
# - Old log files (logs/*.log older than 30 days)
# - Temporary files (*.bak, *.old, *~, *.swp)
# - Old documentation files that are no longer referenced
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Load IP configuration (lives one level above this script).
# BUGFIX: PROJECT_ROOT was previously assigned twice with different values;
# a dedicated variable keeps the same paths with one meaning per name.
IP_CONF_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${IP_CONF_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Configuration (all overridable via environment)
DRY_RUN="${DRY_RUN:-true}"
REMOTE_HOST="${REMOTE_HOST:-192.168.11.10}"
REMOTE_USER="${REMOTE_USER:-root}"
# SECURITY: the remote root password must come from the environment; a
# hardcoded default credential was removed. With REMOTE_PASS unset, the
# remote connectivity check fails and remote cleanup is skipped with a warning.
REMOTE_PASS="${REMOTE_PASS:-}"
MIN_LOG_AGE_DAYS=30
# Parse arguments
# Only two flags are recognized; anything else aborts with an error.
while [[ $# -gt 0 ]]; do
case $1 in
--execute)
DRY_RUN=false
shift
;;
--help)
cat << EOF
Usage: $0 [OPTIONS]
Comprehensive cleanup of old, backup, and unreferenced files.
Options:
  --execute Actually delete files (default: dry-run)
  --help Show this help
Safety:
  - By default, runs in DRY-RUN mode
  - Use --execute to actually delete files
  - Creates detailed manifest of files to be deleted
EOF
exit 0
;;
*)
log_error "Unknown option: $1"
exit 1
;;
esac
done
# Create cleanup manifest
# Every candidate path (local or remote) is appended to this log as it is found.
CLEANUP_LOG="$PROJECT_ROOT/logs/cleanup-$(date +%Y%m%d-%H%M%S).log"
mkdir -p "$PROJECT_ROOT/logs"
# Truncate/create the log file.
> "$CLEANUP_LOG"
log_info "========================================="
log_info "Comprehensive File Cleanup"
log_info "========================================="
log_info "Mode: $([ "$DRY_RUN" == "true" ] && echo "DRY-RUN" || echo "EXECUTE")"
log_info "Log: $CLEANUP_LOG"
log_info ""
# Global counters updated by safe_delete.
TOTAL_FOUND=0
TOTAL_DELETED=0
# Function to safely delete a file/directory.
# Records the path in $CLEANUP_LOG, bumps TOTAL_FOUND, and only deletes when
# DRY_RUN is not "true" (bumping TOTAL_DELETED on success).
# Returns 0 on success/no-op, 1 when deletion fails.
safe_delete() {
  local target="$1"
  local label="${2:-item}"   # informational only; currently unused

  # Nothing to do for paths that no longer exist.
  [[ -e "$target" ]] || return 0

  # Record the candidate and count it regardless of mode.
  echo "$target" >> "$CLEANUP_LOG"
  TOTAL_FOUND=$((TOTAL_FOUND + 1))

  if [[ "$DRY_RUN" == "true" ]]; then
    echo "Would delete: $target"
    return 0
  fi

  if rm -rf "$target" 2>/dev/null; then
    TOTAL_DELETED=$((TOTAL_DELETED + 1))
    echo "✓ Deleted: $target"
    return 0
  fi

  echo "✗ Failed: $target" >&2
  return 1
}
# Clean local proxmox project
log_info "=== Cleaning Local Proxmox Project ==="
PROXMOX_DIR="$PROJECT_ROOT"
# Old markdown files in root (status/completion docs that are superseded)
OLD_DOCS_PROXMOX=(
"$PROXMOX_DIR/ACTION_PLAN_NOW.md"
"$PROXMOX_DIR/DEPLOYMENT_IN_PROGRESS.md"
"$PROXMOX_DIR/DEPLOYMENT_SOLUTION.md"
"$PROXMOX_DIR/FINAL_STATUS.txt"
"$PROXMOX_DIR/IMPLEMENTATION_COMPLETE.md"
"$PROXMOX_DIR/NEXT_STEPS_QUICK_REFERENCE.md"
"$PROXMOX_DIR/ORGANIZATION_SUMMARY.md"
"$PROXMOX_DIR/PROJECT_STRUCTURE.md"
"$PROXMOX_DIR/QUICK_DEPLOY_FIX.md"
"$PROXMOX_DIR/QUICK_DEPLOY.md"
"$PROXMOX_DIR/QUICK_START_VALIDATED_SET.md"
"$PROXMOX_DIR/STATUS_FINAL.md"
"$PROXMOX_DIR/STATUS.md"
"$PROXMOX_DIR/VALIDATED_SET_IMPLEMENTATION_SUMMARY.md"
)
for doc in "${OLD_DOCS_PROXMOX[@]}"; do
safe_delete "$doc" "old doc"
done
# Temporary besu-enodes directories
# Process substitution (not a pipe) keeps safe_delete's counters in this shell.
while IFS= read -r dir; do
safe_delete "$dir" "temp enode dir"
done < <(find "$PROXMOX_DIR" -maxdepth 1 -type d -name "besu-enodes-*" 2>/dev/null)
# Old log files in smom-dbis-138-proxmox/logs
if [[ -d "$PROXMOX_DIR/smom-dbis-138-proxmox/logs" ]]; then
while IFS= read -r logfile; do
if [[ -f "$logfile" ]]; then
# Age in whole days from mtime (stat -c is GNU; missing file counts as age 0... huge).
file_age=$(( ($(date +%s) - $(stat -c %Y "$logfile" 2>/dev/null || echo 0)) / 86400 ))
if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
safe_delete "$logfile" "old log"
fi
fi
done < <(find "$PROXMOX_DIR/smom-dbis-138-proxmox/logs" -type f -name "*.log" 2>/dev/null)
fi
# Backup/temp files (only in specific project directories)
while IFS= read -r file; do
# Only process files in our project directories
if [[ "$file" == "$PROXMOX_DIR/"* ]] && [[ "$file" != *"/node_modules/"* ]] && [[ "$file" != *"/ProxmoxVE/"* ]] && [[ "$file" != *"/mcp-proxmox/"* ]] && [[ "$file" != *"/the_order/"* ]]; then
safe_delete "$file" "backup/temp file"
fi
done < <(find "$PROXMOX_DIR" -maxdepth 3 -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" \) 2>/dev/null)
# Clean local smom-dbis-138 project
log_info ""
log_info "=== Cleaning Local smom-dbis-138 Project ==="
# Try different possible locations; first existing directory wins.
SMOM_DIR=""
for possible_dir in "$PROJECT_ROOT/../smom-dbis-138" "/home/intlc/projects/smom-dbis-138"; do
if [[ -d "$possible_dir" ]]; then
SMOM_DIR="$possible_dir"
break
fi
done
if [[ -n "$SMOM_DIR" ]] && [[ -d "$SMOM_DIR" ]]; then
log_info "Using smom-dbis-138 directory: $SMOM_DIR"
# Temporary key generation directories
while IFS= read -r dir; do
safe_delete "$dir" "temp key gen dir"
done < <(find "$SMOM_DIR" -maxdepth 1 -type d -name "temp-all-keys-*" 2>/dev/null)
# Backup key directories (keep only the most recent)
# Directory names embed timestamps, so lexicographic sort == chronological.
LATEST_BACKUP=$(find "$SMOM_DIR" -maxdepth 1 -type d -name "backup-keys-*" 2>/dev/null | sort | tail -1)
while IFS= read -r dir; do
if [[ "$dir" != "$LATEST_BACKUP" ]]; then
safe_delete "$dir" "old backup keys"
fi
done < <(find "$SMOM_DIR" -maxdepth 1 -type d -name "backup-keys-*" 2>/dev/null)
# Old log files
if [[ -d "$SMOM_DIR/logs" ]]; then
while IFS= read -r logfile; do
if [[ -f "$logfile" ]]; then
file_age=$(( ($(date +%s) - $(stat -c %Y "$logfile" 2>/dev/null || echo 0)) / 86400 ))
if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
safe_delete "$logfile" "old log"
fi
fi
done < <(find "$SMOM_DIR/logs" -type f -name "*.log" 2>/dev/null)
fi
# Temporary/backup files
while IFS= read -r file; do
safe_delete "$file" "backup/temp file"
done < <(find "$SMOM_DIR" -maxdepth 2 -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" \) ! -path "*/node_modules/*" 2>/dev/null)
else
log_warn "smom-dbis-138 directory not found: $SMOM_DIR"
fi
# Clean remote ml110: enumerate backup/temp/old-log paths under /opt on the
# remote host and (in execute mode) delete them one by one over ssh.
log_info ""
log_info "=== Cleaning Remote Host (ml110) ==="
if sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 \
    "${REMOTE_USER}@${REMOTE_HOST}" "echo 'Connected'" 2>/dev/null; then
  log_info "Connected to ${REMOTE_HOST}"
  # Get list of candidate paths (relative to /opt). Escaped \$ expand on the
  # remote side; $MIN_LOG_AGE_DAYS expands locally before the command is sent.
  REMOTE_CLEANUP=$(sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
    "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && {
    find smom-dbis-138* -type d -name '*backup*' 2>/dev/null
    find smom-dbis-138* -type d -name 'temp-all-keys-*' 2>/dev/null
    find smom-dbis-138*/logs -type f -name '*.log' 2>/dev/null | while read -r log; do
    age=\$(( (\$(date +%s) - \$(stat -c %Y \"\$log\" 2>/dev/null || echo 0)) / 86400 ))
    if [[ \$age -gt $MIN_LOG_AGE_DAYS ]]; then
    echo \"\$log\"
    fi
    done
    find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) 2>/dev/null
    }" 2>/dev/null)
  if [[ -n "$REMOTE_CLEANUP" ]]; then
    REMOTE_COUNT=0
    # BUGFIX: the previous `echo "$REMOTE_CLEANUP" | while ...` ran the loop in
    # a pipeline subshell, so the REMOTE_COUNT and TOTAL_DELETED increments were
    # discarded and the script always reported "Found 0 items on remote".
    # A here-string keeps the loop in the current shell.
    while IFS= read -r item; do
      if [[ -n "$item" ]]; then
        REMOTE_COUNT=$((REMOTE_COUNT + 1))
        echo "/opt/$item" >> "$CLEANUP_LOG"
        if [[ "$DRY_RUN" == "true" ]]; then
          echo "Would delete (remote): /opt/$item"
        else
          # BUGFIX: execute mode previously still printed "Would delete".
          echo "Deleting (remote): /opt/$item"
          if sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
            "${REMOTE_USER}@${REMOTE_HOST}" "rm -rf \"/opt/$item\" 2>/dev/null && echo '✓' || echo '✗'" 2>/dev/null | grep -q "✓"; then
            TOTAL_DELETED=$((TOTAL_DELETED + 1))
          fi
        fi
      fi
    done <<< "$REMOTE_CLEANUP"
    log_info "Found $REMOTE_COUNT items on remote"
  else
    log_info "No cleanup targets found on remote"
  fi
else
  log_warn "Cannot connect to ${REMOTE_HOST}, skipping remote cleanup"
fi
# Summary
# TOTAL_FOUND/TOTAL_DELETED are maintained by safe_delete for local items.
log_info ""
log_info "========================================="
log_info "Cleanup Summary"
log_info "========================================="
log_info "Total items found: $TOTAL_FOUND"
if [[ "$DRY_RUN" == "true" ]]; then
log_warn "DRY-RUN mode: No files were deleted"
log_info "Review the log file: $CLEANUP_LOG"
log_info "Run with --execute to actually delete: $0 --execute"
else
log_success "Total items deleted: $TOTAL_DELETED"
log_info "Cleanup log: $CLEANUP_LOG"
fi
log_info ""

View File

@@ -0,0 +1,277 @@
#!/usr/bin/env bash
# Comprehensive Cleanup of Old, Backup, and Unreferenced Files
# Safely removes old files from both local projects and remote ml110
#
# Targets:
# - Backup directories (backup-*, *backup*)
# - Temporary key generation directories (temp-all-keys-*)
# - Old log files (logs/*.log older than 30 days)
# - Temporary files (*.bak, *.old, *~, *.swp)
# - Old documentation files that are no longer referenced
set -euo pipefail

# Resolve paths relative to this script's location (two levels below root).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors for terminal output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers; all write to stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Configuration (all overridable via environment).
DRY_RUN="${DRY_RUN:-true}"
REMOTE_HOST="${REMOTE_HOST:-192.168.11.10}"
REMOTE_USER="${REMOTE_USER:-root}"
# SECURITY(review): a root password is hardcoded here as the default value.
# This must be removed — use SSH keys or inject REMOTE_PASS from a secret
# store; the password also leaks via the process list when passed to
# `sshpass -p` below. TODO: rotate this credential.
REMOTE_PASS="${REMOTE_PASS:-L@kers2010}"
MIN_LOG_AGE_DAYS=30

# Parse arguments: only --execute (disable dry-run) and --help are accepted.
while [[ $# -gt 0 ]]; do
    case $1 in
        --execute)
            DRY_RUN=false
            shift
            ;;
        --help)
            cat << EOF
Usage: $0 [OPTIONS]
Comprehensive cleanup of old, backup, and unreferenced files.
Options:
--execute Actually delete files (default: dry-run)
--help Show this help
Safety:
- By default, runs in DRY-RUN mode
- Use --execute to actually delete files
- Creates detailed manifest of files to be deleted
EOF
            exit 0
            ;;
        *)
            log_error "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Every path considered for deletion is appended to this manifest/log file.
# Create cleanup manifest
CLEANUP_LOG="$PROJECT_ROOT/logs/cleanup-$(date +%Y%m%d-%H%M%S).log"
mkdir -p "$PROJECT_ROOT/logs"
> "$CLEANUP_LOG"

log_info "========================================="
log_info "Comprehensive File Cleanup"
log_info "========================================="
log_info "Mode: $([ "$DRY_RUN" == "true" ] && echo "DRY-RUN" || echo "EXECUTE")"
log_info "Log: $CLEANUP_LOG"
log_info ""

# Global counters maintained by safe_delete and the remote-cleanup section.
TOTAL_FOUND=0
TOTAL_DELETED=0
# Function to safely delete a file/directory.
#
# Records the target in the cleanup manifest and (unless in dry-run mode)
# deletes it.
# Globals:   CLEANUP_LOG (append), DRY_RUN (read),
#            TOTAL_FOUND, TOTAL_DELETED (incremented)
# Arguments: $1 - path to delete
#            $2 - optional human-readable label (call-site readability only)
# Outputs:   progress line on stdout; failure line on stderr
# Returns:   always 0 — the script runs under `set -e` and callers invoke
#            this bare, so a single failed `rm` must not abort the whole
#            cleanup run. (The original returned 1 on failure, which killed
#            the script at the first undeletable path.)
safe_delete() {
    local target="$1"
    local label="${2:-item}"
    if [[ ! -e "$target" ]]; then
        return 0
    fi
    echo "$target" >> "$CLEANUP_LOG"
    TOTAL_FOUND=$((TOTAL_FOUND + 1))
    if [[ "$DRY_RUN" != "true" ]]; then
        if rm -rf "$target" 2>/dev/null; then
            TOTAL_DELETED=$((TOTAL_DELETED + 1))
            echo "✓ Deleted: $target"
        else
            # Best-effort: log the failure and keep going; totals are
            # reported in the final summary.
            echo "✗ Failed: $target" >&2
        fi
    else
        echo "Would delete: $target"
    fi
    return 0
}
# ---------------------------------------------------------------------------
# Local cleanup, part 1: the proxmox deployment project (PROJECT_ROOT).
# ---------------------------------------------------------------------------
# Clean local proxmox project
log_info "=== Cleaning Local Proxmox Project ==="
PROXMOX_DIR="$PROJECT_ROOT"

# Old markdown files in root (status/completion docs that are superseded)
OLD_DOCS_PROXMOX=(
    "$PROXMOX_DIR/ACTION_PLAN_NOW.md"
    "$PROXMOX_DIR/DEPLOYMENT_IN_PROGRESS.md"
    "$PROXMOX_DIR/DEPLOYMENT_SOLUTION.md"
    "$PROXMOX_DIR/FINAL_STATUS.txt"
    "$PROXMOX_DIR/IMPLEMENTATION_COMPLETE.md"
    "$PROXMOX_DIR/NEXT_STEPS_QUICK_REFERENCE.md"
    "$PROXMOX_DIR/ORGANIZATION_SUMMARY.md"
    "$PROXMOX_DIR/PROJECT_STRUCTURE.md"
    "$PROXMOX_DIR/QUICK_DEPLOY_FIX.md"
    "$PROXMOX_DIR/QUICK_DEPLOY.md"
    "$PROXMOX_DIR/QUICK_START_VALIDATED_SET.md"
    "$PROXMOX_DIR/STATUS_FINAL.md"
    "$PROXMOX_DIR/STATUS.md"
    "$PROXMOX_DIR/VALIDATED_SET_IMPLEMENTATION_SUMMARY.md"
)
for doc in "${OLD_DOCS_PROXMOX[@]}"; do
    safe_delete "$doc" "old doc"
done

# Temporary besu-enodes directories (top level only)
while IFS= read -r dir; do
    safe_delete "$dir" "temp enode dir"
done < <(find "$PROXMOX_DIR" -maxdepth 1 -type d -name "besu-enodes-*" 2>/dev/null)

# Old log files in smom-dbis-138-proxmox/logs
if [[ -d "$PROXMOX_DIR/smom-dbis-138-proxmox/logs" ]]; then
    while IFS= read -r logfile; do
        if [[ -f "$logfile" ]]; then
            # Age in whole days from mtime; if stat fails the fallback of 0
            # makes the file look epoch-old, so it gets deleted.
            file_age=$(( ($(date +%s) - $(stat -c %Y "$logfile" 2>/dev/null || echo 0)) / 86400 ))
            if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
                safe_delete "$logfile" "old log"
            fi
        fi
    done < <(find "$PROXMOX_DIR/smom-dbis-138-proxmox/logs" -type f -name "*.log" 2>/dev/null)
fi

# Backup/temp files (only in specific project directories)
while IFS= read -r file; do
    # Only process files in our project directories; vendored/third-party
    # trees (node_modules, ProxmoxVE, mcp-proxmox, the_order) are untouched.
    if [[ "$file" == "$PROXMOX_DIR/"* ]] && [[ "$file" != *"/node_modules/"* ]] && [[ "$file" != *"/ProxmoxVE/"* ]] && [[ "$file" != *"/mcp-proxmox/"* ]] && [[ "$file" != *"/the_order/"* ]]; then
        safe_delete "$file" "backup/temp file"
    fi
done < <(find "$PROXMOX_DIR" -maxdepth 3 -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" \) 2>/dev/null)

# ---------------------------------------------------------------------------
# Local cleanup, part 2: the sibling smom-dbis-138 project.
# ---------------------------------------------------------------------------
# Clean local smom-dbis-138 project
log_info ""
log_info "=== Cleaning Local smom-dbis-138 Project ==="

# Try different possible locations; first existing directory wins.
SMOM_DIR=""
for possible_dir in "$PROJECT_ROOT/../smom-dbis-138" "/home/intlc/projects/smom-dbis-138"; do
    if [[ -d "$possible_dir" ]]; then
        SMOM_DIR="$possible_dir"
        break
    fi
done

if [[ -n "$SMOM_DIR" ]] && [[ -d "$SMOM_DIR" ]]; then
    log_info "Using smom-dbis-138 directory: $SMOM_DIR"
    # Temporary key generation directories
    while IFS= read -r dir; do
        safe_delete "$dir" "temp key gen dir"
    done < <(find "$SMOM_DIR" -maxdepth 1 -type d -name "temp-all-keys-*" 2>/dev/null)
    # Backup key directories (keep only the most recent; relies on the
    # lexicographic sort of the timestamp suffix)
    LATEST_BACKUP=$(find "$SMOM_DIR" -maxdepth 1 -type d -name "backup-keys-*" 2>/dev/null | sort | tail -1)
    while IFS= read -r dir; do
        if [[ "$dir" != "$LATEST_BACKUP" ]]; then
            safe_delete "$dir" "old backup keys"
        fi
    done < <(find "$SMOM_DIR" -maxdepth 1 -type d -name "backup-keys-*" 2>/dev/null)
    # Old log files
    if [[ -d "$SMOM_DIR/logs" ]]; then
        while IFS= read -r logfile; do
            if [[ -f "$logfile" ]]; then
                file_age=$(( ($(date +%s) - $(stat -c %Y "$logfile" 2>/dev/null || echo 0)) / 86400 ))
                if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
                    safe_delete "$logfile" "old log"
                fi
            fi
        done < <(find "$SMOM_DIR/logs" -type f -name "*.log" 2>/dev/null)
    fi
    # Temporary/backup files
    while IFS= read -r file; do
        safe_delete "$file" "backup/temp file"
    done < <(find "$SMOM_DIR" -maxdepth 2 -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" \) ! -path "*/node_modules/*" 2>/dev/null)
else
    log_warn "smom-dbis-138 directory not found: $SMOM_DIR"
fi
# ---------------------------------------------------------------------------
# Remote cleanup: scan and optionally delete stale files under /opt on ml110.
# SECURITY(review): authentication uses sshpass with REMOTE_PASS, whose
# default is a hardcoded root password — replace with SSH keys or an
# injected secret; `sshpass -p` also exposes the password in the process
# list.
# ---------------------------------------------------------------------------
# Clean remote ml110
log_info ""
log_info "=== Cleaning Remote Host (ml110) ==="
if sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 \
    "${REMOTE_USER}@${REMOTE_HOST}" "echo 'Connected'" 2>/dev/null; then
    log_info "Connected to ${REMOTE_HOST}"
    # Collect all cleanup candidates in one SSH round-trip. Paths come back
    # relative to /opt. $MIN_LOG_AGE_DAYS expands locally; the escaped
    # $-expressions evaluate on the remote host.
    REMOTE_CLEANUP=$(sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
        "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && {
        # Find backup/temp directories
        find smom-dbis-138* -type d -name '*backup*' 2>/dev/null
        find smom-dbis-138* -type d -name 'temp-all-keys-*' 2>/dev/null
        # Find old log files (older than $MIN_LOG_AGE_DAYS days)
        find smom-dbis-138*/logs -type f -name '*.log' 2>/dev/null | while read -r log; do
        age=\$(( (\$(date +%s) - \$(stat -c %Y \"\$log\" 2>/dev/null || echo 0)) / 86400 ))
        if [[ \$age -gt $MIN_LOG_AGE_DAYS ]]; then
        echo \"\$log\"
        fi
        done
        # Find backup/temp files
        find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) 2>/dev/null
        }" 2>/dev/null)
    if [[ -n "$REMOTE_CLEANUP" ]]; then
        REMOTE_COUNT=0
        # BUGFIX: the original piped this list into `while read`, which runs
        # the loop body in a subshell — the REMOTE_COUNT and TOTAL_DELETED
        # increments were silently discarded. A here-string keeps the loop
        # in the current shell so the counters survive.
        while IFS= read -r item; do
            if [[ -n "$item" ]]; then
                REMOTE_COUNT=$((REMOTE_COUNT + 1))
                echo "/opt/$item" >> "$CLEANUP_LOG"
                echo "Would delete (remote): /opt/$item"
                if [[ "$DRY_RUN" != "true" ]]; then
                    # One ssh invocation per item; slow, but gives per-item
                    # success/failure via the ✓/✗ sentinel.
                    if sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
                        "${REMOTE_USER}@${REMOTE_HOST}" "rm -rf \"/opt/$item\" 2>/dev/null && echo '✓' || echo '✗'" 2>/dev/null | grep -q "✓"; then
                        TOTAL_DELETED=$((TOTAL_DELETED + 1))
                    fi
                fi
            fi
        done <<< "$REMOTE_CLEANUP"
        log_info "Found $REMOTE_COUNT items on remote"
    else
        log_info "No cleanup targets found on remote"
    fi
else
    log_warn "Cannot connect to ${REMOTE_HOST}, skipping remote cleanup"
fi

# ---------------------------------------------------------------------------
# Final summary.
# ---------------------------------------------------------------------------
# Summary
log_info ""
log_info "========================================="
log_info "Cleanup Summary"
log_info "========================================="
log_info "Total items found: $TOTAL_FOUND"
if [[ "$DRY_RUN" == "true" ]]; then
    log_warn "DRY-RUN mode: No files were deleted"
    log_info "Review the log file: $CLEANUP_LOG"
    log_info "Run with --execute to actually delete: $0 --execute"
else
    log_success "Total items deleted: $TOTAL_DELETED"
    log_info "Cleanup log: $CLEANUP_LOG"
fi
log_info ""

View File

@@ -0,0 +1,227 @@
#!/usr/bin/env bash
# Cleanup Deprecated Besu Configuration Options
# Removes deprecated/invalid options that cause Besu v23.10.0+ to fail
#
# Usage: <script> [--dry-run]
#   --dry-run  List what would be removed without modifying any file.
set -euo pipefail

# Colors for terminal output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Dry-run mode flag: the literal first argument; only the exact string
# "--dry-run" enables dry-run mode, anything else (or nothing) is live.
DRY_RUN="${1:-}"

# Logging helpers; all write to stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Function to backup a file.
#
# Copies an existing file to a timestamped "<file>.backup.<stamp>" sibling
# and prints the backup path on stdout. In dry-run mode nothing is copied
# and the literal placeholder path "<file>.backup.TIMESTAMP" is printed.
# Prints nothing (and still returns 0) when the source file does not exist.
backup_file() {
    local src="$1"
    [ -f "$src" ] || return 0
    if [ "$DRY_RUN" = "--dry-run" ]; then
        echo "${src}.backup.TIMESTAMP"
        return 0
    fi
    local stamp
    stamp=$(date +%Y%m%d_%H%M%S)
    cp "$src" "${src}.backup.${stamp}"
    echo "${src}.backup.${stamp}"
}
# Function to clean deprecated options from a file.
#
# Strips deprecated/invalid Besu options from a TOML config file in place,
# after creating a timestamped backup. In dry-run mode it only lists what
# would be removed.
# Globals:   DRY_RUN (read: the literal "--dry-run" selects dry-run mode)
# Arguments: $1 - path to the TOML file; $2 - node type label (logging only)
# Returns:   0 on success or dry-run, 1 when the file does not exist
clean_deprecated_options() {
    local file="$1"
    local node_type="$2"
    # Single source of truth for the removable options (the original
    # duplicated this list between the sed expressions and the dry-run
    # output, inviting drift). Options here are removed regardless of value;
    # rpc-tx-feecap is special-cased below and only removed when set to the
    # invalid literal "0x0".
    local -a deprecated_opts=(
        log-destination
        fast-sync-min-peers
        database-path
        trie-logs-enabled
        accounts-enabled
        max-remote-initiated-connections
        rpc-http-host-allowlist
        tx-pool-max-size
        tx-pool-price-bump
        tx-pool-retention-hours
    )
    local opt
    if [ ! -f "$file" ]; then
        log_warn "File not found: $file (skipping)"
        return 1
    fi
    log_info "Cleaning deprecated options from: $file ($node_type)"
    if [ "$DRY_RUN" != "--dry-run" ]; then
        # Backup file. Declared separately so a backup_file failure is not
        # masked by `local`'s own (always-zero) exit status.
        local backup
        backup=$(backup_file "$file")
        log_info " Backup created: $backup"
        # Build one in-place sed invocation from the option list.
        # NOTE(review): GNU sed syntax; BSD/macOS sed would need `-i ''`.
        local -a sed_exprs=()
        for opt in "${deprecated_opts[@]}"; do
            sed_exprs+=(-e "/^${opt}=/d")
        done
        sed_exprs+=(-e '/^rpc-tx-feecap="0x0"/d')
        sed -i "${sed_exprs[@]}" "$file"
        log_success " Deprecated options removed"
        return 0
    else
        log_info " [DRY-RUN] Would remove deprecated options:"
        for opt in "${deprecated_opts[@]}"; do
            log_info " - ${opt}"
        done
        log_info " - rpc-tx-feecap=\"0x0\""
        return 0
    fi
}
# ---------------------------------------------------------------------------
# Main execution: clean every known Besu config file, grouped by node role.
# The original repeated the same header/loop boilerplate four times; it is
# consolidated into two helpers below.
# ---------------------------------------------------------------------------

# Print a boxed section header in the script's house style.
print_section() {
    echo -e "${BLUE}═══════════════════════════════════════════════════════════════${NC}"
    echo -e "${BLUE}$1${NC}"
    echo -e "${BLUE}═══════════════════════════════════════════════════════════════${NC}"
    echo ""
}

# Clean one group of config files of the same node type, updating the global
# CLEANED/SKIPPED counters.
# Usage: clean_group "<section header>" "<node type>" file...
clean_group() {
    local header="$1" node_type="$2" file
    shift 2
    print_section "$header"
    for file in "$@"; do
        if clean_deprecated_options "$file" "$node_type"; then
            CLEANED=$((CLEANED + 1))
        else
            SKIPPED=$((SKIPPED + 1))
        fi
        echo ""
    done
}

echo -e "${BLUE}╔══════════════════════════════════════════════════════════════╗${NC}"
echo -e "${BLUE}║ BESU DEPRECATED OPTIONS CLEANUP ║${NC}"
echo -e "${BLUE}╚══════════════════════════════════════════════════════════════╝${NC}"
echo ""
if [ "$DRY_RUN" == "--dry-run" ]; then
    log_warn "DRY-RUN MODE: No files will be modified"
    echo ""
fi

# Track statistics across all groups.
CLEANED=0
SKIPPED=0

clean_group "Cleaning Validator Configurations" "validator" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-validator.toml" \
    "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-validator.toml"

clean_group "Cleaning RPC Node Configurations" "RPC" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-core.toml" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-public.toml" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-perm.toml" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-thirdweb.toml" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-4.toml" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-putu-1.toml" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-putu-8a.toml" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-luis-1.toml" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-rpc-luis-8a.toml" \
    "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-rpc-core.toml" \
    "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-rpc.toml" \
    "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-rpc-4.toml"

clean_group "Cleaning Sentry Configurations" "sentry" \
    "$PROJECT_ROOT/smom-dbis-138-proxmox/templates/besu-configs/config-sentry.toml"

clean_group "Cleaning Member Configurations" "member" \
    "$PROJECT_ROOT/smom-dbis-138/config/config-member.toml"

# Summary
print_section "Summary"
echo "Files cleaned: $CLEANED"
echo "Files skipped: $SKIPPED"
echo ""
if [ "$DRY_RUN" == "--dry-run" ]; then
    log_warn "This was a dry-run. No files were modified."
    echo "Run without --dry-run to apply changes."
else
    log_success "Deprecated options cleanup complete!"
    echo ""
    echo "Deprecated options removed:"
    echo " ✓ log-destination"
    echo " ✓ fast-sync-min-peers (incompatible with FULL sync-mode)"
    echo " ✓ database-path (use data-path instead)"
    echo " ✓ trie-logs-enabled"
    echo " ✓ accounts-enabled"
    echo " ✓ max-remote-initiated-connections"
    echo " ✓ rpc-http-host-allowlist"
    echo " ✓ rpc-tx-feecap=\"0x0\" (invalid value)"
    echo " ✓ tx-pool-max-size (legacy, incompatible with layered implementation)"
    echo " ✓ tx-pool-price-bump (legacy, incompatible with layered implementation)"
    echo " ✓ tx-pool-retention-hours (legacy, incompatible with layered implementation)"
    echo ""
    echo "Next steps:"
    echo " 1. Review the cleaned configuration files"
    echo " 2. Test configurations with Besu v23.10.0+"
    echo " 3. Deploy updated configurations to nodes"
fi

View File

@@ -0,0 +1,301 @@
#!/usr/bin/env bash
# Cleanup Old, Backup, and Unreferenced Files
# Safely removes old files, backups, and unused files from both local and remote
#
# This script identifies and removes:
# - Backup directories (backup-*, *backup*)
# - Temporary files (*.tmp, *.temp, *~, *.swp)
# - Old log files (logs/*.log older than 30 days)
# - Duplicate/unused files
# - Old documentation that's been superseded
set -euo pipefail

# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# NOTE(review): PROJECT_ROOT is recomputed below with a different number of
# `..` components than the one used to source ip-addresses.conf above — one
# of the two roots is almost certainly wrong (config is read from one tree,
# logs/manifests are written to another). Confirm which root is intended.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers; all write to stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Configuration (all overridable via environment).
DRY_RUN="${DRY_RUN:-true}"
REMOTE_HOST="${REMOTE_HOST:-192.168.11.10}"
REMOTE_USER="${REMOTE_USER:-root}"
CLEAN_LOCAL="${CLEAN_LOCAL:-true}"
CLEAN_REMOTE="${CLEAN_REMOTE:-true}"
MIN_LOG_AGE_DAYS=30

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --execute)
            DRY_RUN=false
            shift
            ;;
        --skip-remote)
            CLEAN_REMOTE=false
            shift
            ;;
        --skip-local)
            CLEAN_LOCAL=false
            shift
            ;;
        --help)
            cat << EOF
Usage: $0 [OPTIONS]
Cleanup old, backup, and unreferenced files from project directories.
Options:
--execute Actually delete files (default: dry-run, only shows what would be deleted)
--skip-remote Skip cleaning remote host (ml110)
--skip-local Skip cleaning local project
--help Show this help
Safety:
- By default, runs in DRY-RUN mode (shows files but doesn't delete)
- Use --execute to actually delete files
- Creates a manifest of files that will be deleted
EOF
            exit 0
            ;;
        *)
            log_error "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Every candidate path is appended to this manifest file.
# Create cleanup manifest
CLEANUP_MANIFEST="$PROJECT_ROOT/logs/cleanup-manifest-$(date +%Y%m%d-%H%M%S).txt"
mkdir -p "$PROJECT_ROOT/logs"
> "$CLEANUP_MANIFEST"

log_info "========================================="
log_info "File Cleanup Script"
log_info "========================================="
log_info "Mode: $([ "$DRY_RUN" == "true" ] && echo "DRY-RUN (no files will be deleted)" || echo "EXECUTE (files will be deleted)")"
log_info "Manifest: $CLEANUP_MANIFEST"
log_info ""
# Function to find and catalog files to delete.
#
# Scans one directory tree for cleanup candidates, appending every found
# path to CLEANUP_MANIFEST.
# Globals:   CLEANUP_MANIFEST (append), MIN_LOG_AGE_DAYS (read)
# Arguments: $1 - directory to scan; $2 - label for log output
# Outputs:   one descriptive line per candidate on stdout, then the total
#            count as the final stdout line (callers parse that last line).
find_cleanup_targets() {
    local base_dir="$1"
    local label="$2"
    log_info "=== Scanning $label ==="
    local count=0
    # Backup directories
    while IFS= read -r dir; do
        if [[ -d "$dir" ]]; then
            echo "$dir" >> "$CLEANUP_MANIFEST"
            echo "DIR: $dir"
            # BUGFIX: was ((count++)) — a post-increment from 0 evaluates to
            # 0, which returns exit status 1 and aborts the whole script
            # under `set -e` on the very first match.
            count=$((count + 1))
        fi
    done < <(find "$base_dir" -type d -name "*backup*" 2>/dev/null)
    # Temporary directories (the scanned base directory itself is excluded)
    while IFS= read -r dir; do
        if [[ -d "$dir" ]] && [[ "$dir" != "$base_dir" ]]; then
            echo "$dir" >> "$CLEANUP_MANIFEST"
            echo "DIR: $dir"
            count=$((count + 1))
        fi
    done < <(find "$base_dir" -type d \( -name "*tmp*" -o -name "*temp*" \) 2>/dev/null)
    # Temporary/backup files
    while IFS= read -r file; do
        if [[ -f "$file" ]]; then
            echo "$file" >> "$CLEANUP_MANIFEST"
            echo "FILE: $file"
            count=$((count + 1))
        fi
    done < <(find "$base_dir" -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" -o -name "*.tmp" -o -name "*.temp" \) 2>/dev/null)
    # Old log files (older than MIN_LOG_AGE_DAYS)
    if [[ -d "$base_dir/logs" ]]; then
        while IFS= read -r file; do
            if [[ -f "$file" ]]; then
                local file_age=$(( ($(date +%s) - $(stat -c %Y "$file" 2>/dev/null || echo 0)) / 86400 ))
                if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
                    echo "$file" >> "$CLEANUP_MANIFEST"
                    echo "OLD LOG ($file_age days): $file"
                    count=$((count + 1))
                fi
            fi
        done < <(find "$base_dir/logs" -type f -name "*.log" 2>/dev/null)
    fi
    # temp-all-keys-* directories in smom-dbis-138
    if [[ "$base_dir" == *"smom-dbis-138"* ]]; then
        while IFS= read -r dir; do
            if [[ -d "$dir" ]]; then
                echo "$dir" >> "$CLEANUP_MANIFEST"
                echo "TEMP KEY GEN: $dir"
                count=$((count + 1))
            fi
        done < <(find "$base_dir" -type d -name "temp-all-keys-*" 2>/dev/null)
    fi
    log_info "Found $count items to clean"
    echo "$count"
}
# Function to delete files from manifest.
#
# Deletes every path listed (one per line) in a manifest file. Missing
# targets and blank lines are skipped silently; deletion failures are
# logged and counted but do not abort the run.
# Arguments: $1 - manifest file path; $2 - label for log output
# Returns:   0 (missing/empty manifest is not an error)
delete_from_manifest() {
    local manifest_file="$1"
    local label="$2"
    if [[ ! -f "$manifest_file" ]]; then
        log_warn "Manifest file not found: $manifest_file"
        return 0
    fi
    # Declared separately so a wc/tr failure is not masked by `local`'s
    # always-zero exit status.
    local count
    count=$(wc -l < "$manifest_file" | tr -d ' ')
    if [[ $count -eq 0 ]]; then
        log_info "No files to delete for $label"
        return 0
    fi
    log_info "Deleting $count items from $label..."
    local deleted=0
    local failed=0
    local target
    while IFS= read -r target; do
        if [[ -z "$target" ]]; then
            continue
        fi
        if [[ -e "$target" ]]; then
            if rm -rf "$target" 2>/dev/null; then
                # BUGFIX: was ((deleted++)) — returns non-zero when the
                # counter is 0, aborting the script under `set -e`.
                deleted=$((deleted + 1))
            else
                log_warn "Failed to delete: $target"
                failed=$((failed + 1))
            fi
        fi
    done < "$manifest_file"
    log_success "Deleted $deleted items, $failed failures"
}
# Clean local project
if [[ "$CLEAN_LOCAL" == "true" ]]; then
    log_info ""
    log_info "=== Local Project Cleanup ==="
    # Clean proxmox project
    PROXMOX_CLEANUP="$PROJECT_ROOT/logs/proxmox-cleanup-$(date +%Y%m%d-%H%M%S).txt"
    > "$PROXMOX_CLEANUP"
    # NOTE(review): the scan's full stdout (including [INFO] lines) is tee'd
    # to a file, and the item count is then recovered by grepping the file's
    # last line — this relies on find_cleanup_targets echoing the bare count
    # as its final stdout line. Fragile; verify if the output format changes.
    find_cleanup_targets "$PROJECT_ROOT" "proxmox project" | tee -a "$PROXMOX_CLEANUP" | tail -20
    proxmox_count=$(tail -1 "$PROXMOX_CLEANUP" | grep -oE '[0-9]+' | head -1 || echo "0")
    # Clean smom-dbis-138 project
    if [[ -d "$PROJECT_ROOT/../smom-dbis-138" ]]; then
        SMOM_CLEANUP="$PROJECT_ROOT/logs/smom-cleanup-$(date +%Y%m%d-%H%M%S).txt"
        > "$SMOM_CLEANUP"
        find_cleanup_targets "$PROJECT_ROOT/../smom-dbis-138" "smom-dbis-138 project" | tee -a "$SMOM_CLEANUP" | tail -20
        smom_count=$(tail -1 "$SMOM_CLEANUP" | grep -oE '[0-9]+' | head -1 || echo "0")
    else
        smom_count=0
    fi
    total_local=$((proxmox_count + smom_count))
    if [[ "$DRY_RUN" != "true" ]] && [[ $total_local -gt 0 ]]; then
        log_info ""
        log_warn "Executing deletion of $total_local local items..."
        delete_from_manifest "$CLEANUP_MANIFEST" "local project"
    fi
fi
# Clean remote host
# SECURITY(review): the original hardcoded the root password inline at every
# sshpass call site. It is kept here only as a default for backward
# compatibility — override via the REMOTE_PASS environment variable, rotate
# the credential, and migrate to SSH keys (`sshpass -p` also leaks the
# password through the process list).
if [[ "$CLEAN_REMOTE" == "true" ]]; then
    log_info ""
    log_info "=== Remote Host Cleanup (ml110) ==="
    REMOTE_PASS="${REMOTE_PASS:-L@kers2010}"
    # Test SSH connection
    if ! sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 \
        "${REMOTE_USER}@${REMOTE_HOST}" "echo 'Connected'" 2>/dev/null; then
        log_warn "Cannot connect to ${REMOTE_HOST}, skipping remote cleanup"
    else
        log_info "Scanning remote host..."
        # Get list of files to clean on remote (capped at 50 entries)
        REMOTE_CLEANUP_LIST=$(sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
            "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && \
            find smom-dbis-138* -type d -name '*backup*' 2>/dev/null && \
            find smom-dbis-138* -type d \( -name '*tmp*' -o -name '*temp*' \) 2>/dev/null && \
            find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) 2>/dev/null" 2>/dev/null | head -50)
        remote_count=0
        if [[ -n "$REMOTE_CLEANUP_LIST" ]]; then
            # BUGFIX: the original piped the list into `while read`, running
            # the loop body in a subshell — remote_count stayed 0 afterwards,
            # so the --execute branch below never fired and remote deletion
            # silently did nothing. The here-string keeps the loop (and the
            # counter) in the current shell.
            while IFS= read -r item; do
                if [[ -n "$item" ]]; then
                    echo "/opt/$item" >> "$CLEANUP_MANIFEST"
                    echo "REMOTE: /opt/$item"
                    remote_count=$((remote_count + 1))
                fi
            done <<< "$REMOTE_CLEANUP_LIST"
            log_info "Found $remote_count items to clean on remote"
        else
            log_info "No cleanup targets found on remote"
        fi
        if [[ "$DRY_RUN" != "true" ]] && [[ $remote_count -gt 0 ]]; then
            log_info ""
            log_warn "Executing deletion of $remote_count remote items..."
            # Delete remote files
            sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
                "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && \
                find smom-dbis-138* -type d -name '*backup*' -exec rm -rf {} + 2>/dev/null; \
                find smom-dbis-138* -type d \( -name '*tmp*' -o -name '*temp*' \) -exec rm -rf {} + 2>/dev/null; \
                find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) -delete 2>/dev/null; \
                echo 'Remote cleanup completed'"
            log_success "Remote cleanup completed"
        fi
    fi
fi
# ---------------------------------------------------------------------------
# Final summary. The manifest file is the authoritative record of what was
# (or would be) deleted.
# ---------------------------------------------------------------------------
# Summary
log_info ""
log_info "========================================="
log_info "Cleanup Summary"
log_info "========================================="
log_info "Manifest file: $CLEANUP_MANIFEST"
log_info "Mode: $([ "$DRY_RUN" == "true" ] && echo "DRY-RUN" || echo "EXECUTED")"
log_info ""
if [[ "$DRY_RUN" == "true" ]]; then
    log_warn "This was a DRY-RUN. No files were deleted."
    log_info "Review the manifest file and run with --execute to delete files:"
    log_info " $0 --execute"
else
    log_success "Cleanup completed. Check manifest for details: $CLEANUP_MANIFEST"
fi
log_info ""

View File

@@ -0,0 +1,295 @@
#!/usr/bin/env bash
# Cleanup Old, Backup, and Unreferenced Files
# Safely removes old files, backups, and unused files from both local and remote
#
# This script identifies and removes:
# - Backup directories (backup-*, *backup*)
# - Temporary files (*.tmp, *.temp, *~, *.swp)
# - Old log files (logs/*.log older than 30 days)
# - Duplicate/unused files
# - Old documentation that's been superseded
#
# NOTE(review): an almost identical cleanup script exists elsewhere in this
# repository — consider consolidating to a single implementation to avoid
# the two drifting apart.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Logging helpers; all write to stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Configuration (all overridable via environment).
DRY_RUN="${DRY_RUN:-true}"
REMOTE_HOST="${REMOTE_HOST:-192.168.11.10}"
REMOTE_USER="${REMOTE_USER:-root}"
CLEAN_LOCAL="${CLEAN_LOCAL:-true}"
CLEAN_REMOTE="${CLEAN_REMOTE:-true}"
MIN_LOG_AGE_DAYS=30

# Parse arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --execute)
            DRY_RUN=false
            shift
            ;;
        --skip-remote)
            CLEAN_REMOTE=false
            shift
            ;;
        --skip-local)
            CLEAN_LOCAL=false
            shift
            ;;
        --help)
            cat << EOF
Usage: $0 [OPTIONS]
Cleanup old, backup, and unreferenced files from project directories.
Options:
--execute Actually delete files (default: dry-run, only shows what would be deleted)
--skip-remote Skip cleaning remote host (ml110)
--skip-local Skip cleaning local project
--help Show this help
Safety:
- By default, runs in DRY-RUN mode (shows files but doesn't delete)
- Use --execute to actually delete files
- Creates a manifest of files that will be deleted
EOF
            exit 0
            ;;
        *)
            log_error "Unknown option: $1"
            exit 1
            ;;
    esac
done

# Every candidate path is appended to this manifest file.
# Create cleanup manifest
CLEANUP_MANIFEST="$PROJECT_ROOT/logs/cleanup-manifest-$(date +%Y%m%d-%H%M%S).txt"
mkdir -p "$PROJECT_ROOT/logs"
> "$CLEANUP_MANIFEST"

log_info "========================================="
log_info "File Cleanup Script"
log_info "========================================="
log_info "Mode: $([ "$DRY_RUN" == "true" ] && echo "DRY-RUN (no files will be deleted)" || echo "EXECUTE (files will be deleted)")"
log_info "Manifest: $CLEANUP_MANIFEST"
log_info ""
# Function to find and catalog files to delete.
#
# Scans one directory tree for cleanup candidates, appending every found
# path to CLEANUP_MANIFEST.
# Globals:   CLEANUP_MANIFEST (append), MIN_LOG_AGE_DAYS (read)
# Arguments: $1 - directory to scan; $2 - label for log output
# Outputs:   one descriptive line per candidate on stdout, then the total
#            count as the final stdout line (callers parse that last line).
find_cleanup_targets() {
    local base_dir="$1"
    local label="$2"
    log_info "=== Scanning $label ==="
    local count=0
    # Backup directories
    while IFS= read -r dir; do
        if [[ -d "$dir" ]]; then
            echo "$dir" >> "$CLEANUP_MANIFEST"
            echo "DIR: $dir"
            # BUGFIX: was ((count++)) — a post-increment from 0 evaluates to
            # 0, which returns exit status 1 and aborts the whole script
            # under `set -e` on the very first match.
            count=$((count + 1))
        fi
    done < <(find "$base_dir" -type d -name "*backup*" 2>/dev/null)
    # Temporary directories (the scanned base directory itself is excluded)
    while IFS= read -r dir; do
        if [[ -d "$dir" ]] && [[ "$dir" != "$base_dir" ]]; then
            echo "$dir" >> "$CLEANUP_MANIFEST"
            echo "DIR: $dir"
            count=$((count + 1))
        fi
    done < <(find "$base_dir" -type d \( -name "*tmp*" -o -name "*temp*" \) 2>/dev/null)
    # Temporary/backup files
    while IFS= read -r file; do
        if [[ -f "$file" ]]; then
            echo "$file" >> "$CLEANUP_MANIFEST"
            echo "FILE: $file"
            count=$((count + 1))
        fi
    done < <(find "$base_dir" -type f \( -name "*.bak" -o -name "*.old" -o -name "*~" -o -name "*.swp" -o -name "*.tmp" -o -name "*.temp" \) 2>/dev/null)
    # Old log files (older than MIN_LOG_AGE_DAYS)
    if [[ -d "$base_dir/logs" ]]; then
        while IFS= read -r file; do
            if [[ -f "$file" ]]; then
                local file_age=$(( ($(date +%s) - $(stat -c %Y "$file" 2>/dev/null || echo 0)) / 86400 ))
                if [[ $file_age -gt $MIN_LOG_AGE_DAYS ]]; then
                    echo "$file" >> "$CLEANUP_MANIFEST"
                    echo "OLD LOG ($file_age days): $file"
                    count=$((count + 1))
                fi
            fi
        done < <(find "$base_dir/logs" -type f -name "*.log" 2>/dev/null)
    fi
    # temp-all-keys-* directories in smom-dbis-138
    if [[ "$base_dir" == *"smom-dbis-138"* ]]; then
        while IFS= read -r dir; do
            if [[ -d "$dir" ]]; then
                echo "$dir" >> "$CLEANUP_MANIFEST"
                echo "TEMP KEY GEN: $dir"
                count=$((count + 1))
            fi
        done < <(find "$base_dir" -type d -name "temp-all-keys-*" 2>/dev/null)
    fi
    log_info "Found $count items to clean"
    echo "$count"
}
# Function to delete files from manifest.
#
# Deletes every path listed (one per line) in a manifest file. Missing
# targets and blank lines are skipped silently; deletion failures are
# logged and counted but do not abort the run.
# Arguments: $1 - manifest file path; $2 - label for log output
# Returns:   0 (missing/empty manifest is not an error)
delete_from_manifest() {
    local manifest_file="$1"
    local label="$2"
    if [[ ! -f "$manifest_file" ]]; then
        log_warn "Manifest file not found: $manifest_file"
        return 0
    fi
    # Declared separately so a wc/tr failure is not masked by `local`'s
    # always-zero exit status.
    local count
    count=$(wc -l < "$manifest_file" | tr -d ' ')
    if [[ $count -eq 0 ]]; then
        log_info "No files to delete for $label"
        return 0
    fi
    log_info "Deleting $count items from $label..."
    local deleted=0
    local failed=0
    local target
    while IFS= read -r target; do
        if [[ -z "$target" ]]; then
            continue
        fi
        if [[ -e "$target" ]]; then
            if rm -rf "$target" 2>/dev/null; then
                # BUGFIX: was ((deleted++)) — returns non-zero when the
                # counter is 0, aborting the script under `set -e`.
                deleted=$((deleted + 1))
            else
                log_warn "Failed to delete: $target"
                failed=$((failed + 1))
            fi
        fi
    done < "$manifest_file"
    log_success "Deleted $deleted items, $failed failures"
}
# Clean local project
if [[ "$CLEAN_LOCAL" == "true" ]]; then
    log_info ""
    log_info "=== Local Project Cleanup ==="
    # Clean proxmox project
    PROXMOX_CLEANUP="$PROJECT_ROOT/logs/proxmox-cleanup-$(date +%Y%m%d-%H%M%S).txt"
    > "$PROXMOX_CLEANUP"
    # NOTE(review): the scan's full stdout (including [INFO] lines) is tee'd
    # to a file, and the item count is then recovered by grepping the file's
    # last line — this relies on find_cleanup_targets echoing the bare count
    # as its final stdout line. Fragile; verify if the output format changes.
    find_cleanup_targets "$PROJECT_ROOT" "proxmox project" | tee -a "$PROXMOX_CLEANUP" | tail -20
    proxmox_count=$(tail -1 "$PROXMOX_CLEANUP" | grep -oE '[0-9]+' | head -1 || echo "0")
    # Clean smom-dbis-138 project
    if [[ -d "$PROJECT_ROOT/../smom-dbis-138" ]]; then
        SMOM_CLEANUP="$PROJECT_ROOT/logs/smom-cleanup-$(date +%Y%m%d-%H%M%S).txt"
        > "$SMOM_CLEANUP"
        find_cleanup_targets "$PROJECT_ROOT/../smom-dbis-138" "smom-dbis-138 project" | tee -a "$SMOM_CLEANUP" | tail -20
        smom_count=$(tail -1 "$SMOM_CLEANUP" | grep -oE '[0-9]+' | head -1 || echo "0")
    else
        smom_count=0
    fi
    total_local=$((proxmox_count + smom_count))
    if [[ "$DRY_RUN" != "true" ]] && [[ $total_local -gt 0 ]]; then
        log_info ""
        log_warn "Executing deletion of $total_local local items..."
        delete_from_manifest "$CLEANUP_MANIFEST" "local project"
    fi
fi
# Clean remote host
# SECURITY(review): the original hardcoded the root password inline at every
# sshpass call site. It is kept here only as a default for backward
# compatibility — override via the REMOTE_PASS environment variable, rotate
# the credential, and migrate to SSH keys (`sshpass -p` also leaks the
# password through the process list).
if [[ "$CLEAN_REMOTE" == "true" ]]; then
    log_info ""
    log_info "=== Remote Host Cleanup (ml110) ==="
    REMOTE_PASS="${REMOTE_PASS:-L@kers2010}"
    # Test SSH connection
    if ! sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 \
        "${REMOTE_USER}@${REMOTE_HOST}" "echo 'Connected'" 2>/dev/null; then
        log_warn "Cannot connect to ${REMOTE_HOST}, skipping remote cleanup"
    else
        log_info "Scanning remote host..."
        # Get list of files to clean on remote (capped at 50 entries)
        REMOTE_CLEANUP_LIST=$(sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
            "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && \
            find smom-dbis-138* -type d -name '*backup*' 2>/dev/null && \
            find smom-dbis-138* -type d \( -name '*tmp*' -o -name '*temp*' \) 2>/dev/null && \
            find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) 2>/dev/null" 2>/dev/null | head -50)
        remote_count=0
        if [[ -n "$REMOTE_CLEANUP_LIST" ]]; then
            # BUGFIX: the original piped the list into `while read`, running
            # the loop body in a subshell — remote_count stayed 0 afterwards,
            # so the --execute branch below never fired and remote deletion
            # silently did nothing. The here-string keeps the loop (and the
            # counter) in the current shell.
            while IFS= read -r item; do
                if [[ -n "$item" ]]; then
                    echo "/opt/$item" >> "$CLEANUP_MANIFEST"
                    echo "REMOTE: /opt/$item"
                    remote_count=$((remote_count + 1))
                fi
            done <<< "$REMOTE_CLEANUP_LIST"
            log_info "Found $remote_count items to clean on remote"
        else
            log_info "No cleanup targets found on remote"
        fi
        if [[ "$DRY_RUN" != "true" ]] && [[ $remote_count -gt 0 ]]; then
            log_info ""
            log_warn "Executing deletion of $remote_count remote items..."
            # Delete remote files
            sshpass -p "$REMOTE_PASS" ssh -o StrictHostKeyChecking=no \
                "${REMOTE_USER}@${REMOTE_HOST}" "cd /opt && \
                find smom-dbis-138* -type d -name '*backup*' -exec rm -rf {} + 2>/dev/null; \
                find smom-dbis-138* -type d \( -name '*tmp*' -o -name '*temp*' \) -exec rm -rf {} + 2>/dev/null; \
                find smom-dbis-138* -type f \( -name '*.bak' -o -name '*.old' -o -name '*~' -o -name '*.swp' \) -delete 2>/dev/null; \
                echo 'Remote cleanup completed'"
            log_success "Remote cleanup completed"
        fi
    fi
fi
# ---------------------------------------------------------------------------
# Final summary. The manifest file is the authoritative record of what was
# (or would be) deleted.
# ---------------------------------------------------------------------------
# Summary
log_info ""
log_info "========================================="
log_info "Cleanup Summary"
log_info "========================================="
log_info "Manifest file: $CLEANUP_MANIFEST"
log_info "Mode: $([ "$DRY_RUN" == "true" ] && echo "DRY-RUN" || echo "EXECUTED")"
log_info ""
if [[ "$DRY_RUN" == "true" ]]; then
    log_warn "This was a DRY-RUN. No files were deleted."
    log_info "Review the manifest file and run with --execute to delete files:"
    log_info " $0 --execute"
else
    log_success "Cleanup completed. Check manifest for details: $CLEANUP_MANIFEST"
fi
log_info ""

View File

@@ -0,0 +1,117 @@
#!/bin/bash
# Create .env.example templates from existing .env files.
# Copies every .env found under PROJECT_ROOT to .env.example and replaces the
# values of well-known secret keys with placeholders.
#
# Environment:
#   PROJECT_ROOT - directory tree to scan (default: /home/intlc/projects)
#   DRY_RUN      - "true" (default) to preview only; "false" to write files
set -euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
PROJECT_ROOT="${PROJECT_ROOT:-/home/intlc/projects}"
DRY_RUN="${DRY_RUN:-true}"
# Keys whose values are replaced with placeholders. Matching is anchored
# (^KEY=), so e.g. DEPLOYER_PRIVATE_KEY is NOT sanitized — extend this map
# as new secret names appear.
declare -A SECRET_PATTERNS=(
  ["PRIVATE_KEY"]="your-private-key-here"
  ["API_KEY"]="your-api-key-here"
  ["API_TOKEN"]="your-api-token-here"
  ["SECRET"]="your-secret-here"
  ["PASSWORD"]="your-password-here"
  ["TOKEN"]="your-token-here"
  ["CLOUDFLARE_API_TOKEN"]="your-cloudflare-api-token"
  ["CLOUDFLARE_API_KEY"]="your-cloudflare-api-key"
  ["CLOUDFLARE_TUNNEL_TOKEN"]="your-cloudflare-tunnel-token"
  ["CLOUDFLARE_ORIGIN_CA_KEY"]="your-cloudflare-origin-ca-key"
  ["NPM_PASSWORD"]="your-npm-password"
  ["DATABASE_URL"]="postgresql://user:password@host:port/database"
  ["JWT_SECRET"]="your-jwt-secret-here"
)
echo "═══════════════════════════════════════════════════════════"
echo " Create .env.example Templates"
echo "═══════════════════════════════════════════════════════════"
echo ""
log_info "Mode: $([ "$DRY_RUN" = "true" ] && echo "DRY RUN" || echo "LIVE")"
echo ""
# Find all .env files. '|| true' keeps 'set -e' from aborting the whole run
# when PROJECT_ROOT is missing or partially unreadable (find exits non-zero).
ENV_FILES=$(find "$PROJECT_ROOT" -type f -name ".env" ! -name "*.example" ! -path "*/node_modules/*" ! -path "*/.git/*" 2>/dev/null || true)
CREATED=0
SKIPPED=0
while IFS= read -r env_file; do
  if [ -z "$env_file" ]; then
    continue
  fi
  example_file="${env_file}.example"
  # Skip if .example already exists and is newer than the source .env
  if [ -f "$example_file" ] && [ "$example_file" -nt "$env_file" ]; then
    log_info "Skipping $env_file (example file is newer)"
    SKIPPED=$((SKIPPED + 1))
    continue
  fi
  log_info "Processing: $env_file"
  if [ "$DRY_RUN" = "false" ]; then
    # Create .env.example by copying and sanitizing
    cp "$env_file" "$example_file"
    # Replace secret values with placeholders.
    # Fix: use '|' as the sed delimiter — placeholders may contain '/'
    # (e.g. DATABASE_URL's postgresql://...), which broke the usual s/…/…/
    # form with "unknown option to `s'". A single greedy substitution per key
    # is enough: '.*' also swallows surrounding quotes in KEY="value" and
    # KEY='value' lines, so the two extra quoted-form seds were dead code.
    for pattern in "${!SECRET_PATTERNS[@]}"; do
      placeholder="${SECRET_PATTERNS[$pattern]}"
      sed -i "s|^${pattern}=.*|${pattern}=${placeholder}|" "$example_file"
    done
    # Prepend a usage header so the template is self-explanatory.
    {
      echo "# Environment Variables Template"
      echo "# Copy this file to .env and fill in your actual values"
      echo "# DO NOT commit .env files to version control"
      echo "#"
      echo ""
      cat "$example_file"
    } > "${example_file}.tmp"
    mv "${example_file}.tmp" "$example_file"
    log_success " Created: $example_file"
    CREATED=$((CREATED + 1))
  else
    log_info " Would create: $example_file"
    CREATED=$((CREATED + 1))
  fi
done <<< "$ENV_FILES"
echo ""
echo "═══════════════════════════════════════════════════════════"
echo " Summary"
echo "═══════════════════════════════════════════════════════════"
echo ""
if [ "$DRY_RUN" = "true" ]; then
  log_info "DRY RUN complete. Would create $CREATED template(s)"
  log_info "To create templates, run:"
  log_info " DRY_RUN=false $0"
else
  log_success "Created $CREATED .env.example template(s)"
  log_info "Skipped $SKIPPED file(s) (already up to date)"
fi
echo ""

View File

@@ -0,0 +1,206 @@
#!/bin/bash
# Safely handle backup files containing secrets.
# Scans PROJECT_ROOT for .env backup files, detects the ones that hold
# secrets, then lists / encrypts / moves / deletes them depending on ACTION.
#
# Environment:
#   PROJECT_ROOT - tree to scan                     (default: /home/intlc/projects)
#   ACTION       - list | encrypt | move | delete   (default: list)
#   SECURE_DIR   - destination for encrypt/move     (default: ~/.secure-secrets-backups)
#   DRY_RUN      - "true" (default) previews; "false" applies changes
set -euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
PROJECT_ROOT="${PROJECT_ROOT:-/home/intlc/projects}"
ACTION="${ACTION:-list}" # list, encrypt, move, delete
SECURE_DIR="${SECURE_DIR:-$HOME/.secure-secrets-backups}"
DRY_RUN="${DRY_RUN:-true}"
echo "═══════════════════════════════════════════════════════════"
echo " Backup Files Handler"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Find all backup files. '|| true' keeps 'set -e' from aborting when
# PROJECT_ROOT is missing (find exits non-zero).
log_info "Scanning for backup files..."
BACKUP_FILES=$(find "$PROJECT_ROOT" -type f \( -name "*.env.backup*" -o -name ".env.backup*" \) ! -path "*/node_modules/*" ! -path "*/.git/*" 2>/dev/null || true)
if [ -z "$BACKUP_FILES" ]; then
  log_success "No backup files found"
  exit 0
fi
# Identify files with secrets (anchored key match on known secret prefixes)
FILES_WITH_SECRETS=()
while IFS= read -r backup_file; do
  if [ -z "$backup_file" ]; then
    continue
  fi
  if grep -qE "^(PRIVATE_KEY|API_KEY|SECRET|PASSWORD|TOKEN|CLOUDFLARE)" "$backup_file" 2>/dev/null; then
    FILES_WITH_SECRETS+=("$backup_file")
  fi
done <<< "$BACKUP_FILES"
if [ ${#FILES_WITH_SECRETS[@]} -eq 0 ]; then
  log_success "No backup files with secrets found"
  exit 0
fi
echo "Found ${#FILES_WITH_SECRETS[@]} backup file(s) with secrets:"
echo ""
for file in "${FILES_WITH_SECRETS[@]}"; do
  echo " - $file"
  # Show first secret type found
  secret_type=$(grep -hE "^(PRIVATE_KEY|API_KEY|SECRET|PASSWORD|TOKEN|CLOUDFLARE)" "$file" 2>/dev/null | head -1 | cut -d'=' -f1)
  if [ -n "$secret_type" ]; then
    echo " Contains: $secret_type"
  fi
done
echo ""
case "$ACTION" in
  list)
    log_info "Mode: LIST (no changes)"
    log_info ""
    log_info "Available actions:"
    log_info " ACTION=encrypt - Encrypt and move to secure location"
    log_info " ACTION=move - Move to secure location (unencrypted)"
    log_info " ACTION=delete - Delete files (with confirmation)"
    ;;
  encrypt)
    log_info "Mode: ENCRYPT and move to secure location"
    if [ "$DRY_RUN" = "true" ]; then
      log_warn "DRY RUN - No changes will be made"
    fi
    # Create secure directory
    if [ "$DRY_RUN" = "false" ]; then
      mkdir -p "$SECURE_DIR"
      chmod 700 "$SECURE_DIR"
    fi
    for file in "${FILES_WITH_SECRETS[@]}"; do
      filename=$(basename "$file")
      parent_dir=$(dirname "$file")
      relative_path="${parent_dir#"$PROJECT_ROOT"/}"
      # Flatten the relative path into the target name so files from
      # different projects cannot collide.
      # Fix: the original interpolated $(unknown) — a nonexistent command
      # that aborted the script under 'set -e'; the computed (and otherwise
      # unused) $filename was clearly the intended suffix.
      secure_path="$SECURE_DIR/${relative_path//\//_}_${filename}.enc"
      log_info "Processing: $file"
      if [ "$DRY_RUN" = "false" ]; then
        # Encrypt using openssl. NOTE: 'openssl enc' prompts interactively
        # for a passphrase (read from the terminal, not stdin).
        if command -v openssl &> /dev/null; then
          openssl enc -aes-256-cbc -salt -pbkdf2 -in "$file" -out "$secure_path" 2>/dev/null || {
            log_error "Failed to encrypt $file"
            continue
          }
          chmod 600 "$secure_path"
          log_success " Encrypted to: $secure_path"
          # Remove original
          rm "$file"
          log_success " Removed original: $file"
        else
          log_error "openssl not found. Cannot encrypt."
          exit 1
        fi
      else
        log_info " Would encrypt to: $secure_path"
        log_info " Would remove: $file"
      fi
    done
    if [ "$DRY_RUN" = "false" ]; then
      log_success "Encryption complete!"
      log_info "Encrypted files stored in: $SECURE_DIR"
      log_info "To decrypt: openssl enc -d -aes-256-cbc -pbkdf2 -in <file.enc> -out <file>"
    fi
    ;;
  move)
    log_info "Mode: MOVE to secure location"
    if [ "$DRY_RUN" = "true" ]; then
      log_warn "DRY RUN - No changes will be made"
    fi
    # Create secure directory
    if [ "$DRY_RUN" = "false" ]; then
      mkdir -p "$SECURE_DIR"
      chmod 700 "$SECURE_DIR"
    fi
    for file in "${FILES_WITH_SECRETS[@]}"; do
      filename=$(basename "$file")
      parent_dir=$(dirname "$file")
      relative_path="${parent_dir#"$PROJECT_ROOT"/}"
      # Same fix as in 'encrypt': $(unknown) replaced with ${filename}.
      secure_path="$SECURE_DIR/${relative_path//\//_}_${filename}"
      log_info "Processing: $file"
      if [ "$DRY_RUN" = "false" ]; then
        # copy-then-remove (rather than mv) keeps the original intact if the
        # copy to SECURE_DIR fails midway
        cp "$file" "$secure_path"
        chmod 600 "$secure_path"
        log_success " Moved to: $secure_path"
        # Remove original
        rm "$file"
        log_success " Removed original: $file"
      else
        log_info " Would move to: $secure_path"
        log_info " Would remove: $file"
      fi
    done
    if [ "$DRY_RUN" = "false" ]; then
      log_success "Move complete!"
      log_info "Files stored in: $SECURE_DIR"
    fi
    ;;
  delete)
    log_warn "Mode: DELETE"
    log_warn "This will permanently delete backup files with secrets!"
    echo ""
    if [ "$DRY_RUN" = "true" ]; then
      log_warn "DRY RUN - No files will be deleted"
      for file in "${FILES_WITH_SECRETS[@]}"; do
        log_info "Would delete: $file"
      done
    else
      # Interactive confirmation gate before destructive action
      read -p "Are you sure you want to delete these files? (yes/no): " confirm
      if [ "$confirm" != "yes" ]; then
        log_info "Cancelled"
        exit 0
      fi
      for file in "${FILES_WITH_SECRETS[@]}"; do
        log_info "Deleting: $file"
        rm "$file"
        log_success " Deleted: $file"
      done
      log_success "Deletion complete!"
    fi
    ;;
  *)
    log_error "Unknown action: $ACTION"
    log_info "Valid actions: list, encrypt, move, delete"
    exit 1
    ;;
esac
echo ""
echo "═══════════════════════════════════════════════════════════"
View File

@@ -0,0 +1,262 @@
#!/bin/bash
#
# Provision a new Proxmox LXC container and install NPMplus (an Nginx Proxy
# Manager fork) inside it via Docker, then hand off to config migration.
#
# Usage: <script> [proxmox_host] [timezone] [acme_email]
#   $1 - Proxmox host to SSH into as root (default: 192.168.11.11)
#   $2 - container timezone               (default: America/New_York)
#   $3 - ACME e-mail for TLS certificates
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Install NPMplus using existing template or create from existing container
# NOTE(review): 'set -e' below is redundant — 'set -euo pipefail' above
# already enables errexit.
set -e
PROXMOX_HOST="${1:-192.168.11.11}"
TZ="${2:-America/New_York}"
ACME_EMAIL="${3:-nsatoshi2007@hotmail.com}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🚀 NPMplus Installation (Using Available Resources)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Find next container ID
# NOTE(review): "highest existing ID + 1" is racy if containers are created
# concurrently, and evaluates to 1 when 'pct list' reports none — confirm
# that is acceptable for this cluster.
CTID=$(ssh root@"$PROXMOX_HOST" "pct list | tail -n +2 | awk '{print \$1}' | sort -n | tail -1")
CTID=$((CTID + 1))
echo "📋 Using container ID: $CTID"
echo ""
# Check for existing templates
# Template selection: prefer Alpine (smallest), fall back to Ubuntu/Debian,
# otherwise abort with manual download instructions.
echo "📦 Checking for available templates..."
EXISTING_TEMPLATE=$(ssh root@"$PROXMOX_HOST" "pveam list local | grep -i alpine | head -1 | awk '{print \$1}' || echo ''")
if [ -n "$EXISTING_TEMPLATE" ]; then
echo " ✅ Found existing template: $EXISTING_TEMPLATE"
# pveam list returns format like "local:vztmpl/alpine-3.22-default_20250617_amd64.tar.xz"
TEMPLATE="$EXISTING_TEMPLATE"
else
# Check what the existing NPM container uses
echo " ⚠️ No Alpine template found locally"
echo " 📋 Checking existing NPM container (105) for template info..."
# Try to use ubuntu or debian template if available
UBUNTU_TEMPLATE=$(ssh root@"$PROXMOX_HOST" "pveam list local | grep -iE 'ubuntu|debian' | head -1 | awk '{print \$1}' || echo ''")
if [ -n "$UBUNTU_TEMPLATE" ]; then
echo " ✅ Found alternative template: $UBUNTU_TEMPLATE"
# pveam list returns format like "local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst"
TEMPLATE="$UBUNTU_TEMPLATE"
else
echo " ❌ No suitable template found"
echo ""
echo " 💡 Solution: Download template manually or use Proxmox web UI"
echo " Or run on Proxmox host:"
echo " pveam download local alpine-3.22-default_20250617_amd64.tar.xz"
exit 1
fi
fi
# Create container
# 512 MB RAM / 1 core / 3 GB rootfs, unprivileged with nesting=1 so Docker
# can run inside the container.
echo ""
echo "📦 Creating container with template: $TEMPLATE..."
# Template from pveam is already in format "local:vztmpl/filename", use directly
ssh root@"$PROXMOX_HOST" "pct create $CTID \\
$TEMPLATE \\
--hostname npmplus \\
--memory 512 \\
--cores 1 \\
--rootfs local-lvm:3 \\
--net0 name=eth0,bridge=vmbr0,ip=dhcp \\
--unprivileged 1 \\
--features nesting=1" || {
echo " ❌ Failed to create container"
exit 1
}
echo " ✅ Container created"
# Start container
echo ""
echo "🚀 Starting container..."
ssh root@"$PROXMOX_HOST" "pct start $CTID" || {
echo " ❌ Failed to start container"
exit 1
}
# Wait for container to be ready
echo " ⏳ Waiting for container to be ready..."
sleep 5
# Install NPMplus inside container
# The heredoc below is executed inside the container via 'pct exec ... bash'.
# Because the delimiter is unquoted, $CTID/$TZ/$ACME_EMAIL expand LOCALLY
# before the script is shipped, while \$VAR / \\ sequences are escaped so
# they expand on the REMOTE side. Heredoc content is data piped to the
# remote shell and is deliberately left untouched here.
echo ""
echo "📦 Installing NPMplus inside container..."
ssh root@"$PROXMOX_HOST" "pct exec $CTID -- bash" << INSTALL_EOF
set -e
# Detect OS and install accordingly
if [ -f /etc/alpine-release ]; then
echo " 📋 Detected Alpine Linux"
apk update
apk add --no-cache tzdata gawk yq docker curl bash
# Start Docker
rc-service docker start || true
rc-update add docker default || true
# Install docker compose plugin
DOCKER_COMPOSE_VERSION=\$(curl -fsSL https://api.github.com/repos/docker/compose/releases/latest 2>/dev/null | grep '"tag_name":' | cut -d'"' -f4 || echo "v2.24.0")
DOCKER_CONFIG=\${DOCKER_CONFIG:-\$HOME/.docker}
mkdir -p \$DOCKER_CONFIG/cli-plugins
curl -fsSL "https://github.com/docker/compose/releases/download/\${DOCKER_COMPOSE_VERSION#v}/docker-compose-linux-x86_64" -o \$DOCKER_CONFIG/cli-plugins/docker-compose 2>/dev/null || \\
curl -fsSL "https://github.com/docker/compose/releases/download/v2.24.0/docker-compose-linux-x86_64" -o \$DOCKER_CONFIG/cli-plugins/docker-compose
chmod +x \$DOCKER_CONFIG/cli-plugins/docker-compose
elif [ -f /etc/debian_version ]; then
echo " 📋 Detected Debian/Ubuntu"
apt-get update
apt-get install -y tzdata gawk curl bash ca-certificates gnupg lsb-release
# Install Docker from official repository
echo " 📦 Installing Docker..."
if [ ! -f /etc/apt/keyrings/docker.gpg ]; then
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg
# Detect Ubuntu version for Docker repo
UBUNTU_CODENAME=\$(lsb_release -cs 2>/dev/null || echo "jammy")
echo "deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \$UBUNTU_CODENAME stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
fi
# Install Docker packages (skip if already installed)
if ! command -v docker >/dev/null 2>&1; then
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
else
echo " Docker already installed, ensuring docker-compose-plugin..."
apt-get install -y docker-compose-plugin || true
fi
# Install yq from GitHub releases
echo " 📦 Installing yq..."
if ! command -v yq >/dev/null 2>&1; then
YQ_VERSION=\$(curl -fsSL https://api.github.com/repos/mikefarah/yq/releases/latest 2>/dev/null | grep '"tag_name":' | cut -d'"' -f4 || echo "v4.40.5")
curl -fsSL "https://github.com/mikefarah/yq/releases/download/\${YQ_VERSION}/yq_linux_amd64" -o /usr/local/bin/yq
chmod +x /usr/local/bin/yq
else
echo " yq already installed"
fi
# Start Docker
systemctl start docker || true
systemctl enable docker || true
else
echo " ❌ Unsupported OS"
exit 1
fi
# Wait for Docker
sleep 5
# Fetch NPMplus compose file
cd /opt
echo " 📥 Downloading NPMplus compose.yaml..."
curl -fsSL "https://raw.githubusercontent.com/ZoeyVid/NPMplus/refs/heads/develop/compose.yaml" -o compose.yaml || {
echo " ❌ Failed to download compose.yaml"
exit 1
}
# Update compose file with timezone and email
if command -v yq >/dev/null 2>&1; then
echo " 📝 Updating compose.yaml..."
yq -i "
.services.npmplus.environment |=
(map(select(. != \"TZ=*\" and . != \"ACME_EMAIL=*\")) +
[\"TZ=$TZ\", \"ACME_EMAIL=$ACME_EMAIL\"])
" compose.yaml
else
echo " ⚠️ yq not available, updating manually..."
sed -i "s|TZ=.*|TZ=$TZ|g" compose.yaml || true
sed -i "s|ACME_EMAIL=.*|ACME_EMAIL=$ACME_EMAIL|g" compose.yaml || true
fi
# Start NPMplus
echo " 🚀 Starting NPMplus (this may take 1-2 minutes)..."
cd /opt
docker compose up -d || {
echo " ⚠️ docker compose failed, checking status..."
docker compose ps || true
exit 1
}
# Wait for NPMplus to be ready
echo " ⏳ Waiting for NPMplus to start..."
CONTAINER_ID=""
for i in {1..60}; do
CONTAINER_ID=\$(docker ps --filter "name=npmplus" --format "{{.ID}}" 2>/dev/null || echo "")
if [ -n "\$CONTAINER_ID" ]; then
STATUS=\$(docker inspect --format '{{.State.Health.Status}}' "\$CONTAINER_ID" 2>/dev/null || echo "starting")
if [ "\$STATUS" = "healthy" ] || [ "\$STATUS" = "running" ]; then
echo " ✅ NPMplus is running"
break
fi
fi
sleep 2
done
# Get admin password
echo " 🔑 Retrieving admin password..."
PASSWORD_LINE=\$(docker logs "\$CONTAINER_ID" 2>&1 | grep -i "Creating a new user" | tail -1 || echo "")
if [ -n "\$PASSWORD_LINE" ]; then
PASSWORD=\$(echo "\$PASSWORD_LINE" | grep -oP "password: \K[^\s]+" || echo "")
if [ -n "\$PASSWORD" ]; then
echo "username: admin@example.org" > /opt/.npm_pwd
echo "password: \$PASSWORD" >> /opt/.npm_pwd
echo " ✅ Admin password saved"
fi
fi
echo " ✅ NPMplus installation complete!"
INSTALL_EOF
# Post-install: report container details and hand off to config migration.
# NOTE(review): under 'set -e' a failed install heredoc aborts the script
# earlier, so this branch normally sees $? == 0; the else arm is a safety net.
if [ $? -eq 0 ]; then
  # Get container IP (first address reported inside the container)
  CONTAINER_IP=$(ssh root@"$PROXMOX_HOST" "pct exec $CTID -- hostname -I | awk '{print \$1}'")
  echo ""
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo "✅ NPMplus Installation Complete!"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  echo "📋 Container Information:"
  echo " • Container ID: $CTID"
  echo " • Container IP: $CONTAINER_IP"
  echo " • Access URL: https://$CONTAINER_IP:81"
  echo " • Admin Email: admin@example.org"
  echo ""
  echo "🔑 Get admin password:"
  echo " ssh root@$PROXMOX_HOST \"pct exec $CTID -- cat /opt/.npm_pwd\""
  echo ""
  # Continue with migration
  echo "🚀 Continuing with configuration migration..."
  # Fix: the original line was missing the closing double quote of the
  # remote command string, so the rest of the file was swallowed into one
  # unterminated command substitution (shell syntax error).
  ADMIN_PASSWORD=$(ssh root@"$PROXMOX_HOST" "pct exec $CTID -- cat /opt/.npm_pwd 2>/dev/null | grep -i password | cut -d: -f2 | tr -d ' ' || echo ''")
  if [ -z "$ADMIN_PASSWORD" ]; then
    echo " ⚠️ Could not retrieve password automatically"
    echo " 💡 Run migration manually:"
    echo " bash scripts/nginx-proxy-manager/post-install-migration.sh $PROXMOX_HOST $CTID $CONTAINER_IP"
  else
    # Password is piped via stdin (not argv) so it does not leak into 'ps'.
    echo "$ADMIN_PASSWORD" | bash scripts/nginx-proxy-manager/migrate-configs-to-npmplus.sh \
      "$PROXMOX_HOST" \
      "$CTID" \
      "https://$CONTAINER_IP:81" || {
      echo " ⚠️ Migration had issues, but installation is complete"
    }
  fi
else
  echo ""
  echo "❌ Installation failed. Check the output above."
  exit 1
fi

View File

@@ -0,0 +1,256 @@
#!/bin/bash
#
# Provision a new Proxmox LXC container and install NPMplus (an Nginx Proxy
# Manager fork) inside it via Docker, then hand off to config migration.
# NOTE(review): near-duplicate of the variant that preloads
# config/ip-addresses.conf — consider consolidating the two scripts.
#
# Usage: <script> [proxmox_host] [timezone] [acme_email]
set -euo pipefail
# Install NPMplus using existing template or create from existing container
# NOTE(review): 'set -e' below is redundant — errexit is already enabled.
set -e
PROXMOX_HOST="${1:-192.168.11.11}"
TZ="${2:-America/New_York}"
ACME_EMAIL="${3:-nsatoshi2007@hotmail.com}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🚀 NPMplus Installation (Using Available Resources)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Find next container ID
# NOTE(review): "highest existing ID + 1" is racy if containers are created
# concurrently, and evaluates to 1 when 'pct list' reports none.
CTID=$(ssh root@"$PROXMOX_HOST" "pct list | tail -n +2 | awk '{print \$1}' | sort -n | tail -1")
CTID=$((CTID + 1))
echo "📋 Using container ID: $CTID"
echo ""
# Check for existing templates
# Prefer Alpine (smallest), fall back to Ubuntu/Debian, else abort.
echo "📦 Checking for available templates..."
EXISTING_TEMPLATE=$(ssh root@"$PROXMOX_HOST" "pveam list local | grep -i alpine | head -1 | awk '{print \$1}' || echo ''")
if [ -n "$EXISTING_TEMPLATE" ]; then
echo " ✅ Found existing template: $EXISTING_TEMPLATE"
# pveam list returns format like "local:vztmpl/alpine-3.22-default_20250617_amd64.tar.xz"
TEMPLATE="$EXISTING_TEMPLATE"
else
# Check what the existing NPM container uses
echo " ⚠️ No Alpine template found locally"
echo " 📋 Checking existing NPM container (105) for template info..."
# Try to use ubuntu or debian template if available
UBUNTU_TEMPLATE=$(ssh root@"$PROXMOX_HOST" "pveam list local | grep -iE 'ubuntu|debian' | head -1 | awk '{print \$1}' || echo ''")
if [ -n "$UBUNTU_TEMPLATE" ]; then
echo " ✅ Found alternative template: $UBUNTU_TEMPLATE"
# pveam list returns format like "local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst"
TEMPLATE="$UBUNTU_TEMPLATE"
else
echo " ❌ No suitable template found"
echo ""
echo " 💡 Solution: Download template manually or use Proxmox web UI"
echo " Or run on Proxmox host:"
echo " pveam download local alpine-3.22-default_20250617_amd64.tar.xz"
exit 1
fi
fi
# Create container
# 512 MB RAM / 1 core / 3 GB rootfs, unprivileged with nesting=1 for Docker.
echo ""
echo "📦 Creating container with template: $TEMPLATE..."
# Template from pveam is already in format "local:vztmpl/filename", use directly
ssh root@"$PROXMOX_HOST" "pct create $CTID \\
$TEMPLATE \\
--hostname npmplus \\
--memory 512 \\
--cores 1 \\
--rootfs local-lvm:3 \\
--net0 name=eth0,bridge=vmbr0,ip=dhcp \\
--unprivileged 1 \\
--features nesting=1" || {
echo " ❌ Failed to create container"
exit 1
}
echo " ✅ Container created"
# Start container
echo ""
echo "🚀 Starting container..."
ssh root@"$PROXMOX_HOST" "pct start $CTID" || {
echo " ❌ Failed to start container"
exit 1
}
# Wait for container to be ready
echo " ⏳ Waiting for container to be ready..."
sleep 5
# Install NPMplus inside container
# The heredoc is executed inside the container via 'pct exec ... bash'.
# Unquoted delimiter: $CTID/$TZ/$ACME_EMAIL expand locally; \$VAR / \\ are
# escaped to expand remotely. Heredoc content is data and is left untouched.
echo ""
echo "📦 Installing NPMplus inside container..."
ssh root@"$PROXMOX_HOST" "pct exec $CTID -- bash" << INSTALL_EOF
set -e
# Detect OS and install accordingly
if [ -f /etc/alpine-release ]; then
echo " 📋 Detected Alpine Linux"
apk update
apk add --no-cache tzdata gawk yq docker curl bash
# Start Docker
rc-service docker start || true
rc-update add docker default || true
# Install docker compose plugin
DOCKER_COMPOSE_VERSION=\$(curl -fsSL https://api.github.com/repos/docker/compose/releases/latest 2>/dev/null | grep '"tag_name":' | cut -d'"' -f4 || echo "v2.24.0")
DOCKER_CONFIG=\${DOCKER_CONFIG:-\$HOME/.docker}
mkdir -p \$DOCKER_CONFIG/cli-plugins
curl -fsSL "https://github.com/docker/compose/releases/download/\${DOCKER_COMPOSE_VERSION#v}/docker-compose-linux-x86_64" -o \$DOCKER_CONFIG/cli-plugins/docker-compose 2>/dev/null || \\
curl -fsSL "https://github.com/docker/compose/releases/download/v2.24.0/docker-compose-linux-x86_64" -o \$DOCKER_CONFIG/cli-plugins/docker-compose
chmod +x \$DOCKER_CONFIG/cli-plugins/docker-compose
elif [ -f /etc/debian_version ]; then
echo " 📋 Detected Debian/Ubuntu"
apt-get update
apt-get install -y tzdata gawk curl bash ca-certificates gnupg lsb-release
# Install Docker from official repository
echo " 📦 Installing Docker..."
if [ ! -f /etc/apt/keyrings/docker.gpg ]; then
install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
chmod a+r /etc/apt/keyrings/docker.gpg
# Detect Ubuntu version for Docker repo
UBUNTU_CODENAME=\$(lsb_release -cs 2>/dev/null || echo "jammy")
echo "deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \$UBUNTU_CODENAME stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
fi
# Install Docker packages (skip if already installed)
if ! command -v docker >/dev/null 2>&1; then
apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
else
echo " Docker already installed, ensuring docker-compose-plugin..."
apt-get install -y docker-compose-plugin || true
fi
# Install yq from GitHub releases
echo " 📦 Installing yq..."
if ! command -v yq >/dev/null 2>&1; then
YQ_VERSION=\$(curl -fsSL https://api.github.com/repos/mikefarah/yq/releases/latest 2>/dev/null | grep '"tag_name":' | cut -d'"' -f4 || echo "v4.40.5")
curl -fsSL "https://github.com/mikefarah/yq/releases/download/\${YQ_VERSION}/yq_linux_amd64" -o /usr/local/bin/yq
chmod +x /usr/local/bin/yq
else
echo " yq already installed"
fi
# Start Docker
systemctl start docker || true
systemctl enable docker || true
else
echo " ❌ Unsupported OS"
exit 1
fi
# Wait for Docker
sleep 5
# Fetch NPMplus compose file
cd /opt
echo " 📥 Downloading NPMplus compose.yaml..."
curl -fsSL "https://raw.githubusercontent.com/ZoeyVid/NPMplus/refs/heads/develop/compose.yaml" -o compose.yaml || {
echo " ❌ Failed to download compose.yaml"
exit 1
}
# Update compose file with timezone and email
if command -v yq >/dev/null 2>&1; then
echo " 📝 Updating compose.yaml..."
yq -i "
.services.npmplus.environment |=
(map(select(. != \"TZ=*\" and . != \"ACME_EMAIL=*\")) +
[\"TZ=$TZ\", \"ACME_EMAIL=$ACME_EMAIL\"])
" compose.yaml
else
echo " ⚠️ yq not available, updating manually..."
sed -i "s|TZ=.*|TZ=$TZ|g" compose.yaml || true
sed -i "s|ACME_EMAIL=.*|ACME_EMAIL=$ACME_EMAIL|g" compose.yaml || true
fi
# Start NPMplus
echo " 🚀 Starting NPMplus (this may take 1-2 minutes)..."
cd /opt
docker compose up -d || {
echo " ⚠️ docker compose failed, checking status..."
docker compose ps || true
exit 1
}
# Wait for NPMplus to be ready
echo " ⏳ Waiting for NPMplus to start..."
CONTAINER_ID=""
for i in {1..60}; do
CONTAINER_ID=\$(docker ps --filter "name=npmplus" --format "{{.ID}}" 2>/dev/null || echo "")
if [ -n "\$CONTAINER_ID" ]; then
STATUS=\$(docker inspect --format '{{.State.Health.Status}}' "\$CONTAINER_ID" 2>/dev/null || echo "starting")
if [ "\$STATUS" = "healthy" ] || [ "\$STATUS" = "running" ]; then
echo " ✅ NPMplus is running"
break
fi
fi
sleep 2
done
# Get admin password
echo " 🔑 Retrieving admin password..."
PASSWORD_LINE=\$(docker logs "\$CONTAINER_ID" 2>&1 | grep -i "Creating a new user" | tail -1 || echo "")
if [ -n "\$PASSWORD_LINE" ]; then
PASSWORD=\$(echo "\$PASSWORD_LINE" | grep -oP "password: \K[^\s]+" || echo "")
if [ -n "\$PASSWORD" ]; then
echo "username: admin@example.org" > /opt/.npm_pwd
echo "password: \$PASSWORD" >> /opt/.npm_pwd
echo " ✅ Admin password saved"
fi
fi
echo " ✅ NPMplus installation complete!"
INSTALL_EOF
# Post-install: report container details and hand off to config migration.
# NOTE(review): under 'set -e' a failed install heredoc aborts the script
# earlier, so this branch normally sees $? == 0; the else arm is a safety net.
if [ $? -eq 0 ]; then
  # Get container IP (first address reported inside the container)
  CONTAINER_IP=$(ssh root@"$PROXMOX_HOST" "pct exec $CTID -- hostname -I | awk '{print \$1}'")
  echo ""
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo "✅ NPMplus Installation Complete!"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  echo "📋 Container Information:"
  echo " • Container ID: $CTID"
  echo " • Container IP: $CONTAINER_IP"
  echo " • Access URL: https://$CONTAINER_IP:81"
  echo " • Admin Email: admin@example.org"
  echo ""
  echo "🔑 Get admin password:"
  echo " ssh root@$PROXMOX_HOST \"pct exec $CTID -- cat /opt/.npm_pwd\""
  echo ""
  # Continue with migration
  echo "🚀 Continuing with configuration migration..."
  # Fix: the original line was missing the closing double quote of the
  # remote command string, so the rest of the file was swallowed into one
  # unterminated command substitution (shell syntax error).
  ADMIN_PASSWORD=$(ssh root@"$PROXMOX_HOST" "pct exec $CTID -- cat /opt/.npm_pwd 2>/dev/null | grep -i password | cut -d: -f2 | tr -d ' ' || echo ''")
  if [ -z "$ADMIN_PASSWORD" ]; then
    echo " ⚠️ Could not retrieve password automatically"
    echo " 💡 Run migration manually:"
    echo " bash scripts/nginx-proxy-manager/post-install-migration.sh $PROXMOX_HOST $CTID $CONTAINER_IP"
  else
    # Password is piped via stdin (not argv) so it does not leak into 'ps'.
    echo "$ADMIN_PASSWORD" | bash scripts/nginx-proxy-manager/migrate-configs-to-npmplus.sh \
      "$PROXMOX_HOST" \
      "$CTID" \
      "https://$CONTAINER_IP:81" || {
      echo " ⚠️ Migration had issues, but installation is complete"
    }
  fi
else
  echo ""
  echo "❌ Installation failed. Check the output above."
  exit 1
fi

View File

@@ -0,0 +1,98 @@
#!/usr/bin/env bash
# Migrate 2 containers to pve2 using thin1 storage via backup/restore method
# This approach allows us to specify target storage
#
# Env overrides: PROXMOX_HOST, PROXMOX_PASS.
# Requires: sshpass on the machine running this script.
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# SECURITY NOTE(review): a real credential is hard-coded as the PROXMOX_PASS
# default and handed to sshpass on its command line (visible in 'ps' output).
# Move it to a secrets store / SSH keys and rotate the password.
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
PROXMOX_PASS="${PROXMOX_PASS:-L@kers2010}"
# Migration topology: dump on SOURCE_NODE, restore onto TARGET_NODE/TARGET_STORAGE.
SOURCE_NODE="ml110"
TARGET_NODE="pve2"
TARGET_STORAGE="thin1"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Run an arbitrary command on the Proxmox host as root, forwarding all args.
ssh_proxmox() {
sshpass -p "$PROXMOX_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$PROXMOX_HOST" "$@"
}
#######################################
# Migrate one LXC container via backup + restore.
# Globals:   TARGET_NODE, TARGET_STORAGE, SOURCE_NODE (read), ssh_proxmox, log_*
# Arguments: $1 - VMID of the container
#            $2 - human-readable container name (logging only)
# Returns:   0 on success, 1 on backup or restore failure
#######################################
migrate_via_backup() {
    local vmid=$1
    local name=$2
    local backup_file
    log_info "Migrating container $vmid ($name) to $TARGET_NODE using $TARGET_STORAGE..."
    # Step 1: dump the container to /tmp on the Proxmox host.
    # --remove 0 keeps the source container in place.
    log_info " Step 1: Creating backup of container $vmid..."
    if ssh_proxmox "vzdump $vmid --compress zstd --storage local --dumpdir /tmp --remove 0" 2>&1 | tee "/tmp/backup-${vmid}.log"; then
        log_success " Backup created"
        # Locate the newest dump for this VMID (vzdump names files by timestamp).
        # (The original also pre-assigned backup_file with a freshly computed
        # timestamp — dead code, since this lookup always overwrote it.)
        backup_file=$(ssh_proxmox "ls -t /tmp/vzdump-lxc-${vmid}-*.tar.zst 2>/dev/null | head -1")
        if [[ -z "$backup_file" ]]; then
            log_error " Backup file not found"
            return 1
        fi
        log_info " Backup file: $backup_file"
    else
        log_error " Backup failed"
        return 1
    fi
    # Step 2: restore the archive onto the target storage.
    # Fix: the original invoked 'vzdump' again here, but vzdump only CREATES
    # backups (and has no --ostype option); 'pct restore' is the command that
    # recreates a container from an archive on a chosen storage.
    # NOTE(review): this still runs over SSH to $PROXMOX_HOST — confirm that
    # node can see $TARGET_STORAGE and that reusing the same VMID is intended
    # for your cluster layout.
    log_info " Step 2: Restoring container on $TARGET_NODE with $TARGET_STORAGE storage..."
    if ssh_proxmox "pct restore $vmid $backup_file --storage $TARGET_STORAGE" 2>&1; then
        log_success " Container restored on $TARGET_NODE"
    else
        log_error " Restore failed"
        return 1
    fi
    # Step 3: leave the original in place; the operator removes it only after
    # verifying the migrated container works.
    log_warn " Original container still exists on $SOURCE_NODE"
    log_info " You may want to remove it after verifying the migration"
    return 0
}
# Banner, worklist, and per-container driver loop.
printf '%s\n' \
  "=========================================" \
  "Migrate 2 containers via backup/restore" \
  "========================================="
echo ""
# Worklist of "VMID:name" pairs to migrate.
CONTAINERS=(
  "1500:besu-sentry-1"
  "1501:besu-sentry-2"
)
for entry in "${CONTAINERS[@]}"; do
  # Split on the first ':' — VMID before it, container name after it.
  IFS=: read -r ct_id ct_name <<<"$entry"
  echo ""
  migrate_via_backup "$ct_id" "$ct_name"
  echo ""
done
echo "Migration complete!"

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env bash
# Migrate 2 containers to pve2 using thin1 storage via backup/restore method
# This approach allows us to specify target storage
#
# Env overrides: PROXMOX_HOST, PROXMOX_PASS.
# Requires: sshpass on the machine running this script.
# NOTE(review): near-duplicate of the variant that preloads
# config/ip-addresses.conf — consider consolidating the two scripts.
set -euo pipefail
# SECURITY NOTE(review): a real credential is hard-coded as the PROXMOX_PASS
# default and handed to sshpass on its command line (visible in 'ps' output).
# Move it to a secrets store / SSH keys and rotate the password.
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
PROXMOX_PASS="${PROXMOX_PASS:-L@kers2010}"
# Migration topology: dump on SOURCE_NODE, restore onto TARGET_NODE/TARGET_STORAGE.
SOURCE_NODE="ml110"
TARGET_NODE="pve2"
TARGET_STORAGE="thin1"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Run an arbitrary command on the Proxmox host as root, forwarding all args.
ssh_proxmox() {
sshpass -p "$PROXMOX_PASS" ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$PROXMOX_HOST" "$@"
}
#######################################
# Migrate one LXC container via backup + restore.
# Globals:   TARGET_NODE, TARGET_STORAGE, SOURCE_NODE (read), ssh_proxmox, log_*
# Arguments: $1 - VMID of the container
#            $2 - human-readable container name (logging only)
# Returns:   0 on success, 1 on backup or restore failure
#######################################
migrate_via_backup() {
    local vmid=$1
    local name=$2
    local backup_file
    log_info "Migrating container $vmid ($name) to $TARGET_NODE using $TARGET_STORAGE..."
    # Step 1: dump the container to /tmp on the Proxmox host.
    # --remove 0 keeps the source container in place.
    log_info " Step 1: Creating backup of container $vmid..."
    if ssh_proxmox "vzdump $vmid --compress zstd --storage local --dumpdir /tmp --remove 0" 2>&1 | tee "/tmp/backup-${vmid}.log"; then
        log_success " Backup created"
        # Locate the newest dump for this VMID (vzdump names files by timestamp).
        # (The original also pre-assigned backup_file with a freshly computed
        # timestamp — dead code, since this lookup always overwrote it.)
        backup_file=$(ssh_proxmox "ls -t /tmp/vzdump-lxc-${vmid}-*.tar.zst 2>/dev/null | head -1")
        if [[ -z "$backup_file" ]]; then
            log_error " Backup file not found"
            return 1
        fi
        log_info " Backup file: $backup_file"
    else
        log_error " Backup failed"
        return 1
    fi
    # Step 2: restore the archive onto the target storage.
    # Fix: the original invoked 'vzdump' again here, but vzdump only CREATES
    # backups (and has no --ostype option); 'pct restore' is the command that
    # recreates a container from an archive on a chosen storage.
    # NOTE(review): this still runs over SSH to $PROXMOX_HOST — confirm that
    # node can see $TARGET_STORAGE and that reusing the same VMID is intended
    # for your cluster layout.
    log_info " Step 2: Restoring container on $TARGET_NODE with $TARGET_STORAGE storage..."
    if ssh_proxmox "pct restore $vmid $backup_file --storage $TARGET_STORAGE" 2>&1; then
        log_success " Container restored on $TARGET_NODE"
    else
        log_error " Restore failed"
        return 1
    fi
    # Step 3: leave the original in place; the operator removes it only after
    # verifying the migrated container works.
    log_warn " Original container still exists on $SOURCE_NODE"
    log_info " You may want to remove it after verifying the migration"
    return 0
}
# Banner
echo "========================================="
echo "Migrate 2 containers via backup/restore"
echo "========================================="
echo ""
# Worklist of "VMID:name" pairs to migrate.
CONTAINERS=(
"1500:besu-sentry-1"
"1501:besu-sentry-2"
)
# Split each entry on the first ':' (VMID before, container name after) and
# migrate it. NOTE(review): a failed migration aborts the whole run because
# of 'set -e' at the top — intentional? Otherwise append '|| true'.
for container in "${CONTAINERS[@]}"; do
vmid="${container%%:*}"
name="${container#*:}"
echo ""
migrate_via_backup "$vmid" "$name"
echo ""
done
echo "Migration complete!"

View File

@@ -0,0 +1,173 @@
#!/bin/bash
# Migrate VMIDs 100-130 and 7800-7811 using backup/restore method
# VMIDs 100-130 → thin1 storage
# VMIDs 7800-7811 → local storage
#
# Flow: for each stopped container, create a vzdump archive on the source
# node, restore it with 'pct restore', then destroy the source copy
# (best-effort) only after a successful restore.
set -euo pipefail

# Load IP configuration (optional; hard-coded defaults below are used
# when the config files are absent)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "$SCRIPT_DIR/load-physical-inventory.sh" 2>/dev/null || true

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}=== $1 ===${NC}\n"; }

SOURCE_NODE="r630-02"
TARGET_NODE="r630-01"
BACKUP_STORAGE="local"

SRC_HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
TGT_HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"

# Run a command as root on the source / target Proxmox node.
# NOTE(review): sshpass with an inline password leaks credentials via the
# process list — prefer SSH keys. The password can be overridden through
# the PROXMOX_PASS environment variable (defaults to the original literal).
ssh_src() { sshpass -p "${PROXMOX_PASS:-password}" ssh -o StrictHostKeyChecking=no root@"$SRC_HOST" "$@"; }
ssh_tgt() { sshpass -p "${PROXMOX_PASS:-password}" ssh -o StrictHostKeyChecking=no root@"$TGT_HOST" "$@"; }

# VMs to migrate
VMIDS_100_130=(100 101 102 103 104 105 130)
VMIDS_7800_7811=(7800 7801 7802 7810 7811)

# Back up one stopped container on the source node, restore it on the
# target, then destroy the source copy (best-effort).
# Arguments:
#   $1 - VMID
#   $2 - target storage name to restore onto
# Side effects: appends the VMID to SUCCESS or FAILED.
migrate_one() {
    local vmid=$1 storage=$2
    log_info "Migrating VMID $vmid..."

    # Skip VMIDs that do not exist on the source node
    if ! ssh_src "pct list 2>/dev/null | grep -q \"^$vmid\"" 2>/dev/null; then
        log_warn "VMID $vmid not found, skipping"
        return 0
    fi

    # Create backup (this will work since VMs are stopped).
    # NOTE(review): the remote 'vzdump | tail -5' pipeline returns tail's
    # status, so a vzdump failure is not detected here; it only surfaces
    # below if no archive is found (a stale archive could mask it).
    log_info "  Creating backup..."
    ssh_src "vzdump $vmid --storage $BACKUP_STORAGE --compress gzip --mode stop 2>&1 | tail -5"

    # Find the newest backup archive for this VMID
    local backup_file
    backup_file=$(ssh_src "ls -t /var/lib/vz/dump/vzdump-lxc-$vmid-*.tar.gz 2>/dev/null | head -1" 2>/dev/null)
    if [ -z "$backup_file" ]; then
        log_error "  Backup file not found for VMID $vmid"
        FAILED+=("$vmid")
        return 0
    fi
    log_info "  Backup: $backup_file"

    # Restore to target.
    # BUG FIX: the original tested "$?" after a VAR=$(...) assignment; under
    # 'set -e' a failed command substitution aborts the script before that
    # check ever runs, so the FAILED branch was unreachable. Testing the
    # assignment directly keeps the error handling reachable.
    # NOTE(review): upstream 'pct restore' has no '--target' option and
    # restores on the node it runs on — verify against your Proxmox version.
    log_info "  Restoring to $TARGET_NODE ($storage storage)..."
    local restore_output
    if restore_output=$(ssh_src "pct restore $vmid $backup_file --storage $storage --target $TARGET_NODE 2>&1"); then
        log_success "  VMID $vmid migrated successfully"
        SUCCESS+=("$vmid")
        # Remove from source (best-effort; the migration itself succeeded)
        ssh_src "pct destroy $vmid 2>&1" || true
    else
        log_error "  Restore failed: $restore_output"
        FAILED+=("$vmid")
    fi
    echo ""
}

log_section "VM Migration to r630-01 (Backup/Restore)"
log_info "Source Node: $SOURCE_NODE"
log_info "Target Node: $TARGET_NODE"
log_info "VMIDs 100-130 → thin1 storage (96 GB)"
log_info "VMIDs 7800-7811 → local storage (210 GB)"
echo ""
log_warn "This will migrate ${#VMIDS_100_130[@]} + ${#VMIDS_7800_7811[@]} = $((${#VMIDS_100_130[@]} + ${#VMIDS_7800_7811[@]})) containers using backup/restore"
echo ""
read -rp "Continue with migration? (yes/no): " confirm
if [[ "$confirm" != "yes" ]]; then
    log_info "Migration cancelled."
    exit 0
fi

FAILED=()
SUCCESS=()

# Migrate VMIDs 100-130 to thin1
log_section "Migrating VMIDs 100-130 to thin1 storage"
for vmid in "${VMIDS_100_130[@]}"; do
    migrate_one "$vmid" thin1
done

# Migrate VMIDs 7800-7811 to local storage
log_section "Migrating VMIDs 7800-7811 to local storage"
for vmid in "${VMIDS_7800_7811[@]}"; do
    migrate_one "$vmid" local
done

log_section "Migration Summary"
log_info "Successful: ${#SUCCESS[@]}"
if [ ${#SUCCESS[@]} -gt 0 ]; then
    echo "  VMIDs: ${SUCCESS[*]}"
fi
if [ ${#FAILED[@]} -gt 0 ]; then
    log_warn "Failed: ${#FAILED[@]}"
    echo "  VMIDs: ${FAILED[*]}"
fi

log_section "Verification"
ssh_tgt "pct list 2>/dev/null | grep -E '100|101|102|103|104|105|130|7800|7801|7802|7810|7811'" 2>/dev/null || true
log_section "Complete"

View File

@@ -0,0 +1,167 @@
#!/bin/bash
# Migrate VMIDs 100-130 and 7800-7811 using backup/restore method
# VMIDs 100-130 → thin1 storage
# VMIDs 7800-7811 → local storage
#
# Flow: for each stopped container, create a vzdump archive on the source
# node, restore it with 'pct restore', then destroy the source copy
# (best-effort) only after a successful restore.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/load-physical-inventory.sh" 2>/dev/null || true

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}=== $1 ===${NC}\n"; }

SOURCE_NODE="r630-02"
TARGET_NODE="r630-01"
BACKUP_STORAGE="local"

# Run a command as root on the source / target Proxmox node.
# NOTE(review): sshpass with an inline password leaks credentials via the
# process list — prefer SSH keys. The password can be overridden through
# the PROXMOX_PASS environment variable (defaults to the original literal).
ssh_src() { sshpass -p "${PROXMOX_PASS:-password}" ssh -o StrictHostKeyChecking=no root@192.168.11.12 "$@"; }
ssh_tgt() { sshpass -p "${PROXMOX_PASS:-password}" ssh -o StrictHostKeyChecking=no root@192.168.11.11 "$@"; }

# VMs to migrate
VMIDS_100_130=(100 101 102 103 104 105 130)
VMIDS_7800_7811=(7800 7801 7802 7810 7811)

# Back up one stopped container on the source node, restore it on the
# target, then destroy the source copy (best-effort).
# Arguments:
#   $1 - VMID
#   $2 - target storage name to restore onto
# Side effects: appends the VMID to SUCCESS or FAILED.
migrate_one() {
    local vmid=$1 storage=$2
    log_info "Migrating VMID $vmid..."

    # Skip VMIDs that do not exist on the source node
    if ! ssh_src "pct list 2>/dev/null | grep -q \"^$vmid\"" 2>/dev/null; then
        log_warn "VMID $vmid not found, skipping"
        return 0
    fi

    # Create backup (this will work since VMs are stopped).
    # NOTE(review): the remote 'vzdump | tail -5' pipeline returns tail's
    # status, so a vzdump failure is not detected here; it only surfaces
    # below if no archive is found (a stale archive could mask it).
    log_info "  Creating backup..."
    ssh_src "vzdump $vmid --storage $BACKUP_STORAGE --compress gzip --mode stop 2>&1 | tail -5"

    # Find the newest backup archive for this VMID
    local backup_file
    backup_file=$(ssh_src "ls -t /var/lib/vz/dump/vzdump-lxc-$vmid-*.tar.gz 2>/dev/null | head -1" 2>/dev/null)
    if [ -z "$backup_file" ]; then
        log_error "  Backup file not found for VMID $vmid"
        FAILED+=("$vmid")
        return 0
    fi
    log_info "  Backup: $backup_file"

    # Restore to target.
    # BUG FIX: the original tested "$?" after a VAR=$(...) assignment; under
    # 'set -e' a failed command substitution aborts the script before that
    # check ever runs, so the FAILED branch was unreachable. Testing the
    # assignment directly keeps the error handling reachable.
    # NOTE(review): upstream 'pct restore' has no '--target' option and
    # restores on the node it runs on — verify against your Proxmox version.
    log_info "  Restoring to $TARGET_NODE ($storage storage)..."
    local restore_output
    if restore_output=$(ssh_src "pct restore $vmid $backup_file --storage $storage --target $TARGET_NODE 2>&1"); then
        log_success "  VMID $vmid migrated successfully"
        SUCCESS+=("$vmid")
        # Remove from source (best-effort; the migration itself succeeded)
        ssh_src "pct destroy $vmid 2>&1" || true
    else
        log_error "  Restore failed: $restore_output"
        FAILED+=("$vmid")
    fi
    echo ""
}

log_section "VM Migration to r630-01 (Backup/Restore)"
log_info "Source Node: $SOURCE_NODE"
log_info "Target Node: $TARGET_NODE"
log_info "VMIDs 100-130 → thin1 storage (96 GB)"
log_info "VMIDs 7800-7811 → local storage (210 GB)"
echo ""
log_warn "This will migrate ${#VMIDS_100_130[@]} + ${#VMIDS_7800_7811[@]} = $((${#VMIDS_100_130[@]} + ${#VMIDS_7800_7811[@]})) containers using backup/restore"
echo ""
read -rp "Continue with migration? (yes/no): " confirm
if [[ "$confirm" != "yes" ]]; then
    log_info "Migration cancelled."
    exit 0
fi

FAILED=()
SUCCESS=()

# Migrate VMIDs 100-130 to thin1
log_section "Migrating VMIDs 100-130 to thin1 storage"
for vmid in "${VMIDS_100_130[@]}"; do
    migrate_one "$vmid" thin1
done

# Migrate VMIDs 7800-7811 to local storage
log_section "Migrating VMIDs 7800-7811 to local storage"
for vmid in "${VMIDS_7800_7811[@]}"; do
    migrate_one "$vmid" local
done

log_section "Migration Summary"
log_info "Successful: ${#SUCCESS[@]}"
if [ ${#SUCCESS[@]} -gt 0 ]; then
    echo "  VMIDs: ${SUCCESS[*]}"
fi
if [ ${#FAILED[@]} -gt 0 ]; then
    log_warn "Failed: ${#FAILED[@]}"
    echo "  VMIDs: ${FAILED[*]}"
fi

log_section "Verification"
ssh_tgt "pct list 2>/dev/null | grep -E '100|101|102|103|104|105|130|7800|7801|7802|7810|7811'" 2>/dev/null || true
log_section "Complete"

View File

@@ -0,0 +1,140 @@
#!/bin/bash
# Migrate VMIDs 100-130 and 7800-7811 using backup/restore method
# This handles storage differences between nodes
set -euo pipefail

# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
source "$SCRIPT_DIR/load-physical-inventory.sh" 2>/dev/null || true

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}=== $1 ===${NC}\n"; }

SOURCE_NODE="r630-02"
TARGET_NODE="r630-01"
TARGET_STORAGE="thin1"
BACKUP_STORAGE="local"

# Run a command as root on the source / target Proxmox node.
# NOTE(review): sshpass with an inline password leaks credentials via the
# process list — prefer SSH keys. Override via the PROXMOX_PASS env var.
ssh_src() { sshpass -p "${PROXMOX_PASS:-password}" ssh -o StrictHostKeyChecking=no root@"${PROXMOX_HOST_R630_02:-192.168.11.12}" "$@"; }
ssh_tgt() { sshpass -p "${PROXMOX_PASS:-password}" ssh -o StrictHostKeyChecking=no root@"${PROXMOX_HOST_R630_01:-192.168.11.11}" "$@"; }

# VMs to migrate
VMIDS_100_130=(100 101 102 103 104 105 130)
VMIDS_7800_7811=(7800 7801 7802 7810 7811)
ALL_VMIDS=("${VMIDS_100_130[@]}" "${VMIDS_7800_7811[@]}")

log_section "VM Migration to r630-01 (Backup/Restore Method)"
log_info "Source Node: $SOURCE_NODE"
log_info "Target Node: $TARGET_NODE"
log_info "Target Storage: $TARGET_STORAGE"
log_info "VMs to migrate: ${#ALL_VMIDS[@]} containers"
echo ""
log_warn "This will migrate the following VMs using backup/restore:"
echo "  VMIDs 100-130: ${VMIDS_100_130[*]}"
echo "  VMIDs 7800-7811: ${VMIDS_7800_7811[*]}"
echo ""
read -rp "Continue with migration? (yes/no): " confirm
if [[ "$confirm" != "yes" ]]; then
    log_info "Migration cancelled."
    exit 0
fi

log_section "Starting Migration"
FAILED=()
SUCCESS=()
for vmid in "${ALL_VMIDS[@]}"; do
    log_info "Migrating VMID $vmid..."
    # Skip VMIDs that do not exist on the source node
    if ! ssh_src "pct list 2>/dev/null | grep -q \"^$vmid\"" 2>/dev/null; then
        log_warn "VMID $vmid not found on source, skipping"
        continue
    fi
    # Step 1: Create backup on source node.
    # BUG FIX: the original checked "$?" after a VAR=$(...) assignment;
    # under 'set -e' a failed substitution aborts the script before that
    # check ever runs, so the FAILED branch was unreachable.
    log_info "  Creating backup..."
    if ! BACKUP_RESULT=$(ssh_src "vzdump $vmid --storage $BACKUP_STORAGE --compress gzip --mode stop 2>&1"); then
        log_error "  Backup failed for VMID $vmid"
        FAILED+=("$vmid")
        continue
    fi
    # BUG FIX: vzdump archives are named vzdump-lxc-<id>-YYYY_MM_DD-HH_MM_SS,
    # so the original grep -o "vzdump-lxc-$vmid-[0-9]*.tar.gz" could never
    # match (the timestamp contains '_' and '-'). Pick the newest archive on
    # disk for this VMID instead; $BACKUP_RESULT is kept for diagnostics.
    BACKUP_FILE=$(ssh_src "ls -t /var/lib/vz/dump/vzdump-lxc-$vmid-*.tar.gz 2>/dev/null | head -1" 2>/dev/null)
    if [ -z "$BACKUP_FILE" ]; then
        log_error "  Could not determine backup filename for VMID $vmid"
        FAILED+=("$vmid")
        continue
    fi
    log_info "  Backup created: $BACKUP_FILE"
    # Step 2: Restore on target node.
    # NOTE(review): upstream 'pct restore' has no '--target' option and
    # restores on the node it runs on — verify against your Proxmox version.
    log_info "  Restoring to $TARGET_NODE..."
    if RESTORE_RESULT=$(ssh_src "pct restore $vmid $BACKUP_FILE --storage $TARGET_STORAGE --target $TARGET_NODE 2>&1"); then
        log_success "  VMID $vmid migrated successfully"
        SUCCESS+=("$vmid")
        # Step 3: Delete from source (best-effort)
        log_info "  Removing from source node..."
        ssh_src "pct destroy $vmid 2>&1" || true
    else
        log_error "  Restore failed for VMID $vmid: $RESTORE_RESULT"
        FAILED+=("$vmid")
    fi
    echo ""
done

log_section "Migration Summary"
log_info "Successful migrations: ${#SUCCESS[@]}"
if [ ${#SUCCESS[@]} -gt 0 ]; then
    echo "  VMIDs: ${SUCCESS[*]}"
fi
if [ ${#FAILED[@]} -gt 0 ]; then
    log_warn "Failed migrations: ${#FAILED[@]}"
    echo "  VMIDs: ${FAILED[*]}"
fi

log_section "Verification"
log_info "Checking VMs on $TARGET_NODE..."
ssh_tgt "pct list 2>/dev/null | grep -E '$(IFS='|'; echo "${ALL_VMIDS[*]}")'" 2>/dev/null || true

log_section "Migration Complete"
if [ ${#FAILED[@]} -eq 0 ]; then
    log_success "All VMs migrated successfully!"
else
    log_warn "Some migrations failed. Please check the errors above."
fi

View File

@@ -0,0 +1,134 @@
#!/bin/bash
# Migrate VMIDs 100-130 and 7800-7811 using backup/restore method
# This handles storage differences between nodes
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$SCRIPT_DIR/load-physical-inventory.sh" 2>/dev/null || true

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}=== $1 ===${NC}\n"; }

SOURCE_NODE="r630-02"
TARGET_NODE="r630-01"
TARGET_STORAGE="thin1"
BACKUP_STORAGE="local"

# Run a command as root on the source / target Proxmox node.
# NOTE(review): sshpass with an inline password leaks credentials via the
# process list — prefer SSH keys. Override via the PROXMOX_PASS env var.
ssh_src() { sshpass -p "${PROXMOX_PASS:-password}" ssh -o StrictHostKeyChecking=no root@192.168.11.12 "$@"; }
ssh_tgt() { sshpass -p "${PROXMOX_PASS:-password}" ssh -o StrictHostKeyChecking=no root@192.168.11.11 "$@"; }

# VMs to migrate
VMIDS_100_130=(100 101 102 103 104 105 130)
VMIDS_7800_7811=(7800 7801 7802 7810 7811)
ALL_VMIDS=("${VMIDS_100_130[@]}" "${VMIDS_7800_7811[@]}")

log_section "VM Migration to r630-01 (Backup/Restore Method)"
log_info "Source Node: $SOURCE_NODE"
log_info "Target Node: $TARGET_NODE"
log_info "Target Storage: $TARGET_STORAGE"
log_info "VMs to migrate: ${#ALL_VMIDS[@]} containers"
echo ""
log_warn "This will migrate the following VMs using backup/restore:"
echo "  VMIDs 100-130: ${VMIDS_100_130[*]}"
echo "  VMIDs 7800-7811: ${VMIDS_7800_7811[*]}"
echo ""
read -rp "Continue with migration? (yes/no): " confirm
if [[ "$confirm" != "yes" ]]; then
    log_info "Migration cancelled."
    exit 0
fi

log_section "Starting Migration"
FAILED=()
SUCCESS=()
for vmid in "${ALL_VMIDS[@]}"; do
    log_info "Migrating VMID $vmid..."
    # Skip VMIDs that do not exist on the source node
    if ! ssh_src "pct list 2>/dev/null | grep -q \"^$vmid\"" 2>/dev/null; then
        log_warn "VMID $vmid not found on source, skipping"
        continue
    fi
    # Step 1: Create backup on source node.
    # BUG FIX: the original checked "$?" after a VAR=$(...) assignment;
    # under 'set -e' a failed substitution aborts the script before that
    # check ever runs, so the FAILED branch was unreachable.
    log_info "  Creating backup..."
    if ! BACKUP_RESULT=$(ssh_src "vzdump $vmid --storage $BACKUP_STORAGE --compress gzip --mode stop 2>&1"); then
        log_error "  Backup failed for VMID $vmid"
        FAILED+=("$vmid")
        continue
    fi
    # BUG FIX: vzdump archives are named vzdump-lxc-<id>-YYYY_MM_DD-HH_MM_SS,
    # so the original grep -o "vzdump-lxc-$vmid-[0-9]*.tar.gz" could never
    # match (the timestamp contains '_' and '-'). Pick the newest archive on
    # disk for this VMID instead; $BACKUP_RESULT is kept for diagnostics.
    BACKUP_FILE=$(ssh_src "ls -t /var/lib/vz/dump/vzdump-lxc-$vmid-*.tar.gz 2>/dev/null | head -1" 2>/dev/null)
    if [ -z "$BACKUP_FILE" ]; then
        log_error "  Could not determine backup filename for VMID $vmid"
        FAILED+=("$vmid")
        continue
    fi
    log_info "  Backup created: $BACKUP_FILE"
    # Step 2: Restore on target node.
    # NOTE(review): upstream 'pct restore' has no '--target' option and
    # restores on the node it runs on — verify against your Proxmox version.
    log_info "  Restoring to $TARGET_NODE..."
    if RESTORE_RESULT=$(ssh_src "pct restore $vmid $BACKUP_FILE --storage $TARGET_STORAGE --target $TARGET_NODE 2>&1"); then
        log_success "  VMID $vmid migrated successfully"
        SUCCESS+=("$vmid")
        # Step 3: Delete from source (best-effort)
        log_info "  Removing from source node..."
        ssh_src "pct destroy $vmid 2>&1" || true
    else
        log_error "  Restore failed for VMID $vmid: $RESTORE_RESULT"
        FAILED+=("$vmid")
    fi
    echo ""
done

log_section "Migration Summary"
log_info "Successful migrations: ${#SUCCESS[@]}"
if [ ${#SUCCESS[@]} -gt 0 ]; then
    echo "  VMIDs: ${SUCCESS[*]}"
fi
if [ ${#FAILED[@]} -gt 0 ]; then
    log_warn "Failed migrations: ${#FAILED[@]}"
    echo "  VMIDs: ${FAILED[*]}"
fi

log_section "Verification"
log_info "Checking VMs on $TARGET_NODE..."
ssh_tgt "pct list 2>/dev/null | grep -E '$(IFS='|'; echo "${ALL_VMIDS[*]}")'" 2>/dev/null || true

log_section "Migration Complete"
if [ ${#FAILED[@]} -eq 0 ]; then
    log_success "All VMs migrated successfully!"
else
    log_warn "Some migrations failed. Please check the errors above."
fi

View File

@@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Pre-cache OS Template - Download Ubuntu 22.04 template before deployment
# This saves 5-10 minutes during deployment
#
# Must run on a Proxmox host (uses pveam). Exits 0 if the template is
# already cached; otherwise downloads it into the 'local' storage.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/lib/common.sh" 2>/dev/null || {
    # Basic logging if common.sh not available
    log_info() { echo "[INFO] $1"; }
    log_success() { echo "[✓] $1"; }
    log_error() { echo "[ERROR] $1"; exit 1; }
    # BUG FIX: command_exists is normally provided by common.sh; without
    # this fallback the pveam check below died with "command not found"
    # whenever common.sh was missing.
    command_exists() { command -v "$1" >/dev/null 2>&1; }
}

# Load configuration (load_config comes from common.sh; tolerate absence)
load_config "$PROJECT_ROOT/config/proxmox.conf" 2>/dev/null || true

TEMPLATE_NAME="${CONTAINER_OS_TEMPLATE:-local:vztmpl/ubuntu-22.04-standard_22.04-1_amd64.tar.zst}"
TEMPLATE_FILE="ubuntu-22.04-standard_22.04-1_amd64.tar.zst"

log_info "========================================="
log_info "Pre-cache OS Template"
log_info "========================================="
log_info ""
log_info "Template: $TEMPLATE_NAME"
log_info "File: $TEMPLATE_FILE"
log_info ""

# Check if running on Proxmox host
if ! command_exists pveam; then
    log_error "pveam command not found. This script must be run on Proxmox host."
fi

# Check if template already exists (-F: the filename contains regex
# metacharacters like '.', match it literally)
log_info "Checking if template already exists..."
if pveam list local | grep -qF "$TEMPLATE_FILE"; then
    log_success "Template $TEMPLATE_FILE already exists in local storage"
    log_info "No download needed. Deployment will use existing template."
    log_info ""
    log_info "Template details:"
    pveam list local | grep -F "$TEMPLATE_FILE"
    exit 0
fi

# Check available templates
log_info "Checking available templates..."
if ! pveam available | grep -qF "$TEMPLATE_FILE"; then
    log_error "Template $TEMPLATE_FILE not available. Please check template name."
fi

# Download template
log_info "Downloading template $TEMPLATE_FILE..."
log_info "This may take 5-10 minutes depending on network speed..."
log_info ""
if pveam download local "$TEMPLATE_FILE"; then
    log_success "Template downloaded successfully"
    log_info ""
    log_info "Template is now cached and ready for deployment"
    log_info "This saves 5-10 minutes during container creation phase"
    log_info ""
    log_info "Template details:"
    pveam list local | grep -F "$TEMPLATE_FILE"
else
    log_error "Failed to download template"
fi

View File

@@ -0,0 +1,135 @@
#!/usr/bin/env bash
# Safely prune old/obsolete documentation and content
# Creates a backup before deletion and provides detailed logging
#
# Flow: show the candidate files, ask for confirmation, copy each existing
# file into a timestamped backup directory, delete the original, then write
# an index file. Nothing is deleted without a backup copy.
set -euo pipefail
# NOTE(review): project root is hard-coded to this machine's path — confirm
# before running on another host.
PROJECT_ROOT="/home/intlc/projects/proxmox"
cd "$PROJECT_ROOT"
# Create backup directory
BACKUP_DIR="$PROJECT_ROOT/backup-old-docs-$(date +%Y%m%d-%H%M%S)"
mkdir -p "$BACKUP_DIR"
echo "=== Pruning Old Documentation ==="
echo "Backup directory: $BACKUP_DIR"
echo ""
# List of historical/obsolete documents to remove (with confirmation)
OBSOLETE_DOCS=(
    # Historical VMID migration docs (superseded by current ranges)
    "docs/HISTORICAL_VMID_REFERENCES.md"
    "docs/VMID_UPDATE_COMPLETE.md"
    "docs/VMID_REFERENCE_AUDIT.md"
    "docs/VMID_ALLOCATION.md"  # Superseded by VMID_ALLOCATION_FINAL.md
    # Old deployment status/completion reports
    "docs/DEPLOYMENT_STATUS.md"
    "docs/DEPLOYMENT_REVIEW_COMPLETE.md"
    "docs/DEPLOYMENT_REVIEW.md"
    "docs/DEPLOYMENT_TIME_ESTIMATE.md"
    "docs/DEPLOYMENT_TIME_ESTIMATE_BESU_ONLY.md"
    "docs/DEPLOYMENT_VALIDATION_REPORT.md"
    "docs/DEPLOYED_VMIDS_LIST.md"
    "docs/DEPLOYMENT_OPTIMIZATION_COMPLETE.md"
    "docs/DEPLOYMENT_OPTIMIZATION_RECOMMENDATIONS.md"
    "docs/DEPLOYMENT_RECOMMENDATIONS_STATUS.md"
    "docs/DEPLOYMENT_CONFIGURATION_VERIFICATION.md"
    # Old status/completion reports
    "docs/NEXT_STEPS_COMPREHENSIVE.md"
    "docs/NEXT_STEPS_COMPLETE.md"
    "docs/NEXT_STEPS_SUMMARY.md"
    "docs/COMPLETION_REPORT.md"
    "docs/FIXES_APPLIED.md"
    "docs/REVIEW_FIXES_APPLIED.md"
    "docs/MINOR_OBSERVATIONS_FIXED.md"
    "docs/NON_CRITICAL_FIXES_COMPLETE.md"
    "docs/QUICK_WINS_COMPLETED.md"
    "docs/TASK_COMPLETION_SUMMARY.md"
    "docs/IMPLEMENTATION_COMPLETE.md"
    "docs/PREREQUISITES_COMPLETE.md"
    "docs/SETUP_COMPLETE.md"
    "docs/SETUP_COMPLETE_FINAL.md"
    "docs/SETUP_STATUS.md"
    "docs/VALIDATION_STATUS.md"
    "docs/CONFIGURATION_ALIGNMENT.md"
    # Old review documents
    "docs/REVIEW_INCONSISTENCIES_GAPS.md"
    "docs/REVIEW_SUMMARY.md"
    "docs/COMPREHENSIVE_REVIEW.md"
    "docs/FINAL_REVIEW.md"
    "docs/DETAILED_ISSUES_REVIEW.md"
    "docs/RECOMMENDATIONS_OVERVIEW.md"
    # OS template analysis (historical)
    "docs/OS_TEMPLATE_CHANGE.md"
    "docs/UBUNTU_DEBIAN_ANALYSIS.md"
    "docs/OS_TEMPLATE_ANALYSIS.md"
    # Old DHCP documentation (containers now use static IPs)
    "docs/DHCP_IP_ADDRESSES.md"
)
echo "Files to be removed (will be backed up first):"
echo ""
for doc in "${OBSOLETE_DOCS[@]}"; do
    if [[ -f "$doc" ]]; then
        echo "  - $doc"
    fi
done
echo ""
echo "⚠️ WARNING: This will remove the files listed above"
echo "   All files will be backed up to: $BACKUP_DIR"
echo ""
# read without a variable name stores the answer in $REPLY
read -p "Continue with pruning? (yes/no): " -r
if [[ ! $REPLY =~ ^[Yy][Ee][Ss]$ ]]; then
    echo "Pruning cancelled"
    exit 0
fi
echo ""
echo "Creating backups and removing files..."
removed_count=0
skipped_count=0
for doc in "${OBSOLETE_DOCS[@]}"; do
    if [[ -f "$doc" ]]; then
        # Create backup (mirroring the original docs/ subdirectory layout)
        backup_path="$BACKUP_DIR/$doc"
        mkdir -p "$(dirname "$backup_path")"
        cp "$doc" "$backup_path"
        # Remove original
        rm "$doc"
        echo "  ✅ Removed: $doc (backed up)"
        removed_count=$((removed_count + 1))
    else
        skipped_count=$((skipped_count + 1))
    fi
done
echo ""
echo "=== Pruning Complete ==="
echo "  Files removed: $removed_count"
echo "  Files skipped (not found): $skipped_count"
echo "  Backup location: $BACKUP_DIR"
echo ""
# Create index of removed files
# NOTE(review): the index lists ALL candidates from OBSOLETE_DOCS, including
# files that were skipped because they did not exist — only the
# "Total removed" count reflects actual deletions.
cat > "$BACKUP_DIR/REMOVED_FILES_INDEX.txt" <<EOF
Removed Documentation Files
Generated: $(date)
Files removed:
$(for doc in "${OBSOLETE_DOCS[@]}"; do echo "  - $doc"; done)
Total removed: $removed_count
EOF
echo "Backup index created: $BACKUP_DIR/REMOVED_FILES_INDEX.txt"

View File

@@ -0,0 +1,170 @@
#!/usr/bin/env bash
# Review project content and identify old/outdated information to prune
# Focuses on old VMID references, obsolete documentation, and outdated configurations
set -euo pipefail
PROJECT_ROOT="/home/intlc/projects/proxmox"
cd "$PROJECT_ROOT"
echo "=== Project Content Review and Pruning Analysis ==="
echo ""
# Current valid VMID ranges (DO NOT PRUNE)
CURRENT_VALIDATORS="1000-1004"
CURRENT_SENTRIES="1500-1503"
CURRENT_RPC="2500-2502"
CURRENT_INFRASTRUCTURE="100-105"
# Old VMID ranges that should be removed/updated
OLD_VMIDS="106|107|108|109|110|111|112|113|114|115|116|117|118|119|120|121|122|123"
OLD_RANGES="106-110|111-114|115-117|120-129|130-139"
echo "Current Valid VMID Ranges:"
echo " Infrastructure: $CURRENT_INFRASTRUCTURE (KEEP)"
echo " Validators: $CURRENT_VALIDATORS"
echo " Sentries: $CURRENT_SENTRIES"
echo " RPC: $CURRENT_RPC"
echo ""
echo "=== Files with Old VMID References ==="
echo ""
# Find files with old VMID references
echo "Searching for files referencing old VMIDs ($OLD_VMIDS)..."
OLD_VMID_FILES=$(grep -rE "\b($OLD_VMIDS)\b" --include="*.md" --include="*.sh" --include="*.js" --include="*.py" --include="*.conf" --include="*.example" \
smom-dbis-138-proxmox/ docs/ scripts/ 2>/dev/null | cut -d: -f1 | sort -u)
if [[ -n "$OLD_VMID_FILES" ]]; then
echo "Found $(echo "$OLD_VMID_FILES" | wc -l) files with old VMID references:"
echo ""
for file in $OLD_VMID_FILES; do
# Skip if file references current VMIDs too (may be migration docs)
if grep -qE "\b(1000|1001|1002|1003|1004|1500|1501|1502|1503|2500|2501|2502)\b" "$file" 2>/dev/null; then
echo " ⚠️ $file (has both old and new VMIDs - migration/historical doc?)"
else
echo "$file (old VMIDs only - candidate for update/removal)"
fi
done
else
echo " ✅ No files found with old VMID references"
fi
echo ""
echo "=== Historical/Migration Documents (May be obsolete) ==="
echo ""
HISTORICAL_DOCS=(
"docs/HISTORICAL_VMID_REFERENCES.md"
"docs/VMID_UPDATE_COMPLETE.md"
"docs/VMID_REFERENCE_AUDIT.md"
"docs/VMID_ALLOCATION.md"
"docs/OS_TEMPLATE_CHANGE.md"
"docs/UBUNTU_DEBIAN_ANALYSIS.md"
"docs/OS_TEMPLATE_ANALYSIS.md"
"docs/DEPLOYMENT_REVIEW_COMPLETE.md"
"docs/DEPLOYMENT_REVIEW.md"
"docs/DEPLOYMENT_STATUS.md"
"docs/DEPLOYMENT_OPTIMIZATION_COMPLETE.md"
"docs/DEPLOYMENT_TIME_ESTIMATE.md"
"docs/DEPLOYMENT_TIME_ESTIMATE_BESU_ONLY.md"
"docs/DEPLOYMENT_VALIDATION_REPORT.md"
"docs/DEPLOYMENT_VALIDATION_REQUIREMENTS.md"
"docs/DEPLOYED_VMIDS_LIST.md"
"docs/NEXT_STEPS_COMPREHENSIVE.md"
"docs/NEXT_STEPS_COMPLETE.md"
"docs/NEXT_STEPS_SUMMARY.md"
"docs/COMPLETION_REPORT.md"
"docs/FIXES_APPLIED.md"
"docs/REVIEW_FIXES_APPLIED.md"
"docs/MINOR_OBSERVATIONS_FIXED.md"
"docs/NON_CRITICAL_FIXES_COMPLETE.md"
"docs/QUICK_WINS_COMPLETED.md"
"docs/TASK_COMPLETION_SUMMARY.md"
"docs/IMPLEMENTATION_COMPLETE.md"
"docs/PREREQUISITES_COMPLETE.md"
"docs/SETUP_COMPLETE.md"
"docs/SETUP_COMPLETE_FINAL.md"
"docs/SETUP_STATUS.md"
"docs/VALIDATION_STATUS.md"
"docs/CONFIGURATION_ALIGNMENT.md"
"docs/DEPLOYMENT_CONFIGURATION_VERIFICATION.md"
"docs/REVIEW_INCONSISTENCIES_GAPS.md"
"docs/REVIEW_SUMMARY.md"
"docs/COMPREHENSIVE_REVIEW.md"
"docs/FINAL_REVIEW.md"
"docs/DETAILED_ISSUES_REVIEW.md"
"docs/DEPLOYMENT_OPTIMIZATION_RECOMMENDATIONS.md"
"docs/DEPLOYMENT_RECOMMENDATIONS_STATUS.md"
"docs/RECOMMENDATIONS_OVERVIEW.md"
)
for doc in "${HISTORICAL_DOCS[@]}"; do
if [[ -f "$doc" ]]; then
echo " 📄 $doc"
fi
done
echo ""
echo "=== Duplicate/Similar Documentation Files ==="
echo ""
# Find potentially duplicate documentation
DUPLICATE_PATTERNS=(
"*QUICK_START*"
"*DEPLOYMENT*"
"*NEXT_STEPS*"
"*REVIEW*"
"*COMPLETE*"
"*STATUS*"
)
for pattern in "${DUPLICATE_PATTERNS[@]}"; do
files=$(find docs/ smom-dbis-138-proxmox/docs/ -name "$pattern" -type f 2>/dev/null | sort)
if [[ -n "$files" ]]; then
count=$(echo "$files" | wc -l)
if [[ $count -gt 1 ]]; then
echo " Found $count files matching '$pattern':"
echo "$files" | sed 's/^/ /'
echo ""
fi
fi
done
echo "=== Outdated Configuration Examples ==="
echo ""
# Check for old config examples
if [[ -f "smom-dbis-138-proxmox/config/proxmox.conf.example" ]]; then
if grep -qE "\b(106|110|115|120)\b" smom-dbis-138-proxmox/config/proxmox.conf.example 2>/dev/null; then
echo " ⚠️ smom-dbis-138-proxmox/config/proxmox.conf.example contains old VMID defaults"
fi
fi
echo ""
echo "=== Summary and Recommendations ==="
echo ""
echo "Files to REVIEW for removal/update:"
echo " 1. Historical migration documents (if no longer needed)"
echo " 2. Duplicate documentation files"
echo " 3. Files with old VMID references that don't also reference current ranges"
echo " 4. Old configuration examples"
echo ""
echo "Files to KEEP (may have old references but are historical):"
echo " - Migration/historical reference documents (for context)"
echo " - Current active documentation (even if examples need updating)"
echo " - Configuration files in use"
echo ""
echo "=== Next Steps ==="
echo ""
echo "1. Review historical documents list above - decide which to archive/delete"
echo "2. Update active documentation with old VMID references"
echo "3. Update configuration examples to use current VMID ranges"
echo "4. Remove or archive obsolete status/completion reports"
echo ""
echo "To safely remove files, use:"
echo " ./scripts/prune-old-documentation.sh"

View File

@@ -0,0 +1,88 @@
#!/bin/bash
# Vault Raft Snapshot Backup Script
# Creates automated backups of Vault cluster
#
# Requires VAULT_TOKEN in the environment; snapshots are streamed from the
# Vault LXC container over SSH, gzipped, pruned by age, and indexed.
set -euo pipefail

# Load IP configuration (optional; defaults below apply when absent)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }

# Configuration (all overridable via environment)
PROXMOX_HOST_1="${PROXMOX_HOST_1:-192.168.11.11}"
VAULT_CONTAINER="${VAULT_CONTAINER:-8640}"
VAULT_TOKEN="${VAULT_TOKEN:-}"
BACKUP_DIR="${BACKUP_DIR:-/home/intlc/projects/proxmox/.secure/vault-backups}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"

if [ -z "$VAULT_TOKEN" ]; then
    log_error "VAULT_TOKEN environment variable is required"
    log_info "Usage: VAULT_TOKEN=<token> ./scripts/vault-backup.sh"
    exit 1
fi

# Create backup directory (0700: snapshots contain sensitive Vault data)
mkdir -p "$BACKUP_DIR"
chmod 700 "$BACKUP_DIR"

# Generate backup filename
BACKUP_FILE="$BACKUP_DIR/vault-snapshot-$(date +%Y%m%d-%H%M%S).snapshot"

echo "═══════════════════════════════════════════════════════════"
echo "  Vault Raft Snapshot Backup"
echo "═══════════════════════════════════════════════════════════"
echo ""
log_info "Creating Raft snapshot..."
log_info "Backup file: $BACKUP_FILE"

# Create snapshot: 'snapshot save -' streams the archive to stdout, which
# SSH forwards to the local file.
# NOTE(review): the token is interpolated into the remote command line and
# is therefore visible in 'ps' output on the Proxmox host — consider
# passing it via stdin or an env file instead.
if ssh root@"$PROXMOX_HOST_1" "pct exec $VAULT_CONTAINER -- bash -c 'export VAULT_ADDR=http://127.0.0.1:8200 && export VAULT_TOKEN=$VAULT_TOKEN && vault operator raft snapshot save -'" > "$BACKUP_FILE" 2>/dev/null; then
    BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
    log_success "Snapshot created successfully ($BACKUP_SIZE)"
else
    log_error "Failed to create snapshot"
    exit 1
fi

# Compress backup (best-effort; an uncompressed snapshot is still valid)
log_info "Compressing backup..."
if gzip "$BACKUP_FILE"; then
    BACKUP_FILE="${BACKUP_FILE}.gz"
    BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
    log_success "Backup compressed ($BACKUP_SIZE)"
else
    log_warn "Compression failed, keeping uncompressed backup"
fi

# Clean up old backups
log_info "Cleaning up backups older than $RETENTION_DAYS days..."
find "$BACKUP_DIR" -name "vault-snapshot-*.snapshot*" -type f -mtime +"$RETENTION_DAYS" -delete
# BUG FIX: the original named this DELETED_COUNT, but the second find runs
# AFTER pruning and counts the files that REMAIN — matching the "Retained"
# log message below, not the number deleted.
RETAINED_COUNT=$(find "$BACKUP_DIR" -name "vault-snapshot-*.snapshot*" -type f | wc -l)
log_success "Retained $RETAINED_COUNT backup(s)"

# Create backup index (timestamp | path | size, one line per run)
BACKUP_INDEX="$BACKUP_DIR/backup-index.txt"
echo "$(date -Iseconds) | $BACKUP_FILE | $(du -h "$BACKUP_FILE" | cut -f1)" >> "$BACKUP_INDEX"
log_success "Backup index updated"

echo ""
log_success "✅ Backup completed successfully"
log_info "Backup location: $BACKUP_FILE"
log_info "To restore: vault operator raft snapshot restore $BACKUP_FILE"
echo ""

View File

@@ -0,0 +1,82 @@
#!/bin/bash
# Vault Raft Snapshot Backup Script
# Creates automated backups of Vault cluster
#
# Takes a Raft snapshot from the Vault instance running inside an LXC
# container on a Proxmox host, compresses it locally, prunes snapshots older
# than RETENTION_DAYS, and appends an entry to a plain-text backup index.
#
# Required env:  VAULT_TOKEN      - token authorized to take Raft snapshots
# Optional env:  PROXMOX_HOST_1   - Proxmox host (default 192.168.11.11)
#                VAULT_CONTAINER  - LXC VMID running Vault (default 8640)
#                BACKUP_DIR       - local snapshot directory
#                RETENTION_DAYS   - prune threshold in days (default 30)
set -euo pipefail

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }

# Configuration (all overridable from the environment)
PROXMOX_HOST_1="${PROXMOX_HOST_1:-192.168.11.11}"
VAULT_CONTAINER="${VAULT_CONTAINER:-8640}"
VAULT_TOKEN="${VAULT_TOKEN:-}"
BACKUP_DIR="${BACKUP_DIR:-/home/intlc/projects/proxmox/.secure/vault-backups}"
RETENTION_DAYS="${RETENTION_DAYS:-30}"

if [ -z "$VAULT_TOKEN" ]; then
  log_error "VAULT_TOKEN environment variable is required"
  log_info "Usage: VAULT_TOKEN=<token> ./scripts/vault-backup.sh"
  exit 1
fi

# Create backup directory (0700: snapshots contain the full Vault dataset)
mkdir -p "$BACKUP_DIR"
chmod 700 "$BACKUP_DIR"

# Generate backup filename (timestamped so runs never overwrite each other)
BACKUP_FILE="$BACKUP_DIR/vault-snapshot-$(date +%Y%m%d-%H%M%S).snapshot"

echo "═══════════════════════════════════════════════════════════"
echo " Vault Raft Snapshot Backup"
echo "═══════════════════════════════════════════════════════════"
echo ""
log_info "Creating Raft snapshot..."
log_info "Backup file: $BACKUP_FILE"

# Create snapshot.
# SECURITY FIX: the token is piped over stdin instead of being interpolated
# into the remote command line, so it no longer appears in `ps` output or
# shell history on the Proxmox host or inside the container. The remote
# shell reads one line from stdin into VAULT_TOKEN before invoking vault;
# the snapshot bytes then flow back over stdout into $BACKUP_FILE.
if printf '%s\n' "$VAULT_TOKEN" | ssh root@"$PROXMOX_HOST_1" "pct exec $VAULT_CONTAINER -- bash -c 'IFS= read -r VAULT_TOKEN; export VAULT_TOKEN; export VAULT_ADDR=http://127.0.0.1:8200; vault operator raft snapshot save -'" > "$BACKUP_FILE" 2>/dev/null; then
  BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
  log_success "Snapshot created successfully ($BACKUP_SIZE)"
else
  log_error "Failed to create snapshot"
  exit 1
fi

# Compress backup; a gzip failure is non-fatal (raw snapshot is kept).
log_info "Compressing backup..."
if gzip "$BACKUP_FILE"; then
  BACKUP_FILE="${BACKUP_FILE}.gz"
  BACKUP_SIZE=$(du -h "$BACKUP_FILE" | cut -f1)
  log_success "Backup compressed ($BACKUP_SIZE)"
else
  log_warn "Compression failed, keeping uncompressed backup"
fi

# Clean up old backups, then count what survived the prune.
log_info "Cleaning up backups older than $RETENTION_DAYS days..."
find "$BACKUP_DIR" -name "vault-snapshot-*.snapshot*" -type f -mtime +"$RETENTION_DAYS" -delete
RETAINED_COUNT=$(find "$BACKUP_DIR" -name "vault-snapshot-*.snapshot*" -type f | wc -l)
log_success "Retained $RETAINED_COUNT backup(s)"

# Create backup index: append "timestamp | path | size" for each run.
BACKUP_INDEX="$BACKUP_DIR/backup-index.txt"
echo "$(date -Iseconds) | $BACKUP_FILE | $(du -h "$BACKUP_FILE" | cut -f1)" >> "$BACKUP_INDEX"
log_success "Backup index updated"

echo ""
log_success "✅ Backup completed successfully"
log_info "Backup location: $BACKUP_FILE"
log_info "To restore: vault operator raft snapshot restore $BACKUP_FILE"
echo ""

View File

@@ -0,0 +1,171 @@
#!/usr/bin/env bash
# Configure Cloudflare Access policies with allowed email addresses
# Usage: ./configure-access-policies.sh [email1] [email2] [email3] ...
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TUNNELS_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Load .env (best-effort: `|| true` keeps set -e from aborting on a bad .env)
if [ -f "$TUNNELS_DIR/../../.env" ]; then
source "$TUNNELS_DIR/../../.env" 2>/dev/null || true
fi
# The legacy (email + global API key) auth scheme is used below, so all three
# credentials must be present.
if [[ -z "${CLOUDFLARE_ACCOUNT_ID:-}" ]] || [[ -z "${CLOUDFLARE_API_KEY:-}" ]] || [[ -z "${CLOUDFLARE_EMAIL:-}" ]]; then
log_error "Cloudflare credentials not found in .env"
exit 1
fi
# Get email addresses from command line; fall back to interactive entry.
ALLOWED_EMAILS=("$@")
if [ ${#ALLOWED_EMAILS[@]} -eq 0 ]; then
log_info "Enter allowed email addresses (one per line, empty line to finish):"
while IFS= read -r email; do
[[ -z "$email" ]] && break
# Basic sanity-check; obviously malformed entries are skipped, not fatal.
[[ "$email" =~ ^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$ ]] || {
log_warn "Invalid email format: $email (skipping)"
continue
}
ALLOWED_EMAILS+=("$email")
done
fi
if [ ${#ALLOWED_EMAILS[@]} -eq 0 ]; then
log_error "No email addresses provided"
exit 1
fi
log_info "Allowed emails: ${ALLOWED_EMAILS[*]}"
echo ""
#######################################
# Make an authenticated Cloudflare v4 API request (legacy email+key auth).
# Globals:   CLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY (read)
# Arguments: $1 - HTTP method; $2 - endpoint path (e.g. /accounts/...);
#            $3 - optional JSON request body
# Outputs:   response body on stdout; on failure, the first API error message
# Returns:   0 on HTTP 200/201, 1 otherwise
#######################################
cf_api_request() {
  local method="$1"
  local endpoint="$2"
  local data="${3:-}"
  local url="https://api.cloudflare.com/client/v4${endpoint}"
  # Split declaration from assignment so a mktemp failure is not masked by
  # `local` (which always returns 0); an empty temp_file would otherwise
  # silently break the curl -o below.
  local temp_file
  temp_file=$(mktemp) || { log_error "mktemp failed"; return 1; }
  local http_code
  if [[ -n "$data" ]]; then
    http_code=$(curl -s -o "$temp_file" -w "%{http_code}" \
      -X "$method" "$url" \
      -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \
      -H "X-Auth-Key: ${CLOUDFLARE_API_KEY}" \
      -H "Content-Type: application/json" \
      -d "$data" 2>/dev/null)
  else
    http_code=$(curl -s -o "$temp_file" -w "%{http_code}" \
      -X "$method" "$url" \
      -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \
      -H "X-Auth-Key: ${CLOUDFLARE_API_KEY}" \
      -H "Content-Type: application/json" 2>/dev/null)
  fi
  local response
  response=$(cat "$temp_file" 2>/dev/null || echo "")
  rm -f "$temp_file"
  if [[ "$http_code" != "200" ]] && [[ "$http_code" != "201" ]]; then
    log_error "API request failed (HTTP $http_code)"
    echo "$response" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "$response"
    return 1
  fi
  echo "$response"
}
# Get Access applications and map each managed hostname to its app ID.
log_info "Fetching Access applications..."
APPS_RESPONSE=$(cf_api_request "GET" "/accounts/${CLOUDFLARE_ACCOUNT_ID}/access/apps" 2>&1)
declare -A APP_IDS=()
for hostname in ml110-01.d-bis.org r630-01.d-bis.org r630-02.d-bis.org; do
app_id=$(echo "$APPS_RESPONSE" | jq -r ".result[]? | select(.domain? == \"${hostname}\") | .id" 2>/dev/null || echo "")
if [[ -n "$app_id" ]] && [[ "$app_id" != "null" ]] && [[ "$app_id" != "" ]]; then
APP_IDS["$hostname"]="$app_id"
log_success "Found app for $hostname: $app_id"
else
# Missing apps are skipped, not fatal — only a total absence aborts below.
log_warn "No app found for $hostname"
fi
done
if [ ${#APP_IDS[@]} -eq 0 ]; then
log_error "No Access applications found"
exit 1
fi
echo ""
# Build email include array: each address becomes {"email":{"email":"..."}}
# as expected by the Access policy "include" rules.
EMAIL_INCLUDES=$(printf '%s\n' "${ALLOWED_EMAILS[@]}" | jq -R . | jq -s . | jq 'map({email: {email: .}})')
# Configure (create or update) the "Allow Team Access" policy on each app.
for hostname in "${!APP_IDS[@]}"; do
app_id="${APP_IDS[$hostname]}"
log_info "Configuring policy for $hostname..."
# Get existing policies so we update in place instead of duplicating.
POLICIES_RESPONSE=$(cf_api_request "GET" "/accounts/${CLOUDFLARE_ACCOUNT_ID}/access/apps/${app_id}/policies" 2>&1)
EXISTING_POLICY_ID=$(echo "$POLICIES_RESPONSE" | jq -r '.result[] | select(.name == "Allow Team Access") | .id' 2>/dev/null || echo "")
# Build policy data.
# NOTE(review): the `require: [{email: {}}]` rule carries an empty email
# object — presumably "any email identity" — verify against the Cloudflare
# Access policy schema that this is the intended semantics.
POLICY_DATA=$(jq -n \
--argjson emails "$EMAIL_INCLUDES" \
'{
name: "Allow Team Access",
decision: "allow",
include: $emails,
require: [
{
email: {}
}
]
}')
if [[ -n "$EXISTING_POLICY_ID" ]] && [[ "$EXISTING_POLICY_ID" != "null" ]]; then
# Update existing policy
log_info " Updating existing policy..."
response=$(cf_api_request "PUT" "/accounts/${CLOUDFLARE_ACCOUNT_ID}/access/apps/${app_id}/policies/${EXISTING_POLICY_ID}" "$POLICY_DATA" 2>&1)
else
# Create new policy
log_info " Creating new policy..."
response=$(cf_api_request "POST" "/accounts/${CLOUDFLARE_ACCOUNT_ID}/access/apps/${app_id}/policies" "$POLICY_DATA" 2>&1)
fi
if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
log_success " ✓ Policy configured for $hostname"
else
log_error " ✗ Failed to configure policy for $hostname"
echo "$response" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null
fi
echo ""
done
log_success "=== Access Policies Configured ==="
log_info "Allowed emails:"
for email in "${ALLOWED_EMAILS[@]}"; do
echo " - $email"
done
echo ""
log_info "These emails can now access:"
for hostname in "${!APP_IDS[@]}"; do
echo " - https://$hostname"
done

View File

@@ -0,0 +1,343 @@
#!/usr/bin/env bash
# Configure all Cloudflare DNS records for all 19 domains
# Uses provided API key to ensure all DNS is correctly setup
# All domains point to public IP: 76.53.10.36
# Temporarily disable strict mode for .env sourcing
# NOTE(review): `set +euo pipefail` parses as +e +u "+o pipefail" — i.e. it
# DISABLES errexit/nounset/pipefail. Nothing was enabled yet at this point,
# so it is effectively a no-op guard for the .env sourcing below.
set +euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Source .env file if it exists (with error handling)
if [ -f "$PROJECT_ROOT/.env" ]; then
source "$PROJECT_ROOT/.env" 2>/dev/null || true
fi
# Re-enable strict mode for the rest of the script.
set -euo pipefail
# Cloudflare API token.
# SECURITY FIX: a live API token was previously hard-coded here as the
# fallback value, committing a secret to version control. The token must now
# come from the environment (or the sourced .env); the previously leaked
# token should be revoked and rotated in the Cloudflare dashboard.
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:?CLOUDFLARE_API_TOKEN must be set (export it or add it to .env)}"
# Public IP for all domains
PUBLIC_IP="${PUBLIC_IP:-76.53.10.36}"
# Zone IDs (from .env or environment variables)
ZONE_SANKOFA_NEXUS="${CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS:-}"
ZONE_D_BIS_ORG="${CLOUDFLARE_ZONE_ID_D_BIS_ORG:-${CLOUDFLARE_ZONE_ID:-}}"
ZONE_MIM4U_ORG="${CLOUDFLARE_ZONE_ID_MIM4U_ORG:-}"
ZONE_DEFI_ORACLE_IO="${CLOUDFLARE_ZONE_ID_DEFI_ORACLE_IO:-}"
# Map of every FQDN we manage to its parent zone; each entry below receives
# an A record pointing at $PUBLIC_IP.
declare -A DOMAIN_ZONES=(
  ["sankofa.nexus"]="sankofa.nexus"
  ["www.sankofa.nexus"]="sankofa.nexus"
  ["phoenix.sankofa.nexus"]="sankofa.nexus"
  ["www.phoenix.sankofa.nexus"]="sankofa.nexus"
  ["the-order.sankofa.nexus"]="sankofa.nexus"
  ["explorer.d-bis.org"]="d-bis.org"
  ["rpc-http-pub.d-bis.org"]="d-bis.org"
  ["rpc-ws-pub.d-bis.org"]="d-bis.org"
  ["rpc-http-prv.d-bis.org"]="d-bis.org"
  ["rpc-ws-prv.d-bis.org"]="d-bis.org"
  ["dbis-admin.d-bis.org"]="d-bis.org"
  ["dbis-api.d-bis.org"]="d-bis.org"
  ["dbis-api-2.d-bis.org"]="d-bis.org"
  ["secure.d-bis.org"]="d-bis.org"
  ["mim4u.org"]="mim4u.org"
  ["www.mim4u.org"]="mim4u.org"
  ["secure.mim4u.org"]="mim4u.org"
  ["training.mim4u.org"]="mim4u.org"
  ["rpc.public-0138.defi-oracle.io"]="defi-oracle.io"
)
# Issue an authenticated request against the Cloudflare v4 API.
# $1 = HTTP method, $2 = full URL, $3 = optional JSON payload.
# Prints the raw response body on stdout.
cf_api_request() {
  local method="$1"
  local url="$2"
  local payload="${3:-}"
  local curl_args=(
    -s -X "$method" "$url"
    -H "Authorization: Bearer $CLOUDFLARE_API_TOKEN"
    -H "Content-Type: application/json"
  )
  if [ -n "$payload" ]; then
    curl_args+=(--data "$payload")
  fi
  curl "${curl_args[@]}"
}
#######################################
# Resolve the Cloudflare zone ID for a zone name.
# Resolution order: explicit env var -> zones API lookup by name ->
# scan of all visible zones for one holding a known DNS record.
# Globals:   ZONE_SANKOFA_NEXUS, ZONE_D_BIS_ORG, ZONE_MIM4U_ORG,
#            ZONE_DEFI_ORACLE_IO (read)
# Arguments: $1 - zone name (e.g. "d-bis.org")
# Outputs:   the zone ID on stdout (and ONLY the zone ID — see fix below)
# Returns:   0 when resolved, 1 otherwise
#
# BUG FIX: the log_* helpers write to stdout, and callers capture this
# function with $(get_zone_id ...), so every log line was being captured
# into the zone_id variable alongside (or instead of) the real ID. All
# log calls inside this function now go to stderr (>&2).
#######################################
get_zone_id() {
  local zone_name="$1"
  # Try to get from environment variable first
  case "$zone_name" in
    "sankofa.nexus")
      if [ -n "$ZONE_SANKOFA_NEXUS" ]; then
        echo "$ZONE_SANKOFA_NEXUS"
        return 0
      fi
      ;;
    "d-bis.org")
      if [ -n "$ZONE_D_BIS_ORG" ]; then
        echo "$ZONE_D_BIS_ORG"
        return 0
      fi
      ;;
    "mim4u.org")
      if [ -n "$ZONE_MIM4U_ORG" ]; then
        echo "$ZONE_MIM4U_ORG"
        return 0
      fi
      ;;
    "defi-oracle.io")
      if [ -n "$ZONE_DEFI_ORACLE_IO" ]; then
        echo "$ZONE_DEFI_ORACLE_IO"
        return 0
      fi
      ;;
  esac
  # Try to get from API (direct lookup by zone name).
  log_info "Getting zone ID for: $zone_name" >&2
  local response zone_id zone_status
  response=$(cf_api_request "GET" "https://api.cloudflare.com/client/v4/zones?name=${zone_name}") || response=""
  zone_id=$(echo "$response" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
  zone_status=$(echo "$response" | jq -r '.success // false' 2>/dev/null || echo "false")
  if [ "$zone_status" = "true" ] && [ -n "$zone_id" ] && [ "$zone_id" != "null" ]; then
    echo "$zone_id"
    return 0
  fi
  # Fallback: find the zone that owns a known DNS record of ours.
  log_info " Trying to get zone ID from existing DNS record..." >&2
  local test_domain=""
  case "$zone_name" in
    "sankofa.nexus") test_domain="sankofa.nexus" ;;
    "d-bis.org") test_domain="explorer.d-bis.org" ;;
    "mim4u.org") test_domain="mim4u.org" ;;
    "defi-oracle.io") test_domain="rpc.public-0138.defi-oracle.io" ;;
  esac
  if [ -n "$test_domain" ]; then
    # Enumerate all zones visible to this token and probe each for the record.
    local all_zones zone_count
    all_zones=$(cf_api_request "GET" "https://api.cloudflare.com/client/v4/zones") || all_zones=""
    zone_count=$(echo "$all_zones" | jq -r '.result | length' 2>/dev/null || echo "0")
    if [ "$zone_count" -gt 0 ]; then
      local i check_zone_id check_zone_name dns_check has_record
      for i in $(seq 0 $((zone_count - 1))); do
        check_zone_id=$(echo "$all_zones" | jq -r ".result[$i].id" 2>/dev/null || echo "")
        check_zone_name=$(echo "$all_zones" | jq -r ".result[$i].name" 2>/dev/null || echo "")
        if [ -n "$check_zone_id" ] && [ "$check_zone_id" != "null" ]; then
          # Check if this zone has a DNS record for our test domain
          dns_check=$(cf_api_request "GET" "https://api.cloudflare.com/client/v4/zones/${check_zone_id}/dns_records?name=${test_domain}") || dns_check=""
          has_record=$(echo "$dns_check" | jq -r '.result | length' 2>/dev/null || echo "0")
          if [ "$has_record" -gt 0 ] || [ "$check_zone_name" = "$zone_name" ]; then
            echo "$check_zone_id"
            return 0
          fi
        fi
      done
    fi
  fi
  log_error "Failed to get zone ID for $zone_name" >&2
  log_warn " Please provide zone ID via environment variable:" >&2
  case "$zone_name" in
    "sankofa.nexus") log_warn " CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS=your-zone-id" >&2 ;;
    "d-bis.org") log_warn " CLOUDFLARE_ZONE_ID_D_BIS_ORG=your-zone-id" >&2 ;;
    "mim4u.org") log_warn " CLOUDFLARE_ZONE_ID_MIM4U_ORG=your-zone-id" >&2 ;;
    "defi-oracle.io") log_warn " CLOUDFLARE_ZONE_ID_DEFI_ORACLE_IO=your-zone-id" >&2 ;;
  esac
  return 1
}
# Fetch the existing A record (if any) for a domain within a zone.
# $1 = zone ID, $2 = fully-qualified domain name.
# Prints the record object as JSON, or nothing when no record exists.
get_dns_record() {
  local zone="$1"
  local fqdn="$2"
  local reply
  reply=$(cf_api_request "GET" "https://api.cloudflare.com/client/v4/zones/${zone}/dns_records?name=${fqdn}&type=A") || reply=""
  echo "$reply" | jq -r '.result[0] // empty' 2>/dev/null || echo ""
}
# Create a new A record, or update the existing one, for a domain.
# $1 = zone ID, $2 = domain name, $3 = IPv4 address.
# Returns 0 when the Cloudflare API reports success, 1 otherwise.
create_or_update_dns_record() {
  local zone="$1"
  local fqdn="$2"
  local ip="$3"

  # Look up any existing A record so we know whether to PUT or POST.
  local current record_id
  current=$(get_dns_record "$zone" "$fqdn")
  record_id=$(echo "$current" | jq -r '.id // empty' 2>/dev/null || echo "")

  # Unproxied A record with automatic TTL (ttl: 1 means "automatic").
  local payload
  payload=$(jq -n \
    --arg name "$fqdn" \
    --arg content "$ip" \
    '{
      type: "A",
      name: $name,
      content: $content,
      proxied: false,
      ttl: 1
    }')

  local reply
  if [ -n "$record_id" ] && [ "$record_id" != "null" ]; then
    log_info " Updating existing DNS record: $fqdn"
    reply=$(cf_api_request "PUT" "https://api.cloudflare.com/client/v4/zones/${zone}/dns_records/${record_id}" "$payload")
  else
    log_info " Creating new DNS record: $fqdn"
    reply=$(cf_api_request "POST" "https://api.cloudflare.com/client/v4/zones/${zone}/dns_records" "$payload")
  fi

  if [ "$(echo "$reply" | jq -r '.success // false' 2>/dev/null || echo "false")" = "true" ]; then
    log_success " ✓ DNS record configured: $fqdn → $ip"
    return 0
  fi
  local why
  why=$(echo "$reply" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "Unknown error")
  log_error " ✗ Failed to configure DNS: $fqdn - $why"
  return 1
}
# Main execution: resolve zone IDs for every unique zone, then upsert one
# unproxied A record per managed domain pointing at $PUBLIC_IP.
main() {
  echo ""
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo "🌐 Cloudflare DNS Configuration"
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  log_info "Public IP: $PUBLIC_IP"
  log_info "Total domains: ${#DOMAIN_ZONES[@]}"
  echo ""
  # Step 1: resolve a zone ID for every unique parent zone.
  log_info "Step 1: Getting zone IDs..."
  declare -A ZONE_IDS
  local unique_zones=()
  mapfile -t unique_zones < <(printf '%s\n' "${DOMAIN_ZONES[@]}" | sort -u)
  local missing_zones=()
  local zone zone_id
  for zone in "${unique_zones[@]}"; do
    # get_zone_id may legitimately fail; don't let set -e abort the script —
    # unresolved zones are collected and reported below.
    zone_id=$(get_zone_id "$zone") || zone_id=""
    if [ -n "$zone_id" ] && [ "$zone_id" != "null" ]; then
      ZONE_IDS["$zone"]="$zone_id"
      log_success " Zone: $zone → $zone_id"
    else
      log_warn " Could not get zone ID for: $zone"
      missing_zones+=("$zone")
    fi
  done
  echo ""
  # If zone IDs are missing, print manual-lookup instructions.
  if [ ${#missing_zones[@]} -gt 0 ]; then
    log_warn "Some zone IDs could not be automatically detected."
    log_info "To get zone IDs:"
    log_info " 1. Go to Cloudflare Dashboard: https://dash.cloudflare.com"
    log_info " 2. Select each domain zone"
    log_info " 3. Scroll down to 'API' section on the right sidebar"
    log_info " 4. Copy the 'Zone ID'"
    log_info ""
    log_info "Then set environment variables:"
    for zone in "${missing_zones[@]}"; do
      case "$zone" in
        "sankofa.nexus")
          log_info " export CLOUDFLARE_ZONE_ID_SANKOFA_NEXUS=your-zone-id" ;;
        "d-bis.org")
          log_info " export CLOUDFLARE_ZONE_ID_D_BIS_ORG=your-zone-id" ;;
        "mim4u.org")
          log_info " export CLOUDFLARE_ZONE_ID_MIM4U_ORG=your-zone-id" ;;
        "defi-oracle.io")
          log_info " export CLOUDFLARE_ZONE_ID_DEFI_ORACLE_IO=your-zone-id" ;;
      esac
    done
    echo ""
    if [ ${#ZONE_IDS[@]} -eq 0 ]; then
      log_error "No zone IDs available. Cannot proceed."
      exit 1
    else
      log_warn "Proceeding with available zone IDs. Some domains may be skipped."
    fi
  fi
  echo ""
  # Step 2: upsert one A record per domain.
  log_info "Step 2: Configuring DNS records..."
  local success_count=0
  local fail_count=0
  local domain zone_name
  for domain in "${!DOMAIN_ZONES[@]}"; do
    zone_name="${DOMAIN_ZONES[$domain]}"
    # BUG FIX: use ${...:-} — under `set -u`, expanding a missing associative
    # array key aborted the whole script here instead of reaching the skip
    # branch below for zones that failed resolution.
    zone_id="${ZONE_IDS[$zone_name]:-}"
    if [ -z "$zone_id" ] || [ "$zone_id" = "null" ]; then
      log_warn "Skipping $domain - no zone ID for $zone_name"
      fail_count=$((fail_count + 1))
      continue
    fi
    # A single record failure must not abort the run; count it instead.
    set +e
    if create_or_update_dns_record "$zone_id" "$domain" "$PUBLIC_IP"; then
      success_count=$((success_count + 1))
    else
      fail_count=$((fail_count + 1))
    fi
    set -e
    # Small delay to avoid rate limiting
    sleep 0.5
  done
  echo ""
  # Summary
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  log_info "Configuration Summary:"
  log_success " Successful: $success_count"
  if [ $fail_count -gt 0 ]; then
    log_error " Failed: $fail_count"
  fi
  echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
  echo ""
  if [ $fail_count -eq 0 ]; then
    log_success "✅ All DNS records configured successfully!"
    log_info "DNS changes may take a few minutes to propagate"
    return 0
  else
    log_warn "⚠️ Some DNS records failed to configure"
    return 1
  fi
}

# Run main function
main "$@"

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Configure All Databases - Create databases and users
set -uo pipefail
# Fail fast with an actionable message when the host variable is missing;
# under `set -u` the bare expansion would abort with an opaque
# "unbound variable" error instead.
NODE_IP="${PROXMOX_HOST_R630_01:?PROXMOX_HOST_R630_01 must be set to the Proxmox host IP}"
log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_error() { echo -e "\033[0;31m[ERROR]\033[0m $1"; }
# Create the Order service database and role on the PostgreSQL instance
# inside LXC container $1 (reached via ssh to the Proxmox host + pct exec).
# SECURITY NOTE(review): 'order_password' is a hard-coded default credential
# committed to the repo — rotate it and source it from Vault/env instead.
# The nested ssh → pct exec → su -c → heredoc quoting is fragile; test any
# change to the embedded SQL carefully. CREATE DATABASE/USER will error if
# they already exist; the trailing `|| log_error` keeps that non-fatal.
configure_order_db() {
local vmid="$1"
log_info "Configuring Order database on CT $vmid..."
ssh -o ConnectTimeout=15 -o StrictHostKeyChecking=no root@${NODE_IP} "
pct exec $vmid -- su - postgres -c \"
psql << 'SQL_EOF'
CREATE DATABASE order_db;
CREATE USER order_user WITH PASSWORD 'order_password';
GRANT ALL PRIVILEGES ON DATABASE order_db TO order_user;
ALTER DATABASE order_db OWNER TO order_user;
\\l order_db
SQL_EOF
\" 2>&1
" && log_success "Order DB configured on CT $vmid" || log_error "Failed on CT $vmid"
}
# Create the DBIS core database and role on the PostgreSQL instance inside
# LXC container $1 (via ssh to the Proxmox host + pct exec).
# SECURITY NOTE(review): the dbis role password below is a live credential
# committed to version control — rotate it and load it from Vault/env.
# Failures are logged but do not abort the script (no set -e; `|| log_error`).
configure_dbis_db() {
local vmid="$1"
log_info "Configuring DBIS database on CT $vmid..."
ssh -o ConnectTimeout=15 -o StrictHostKeyChecking=no root@${NODE_IP} "
pct exec $vmid -- su - postgres -c \"
psql << 'SQL_EOF'
CREATE DATABASE dbis_core;
CREATE USER dbis WITH PASSWORD '8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771';
GRANT ALL PRIVILEGES ON DATABASE dbis_core TO dbis;
ALTER DATABASE dbis_core OWNER TO dbis;
\\l dbis_core
SQL_EOF
\" 2>&1
" && log_success "DBIS DB configured on CT $vmid" || log_error "Failed on CT $vmid"
}
echo "═══════════════════════════════════════════════════════════"
echo "Configure All Databases"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Wait for PostgreSQL to be ready.
# NOTE(review): a fixed sleep is a race — presumably pg_isready in a retry
# loop would be more reliable; confirm against deployment timing.
log_info "Waiting for PostgreSQL services to be ready..."
sleep 5
# Configure Order databases on containers 10000/10001.
log_info "Configuring Order databases..."
for vmid in 10000 10001; do
configure_order_db "$vmid"
sleep 2
done
# Configure DBIS databases on containers 10100/10101.
log_info "Configuring DBIS databases..."
for vmid in 10100 10101; do
configure_dbis_db "$vmid"
sleep 2
done
echo ""
log_success "Database configuration complete!"

View File

@@ -0,0 +1,70 @@
#!/bin/bash
# Configure All Databases - Create databases and users
set -uo pipefail
# Proxmox host running the database containers. Made env-overridable for
# consistency with the sibling script; the previous hard-coded value is kept
# as the default, so behavior is unchanged when NODE_IP is unset.
NODE_IP="${NODE_IP:-192.168.11.11}"
log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }
log_error() { echo -e "\033[0;31m[ERROR]\033[0m $1"; }
# Create the Order service database and role inside LXC container $1.
# SECURITY NOTE(review): 'order_password' is a hard-coded credential in the
# repo — rotate it and source it from Vault/env. The nested
# ssh → pct exec → su -c → heredoc quoting is fragile; failures are logged
# but non-fatal (no set -e; `|| log_error`).
configure_order_db() {
local vmid="$1"
log_info "Configuring Order database on CT $vmid..."
ssh -o ConnectTimeout=15 -o StrictHostKeyChecking=no root@${NODE_IP} "
pct exec $vmid -- su - postgres -c \"
psql << 'SQL_EOF'
CREATE DATABASE order_db;
CREATE USER order_user WITH PASSWORD 'order_password';
GRANT ALL PRIVILEGES ON DATABASE order_db TO order_user;
ALTER DATABASE order_db OWNER TO order_user;
\\l order_db
SQL_EOF
\" 2>&1
" && log_success "Order DB configured on CT $vmid" || log_error "Failed on CT $vmid"
}
# Create the DBIS core database and role inside LXC container $1.
# SECURITY NOTE(review): the dbis role password below is a live credential
# committed to version control — rotate it and load it from Vault/env.
configure_dbis_db() {
local vmid="$1"
log_info "Configuring DBIS database on CT $vmid..."
ssh -o ConnectTimeout=15 -o StrictHostKeyChecking=no root@${NODE_IP} "
pct exec $vmid -- su - postgres -c \"
psql << 'SQL_EOF'
CREATE DATABASE dbis_core;
CREATE USER dbis WITH PASSWORD '8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771';
GRANT ALL PRIVILEGES ON DATABASE dbis_core TO dbis;
ALTER DATABASE dbis_core OWNER TO dbis;
\\l dbis_core
SQL_EOF
\" 2>&1
" && log_success "DBIS DB configured on CT $vmid" || log_error "Failed on CT $vmid"
}
echo "═══════════════════════════════════════════════════════════"
echo "Configure All Databases"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Wait for PostgreSQL to be ready (fixed sleep — see sibling script note).
log_info "Waiting for PostgreSQL services to be ready..."
sleep 5
# Configure Order databases on containers 10000/10001.
log_info "Configuring Order databases..."
for vmid in 10000 10001; do
configure_order_db "$vmid"
sleep 2
done
# Configure DBIS databases on containers 10100/10101.
log_info "Configuring DBIS databases..."
for vmid in 10100 10101; do
configure_dbis_db "$vmid"
sleep 2
done
echo ""
log_success "Database configuration complete!"

View File

@@ -0,0 +1,464 @@
#!/usr/bin/env bash
# Configure Besu nodes for ChainID 138 with static-nodes.json and permissioned-nodes.json
# Applies to all Besu nodes including new containers: 1504 (besu-sentry-5), 2503 (besu-rpc-4)
#
# This script:
# 1. Collects enodes from all Besu nodes
# 2. Generates static-nodes.json and permissioned-nodes.json
# 3. Deploys configurations to all Besu containers
# 4. Configures discovery settings (disabled for RPC nodes that report chainID 0x1 to MetaMask for wallet compatibility)
# 5. Restarts Besu services
set -euo pipefail

# Load IP configuration (script is in scripts/archive/consolidated/config/; repo root is 3 levels up)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="${PROJECT_ROOT:-$(cd "$SCRIPT_DIR/../../../.." && pwd)}"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Configuration
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
OUTPUT_DIR="${OUTPUT_DIR:-$PROJECT_ROOT/output/chain138-config}"
WORK_DIR="${WORK_DIR:-$OUTPUT_DIR/.work}"

# All Besu nodes for ChainID 138 (VMID -> LAN IP).
# Validators: 1000-1004; Sentries: 1500-1504; RPC: 2500-2508.
# Each entry falls back to a static IP when ip-addresses.conf did not define
# the variable. (FIX: the previous version nested the SAME ${VAR:-...}
# expansion four levels deep, which is exactly equivalent to a single level —
# collapsed for readability; semantics are unchanged.)
declare -A BESU_NODES=(
  # Validators
  [1000]="${IP_VALIDATOR_0:-192.168.11.100}"
  [1001]="${IP_VALIDATOR_1:-192.168.11.101}"
  [1002]="${IP_VALIDATOR_2:-192.168.11.102}"
  [1003]="${IP_VALIDATOR_3:-192.168.11.103}"
  [1004]="${IP_VALIDATOR_4:-192.168.11.104}"
  # Sentries
  [1500]="${IP_BESU_RPC_0:-192.168.11.150}"
  [1501]="${IP_BESU_RPC_1:-192.168.11.151}"
  [1502]="${IP_BESU_RPC_2:-192.168.11.152}"
  [1503]="${IP_BESU_RPC_3:-192.168.11.153}"
  [1504]="${IP_BESU_SENTRY:-192.168.11.154}" # New: besu-sentry-5
  # RPC Nodes
  [2500]="${RPC_ALLTRA_1:-192.168.11.250}"
  [2501]="${RPC_ALI_1:-192.168.11.251}"
  [2502]="${RPC_ALI_2:-192.168.11.252}"
  [2503]="192.168.11.253" # Ali's RPC node (0x8a identity)
  [2504]="192.168.11.254" # Ali's RPC node (0x1 identity)
  [2505]="192.168.11.255" # Luis's RPC node (0x8a identity)
  [2506]="${RPC_LUIS_2:-192.168.11.202}" # Luis's RPC node (0x1 identity)
  [2507]="${RPC_PUTU_1:-192.168.11.203}" # Putu's RPC node (0x8a identity)
  [2508]="${RPC_PUTU_2:-192.168.11.204}" # Putu's RPC node (0x1 identity)
)

# RPC nodes that should have discovery disabled (report chainID 0x1 to MetaMask for wallet compatibility)
# These nodes are connected to ChainID 138 but report 0x1 to MetaMask wallets
# Discovery is disabled to prevent actual connection to Ethereum mainnet while reporting 0x1 to wallets
DISCOVERY_DISABLED_VMIDS=(2500 2503 2504 2505 2506 2507 2508)

# Besu data paths inside the containers
BESU_DATA_PATH="${BESU_DATA_PATH:-/var/lib/besu}"
BESU_PERMISSIONS_PATH="${BESU_PERMISSIONS_PATH:-/var/lib/besu/permissions}"
BESU_GENESIS_PATH="${BESU_GENESIS_PATH:-/genesis}"

# Create work directories
mkdir -p "$WORK_DIR" "$OUTPUT_DIR"
# Return 0 when LXC container $1 exists on $PROXMOX_HOST and reports the
# "running" state; return 1 otherwise.
check_container() {
  local ct_id=$1
  if ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" \
    "pct status $ct_id 2>/dev/null | grep -q running"; then
    return 0
  fi
  return 1
}
# Function to extract enode from container.
# $1 = VMID, $2 = the node's LAN IP. Prints an enode://... URL on stdout and
# returns 0, or returns 1 when no method succeeds. The IP embedded in the
# enode is always rewritten to $2, since the node may self-report localhost.
extract_enode() {
local vmid=$1
local ip=$2
log_info "Extracting enode from VMID $vmid - IP: $ip"
# Method 1: Try RPC admin_nodeInfo (if RPC is enabled with ADMIN API)
local enode_rpc
enode_rpc=$(ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- curl -s -X POST -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"admin_nodeInfo\",\"params\":[],\"id\":1}' http://localhost:8545 2>/dev/null | python3 -c \"import sys, json; data=json.load(sys.stdin); print(data.get('result', {}).get('enode', ''))\" 2>/dev/null || echo \"\"")
if [[ -n "$enode_rpc" ]] && [[ "$enode_rpc" != "null" ]] && [[ "$enode_rpc" != "" ]]; then
# Replace IP in enode with actual IP
echo "$enode_rpc" | sed "s/@[^:]*:/@${ip}:/"
return 0
fi
# Method 2: Extract from nodekey using Besu CLI
local nodekey_path
nodekey_path=$(ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- find ${BESU_DATA_PATH} -name 'nodekey' -o -name 'key' 2>/dev/null | head -1" || echo "")
if [[ -z "$nodekey_path" ]]; then
# Try common locations when find turned up nothing.
for path in "${BESU_DATA_PATH}/key" "${BESU_DATA_PATH}/nodekey" "/data/besu/nodekey" "/data/besu/key"; do
if ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- test -f $path 2>/dev/null"; then
nodekey_path="$path"
break
fi
done
fi
if [[ -n "$nodekey_path" ]]; then
local enode
enode=$(ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- /opt/besu/bin/besu public-key export --node-private-key-file=\"$nodekey_path\" --format=enode 2>/dev/null" || echo "")
if [[ -n "$enode" ]] && [[ "$enode" == enode://* ]]; then
echo "$enode" | sed "s/@[0-9.]*:/@${ip}:/"
return 0
fi
# Fallback: Besu may not support --format=enode; get hex and build enode
# (a secp256k1 public key is 64 bytes = 128 hex chars; default p2p port
# 30303 is assumed here — TODO confirm against each node's config).
local hex
hex=$(ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- /opt/besu/bin/besu public-key export --node-private-key-file=\"$nodekey_path\" 2>/dev/null" | grep -oE '0x[0-9a-fA-F]{128}' | tail -1 | sed 's/^0x//')
if [[ -n "$hex" ]] && [[ ${#hex} -eq 128 ]]; then
echo "enode://${hex}@${ip}:30303"
return 0
fi
fi
log_warn "Could not extract enode for VMID $vmid"
return 1
}
# Collect enodes from every configured Besu node.
# All human-readable logging goes to stderr so that stdout carries ONLY the
# path of the file holding the sorted, de-duplicated enode URLs — callers
# capture it with $(collect_enodes). Exits the script when zero enodes were
# collected.
collect_enodes() {
  log_info "=== Collecting Enodes from All Besu Nodes ===" >&2
  local collected_file="$WORK_DIR/collected-enodes.txt"
  : > "$collected_file"
  local success_count=0
  local fail_count=0
  local vmid ip enode
  for vmid in "${!BESU_NODES[@]}"; do
    ip="${BESU_NODES[$vmid]}"
    if ! check_container "$vmid"; then
      log_warn "Container $vmid not running, skipping..." >&2
      # BUG FIX: ((fail_count++)) returns exit status 1 when the value being
      # incremented is 0, which aborted the entire script under `set -e` on
      # the very first skipped/failed/successful node. Plain assignment is
      # always a safe increment.
      fail_count=$((fail_count + 1))
      continue
    fi
    if enode=$(extract_enode "$vmid" "$ip" 2>/dev/null); then
      echo "$enode" >> "$collected_file"
      log_success "VMID $vmid: $enode" >&2
      success_count=$((success_count + 1))
    else
      log_warn "Failed to extract enode from VMID $vmid" >&2
      fail_count=$((fail_count + 1))
    fi
  done
  log_info "Collected $success_count enodes, $fail_count failed" >&2
  if [[ $success_count -eq 0 ]]; then
    log_error "No enodes collected. Cannot proceed." >&2
    exit 1
  fi
  # Sort and deduplicate so repeated runs produce a stable file.
  sort -u "$collected_file" > "$collected_file.tmp"
  mv "$collected_file.tmp" "$collected_file"
  echo "$collected_file"
}
# Function to generate static-nodes.json (logs to stderr so only path is on stdout).
# $1 = path to the collected-enodes file (one enode:// URL per line).
# Prints the generated file's path on stdout. The JSON assembly is delegated
# to an inline python3 program; inputs/outputs are passed via environment
# variables (ENODES_FILE / OUTPUT_FILE) to avoid quoting issues in argv.
generate_static_nodes() {
local enodes_file=$1
local output_file="$OUTPUT_DIR/static-nodes.json"
log_info "=== Generating static-nodes.json ===" >&2
# Create JSON array from enodes (use env vars to avoid heredoc argv issues).
# The python filters to lines starting with "enode://", sorts them, and
# writes a pretty-printed JSON array; its status messages go to stderr.
ENODES_FILE="$enodes_file" OUTPUT_FILE="$output_file" python3 -c '
import json
import os
enodes_file = os.environ.get("ENODES_FILE", "").strip()
output_file = os.environ.get("OUTPUT_FILE", "").strip()
if not enodes_file or not output_file:
print("Error: ENODES_FILE and OUTPUT_FILE must be set", file=__import__("sys").stderr)
exit(1)
enodes = []
try:
with open(enodes_file, "r") as f:
for line in f:
enode = line.strip()
if enode and enode.startswith("enode://"):
enodes.append(enode)
except FileNotFoundError:
print("Error: File not found: " + enodes_file, file=__import__("sys").stderr)
exit(1)
enodes.sort()
with open(output_file, "w") as f:
json.dump(enodes, f, indent=2)
print("Generated static-nodes.json with {} nodes".format(len(enodes)), file=__import__("sys").stderr)
'
log_success "Generated: $output_file" >&2
echo "$output_file"
}
# Function to generate permissioned-nodes.json (logs to stderr so only path is on stdout)
# The permissioned-nodes list is identical to the static-nodes list here, so
# the file is produced as a plain copy of the previously generated
# static-nodes.json. $1 (the raw enodes file) is accepted for signature
# parity with generate_static_nodes but is not read.
generate_permissioned_nodes() {
  local enodes_file=$1
  local target="$OUTPUT_DIR/permissioned-nodes.json"
  log_info "=== Generating permissioned-nodes.json ===" >&2
  cp "$OUTPUT_DIR/static-nodes.json" "$target"
  log_success "Generated: $target" >&2
  echo "$target"
}
# Function to deploy files to container
# $1 = VMID, $2 = local static-nodes.json path, $3 = local permissioned-nodes.json path.
# Copies both files to the Proxmox host via scp, pushes them into the
# container with `pct push`, fixes ownership (besu:besu, falling back to
# root:root) and makes them world-readable. All remote steps are best effort.
deploy_to_container() {
local vmid=$1
local static_nodes_file=$2
local permissioned_nodes_file=$3
# Ensure path vars are single-line (no log/ANSI leakage into scp)
static_nodes_file=$(printf '%s' "$static_nodes_file" | head -1 | tr -d '\r\n')
permissioned_nodes_file=$(printf '%s' "$permissioned_nodes_file" | head -1 | tr -d '\r\n')
log_info "Deploying to VMID $vmid..."
# Create directories if they don't exist
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- mkdir -p ${BESU_DATA_PATH} ${BESU_PERMISSIONS_PATH} ${BESU_GENESIS_PATH} 2>/dev/null || true"
# Copy static-nodes.json (host must be clean; PROXMOX_HOST is set at top)
# -- pushed to BOTH the data dir and the genesis dir so either config path works
scp -o StrictHostKeyChecking=accept-new \
"$static_nodes_file" \
"root@${PROXMOX_HOST}:/tmp/static-nodes.json"
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct push $vmid /tmp/static-nodes.json ${BESU_DATA_PATH}/static-nodes.json && pct push $vmid /tmp/static-nodes.json ${BESU_GENESIS_PATH}/static-nodes.json && pct exec $vmid -- chown -R besu:besu ${BESU_DATA_PATH} ${BESU_GENESIS_PATH} 2>/dev/null || pct exec $vmid -- chown -R root:root ${BESU_DATA_PATH} ${BESU_GENESIS_PATH} 2>/dev/null || true"
# Copy permissioned-nodes.json
scp -o StrictHostKeyChecking=accept-new \
"$permissioned_nodes_file" \
"root@${PROXMOX_HOST}:/tmp/permissioned-nodes.json"
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct push $vmid /tmp/permissioned-nodes.json ${BESU_PERMISSIONS_PATH}/permissioned-nodes.json && pct exec $vmid -- chown -R besu:besu ${BESU_PERMISSIONS_PATH} 2>/dev/null || pct exec $vmid -- chown -R root:root ${BESU_PERMISSIONS_PATH} 2>/dev/null || true"
# Set permissions
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- chmod 644 ${BESU_DATA_PATH}/static-nodes.json ${BESU_GENESIS_PATH}/static-nodes.json ${BESU_PERMISSIONS_PATH}/permissioned-nodes.json 2>/dev/null || true"
log_success "Deployed to VMID $vmid"
}
# Function to configure discovery settings
# $1 = VMID, $2 = "true" to disable P2P discovery, anything else to enable it.
# Adjusts discovery-enabled in the first Besu TOML config found inside the
# container and the --discovery-enabled CLI flag in its systemd unit, then
# reloads systemd.
configure_discovery() {
  local vmid=$1
  local disable=$2
  log_info "Configuring discovery for VMID $vmid - disable=${disable}"
  # Find Besu config file
  local config_files=(
    "/etc/besu/config.toml"
    "/etc/besu/config-rpc-core.toml"
    "/etc/besu/config-rpc-perm.toml"
    "/etc/besu/config-rpc-public.toml"
    "/etc/besu/config-validator.toml"
    "/etc/besu/config-sentry.toml"
  )
  local config_found=false
  local config_file
  for config_file in "${config_files[@]}"; do
    if ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- test -f $config_file 2>/dev/null"; then
      config_found=true
      if [[ "$disable" == "true" ]]; then
        # Disable discovery (all commands run inside container via bash -c)
        ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- bash -c \"sed -i 's/^discovery-enabled=.*/discovery-enabled=false/' $config_file 2>/dev/null || true; grep -q 'discovery-enabled' $config_file 2>/dev/null || echo 'discovery-enabled=false' >> $config_file\""
        log_success "Discovery disabled in $config_file"
      else
        # Enable discovery (default)
        ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- sed -i 's/^discovery-enabled=.*/discovery-enabled=true/' $config_file"
        log_success "Discovery enabled in $config_file"
      fi
      break
    fi
  done
  # Also check systemd service file for CLI flags
  if ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" \
    "pct exec $vmid -- test -f /etc/systemd/system/besu*.service 2>/dev/null"; then
    local service_file
    service_file=$(ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- find /etc/systemd/system -name 'besu*.service' | head -1")
    if [[ "$disable" == "true" ]]; then
      # Force the CLI flag to false; if no flag is present, inject one after
      # ExecStart=. Everything runs inside the container via bash -c (the
      # previous version ran the grep and fallback sed on the Proxmox host,
      # where $service_file does not exist, so the fallback never worked).
      # `grep -q --` is required so the leading dashes of the pattern are not
      # parsed as grep options.
      ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- bash -c \"sed -i 's/--discovery-enabled=[^ ]*/--discovery-enabled=false/' $service_file; grep -q -- '--discovery-enabled' $service_file || sed -i 's|ExecStart=|ExecStart=\\\\n --discovery-enabled=false |' $service_file\""
    else
      # Flip an explicit false flag back to true, inside the container
      ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- bash -c \"sed -i 's/--discovery-enabled=false/--discovery-enabled=true/' $service_file\""
    fi
    ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- systemctl daemon-reload 2>/dev/null || true"
  fi
  if [[ "$config_found" == "false" ]]; then
    log_warn "No config file found for VMID $vmid, discovery setting may need manual configuration"
  fi
}
# Function to update Besu config to use correct file paths
# $1 = VMID. Points static-nodes-file and permissions-nodes-config-file at
# the locations deploy_to_container pushed them to (the data-dir copy; the
# genesis-dir copy is a spare), and switches node permissioning on. Only the
# first existing config file from the candidate list is edited.
update_config_paths() {
  local vmid=$1
  log_info "Updating config paths for VMID $vmid..."
  # Find Besu config file
  local config_files=(
    "/etc/besu/config.toml"
    "/etc/besu/config-rpc-core.toml"
    "/etc/besu/config-rpc-perm.toml"
    "/etc/besu/config-rpc-public.toml"
    "/etc/besu/config-validator.toml"
    "/etc/besu/config-sentry.toml"
  )
  local config_file
  for config_file in "${config_files[@]}"; do
    if ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- test -f $config_file 2>/dev/null"; then
      # Each edit runs entirely inside the container via bash -c (the
      # previous `|| sed` / `|| echo` fallbacks executed on the Proxmox
      # host). Also, `sed -i` exits 0 even when nothing matched, so a grep
      # probe -- not sed's exit status -- decides whether the key must be
      # appended.
      # Update static-nodes-file path
      ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- bash -c \"sed -i 's|^static-nodes-file=.*|static-nodes-file=\\\"${BESU_DATA_PATH}/static-nodes.json\\\"|' $config_file; grep -q '^static-nodes-file=' $config_file || echo 'static-nodes-file=\\\"${BESU_DATA_PATH}/static-nodes.json\\\"' >> $config_file\""
      # Update permissions-nodes-config-file path
      ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- bash -c \"sed -i 's|^permissions-nodes-config-file=.*|permissions-nodes-config-file=\\\"${BESU_PERMISSIONS_PATH}/permissioned-nodes.json\\\"|' $config_file; grep -q '^permissions-nodes-config-file=' $config_file || echo 'permissions-nodes-config-file=\\\"${BESU_PERMISSIONS_PATH}/permissioned-nodes.json\\\"' >> $config_file\""
      # Ensure permissions are enabled
      ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- bash -c \"sed -i 's/^permissions-nodes-config-file-enabled=.*/permissions-nodes-config-file-enabled=true/' $config_file; grep -q '^permissions-nodes-config-file-enabled=' $config_file || echo 'permissions-nodes-config-file-enabled=true' >> $config_file\""
      log_success "Updated paths in $config_file"
      break
    fi
  done
}
# Function to restart Besu service
# $1 = VMID. Locates the Besu systemd unit inside the container (first via
# `systemctl list-units`, then by probing common unit names), restarts it,
# and reports whether it came back up. Non-fatal on failure (warns only).
restart_besu_service() {
local vmid=$1
log_info "Restarting Besu service on VMID $vmid..."
# Find service name
# NOTE(review): `systemctl list-units` only shows loaded units, so a unit
# that is stopped/not loaded may be missed here -- the fallback loop below
# has the same limitation; consider list-unit-files. TODO confirm.
local service_name
service_name=$(ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- systemctl list-units --type=service | grep -i besu | awk '{print \$1}' | head -1" || echo "")
if [[ -z "$service_name" ]]; then
# Try common service names
for name in "besu" "besu-rpc" "besu-validator" "besu-sentry"; do
if ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- systemctl list-units --type=service | grep -q $name"; then
service_name="$name.service"
break
fi
done
fi
if [[ -n "$service_name" ]]; then
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- systemctl restart $service_name 2>/dev/null || true"
sleep 1
if ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "pct exec $vmid -- systemctl is-active --quiet $service_name 2>/dev/null"; then
log_success "Service $service_name restarted successfully"
else
log_warn "Service $service_name may not be running"
fi
else
log_warn "Could not find Besu service for VMID $vmid"
fi
}
# Main execution
# Orchestrates the full flow: collect enodes from every node, generate the
# static/permissioned node files, then per container deploy the files, fix
# config paths, set discovery on/off (off for VMIDs in
# DISCOVERY_DISABLED_VMIDS) and restart the Besu service.
main() {
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Besu ChainID 138 Configuration Script"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Step 1: Collect enodes (capture only the path line)
local enodes_file
enodes_file=$(collect_enodes | head -1 | tr -d '\r\n')
[[ -z "$enodes_file" || ! -f "$enodes_file" ]] && { log_error "Collected enodes file missing: $enodes_file"; exit 1; }
# Step 2: Generate configuration files
local static_nodes_file
static_nodes_file=$(generate_static_nodes "$enodes_file" | head -1 | tr -d '\r\n')
local permissioned_nodes_file
permissioned_nodes_file=$(generate_permissioned_nodes "$enodes_file" | head -1 | tr -d '\r\n')
# Step 3: Deploy to all containers
log_info "=== Deploying Configurations to All Besu Nodes ==="
for vmid in "${!BESU_NODES[@]}"; do
if ! check_container "$vmid"; then
log_warn "Container $vmid not running, skipping..."
continue
fi
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Processing VMID $vmid"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Deploy files
deploy_to_container "$vmid" "$static_nodes_file" "$permissioned_nodes_file"
# Update config paths
update_config_paths "$vmid"
# Configure discovery (disable for specific RPC nodes)
local disable_discovery=false
for disabled_vmid in "${DISCOVERY_DISABLED_VMIDS[@]}"; do
if [[ "$vmid" == "$disabled_vmid" ]]; then
disable_discovery=true
break
fi
done
configure_discovery "$vmid" "$disable_discovery"
# Restart service
restart_besu_service "$vmid"
done
# Cleanup temporary copies left on the Proxmox host by deploy_to_container
ssh -o StrictHostKeyChecking=accept-new "root@${PROXMOX_HOST}" "rm -f /tmp/static-nodes.json /tmp/permissioned-nodes.json" 2>/dev/null || true
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "Configuration Complete!"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Generated files:"
log_info "  - $static_nodes_file"
log_info "  - $permissioned_nodes_file"
echo ""
log_info "Next steps:"
log_info "1. Verify services are running on all containers"
log_info "2. Check peer connections using curl command"
log_info "3. Verify discovery settings on RPC nodes - should be disabled for 2503"
}
# Run main function
main "$@"

View File

@@ -0,0 +1,289 @@
#!/usr/bin/env bash
# Configure Besu RPC nodes (2500, 2501, 2502) with correct configurations
# This script ensures each RPC node has the correct config based on its role
#
# Node Roles:
# 2500 = Core - No public access, all features enabled (ADMIN, DEBUG, TRACE)
# 2501 = Prv (Permissioned) - Public permissioned access, non-Admin features only
# 2502 = Pub (Public) - Public non-auth access, minimal wallet features
set -euo pipefail
# Resolve paths once. The duplicate SCRIPT_DIR/PROJECT_ROOT recomputation
# that previously followed the `source` line has been removed
# (ip-addresses.conf is not expected to redefine these -- TODO confirm).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Load IP configuration (optional; hardcoded defaults are used when absent)
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
CONFIG_DIR="$PROJECT_ROOT/smom-dbis-138/config"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Check if running on Proxmox host
if ! command -v pct &>/dev/null; then
log_error "This script must be run on Proxmox host (pct command not found)"
exit 1
fi
# RPC Node Configuration Mapping: VMID -> config file deployed to /etc/besu
declare -A RPC_CONFIGS
RPC_CONFIGS[2500]="config-rpc-core.toml"
RPC_CONFIGS[2501]="config-rpc-perm.toml"
RPC_CONFIGS[2502]="config-rpc-public.toml"
# Human-readable role descriptions used in log output
declare -A RPC_ROLES
RPC_ROLES[2500]="Core (no public access, all features)"
RPC_ROLES[2501]="Permissioned (public permissioned, non-Admin features)"
RPC_ROLES[2502]="Public (public non-auth, minimal wallet features)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Besu RPC Nodes Configuration Script"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Function to check if container is running
# Ensures LXC container $1 is up, starting it (plus a settle delay) when it
# is not. Returns 0 when running/started, 1 when the start attempt fails.
check_container() {
  local ctid=$1
  if pct status "$ctid" 2>/dev/null | grep -q running; then
    return 0
  fi
  log_warn "Container $ctid is not running. Starting..."
  if ! pct start "$ctid"; then
    log_error "Failed to start container $ctid"
    return 1
  fi
  # Give the container a moment to boot before callers exec into it
  sleep 5
  return 0
}
# Function to copy config file to container
# Pushes $CONFIG_DIR/$2 into container $1 at /etc/besu/$2 and hands
# ownership to the besu user (best effort). Returns non-zero when the source
# file is missing or the push fails.
copy_config() {
  local ctid=$1 cfg=$2
  local src="$CONFIG_DIR/$cfg"
  local dst="/etc/besu/$cfg"
  if [[ ! -f "$src" ]]; then
    log_error "Config file not found: $src"
    return 1
  fi
  log_info "Copying $cfg to VMID $ctid..."
  if ! pct push "$ctid" "$src" "$dst"; then
    log_error "Failed to copy config to container $ctid"
    return 1
  fi
  # Set ownership
  pct exec "$ctid" -- chown besu:besu "$dst" 2>/dev/null || true
  log_success "Config copied to $ctid"
  return 0
}
# Function to update systemd service file
# $1 = VMID, $2 = config filename. Rewrites the --config-file argument of the
# container's besu-rpc.service unit to point at the new config, then reloads
# systemd. Best effort: a failed sed only warns.
update_service() {
local vmid=$1
local config_file=$2
log_info "Updating systemd service for VMID $vmid..."
# Update service file to use correct config
# (assumes ExecStart references the literal string $BESU_CONFIG/<file> --
# TODO confirm against the deployed unit file)
pct exec "$vmid" -- sed -i "s|--config-file=\$BESU_CONFIG/[^ ]*|--config-file=\$BESU_CONFIG/$config_file|g" \
/etc/systemd/system/besu-rpc.service 2>/dev/null || {
log_warn "Could not update service file (may need manual update)"
}
pct exec "$vmid" -- systemctl daemon-reload 2>/dev/null || true
}
# Function to verify configuration
# $1 = VMID, $2 = expected config filename under /etc/besu. Checks the file
# exists in the container, then runs role-specific sanity greps (Core must
# expose ADMIN and disable discovery; Permissioned must not expose ADMIN but
# enable account permissions; Public must be minimal APIs, no permissions).
# Mismatches only warn; returns 1 only when the file itself is missing.
verify_config() {
local vmid=$1
local expected_config=$2
local role="${RPC_ROLES[$vmid]}"
log_info "Verifying configuration for VMID $vmid ($role)..."
local config_path="/etc/besu/$expected_config"
# Check if config file exists
if ! pct exec "$vmid" -- test -f "$config_path" 2>/dev/null; then
log_error "Config file not found: $config_path"
return 1
fi
log_success "Config file exists: $config_path"
# Verify specific settings based on node type
case $vmid in
2500)
# Core: Should have ADMIN, DEBUG, TRACE, discovery disabled
log_info "  Checking Core RPC settings..."
if pct exec "$vmid" -- grep -q 'rpc-http-api=.*"ADMIN"' "$config_path" 2>/dev/null; then
log_success "  ✓ ADMIN API enabled"
else
log_warn "  ✗ ADMIN API not found (should be enabled)"
fi
if pct exec "$vmid" -- grep -q 'discovery-enabled=false' "$config_path" 2>/dev/null; then
log_success "  ✓ Discovery disabled (no public routing)"
else
log_warn "  ✗ Discovery may be enabled (should be disabled)"
fi
;;
2501)
# Permissioned: Should NOT have ADMIN, should have account permissions
log_info "  Checking Permissioned RPC settings..."
if ! pct exec "$vmid" -- grep -q 'rpc-http-api=.*"ADMIN"' "$config_path" 2>/dev/null; then
log_success "  ✓ ADMIN API not enabled (correct)"
else
log_warn "  ✗ ADMIN API found (should be removed)"
fi
if pct exec "$vmid" -- grep -q 'permissions-accounts-config-file-enabled=true' "$config_path" 2>/dev/null; then
log_success "  ✓ Account permissions enabled"
else
log_warn "  ✗ Account permissions not enabled"
fi
;;
2502)
# Public: Should have minimal APIs (ETH, NET, WEB3 only)
log_info "  Checking Public RPC settings..."
# `|| echo ""` keeps the assignment safe under `set -e` when grep matches nothing
local api_line=$(pct exec "$vmid" -- grep 'rpc-http-api=' "$config_path" 2>/dev/null || echo "")
if echo "$api_line" | grep -q '"ETH"' && \
echo "$api_line" | grep -q '"NET"' && \
echo "$api_line" | grep -q '"WEB3"' && \
! echo "$api_line" | grep -q '"ADMIN"'; then
log_success "  ✓ Minimal APIs enabled (ETH, NET, WEB3)"
else
log_warn "  ✗ API configuration may not be minimal"
fi
if ! pct exec "$vmid" -- grep -q 'permissions-accounts-config-file-enabled=true' "$config_path" 2>/dev/null; then
log_success "  ✓ No account permissions (public non-auth)"
else
log_warn "  ✗ Account permissions enabled (should be disabled for public)"
fi
;;
esac
return 0
}
# Function to check if nodes are reversed
# Heuristic sanity check: reads the rpc-http-api line from 2501's and 2502's
# deployed configs and warns when either exposes APIs its role should not
# (a sign the two configs were swapped). Warn-only; never fails.
check_reversed() {
log_info ""
log_info "Checking if 2501 and 2502 are reversed..."
# `|| echo ""` keeps these assignments safe when the grep finds nothing
local vmid_2501_config=$(pct exec 2501 -- grep 'rpc-http-api=' /etc/besu/config-rpc-perm.toml 2>/dev/null | head -1 || echo "")
local vmid_2502_config=$(pct exec 2502 -- grep 'rpc-http-api=' /etc/besu/config-rpc-public.toml 2>/dev/null | head -1 || echo "")
# Check if 2501 has ADMIN (shouldn't) or 2502 has more than minimal APIs
if echo "$vmid_2501_config" | grep -q '"ADMIN"'; then
log_warn "VMID 2501 has ADMIN API - may need to check if reversed"
fi
if echo "$vmid_2502_config" | grep -q '"ADMIN"\|"TXPOOL"\|"QBFT"'; then
log_warn "VMID 2502 has non-minimal APIs - may need to check if reversed"
fi
log_info "Current configuration check complete"
}
# Main deployment
# For each RPC VMID: ensure the container is up, stop the Besu service, push
# the role-specific config, repoint the systemd unit, verify settings, and
# restart. Ends with the reversed-config heuristic and operator hints.
main() {
log_info "Starting RPC nodes configuration..."
log_info ""
# Process each RPC node
for vmid in 2500 2501 2502; do
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Processing VMID $vmid: ${RPC_ROLES[$vmid]}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Check container
if ! check_container "$vmid"; then
log_error "Skipping VMID $vmid (container not available)"
continue
fi
# Get config file
local config_file="${RPC_CONFIGS[$vmid]}"
if [[ -z "$config_file" ]]; then
log_error "No config mapping for VMID $vmid"
continue
fi
# Stop service
log_info "Stopping Besu service..."
pct exec "$vmid" -- systemctl stop besu-rpc.service 2>/dev/null || true
sleep 2
# Copy config
if ! copy_config "$vmid" "$config_file"; then
log_error "Failed to copy config for VMID $vmid"
continue
fi
# Update service
update_service "$vmid" "$config_file"
# Verify config
verify_config "$vmid" "$config_file"
# Start service
log_info "Starting Besu service..."
pct exec "$vmid" -- systemctl start besu-rpc.service 2>/dev/null || {
log_error "Failed to start service on VMID $vmid"
log_info "Check logs: pct exec $vmid -- journalctl -u besu-rpc.service -n 50"
continue
}
sleep 3
# Check service status
if pct exec "$vmid" -- systemctl is-active --quiet besu-rpc.service 2>/dev/null; then
log_success "Service started successfully on VMID $vmid"
else
log_warn "Service may not be running on VMID $vmid"
log_info "Check status: pct exec $vmid -- systemctl status besu-rpc.service"
fi
done
# Check if reversed
check_reversed
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "Configuration complete!"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Next steps:"
log_info "1. Verify services are running:"
log_info "   pct exec 2500 -- systemctl status besu-rpc.service"
log_info "   pct exec 2501 -- systemctl status besu-rpc.service"
log_info "   pct exec 2502 -- systemctl status besu-rpc.service"
log_info ""
log_info "2. Test RPC endpoints:"
log_info "   curl -X POST http://${RPC_ALLTRA_1:-192.168.11.250}:8545 -H 'Content-Type: application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}'"
log_info ""
log_info "3. Check logs if issues:"
log_info "   pct exec 2500 -- journalctl -u besu-rpc.service -f"
}
# Run main function
main "$@"

View File

@@ -0,0 +1,283 @@
#!/usr/bin/env bash
# Configure Besu RPC nodes (2500, 2501, 2502) with correct configurations
# This script ensures each RPC node has the correct config based on its role
#
# Node Roles:
# 2500 = Core - No public access, all features enabled (ADMIN, DEBUG, TRACE)
# 2501 = Prv (Permissioned) - Public permissioned access, non-Admin features only
# 2502 = Pub (Public) - Public non-auth access, minimal wallet features
#
# NOTE(review): this file duplicates the sibling RPC-config script except
# that it does not source config/ip-addresses.conf -- the curl example near
# the end uses a hardcoded IP. Consider consolidating the two.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
CONFIG_DIR="$PROJECT_ROOT/smom-dbis-138/config"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Check if running on Proxmox host
if ! command -v pct &>/dev/null; then
log_error "This script must be run on Proxmox host (pct command not found)"
exit 1
fi
# RPC Node Configuration Mapping: VMID -> config file deployed to /etc/besu
declare -A RPC_CONFIGS
RPC_CONFIGS[2500]="config-rpc-core.toml"
RPC_CONFIGS[2501]="config-rpc-perm.toml"
RPC_CONFIGS[2502]="config-rpc-public.toml"
# Human-readable role descriptions used in log output
declare -A RPC_ROLES
RPC_ROLES[2500]="Core (no public access, all features)"
RPC_ROLES[2501]="Permissioned (public permissioned, non-Admin features)"
RPC_ROLES[2502]="Public (public non-auth, minimal wallet features)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Besu RPC Nodes Configuration Script"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Function to check if container is running
# Ensures LXC container $1 is up, starting it (plus a settle delay) when it
# is not. Returns 0 when running/started, 1 when the start attempt fails.
check_container() {
  local ctid=$1
  if pct status "$ctid" 2>/dev/null | grep -q running; then
    return 0
  fi
  log_warn "Container $ctid is not running. Starting..."
  if ! pct start "$ctid"; then
    log_error "Failed to start container $ctid"
    return 1
  fi
  # Give the container a moment to boot before callers exec into it
  sleep 5
  return 0
}
# Function to copy config file to container
# Pushes $CONFIG_DIR/$2 into container $1 at /etc/besu/$2 and hands
# ownership to the besu user (best effort). Returns non-zero when the source
# file is missing or the push fails.
copy_config() {
  local ctid=$1 cfg=$2
  local src="$CONFIG_DIR/$cfg"
  local dst="/etc/besu/$cfg"
  if [[ ! -f "$src" ]]; then
    log_error "Config file not found: $src"
    return 1
  fi
  log_info "Copying $cfg to VMID $ctid..."
  if ! pct push "$ctid" "$src" "$dst"; then
    log_error "Failed to copy config to container $ctid"
    return 1
  fi
  # Set ownership
  pct exec "$ctid" -- chown besu:besu "$dst" 2>/dev/null || true
  log_success "Config copied to $ctid"
  return 0
}
# Function to update systemd service file
# $1 = VMID, $2 = config filename. Rewrites the --config-file argument of the
# container's besu-rpc.service unit to point at the new config, then reloads
# systemd. Best effort: a failed sed only warns.
update_service() {
local vmid=$1
local config_file=$2
log_info "Updating systemd service for VMID $vmid..."
# Update service file to use correct config
# (assumes ExecStart references the literal string $BESU_CONFIG/<file> --
# TODO confirm against the deployed unit file)
pct exec "$vmid" -- sed -i "s|--config-file=\$BESU_CONFIG/[^ ]*|--config-file=\$BESU_CONFIG/$config_file|g" \
/etc/systemd/system/besu-rpc.service 2>/dev/null || {
log_warn "Could not update service file (may need manual update)"
}
pct exec "$vmid" -- systemctl daemon-reload 2>/dev/null || true
}
# Function to verify configuration
# $1 = VMID, $2 = expected config filename under /etc/besu. Checks the file
# exists in the container, then runs role-specific sanity greps (Core must
# expose ADMIN and disable discovery; Permissioned must not expose ADMIN but
# enable account permissions; Public must be minimal APIs, no permissions).
# Mismatches only warn; returns 1 only when the file itself is missing.
verify_config() {
local vmid=$1
local expected_config=$2
local role="${RPC_ROLES[$vmid]}"
log_info "Verifying configuration for VMID $vmid ($role)..."
local config_path="/etc/besu/$expected_config"
# Check if config file exists
if ! pct exec "$vmid" -- test -f "$config_path" 2>/dev/null; then
log_error "Config file not found: $config_path"
return 1
fi
log_success "Config file exists: $config_path"
# Verify specific settings based on node type
case $vmid in
2500)
# Core: Should have ADMIN, DEBUG, TRACE, discovery disabled
log_info "  Checking Core RPC settings..."
if pct exec "$vmid" -- grep -q 'rpc-http-api=.*"ADMIN"' "$config_path" 2>/dev/null; then
log_success "  ✓ ADMIN API enabled"
else
log_warn "  ✗ ADMIN API not found (should be enabled)"
fi
if pct exec "$vmid" -- grep -q 'discovery-enabled=false' "$config_path" 2>/dev/null; then
log_success "  ✓ Discovery disabled (no public routing)"
else
log_warn "  ✗ Discovery may be enabled (should be disabled)"
fi
;;
2501)
# Permissioned: Should NOT have ADMIN, should have account permissions
log_info "  Checking Permissioned RPC settings..."
if ! pct exec "$vmid" -- grep -q 'rpc-http-api=.*"ADMIN"' "$config_path" 2>/dev/null; then
log_success "  ✓ ADMIN API not enabled (correct)"
else
log_warn "  ✗ ADMIN API found (should be removed)"
fi
if pct exec "$vmid" -- grep -q 'permissions-accounts-config-file-enabled=true' "$config_path" 2>/dev/null; then
log_success "  ✓ Account permissions enabled"
else
log_warn "  ✗ Account permissions not enabled"
fi
;;
2502)
# Public: Should have minimal APIs (ETH, NET, WEB3 only)
log_info "  Checking Public RPC settings..."
# `|| echo ""` keeps the assignment safe under `set -e` when grep matches nothing
local api_line=$(pct exec "$vmid" -- grep 'rpc-http-api=' "$config_path" 2>/dev/null || echo "")
if echo "$api_line" | grep -q '"ETH"' && \
echo "$api_line" | grep -q '"NET"' && \
echo "$api_line" | grep -q '"WEB3"' && \
! echo "$api_line" | grep -q '"ADMIN"'; then
log_success "  ✓ Minimal APIs enabled (ETH, NET, WEB3)"
else
log_warn "  ✗ API configuration may not be minimal"
fi
if ! pct exec "$vmid" -- grep -q 'permissions-accounts-config-file-enabled=true' "$config_path" 2>/dev/null; then
log_success "  ✓ No account permissions (public non-auth)"
else
log_warn "  ✗ Account permissions enabled (should be disabled for public)"
fi
;;
esac
return 0
}
# Function to check if nodes are reversed
# Heuristic sanity check: reads the rpc-http-api line from 2501's and 2502's
# deployed configs and warns when either exposes APIs its role should not
# (a sign the two configs were swapped). Warn-only; never fails.
check_reversed() {
log_info ""
log_info "Checking if 2501 and 2502 are reversed..."
# `|| echo ""` keeps these assignments safe when the grep finds nothing
local vmid_2501_config=$(pct exec 2501 -- grep 'rpc-http-api=' /etc/besu/config-rpc-perm.toml 2>/dev/null | head -1 || echo "")
local vmid_2502_config=$(pct exec 2502 -- grep 'rpc-http-api=' /etc/besu/config-rpc-public.toml 2>/dev/null | head -1 || echo "")
# Check if 2501 has ADMIN (shouldn't) or 2502 has more than minimal APIs
if echo "$vmid_2501_config" | grep -q '"ADMIN"'; then
log_warn "VMID 2501 has ADMIN API - may need to check if reversed"
fi
if echo "$vmid_2502_config" | grep -q '"ADMIN"\|"TXPOOL"\|"QBFT"'; then
log_warn "VMID 2502 has non-minimal APIs - may need to check if reversed"
fi
log_info "Current configuration check complete"
}
# Main deployment
# For each RPC VMID: ensure the container is up, stop the Besu service, push
# the role-specific config, repoint the systemd unit, verify settings, and
# restart. Ends with the reversed-config heuristic and operator hints.
main() {
log_info "Starting RPC nodes configuration..."
log_info ""
# Process each RPC node
for vmid in 2500 2501 2502; do
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_info "Processing VMID $vmid: ${RPC_ROLES[$vmid]}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Check container
if ! check_container "$vmid"; then
log_error "Skipping VMID $vmid (container not available)"
continue
fi
# Get config file
local config_file="${RPC_CONFIGS[$vmid]}"
if [[ -z "$config_file" ]]; then
log_error "No config mapping for VMID $vmid"
continue
fi
# Stop service
log_info "Stopping Besu service..."
pct exec "$vmid" -- systemctl stop besu-rpc.service 2>/dev/null || true
sleep 2
# Copy config
if ! copy_config "$vmid" "$config_file"; then
log_error "Failed to copy config for VMID $vmid"
continue
fi
# Update service
update_service "$vmid" "$config_file"
# Verify config
verify_config "$vmid" "$config_file"
# Start service
log_info "Starting Besu service..."
pct exec "$vmid" -- systemctl start besu-rpc.service 2>/dev/null || {
log_error "Failed to start service on VMID $vmid"
log_info "Check logs: pct exec $vmid -- journalctl -u besu-rpc.service -n 50"
continue
}
sleep 3
# Check service status
if pct exec "$vmid" -- systemctl is-active --quiet besu-rpc.service 2>/dev/null; then
log_success "Service started successfully on VMID $vmid"
else
log_warn "Service may not be running on VMID $vmid"
log_info "Check status: pct exec $vmid -- systemctl status besu-rpc.service"
fi
done
# Check if reversed
check_reversed
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "Configuration complete!"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Next steps:"
log_info "1. Verify services are running:"
log_info "   pct exec 2500 -- systemctl status besu-rpc.service"
log_info "   pct exec 2501 -- systemctl status besu-rpc.service"
log_info "   pct exec 2502 -- systemctl status besu-rpc.service"
log_info ""
log_info "2. Test RPC endpoints:"
log_info "   curl -X POST http://192.168.11.250:8545 -H 'Content-Type: application/json' --data '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}'"
log_info ""
log_info "3. Check logs if issues:"
log_info "   pct exec 2500 -- journalctl -u besu-rpc.service -f"
}
# Run main function
main "$@"

View File

@@ -0,0 +1,336 @@
#!/bin/bash
# Configure and start Blockscout with correct settings
# Run this INSIDE the Blockscout container (VMID 5000)
set -euo pipefail

# Load IP configuration overrides when present; every value consumed from it
# below carries a hard-coded LAN default, so a missing file is not fatal.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Configuration
CHAIN_ID=138
RPC_URL="http://${RPC_ALLTRA_1:-192.168.11.250}:8545"
WS_URL="ws://${RPC_ALLTRA_1:-192.168.11.250}:8546"
# BUGFIX: the previous unguarded "${IP_BLOCKSCOUT}" aborted under `set -u`
# whenever ip-addresses.conf was absent; fall back to the known host IP.
BLOCKSCOUT_HOST="${IP_BLOCKSCOUT:-192.168.11.140}"
DB_PASSWORD="${DB_PASSWORD:-blockscout}"

# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }

echo "════════════════════════════════════════════════════════"
echo "Blockscout Configuration and Startup"
echo "════════════════════════════════════════════════════════"
echo ""
echo "Configuration:"
echo " Chain ID: $CHAIN_ID"
echo " RPC URL: $RPC_URL"
echo " WS URL: $WS_URL"
echo " Host: $BLOCKSCOUT_HOST"
echo ""
# Step 1: Check Docker
# docker.io (and docker-compose below) are installed only when missing, so
# reruns of this script are idempotent.
log_info "Step 1: Checking Docker..."
if ! command -v docker &> /dev/null; then
log_error "Docker not found. Installing..."
apt-get update -qq
apt-get install -y -qq docker.io
systemctl enable docker
systemctl start docker
fi
log_success "Docker: $(docker --version 2>/dev/null | head -1 || echo 'installed')"
# Accept either the standalone docker-compose binary or the compose plugin.
if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null 2>&1; then
log_error "Docker Compose not found. Installing..."
apt-get install -y -qq docker-compose
fi
log_success "Docker Compose: available"
echo ""
# Step 2: Find or create Blockscout directory
# Prefer an existing install location; default to /opt/blockscout otherwise.
log_info "Step 2: Locating Blockscout directory..."
if [ -d /opt/blockscout ]; then
BLOCKSCOUT_DIR="/opt/blockscout"
elif [ -d /root/blockscout ]; then
BLOCKSCOUT_DIR="/root/blockscout"
else
BLOCKSCOUT_DIR="/opt/blockscout"
mkdir -p "$BLOCKSCOUT_DIR"
log_info "Created directory: $BLOCKSCOUT_DIR"
fi
# NOTE: the docker-compose invocations later in the script rely on this cwd.
cd "$BLOCKSCOUT_DIR"
log_success "Blockscout directory: $BLOCKSCOUT_DIR"
echo ""
# Step 3: Create/Update docker-compose.yml with correct settings.
# The secret is generated up front and written directly into the file, which
# removes the previous write-then-sed step. The heredoc delimiter is
# intentionally UNQUOTED so the shell resolves the ${VAR:-default} endpoint
# and host expressions at generation time; the old quoted-'EOF' version wrote
# them verbatim into the YAML (including a garbled nested
# ${IP_BLOCKSCOUT:-${IP_DEVICE_14:-...}}0} expression) and left docker-compose
# to interpolate them, which does not reliably handle nested defaults.
log_info "Step 3: Configuring docker-compose.yml..."
SECRET_KEY="$(openssl rand -hex 64)"
cat > docker-compose.yml <<EOF
version: '3.8'
services:
  postgres:
    image: postgres:15-alpine
    container_name: blockscout-postgres
    environment:
      POSTGRES_USER: blockscout
      POSTGRES_PASSWORD: blockscout
      POSTGRES_DB: blockscout
    volumes:
      - postgres-data:/var/lib/postgresql/data
    restart: unless-stopped
    networks:
      - blockscout-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U blockscout"]
      interval: 10s
      timeout: 5s
      retries: 5
  blockscout:
    image: blockscout/blockscout:latest
    container_name: blockscout
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      - DATABASE_URL=postgresql://blockscout:blockscout@postgres:5432/blockscout
      - ETHEREUM_JSONRPC_HTTP_URL=http://${RPC_ALLTRA_1:-192.168.11.250}:8545
      - ETHEREUM_JSONRPC_WS_URL=ws://${RPC_ALLTRA_1:-192.168.11.250}:8546
      - ETHEREUM_JSONRPC_TRACE_URL=http://${RPC_ALLTRA_1:-192.168.11.250}:8545
      - ETHEREUM_JSONRPC_VARIANT=besu
      - CHAIN_ID=138
      - COIN=ETH
      - BLOCKSCOUT_HOST=${IP_BLOCKSCOUT:-192.168.11.140}
      - BLOCKSCOUT_PROTOCOL=http
      - SECRET_KEY_BASE=${SECRET_KEY}
      - POOL_SIZE=10
      - ECTO_USE_SSL=false
    ports:
      - "4000:4000"
    volumes:
      - blockscout-data:/app/apps/explorer/priv/static
    restart: unless-stopped
    networks:
      - blockscout-network
volumes:
  postgres-data:
  blockscout-data:
networks:
  blockscout-network:
    driver: bridge
EOF
log_success "docker-compose.yml configured"
echo ""
# Step 4: Stop existing containers
# Tolerate either compose flavor and a missing/empty project.
log_info "Step 4: Stopping existing containers..."
docker-compose down 2>/dev/null || docker compose down 2>/dev/null || true
log_success "Existing containers stopped"
echo ""
# Step 5: Start PostgreSQL
log_info "Step 5: Starting PostgreSQL..."
docker-compose up -d postgres || docker compose up -d postgres
log_info "Waiting for PostgreSQL to be ready..."
# Poll pg_isready for up to ~60 s (30 attempts x 2 s).
for i in {1..30}; do
if docker exec blockscout-postgres pg_isready -U blockscout >/dev/null 2>&1; then
log_success "PostgreSQL is ready"
break
fi
echo -n "."
sleep 2
done
echo ""
echo ""
# Step 6: Start Blockscout (postgres healthcheck gates it via depends_on).
log_info "Step 6: Starting Blockscout..."
docker-compose up -d blockscout || docker compose up -d blockscout
log_success "Blockscout started (may take 1-2 minutes to fully initialize)"
echo ""
# Step 7: Configure and start Nginx as a reverse proxy in front of port 4000.
log_info "Step 7: Configuring Nginx..."
if ! command -v nginx &> /dev/null; then
    log_info "Installing Nginx..."
    apt-get update -qq
    apt-get install -y -qq nginx
fi
# The heredoc stays quoted so nginx runtime variables ($host, $remote_addr,
# $http_upgrade, ...) reach the config file literally. The server_name is
# substituted afterwards: the previous version embedded a shell
# default-expansion expression that nginx received verbatim.
cat > /etc/nginx/sites-available/blockscout <<'EOF'
server {
    listen 80;
    listen [::]:80;
    server_name __BLOCKSCOUT_HOST__ explorer.d-bis.org;
    client_max_body_size 100M;
    location / {
        proxy_pass http://localhost:4000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
        proxy_read_timeout 300s;
        proxy_connect_timeout 75s;
    }
    location /api {
        proxy_pass http://localhost:4000/api;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 300s;
    }
    location /health {
        proxy_pass http://localhost:4000/api/health;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        access_log off;
    }
}
EOF
sed -i "s|__BLOCKSCOUT_HOST__|${BLOCKSCOUT_HOST:-192.168.11.140}|" /etc/nginx/sites-available/blockscout
# Enable site (and drop the distro default vhost so it cannot shadow ours).
ln -sf /etc/nginx/sites-available/blockscout /etc/nginx/sites-enabled/blockscout
rm -f /etc/nginx/sites-enabled/default 2>/dev/null || true
# Test and reload Nginx
nginx -t && systemctl reload nginx
systemctl enable nginx
systemctl start nginx
log_success "Nginx configured and running"
echo ""
# Step 8: Check status of nginx plus both docker containers.
log_info "Step 8: Checking service status..."
sleep 5
echo ""
echo "Docker Containers:"
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | head -5
echo ""
echo "Service Status:"
if systemctl is-active --quiet nginx; then
    log_success "Nginx: Running"
else
    log_warn "Nginx: Not running"
fi
# BUGFIX: plain `docker ps` lines begin with the container ID, so the old
# anchored `grep -q "^blockscout "` could never match; compare exact names
# emitted by --format instead.
if docker ps --format '{{.Names}}' | grep -qx blockscout-postgres; then
    log_success "PostgreSQL: Running"
else
    log_warn "PostgreSQL: Not running"
fi
if docker ps --format '{{.Names}}' | grep -qx blockscout; then
    log_success "Blockscout: Running"
    log_info "Note: Blockscout may take 1-2 minutes to fully start"
else
    log_warn "Blockscout: Not running - check logs: docker logs blockscout"
fi
echo ""
# Step 9: Verify connectivity
log_info "Step 9: Testing connectivity..."
sleep 5
echo ""
echo "Connectivity Tests:"
echo ""
# Test RPC: eth_blockNumber round-trip against the Besu node.
log_info "Testing RPC endpoint..."
RPC_TEST=$(curl -s -X POST "$RPC_URL" \
-H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' 2>/dev/null || echo "")
if echo "$RPC_TEST" | grep -q '"result"'; then
# Extract the hex block number; bash printf %d converts 0x-prefixed hex.
BLOCK_HEX=$(echo "$RPC_TEST" | grep -o '"result":"[^"]*"' | cut -d'"' -f4)
BLOCK_DEC=$(printf "%d" "$BLOCK_HEX" 2>/dev/null || echo "unknown")
log_success "RPC endpoint accessible (Block: $BLOCK_DEC)"
else
log_warn "RPC endpoint may not be accessible"
fi
# Test Blockscout API: poll up to 6 times, 10 s apart, while it boots.
log_info "Testing Blockscout API..."
for i in {1..6}; do
API_TEST=$(curl -s http://localhost:4000/api/health 2>/dev/null || echo "")
if [ -n "$API_TEST" ]; then
log_success "Blockscout API responding: $API_TEST"
break
fi
if [ $i -lt 6 ]; then
log_info "Waiting for Blockscout to start... ($i/6)"
sleep 10
else
log_warn "Blockscout API not responding yet (may need more time)"
log_info "Check logs: docker logs blockscout"
fi
done
# Test Nginx: any 200/301/302 from the proxy counts as working.
log_info "Testing Nginx proxy..."
NGINX_TEST=$(curl -s -o /dev/null -w '%{http_code}' http://localhost/ 2>/dev/null || echo "000")
if [ "$NGINX_TEST" = "200" ] || [ "$NGINX_TEST" = "302" ] || [ "$NGINX_TEST" = "301" ]; then
log_success "Nginx proxy working (HTTP $NGINX_TEST)"
else
log_warn "Nginx returned: HTTP $NGINX_TEST"
fi
echo ""
# Final summary. BUGFIX: the access-point lines previously interpolated a
# garbled nested ${IP_...} default expression; use the resolved host with a
# sane fallback instead.
echo "════════════════════════════════════════════════════════"
echo "Configuration Complete!"
echo "════════════════════════════════════════════════════════"
echo ""
echo "Access Points:"
echo " Internal: http://${BLOCKSCOUT_HOST:-192.168.11.140}"
echo " External: https://explorer.d-bis.org"
echo " API: http://${BLOCKSCOUT_HOST:-192.168.11.140}/api"
echo " Health: http://${BLOCKSCOUT_HOST:-192.168.11.140}/health"
echo ""
echo "Configuration:"
echo " Chain ID: $CHAIN_ID"
echo " RPC: $RPC_URL"
echo " WS: $WS_URL"
echo ""
echo "Useful Commands:"
echo " View logs: docker-compose logs -f"
echo " Restart: docker-compose restart"
echo " Stop: docker-compose down"
echo " Start: docker-compose up -d"
echo " Check status: docker ps"
echo ""
log_info "Blockscout may take 1-2 minutes to fully initialize"
log_info "Monitor progress: docker logs -f blockscout"
echo ""

View File

@@ -0,0 +1,330 @@
#!/bin/bash
# Configure and start Blockscout with correct settings
# Run this INSIDE the Blockscout container (VMID 5000)
set -euo pipefail
# Configuration
CHAIN_ID=138
RPC_URL="http://192.168.11.250:8545"
WS_URL="ws://192.168.11.250:8546"
BLOCKSCOUT_HOST="192.168.11.140"
DB_PASSWORD="${DB_PASSWORD:-blockscout}"
# Colors
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
echo "════════════════════════════════════════════════════════"
echo "Blockscout Configuration and Startup"
echo "════════════════════════════════════════════════════════"
echo ""
echo "Configuration:"
echo " Chain ID: $CHAIN_ID"
echo " RPC URL: $RPC_URL"
echo " WS URL: $WS_URL"
echo " Host: $BLOCKSCOUT_HOST"
echo ""
# Step 1: Check Docker
log_info "Step 1: Checking Docker..."
if ! command -v docker &> /dev/null; then
log_error "Docker not found. Installing..."
apt-get update -qq
apt-get install -y -qq docker.io
systemctl enable docker
systemctl start docker
fi
log_success "Docker: $(docker --version 2>/dev/null | head -1 || echo 'installed')"
if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null 2>&1; then
log_error "Docker Compose not found. Installing..."
apt-get install -y -qq docker-compose
fi
log_success "Docker Compose: available"
echo ""
# Step 2: Find or create Blockscout directory
log_info "Step 2: Locating Blockscout directory..."
if [ -d /opt/blockscout ]; then
BLOCKSCOUT_DIR="/opt/blockscout"
elif [ -d /root/blockscout ]; then
BLOCKSCOUT_DIR="/root/blockscout"
else
BLOCKSCOUT_DIR="/opt/blockscout"
mkdir -p "$BLOCKSCOUT_DIR"
log_info "Created directory: $BLOCKSCOUT_DIR"
fi
cd "$BLOCKSCOUT_DIR"
log_success "Blockscout directory: $BLOCKSCOUT_DIR"
echo ""
# Step 3: Create/Update docker-compose.yml with correct settings
log_info "Step 3: Configuring docker-compose.yml..."
cat > docker-compose.yml <<'EOF'
version: '3.8'
services:
postgres:
image: postgres:15-alpine
container_name: blockscout-postgres
environment:
POSTGRES_USER: blockscout
POSTGRES_PASSWORD: blockscout
POSTGRES_DB: blockscout
volumes:
- postgres-data:/var/lib/postgresql/data
restart: unless-stopped
networks:
- blockscout-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U blockscout"]
interval: 10s
timeout: 5s
retries: 5
blockscout:
image: blockscout/blockscout:latest
container_name: blockscout
depends_on:
postgres:
condition: service_healthy
environment:
- DATABASE_URL=postgresql://blockscout:blockscout@postgres:5432/blockscout
- ETHEREUM_JSONRPC_HTTP_URL=http://192.168.11.250:8545
- ETHEREUM_JSONRPC_WS_URL=ws://192.168.11.250:8546
- ETHEREUM_JSONRPC_TRACE_URL=http://192.168.11.250:8545
- ETHEREUM_JSONRPC_VARIANT=besu
- CHAIN_ID=138
- COIN=ETH
- BLOCKSCOUT_HOST=192.168.11.140
- BLOCKSCOUT_PROTOCOL=http
- SECRET_KEY_BASE=CHANGEME_SECRET_KEY_BASE
- POOL_SIZE=10
- ECTO_USE_SSL=false
ports:
- "4000:4000"
volumes:
- blockscout-data:/app/apps/explorer/priv/static
restart: unless-stopped
networks:
- blockscout-network
volumes:
postgres-data:
blockscout-data:
networks:
blockscout-network:
driver: bridge
EOF
# Generate and replace secret key
SECRET_KEY=$(openssl rand -hex 64)
sed -i "s|SECRET_KEY_BASE=CHANGEME_SECRET_KEY_BASE|SECRET_KEY_BASE=${SECRET_KEY}|" docker-compose.yml
log_success "docker-compose.yml configured"
echo ""
# Step 4: Stop existing containers
log_info "Step 4: Stopping existing containers..."
docker-compose down 2>/dev/null || docker compose down 2>/dev/null || true
log_success "Existing containers stopped"
echo ""
# Step 5: Start PostgreSQL
log_info "Step 5: Starting PostgreSQL..."
docker-compose up -d postgres || docker compose up -d postgres
log_info "Waiting for PostgreSQL to be ready..."
for i in {1..30}; do
if docker exec blockscout-postgres pg_isready -U blockscout >/dev/null 2>&1; then
log_success "PostgreSQL is ready"
break
fi
echo -n "."
sleep 2
done
echo ""
echo ""
# Step 6: Start Blockscout
log_info "Step 6: Starting Blockscout..."
docker-compose up -d blockscout || docker compose up -d blockscout
log_success "Blockscout started (may take 1-2 minutes to fully initialize)"
echo ""
# Step 7: Configure and start Nginx
log_info "Step 7: Configuring Nginx..."
if ! command -v nginx &> /dev/null; then
log_info "Installing Nginx..."
apt-get update -qq
apt-get install -y -qq nginx
fi
# Configure Nginx
cat > /etc/nginx/sites-available/blockscout <<'EOF'
server {
listen 80;
listen [::]:80;
server_name 192.168.11.140 explorer.d-bis.org;
client_max_body_size 100M;
location / {
proxy_pass http://localhost:4000;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_cache_bypass $http_upgrade;
proxy_read_timeout 300s;
proxy_connect_timeout 75s;
}
location /api {
proxy_pass http://localhost:4000/api;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 300s;
}
location /health {
proxy_pass http://localhost:4000/api/health;
proxy_http_version 1.1;
proxy_set_header Host $host;
access_log off;
}
}
EOF
# Enable site
ln -sf /etc/nginx/sites-available/blockscout /etc/nginx/sites-enabled/blockscout
rm -f /etc/nginx/sites-enabled/default 2>/dev/null || true
# Test and reload Nginx
nginx -t && systemctl reload nginx
systemctl enable nginx
systemctl start nginx
log_success "Nginx configured and running"
echo ""
# Step 8: Check status of nginx plus both docker containers.
log_info "Step 8: Checking service status..."
sleep 5
echo ""
echo "Docker Containers:"
docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | head -5
echo ""
echo "Service Status:"
if systemctl is-active --quiet nginx; then
    log_success "Nginx: Running"
else
    log_warn "Nginx: Not running"
fi
# BUGFIX: plain `docker ps` lines begin with the container ID, so the old
# anchored `grep -q "^blockscout "` could never match; compare exact names
# emitted by --format instead.
if docker ps --format '{{.Names}}' | grep -qx blockscout-postgres; then
    log_success "PostgreSQL: Running"
else
    log_warn "PostgreSQL: Not running"
fi
if docker ps --format '{{.Names}}' | grep -qx blockscout; then
    log_success "Blockscout: Running"
    log_info "Note: Blockscout may take 1-2 minutes to fully start"
else
    log_warn "Blockscout: Not running - check logs: docker logs blockscout"
fi
echo ""
# Step 9: Verify connectivity
log_info "Step 9: Testing connectivity..."
sleep 5
echo ""
echo "Connectivity Tests:"
echo ""
# Test RPC
log_info "Testing RPC endpoint..."
RPC_TEST=$(curl -s -X POST "$RPC_URL" \
-H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' 2>/dev/null || echo "")
if echo "$RPC_TEST" | grep -q '"result"'; then
BLOCK_HEX=$(echo "$RPC_TEST" | grep -o '"result":"[^"]*"' | cut -d'"' -f4)
BLOCK_DEC=$(printf "%d" "$BLOCK_HEX" 2>/dev/null || echo "unknown")
log_success "RPC endpoint accessible (Block: $BLOCK_DEC)"
else
log_warn "RPC endpoint may not be accessible"
fi
# Test Blockscout API
log_info "Testing Blockscout API..."
for i in {1..6}; do
API_TEST=$(curl -s http://localhost:4000/api/health 2>/dev/null || echo "")
if [ -n "$API_TEST" ]; then
log_success "Blockscout API responding: $API_TEST"
break
fi
if [ $i -lt 6 ]; then
log_info "Waiting for Blockscout to start... ($i/6)"
sleep 10
else
log_warn "Blockscout API not responding yet (may need more time)"
log_info "Check logs: docker logs blockscout"
fi
done
# Test Nginx
log_info "Testing Nginx proxy..."
NGINX_TEST=$(curl -s -o /dev/null -w '%{http_code}' http://localhost/ 2>/dev/null || echo "000")
if [ "$NGINX_TEST" = "200" ] || [ "$NGINX_TEST" = "302" ] || [ "$NGINX_TEST" = "301" ]; then
log_success "Nginx proxy working (HTTP $NGINX_TEST)"
else
log_warn "Nginx returned: HTTP $NGINX_TEST"
fi
echo ""
# Final summary
echo "════════════════════════════════════════════════════════"
echo "Configuration Complete!"
echo "════════════════════════════════════════════════════════"
echo ""
echo "Access Points:"
echo " Internal: http://192.168.11.140"
echo " External: https://explorer.d-bis.org"
echo " API: http://192.168.11.140/api"
echo " Health: http://192.168.11.140/health"
echo ""
echo "Configuration:"
echo " Chain ID: $CHAIN_ID"
echo " RPC: $RPC_URL"
echo " WS: $WS_URL"
echo ""
echo "Useful Commands:"
echo " View logs: docker-compose logs -f"
echo " Restart: docker-compose restart"
echo " Stop: docker-compose down"
echo " Start: docker-compose up -d"
echo " Check status: docker ps"
echo ""
log_info "Blockscout may take 1-2 minutes to fully initialize"
log_info "Monitor progress: docker logs -f blockscout"
echo ""

View File

@@ -0,0 +1,181 @@
#!/usr/bin/env bash
# Configure all bridge destinations for CCIPWETH9Bridge and CCIPWETH10Bridge
# Usage: ./configure-bridge-destinations.sh
set -euo pipefail

# Resolve script locations once. BUGFIX: the previous version computed
# SCRIPT_DIR and PROJECT_ROOT twice, the second time redundantly.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Optional IP overrides; defaults are applied where the variables are used.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Load environment variables (supplies PRIVATE_KEY and bridge addresses).
if [ -f "$SOURCE_PROJECT/.env" ]; then
    source "$SOURCE_PROJECT/.env"
else
    log_error ".env file not found in $SOURCE_PROJECT"
    exit 1
fi
# Required variables
# RPC endpoint for chain 138: .env's RPC_URL_138 wins, then an IP from
# ip-addresses.conf, then the hard-coded LAN default.
RPC_URL="${RPC_URL_138:-http://${RPC_ALLTRA_1:-192.168.11.250}:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-}"
# The deployer key and both bridge addresses must come from .env.
if [ -z "${PRIVATE_KEY:-}" ]; then
log_error "PRIVATE_KEY not set in .env file"
exit 1
fi
if [ -z "$WETH9_BRIDGE" ] || [ -z "$WETH10_BRIDGE" ]; then
log_error "Bridge addresses not set in .env file"
log_error "Please deploy bridges first: bash scripts/deploy-bridge-contracts.sh"
exit 1
fi
# Destination chain configurations
# Keys are CCIP chain selectors (uint64); values are the receiving bridge
# contract address on that destination chain.
declare -A WETH9_DESTINATIONS=(
["11344663589394136015"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # BSC
["4051577828743386545"]="0xa780ef19a041745d353c9432f2a7f5a241335ffe" # Polygon
["6433500567565415381"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Avalanche
["15971525489660198786"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Base
["4949039107694359620"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Arbitrum
["3734403246176062136"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Optimism
["5009297550715157269"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Ethereum Mainnet
)
declare -A WETH10_DESTINATIONS=(
["11344663589394136015"]="0x105f8a15b819948a89153505762444ee9f324684" # BSC
["4051577828743386545"]="0xdab0591e5e89295ffad75a71dcfc30c5625c4fa2" # Polygon
["6433500567565415381"]="0x105f8a15b819948a89153505762444ee9f324684" # Avalanche
["15971525489660198786"]="0x105f8a15b819948a89153505762444ee9f324684" # Base
["4949039107694359620"]="0x105f8a15b819948a89153505762444ee9f324684" # Arbitrum
["3734403246176062136"]="0x105f8a15b819948a89153505762444ee9f324684" # Optimism
["5009297550715157269"]="0x105f8a15b819948a89153505762444ee9f324684" # Ethereum Mainnet
)
# Human-readable names keyed by the same selectors, for log output only.
declare -A CHAIN_NAMES=(
["11344663589394136015"]="BSC"
["4051577828743386545"]="Polygon"
["6433500567565415381"]="Avalanche"
["15971525489660198786"]="Base"
["4949039107694359620"]="Arbitrum"
["3734403246176062136"]="Optimism"
["5009297550715157269"]="Ethereum"
)
log_info "========================================="
log_info "Configure Bridge Destinations"
log_info "========================================="
log_info ""
log_info "WETH9 Bridge: $WETH9_BRIDGE"
log_info "WETH10 Bridge: $WETH10_BRIDGE"
log_info "RPC URL: $RPC_URL"
log_info ""
# Function to check if destination is already configured
# Args: $1 = bridge contract address, $2 = CCIP chain selector, $3 = label.
# Returns 0 when the destination looks configured, 1 otherwise.
# NOTE(review): the pattern logic below is fragile. A configured destination
# returned as an ABI-encoded address (24 zero hex chars of padding followed by
# the 40-char address) matches neither "true|enabled" nor the 40-zero run, so
# this can report "not configured" for already-configured entries; callers
# tolerate that via their "destination already exists" fallback. TODO: confirm
# the actual return encoding of destinations(uint64) and simplify.
check_destination() {
local bridge="$1"
local selector="$2"
local name="$3"
log_info "Checking $name destination..."
# Empty on RPC failure; treated as "not configured" below.
local result=$(cast call "$bridge" "destinations(uint64)" "$selector" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
if echo "$result" | grep -qE "(true|enabled|0x0000000000000000000000000000000000000000)" && ! echo "$result" | grep -q "0x0000000000000000000000000000000000000000$"; then
return 0 # Already configured
else
return 1 # Not configured
fi
}
# Configure WETH9 Bridge destinations: add each missing selector->address pair.
log_info "Configuring WETH9 Bridge destinations..."
WETH9_COUNT=0
for selector in "${!WETH9_DESTINATIONS[@]}"; do
    chain_name="${CHAIN_NAMES[$selector]}"
    dest_address="${WETH9_DESTINATIONS[$selector]}"
    if check_destination "$WETH9_BRIDGE" "$selector" "$chain_name (WETH9)"; then
        log_success "$chain_name already configured for WETH9"
    else
        log_info "Configuring $chain_name for WETH9..."
        # BUGFIX: `local` is only legal inside a function; here at top level it
        # failed and, combined with `set -e`, aborted the whole script.
        # The trailing `|| true` keeps a failed `cast send` (non-zero under
        # `pipefail`) from killing the run before the error branch can report it.
        output=$(cast send "$WETH9_BRIDGE" \
            "addDestination(uint64,address)" \
            "$selector" \
            "$dest_address" \
            --rpc-url "$RPC_URL" \
            --private-key "$PRIVATE_KEY" 2>&1 | tee "/tmp/weth9-config-${chain_name}.log") || true
        if echo "$output" | grep -qE "(blockHash|transactionHash|Success)"; then
            log_success "$chain_name configured for WETH9"
            # BUGFIX: ((WETH9_COUNT++)) returns status 1 when the pre-increment
            # value is 0, which `set -e` treats as a fatal error.
            WETH9_COUNT=$((WETH9_COUNT + 1))
        elif echo "$output" | grep -q "destination already exists"; then
            log_success "$chain_name already configured for WETH9"
        else
            log_error "✗ Failed to configure $chain_name for WETH9"
            log_info "Check /tmp/weth9-config-${chain_name}.log for details"
        fi
    fi
done
log_info ""
# Configure WETH10 Bridge destinations: add each missing selector->address pair.
log_info "Configuring WETH10 Bridge destinations..."
WETH10_COUNT=0
for selector in "${!WETH10_DESTINATIONS[@]}"; do
    chain_name="${CHAIN_NAMES[$selector]}"
    dest_address="${WETH10_DESTINATIONS[$selector]}"
    if check_destination "$WETH10_BRIDGE" "$selector" "$chain_name (WETH10)"; then
        log_success "$chain_name already configured for WETH10"
    else
        log_info "Configuring $chain_name for WETH10..."
        # BUGFIX: `local` is only legal inside a function; here at top level it
        # failed and, combined with `set -e`, aborted the whole script.
        # The trailing `|| true` keeps a failed `cast send` (non-zero under
        # `pipefail`) from killing the run before the error branch can report it.
        output=$(cast send "$WETH10_BRIDGE" \
            "addDestination(uint64,address)" \
            "$selector" \
            "$dest_address" \
            --rpc-url "$RPC_URL" \
            --private-key "$PRIVATE_KEY" 2>&1 | tee "/tmp/weth10-config-${chain_name}.log") || true
        if echo "$output" | grep -qE "(blockHash|transactionHash|Success)"; then
            log_success "$chain_name configured for WETH10"
            # BUGFIX: ((WETH10_COUNT++)) returns status 1 when the pre-increment
            # value is 0, which `set -e` treats as a fatal error.
            WETH10_COUNT=$((WETH10_COUNT + 1))
        elif echo "$output" | grep -q "destination already exists"; then
            log_success "$chain_name already configured for WETH10"
        else
            log_error "✗ Failed to configure $chain_name for WETH10"
            log_info "Check /tmp/weth10-config-${chain_name}.log for details"
        fi
    fi
done
log_info ""
# Final summary: counters reflect only destinations newly added on this run.
log_success "========================================="
log_success "Bridge Configuration Complete!"
log_success "========================================="
log_info ""
log_info "Summary:"
log_info " WETH9 destinations configured: $WETH9_COUNT new"
log_info " WETH10 destinations configured: $WETH10_COUNT new"
log_info ""
log_info "All 7 destination chains configured for both bridges"
log_info ""

View File

@@ -0,0 +1,175 @@
#!/usr/bin/env bash
# Configure all bridge destinations for CCIPWETH9Bridge and CCIPWETH10Bridge
# Usage: ./configure-bridge-destinations.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Load environment variables
if [ -f "$SOURCE_PROJECT/.env" ]; then
source "$SOURCE_PROJECT/.env"
else
log_error ".env file not found in $SOURCE_PROJECT"
exit 1
fi
# Required variables
RPC_URL="${RPC_URL_138:-http://192.168.11.250:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-}"
if [ -z "${PRIVATE_KEY:-}" ]; then
log_error "PRIVATE_KEY not set in .env file"
exit 1
fi
if [ -z "$WETH9_BRIDGE" ] || [ -z "$WETH10_BRIDGE" ]; then
log_error "Bridge addresses not set in .env file"
log_error "Please deploy bridges first: bash scripts/deploy-bridge-contracts.sh"
exit 1
fi
# Destination chain configurations
declare -A WETH9_DESTINATIONS=(
["11344663589394136015"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # BSC
["4051577828743386545"]="0xa780ef19a041745d353c9432f2a7f5a241335ffe" # Polygon
["6433500567565415381"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Avalanche
["15971525489660198786"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Base
["4949039107694359620"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Arbitrum
["3734403246176062136"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Optimism
["5009297550715157269"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Ethereum Mainnet
)
declare -A WETH10_DESTINATIONS=(
["11344663589394136015"]="0x105f8a15b819948a89153505762444ee9f324684" # BSC
["4051577828743386545"]="0xdab0591e5e89295ffad75a71dcfc30c5625c4fa2" # Polygon
["6433500567565415381"]="0x105f8a15b819948a89153505762444ee9f324684" # Avalanche
["15971525489660198786"]="0x105f8a15b819948a89153505762444ee9f324684" # Base
["4949039107694359620"]="0x105f8a15b819948a89153505762444ee9f324684" # Arbitrum
["3734403246176062136"]="0x105f8a15b819948a89153505762444ee9f324684" # Optimism
["5009297550715157269"]="0x105f8a15b819948a89153505762444ee9f324684" # Ethereum Mainnet
)
declare -A CHAIN_NAMES=(
["11344663589394136015"]="BSC"
["4051577828743386545"]="Polygon"
["6433500567565415381"]="Avalanche"
["15971525489660198786"]="Base"
["4949039107694359620"]="Arbitrum"
["3734403246176062136"]="Optimism"
["5009297550715157269"]="Ethereum"
)
log_info "========================================="
log_info "Configure Bridge Destinations"
log_info "========================================="
log_info ""
log_info "WETH9 Bridge: $WETH9_BRIDGE"
log_info "WETH10 Bridge: $WETH10_BRIDGE"
log_info "RPC URL: $RPC_URL"
log_info ""
# Function to check if destination is already configured
check_destination() {
local bridge="$1"
local selector="$2"
local name="$3"
log_info "Checking $name destination..."
local result=$(cast call "$bridge" "destinations(uint64)" "$selector" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
if echo "$result" | grep -qE "(true|enabled|0x0000000000000000000000000000000000000000)" && ! echo "$result" | grep -q "0x0000000000000000000000000000000000000000$"; then
return 0 # Already configured
else
return 1 # Not configured
fi
}
# Configure WETH9 Bridge destinations: add each missing selector->address pair.
log_info "Configuring WETH9 Bridge destinations..."
WETH9_COUNT=0
for selector in "${!WETH9_DESTINATIONS[@]}"; do
    chain_name="${CHAIN_NAMES[$selector]}"
    dest_address="${WETH9_DESTINATIONS[$selector]}"
    if check_destination "$WETH9_BRIDGE" "$selector" "$chain_name (WETH9)"; then
        log_success "$chain_name already configured for WETH9"
    else
        log_info "Configuring $chain_name for WETH9..."
        # BUGFIX: `local` is only legal inside a function; here at top level it
        # failed and, combined with `set -e`, aborted the whole script.
        # The trailing `|| true` keeps a failed `cast send` (non-zero under
        # `pipefail`) from killing the run before the error branch can report it.
        output=$(cast send "$WETH9_BRIDGE" \
            "addDestination(uint64,address)" \
            "$selector" \
            "$dest_address" \
            --rpc-url "$RPC_URL" \
            --private-key "$PRIVATE_KEY" 2>&1 | tee "/tmp/weth9-config-${chain_name}.log") || true
        if echo "$output" | grep -qE "(blockHash|transactionHash|Success)"; then
            log_success "$chain_name configured for WETH9"
            # BUGFIX: ((WETH9_COUNT++)) returns status 1 when the pre-increment
            # value is 0, which `set -e` treats as a fatal error.
            WETH9_COUNT=$((WETH9_COUNT + 1))
        elif echo "$output" | grep -q "destination already exists"; then
            log_success "$chain_name already configured for WETH9"
        else
            log_error "✗ Failed to configure $chain_name for WETH9"
            log_info "Check /tmp/weth9-config-${chain_name}.log for details"
        fi
    fi
done
log_info ""
# Configure WETH10 Bridge destinations: add each missing selector->address pair.
log_info "Configuring WETH10 Bridge destinations..."
WETH10_COUNT=0
for selector in "${!WETH10_DESTINATIONS[@]}"; do
    chain_name="${CHAIN_NAMES[$selector]}"
    dest_address="${WETH10_DESTINATIONS[$selector]}"
    if check_destination "$WETH10_BRIDGE" "$selector" "$chain_name (WETH10)"; then
        log_success "$chain_name already configured for WETH10"
    else
        log_info "Configuring $chain_name for WETH10..."
        # BUGFIX: `local` is only legal inside a function; here at top level it
        # failed and, combined with `set -e`, aborted the whole script.
        # The trailing `|| true` keeps a failed `cast send` (non-zero under
        # `pipefail`) from killing the run before the error branch can report it.
        output=$(cast send "$WETH10_BRIDGE" \
            "addDestination(uint64,address)" \
            "$selector" \
            "$dest_address" \
            --rpc-url "$RPC_URL" \
            --private-key "$PRIVATE_KEY" 2>&1 | tee "/tmp/weth10-config-${chain_name}.log") || true
        if echo "$output" | grep -qE "(blockHash|transactionHash|Success)"; then
            log_success "$chain_name configured for WETH10"
            # BUGFIX: ((WETH10_COUNT++)) returns status 1 when the pre-increment
            # value is 0, which `set -e` treats as a fatal error.
            WETH10_COUNT=$((WETH10_COUNT + 1))
        elif echo "$output" | grep -q "destination already exists"; then
            log_success "$chain_name already configured for WETH10"
        else
            log_error "✗ Failed to configure $chain_name for WETH10"
            log_info "Check /tmp/weth10-config-${chain_name}.log for details"
        fi
    fi
done
log_info ""
log_success "========================================="
log_success "Bridge Configuration Complete!"
log_success "========================================="
log_info ""
log_info "Summary:"
log_info " WETH9 destinations configured: $WETH9_COUNT new"
log_info " WETH10 destinations configured: $WETH10_COUNT new"
log_info ""
log_info "All 7 destination chains configured for both bridges"
log_info ""

View File

@@ -0,0 +1,477 @@
#!/usr/bin/env bash
# Configure Cloudflare Tunnel Routes and DNS Records via API
# Usage: ./configure-cloudflare-api.sh
# Requires: CLOUDFLARE_API_TOKEN and CLOUDFLARE_ZONE_ID environment variables
set -euo pipefail

# Resolve script/project paths, then load the optional IP address map.
# (The original assigned SCRIPT_DIR twice with identical values; the
# redundant second assignment was removed.)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Best effort: the config file may not exist on every host.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# ANSI color codes for log output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging helpers.  All diagnostics go to STDERR so that functions whose
# result is captured via $(...) — get_zone_id, get_account_id,
# get_tunnel_id — emit only their return value on stdout.  (With the
# original stdout logging, `zone_id=$(get_zone_id)` also captured the
# "[INFO] ..." lines.)
info() { echo -e "${GREEN}[INFO]${NC} $1" >&2; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1" >&2; }
error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
debug() { echo -e "${BLUE}[DEBUG]${NC} $1" >&2; }
# Check for required tools (curl for API calls, jq for JSON parsing)
if ! command -v curl >/dev/null 2>&1; then
    error "curl is required but not installed"
    exit 1
fi
if ! command -v jq >/dev/null 2>&1; then
    error "jq is required but not installed. Install with: apt-get install jq"
    exit 1
fi
# Load environment variables from the project-root .env, if present
if [[ -f "$SCRIPT_DIR/../.env" ]]; then
    source "$SCRIPT_DIR/../.env"
fi
# Cloudflare API configuration (support multiple naming conventions)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
DOMAIN="${DOMAIN:-${CLOUDFLARE_DOMAIN:-d-bis.org}}"
# Tunnel configuration (support multiple naming conventions)
# Prefer JWT token from installed service, then env vars.
# The SSH probe reads the token out of the cloudflared systemd unit inside
# Proxmox container 102; failures degrade silently to an empty string.
INSTALLED_TOKEN=""
if command -v ssh >/dev/null 2>&1; then
    INSTALLED_TOKEN=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST:-192.168.11.10} \
        "pct exec 102 -- cat /etc/systemd/system/cloudflared.service 2>/dev/null | grep -o 'tunnel run --token [^ ]*' | cut -d' ' -f3" 2>/dev/null || echo "")
fi
# NOTE(review): the final fallback below is a hardcoded tunnel credential
# committed to the repository — it should be rotated and moved to .env.
TUNNEL_TOKEN="${INSTALLED_TOKEN:-${TUNNEL_TOKEN:-${CLOUDFLARE_TUNNEL_TOKEN:-eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiMTBhYjIyZGEtOGVhMy00ZTJlLWE4OTYtMjdlY2UyMjExYTA1IiwicyI6IlptRXlOMkkyTVRrdE1EZzFNeTAwTkRBNExXSXhaalF0Wm1KaE5XVmpaVEEzTVdGbCJ9}}}"
# RPC endpoint configuration
# Public endpoints route to VMID 2502 (NO JWT authentication)
# Private endpoints route to VMID 2501 (JWT authentication required)
# Fix: the generated `${VAR:-${VAR:-${VAR:-...}}}` chains nested the SAME
# variable at every level, which is equivalent to a single default
# expansion — collapsed to one level.
declare -A RPC_ENDPOINTS=(
    [rpc-http-pub]="https://${RPC_ALI_2:-192.168.11.252}:443"
    [rpc-ws-pub]="https://${RPC_ALI_2:-192.168.11.252}:443"
    [rpc-http-prv]="https://${RPC_ALI_1:-192.168.11.251}:443"
    [rpc-ws-prv]="https://${RPC_ALI_1:-192.168.11.251}:443"
)
# Cloudflare API base URLs
CF_API_BASE="https://api.cloudflare.com/client/v4"
CF_ZERO_TRUST_API="https://api.cloudflare.com/client/v4/accounts"
#######################################
# Perform a Cloudflare v4 API request and print the JSON response.
# Globals:   CLOUDFLARE_API_TOKEN, CLOUDFLARE_API_KEY, CLOUDFLARE_EMAIL,
#            CF_API_BASE (read)
# Arguments: $1 HTTP method, $2 API path (appended to CF_API_BASE),
#            $3 optional JSON request body
# Outputs:   response JSON on stdout; diagnostics via error()/debug()
# Returns:   0 on success; 1 on invalid JSON or .success != true
# NOTE(review): when called as $(cf_api_request ...) the `exit 1` below only
# terminates the command-substitution subshell, not the script — callers
# must still validate the captured output.
#######################################
cf_api_request() {
    local method="$1"
    local endpoint="$2"
    local data="${3:-}"
    local url="${CF_API_BASE}${endpoint}"
    local headers=()
    if [[ -n "$CLOUDFLARE_API_TOKEN" ]]; then
        headers+=("-H" "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}")
    elif [[ -n "$CLOUDFLARE_API_KEY" ]]; then
        # Global API Keys are typically 40 chars, API Tokens are longer
        # If no email provided, assume it's an API Token
        if [[ -z "$CLOUDFLARE_EMAIL" ]] || [[ ${#CLOUDFLARE_API_KEY} -gt 50 ]]; then
            headers+=("-H" "Authorization: Bearer ${CLOUDFLARE_API_KEY}")
        else
            headers+=("-H" "X-Auth-Email: ${CLOUDFLARE_EMAIL}")
            headers+=("-H" "X-Auth-Key: ${CLOUDFLARE_API_KEY}")
        fi
    else
        error "Cloudflare API credentials not found!"
        error "Set CLOUDFLARE_API_TOKEN or CLOUDFLARE_EMAIL + CLOUDFLARE_API_KEY"
        exit 1
    fi
    headers+=("-H" "Content-Type: application/json")
    local response
    if [[ -n "$data" ]]; then
        response=$(curl -s -X "$method" "$url" "${headers[@]}" -d "$data")
    else
        response=$(curl -s -X "$method" "$url" "${headers[@]}")
    fi
    # Check if response is valid JSON
    if ! echo "$response" | jq -e . >/dev/null 2>&1; then
        error "Invalid JSON response from API"
        debug "Response: $response"
        return 1
    fi
    # Check for API errors (Cloudflare wraps every response in .success)
    local success=$(echo "$response" | jq -r '.success // false' 2>/dev/null)
    if [[ "$success" != "true" ]]; then
        local errors=$(echo "$response" | jq -r '.errors[]?.message // .error // "Unknown error"' 2>/dev/null | head -3)
        if [[ -z "$errors" ]]; then
            errors="API request failed (check response)"
        fi
        error "API request failed: $errors"
        debug "Response: $response"
        return 1
    fi
    echo "$response"
}
#######################################
# Resolve the Cloudflare zone ID for $DOMAIN.
# Uses CLOUDFLARE_ZONE_ID when provided, otherwise queries the API.
# Outputs: the zone ID on stdout ONLY — diagnostics are forced to stderr
#          so `zone_id=$(get_zone_id)` captures just the ID.
# Exits non-zero when the zone cannot be found.
#######################################
get_zone_id() {
    if [[ -n "$CLOUDFLARE_ZONE_ID" ]]; then
        echo "$CLOUDFLARE_ZONE_ID"
        return 0
    fi
    info "Getting zone ID for domain: $DOMAIN" >&2
    # Combined declaration+assignment deliberately masks cf_api_request's
    # exit status under `set -e`; the empty-result check below handles it.
    local response=$(cf_api_request "GET" "/zones?name=${DOMAIN}")
    local zone_id=$(echo "$response" | jq -r '.result[0].id // empty')
    if [[ -z "$zone_id" ]]; then
        error "Zone not found for domain: $DOMAIN" >&2
        exit 1
    fi
    info "Zone ID: $zone_id" >&2
    echo "$zone_id"
}
#######################################
# Determine the Cloudflare account ID (needed for the Zero Trust API),
# trying in order: token verification, the accounts list, the zone record.
# Outputs: account ID on stdout ONLY; diagnostics on stderr.
# Exits non-zero when no strategy yields an ID.
#######################################
get_account_id() {
    info "Getting account ID..." >&2
    # Try to get from token verification.
    # NOTE(review): /user/tokens/verify returns the *token's* id in
    # .result.id, not necessarily the account id — confirm this fallback
    # chain against the Cloudflare API docs.
    local response=$(cf_api_request "GET" "/user/tokens/verify")
    local account_id=$(echo "$response" | jq -r '.result.id // empty')
    if [[ -z "$account_id" ]]; then
        # Try alternative: first entry of the accounts list
        response=$(cf_api_request "GET" "/accounts")
        account_id=$(echo "$response" | jq -r '.result[0].id // empty')
    fi
    if [[ -z "$account_id" ]]; then
        # Last resort: the owning account of the zone
        local zone_id=$(get_zone_id)
        response=$(cf_api_request "GET" "/zones/${zone_id}")
        account_id=$(echo "$response" | jq -r '.result.account.id // empty')
    fi
    if [[ -z "$account_id" ]]; then
        error "Could not determine account ID" >&2
        error "You may need to specify CLOUDFLARE_ACCOUNT_ID in .env file" >&2
        exit 1
    fi
    info "Account ID: $account_id" >&2
    echo "$account_id"
}
#######################################
# Extract the tunnel UUID from a cloudflared JWT token.
# The token's second (payload) segment is base64-encoded JSON whose "t"
# field holds the tunnel ID.
# Arguments: $1 - token string
# Outputs:   the tunnel ID, or an empty line when the token is not a JWT
#            or cannot be decoded.
#######################################
get_tunnel_id_from_token() {
    local token="$1"
    # Guard clause: anything without two dots is not a JWT.
    if [[ "$token" != *.*.* ]]; then
        echo ""
        return
    fi
    # Grab the payload segment and restore base64 padding if required.
    local payload
    payload=$(echo "$token" | cut -d'.' -f2)
    local pad=$((4 - ${#payload} % 4))
    if [[ $pad -ne 4 ]]; then
        payload="${payload}$(printf '%*s' "$pad" | tr ' ' '=')"
    fi
    # Prefer python3 for robust decoding; fall back to base64 + jq.
    if command -v python3 >/dev/null 2>&1; then
        echo "$payload" | python3 -c "import sys, base64, json; payload=sys.stdin.read().strip(); padding=4-len(payload)%4; payload+=('='*padding if padding<4 else ''); data=json.loads(base64.b64decode(payload)); print(data.get('t', ''))" 2>/dev/null || echo ""
    else
        echo "$payload" | base64 -d 2>/dev/null | jq -r '.t // empty' 2>/dev/null || echo ""
    fi
}
#######################################
# Resolve the tunnel ID: first by decoding the JWT token, then by
# listing the account's tunnels via the Zero Trust API.
# Arguments: $1 account ID, $2 tunnel token (JWT or legacy)
# Outputs:   tunnel ID on stdout ONLY; diagnostics on stderr.
# Exits non-zero when no tunnel ID can be determined.
#######################################
get_tunnel_id() {
    local account_id="$1"
    local token="$2"
    # Preferred source: the 't' claim embedded in the JWT token.
    local tunnel_id=$(get_tunnel_id_from_token "$token")
    if [[ -n "$tunnel_id" ]]; then
        info "Tunnel ID from token: $tunnel_id" >&2
        echo "$tunnel_id"
        return 0
    fi
    # Fallback: take the first tunnel returned by the API.
    warn "Could not extract tunnel ID from token, listing tunnels..." >&2
    local response=$(cf_api_request "GET" "/accounts/${account_id}/cfd_tunnel" 2>/dev/null)
    if [[ -z "$response" ]]; then
        error "Failed to list tunnels. Check API credentials." >&2
        exit 1
    fi
    # Fix: the original re-declared `local tunnel_id` here, shadowing the
    # variable above; reuse it instead.  `|| true` preserves the original
    # error-masking behavior under `set -e -o pipefail`.
    tunnel_id=$(echo "$response" | jq -r '.result[0].id // empty' 2>/dev/null || true)
    if [[ -z "$tunnel_id" ]]; then
        error "Could not find tunnel ID" >&2
        debug "Response: $response" >&2
        exit 1
    fi
    info "Tunnel ID: $tunnel_id" >&2
    echo "$tunnel_id"
}
#######################################
# Fetch a tunnel's display name from the Zero Trust API.
# Arguments: $1 account ID, $2 tunnel ID
# Outputs:   the tunnel name (empty when the lookup fails; callers fall
#            back to a synthetic "tunnel-<prefix>" name).
#######################################
get_tunnel_name() {
    local acct="$1" tid="$2"
    local reply=$(cf_api_request "GET" "/accounts/${acct}/cfd_tunnel/${tid}")
    local name=$(echo "$reply" | jq -r '.result.name // empty')
    echo "$name"
}
#######################################
# Push the tunnel ingress configuration: one rule per RPC subdomain plus
# the mandatory http_status:404 catch-all, via the cfd_tunnel
# configurations endpoint.
# Globals:   RPC_ENDPOINTS, DOMAIN (read)
# Arguments: $1 account ID, $2 tunnel ID, $3 tunnel name (display only)
# Returns:   0 on success, 1 on API failure
#######################################
configure_tunnel_routes() {
    local account_id="$1"
    local tunnel_id="$2"
    local tunnel_name="$3"
    info "Configuring tunnel routes for: $tunnel_name"
    # Build ingress rules array (JSON assembled by string concatenation,
    # then normalized through jq below)
    local ingress_array="["
    local first=true
    for subdomain in "${!RPC_ENDPOINTS[@]}"; do
        local service="${RPC_ENDPOINTS[$subdomain]}"
        local hostname="${subdomain}.${DOMAIN}"
        if [[ "$first" == "true" ]]; then
            first=false
        else
            ingress_array+=","
        fi
        # Determine if WebSocket (subdomain naming convention: contains "ws")
        local is_ws=false
        if [[ "$subdomain" == *"ws"* ]]; then
            is_ws=true
        fi
        # Build ingress rule
        # Add noTLSVerify to skip certificate validation (certificates don't have IP SANs)
        if [[ "$is_ws" == "true" ]]; then
            ingress_array+="{\"hostname\":\"${hostname}\",\"service\":\"${service}\",\"originRequest\":{\"httpHostHeader\":\"${hostname}\",\"noTLSVerify\":true}}"
        else
            ingress_array+="{\"hostname\":\"${hostname}\",\"service\":\"${service}\",\"originRequest\":{\"noTLSVerify\":true}}"
        fi
        info "  Adding route: ${hostname}${service}"
    done
    # Add catch-all (must be last — cloudflared rejects configs without one)
    ingress_array+=",{\"service\":\"http_status:404\"}]"
    # Create config JSON wrapper expected by the configurations endpoint
    local config_data=$(echo "$ingress_array" | jq -c '{
        config: {
            ingress: .
        }
    }')
    info "Updating tunnel configuration..."
    local response=$(cf_api_request "PUT" "/accounts/${account_id}/cfd_tunnel/${tunnel_id}/configurations" "$config_data")
    if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
        info "✓ Tunnel routes configured successfully"
    else
        local errors=$(echo "$response" | jq -r '.errors[]?.message // "Unknown error"' | head -3)
        error "Failed to configure tunnel routes: $errors"
        debug "Response: $response"
        return 1
    fi
}
#######################################
# Upsert a CNAME record <name>.<DOMAIN> -> <target>.
# Arguments: $1 zone ID, $2 record name (subdomain), $3 CNAME target,
#            $4 optional proxied flag (default "true")
# Returns:   0 on success, 1 on API failure
#######################################
create_or_update_dns_record() {
    local zone_id="$1"
    local name="$2"
    local target="$3"
    local proxied="${4:-true}"
    # Check if record exists (lookup by FQDN + type)
    local response=$(cf_api_request "GET" "/zones/${zone_id}/dns_records?name=${name}.${DOMAIN}&type=CNAME")
    local record_id=$(echo "$response" | jq -r '.result[0].id // empty')
    # ttl=1 means "automatic" in the Cloudflare API
    local data=$(jq -n \
        --arg name "${name}.${DOMAIN}" \
        --arg target "$target" \
        --argjson proxied "$proxied" \
        '{
            type: "CNAME",
            name: $name,
            content: $target,
            proxied: $proxied,
            ttl: 1
        }')
    if [[ -n "$record_id" ]]; then
        info "  Updating existing DNS record: ${name}.${DOMAIN}"
        response=$(cf_api_request "PUT" "/zones/${zone_id}/dns_records/${record_id}" "$data")
    else
        info "  Creating DNS record: ${name}.${DOMAIN}"
        response=$(cf_api_request "POST" "/zones/${zone_id}/dns_records" "$data")
    fi
    if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
        info "  ✓ DNS record configured"
    else
        error "  ✗ Failed to configure DNS record"
        return 1
    fi
}
#######################################
# Point every RPC subdomain at the tunnel via CNAME records.
# Arguments: $1 zone ID, $2 tunnel ID
#######################################
configure_dns_records() {
    local zone="$1" tunnel="$2"
    # cloudflared tunnels are addressed as <tunnel-id>.cfargotunnel.com
    local target="${tunnel}.cfargotunnel.com"
    info "Configuring DNS records..."
    info "Tunnel target: $target"
    for subdomain in "${!RPC_ENDPOINTS[@]}"; do
        create_or_update_dns_record "$zone" "$subdomain" "$target" "true"
    done
}
#######################################
# Entry point: validate credentials, resolve zone/account/tunnel IDs,
# then configure tunnel ingress routes and DNS records.
# Globals: CLOUDFLARE_* credentials, TUNNEL_TOKEN, DOMAIN (read)
# Exits non-zero when credentials or the tunnel ID cannot be resolved.
#######################################
main() {
    info "Cloudflare API Configuration Script"
    info "===================================="
    echo ""
    # Validate credentials: need either an API token or key(+email)
    if [[ -z "$CLOUDFLARE_API_TOKEN" ]] && [[ -z "$CLOUDFLARE_EMAIL" ]] && [[ -z "$CLOUDFLARE_API_KEY" ]]; then
        error "Cloudflare API credentials required!"
        echo ""
        echo "Set one of:"
        echo "  export CLOUDFLARE_API_TOKEN='your-api-token'"
        echo "  OR"
        echo "  export CLOUDFLARE_EMAIL='your-email@example.com'"
        echo "  export CLOUDFLARE_API_KEY='your-api-key'"
        echo ""
        echo "You can also create a .env file in the project root with these variables."
        exit 1
    fi
    # If API_KEY is provided but no email, we need email for Global API Key
    if [[ -n "$CLOUDFLARE_API_KEY" ]] && [[ -z "$CLOUDFLARE_EMAIL" ]] && [[ -z "$CLOUDFLARE_API_TOKEN" ]]; then
        error "CLOUDFLARE_API_KEY requires CLOUDFLARE_EMAIL"
        error "Please add CLOUDFLARE_EMAIL to your .env file"
        error ""
        error "OR create an API Token instead:"
        error "  1. Go to: https://dash.cloudflare.com/profile/api-tokens"
        error "  2. Create token with: Zone:DNS:Edit, Account:Cloudflare Tunnel:Edit"
        error "  3. Set CLOUDFLARE_API_TOKEN in .env"
        exit 1
    fi
    # Get zone ID.
    # NOTE(review): $(...) captures everything the helper prints to stdout;
    # confirm info()/warn() log to stderr so only the ID is captured here.
    local zone_id=$(get_zone_id)
    # Get account ID (explicit env var wins over API discovery)
    local account_id="${CLOUDFLARE_ACCOUNT_ID:-}"
    if [[ -z "$account_id" ]]; then
        account_id=$(get_account_id)
    else
        info "Using provided Account ID: $account_id"
    fi
    # Get tunnel ID - try from .env first, then extraction, then API
    local tunnel_id="${CLOUDFLARE_TUNNEL_ID:-}"
    # If not in .env, try to extract from JWT token (inline duplicate of
    # get_tunnel_id_from_token's decoding logic)
    if [[ -z "$tunnel_id" ]] && [[ "$TUNNEL_TOKEN" == *.*.* ]]; then
        local payload=$(echo "$TUNNEL_TOKEN" | cut -d'.' -f2)
        local padding=$((4 - ${#payload} % 4))
        if [[ $padding -ne 4 ]]; then
            payload="${payload}$(printf '%*s' $padding | tr ' ' '=')"
        fi
        if command -v python3 >/dev/null 2>&1; then
            tunnel_id=$(echo "$payload" | python3 -c "import sys, base64, json; payload=sys.stdin.read().strip(); padding=4-len(payload)%4; payload+=('='*padding if padding<4 else ''); data=json.loads(base64.b64decode(payload)); print(data.get('t', ''))" 2>/dev/null || echo "")
        fi
    fi
    # If extraction failed, try API (but don't fail if API doesn't work)
    if [[ -z "$tunnel_id" ]]; then
        tunnel_id=$(get_tunnel_id "$account_id" "$TUNNEL_TOKEN" 2>/dev/null || echo "")
    fi
    if [[ -z "$tunnel_id" ]]; then
        error "Could not determine tunnel ID"
        error "Please set CLOUDFLARE_TUNNEL_ID in .env file"
        error "Or ensure API credentials are valid to fetch it automatically"
        exit 1
    fi
    info "Using Tunnel ID: $tunnel_id"
    # Fall back to a synthetic name when the API lookup fails
    local tunnel_name=$(get_tunnel_name "$account_id" "$tunnel_id" 2>/dev/null || echo "tunnel-${tunnel_id:0:8}")
    echo ""
    info "Configuration Summary:"
    echo "  Domain: $DOMAIN"
    echo "  Zone ID: $zone_id"
    echo "  Account ID: $account_id"
    echo "  Tunnel: $tunnel_name (ID: $tunnel_id)"
    echo ""
    # Configure tunnel routes
    echo "=========================================="
    info "Step 1: Configuring Tunnel Routes"
    echo "=========================================="
    configure_tunnel_routes "$account_id" "$tunnel_id" "$tunnel_name"
    echo ""
    echo "=========================================="
    info "Step 2: Configuring DNS Records"
    echo "=========================================="
    configure_dns_records "$zone_id" "$tunnel_id"
    echo ""
    echo "=========================================="
    info "Configuration Complete!"
    echo "=========================================="
    echo ""
    info "Next steps:"
    echo "  1. Wait 1-2 minutes for DNS propagation"
    echo "  2. Test endpoints:"
    echo "     curl https://rpc-http-pub.d-bis.org/health"
    echo "  3. Verify in Cloudflare Dashboard:"
    echo "     - Zero Trust → Networks → Tunnels → Check routes"
    echo "     - DNS → Records → Verify CNAME records"
}
# Run main function
main

View File

@@ -0,0 +1,471 @@
#!/usr/bin/env bash
# Configure Cloudflare Tunnel Routes and DNS Records via API
# Usage: ./configure-cloudflare-api.sh
# Requires: CLOUDFLARE_API_TOKEN and CLOUDFLARE_ZONE_ID environment variables
set -euo pipefail
# Directory of this script (used to locate the sibling .env file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# ANSI color codes for log output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging helpers.  All diagnostics go to STDERR so that functions whose
# result is captured via $(...) — get_zone_id, get_account_id,
# get_tunnel_id — emit only their return value on stdout.  (With the
# original stdout logging, `zone_id=$(get_zone_id)` also captured the
# "[INFO] ..." lines.)
info() { echo -e "${GREEN}[INFO]${NC} $1" >&2; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1" >&2; }
error() { echo -e "${RED}[ERROR]${NC} $1" >&2; }
debug() { echo -e "${BLUE}[DEBUG]${NC} $1" >&2; }
# Check for required tools (curl for API calls, jq for JSON parsing)
if ! command -v curl >/dev/null 2>&1; then
    error "curl is required but not installed"
    exit 1
fi
if ! command -v jq >/dev/null 2>&1; then
    error "jq is required but not installed. Install with: apt-get install jq"
    exit 1
fi
# Load environment variables from the project-root .env, if present
if [[ -f "$SCRIPT_DIR/../.env" ]]; then
    source "$SCRIPT_DIR/../.env"
fi
# Cloudflare API configuration (support multiple naming conventions)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
DOMAIN="${DOMAIN:-${CLOUDFLARE_DOMAIN:-d-bis.org}}"
# Tunnel configuration (support multiple naming conventions)
# Prefer JWT token from installed service, then env vars.
# The SSH probe reads the token out of the cloudflared systemd unit inside
# Proxmox container 102; failures degrade silently to an empty string.
INSTALLED_TOKEN=""
if command -v ssh >/dev/null 2>&1; then
    INSTALLED_TOKEN=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST:-192.168.11.10} \
        "pct exec 102 -- cat /etc/systemd/system/cloudflared.service 2>/dev/null | grep -o 'tunnel run --token [^ ]*' | cut -d' ' -f3" 2>/dev/null || echo "")
fi
# NOTE(review): the final fallback below is a hardcoded tunnel credential
# committed to the repository — it should be rotated and moved to .env.
TUNNEL_TOKEN="${INSTALLED_TOKEN:-${TUNNEL_TOKEN:-${CLOUDFLARE_TUNNEL_TOKEN:-eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiMTBhYjIyZGEtOGVhMy00ZTJlLWE4OTYtMjdlY2UyMjExYTA1IiwicyI6IlptRXlOMkkyTVRrdE1EZzFNeTAwTkRBNExXSXhaalF0Wm1KaE5XVmpaVEEzTVdGbCJ9}}}"
# RPC endpoint configuration
# Public endpoints route to VMID 2502 (NO JWT authentication)
# Private endpoints route to VMID 2501 (JWT authentication required)
# Keys become subdomains of $DOMAIN; values are the origin services the
# tunnel forwards to (LAN HTTPS endpoints).
declare -A RPC_ENDPOINTS=(
    [rpc-http-pub]="https://192.168.11.252:443"
    [rpc-ws-pub]="https://192.168.11.252:443"
    [rpc-http-prv]="https://192.168.11.251:443"
    [rpc-ws-prv]="https://192.168.11.251:443"
)
# Cloudflare API base URLs
CF_API_BASE="https://api.cloudflare.com/client/v4"
CF_ZERO_TRUST_API="https://api.cloudflare.com/client/v4/accounts"
#######################################
# Perform a Cloudflare v4 API request and print the JSON response.
# Globals:   CLOUDFLARE_API_TOKEN, CLOUDFLARE_API_KEY, CLOUDFLARE_EMAIL,
#            CF_API_BASE (read)
# Arguments: $1 HTTP method, $2 API path (appended to CF_API_BASE),
#            $3 optional JSON request body
# Outputs:   response JSON on stdout; diagnostics via error()/debug()
# Returns:   0 on success; 1 on invalid JSON or .success != true
# NOTE(review): when called as $(cf_api_request ...) the `exit 1` below only
# terminates the command-substitution subshell, not the script — callers
# must still validate the captured output.
#######################################
cf_api_request() {
    local method="$1"
    local endpoint="$2"
    local data="${3:-}"
    local url="${CF_API_BASE}${endpoint}"
    local headers=()
    if [[ -n "$CLOUDFLARE_API_TOKEN" ]]; then
        headers+=("-H" "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}")
    elif [[ -n "$CLOUDFLARE_API_KEY" ]]; then
        # Global API Keys are typically 40 chars, API Tokens are longer
        # If no email provided, assume it's an API Token
        if [[ -z "$CLOUDFLARE_EMAIL" ]] || [[ ${#CLOUDFLARE_API_KEY} -gt 50 ]]; then
            headers+=("-H" "Authorization: Bearer ${CLOUDFLARE_API_KEY}")
        else
            headers+=("-H" "X-Auth-Email: ${CLOUDFLARE_EMAIL}")
            headers+=("-H" "X-Auth-Key: ${CLOUDFLARE_API_KEY}")
        fi
    else
        error "Cloudflare API credentials not found!"
        error "Set CLOUDFLARE_API_TOKEN or CLOUDFLARE_EMAIL + CLOUDFLARE_API_KEY"
        exit 1
    fi
    headers+=("-H" "Content-Type: application/json")
    local response
    if [[ -n "$data" ]]; then
        response=$(curl -s -X "$method" "$url" "${headers[@]}" -d "$data")
    else
        response=$(curl -s -X "$method" "$url" "${headers[@]}")
    fi
    # Check if response is valid JSON
    if ! echo "$response" | jq -e . >/dev/null 2>&1; then
        error "Invalid JSON response from API"
        debug "Response: $response"
        return 1
    fi
    # Check for API errors (Cloudflare wraps every response in .success)
    local success=$(echo "$response" | jq -r '.success // false' 2>/dev/null)
    if [[ "$success" != "true" ]]; then
        local errors=$(echo "$response" | jq -r '.errors[]?.message // .error // "Unknown error"' 2>/dev/null | head -3)
        if [[ -z "$errors" ]]; then
            errors="API request failed (check response)"
        fi
        error "API request failed: $errors"
        debug "Response: $response"
        return 1
    fi
    echo "$response"
}
#######################################
# Resolve the Cloudflare zone ID for $DOMAIN.
# Uses CLOUDFLARE_ZONE_ID when provided, otherwise queries the API.
# Outputs: the zone ID on stdout ONLY — diagnostics are forced to stderr
#          so `zone_id=$(get_zone_id)` captures just the ID.
# Exits non-zero when the zone cannot be found.
#######################################
get_zone_id() {
    if [[ -n "$CLOUDFLARE_ZONE_ID" ]]; then
        echo "$CLOUDFLARE_ZONE_ID"
        return 0
    fi
    info "Getting zone ID for domain: $DOMAIN" >&2
    # Combined declaration+assignment deliberately masks cf_api_request's
    # exit status under `set -e`; the empty-result check below handles it.
    local response=$(cf_api_request "GET" "/zones?name=${DOMAIN}")
    local zone_id=$(echo "$response" | jq -r '.result[0].id // empty')
    if [[ -z "$zone_id" ]]; then
        error "Zone not found for domain: $DOMAIN" >&2
        exit 1
    fi
    info "Zone ID: $zone_id" >&2
    echo "$zone_id"
}
#######################################
# Determine the Cloudflare account ID (needed for the Zero Trust API),
# trying in order: token verification, the accounts list, the zone record.
# Outputs: account ID on stdout ONLY; diagnostics on stderr.
# Exits non-zero when no strategy yields an ID.
#######################################
get_account_id() {
    info "Getting account ID..." >&2
    # Try to get from token verification.
    # NOTE(review): /user/tokens/verify returns the *token's* id in
    # .result.id, not necessarily the account id — confirm this fallback
    # chain against the Cloudflare API docs.
    local response=$(cf_api_request "GET" "/user/tokens/verify")
    local account_id=$(echo "$response" | jq -r '.result.id // empty')
    if [[ -z "$account_id" ]]; then
        # Try alternative: first entry of the accounts list
        response=$(cf_api_request "GET" "/accounts")
        account_id=$(echo "$response" | jq -r '.result[0].id // empty')
    fi
    if [[ -z "$account_id" ]]; then
        # Last resort: the owning account of the zone
        local zone_id=$(get_zone_id)
        response=$(cf_api_request "GET" "/zones/${zone_id}")
        account_id=$(echo "$response" | jq -r '.result.account.id // empty')
    fi
    if [[ -z "$account_id" ]]; then
        error "Could not determine account ID" >&2
        error "You may need to specify CLOUDFLARE_ACCOUNT_ID in .env file" >&2
        exit 1
    fi
    info "Account ID: $account_id" >&2
    echo "$account_id"
}
#######################################
# Extract the tunnel UUID from a cloudflared JWT token.
# The token's second (payload) segment is base64-encoded JSON whose "t"
# field holds the tunnel ID.
# Arguments: $1 - token string
# Outputs:   the tunnel ID, or an empty line when the token is not a JWT
#            or cannot be decoded.
#######################################
get_tunnel_id_from_token() {
    local token="$1"
    # Guard clause: anything without two dots is not a JWT.
    if [[ "$token" != *.*.* ]]; then
        echo ""
        return
    fi
    # Grab the payload segment and restore base64 padding if required.
    local payload
    payload=$(echo "$token" | cut -d'.' -f2)
    local pad=$((4 - ${#payload} % 4))
    if [[ $pad -ne 4 ]]; then
        payload="${payload}$(printf '%*s' "$pad" | tr ' ' '=')"
    fi
    # Prefer python3 for robust decoding; fall back to base64 + jq.
    if command -v python3 >/dev/null 2>&1; then
        echo "$payload" | python3 -c "import sys, base64, json; payload=sys.stdin.read().strip(); padding=4-len(payload)%4; payload+=('='*padding if padding<4 else ''); data=json.loads(base64.b64decode(payload)); print(data.get('t', ''))" 2>/dev/null || echo ""
    else
        echo "$payload" | base64 -d 2>/dev/null | jq -r '.t // empty' 2>/dev/null || echo ""
    fi
}
#######################################
# Resolve the tunnel ID: first by decoding the JWT token, then by
# listing the account's tunnels via the Zero Trust API.
# Arguments: $1 account ID, $2 tunnel token (JWT or legacy)
# Outputs:   tunnel ID on stdout ONLY; diagnostics on stderr.
# Exits non-zero when no tunnel ID can be determined.
#######################################
get_tunnel_id() {
    local account_id="$1"
    local token="$2"
    # Preferred source: the 't' claim embedded in the JWT token.
    local tunnel_id=$(get_tunnel_id_from_token "$token")
    if [[ -n "$tunnel_id" ]]; then
        info "Tunnel ID from token: $tunnel_id" >&2
        echo "$tunnel_id"
        return 0
    fi
    # Fallback: take the first tunnel returned by the API.
    warn "Could not extract tunnel ID from token, listing tunnels..." >&2
    local response=$(cf_api_request "GET" "/accounts/${account_id}/cfd_tunnel" 2>/dev/null)
    if [[ -z "$response" ]]; then
        error "Failed to list tunnels. Check API credentials." >&2
        exit 1
    fi
    # Fix: the original re-declared `local tunnel_id` here, shadowing the
    # variable above; reuse it instead.  `|| true` preserves the original
    # error-masking behavior under `set -e -o pipefail`.
    tunnel_id=$(echo "$response" | jq -r '.result[0].id // empty' 2>/dev/null || true)
    if [[ -z "$tunnel_id" ]]; then
        error "Could not find tunnel ID" >&2
        debug "Response: $response" >&2
        exit 1
    fi
    info "Tunnel ID: $tunnel_id" >&2
    echo "$tunnel_id"
}
#######################################
# Fetch a tunnel's display name from the Zero Trust API.
# Arguments: $1 account ID, $2 tunnel ID
# Outputs:   the tunnel name (empty when the lookup fails; callers fall
#            back to a synthetic "tunnel-<prefix>" name).
#######################################
get_tunnel_name() {
    local acct="$1" tid="$2"
    local reply=$(cf_api_request "GET" "/accounts/${acct}/cfd_tunnel/${tid}")
    local name=$(echo "$reply" | jq -r '.result.name // empty')
    echo "$name"
}
#######################################
# Push the tunnel ingress configuration: one rule per RPC subdomain plus
# the mandatory http_status:404 catch-all, via the cfd_tunnel
# configurations endpoint.
# Globals:   RPC_ENDPOINTS, DOMAIN (read)
# Arguments: $1 account ID, $2 tunnel ID, $3 tunnel name (display only)
# Returns:   0 on success, 1 on API failure
#######################################
configure_tunnel_routes() {
    local account_id="$1"
    local tunnel_id="$2"
    local tunnel_name="$3"
    info "Configuring tunnel routes for: $tunnel_name"
    # Build ingress rules array (JSON assembled by string concatenation,
    # then normalized through jq below)
    local ingress_array="["
    local first=true
    for subdomain in "${!RPC_ENDPOINTS[@]}"; do
        local service="${RPC_ENDPOINTS[$subdomain]}"
        local hostname="${subdomain}.${DOMAIN}"
        if [[ "$first" == "true" ]]; then
            first=false
        else
            ingress_array+=","
        fi
        # Determine if WebSocket (subdomain naming convention: contains "ws")
        local is_ws=false
        if [[ "$subdomain" == *"ws"* ]]; then
            is_ws=true
        fi
        # Build ingress rule
        # Add noTLSVerify to skip certificate validation (certificates don't have IP SANs)
        if [[ "$is_ws" == "true" ]]; then
            ingress_array+="{\"hostname\":\"${hostname}\",\"service\":\"${service}\",\"originRequest\":{\"httpHostHeader\":\"${hostname}\",\"noTLSVerify\":true}}"
        else
            ingress_array+="{\"hostname\":\"${hostname}\",\"service\":\"${service}\",\"originRequest\":{\"noTLSVerify\":true}}"
        fi
        info "  Adding route: ${hostname}${service}"
    done
    # Add catch-all (must be last — cloudflared rejects configs without one)
    ingress_array+=",{\"service\":\"http_status:404\"}]"
    # Create config JSON wrapper expected by the configurations endpoint
    local config_data=$(echo "$ingress_array" | jq -c '{
        config: {
            ingress: .
        }
    }')
    info "Updating tunnel configuration..."
    local response=$(cf_api_request "PUT" "/accounts/${account_id}/cfd_tunnel/${tunnel_id}/configurations" "$config_data")
    if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
        info "✓ Tunnel routes configured successfully"
    else
        local errors=$(echo "$response" | jq -r '.errors[]?.message // "Unknown error"' | head -3)
        error "Failed to configure tunnel routes: $errors"
        debug "Response: $response"
        return 1
    fi
}
#######################################
# Upsert a CNAME record <name>.<DOMAIN> -> <target>.
# Arguments: $1 zone ID, $2 record name (subdomain), $3 CNAME target,
#            $4 optional proxied flag (default "true")
# Returns:   0 on success, 1 on API failure
#######################################
create_or_update_dns_record() {
    local zone_id="$1"
    local name="$2"
    local target="$3"
    local proxied="${4:-true}"
    # Check if record exists (lookup by FQDN + type)
    local response=$(cf_api_request "GET" "/zones/${zone_id}/dns_records?name=${name}.${DOMAIN}&type=CNAME")
    local record_id=$(echo "$response" | jq -r '.result[0].id // empty')
    # ttl=1 means "automatic" in the Cloudflare API
    local data=$(jq -n \
        --arg name "${name}.${DOMAIN}" \
        --arg target "$target" \
        --argjson proxied "$proxied" \
        '{
            type: "CNAME",
            name: $name,
            content: $target,
            proxied: $proxied,
            ttl: 1
        }')
    if [[ -n "$record_id" ]]; then
        info "  Updating existing DNS record: ${name}.${DOMAIN}"
        response=$(cf_api_request "PUT" "/zones/${zone_id}/dns_records/${record_id}" "$data")
    else
        info "  Creating DNS record: ${name}.${DOMAIN}"
        response=$(cf_api_request "POST" "/zones/${zone_id}/dns_records" "$data")
    fi
    if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
        info "  ✓ DNS record configured"
    else
        error "  ✗ Failed to configure DNS record"
        return 1
    fi
}
#######################################
# Point every RPC subdomain at the tunnel via CNAME records.
# Arguments: $1 zone ID, $2 tunnel ID
#######################################
configure_dns_records() {
    local zone="$1" tunnel="$2"
    # cloudflared tunnels are addressed as <tunnel-id>.cfargotunnel.com
    local target="${tunnel}.cfargotunnel.com"
    info "Configuring DNS records..."
    info "Tunnel target: $target"
    for subdomain in "${!RPC_ENDPOINTS[@]}"; do
        create_or_update_dns_record "$zone" "$subdomain" "$target" "true"
    done
}
#######################################
# Entry point: validate credentials, resolve zone/account/tunnel IDs,
# then configure tunnel ingress routes and DNS records.
# Globals: CLOUDFLARE_* credentials, TUNNEL_TOKEN, DOMAIN (read)
# Exits non-zero when credentials or the tunnel ID cannot be resolved.
#######################################
main() {
    info "Cloudflare API Configuration Script"
    info "===================================="
    echo ""
    # Validate credentials: need either an API token or key(+email)
    if [[ -z "$CLOUDFLARE_API_TOKEN" ]] && [[ -z "$CLOUDFLARE_EMAIL" ]] && [[ -z "$CLOUDFLARE_API_KEY" ]]; then
        error "Cloudflare API credentials required!"
        echo ""
        echo "Set one of:"
        echo "  export CLOUDFLARE_API_TOKEN='your-api-token'"
        echo "  OR"
        echo "  export CLOUDFLARE_EMAIL='your-email@example.com'"
        echo "  export CLOUDFLARE_API_KEY='your-api-key'"
        echo ""
        echo "You can also create a .env file in the project root with these variables."
        exit 1
    fi
    # If API_KEY is provided but no email, we need email for Global API Key
    if [[ -n "$CLOUDFLARE_API_KEY" ]] && [[ -z "$CLOUDFLARE_EMAIL" ]] && [[ -z "$CLOUDFLARE_API_TOKEN" ]]; then
        error "CLOUDFLARE_API_KEY requires CLOUDFLARE_EMAIL"
        error "Please add CLOUDFLARE_EMAIL to your .env file"
        error ""
        error "OR create an API Token instead:"
        error "  1. Go to: https://dash.cloudflare.com/profile/api-tokens"
        error "  2. Create token with: Zone:DNS:Edit, Account:Cloudflare Tunnel:Edit"
        error "  3. Set CLOUDFLARE_API_TOKEN in .env"
        exit 1
    fi
    # Get zone ID.
    # NOTE(review): $(...) captures everything the helper prints to stdout;
    # confirm info()/warn() log to stderr so only the ID is captured here.
    local zone_id=$(get_zone_id)
    # Get account ID (explicit env var wins over API discovery)
    local account_id="${CLOUDFLARE_ACCOUNT_ID:-}"
    if [[ -z "$account_id" ]]; then
        account_id=$(get_account_id)
    else
        info "Using provided Account ID: $account_id"
    fi
    # Get tunnel ID - try from .env first, then extraction, then API
    local tunnel_id="${CLOUDFLARE_TUNNEL_ID:-}"
    # If not in .env, try to extract from JWT token (inline duplicate of
    # get_tunnel_id_from_token's decoding logic)
    if [[ -z "$tunnel_id" ]] && [[ "$TUNNEL_TOKEN" == *.*.* ]]; then
        local payload=$(echo "$TUNNEL_TOKEN" | cut -d'.' -f2)
        local padding=$((4 - ${#payload} % 4))
        if [[ $padding -ne 4 ]]; then
            payload="${payload}$(printf '%*s' $padding | tr ' ' '=')"
        fi
        if command -v python3 >/dev/null 2>&1; then
            tunnel_id=$(echo "$payload" | python3 -c "import sys, base64, json; payload=sys.stdin.read().strip(); padding=4-len(payload)%4; payload+=('='*padding if padding<4 else ''); data=json.loads(base64.b64decode(payload)); print(data.get('t', ''))" 2>/dev/null || echo "")
        fi
    fi
    # If extraction failed, try API (but don't fail if API doesn't work)
    if [[ -z "$tunnel_id" ]]; then
        tunnel_id=$(get_tunnel_id "$account_id" "$TUNNEL_TOKEN" 2>/dev/null || echo "")
    fi
    if [[ -z "$tunnel_id" ]]; then
        error "Could not determine tunnel ID"
        error "Please set CLOUDFLARE_TUNNEL_ID in .env file"
        error "Or ensure API credentials are valid to fetch it automatically"
        exit 1
    fi
    info "Using Tunnel ID: $tunnel_id"
    # Fall back to a synthetic name when the API lookup fails
    local tunnel_name=$(get_tunnel_name "$account_id" "$tunnel_id" 2>/dev/null || echo "tunnel-${tunnel_id:0:8}")
    echo ""
    info "Configuration Summary:"
    echo "  Domain: $DOMAIN"
    echo "  Zone ID: $zone_id"
    echo "  Account ID: $account_id"
    echo "  Tunnel: $tunnel_name (ID: $tunnel_id)"
    echo ""
    # Configure tunnel routes
    echo "=========================================="
    info "Step 1: Configuring Tunnel Routes"
    echo "=========================================="
    configure_tunnel_routes "$account_id" "$tunnel_id" "$tunnel_name"
    echo ""
    echo "=========================================="
    info "Step 2: Configuring DNS Records"
    echo "=========================================="
    configure_dns_records "$zone_id" "$tunnel_id"
    echo ""
    echo "=========================================="
    info "Configuration Complete!"
    echo "=========================================="
    echo ""
    info "Next steps:"
    echo "  1. Wait 1-2 minutes for DNS propagation"
    echo "  2. Test endpoints:"
    echo "     curl https://rpc-http-pub.d-bis.org/health"
    echo "  3. Verify in Cloudflare Dashboard:"
    echo "     - Zero Trust → Networks → Tunnels → Check routes"
    echo "     - DNS → Records → Verify CNAME records"
}
# Run main function
main

View File

@@ -0,0 +1,218 @@
#!/bin/bash
# Configure Cloudflare DNS and SSL via API using .env credentials
# This script does NOT require container access - only Cloudflare API
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Optional site-wide IP map; a missing file is tolerated (defaults below apply).
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'
# Logging helpers: colored level tag + message on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"
# Configuration (env-overridable defaults)
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# FIX: the previous default was a mangled nested substitution
# ("${IP_DEVICE_14:-...192.168.11.14}0") that appended a stray "0" to
# whatever IP_DEVICE_14 held. Restore the literal explorer address used by
# the sibling scripts; override via EXPLORER_IP when needed.
EXPLORER_IP="${EXPLORER_IP:-192.168.11.140}"
# SECURITY: a tunnel token is a credential and should not be committed to
# source control. Allow an environment override; rotate the embedded token.
TUNNEL_TOKEN="${TUNNEL_TOKEN:-eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiYjAyZmUxZmUtY2I3ZC00ODRlLTkwOWItN2NjNDEyOThlYmU4IiwicyI6Ik5HTmtOV0kwWXpNdFpUVmxaUzAwTVRFMkxXRXdNMk10WlRJNU1ETTFaRFF4TURBMiJ9}"
echo ""
log_info "═══════════════════════════════════════════════════════════"
log_info "  CLOUDFLARE DNS & SSL CONFIGURATION (API)"
log_info "═══════════════════════════════════════════════════════════"
echo ""
# Load .env — all credentials come from this file; abort early if missing.
if [ ! -f "$ENV_FILE" ]; then
    log_error ".env file not found: $ENV_FILE"
    exit 1
fi
# set -a auto-exports every variable assigned while sourcing the file.
set -a
source "$ENV_FILE"
set +a
# Get credentials (default to empty so 'set -u' does not abort on unset vars)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
# Determine auth method: scoped API token (preferred) or legacy global key.
# AUTH_HEADERS is an array so each -H flag/value pair survives quoting when
# expanded as "${AUTH_HEADERS[@]}" in the curl calls below.
AUTH_HEADERS=()
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
    AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
    log_success "Using API Token"
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
    # Strip surrounding double quotes that sometimes leak in from .env files.
    CLOUDFLARE_API_KEY=$(echo "$CLOUDFLARE_API_KEY" | tr -d '"')
    CLOUDFLARE_EMAIL=$(echo "$CLOUDFLARE_EMAIL" | tr -d '"')
    AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
    log_success "Using API Key"
else
    log_error "No Cloudflare credentials found"
    exit 1
fi
# Extract tunnel ID from token. The token is base64-encoded JSON with short
# keys; the '.t' field appears to hold the tunnel ID, so the second attempt
# below is the one that matches ('.TunnelID' seems to never be present —
# TODO confirm against a freshly issued connector token). 'base64 -d' may
# also fail on an unpadded token; every failure path collapses to "".
log_info "Extracting tunnel ID from token..."
TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.TunnelID // empty' 2>/dev/null || echo "")
if [ -z "$TUNNEL_ID" ]; then
    # Try alternative extraction
    TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.t // empty' 2>/dev/null || echo "")
fi
if [ -z "$TUNNEL_ID" ]; then
    log_warn "Could not extract tunnel ID from token"
    log_info "You may need to get tunnel ID from: cloudflared tunnel list"
    # NOTE(review): "<tunnel-id>" is a placeholder; the DNS step below still
    # runs and would create a CNAME to "<tunnel-id>.cfargotunnel.com".
    TUNNEL_ID="<tunnel-id>"
else
    log_success "Tunnel ID: $TUNNEL_ID"
fi
# Get Zone ID — looked up by domain name unless provided in .env.
if [ -z "$CLOUDFLARE_ZONE_ID" ]; then
    log_info "Getting zone ID for $DOMAIN..."
    ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    CLOUDFLARE_ZONE_ID=$(echo "$ZONE_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
    if [ -z "$CLOUDFLARE_ZONE_ID" ] || [ "$CLOUDFLARE_ZONE_ID" = "null" ]; then
        log_error "Failed to get zone ID"
        exit 1
    fi
fi
log_success "Zone ID: $CLOUDFLARE_ZONE_ID"
# Get Account ID — needed for the tunnel-configuration API; optional here
# (the tunnel-route step degrades to a warning when it stays empty).
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_info "Getting account ID..."
    ACCOUNT_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    CLOUDFLARE_ACCOUNT_ID=$(echo "$ACCOUNT_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_success "Account ID: $CLOUDFLARE_ACCOUNT_ID"
fi
# Configure DNS: upsert a proxied CNAME for the explorer hostname pointing
# at the tunnel's cfargotunnel.com endpoint. Proxying (orange cloud) also
# provides Cloudflare Universal SSL on the hostname.
log_info "Configuring DNS record..."
TARGET="${TUNNEL_ID}.cfargotunnel.com"
# Look up any existing record so we update (PUT) instead of duplicating (POST).
EXISTING=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records?name=$EXPLORER_DOMAIN" \
    "${AUTH_HEADERS[@]}" \
    -H "Content-Type: application/json")
RECORD_ID=$(echo "$EXISTING" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
# Build the payload with jq so all values are JSON-escaped. ttl=1 means
# "automatic" in the Cloudflare API. The subdomain label is derived from
# EXPLORER_DOMAIN (previously hard-coded "explorer") so the two cannot drift.
DNS_DATA=$(jq -n \
    --arg name "${EXPLORER_DOMAIN%%.*}" \
    --arg target "$TARGET" \
    '{
        type: "CNAME",
        name: $name,
        content: $target,
        proxied: true,
        ttl: 1
    }')
if [ -n "$RECORD_ID" ] && [ "$RECORD_ID" != "null" ]; then
    log_info "Updating existing DNS record..."
    DNS_RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records/$RECORD_ID" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$DNS_DATA")
else
    log_info "Creating new DNS record..."
    DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$DNS_DATA")
fi
if echo "$DNS_RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
    # FIX: restore the " → " separator that was missing between the domain
    # and its CNAME target in this success message (present in the sibling
    # tunnel-route message below).
    log_success "DNS record configured: $EXPLORER_DOMAIN → $TARGET (🟠 Proxied)"
else
    ERROR=$(echo "$DNS_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
    log_error "DNS configuration failed: $ERROR"
    echo "$DNS_RESPONSE" | jq '.' 2>/dev/null || echo "$DNS_RESPONSE"
    exit 1
fi
# Configure Tunnel Route (if account ID available)
# Skipped when the account ID lookup failed or the tunnel ID is the placeholder.
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ] && [ "$TUNNEL_ID" != "<tunnel-id>" ]; then
    log_info "Configuring tunnel route..."
    # jq -n builds a JSON-escaped ingress payload; the trailing
    # http_status:404 rule is the required catch-all.
    # NOTE(review): this PUT replaces the tunnel's ENTIRE remote config —
    # any other hostnames previously routed through this tunnel are dropped
    # in favor of this two-rule ingress. Confirm that is intended.
    TUNNEL_CONFIG=$(jq -n \
        --arg hostname "$EXPLORER_DOMAIN" \
        --arg service "http://$EXPLORER_IP:80" \
        '{
    config: {
      ingress: [
        {
          hostname: $hostname,
          service: $service
        },
        {
          service: "http_status:404"
        }
      ]
    }
    }')
    TUNNEL_UPDATE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$TUNNEL_CONFIG")
    if echo "$TUNNEL_UPDATE" | jq -e '.success' >/dev/null 2>&1; then
        log_success "Tunnel route configured: $EXPLORER_DOMAIN → http://$EXPLORER_IP:80"
    else
        ERROR=$(echo "$TUNNEL_UPDATE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
        log_warn "Tunnel route configuration failed: $ERROR"
        log_info "Configure manually in Cloudflare Zero Trust dashboard"
    fi
else
    log_warn "Tunnel route requires manual configuration"
fi
# SSL is automatic with Cloudflare proxy
log_success "SSL/TLS: Automatic (Cloudflare Universal SSL enabled)"
# Verify: give Cloudflare a moment, then probe the public stats endpoint.
log_info "Waiting for DNS propagation (10 seconds)..."
sleep 10
# curl prints only the HTTP status code (-w '%{http_code}'); body discarded.
PUBLIC_HTTP=$(curl -s -o /dev/null -w "%{http_code}" "https://$EXPLORER_DOMAIN/api/v2/stats" 2>&1)
if [ "$PUBLIC_HTTP" = "200" ]; then
    log_success "Public URL: HTTP 200 - Working!"
else
    log_warn "Public URL: HTTP $PUBLIC_HTTP (may need more time for propagation)"
fi
echo ""
log_success "Configuration complete!"
echo ""

View File

@@ -0,0 +1,212 @@
#!/bin/bash
# Configure Cloudflare DNS and SSL via API using .env credentials
# This script does NOT require container access - only Cloudflare API
set -euo pipefail
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m'
# Logging helpers: colored level tag + message on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"
# Configuration (env-overridable defaults)
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
EXPLORER_IP="${EXPLORER_IP:-192.168.11.140}"
# SECURITY: a tunnel token is a credential and should not be committed to
# source control. Allow an environment override; rotate the embedded token.
TUNNEL_TOKEN="${TUNNEL_TOKEN:-eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiYjAyZmUxZmUtY2I3ZC00ODRlLTkwOWItN2NjNDEyOThlYmU4IiwicyI6Ik5HTmtOV0kwWXpNdFpUVmxaUzAwTVRFMkxXRXdNMk10WlRJNU1ETTFaRFF4TURBMiJ9}"
echo ""
log_info "═══════════════════════════════════════════════════════════"
log_info "  CLOUDFLARE DNS & SSL CONFIGURATION (API)"
log_info "═══════════════════════════════════════════════════════════"
echo ""
# Load .env — all credentials come from this file; abort early if missing.
if [ ! -f "$ENV_FILE" ]; then
    log_error ".env file not found: $ENV_FILE"
    exit 1
fi
# set -a auto-exports every variable assigned while sourcing the file.
set -a
source "$ENV_FILE"
set +a
# Get credentials (default to empty so 'set -u' does not abort on unset vars)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
# Determine auth method: scoped API token (preferred) or legacy global key.
# AUTH_HEADERS is an array so each -H flag/value pair survives quoting when
# expanded as "${AUTH_HEADERS[@]}" in the curl calls below.
AUTH_HEADERS=()
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
    AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
    log_success "Using API Token"
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
    # Strip surrounding double quotes that sometimes leak in from .env files.
    CLOUDFLARE_API_KEY=$(echo "$CLOUDFLARE_API_KEY" | tr -d '"')
    CLOUDFLARE_EMAIL=$(echo "$CLOUDFLARE_EMAIL" | tr -d '"')
    AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
    log_success "Using API Key"
else
    log_error "No Cloudflare credentials found"
    exit 1
fi
# Extract tunnel ID from token. The token is base64-encoded JSON with short
# keys; the '.t' field appears to hold the tunnel ID, so the second attempt
# below is the one that matches ('.TunnelID' seems to never be present —
# TODO confirm against a freshly issued connector token). 'base64 -d' may
# also fail on an unpadded token; every failure path collapses to "".
log_info "Extracting tunnel ID from token..."
TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.TunnelID // empty' 2>/dev/null || echo "")
if [ -z "$TUNNEL_ID" ]; then
    # Try alternative extraction
    TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.t // empty' 2>/dev/null || echo "")
fi
if [ -z "$TUNNEL_ID" ]; then
    log_warn "Could not extract tunnel ID from token"
    log_info "You may need to get tunnel ID from: cloudflared tunnel list"
    # NOTE(review): "<tunnel-id>" is a placeholder; the DNS step below still
    # runs and would create a CNAME to "<tunnel-id>.cfargotunnel.com".
    TUNNEL_ID="<tunnel-id>"
else
    log_success "Tunnel ID: $TUNNEL_ID"
fi
# Get Zone ID — looked up by domain name unless provided in .env.
if [ -z "$CLOUDFLARE_ZONE_ID" ]; then
    log_info "Getting zone ID for $DOMAIN..."
    ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    CLOUDFLARE_ZONE_ID=$(echo "$ZONE_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
    if [ -z "$CLOUDFLARE_ZONE_ID" ] || [ "$CLOUDFLARE_ZONE_ID" = "null" ]; then
        log_error "Failed to get zone ID"
        exit 1
    fi
fi
log_success "Zone ID: $CLOUDFLARE_ZONE_ID"
# Get Account ID — needed for the tunnel-configuration API; optional here
# (the tunnel-route step degrades to a warning when it stays empty).
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_info "Getting account ID..."
    ACCOUNT_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    CLOUDFLARE_ACCOUNT_ID=$(echo "$ACCOUNT_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_success "Account ID: $CLOUDFLARE_ACCOUNT_ID"
fi
# Configure DNS: upsert a proxied CNAME for the explorer hostname pointing
# at the tunnel's cfargotunnel.com endpoint. Proxying (orange cloud) also
# provides Cloudflare Universal SSL on the hostname.
log_info "Configuring DNS record..."
TARGET="${TUNNEL_ID}.cfargotunnel.com"
# Look up any existing record so we update (PUT) instead of duplicating (POST).
EXISTING=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records?name=$EXPLORER_DOMAIN" \
    "${AUTH_HEADERS[@]}" \
    -H "Content-Type: application/json")
RECORD_ID=$(echo "$EXISTING" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
# Build the payload with jq so all values are JSON-escaped. ttl=1 means
# "automatic" in the Cloudflare API. The subdomain label is derived from
# EXPLORER_DOMAIN (previously hard-coded "explorer") so the two cannot drift.
DNS_DATA=$(jq -n \
    --arg name "${EXPLORER_DOMAIN%%.*}" \
    --arg target "$TARGET" \
    '{
        type: "CNAME",
        name: $name,
        content: $target,
        proxied: true,
        ttl: 1
    }')
if [ -n "$RECORD_ID" ] && [ "$RECORD_ID" != "null" ]; then
    log_info "Updating existing DNS record..."
    DNS_RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records/$RECORD_ID" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$DNS_DATA")
else
    log_info "Creating new DNS record..."
    DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$DNS_DATA")
fi
if echo "$DNS_RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
    # FIX: restore the " → " separator that was missing between the domain
    # and its CNAME target in this success message (present in the sibling
    # tunnel-route message below).
    log_success "DNS record configured: $EXPLORER_DOMAIN → $TARGET (🟠 Proxied)"
else
    ERROR=$(echo "$DNS_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
    log_error "DNS configuration failed: $ERROR"
    echo "$DNS_RESPONSE" | jq '.' 2>/dev/null || echo "$DNS_RESPONSE"
    exit 1
fi
# Configure Tunnel Route (if account ID available)
# Skipped when the account ID lookup failed or the tunnel ID is the placeholder.
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ] && [ "$TUNNEL_ID" != "<tunnel-id>" ]; then
    log_info "Configuring tunnel route..."
    # jq -n builds a JSON-escaped ingress payload; the trailing
    # http_status:404 rule is the required catch-all.
    # NOTE(review): this PUT replaces the tunnel's ENTIRE remote config —
    # any other hostnames previously routed through this tunnel are dropped
    # in favor of this two-rule ingress. Confirm that is intended.
    TUNNEL_CONFIG=$(jq -n \
        --arg hostname "$EXPLORER_DOMAIN" \
        --arg service "http://$EXPLORER_IP:80" \
        '{
    config: {
      ingress: [
        {
          hostname: $hostname,
          service: $service
        },
        {
          service: "http_status:404"
        }
      ]
    }
    }')
    TUNNEL_UPDATE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$TUNNEL_CONFIG")
    if echo "$TUNNEL_UPDATE" | jq -e '.success' >/dev/null 2>&1; then
        log_success "Tunnel route configured: $EXPLORER_DOMAIN → http://$EXPLORER_IP:80"
    else
        ERROR=$(echo "$TUNNEL_UPDATE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
        log_warn "Tunnel route configuration failed: $ERROR"
        log_info "Configure manually in Cloudflare Zero Trust dashboard"
    fi
else
    log_warn "Tunnel route requires manual configuration"
fi
# SSL is automatic with Cloudflare proxy
log_success "SSL/TLS: Automatic (Cloudflare Universal SSL enabled)"
# Verify: give Cloudflare a moment, then probe the public stats endpoint.
log_info "Waiting for DNS propagation (10 seconds)..."
sleep 10
# curl prints only the HTTP status code (-w '%{http_code}'); body discarded.
PUBLIC_HTTP=$(curl -s -o /dev/null -w "%{http_code}" "https://$EXPLORER_DOMAIN/api/v2/stats" 2>&1)
if [ "$PUBLIC_HTTP" = "200" ]; then
    log_success "Public URL: HTTP 200 - Working!"
else
    log_warn "Public URL: HTTP $PUBLIC_HTTP (may need more time for propagation)"
fi
echo ""
log_success "Configuration complete!"
echo ""

View File

@@ -0,0 +1,405 @@
#!/bin/bash
# Complete Cloudflare Configuration for Explorer - Automated
# Uses .env credentials to configure DNS, SSL, and tunnel routes
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Optional site-wide IP map; a missing file is tolerated (defaults below apply).
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
# Logging helpers: colored level tag + message on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "${CYAN}════════════════════════════════════════${NC}"; }
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"
# Configuration (env-overridable defaults)
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# FIX: the previous default was a mangled nested substitution
# ("${IP_DEVICE_14:-...192.168.11.14}0") that appended a stray "0" to
# whatever IP_DEVICE_14 held. Restore the literal explorer address used by
# the sibling scripts; override via EXPLORER_IP when needed.
EXPLORER_IP="${EXPLORER_IP:-192.168.11.140}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
# LXC container ID of the explorer on the Proxmox host.
VMID=5000
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
# SECURITY: a tunnel token is a credential and should not be committed to
# source control. Allow an environment override; rotate the embedded token.
TUNNEL_TOKEN="${TUNNEL_TOKEN:-eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiYjAyZmUxZmUtY2I3ZC00ODRlLTkwOWItN2NjNDEyOThlYmU4IiwicyI6Ik5HTmtOV0kwWXpNdFpUVmxaUzAwTVRFMkxXRXdNMk10WlRJNU1ETTFaRFF4TURBMiJ9}"
echo ""
log_section
log_info "  COMPLETE CLOUDFLARE EXPLORER CONFIGURATION"
log_info "  Using .env Credentials for Full Automation"
log_section
echo ""
# Step 1: Load .env file
log_info "Step 1: Loading credentials from .env file..."
if [ ! -f "$ENV_FILE" ]; then
    log_error ".env file not found: $ENV_FILE"
    log_info "Looking for .env files..."
    # Help the operator locate a misplaced .env (show up to 3 candidates).
    find "$SCRIPT_DIR/.." -maxdepth 2 -name ".env" -type f 2>/dev/null | head -3
    log_info ""
    log_info "Please create .env file with:"
    echo "  CLOUDFLARE_API_TOKEN=your-token"
    echo "  CLOUDFLARE_ZONE_ID=your-zone-id (optional)"
    echo "  CLOUDFLARE_ACCOUNT_ID=your-account-id (optional)"
    echo "  DOMAIN=d-bis.org"
    exit 1
fi
# Source .env file — set -a auto-exports every variable it assigns.
set -a
source "$ENV_FILE"
set +a
log_success ".env file loaded"
# Check for required credentials (default to empty so 'set -u' is safe)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
# Determine auth method: scoped API token (preferred) or legacy global key.
# AUTH_HEADERS is an array so each -H flag/value pair survives quoting in curl.
# NOTE(review): AUTH_METHOD is assigned but never read in the visible flow.
AUTH_METHOD=""
AUTH_HEADERS=()
# Check for API_TOKEN first (preferred), then API_KEY
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
    AUTH_METHOD="token"
    AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
    log_success "Using API Token authentication"
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
    AUTH_METHOD="key"
    # Remove quotes from API_KEY if present (they sometimes leak in from .env)
    CLOUDFLARE_API_KEY=$(echo "$CLOUDFLARE_API_KEY" | tr -d '"')
    CLOUDFLARE_EMAIL=$(echo "$CLOUDFLARE_EMAIL" | tr -d '"')
    AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
    log_success "Using API Key authentication"
else
    log_error "No Cloudflare API credentials found in .env"
    log_info "Required: CLOUDFLARE_API_TOKEN or (CLOUDFLARE_API_KEY + CLOUDFLARE_EMAIL)"
    exit 1
fi
# Function to find container node
# Probes the known Proxmox nodes over SSH for LXC $VMID via pvesh and prints
# the first node that answers; falls back to "pve2" when none respond (or
# when the SSH connection itself fails).
# NOTE(review): not called anywhere in this script's visible flow —
# exec_container talks to $PROXMOX_HOST directly; possibly dead code.
find_container_node() {
    ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" \
        "for node in ml110 pve pve2; do if pvesh get /nodes/\$node/lxc/$VMID/status/current --output-format json >/dev/null 2>&1; then echo \$node; break; fi; done" 2>/dev/null || echo "pve2"
}
# Function to execute command in container
# Runs $1 inside LXC $VMID via 'pct exec' on the Proxmox host, merging the
# remote stderr into stdout so callers can capture diagnostics.
# NOTE(review): $cmd is interpolated inside the remote single quotes, so a
# command containing an unbalanced single quote will break the remote
# quoting — confirm all call sites stay safe (the 'inactive' caller below
# happens to survive because the re-opened word has no whitespace).
exec_container() {
    local cmd="$1"
    # Try direct pct exec via main host first
    ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec $VMID -- bash -c '$cmd'" 2>&1
}
# Step 2: Install Cloudflare Tunnel Service
# Installs cloudflared inside LXC $VMID (via exec_container), registers the
# connector with the tunnel token, and starts/enables the systemd unit.
log_section
log_info "Step 2: Installing Cloudflare Tunnel Service"
log_section
log_info "Checking cloudflared installation..."
if ! exec_container "command -v cloudflared >/dev/null 2>&1"; then
    log_info "Installing cloudflared..."
    # Download the latest release .deb; 'apt install -f -y' repairs any
    # dependency errors dpkg reports.
    exec_container "cd /tmp && wget -q https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb && dpkg -i cloudflared-linux-amd64.deb || apt install -f -y"
    log_success "cloudflared installed"
else
    log_success "cloudflared already installed"
fi
log_info "Installing tunnel service with token..."
INSTALL_OUTPUT=$(exec_container "cloudflared service install $TUNNEL_TOKEN 2>&1" || echo "INSTALL_FAILED")
if echo "$INSTALL_OUTPUT" | grep -q -E "successfully|installed|Service installed"; then
    log_success "Tunnel service installed"
else
    log_warn "Installation output: $INSTALL_OUTPUT"
    # Continue - service might already be installed
fi
log_info "Starting cloudflared service..."
# Best-effort under 'set -e': a unit that is already running/enabled must
# not abort the script.
exec_container "systemctl start cloudflared" || true
exec_container "systemctl enable cloudflared" || true
sleep 3
CLOUDFLARED_STATUS=$(exec_container "systemctl is-active cloudflared 2>/dev/null || echo 'inactive'")
if [ "$CLOUDFLARED_STATUS" = "active" ]; then
    log_success "Cloudflared service is running"
else
    log_warn "Cloudflared service is $CLOUDFLARED_STATUS"
fi
# Get tunnel ID
log_info "Getting tunnel ID..."
TUNNEL_LIST=$(exec_container "cloudflared tunnel list 2>&1" || echo "")
# Parse the first data row of the table (header row filtered out).
# Assumes the account has exactly one tunnel — TODO confirm.
TUNNEL_ID=$(echo "$TUNNEL_LIST" | grep -v "NAME" | head -1 | awk '{print $1}' || echo "")
if [ -n "$TUNNEL_ID" ]; then
    log_success "Tunnel ID: $TUNNEL_ID"
else
    log_warn "Could not get tunnel ID from tunnel list"
    # Try to extract from token (base64 decode)
    TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.TunnelID // empty' 2>/dev/null || echo "")
    if [ -z "$TUNNEL_ID" ]; then
        # Consistency fix: the connector token payload uses short keys
        # ({a, t, s}), so also try '.t' — the same fallback the sibling
        # configure-cloudflare-dns-ssl scripts rely on. Previously this
        # script only tried '.TunnelID' and could exit with a valid token.
        TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.t // empty' 2>/dev/null || echo "")
    fi
    if [ -n "$TUNNEL_ID" ]; then
        log_success "Tunnel ID from token: $TUNNEL_ID"
    else
        log_error "Cannot determine tunnel ID"
        exit 1
    fi
fi
# Step 3: Get Zone ID — looked up by domain name unless provided in .env.
log_section
log_info "Step 3: Getting Cloudflare Zone ID"
log_section
if [ -z "$CLOUDFLARE_ZONE_ID" ]; then
    log_info "Fetching zone ID for $DOMAIN..."
    ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    ZONE_ID=$(echo "$ZONE_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
    if [ -z "$ZONE_ID" ] || [ "$ZONE_ID" = "null" ]; then
        ERROR=$(echo "$ZONE_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
        log_error "Failed to get zone ID: $ERROR"
        exit 1
    fi
    log_success "Zone ID: $ZONE_ID"
else
    ZONE_ID="$CLOUDFLARE_ZONE_ID"
    log_success "Using provided Zone ID: $ZONE_ID"
fi
# Step 4: Get Account ID (for tunnel configuration) — optional; the
# tunnel-route step falls back to manual instructions when it stays empty.
log_section
log_info "Step 4: Getting Cloudflare Account ID"
log_section
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_info "Fetching account ID..."
    ACCOUNT_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    CLOUDFLARE_ACCOUNT_ID=$(echo "$ACCOUNT_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
    if [ -z "$CLOUDFLARE_ACCOUNT_ID" ] || [ "$CLOUDFLARE_ACCOUNT_ID" = "null" ]; then
        log_warn "Could not get account ID automatically"
    else
        log_success "Account ID: $CLOUDFLARE_ACCOUNT_ID"
    fi
else
    log_success "Using provided Account ID: $CLOUDFLARE_ACCOUNT_ID"
fi
# Step 5: Configure DNS Record
# Upserts a proxied CNAME for the explorer hostname pointing at the tunnel.
log_section
log_info "Step 5: Configuring DNS Record"
log_section
TARGET="${TUNNEL_ID}.cfargotunnel.com"
log_info "DNS Target: $TARGET"
# Check if record exists (and of what type) so we can update vs. recreate.
EXISTING_RECORD=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?name=$EXPLORER_DOMAIN" \
    "${AUTH_HEADERS[@]}" \
    -H "Content-Type: application/json")
RECORD_ID=$(echo "$EXISTING_RECORD" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
EXISTING_TYPE=$(echo "$EXISTING_RECORD" | jq -r '.result[0].type // empty' 2>/dev/null || echo "")
# jq -n builds a JSON-escaped payload; ttl=1 means "automatic".
# NOTE(review): the subdomain "explorer" is hard-coded here while the FQDN
# comes from EXPLORER_DOMAIN — keep the two in sync.
DNS_DATA=$(jq -n \
    --arg name "explorer" \
    --arg target "$TARGET" \
    '{
        type: "CNAME",
        name: $name,
        content: $target,
        proxied: true,
        ttl: 1
    }')
if [ -n "$RECORD_ID" ] && [ "$RECORD_ID" != "null" ]; then
    log_info "Updating existing DNS record (ID: $RECORD_ID, Type: $EXISTING_TYPE)..."
    if [ "$EXISTING_TYPE" != "CNAME" ]; then
        # A record's type cannot be changed in place: delete + recreate.
        log_warn "Existing record is type $EXISTING_TYPE, deleting and creating CNAME..."
        curl -s -X DELETE "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
            "${AUTH_HEADERS[@]}" \
            -H "Content-Type: application/json" >/dev/null 2>&1
        DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
            "${AUTH_HEADERS[@]}" \
            -H "Content-Type: application/json" \
            --data "$DNS_DATA")
    else
        DNS_RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
            "${AUTH_HEADERS[@]}" \
            -H "Content-Type: application/json" \
            --data "$DNS_DATA")
    fi
else
    log_info "Creating new DNS record..."
    DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$DNS_DATA")
fi
if echo "$DNS_RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
    log_success "DNS record configured successfully"
    # Echo back what the API actually stored (fall back to our own inputs).
    DNS_NAME=$(echo "$DNS_RESPONSE" | jq -r '.result.name' 2>/dev/null || echo "$EXPLORER_DOMAIN")
    DNS_TARGET=$(echo "$DNS_RESPONSE" | jq -r '.result.content' 2>/dev/null || echo "$TARGET")
    DNS_PROXIED=$(echo "$DNS_RESPONSE" | jq -r '.result.proxied' 2>/dev/null || echo "true")
    log_info "  Name: $DNS_NAME"
    log_info "  Target: $DNS_TARGET"
    log_info "  Proxied: $DNS_PROXIED"
else
    ERROR=$(echo "$DNS_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
    log_error "Failed to configure DNS: $ERROR"
    echo "$DNS_RESPONSE" | jq '.' 2>/dev/null || echo "$DNS_RESPONSE"
    exit 1
fi
# Step 6: Configure Tunnel Route
log_section
log_info "Step 6: Configuring Tunnel Route"
log_section
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ] || [ "$CLOUDFLARE_ACCOUNT_ID" = "null" ]; then
    log_warn "Account ID not available - tunnel route must be configured manually"
    log_info "Configure in Cloudflare Zero Trust Dashboard:"
    echo "  1. Go to: https://one.dash.cloudflare.com/"
    echo "  2. Zero Trust → Networks → Tunnels"
    echo "  3. Select tunnel: $TUNNEL_ID"
    echo "  4. Configure → Public Hostnames → Add hostname"
    echo "  5. Subdomain: explorer, Domain: $DOMAIN"
    echo "  6. Service: http://$EXPLORER_IP:$EXPLORER_PORT"
else
    log_info "Configuring tunnel route via API..."
    # Get current tunnel configuration
    # NOTE(review): this GET result is captured but never read — the PUT
    # below builds NEW_CONFIG from scratch, so any hostnames previously
    # routed through this tunnel are replaced, not merged. Confirm intended.
    TUNNEL_CONFIG=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json")
    # Build new ingress configuration (jq -n keeps values JSON-escaped; the
    # trailing http_status:404 rule is the required catch-all).
    NEW_CONFIG=$(jq -n \
        --arg hostname "$EXPLORER_DOMAIN" \
        --arg service "http://$EXPLORER_IP:$EXPLORER_PORT" \
        '{
    config: {
      ingress: [
        {
          hostname: $hostname,
          service: $service
        },
        {
          service: "http_status:404"
        }
      ]
    }
    }')
    # Update tunnel configuration
    TUNNEL_UPDATE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
        "${AUTH_HEADERS[@]}" \
        -H "Content-Type: application/json" \
        --data "$NEW_CONFIG")
    if echo "$TUNNEL_UPDATE" | jq -e '.success' >/dev/null 2>&1; then
        log_success "Tunnel route configured successfully"
    else
        ERROR=$(echo "$TUNNEL_UPDATE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
        log_warn "Tunnel route API configuration failed: $ERROR"
        log_info "Please configure manually in Cloudflare Zero Trust Dashboard"
    fi
fi
# Step 7: SSL/TLS Configuration (automatic with Cloudflare proxy)
log_section
log_info "Step 7: SSL/TLS Configuration"
log_section
log_info "SSL/TLS is automatically handled by Cloudflare when DNS is proxied"
log_success "SSL will be enabled automatically (Universal SSL)"
# Step 8: Verify Configuration
log_section
log_info "Step 8: Verifying Configuration"
log_section
log_info "Waiting for DNS propagation (10 seconds)..."
sleep 10
# Test DNS resolution
DNS_RESULT=$(dig +short "$EXPLORER_DOMAIN" 2>/dev/null | head -1 || echo "")
if [ -n "$DNS_RESULT" ]; then
    log_success "DNS resolves to: $DNS_RESULT"
    # Heuristic: these prefixes cover common Cloudflare edge ranges, but the
    # list is not exhaustive — a miss here is not proof the proxy is off.
    if echo "$DNS_RESULT" | grep -qE "^(104\.|172\.64\.|172\.65\.|172\.66\.|172\.67\.)"; then
        log_success "DNS points to Cloudflare (proxied correctly)"
    fi
else
    log_warn "DNS not resolving yet (may need more time)"
fi
# Test public URL
log_info "Testing public URL..."
# curl emits only the HTTP status (-w '%{http_code}'); body is discarded.
PUBLIC_HTTP=$(curl -s -o /dev/null -w "%{http_code}" "https://$EXPLORER_DOMAIN/api/v2/stats" 2>&1)
if [ "$PUBLIC_HTTP" = "200" ]; then
    log_success "Public URL: HTTP 200 - Working!"
    PUBLIC_RESPONSE=$(curl -s "https://$EXPLORER_DOMAIN/api/v2/stats" 2>&1)
    if echo "$PUBLIC_RESPONSE" | grep -q -E "total_blocks|chain_id"; then
        log_success "Public API: Valid response"
        echo "$PUBLIC_RESPONSE" | jq -r '.total_blocks, .total_transactions, .total_addresses' 2>/dev/null || echo "$PUBLIC_RESPONSE" | head -5
    fi
elif [ "$PUBLIC_HTTP" = "404" ]; then
    log_warn "Public URL: HTTP 404 - May need more time for DNS/tunnel propagation"
elif [ "$PUBLIC_HTTP" = "502" ]; then
    log_warn "Public URL: HTTP 502 - Tunnel routing issue, check tunnel route configuration"
else
    log_warn "Public URL: HTTP $PUBLIC_HTTP"
fi
# Final Summary — recap what was configured and where to find the explorer.
echo ""
log_section
log_info "  CONFIGURATION SUMMARY"
log_section
echo ""
log_success "✓ Cloudflared service: Installed and running"
log_success "✓ Tunnel ID: $TUNNEL_ID"
# FIX: restore the " → " separator that was missing between the hostname
# and its CNAME target in this summary line.
log_success "✓ DNS Record: $EXPLORER_DOMAIN → $TARGET (🟠 Proxied)"
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then
    log_success "✓ Tunnel Route: Configured via API"
else
    log_warn "⚠ Tunnel Route: Manual configuration required"
fi
log_success "✓ SSL/TLS: Automatic (Cloudflare Universal SSL)"
echo ""
log_info "Configuration complete!"
log_info ""
log_info "Access your explorer at:"
echo "  https://$EXPLORER_DOMAIN"
echo ""
log_info "If public URL is not working yet, wait 1-5 minutes for DNS propagation"
echo ""

View File

@@ -0,0 +1,399 @@
#!/bin/bash
# Complete Cloudflare Configuration for Explorer - Automated
# Uses .env credentials to configure DNS, SSL, and tunnel routes
#
# Loads CLOUDFLARE_API_TOKEN (preferred) or CLOUDFLARE_API_KEY+CLOUDFLARE_EMAIL
# from .env and builds AUTH_HEADERS for the Cloudflare API calls that follow.
set -euo pipefail

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

# Logging helpers; all diagnostics go to stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "${CYAN}════════════════════════════════════════${NC}"; }

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"

# Configuration (environment variables take precedence over these defaults)
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
EXPLORER_IP="${EXPLORER_IP:-192.168.11.140}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
VMID=5000
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
# SECURITY: this connector token is a credential and should live in .env, not
# in source control. It is kept as a default for backward compatibility but is
# now overridable via the TUNNEL_TOKEN environment variable. Rotate the token
# in the Cloudflare dashboard if this file has been shared publicly.
TUNNEL_TOKEN="${TUNNEL_TOKEN:-eyJhIjoiNTJhZDU3YTcxNjcxYzVmYzAwOWVkZjA3NDQ2NTgxOTYiLCJ0IjoiYjAyZmUxZmUtY2I3ZC00ODRlLTkwOWItN2NjNDEyOThlYmU4IiwicyI6Ik5HTmtOV0kwWXpNdFpUVmxaUzAwTVRFMkxXRXdNMk10WlRJNU1ETTFaRFF4TURBMiJ9}"

echo ""
log_section
log_info " COMPLETE CLOUDFLARE EXPLORER CONFIGURATION"
log_info " Using .env Credentials for Full Automation"
log_section
echo ""

# Step 1: Load .env file (required; exits with guidance if missing)
log_info "Step 1: Loading credentials from .env file..."
if [ ! -f "$ENV_FILE" ]; then
  log_error ".env file not found: $ENV_FILE"
  log_info "Looking for .env files..."
  find "$SCRIPT_DIR/.." -maxdepth 2 -name ".env" -type f 2>/dev/null | head -3
  log_info ""
  log_info "Please create .env file with:"
  echo " CLOUDFLARE_API_TOKEN=your-token"
  echo " CLOUDFLARE_ZONE_ID=your-zone-id (optional)"
  echo " CLOUDFLARE_ACCOUNT_ID=your-account-id (optional)"
  echo " DOMAIN=d-bis.org"
  exit 1
fi

# Source .env file; set -a exports everything it defines.
set -a
source "$ENV_FILE"
set +a
log_success ".env file loaded"

# Check for required credentials (default to empty so set -u is safe)
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_ZONE_ID="${CLOUDFLARE_ZONE_ID:-}"
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"

# Determine auth method: scoped API token preferred over legacy key+email.
AUTH_METHOD=""
AUTH_HEADERS=()
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
  AUTH_METHOD="token"
  AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
  log_success "Using API Token authentication"
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
  AUTH_METHOD="key"
  # Remove quotes from API_KEY if present (values may be quoted in .env)
  CLOUDFLARE_API_KEY=$(echo "$CLOUDFLARE_API_KEY" | tr -d '"')
  CLOUDFLARE_EMAIL=$(echo "$CLOUDFLARE_EMAIL" | tr -d '"')
  AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
  log_success "Using API Key authentication"
else
  log_error "No Cloudflare API credentials found in .env"
  log_info "Required: CLOUDFLARE_API_TOKEN or (CLOUDFLARE_API_KEY + CLOUDFLARE_EMAIL)"
  exit 1
fi
# Locate which Proxmox node currently hosts container $VMID by probing the
# known nodes via pvesh; falls back to "pve2" when no probe succeeds.
find_container_node() {
  local probe
  probe="for node in ml110 pve pve2; do if pvesh get /nodes/\$node/lxc/$VMID/status/current --output-format json >/dev/null 2>&1; then echo \$node; break; fi; done"
  ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "$probe" 2>/dev/null || echo "pve2"
}
# Function to execute command in container
# Runs a shell command inside LXC container $VMID via the Proxmox host.
# BUGFIX: the previous "bash -c '$cmd'" broke as soon as the command itself
# contained a single quote (e.g. awk '{print $1}' payloads). printf %q
# produces a bash-safe quoting that survives the ssh -> pct exec -> bash -c
# hops intact.
exec_container() {
  local cmd="$1"
  # Try direct pct exec via main host first
  ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec $VMID -- bash -c $(printf '%q' "$cmd")" 2>&1
}
# Step 2: Install Cloudflare Tunnel Service
# Installs cloudflared inside the container, registers it as a systemd service
# with the connector token, and determines the tunnel UUID.
log_section
log_info "Step 2: Installing Cloudflare Tunnel Service"
log_section
log_info "Checking cloudflared installation..."
if ! exec_container "command -v cloudflared >/dev/null 2>&1"; then
  log_info "Installing cloudflared..."
  # Download latest .deb from GitHub; 'apt install -f' repairs dependencies.
  exec_container "cd /tmp && wget -q https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb && dpkg -i cloudflared-linux-amd64.deb || apt install -f -y"
  log_success "cloudflared installed"
else
  log_success "cloudflared already installed"
fi
log_info "Installing tunnel service with token..."
INSTALL_OUTPUT=$(exec_container "cloudflared service install $TUNNEL_TOKEN 2>&1" || echo "INSTALL_FAILED")
if echo "$INSTALL_OUTPUT" | grep -q -E "successfully|installed|Service installed"; then
  log_success "Tunnel service installed"
else
  log_warn "Installation output: $INSTALL_OUTPUT"
  # Continue - service might already be installed
fi
log_info "Starting cloudflared service..."
exec_container "systemctl start cloudflared" || true
exec_container "systemctl enable cloudflared" || true
sleep 3
CLOUDFLARED_STATUS=$(exec_container "systemctl is-active cloudflared 2>/dev/null || echo 'inactive'")
if [ "$CLOUDFLARED_STATUS" = "active" ]; then
  log_success "Cloudflared service is running"
else
  log_warn "Cloudflared service is $CLOUDFLARED_STATUS"
fi
# Get tunnel ID: first non-header row of 'cloudflared tunnel list', column 1.
log_info "Getting tunnel ID..."
TUNNEL_LIST=$(exec_container "cloudflared tunnel list 2>&1" || echo "")
TUNNEL_ID=$(echo "$TUNNEL_LIST" | grep -v "NAME" | head -1 | awk '{print $1}' || echo "")
if [ -n "$TUNNEL_ID" ]; then
  log_success "Tunnel ID: $TUNNEL_ID"
else
  log_warn "Could not get tunnel ID from tunnel list"
  # Try to extract from token (base64 decode).
  # BUGFIX: the connector token decodes to JSON with short keys
  # ({"a": account, "t": tunnel, "s": secret}); the old filter only looked
  # for ".TunnelID" and therefore always came back empty. Try ".t" first and
  # keep ".TunnelID" as a fallback for other token formats.
  TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.t // .TunnelID // empty' 2>/dev/null || echo "")
  if [ -n "$TUNNEL_ID" ]; then
    log_success "Tunnel ID from token: $TUNNEL_ID"
  else
    log_error "Cannot determine tunnel ID"
    exit 1
  fi
fi
# Step 3: Get Zone ID
# Resolves the Cloudflare zone ID for $DOMAIN, unless one was supplied via
# CLOUDFLARE_ZONE_ID. A missing zone ID is fatal: every DNS call needs it.
log_section
log_info "Step 3: Getting Cloudflare Zone ID"
log_section
if [ -z "$CLOUDFLARE_ZONE_ID" ]; then
log_info "Fetching zone ID for $DOMAIN..."
ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
# First matching zone; '// empty' turns a missing field into "".
ZONE_ID=$(echo "$ZONE_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
if [ -z "$ZONE_ID" ] || [ "$ZONE_ID" = "null" ]; then
ERROR=$(echo "$ZONE_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_error "Failed to get zone ID: $ERROR"
exit 1
fi
log_success "Zone ID: $ZONE_ID"
else
ZONE_ID="$CLOUDFLARE_ZONE_ID"
log_success "Using provided Zone ID: $ZONE_ID"
fi
# Step 4: Get Account ID (for tunnel configuration)
# Non-fatal if unavailable: Step 6 falls back to manual route instructions.
log_section
log_info "Step 4: Getting Cloudflare Account ID"
log_section
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
log_info "Fetching account ID..."
ACCOUNT_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
# Assumes the first listed account is the right one — TODO confirm for
# credentials that can see multiple accounts.
CLOUDFLARE_ACCOUNT_ID=$(echo "$ACCOUNT_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ] || [ "$CLOUDFLARE_ACCOUNT_ID" = "null" ]; then
log_warn "Could not get account ID automatically"
else
log_success "Account ID: $CLOUDFLARE_ACCOUNT_ID"
fi
else
log_success "Using provided Account ID: $CLOUDFLARE_ACCOUNT_ID"
fi
# Step 5: Configure DNS Record
# Ensures $EXPLORER_DOMAIN is a proxied CNAME pointing at the tunnel hostname.
# An existing non-CNAME record is deleted first (the API cannot change type).
log_section
log_info "Step 5: Configuring DNS Record"
log_section
TARGET="${TUNNEL_ID}.cfargotunnel.com"
log_info "DNS Target: $TARGET"
# Check if record exists
EXISTING_RECORD=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?name=$EXPLORER_DOMAIN" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
RECORD_ID=$(echo "$EXISTING_RECORD" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
EXISTING_TYPE=$(echo "$EXISTING_RECORD" | jq -r '.result[0].type // empty' 2>/dev/null || echo "")
# Desired record body: proxied CNAME, ttl 1 means "automatic" to Cloudflare.
DNS_DATA=$(jq -n \
--arg name "explorer" \
--arg target "$TARGET" \
'{
type: "CNAME",
name: $name,
content: $target,
proxied: true,
ttl: 1
}')
if [ -n "$RECORD_ID" ] && [ "$RECORD_ID" != "null" ]; then
log_info "Updating existing DNS record (ID: $RECORD_ID, Type: $EXISTING_TYPE)..."
if [ "$EXISTING_TYPE" != "CNAME" ]; then
log_warn "Existing record is type $EXISTING_TYPE, deleting and creating CNAME..."
curl -s -X DELETE "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" >/dev/null 2>&1
DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$DNS_DATA")
else
# Same type: in-place update keeps the record ID stable.
DNS_RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$DNS_DATA")
fi
else
log_info "Creating new DNS record..."
DNS_RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$DNS_DATA")
fi
# Report the record Cloudflare actually stored; any API failure is fatal.
if echo "$DNS_RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
log_success "DNS record configured successfully"
DNS_NAME=$(echo "$DNS_RESPONSE" | jq -r '.result.name' 2>/dev/null || echo "$EXPLORER_DOMAIN")
DNS_TARGET=$(echo "$DNS_RESPONSE" | jq -r '.result.content' 2>/dev/null || echo "$TARGET")
DNS_PROXIED=$(echo "$DNS_RESPONSE" | jq -r '.result.proxied' 2>/dev/null || echo "true")
log_info " Name: $DNS_NAME"
log_info " Target: $DNS_TARGET"
log_info " Proxied: $DNS_PROXIED"
else
ERROR=$(echo "$DNS_RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_error "Failed to configure DNS: $ERROR"
echo "$DNS_RESPONSE" | jq '.' 2>/dev/null || echo "$DNS_RESPONSE"
exit 1
fi
# Step 6: Configure Tunnel Route
# Pushes an ingress rule (hostname -> local explorer service) to the tunnel.
# Without an account ID the API path is unavailable, so manual dashboard
# instructions are printed instead.
log_section
log_info "Step 6: Configuring Tunnel Route"
log_section
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ] || [ "$CLOUDFLARE_ACCOUNT_ID" = "null" ]; then
log_warn "Account ID not available - tunnel route must be configured manually"
log_info "Configure in Cloudflare Zero Trust Dashboard:"
echo " 1. Go to: https://one.dash.cloudflare.com/"
echo " 2. Zero Trust → Networks → Tunnels"
echo " 3. Select tunnel: $TUNNEL_ID"
echo " 4. Configure → Public Hostnames → Add hostname"
echo " 5. Subdomain: explorer, Domain: $DOMAIN"
echo " 6. Service: http://$EXPLORER_IP:$EXPLORER_PORT"
else
log_info "Configuring tunnel route via API..."
# Get current tunnel configuration
# NOTE(review): TUNNEL_CONFIG is fetched but never merged — the PUT below
# replaces the entire ingress list, which would drop any other hostnames
# already routed through this tunnel. Confirm that is intended.
TUNNEL_CONFIG=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
# Build new ingress configuration: explorer hostname plus the mandatory
# catch-all http_status:404 terminator Cloudflare requires.
NEW_CONFIG=$(jq -n \
--arg hostname "$EXPLORER_DOMAIN" \
--arg service "http://$EXPLORER_IP:$EXPLORER_PORT" \
'{
config: {
ingress: [
{
hostname: $hostname,
service: $service
},
{
service: "http_status:404"
}
]
}
}')
# Update tunnel configuration
TUNNEL_UPDATE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$NEW_CONFIG")
if echo "$TUNNEL_UPDATE" | jq -e '.success' >/dev/null 2>&1; then
log_success "Tunnel route configured successfully"
else
# Non-fatal: the operator can still finish the route in the dashboard.
ERROR=$(echo "$TUNNEL_UPDATE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_warn "Tunnel route API configuration failed: $ERROR"
log_info "Please configure manually in Cloudflare Zero Trust Dashboard"
fi
fi
# Step 7: SSL/TLS Configuration (automatic with Cloudflare proxy)
log_section
log_info "Step 7: SSL/TLS Configuration"
log_section
log_info "SSL/TLS is automatically handled by Cloudflare when DNS is proxied"
log_success "SSL will be enabled automatically (Universal SSL)"
# Step 8: Verify Configuration — check DNS propagation and the public URL.
log_section
log_info "Step 8: Verifying Configuration"
log_section
log_info "Waiting for DNS propagation (10 seconds)..."
sleep 10
# Test DNS resolution
DNS_RESULT=$(dig +short "$EXPLORER_DOMAIN" 2>/dev/null | head -1 || echo "")
if [ -n "$DNS_RESULT" ]; then
  log_success "DNS resolves to: $DNS_RESULT"
  # Proxied records resolve to Cloudflare edge IP ranges (104.x / 172.64-67.x).
  if echo "$DNS_RESULT" | grep -qE "^(104\.|172\.64\.|172\.65\.|172\.66\.|172\.67\.)"; then
    log_success "DNS points to Cloudflare (proxied correctly)"
  fi
else
  log_warn "DNS not resolving yet (may need more time)"
fi
# Test public URL
log_info "Testing public URL..."
PUBLIC_HTTP=$(curl -s -o /dev/null -w "%{http_code}" "https://$EXPLORER_DOMAIN/api/v2/stats" 2>&1)
if [ "$PUBLIC_HTTP" = "200" ]; then
  log_success "Public URL: HTTP 200 - Working!"
  PUBLIC_RESPONSE=$(curl -s "https://$EXPLORER_DOMAIN/api/v2/stats" 2>&1)
  if echo "$PUBLIC_RESPONSE" | grep -q -E "total_blocks|chain_id"; then
    log_success "Public API: Valid response"
    echo "$PUBLIC_RESPONSE" | jq -r '.total_blocks, .total_transactions, .total_addresses' 2>/dev/null || echo "$PUBLIC_RESPONSE" | head -5
  fi
elif [ "$PUBLIC_HTTP" = "404" ]; then
  log_warn "Public URL: HTTP 404 - May need more time for DNS/tunnel propagation"
elif [ "$PUBLIC_HTTP" = "502" ]; then
  log_warn "Public URL: HTTP 502 - Tunnel routing issue, check tunnel route configuration"
else
  log_warn "Public URL: HTTP $PUBLIC_HTTP"
fi
# Final Summary
echo ""
log_section
log_info " CONFIGURATION SUMMARY"
log_section
echo ""
log_success "✓ Cloudflared service: Installed and running"
log_success "✓ Tunnel ID: $TUNNEL_ID"
# BUGFIX: domain and tunnel target were concatenated with no separator
# ("explorer.d-bis.org<id>.cfargotunnel.com"); print them with an arrow.
log_success "✓ DNS Record: $EXPLORER_DOMAIN → $TARGET (🟠 Proxied)"
if [ -n "$CLOUDFLARE_ACCOUNT_ID" ]; then
  log_success "✓ Tunnel Route: Configured via API"
else
  log_warn "⚠ Tunnel Route: Manual configuration required"
fi
log_success "✓ SSL/TLS: Automatic (Cloudflare Universal SSL)"
echo ""
log_info "Configuration complete!"
log_info ""
log_info "Access your explorer at:"
echo " https://$EXPLORER_DOMAIN"
echo ""
log_info "If public URL is not working yet, wait 1-5 minutes for DNS propagation"
echo ""

View File

@@ -0,0 +1,272 @@
#!/usr/bin/env bash
# Complete Cloudflare configuration for Blockscout Explorer
# Attempts API configuration, falls back to manual instructions
set -euo pipefail

# Load IP configuration (best effort; defaults below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# BUGFIX: the previous expansion nested IP_DEVICE_14 three times and appended
# a literal "0", which produced e.g. "192.168.11.1400" whenever IP_DEVICE_14
# was actually set (and only worked by accident when it was unset). Use the
# IP_BLOCKSCOUT value from ip-addresses.conf with the canonical fallback.
EXPLORER_IP="${EXPLORER_IP:-${IP_BLOCKSCOUT:-192.168.11.140}}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
TUNNEL_ID="${TUNNEL_ID:-10ab22da-8ea3-4e2e-a896-27ece2211a05}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_section() { echo -e "${CYAN}════════════════════════════════════════${NC}"; }

# SCRIPT_DIR was already computed above (duplicate definition removed).
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"

log_section
log_info "Cloudflare Configuration for Blockscout Explorer"
log_section
echo ""
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
log_info "Tunnel ID: $TUNNEL_ID"
echo ""

# Load environment variables if .env exists
if [ -f "$ENV_FILE" ]; then
  source "$ENV_FILE"
fi

CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"

# Check if we can use API (support both API Token and API Key methods)
USE_API=false
AUTH_METHOD=""
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
  USE_API=true
  AUTH_METHOD="token"
  log_info "API Token found - attempting automated configuration..."
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
  USE_API=true
  AUTH_METHOD="key"
  log_info "API Key + Email found - attempting automated configuration..."
else
  log_warn "No API credentials found - will provide manual instructions"
fi

# Set auth headers based on method.
# NOTE(review): AUTH_HEADER appears unused below (configure_dns_api builds its
# own header array); confirm before removing entirely.
if [ "$AUTH_METHOD" = "token" ]; then
  AUTH_HEADER="Authorization: Bearer $CLOUDFLARE_API_TOKEN"
elif [ "$AUTH_METHOD" = "key" ]; then
  AUTH_HEADER="X-Auth-Email: $CLOUDFLARE_EMAIL
X-Auth-Key: $CLOUDFLARE_API_KEY"
fi
# Function to configure DNS via API
# Ensures explorer.<domain> is a proxied CNAME pointing at the tunnel target.
#   $1 - Cloudflare zone ID
# Returns 0 when the Cloudflare API reports success, 1 otherwise.
configure_dns_api() {
  local zone_id="$1"
  local target="${TUNNEL_ID}.cfargotunnel.com"
  log_info "Configuring DNS record via API..."
  # Build curl headers based on auth method
  local curl_headers=(-H "Content-Type: application/json")
  if [ "$AUTH_METHOD" = "token" ]; then
    curl_headers+=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
  elif [ "$AUTH_METHOD" = "key" ]; then
    curl_headers+=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL")
    curl_headers+=(-H "X-Auth-Key: $CLOUDFLARE_API_KEY")
  fi
  # Check if record exists (any type). Declarations are separated from the
  # command substitutions so curl/jq exit codes are not masked by 'local'.
  local response record_id existing_type data
  response=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records?name=$EXPLORER_DOMAIN" \
    "${curl_headers[@]}")
  record_id=$(echo "$response" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
  existing_type=$(echo "$response" | jq -r '.result[0].type // empty' 2>/dev/null || echo "")
  # Desired record body: proxied CNAME; ttl 1 means "automatic".
  data=$(jq -n \
    --arg name "explorer" \
    --arg target "$target" \
    '{
      type: "CNAME",
      name: $name,
      content: $target,
      proxied: true,
      ttl: 1
    }')
  if [ -n "$record_id" ] && [ "$record_id" != "null" ]; then
    log_info "Found existing DNS record (type: ${existing_type:-unknown}, ID: $record_id)"
    if [ "$existing_type" != "CNAME" ]; then
      # The API cannot change a record's type; delete then recreate.
      log_warn "Existing record is type $existing_type, deleting and creating CNAME..."
      curl -s -X DELETE "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$record_id" \
        "${curl_headers[@]}" >/dev/null 2>&1
      log_info "Creating new CNAME record..."
      response=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records" \
        "${curl_headers[@]}" \
        --data "$data")
    else
      log_info "Updating existing CNAME record..."
      response=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$record_id" \
        "${curl_headers[@]}" \
        --data "$data")
    fi
  else
    log_info "Creating new DNS record..."
    response=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records" \
      "${curl_headers[@]}" \
      --data "$data")
  fi
  if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
    log_success "DNS record configured successfully"
    return 0
  else
    local error
    error=$(echo "$response" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
    log_error "Failed to configure DNS: $error"
    return 1
  fi
}
# Try API configuration if credentials available
# Resolves the zone ID (either provided or fetched) and then attempts the DNS
# record update; DNS_CONFIGURED records the outcome for the summary below.
if [ "$USE_API" = "true" ]; then
log_section
log_info "Step 1: Getting Zone ID"
log_section
# Use provided ZONE_ID if available, otherwise fetch it
if [ -n "${CLOUDFLARE_ZONE_ID:-}" ]; then
ZONE_ID="$CLOUDFLARE_ZONE_ID"
log_info "Using provided Zone ID: $ZONE_ID"
else
# Build curl command based on auth method
if [ "$AUTH_METHOD" = "token" ]; then
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
elif [ "$AUTH_METHOD" = "key" ]; then
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "X-Auth-Email: $CLOUDFLARE_EMAIL" \
-H "X-Auth-Key: $CLOUDFLARE_API_KEY" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
fi
# USE_API=true guarantees AUTH_METHOD is token or key, so ZONE_ID is always
# assigned before this check (relevant under 'set -u').
if [ -n "$ZONE_ID" ] && [ "$ZONE_ID" != "null" ]; then
log_success "Zone ID: $ZONE_ID"
log_section
log_info "Step 2: Configuring DNS Record"
log_section
if configure_dns_api "$ZONE_ID"; then
log_success "DNS configuration complete via API!"
DNS_CONFIGURED=true
else
log_warn "API DNS configuration failed, falling back to manual"
DNS_CONFIGURED=false
fi
else
log_error "Failed to get zone ID"
DNS_CONFIGURED=false
fi
else
DNS_CONFIGURED=false
fi
# Tunnel route configuration (always requires manual or complex API)
# Prints dashboard instructions for the tunnel's public hostname, then (only
# when the API path failed) manual DNS instructions, then a summary.
log_section
log_info "Step 3: Tunnel Route Configuration"
log_section
log_warn "Tunnel route configuration requires manual setup in Cloudflare Zero Trust Dashboard"
echo ""
log_info "Instructions:"
echo ""
echo "1. Go to: https://one.dash.cloudflare.com/"
echo "2. Navigate to: Zero Trust → Networks → Tunnels"
echo "3. Select your tunnel (ID: $TUNNEL_ID)"
echo "4. Click 'Configure' → 'Public Hostnames'"
echo "5. Click 'Add a public hostname'"
echo "6. Configure:"
echo " - Subdomain: explorer"
echo " - Domain: $DOMAIN"
echo " - Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo " - Type: HTTP"
echo "7. Click 'Save hostname'"
echo ""
# Manual DNS instructions if API didn't work
if [ "$DNS_CONFIGURED" != "true" ]; then
log_section
log_info "Step 2: DNS Record Configuration (Manual)"
log_section
log_info "Go to: https://dash.cloudflare.com/"
log_info "Navigate to: $DOMAIN → DNS → Records → Add record"
echo ""
echo "Configure:"
echo " Type: CNAME"
echo " Name: explorer"
echo " Target: ${TUNNEL_ID}.cfargotunnel.com"
echo " Proxy status: 🟠 Proxied (orange cloud) - REQUIRED"
echo " TTL: Auto"
echo ""
log_warn "IMPORTANT: Proxy must be enabled (orange cloud) for tunnel to work!"
echo ""
fi
# Summary
log_section
log_info "Configuration Summary"
log_section
if [ "$DNS_CONFIGURED" = "true" ]; then
log_success "DNS Record: ✅ Configured via API"
else
log_warn "DNS Record: ⚠️ Needs manual configuration"
fi
log_warn "Tunnel Route: ⚠️ Needs manual configuration"
echo ""
log_info "Configuration Details:"
echo " Domain: $EXPLORER_DOMAIN"
echo " DNS Target: ${TUNNEL_ID}.cfargotunnel.com"
echo " Tunnel Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo ""
# Verification instructions
log_section
log_info "Verification"
log_section
log_info "After configuration, wait 1-5 minutes for DNS propagation, then test:"
echo ""
echo " curl -I https://$EXPLORER_DOMAIN"
echo " curl https://$EXPLORER_DOMAIN/health"
echo ""
if [ "$DNS_CONFIGURED" = "true" ]; then
log_success "Configuration complete! DNS configured, tunnel route pending manual setup."
else
log_warn "Configuration pending. Please complete DNS and tunnel route setup manually."
fi
echo ""
View File

@@ -0,0 +1,266 @@
#!/usr/bin/env bash
# Complete Cloudflare configuration for Blockscout Explorer.
# Tries the Cloudflare API first and falls back to printing manual steps.
set -euo pipefail

# --- Static configuration (environment variables take precedence) ---
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
EXPLORER_IP="${EXPLORER_IP:-192.168.11.140}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
TUNNEL_ID="${TUNNEL_ID:-10ab22da-8ea3-4e2e-a896-27ece2211a05}"

# --- ANSI colors for log output ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_section() { echo -e "${CYAN}════════════════════════════════════════${NC}"; }

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"

# Banner
log_section
log_info "Cloudflare Configuration for Blockscout Explorer"
log_section
echo ""
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
log_info "Tunnel ID: $TUNNEL_ID"
echo ""

# Pull credentials from .env when present.
if [[ -f "$ENV_FILE" ]]; then
  source "$ENV_FILE"
fi
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"

# Decide whether the API path is usable: a scoped API token is preferred
# over the legacy global key + account email pair.
USE_API=false
AUTH_METHOD=""
if [[ -n "$CLOUDFLARE_API_TOKEN" ]]; then
  USE_API=true
  AUTH_METHOD="token"
  log_info "API Token found - attempting automated configuration..."
elif [[ -n "$CLOUDFLARE_API_KEY" && -n "$CLOUDFLARE_EMAIL" ]]; then
  USE_API=true
  AUTH_METHOD="key"
  log_info "API Key + Email found - attempting automated configuration..."
else
  log_warn "No API credentials found - will provide manual instructions"
fi

# Pre-rendered header string for the chosen auth scheme.
if [[ "$AUTH_METHOD" == "token" ]]; then
  AUTH_HEADER="Authorization: Bearer $CLOUDFLARE_API_TOKEN"
elif [[ "$AUTH_METHOD" == "key" ]]; then
  AUTH_HEADER="X-Auth-Email: $CLOUDFLARE_EMAIL
X-Auth-Key: $CLOUDFLARE_API_KEY"
fi
# Function to configure DNS via API
# Ensure explorer.<domain> is a proxied CNAME pointing at the tunnel target.
#   $1 - Cloudflare zone ID
# Returns 0 when the Cloudflare API reports success, 1 otherwise.
configure_dns_api() {
  local zone_id="$1"
  local target="${TUNNEL_ID}.cfargotunnel.com"
  log_info "Configuring DNS record via API..."

  # Assemble curl auth headers for the detected auth scheme.
  local hdrs=(-H "Content-Type: application/json")
  case "$AUTH_METHOD" in
    token)
      hdrs+=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
      ;;
    key)
      hdrs+=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL")
      hdrs+=(-H "X-Auth-Key: $CLOUDFLARE_API_KEY")
      ;;
  esac

  # Look up any record (of any type) already claiming this hostname.
  local resp rec_id rec_type payload
  resp=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records?name=$EXPLORER_DOMAIN" \
    "${hdrs[@]}")
  rec_id=$(echo "$resp" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
  rec_type=$(echo "$resp" | jq -r '.result[0].type // empty' 2>/dev/null || echo "")

  # Desired record body: proxied CNAME with automatic TTL.
  payload=$(jq -n \
    --arg name "explorer" \
    --arg target "$target" \
    '{
      type: "CNAME",
      name: $name,
      content: $target,
      proxied: true,
      ttl: 1
    }')

  if [ -n "$rec_id" ] && [ "$rec_id" != "null" ]; then
    log_info "Found existing DNS record (type: ${rec_type:-unknown}, ID: $rec_id)"
    if [ "$rec_type" != "CNAME" ]; then
      # Type cannot be changed in place: delete, then recreate as CNAME.
      log_warn "Existing record is type $rec_type, deleting and creating CNAME..."
      curl -s -X DELETE "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$rec_id" \
        "${hdrs[@]}" >/dev/null 2>&1
      log_info "Creating new CNAME record..."
      resp=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records" \
        "${hdrs[@]}" \
        --data "$payload")
    else
      log_info "Updating existing CNAME record..."
      resp=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records/$rec_id" \
        "${hdrs[@]}" \
        --data "$payload")
    fi
  else
    log_info "Creating new DNS record..."
    resp=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$zone_id/dns_records" \
      "${hdrs[@]}" \
      --data "$payload")
  fi

  if echo "$resp" | jq -e '.success' >/dev/null 2>&1; then
    log_success "DNS record configured successfully"
    return 0
  fi
  local err
  err=$(echo "$resp" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
  log_error "Failed to configure DNS: $err"
  return 1
}
# Try API configuration if credentials available
# Resolves the zone ID (provided or fetched) and then attempts the DNS record
# update; DNS_CONFIGURED records the outcome for the summary below.
if [ "$USE_API" = "true" ]; then
log_section
log_info "Step 1: Getting Zone ID"
log_section
# Use provided ZONE_ID if available, otherwise fetch it
if [ -n "${CLOUDFLARE_ZONE_ID:-}" ]; then
ZONE_ID="$CLOUDFLARE_ZONE_ID"
log_info "Using provided Zone ID: $ZONE_ID"
else
# Build curl command based on auth method
if [ "$AUTH_METHOD" = "token" ]; then
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
elif [ "$AUTH_METHOD" = "key" ]; then
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "X-Auth-Email: $CLOUDFLARE_EMAIL" \
-H "X-Auth-Key: $CLOUDFLARE_API_KEY" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
fi
# USE_API=true guarantees AUTH_METHOD is token or key, so ZONE_ID is always
# assigned before this check (relevant under 'set -u').
if [ -n "$ZONE_ID" ] && [ "$ZONE_ID" != "null" ]; then
log_success "Zone ID: $ZONE_ID"
log_section
log_info "Step 2: Configuring DNS Record"
log_section
if configure_dns_api "$ZONE_ID"; then
log_success "DNS configuration complete via API!"
DNS_CONFIGURED=true
else
log_warn "API DNS configuration failed, falling back to manual"
DNS_CONFIGURED=false
fi
else
log_error "Failed to get zone ID"
DNS_CONFIGURED=false
fi
else
DNS_CONFIGURED=false
fi
# Tunnel route configuration (always requires manual or complex API)
# Prints dashboard instructions for the tunnel's public hostname, then (only
# when the API path failed) manual DNS instructions, then a summary.
log_section
log_info "Step 3: Tunnel Route Configuration"
log_section
log_warn "Tunnel route configuration requires manual setup in Cloudflare Zero Trust Dashboard"
echo ""
log_info "Instructions:"
echo ""
echo "1. Go to: https://one.dash.cloudflare.com/"
echo "2. Navigate to: Zero Trust → Networks → Tunnels"
echo "3. Select your tunnel (ID: $TUNNEL_ID)"
echo "4. Click 'Configure' → 'Public Hostnames'"
echo "5. Click 'Add a public hostname'"
echo "6. Configure:"
echo " - Subdomain: explorer"
echo " - Domain: $DOMAIN"
echo " - Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo " - Type: HTTP"
echo "7. Click 'Save hostname'"
echo ""
# Manual DNS instructions if API didn't work
if [ "$DNS_CONFIGURED" != "true" ]; then
log_section
log_info "Step 2: DNS Record Configuration (Manual)"
log_section
log_info "Go to: https://dash.cloudflare.com/"
log_info "Navigate to: $DOMAIN → DNS → Records → Add record"
echo ""
echo "Configure:"
echo " Type: CNAME"
echo " Name: explorer"
echo " Target: ${TUNNEL_ID}.cfargotunnel.com"
echo " Proxy status: 🟠 Proxied (orange cloud) - REQUIRED"
echo " TTL: Auto"
echo ""
log_warn "IMPORTANT: Proxy must be enabled (orange cloud) for tunnel to work!"
echo ""
fi
# Summary
log_section
log_info "Configuration Summary"
log_section
if [ "$DNS_CONFIGURED" = "true" ]; then
log_success "DNS Record: ✅ Configured via API"
else
log_warn "DNS Record: ⚠️ Needs manual configuration"
fi
log_warn "Tunnel Route: ⚠️ Needs manual configuration"
echo ""
log_info "Configuration Details:"
echo " Domain: $EXPLORER_DOMAIN"
echo " DNS Target: ${TUNNEL_ID}.cfargotunnel.com"
echo " Tunnel Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo ""
# Verification instructions
log_section
log_info "Verification"
log_section
log_info "After configuration, wait 1-5 minutes for DNS propagation, then test:"
echo ""
echo " curl -I https://$EXPLORER_DOMAIN"
echo " curl https://$EXPLORER_DOMAIN/health"
echo ""
if [ "$DNS_CONFIGURED" = "true" ]; then
log_success "Configuration complete! DNS configured, tunnel route pending manual setup."
else
log_warn "Configuration pending. Please complete DNS and tunnel route setup manually."
fi
echo ""
View File

@@ -0,0 +1,148 @@
#!/usr/bin/env bash
# Manual Cloudflare configuration instructions for Blockscout Explorer
# This script provides instructions and can help check existing configuration
set -euo pipefail

# Load IP configuration (best effort; the source line tolerates a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

EXPLORER_DOMAIN="explorer.d-bis.org"
# BUGFIX: the bare "${IP_BLOCKSCOUT}" aborted with "unbound variable" under
# 'set -u' whenever ip-addresses.conf could not be sourced (the source above
# deliberately continues on failure). Fall back to the canonical explorer IP,
# matching the sibling scripts.
EXPLORER_IP="${IP_BLOCKSCOUT:-192.168.11.140}"
EXPLORER_PORT="80"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_section() { echo -e "${CYAN}════════════════════════════════════════${NC}"; }

log_section
log_info "Cloudflare Configuration for Blockscout Explorer"
log_section
echo ""
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
echo ""
# Try to get tunnel information
# Probes the cloudflared install in LXC 102 on the Proxmox host to discover
# the tunnel UUID; falls back to a placeholder plus dashboard instructions.
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
log_section
log_info "Step 1: Get Tunnel ID"
log_section
log_info "Checking tunnel configuration on VMID 102..."
TUNNEL_INFO=$(ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 102 -- cloudflared tunnel list 2>&1" | head -10 || echo "")
if [ -n "$TUNNEL_INFO" ]; then
echo "$TUNNEL_INFO"
# Extract the first UUID-shaped token from the listing (grep -P is GNU-only).
TUNNEL_ID=$(echo "$TUNNEL_INFO" | grep -oP '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' | head -1 || echo "")
if [ -n "$TUNNEL_ID" ]; then
log_success "Found tunnel ID: $TUNNEL_ID"
TUNNEL_TARGET="${TUNNEL_ID}.cfargotunnel.com"
else
log_warn "Could not extract tunnel ID from output"
TUNNEL_TARGET="<tunnel-id>.cfargotunnel.com"
fi
else
log_warn "Could not get tunnel information"
log_info "You can find your tunnel ID in Cloudflare Zero Trust Dashboard:"
log_info " https://one.dash.cloudflare.com/ → Zero Trust → Networks → Tunnels"
TUNNEL_TARGET="<tunnel-id>.cfargotunnel.com"
fi
echo ""
# Step 2: manual DNS record instructions (CNAME must be proxied).
log_section
log_info "Step 2: Configure DNS Record"
log_section
log_info "Go to Cloudflare DNS Dashboard:"
log_info " https://dash.cloudflare.com/ → Select domain 'd-bis.org' → DNS → Records"
echo ""
log_info "Create CNAME record:"
echo ""
echo " Type: CNAME"
echo " Name: explorer"
echo " Target: $TUNNEL_TARGET"
echo " Proxy status: 🟠 Proxied (orange cloud) - REQUIRED"
echo " TTL: Auto"
echo ""
log_warn "IMPORTANT: Proxy must be enabled (orange cloud) for tunnel to work!"
echo ""
# Step 3: manual tunnel public-hostname instructions.
log_section
log_info "Step 3: Configure Tunnel Route"
log_section
log_info "Go to Cloudflare Zero Trust Dashboard:"
log_info " https://one.dash.cloudflare.com/"
log_info " Navigate to: Zero Trust → Networks → Tunnels"
echo ""
log_info "Select your tunnel, then click 'Configure' → 'Public Hostnames'"
log_info "Add a new hostname:"
echo ""
echo " Subdomain: explorer"
echo " Domain: d-bis.org"
echo " Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo " Type: HTTP"
echo ""
log_info "Click 'Save hostname'"
echo ""
log_section
log_info "Step 4: Verify Configuration"
log_section
log_info "After configuration, test with:"
echo ""
echo " # Wait 1-5 minutes for DNS propagation"
echo " dig $EXPLORER_DOMAIN"
echo " curl https://$EXPLORER_DOMAIN/health"
echo ""
# Live status check: DNS first, then a best-effort HTTPS probe whose status
# code maps to the most likely misconfiguration.
log_section
log_info "Current Status Check"
log_section
log_info "Checking if DNS record exists..."
DNS_CHECK=$(dig +short "$EXPLORER_DOMAIN" 2>&1 | head -3 || echo "")
if [ -n "$DNS_CHECK" ] && [ "$DNS_CHECK" != ";; connection timed out; no servers could be reached" ]; then
log_success "DNS record exists: $DNS_CHECK"
else
log_warn "DNS record not found or not yet propagated"
fi
log_info "Testing HTTPS endpoint..."
HTTP_TEST=$(curl -I -s --max-time 10 "https://$EXPLORER_DOMAIN" 2>&1 | head -5 || echo "")
if echo "$HTTP_TEST" | grep -q "HTTP/2 200\|HTTP/1.1 200"; then
log_success "HTTPS endpoint is working!"
elif echo "$HTTP_TEST" | grep -q "HTTP/2 522"; then
log_warn "HTTP 522 (Connection Timeout) - Tunnel may not be configured yet"
elif echo "$HTTP_TEST" | grep -q "HTTP/2 404"; then
log_warn "HTTP 404 - DNS configured but tunnel route may be missing"
else
log_warn "Endpoint not accessible: $HTTP_TEST"
fi
echo ""
log_success "Configuration instructions complete!"
echo ""
log_info "Summary:"
log_info " 1. DNS: CNAME explorer → $TUNNEL_TARGET (🟠 Proxied)"
log_info " 2. Tunnel: explorer.d-bis.org → http://$EXPLORER_IP:$EXPLORER_PORT"
log_info " 3. Test: curl https://$EXPLORER_DOMAIN/health"
echo ""
View File

@@ -0,0 +1,142 @@
#!/usr/bin/env bash
# Manual Cloudflare configuration instructions for Blockscout Explorer
# This script provides instructions and can help check existing configuration
#
# It performs no changes itself: it prints step-by-step dashboard instructions
# and then runs read-only checks (ssh to the tunnel container, dig, curl).
set -euo pipefail
# Public hostname to be served through the Cloudflare tunnel.
EXPLORER_DOMAIN="explorer.d-bis.org"
# LAN origin the tunnel should forward to (fixed here, not env-overridable).
EXPLORER_IP="192.168.11.140"
EXPLORER_PORT="80"
# Colors
# ANSI escape sequences consumed by the log_* helpers below via `echo -e`.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
# Logging helpers: colorized one-line messages on stdout.
# printf '%b' interprets backslash escapes in the color variables and in the
# message itself, matching the original `echo -e` behavior byte-for-byte.
log_info() { printf '%b[INFO]%b %b\n' "${BLUE}" "${NC}" "$1"; }
log_success() { printf '%b[✓]%b %b\n' "${GREEN}" "${NC}" "$1"; }
log_warn() { printf '%b[WARN]%b %b\n' "${YELLOW}" "${NC}" "$1"; }
log_error() { printf '%b[ERROR]%b %b\n' "${RED}" "${NC}" "$1"; }
log_section() { printf '%b════════════════════════════════════════%b\n' "${CYAN}" "${NC}"; }
log_section
log_info "Cloudflare Configuration for Blockscout Explorer"
log_section
echo ""
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
echo ""
# Try to get tunnel information
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
log_section
log_info "Step 1: Get Tunnel ID"
log_section
log_info "Checking tunnel configuration on VMID 102..."
# Read-only: lists tunnels inside container 102 on the Proxmox host.
# NOTE(review): StrictHostKeyChecking=no skips host-key verification — assumed
# acceptable on this trusted LAN; confirm.
TUNNEL_INFO=$(ssh -o StrictHostKeyChecking=no root@"$PROXMOX_HOST" "pct exec 102 -- cloudflared tunnel list 2>&1" | head -10 || echo "")
if [ -n "$TUNNEL_INFO" ]; then
echo "$TUNNEL_INFO"
# Extract the first UUID-shaped token from the listing.
# NOTE(review): grep -oP (PCRE) requires GNU grep — not available on BSD/macOS.
TUNNEL_ID=$(echo "$TUNNEL_INFO" | grep -oP '[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}' | head -1 || echo "")
if [ -n "$TUNNEL_ID" ]; then
log_success "Found tunnel ID: $TUNNEL_ID"
TUNNEL_TARGET="${TUNNEL_ID}.cfargotunnel.com"
else
log_warn "Could not extract tunnel ID from output"
# Placeholder shown in the instructions when no ID could be determined.
TUNNEL_TARGET="<tunnel-id>.cfargotunnel.com"
fi
else
log_warn "Could not get tunnel information"
log_info "You can find your tunnel ID in Cloudflare Zero Trust Dashboard:"
log_info " https://one.dash.cloudflare.com/ → Zero Trust → Networks → Tunnels"
TUNNEL_TARGET="<tunnel-id>.cfargotunnel.com"
fi
echo ""
log_section
log_info "Step 2: Configure DNS Record"
log_section
log_info "Go to Cloudflare DNS Dashboard:"
log_info " https://dash.cloudflare.com/ → Select domain 'd-bis.org' → DNS → Records"
echo ""
log_info "Create CNAME record:"
echo ""
echo " Type: CNAME"
echo " Name: explorer"
echo " Target: $TUNNEL_TARGET"
echo " Proxy status: 🟠 Proxied (orange cloud) - REQUIRED"
echo " TTL: Auto"
echo ""
log_warn "IMPORTANT: Proxy must be enabled (orange cloud) for tunnel to work!"
echo ""
log_section
log_info "Step 3: Configure Tunnel Route"
log_section
log_info "Go to Cloudflare Zero Trust Dashboard:"
log_info " https://one.dash.cloudflare.com/"
log_info " Navigate to: Zero Trust → Networks → Tunnels"
echo ""
log_info "Select your tunnel, then click 'Configure' → 'Public Hostnames'"
log_info "Add a new hostname:"
echo ""
echo " Subdomain: explorer"
echo " Domain: d-bis.org"
echo " Service: http://$EXPLORER_IP:$EXPLORER_PORT"
echo " Type: HTTP"
echo ""
log_info "Click 'Save hostname'"
echo ""
log_section
log_info "Step 4: Verify Configuration"
log_section
log_info "After configuration, test with:"
echo ""
echo " # Wait 1-5 minutes for DNS propagation"
echo " dig $EXPLORER_DOMAIN"
echo " curl https://$EXPLORER_DOMAIN/health"
echo ""
log_section
log_info "Current Status Check"
log_section
log_info "Checking if DNS record exists..."
# head -3 keeps at most the first three answer lines; the comparison below
# only matches when dig produced exactly the single timeout-error line.
DNS_CHECK=$(dig +short "$EXPLORER_DOMAIN" 2>&1 | head -3 || echo "")
if [ -n "$DNS_CHECK" ] && [ "$DNS_CHECK" != ";; connection timed out; no servers could be reached" ]; then
log_success "DNS record exists: $DNS_CHECK"
else
log_warn "DNS record not found or not yet propagated"
fi
log_info "Testing HTTPS endpoint..."
# HEAD request; only the status line matters for the checks below.
HTTP_TEST=$(curl -I -s --max-time 10 "https://$EXPLORER_DOMAIN" 2>&1 | head -5 || echo "")
if echo "$HTTP_TEST" | grep -q "HTTP/2 200\|HTTP/1.1 200"; then
log_success "HTTPS endpoint is working!"
elif echo "$HTTP_TEST" | grep -q "HTTP/2 522"; then
# Cloudflare 522 = edge could not reach the origin through the tunnel.
log_warn "HTTP 522 (Connection Timeout) - Tunnel may not be configured yet"
elif echo "$HTTP_TEST" | grep -q "HTTP/2 404"; then
log_warn "HTTP 404 - DNS configured but tunnel route may be missing"
else
log_warn "Endpoint not accessible: $HTTP_TEST"
fi
echo ""
log_success "Configuration instructions complete!"
echo ""
log_info "Summary:"
log_info " 1. DNS: CNAME explorer → $TUNNEL_TARGET (🟠 Proxied)"
log_info " 2. Tunnel: explorer.d-bis.org → http://$EXPLORER_IP:$EXPLORER_PORT"
log_info " 3. Test: curl https://$EXPLORER_DOMAIN/health"
echo ""

View File

@@ -0,0 +1,197 @@
#!/usr/bin/env bash
# Configure Cloudflare DNS and tunnel for Blockscout Explorer
# Usage: ./configure-cloudflare-explorer.sh
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Optional site-wide IP map; a missing file is fine (defaults below apply).
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# Fix: the previous expression nested ${IP_DEVICE_14:-...} inside itself and
# appended a literal "0" outside the expansion, producing addresses like
# "192.168.11.1400" whenever IP_DEVICE_14 held a full IP. The explorer origin
# defaults to .140 directly; override with EXPLORER_IP if needed.
EXPLORER_IP="${EXPLORER_IP:-192.168.11.140}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
# Colors
# ANSI escape sequences used by the log_* helpers via `echo -e`.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: colorized one-line status messages on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Check for .env file with Cloudflare credentials
# NOTE(review): the default ".env" is resolved relative to the CURRENT working
# directory, not SCRIPT_DIR — confirm this is intentional for how it is invoked.
ENV_FILE="${ENV_FILE:-.env}"
if [ ! -f "$ENV_FILE" ]; then
log_error "Environment file not found: $ENV_FILE"
log_info "Please create $ENV_FILE with:"
log_info " CLOUDFLARE_API_TOKEN=your-token"
log_info " DOMAIN=d-bis.org"
log_info " TUNNEL_TOKEN=your-tunnel-token"
exit 1
fi
# Sourcing may override DOMAIN/EXPLORER_IP/etc. set above.
source "$ENV_FILE"
if [ -z "${CLOUDFLARE_API_TOKEN:-}" ]; then
log_error "CLOUDFLARE_API_TOKEN not set in $ENV_FILE"
exit 1
fi
log_info "Configuring Cloudflare for Blockscout Explorer"
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
# Get Zone ID
# Resolves the Cloudflare zone ID for $DOMAIN via the v4 API.
log_info "Getting zone ID for $DOMAIN..."
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty')
if [ -z "$ZONE_ID" ] || [ "$ZONE_ID" = "null" ]; then
log_error "Failed to get zone ID for $DOMAIN"
exit 1
fi
log_success "Zone ID: $ZONE_ID"
# Extract tunnel ID from tunnel token or configuration
TUNNEL_ID=""
if [ -n "${TUNNEL_TOKEN:-}" ]; then
# Try to extract tunnel ID from token (if it's in the format we expect)
# (connector tokens are base64-encoded JSON carrying a TunnelID field)
TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.TunnelID // empty' 2>/dev/null || echo "")
fi
# If no tunnel ID found, try to get it from Cloudflare API
# NOTE(review): this picks the FIRST account and FIRST tunnel returned —
# assumes a single-account, single-tunnel setup; confirm.
if [ -z "$TUNNEL_ID" ]; then
log_info "Getting tunnel information..."
ACCOUNT_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty')
if [ -n "$ACCOUNT_ID" ] && [ "$ACCOUNT_ID" != "null" ]; then
TUNNELS=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$ACCOUNT_ID/cfd_tunnel" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json")
TUNNEL_ID=$(echo "$TUNNELS" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
fi
# Check if DNS record already exists
log_info "Checking for existing DNS record..."
EXISTING_RECORD=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?name=$EXPLORER_DOMAIN&type=CNAME" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0] // empty')
if [ -n "$EXISTING_RECORD" ] && [ "$EXISTING_RECORD" != "null" ]; then
# Update path: a CNAME named "explorer" already exists — PATCH it in place.
RECORD_ID=$(echo "$EXISTING_RECORD" | jq -r '.id')
log_warn "DNS record already exists (ID: $RECORD_ID)"
log_info "Updating existing record..."
if [ -n "$TUNNEL_ID" ] && [ "$TUNNEL_ID" != "null" ]; then
TARGET="${TUNNEL_ID}.cfargotunnel.com"
log_info "Using Cloudflare Tunnel: $TARGET"
else
TARGET="$EXPLORER_IP"
log_warn "No tunnel ID found, using direct IP (may not work behind NAT)"
fi
# ttl=1 is Cloudflare's "automatic" TTL; proxied=true enables the orange cloud.
UPDATE_RESULT=$(curl -s -X PATCH "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" \
--data "{
\"type\": \"CNAME\",
\"name\": \"explorer\",
\"content\": \"$TARGET\",
\"proxied\": true,
\"ttl\": 1
}")
SUCCESS=$(echo "$UPDATE_RESULT" | jq -r '.success // false')
if [ "$SUCCESS" = "true" ]; then
log_success "DNS record updated successfully"
else
ERROR=$(echo "$UPDATE_RESULT" | jq -r '.errors[0].message // "Unknown error"')
log_error "Failed to update DNS record: $ERROR"
exit 1
fi
else
# Create path: no record yet — POST a CNAME (tunnel) or A record (direct IP).
log_info "Creating new DNS record..."
if [ -n "$TUNNEL_ID" ] && [ "$TUNNEL_ID" != "null" ]; then
TARGET="${TUNNEL_ID}.cfargotunnel.com"
RECORD_TYPE="CNAME"
log_info "Using Cloudflare Tunnel: $TARGET"
else
TARGET="$EXPLORER_IP"
RECORD_TYPE="A"
log_warn "No tunnel ID found, using A record with direct IP (may not work behind NAT)"
fi
CREATE_RESULT=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" \
--data "{
\"type\": \"$RECORD_TYPE\",
\"name\": \"explorer\",
\"content\": \"$TARGET\",
\"proxied\": true,
\"ttl\": 1
}")
SUCCESS=$(echo "$CREATE_RESULT" | jq -r '.success // false')
if [ "$SUCCESS" = "true" ]; then
log_success "DNS record created successfully"
else
ERROR=$(echo "$CREATE_RESULT" | jq -r '.errors[0].message // "Unknown error"')
log_error "Failed to create DNS record: $ERROR"
exit 1
fi
fi
# If we have a tunnel, point the operator at the manual route configuration.
# Fix: the previous revision fetched the tunnel configuration into
# TUNNEL_CONFIG here (via a GET that was never consumed) — the dead call is
# removed. Automated ingress updates are handled by
# configure-cloudflare-tunnel-route.sh instead.
if [ -n "$TUNNEL_ID" ] && [ "$TUNNEL_ID" != "null" ] && [ -n "${ACCOUNT_ID:-}" ]; then
log_info "Configuring Cloudflare Tunnel route..."
log_info ""
log_info "=== Cloudflare Tunnel Configuration Required ==="
log_info ""
log_info "Please configure the tunnel route manually in Cloudflare Zero Trust Dashboard:"
log_info " 1. Go to: https://one.dash.cloudflare.com/"
log_info " 2. Navigate to: Zero Trust → Networks → Tunnels"
log_info " 3. Select your tunnel (ID: $TUNNEL_ID)"
log_info " 4. Click 'Configure' → 'Public Hostnames'"
log_info " 5. Add hostname:"
log_info " - Subdomain: explorer"
log_info " - Domain: $DOMAIN"
log_info " - Service: http://$EXPLORER_IP:$EXPLORER_PORT"
log_info " - Type: HTTP"
log_info ""
fi
log_success "Cloudflare configuration complete!"
log_info ""
log_info "Summary:"
# Fix: the record name and its target were concatenated with no separator
# ("$EXPLORER_DOMAIN$TARGET"); an arrow now separates them.
log_info " - DNS Record: $EXPLORER_DOMAIN → $TARGET (🟠 Proxied)"
if [ -n "$TUNNEL_ID" ]; then
log_info " - Tunnel ID: $TUNNEL_ID"
log_info " - Tunnel Route: Needs manual configuration (see above)"
fi
log_info ""
log_info "Next steps:"
log_info " 1. Wait for DNS propagation (1-5 minutes)"
log_info " 2. Test: curl -I https://$EXPLORER_DOMAIN"
View File

@@ -0,0 +1,191 @@
#!/usr/bin/env bash
# Configure Cloudflare DNS and tunnel for Blockscout Explorer.
# Usage: ./configure-cloudflare-explorer.sh
set -euo pipefail

# Target endpoint settings; each may be overridden from the environment
# (":=" assigns the default only when the variable is unset or empty,
# which matches the previous ${VAR:-default} re-assignment behavior).
: "${DOMAIN:=d-bis.org}"
: "${EXPLORER_IP:=192.168.11.140}"
: "${EXPLORER_PORT:=80}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# Colors
# ANSI escape sequences used by the log_* helpers via `echo -e`.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: colorized one-line status messages on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Check for .env file with Cloudflare credentials
# NOTE(review): the default ".env" is resolved relative to the CURRENT working
# directory, not the script's own directory — confirm this is intentional.
ENV_FILE="${ENV_FILE:-.env}"
if [ ! -f "$ENV_FILE" ]; then
log_error "Environment file not found: $ENV_FILE"
log_info "Please create $ENV_FILE with:"
log_info " CLOUDFLARE_API_TOKEN=your-token"
log_info " DOMAIN=d-bis.org"
log_info " TUNNEL_TOKEN=your-tunnel-token"
exit 1
fi
# Sourcing may override DOMAIN/EXPLORER_IP/etc. set above.
source "$ENV_FILE"
if [ -z "${CLOUDFLARE_API_TOKEN:-}" ]; then
log_error "CLOUDFLARE_API_TOKEN not set in $ENV_FILE"
exit 1
fi
log_info "Configuring Cloudflare for Blockscout Explorer"
log_info "Domain: $EXPLORER_DOMAIN"
log_info "Target: http://$EXPLORER_IP:$EXPLORER_PORT"
# Get Zone ID
# Resolves the Cloudflare zone ID for $DOMAIN via the v4 API.
log_info "Getting zone ID for $DOMAIN..."
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty')
if [ -z "$ZONE_ID" ] || [ "$ZONE_ID" = "null" ]; then
log_error "Failed to get zone ID for $DOMAIN"
exit 1
fi
log_success "Zone ID: $ZONE_ID"
# Extract tunnel ID from tunnel token or configuration
TUNNEL_ID=""
if [ -n "${TUNNEL_TOKEN:-}" ]; then
# Try to extract tunnel ID from token (if it's in the format we expect)
# (connector tokens are base64-encoded JSON carrying a TunnelID field)
TUNNEL_ID=$(echo "$TUNNEL_TOKEN" | base64 -d 2>/dev/null | jq -r '.TunnelID // empty' 2>/dev/null || echo "")
fi
# If no tunnel ID found, try to get it from Cloudflare API
# NOTE(review): this picks the FIRST account and FIRST tunnel returned —
# assumes a single-account, single-tunnel setup; confirm.
if [ -z "$TUNNEL_ID" ]; then
log_info "Getting tunnel information..."
ACCOUNT_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0].id // empty')
if [ -n "$ACCOUNT_ID" ] && [ "$ACCOUNT_ID" != "null" ]; then
TUNNELS=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$ACCOUNT_ID/cfd_tunnel" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json")
TUNNEL_ID=$(echo "$TUNNELS" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
fi
fi
# Check if DNS record already exists
log_info "Checking for existing DNS record..."
EXISTING_RECORD=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?name=$EXPLORER_DOMAIN&type=CNAME" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" | jq -r '.result[0] // empty')
if [ -n "$EXISTING_RECORD" ] && [ "$EXISTING_RECORD" != "null" ]; then
# Update path: a CNAME named "explorer" already exists — PATCH it in place.
RECORD_ID=$(echo "$EXISTING_RECORD" | jq -r '.id')
log_warn "DNS record already exists (ID: $RECORD_ID)"
log_info "Updating existing record..."
if [ -n "$TUNNEL_ID" ] && [ "$TUNNEL_ID" != "null" ]; then
TARGET="${TUNNEL_ID}.cfargotunnel.com"
log_info "Using Cloudflare Tunnel: $TARGET"
else
TARGET="$EXPLORER_IP"
log_warn "No tunnel ID found, using direct IP (may not work behind NAT)"
fi
# ttl=1 is Cloudflare's "automatic" TTL; proxied=true enables the orange cloud.
UPDATE_RESULT=$(curl -s -X PATCH "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" \
--data "{
\"type\": \"CNAME\",
\"name\": \"explorer\",
\"content\": \"$TARGET\",
\"proxied\": true,
\"ttl\": 1
}")
SUCCESS=$(echo "$UPDATE_RESULT" | jq -r '.success // false')
if [ "$SUCCESS" = "true" ]; then
log_success "DNS record updated successfully"
else
ERROR=$(echo "$UPDATE_RESULT" | jq -r '.errors[0].message // "Unknown error"')
log_error "Failed to update DNS record: $ERROR"
exit 1
fi
else
# Create path: no record yet — POST a CNAME (tunnel) or A record (direct IP).
log_info "Creating new DNS record..."
if [ -n "$TUNNEL_ID" ] && [ "$TUNNEL_ID" != "null" ]; then
TARGET="${TUNNEL_ID}.cfargotunnel.com"
RECORD_TYPE="CNAME"
log_info "Using Cloudflare Tunnel: $TARGET"
else
TARGET="$EXPLORER_IP"
RECORD_TYPE="A"
log_warn "No tunnel ID found, using A record with direct IP (may not work behind NAT)"
fi
CREATE_RESULT=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" \
--data "{
\"type\": \"$RECORD_TYPE\",
\"name\": \"explorer\",
\"content\": \"$TARGET\",
\"proxied\": true,
\"ttl\": 1
}")
SUCCESS=$(echo "$CREATE_RESULT" | jq -r '.success // false')
if [ "$SUCCESS" = "true" ]; then
log_success "DNS record created successfully"
else
ERROR=$(echo "$CREATE_RESULT" | jq -r '.errors[0].message // "Unknown error"')
log_error "Failed to create DNS record: $ERROR"
exit 1
fi
fi
# If we have a tunnel, point the operator at the manual route configuration.
# Fix: the previous revision fetched the tunnel configuration into
# TUNNEL_CONFIG here (via a GET that was never consumed) — the dead call is
# removed. Automated ingress updates are handled by
# configure-cloudflare-tunnel-route.sh instead.
if [ -n "$TUNNEL_ID" ] && [ "$TUNNEL_ID" != "null" ] && [ -n "${ACCOUNT_ID:-}" ]; then
log_info "Configuring Cloudflare Tunnel route..."
log_info ""
log_info "=== Cloudflare Tunnel Configuration Required ==="
log_info ""
log_info "Please configure the tunnel route manually in Cloudflare Zero Trust Dashboard:"
log_info " 1. Go to: https://one.dash.cloudflare.com/"
log_info " 2. Navigate to: Zero Trust → Networks → Tunnels"
log_info " 3. Select your tunnel (ID: $TUNNEL_ID)"
log_info " 4. Click 'Configure' → 'Public Hostnames'"
log_info " 5. Add hostname:"
log_info " - Subdomain: explorer"
log_info " - Domain: $DOMAIN"
log_info " - Service: http://$EXPLORER_IP:$EXPLORER_PORT"
log_info " - Type: HTTP"
log_info ""
fi
log_success "Cloudflare configuration complete!"
log_info ""
log_info "Summary:"
# Fix: the record name and its target were concatenated with no separator
# ("$EXPLORER_DOMAIN$TARGET"); an arrow now separates them.
log_info " - DNS Record: $EXPLORER_DOMAIN → $TARGET (🟠 Proxied)"
if [ -n "$TUNNEL_ID" ]; then
log_info " - Tunnel ID: $TUNNEL_ID"
log_info " - Tunnel Route: Needs manual configuration (see above)"
fi
log_info ""
log_info "Next steps:"
log_info " 1. Wait for DNS propagation (1-5 minutes)"
log_info " 2. Test: curl -I https://$EXPLORER_DOMAIN"

View File

@@ -0,0 +1,149 @@
#!/usr/bin/env bash
# Configure Cloudflare Tunnel Route for explorer.d-bis.org
# Usage: ./configure-cloudflare-tunnel-route.sh
set -euo pipefail
# Load IP configuration
# Fix: SCRIPT_DIR was previously computed twice (before and after the
# source); the duplicate has been removed.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Optional site-wide IP map; a missing file is fine (defaults below apply).
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
ENV_FILE="${ENV_FILE:-$SCRIPT_DIR/../.env}"
DOMAIN="${DOMAIN:-d-bis.org}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# Fix: the previous default nested ${IP_DEVICE_14:-...} inside itself and
# appended a literal "0" outside the expansion, producing addresses like
# "192.168.11.1400" whenever IP_DEVICE_14 held a full IP. Default to .140.
EXPLORER_IP="${EXPLORER_IP:-192.168.11.140}"
EXPLORER_PORT="${EXPLORER_PORT:-80}"
# Known tunnel for explorer.d-bis.org; override with TUNNEL_ID if it changes.
TUNNEL_ID="${TUNNEL_ID:-10ab22da-8ea3-4e2e-a896-27ece2211a05}"
# Colors
# ANSI escape sequences used by the log_* helpers via `echo -e`.
# NOTE(review): CYAN is defined but unused in this script's visible code.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
# Logging helpers: colorized one-line status messages on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Load environment variables
# The .env file (if present) supplies the Cloudflare credentials used below.
if [ -f "$ENV_FILE" ]; then
source "$ENV_FILE"
fi
# Normalize credential variables so set -u cannot trip on them later.
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
# Determine auth method
# Prefer a scoped API token; fall back to legacy global key + email.
# AUTH_HEADERS is an array so each -H pair stays a distinct curl argument.
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
else
log_error "No Cloudflare API credentials found"
exit 1
fi
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
log_error "CLOUDFLARE_ACCOUNT_ID not set"
exit 1
fi
log_info "Configuring tunnel route for $EXPLORER_DOMAIN"
log_info "Tunnel ID: $TUNNEL_ID"
# Mirror the scheme choice made later for SERVICE_URL (https only on 443).
if [ "$EXPLORER_PORT" = "443" ]; then
log_info "Service: https://$EXPLORER_IP:$EXPLORER_PORT"
else
log_info "Service: http://$EXPLORER_IP:$EXPLORER_PORT"
fi
# Get current tunnel configuration
# Fetches the tunnel's remotely-managed configuration (ingress rules).
log_info "Fetching current tunnel configuration..."
CURRENT_CONFIG=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
if ! echo "$CURRENT_CONFIG" | jq -e '.success' >/dev/null 2>&1; then
log_error "Failed to fetch tunnel configuration"
echo "$CURRENT_CONFIG" | jq '.' 2>/dev/null || echo "$CURRENT_CONFIG"
exit 1
fi
# Extract current ingress rules
CURRENT_INGRESS=$(echo "$CURRENT_CONFIG" | jq -c '.result.config.ingress // []')
# Check if explorer route already exists
# If so, drop it from the working set; it is re-added below with the new
# service URL (effectively an idempotent upsert).
if echo "$CURRENT_INGRESS" | jq -e ".[] | select(.hostname == \"$EXPLORER_DOMAIN\")" >/dev/null 2>&1; then
log_warn "Route for $EXPLORER_DOMAIN already exists"
log_info "Updating existing route..."
# Remove existing route
CURRENT_INGRESS=$(echo "$CURRENT_INGRESS" | jq "[.[] | select(.hostname != \"$EXPLORER_DOMAIN\")]")
fi
# Determine if HTTPS (port 443)
if [ "$EXPLORER_PORT" = "443" ]; then
SERVICE_URL="https://$EXPLORER_IP:$EXPLORER_PORT"
else
SERVICE_URL="http://$EXPLORER_IP:$EXPLORER_PORT"
fi
# Build explorer route as array element
# noTLSVerify tolerates a self-signed certificate on the LAN origin.
EXPLORER_ROUTE=$(jq -n \
--arg hostname "$EXPLORER_DOMAIN" \
--arg service "$SERVICE_URL" \
'[{
hostname: $hostname,
service: $service,
originRequest: {
noTLSVerify: true
}
}]')
# Separate catch-all from other rules
# Catch-all has no hostname and service starting with http_status
# NOTE(review): assumes every ingress entry carries a "service" string;
# an entry without one would make startswith() error — confirm against data.
CATCH_ALL=$(echo "$CURRENT_INGRESS" | jq '[.[] | select(.hostname == null or .hostname == "" or (.service | startswith("http_status")))]')
OTHER_ROUTES=$(echo "$CURRENT_INGRESS" | jq '[.[] | select(.hostname != null and .hostname != "" and (.service | startswith("http_status") | not))]')
# Build new ingress: explorer route + other routes + catch-all
# If no catch-all exists, add one
# (cloudflared requires the final ingress rule to be a hostname-less catch-all)
if [ "$(echo "$CATCH_ALL" | jq 'length')" -eq 0 ]; then
CATCH_ALL='[{"service":"http_status:404"}]'
fi
# Concatenate arrays properly
NEW_INGRESS=$(jq -n --argjson explorer "$EXPLORER_ROUTE" --argjson others "$OTHER_ROUTES" --argjson catchall "$CATCH_ALL" '$explorer + $others + $catchall')
# Build complete config
NEW_CONFIG=$(jq -n \
--argjson ingress "$NEW_INGRESS" \
'{
config: {
ingress: $ingress
}
}')
log_info "Updating tunnel configuration..."
# PUT replaces the tunnel's remotely-managed configuration wholesale with the
# freshly assembled ingress list.
RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$NEW_CONFIG")
if echo "$RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
log_success "Tunnel route configured successfully!"
# Fix: report the service URL actually written to the ingress rule
# (honors https when EXPLORER_PORT=443) instead of a hard-coded http://.
log_info "Route: $EXPLORER_DOMAIN → $SERVICE_URL"
exit 0
else
ERROR=$(echo "$RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_error "Failed to configure tunnel route: $ERROR"
echo "$RESPONSE" | jq '.' 2>/dev/null || echo "$RESPONSE"
exit 1
fi

View File

@@ -0,0 +1,143 @@
#!/usr/bin/env bash
# Configure Cloudflare Tunnel Route for explorer.d-bis.org.
# Usage: ./configure-cloudflare-tunnel-route.sh
set -euo pipefail

# Directory containing this script; the default .env lives one level up.
SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)

# Overridable settings — the environment wins; otherwise these defaults apply
# (":=" assigns only when unset/empty, matching the previous ${VAR:-default}
# re-assignments).
: "${ENV_FILE:=$SCRIPT_DIR/../.env}"
: "${DOMAIN:=d-bis.org}"
: "${EXPLORER_IP:=192.168.11.140}"
: "${EXPLORER_PORT:=80}"
: "${TUNNEL_ID:=10ab22da-8ea3-4e2e-a896-27ece2211a05}"
EXPLORER_DOMAIN="explorer.d-bis.org"
# Colors
# ANSI escape sequences used by the log_* helpers via `echo -e`.
# NOTE(review): CYAN is defined but unused in this script's visible code.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
# Logging helpers: colorized one-line status messages on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Load environment variables
# The .env file (if present) supplies the Cloudflare credentials used below.
if [ -f "$ENV_FILE" ]; then
source "$ENV_FILE"
fi
# Normalize credential variables so set -u cannot trip on them later.
CLOUDFLARE_ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
CLOUDFLARE_API_KEY="${CLOUDFLARE_API_KEY:-}"
CLOUDFLARE_EMAIL="${CLOUDFLARE_EMAIL:-}"
CLOUDFLARE_API_TOKEN="${CLOUDFLARE_API_TOKEN:-}"
# Determine auth method
# Prefer a scoped API token; fall back to legacy global key + email.
# AUTH_HEADERS is an array so each -H pair stays a distinct curl argument.
if [ -n "$CLOUDFLARE_API_TOKEN" ]; then
AUTH_HEADERS=(-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN")
elif [ -n "$CLOUDFLARE_API_KEY" ] && [ -n "$CLOUDFLARE_EMAIL" ]; then
AUTH_HEADERS=(-H "X-Auth-Email: $CLOUDFLARE_EMAIL" -H "X-Auth-Key: $CLOUDFLARE_API_KEY")
else
log_error "No Cloudflare API credentials found"
exit 1
fi
if [ -z "$CLOUDFLARE_ACCOUNT_ID" ]; then
log_error "CLOUDFLARE_ACCOUNT_ID not set"
exit 1
fi
log_info "Configuring tunnel route for $EXPLORER_DOMAIN"
log_info "Tunnel ID: $TUNNEL_ID"
# Mirror the scheme choice made later for SERVICE_URL (https only on 443).
if [ "$EXPLORER_PORT" = "443" ]; then
log_info "Service: https://$EXPLORER_IP:$EXPLORER_PORT"
else
log_info "Service: http://$EXPLORER_IP:$EXPLORER_PORT"
fi
# Get current tunnel configuration
# Fetches the tunnel's remotely-managed configuration (ingress rules).
log_info "Fetching current tunnel configuration..."
CURRENT_CONFIG=$(curl -s -X GET "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json")
if ! echo "$CURRENT_CONFIG" | jq -e '.success' >/dev/null 2>&1; then
log_error "Failed to fetch tunnel configuration"
echo "$CURRENT_CONFIG" | jq '.' 2>/dev/null || echo "$CURRENT_CONFIG"
exit 1
fi
# Extract current ingress rules
CURRENT_INGRESS=$(echo "$CURRENT_CONFIG" | jq -c '.result.config.ingress // []')
# Check if explorer route already exists
# If so, drop it from the working set; it is re-added below with the new
# service URL (effectively an idempotent upsert).
if echo "$CURRENT_INGRESS" | jq -e ".[] | select(.hostname == \"$EXPLORER_DOMAIN\")" >/dev/null 2>&1; then
log_warn "Route for $EXPLORER_DOMAIN already exists"
log_info "Updating existing route..."
# Remove existing route
CURRENT_INGRESS=$(echo "$CURRENT_INGRESS" | jq "[.[] | select(.hostname != \"$EXPLORER_DOMAIN\")]")
fi
# Determine if HTTPS (port 443)
if [ "$EXPLORER_PORT" = "443" ]; then
SERVICE_URL="https://$EXPLORER_IP:$EXPLORER_PORT"
else
SERVICE_URL="http://$EXPLORER_IP:$EXPLORER_PORT"
fi
# Build explorer route as array element
# noTLSVerify tolerates a self-signed certificate on the LAN origin.
EXPLORER_ROUTE=$(jq -n \
--arg hostname "$EXPLORER_DOMAIN" \
--arg service "$SERVICE_URL" \
'[{
hostname: $hostname,
service: $service,
originRequest: {
noTLSVerify: true
}
}]')
# Separate catch-all from other rules
# Catch-all has no hostname and service starting with http_status
# NOTE(review): assumes every ingress entry carries a "service" string;
# an entry without one would make startswith() error — confirm against data.
CATCH_ALL=$(echo "$CURRENT_INGRESS" | jq '[.[] | select(.hostname == null or .hostname == "" or (.service | startswith("http_status")))]')
OTHER_ROUTES=$(echo "$CURRENT_INGRESS" | jq '[.[] | select(.hostname != null and .hostname != "" and (.service | startswith("http_status") | not))]')
# Build new ingress: explorer route + other routes + catch-all
# If no catch-all exists, add one
# (cloudflared requires the final ingress rule to be a hostname-less catch-all)
if [ "$(echo "$CATCH_ALL" | jq 'length')" -eq 0 ]; then
CATCH_ALL='[{"service":"http_status:404"}]'
fi
# Concatenate arrays properly
NEW_INGRESS=$(jq -n --argjson explorer "$EXPLORER_ROUTE" --argjson others "$OTHER_ROUTES" --argjson catchall "$CATCH_ALL" '$explorer + $others + $catchall')
# Build complete config
NEW_CONFIG=$(jq -n \
--argjson ingress "$NEW_INGRESS" \
'{
config: {
ingress: $ingress
}
}')
log_info "Updating tunnel configuration..."
# PUT replaces the tunnel's remotely-managed configuration wholesale with the
# freshly assembled ingress list.
RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/accounts/$CLOUDFLARE_ACCOUNT_ID/cfd_tunnel/$TUNNEL_ID/configurations" \
"${AUTH_HEADERS[@]}" \
-H "Content-Type: application/json" \
--data "$NEW_CONFIG")
if echo "$RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
log_success "Tunnel route configured successfully!"
# Fix: report the service URL actually written to the ingress rule
# (honors https when EXPLORER_PORT=443) instead of a hard-coded http://.
log_info "Route: $EXPLORER_DOMAIN → $SERVICE_URL"
exit 0
else
ERROR=$(echo "$RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "API call failed")
log_error "Failed to configure tunnel route: $ERROR"
echo "$RESPONSE" | jq '.' 2>/dev/null || echo "$RESPONSE"
exit 1
fi

View File

@@ -0,0 +1,123 @@
#!/usr/bin/env bash
# Configure Cloudflare WAF Rule to Allow Only ThirdWeb Traffic
# Usage: ./scripts/configure-cloudflare-waf-thirdweb-rule.sh
set -euo pipefail
# NOTE(review): PROJECT_ROOT is computed but never used in the visible script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Configuration
# Target zone; ZONE_ID may be pre-supplied via env, otherwise looked up below.
DOMAIN="defi-oracle.io"
ZONE_ID="${CLOUDFLARE_ZONE_ID_DEFI_ORACLE:-}"
ACCOUNT_ID="${CLOUDFLARE_ACCOUNT_ID:-}"
# Colors
# ANSI escape sequences used by the log_* helpers via `echo -e`.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers: colorized one-line status messages on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Check for API token
# Accept either a scoped API token or the legacy global key + email pair.
if [[ -z "${CLOUDFLARE_API_TOKEN:-}" ]] && [[ -z "${CLOUDFLARE_API_KEY:-}" ]]; then
log_error "Cloudflare API credentials not found!"
echo ""
echo "Set one of:"
echo " export CLOUDFLARE_API_TOKEN=\"your-token\""
echo " OR"
echo " export CLOUDFLARE_API_KEY=\"your-key\""
echo " export CLOUDFLARE_EMAIL=\"your-email\""
echo ""
exit 1
fi
# Set up auth headers
# NOTE(review): AUTH_HEADER/AUTH_KEY are assigned here but the curl calls
# below rebuild their headers inline — these variables appear unused.
if [[ -n "${CLOUDFLARE_API_TOKEN:-}" ]]; then
AUTH_HEADER="Authorization: Bearer ${CLOUDFLARE_API_TOKEN}"
elif [[ -n "${CLOUDFLARE_API_KEY:-}" ]] && [[ -n "${CLOUDFLARE_EMAIL:-}" ]]; then
AUTH_HEADER="X-Auth-Email: ${CLOUDFLARE_EMAIL}"
AUTH_KEY="X-Auth-Key: ${CLOUDFLARE_API_KEY}"
else
log_error "Incomplete credentials"
exit 1
fi
# Get Zone ID if not provided
if [[ -z "$ZONE_ID" ]]; then
log_info "Getting Zone ID for $DOMAIN..."
if [[ -n "${CLOUDFLARE_API_TOKEN:-}" ]]; then
ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}")
else
ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN" \
-H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \
-H "X-Auth-Key: ${CLOUDFLARE_API_KEY}")
fi
ZONE_ID=$(echo "$ZONE_RESPONSE" | jq -r '.result[0].id // empty' 2>/dev/null || echo "")
if [[ -z "$ZONE_ID" ]] || [[ "$ZONE_ID" == "null" ]]; then
log_error "Failed to get Zone ID for $DOMAIN"
exit 1
fi
fi
log_success "Zone ID: $ZONE_ID"
# Create WAF rule to allow ThirdWeb
log_info "Creating WAF rule: Allow ThirdWeb Traffic..."
# Expression to match ThirdWeb domains
# Matches requests whose Origin or Referer header points at *.thirdweb.com.
# NOTE(review): regex "matches" in rule expressions requires a plan that
# supports it (Business/Enterprise) — confirm for this zone.
EXPRESSION='(http.request.headers["origin"][*] matches "https?://.*\\.thirdweb\\.com(/.*)?$" or http.request.headers["referer"][*] matches "https?://.*\\.thirdweb\\.com(/.*)?$")'
# NOTE(review): the legacy /firewall/rules endpoint documents an ARRAY payload
# of rules, each with a nested "filter" object holding the expression; this
# single flat object may be rejected — verify against the Cloudflare API docs.
RULE_DATA=$(jq -n \
--arg description "Allow traffic from ThirdWeb domains only" \
--arg expression "$EXPRESSION" \
'{
action: "allow",
description: $description,
expression: $expression
}')
if [[ -n "${CLOUDFLARE_API_TOKEN:-}" ]]; then
RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/firewall/rules" \
-H "Authorization: Bearer ${CLOUDFLARE_API_TOKEN}" \
-H "Content-Type: application/json" \
-d "$RULE_DATA")
else
RESPONSE=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/firewall/rules" \
-H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \
-H "X-Auth-Key: ${CLOUDFLARE_API_KEY}" \
-H "Content-Type: application/json" \
-d "$RULE_DATA")
fi
if echo "$RESPONSE" | jq -e '.success' >/dev/null 2>&1; then
log_success "WAF rule created successfully"
RULE_ID=$(echo "$RESPONSE" | jq -r '.result.id // empty')
log_info "Rule ID: $RULE_ID"
else
log_error "Failed to create WAF rule"
echo "$RESPONSE" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "$RESPONSE"
exit 1
fi
echo ""
log_success "═══════════════════════════════════════════════════════════"
log_success " WAF RULE CONFIGURED"
log_success "═══════════════════════════════════════════════════════════"
echo ""
log_info "Note: You may also want to create a 'Block All Other' rule"
log_info " that blocks traffic not matching the ThirdWeb pattern"
echo ""
log_info "To create a block rule, go to Cloudflare Dashboard:"
log_info " Security → WAF → Custom Rules → Create rule"
log_info " Expression: (everything else)"
log_info " Action: Block"
echo ""

View File

@@ -0,0 +1,62 @@
#!/bin/bash
# Configure network interfaces for all reassigned containers.
#
# For every container VMID listed below, connect to the Proxmox node and
# (inside the container, via `pct exec`) bring eth0 up, assign its static
# /24 address, and install the default route.
#
# Required env: PROXMOX_HOST_R630_01 — address of the Proxmox node.
# Optional env: NETWORK_GATEWAY plus the per-service IP overrides below.
set -uo pipefail

# Fail early with an explicit message instead of set -u's terse error.
NODE_IP="${PROXMOX_HOST_R630_01:?PROXMOX_HOST_R630_01 must be set (Proxmox node address)}"

# Container VMID → static IP mappings. Entries with a ${VAR:-default} form can
# be overridden via the environment. (A previous revision nested the same
# default four levels deep, e.g. ${IP_SERVICE_50:-${IP_SERVICE_50:-...}} — an
# artifact of repeated automated rewrites; a single level is equivalent.)
declare -A container_ips=(
  ["10000"]="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
  ["10001"]="${ORDER_POSTGRES_REPLICA:-192.168.11.45}"
  ["10020"]="${ORDER_REDIS_IP:-192.168.11.38}"
  ["10030"]="192.168.11.40"
  ["10040"]="192.168.11.41"
  ["10050"]="192.168.11.49"
  ["10060"]="192.168.11.42"
  ["10070"]="${IP_SERVICE_50:-192.168.11.50}"
  ["10080"]="192.168.11.43"
  ["10090"]="${IP_SERVICE_36:-192.168.11.36}"
  ["10091"]="${IP_SERVICE_35:-192.168.11.35}"
  ["10092"]="${IP_MIM_WEB:-192.168.11.37}"
  ["10200"]="192.168.11.46"
  ["10201"]="192.168.11.47"
  ["10202"]="192.168.11.48"
  ["10210"]="192.168.11.39"
  ["10230"]="${IP_SERVICE_51:-192.168.11.51}"
  ["10232"]="192.168.11.52"
)
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"

echo "═══════════════════════════════════════════════════════════"
echo "Configuring Network Interfaces for Reassigned Containers"
echo "═══════════════════════════════════════════════════════════"
echo ""

SUCCESS=0
FAILED=0
for vmid in "${!container_ips[@]}"; do
  ip="${container_ips[$vmid]}"
  echo "Configuring CT $vmid ($ip)..."
  # Bring up the interface and configure IP + default route. 'ip addr add'
  # may legitimately fail when the address is already assigned, hence the
  # 2>/dev/null and ';' chaining so the route step still runs.
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
    "pct exec $vmid -- sh -c 'ip link set eth0 up && ip addr add $ip/24 dev eth0 2>/dev/null; ip route add default via $GATEWAY dev eth0 2>/dev/null'" 2>&1; then
    echo " ✅ Network configured"
    SUCCESS=$((SUCCESS + 1))
  else
    echo " ❌ Failed to configure network"
    FAILED=$((FAILED + 1))
  fi
done

echo ""
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo " Success: $SUCCESS"
echo " Failed: $FAILED"
echo " Total: ${#container_ips[@]}"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,62 @@
#!/bin/bash
# Configure network interfaces for all reassigned containers.
# For each VMID below, run (via pct exec on the Proxmox node): bring eth0 up,
# attach its static /24 address, and set the default gateway route. Prints a
# per-container status line followed by a summary.
set -uo pipefail

NODE_IP="192.168.11.11"

# Container IP mappings: VMID → static address (a few overridable via env).
declare -A container_ips=(
  ["10000"]="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
  ["10001"]="${ORDER_POSTGRES_REPLICA:-192.168.11.45}"
  ["10020"]="${ORDER_REDIS_IP:-192.168.11.38}"
  ["10030"]="192.168.11.40"
  ["10040"]="192.168.11.41"
  ["10050"]="192.168.11.49"
  ["10060"]="192.168.11.42"
  ["10070"]="192.168.11.50"
  ["10080"]="192.168.11.43"
  ["10090"]="192.168.11.36"
  ["10091"]="192.168.11.35"
  ["10092"]="192.168.11.37"
  ["10200"]="192.168.11.46"
  ["10201"]="192.168.11.47"
  ["10202"]="192.168.11.48"
  ["10210"]="192.168.11.39"
  ["10230"]="192.168.11.51"
  ["10232"]="192.168.11.52"
)
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"

BAR="═══════════════════════════════════════════════════════════"
printf '%s\n' "$BAR"
printf '%s\n' "Configuring Network Interfaces for Reassigned Containers"
printf '%s\n' "$BAR"
printf '\n'

ok_count=0
bad_count=0
for vmid in "${!container_ips[@]}"; do
  addr="${container_ips[$vmid]}"
  printf 'Configuring CT %s (%s)...\n' "$vmid" "$addr"
  # Bring the interface up and configure address + default route inside the
  # container; an already-assigned address is tolerated (stderr suppressed,
  # ';' chaining keeps the route step running).
  remote_cmd="pct exec $vmid -- sh -c 'ip link set eth0 up && ip addr add $addr/24 dev eth0 2>/dev/null; ip route add default via $GATEWAY dev eth0 2>/dev/null'"
  if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} "$remote_cmd" 2>&1; then
    printf '%s\n' " ✅ Network configured"
    ok_count=$((ok_count + 1))
  else
    printf '%s\n' " ❌ Failed to configure network"
    bad_count=$((bad_count + 1))
  fi
done

printf '\n'
printf '%s\n' "$BAR"
printf '%s\n' "Configuration Complete"
printf '%s\n' "$BAR"
printf '%s\n' " Success: $ok_count"
printf '%s\n' " Failed: $bad_count"
printf '%s\n' " Total: ${#container_ips[@]}"
printf '%s\n' "$BAR"

View File

@@ -0,0 +1,54 @@
#!/bin/bash
# Configure Service Dependencies for DBIS Services.
#
# Rewrites .env files inside the DBIS API containers (10150, 10151) so they
# point at the shared Postgres/Redis instances, then points the frontend
# container (10130) at the API.
#
# Required env: PROXMOX_HOST_R630_01 — Proxmox node address.
# Optional env: DBIS_POSTGRES_PRIMARY, DBIS_DB_PASSWORD, IP_DBIS_API.
set -euo pipefail

# Load IP configuration (best-effort; defaults below cover a missing file).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Fail early with a clear message instead of set -u's terse error.
NODE_IP="${PROXMOX_HOST_R630_01:?PROXMOX_HOST_R630_01 must be set (Proxmox node address)}"

log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }

echo "Configuring DBIS service dependencies..."

# DBIS service IPs
POSTGRES_IP="${DBIS_POSTGRES_PRIMARY:-192.168.11.105}"
REDIS_IP="192.168.11.120"
# SECURITY: prefer supplying the credential via DBIS_DB_PASSWORD. The literal
# fallback is kept only for backward compatibility; it should be rotated and
# removed from version control.
DB_PASSWORD="${DBIS_DB_PASSWORD:-8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771}"

for vmid in 10150 10151; do
  log_info "Configuring dependencies for CT $vmid..."
  # Quoting note: the ssh argument is double-quoted, so ${DB_PASSWORD},
  # ${POSTGRES_IP}, ${REDIS_IP} and $vmid expand LOCALLY before the command
  # is sent; escaped \$envfile expands on the remote side. The 'CONFIG_EOF'
  # quoting is part of the remote command text.
  ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter $vmid <<'CONFIG_EOF'
# Update .env files
find /opt /home /root -name \".env\" -type f 2>/dev/null | while read envfile; do
[ -r \"\$envfile\" ] && {
sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://dbis:${DB_PASSWORD}@${POSTGRES_IP}:5432/dbis_core|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${REDIS_IP}:6379|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|DB_HOST=.*|DB_HOST=${POSTGRES_IP}|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|REDIS_HOST=.*|REDIS_HOST=${REDIS_IP}|g\" \"\$envfile\" 2>/dev/null || true
}
done
echo \"Dependencies configured for CT $vmid\"
CONFIG_EOF
" && log_success "Dependencies configured for CT $vmid" || log_info "Configuration updated for CT $vmid"
done

# Configure frontend
log_info "Configuring frontend dependencies..."
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter 10130 <<'CONFIG_EOF'
find /opt /home /root -name \".env*\" -type f 2>/dev/null | while read envfile; do
[ -r \"\$envfile\" ] && {
sed -i \"s|VITE_API_BASE_URL=.*|VITE_API_BASE_URL=http://${IP_DBIS_API:-192.168.11.155}:3000|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|NEXT_PUBLIC_API_URL=.*|NEXT_PUBLIC_API_URL=http://${IP_DBIS_API:-192.168.11.155}:3000|g\" \"\$envfile\" 2>/dev/null || true
}
done
echo \"Frontend dependencies configured\"
CONFIG_EOF
" && log_success "Frontend dependencies configured"

echo "DBIS service dependencies configured!"

View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Configure Service Dependencies for DBIS Services (hardcoded-address variant).
# Rewrites .env files inside the DBIS API containers (10150, 10151) to point
# at the shared Postgres/Redis, then points the frontend container (10130)
# at the API.
set -euo pipefail

# Configure Service Dependencies for DBIS Services
NODE_IP="192.168.11.11"

log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }

echo "Configuring DBIS service dependencies..."

# DBIS service IPs
POSTGRES_IP="192.168.11.105"
REDIS_IP="192.168.11.120"
# NOTE(review): hardcoded credential checked into source — should be moved to
# an environment variable / secret store and rotated.
DB_PASSWORD="8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771"

for vmid in 10150 10151; do
  log_info "Configuring dependencies for CT $vmid..."
  # Quoting: the whole ssh argument is double-quoted, so ${DB_PASSWORD},
  # ${POSTGRES_IP}, ${REDIS_IP} and $vmid expand LOCALLY; escaped \$envfile
  # expands on the remote side inside the heredoc fed to `pct enter`.
  ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter $vmid <<'CONFIG_EOF'
# Update .env files
find /opt /home /root -name \".env\" -type f 2>/dev/null | while read envfile; do
[ -r \"\$envfile\" ] && {
sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://dbis:${DB_PASSWORD}@${POSTGRES_IP}:5432/dbis_core|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${REDIS_IP}:6379|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|DB_HOST=.*|DB_HOST=${POSTGRES_IP}|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|REDIS_HOST=.*|REDIS_HOST=${REDIS_IP}|g\" \"\$envfile\" 2>/dev/null || true
}
done
echo \"Dependencies configured for CT $vmid\"
CONFIG_EOF
" && log_success "Dependencies configured for CT $vmid" || log_info "Configuration updated for CT $vmid"
done

# Configure frontend
log_info "Configuring frontend dependencies..."
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter 10130 <<'CONFIG_EOF'
find /opt /home /root -name \".env*\" -type f 2>/dev/null | while read envfile; do
[ -r \"\$envfile\" ] && {
sed -i \"s|VITE_API_BASE_URL=.*|VITE_API_BASE_URL=http://192.168.11.155:3000|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|NEXT_PUBLIC_API_URL=.*|NEXT_PUBLIC_API_URL=http://192.168.11.155:3000|g\" \"\$envfile\" 2>/dev/null || true
}
done
echo \"Frontend dependencies configured\"
CONFIG_EOF
" && log_success "Frontend dependencies configured"

echo "DBIS service dependencies configured!"

View File

@@ -0,0 +1,333 @@
#!/usr/bin/env bash
set -euo pipefail

# Load IP configuration (best-effort; a default below covers a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Configure direct route: NPMplus → Blockscout:4000 (bypassing nginx on VMID 5000)
# This creates a more direct connection to reduce 502 errors
# Usage: ./configure-direct-blockscout-route.sh
VMID=5000
# Fall back to the documented static address when ip-addresses.conf is absent;
# previously an unset IP_BLOCKSCOUT aborted the script via `set -u` with an
# unhelpful "unbound variable" error.
BLOCKSCOUT_IP="${IP_BLOCKSCOUT:-192.168.11.140}"
BLOCKSCOUT_PORT=4000
PROXMOX_HOST="${1:-pve2}"

echo "=========================================="
echo "Configure Direct Blockscout Route"
echo "=========================================="
echo "VMID: $VMID ($BLOCKSCOUT_IP)"
echo "Direct Port: $BLOCKSCOUT_PORT"
echo "Bypassing: Nginx on port 80"
echo "=========================================="
echo ""

# Check if we're on Proxmox host; when `pct` is unavailable, proxy container
# commands through ssh to the Proxmox node instead.
if ! command -v pct &>/dev/null; then
  echo "⚠️ pct command not available"
  echo " This script should be run on Proxmox host"
  EXEC_PREFIX="ssh root@$PROXMOX_HOST"
else
  EXEC_PREFIX=""
fi
# Step 1: Check if Blockscout is listening on port 4000.
# All `pct exec` probes go through $EXEC_PREFIX (empty when running directly
# on the Proxmox host, "ssh root@..." otherwise).
echo "=== Step 1: Checking Blockscout Port Configuration ==="
if [ -n "$EXEC_PREFIX" ]; then
  PORT_CHECK=$($EXEC_PREFIX "pct exec $VMID -- ss -tlnp 2>/dev/null | grep :$BLOCKSCOUT_PORT || echo 'not found'")
else
  PORT_CHECK=$(pct exec $VMID -- ss -tlnp 2>/dev/null | grep :$BLOCKSCOUT_PORT || echo "not found")
fi
# Classify the listener: loopback-only needs reconfiguration; 0.0.0.0 (or any
# other bind on the port) is usable; no listener at all is fatal.
if echo "$PORT_CHECK" | grep -q "127.0.0.1:$BLOCKSCOUT_PORT"; then
  echo " ⚠️ Blockscout is listening on 127.0.0.1:$BLOCKSCOUT_PORT (localhost only)"
  echo " 💡 Need to configure it to listen on 0.0.0.0:$BLOCKSCOUT_PORT for direct access"
  NEEDS_CONFIG=true
elif echo "$PORT_CHECK" | grep -q "0.0.0.0:$BLOCKSCOUT_PORT\|:$BLOCKSCOUT_PORT.*0.0.0.0"; then
  echo " ✅ Blockscout is already listening on 0.0.0.0:$BLOCKSCOUT_PORT (network accessible)"
  NEEDS_CONFIG=false
elif echo "$PORT_CHECK" | grep -q ":$BLOCKSCOUT_PORT"; then
  echo " ✅ Blockscout is listening on port $BLOCKSCOUT_PORT"
  NEEDS_CONFIG=false
else
  echo " ❌ Blockscout is NOT listening on port $BLOCKSCOUT_PORT"
  echo " 💡 Blockscout service may not be running"
  echo ""
  echo " To start Blockscout service:"
  echo " pct exec $VMID -- systemctl start blockscout.service"
  exit 1
fi
echo ""

# Step 2: Check Blockscout environment/config to configure listening address.
# Advisory only — prints guidance, does not change anything.
if [ "$NEEDS_CONFIG" = true ]; then
  echo "=== Step 2: Configuring Blockscout to Listen on Network ==="
  echo " Checking Blockscout configuration..."
  # Check if Blockscout is running in Docker (exclude its postgres container).
  if [ -n "$EXEC_PREFIX" ]; then
    BLOCKSCOUT_CONTAINER=$($EXEC_PREFIX "pct exec $VMID -- docker ps --format '{{.Names}}' | grep blockscout | grep -v postgres | head -1" || echo "")
  else
    BLOCKSCOUT_CONTAINER=$(pct exec $VMID -- docker ps --format '{{.Names}}' | grep blockscout | grep -v postgres | head -1 || echo "")
  fi
  if [ -n "$BLOCKSCOUT_CONTAINER" ]; then
    echo " ✅ Found Blockscout container: $BLOCKSCOUT_CONTAINER"
    echo " 💡 Blockscout in Docker typically binds to 0.0.0.0 by default"
    echo " 💡 If it's only on localhost, check docker-compose.yml or environment variables"
    echo " 💡 Look for PORT or LISTEN_ADDRESS environment variables"
  else
    echo " ⚠️ Blockscout container not found"
    echo " 💡 Check if Blockscout is running as a system service instead"
  fi
  echo ""
fi

# Step 3: Test direct connection to Blockscout — once via loopback inside the
# container, once via the container's network IP from outside. "000" from curl
# means the connection itself failed (refused/timeout).
echo "=== Step 3: Testing Direct Connection to Blockscout ==="
if [ -n "$EXEC_PREFIX" ]; then
  DIRECT_TEST=$($EXEC_PREFIX "pct exec $VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://127.0.0.1:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null" || echo "000")
  NETWORK_TEST=$($EXEC_PREFIX "curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null" || echo "000")
else
  DIRECT_TEST=$(pct exec $VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://127.0.0.1:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null || echo "000")
  NETWORK_TEST=$(curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null || echo "000")
fi
if [ "$DIRECT_TEST" = "200" ]; then
  echo " ✅ Blockscout API responding on localhost (HTTP $DIRECT_TEST)"
  DIRECT_ACCESS=true
else
  echo " ❌ Blockscout API not responding on localhost (HTTP $DIRECT_TEST)"
  DIRECT_ACCESS=false
fi
if [ "$NETWORK_TEST" = "200" ]; then
  echo " ✅ Blockscout API accessible via network IP (HTTP $NETWORK_TEST)"
  NETWORK_ACCESS=true
elif [ "$NETWORK_TEST" = "000" ]; then
  echo " ⚠️ Blockscout API not accessible via network IP (connection refused)"
  echo " 💡 Blockscout may only be listening on localhost"
  NETWORK_ACCESS=false
else
  echo " ⚠️ Blockscout API returned HTTP $NETWORK_TEST via network"
  NETWORK_ACCESS=false
fi
echo ""

# Step 4: Update NPMplus configuration (if network accessible). Prints either
# manual dashboard steps or guidance to make Blockscout network-reachable.
if [ "$NETWORK_ACCESS" = true ]; then
  echo "=== Step 4: Updating NPMplus Configuration ==="
  echo " ✅ Blockscout is network accessible"
  echo " 💡 Update NPMplus to point directly to: http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT"
  echo ""
  echo " Manual Steps:"
  echo " 1. Log into NPMplus: https://192.168.0.166:81"
  echo " 2. Find 'explorer.d-bis.org' proxy host"
  echo " 3. Update Forward Host: $BLOCKSCOUT_IP"
  echo " 4. Update Forward Port: $BLOCKSCOUT_PORT"
  echo " 5. Save changes"
  echo ""
  echo " Or run the automated script:"
  echo " cd scripts/nginx-proxy-manager"
  echo " node update-explorer-direct-route.js"
  echo ""
else
  echo "=== Step 4: Cannot Configure Direct Route ==="
  echo " ❌ Blockscout is not network accessible"
  echo " 💡 Need to configure Blockscout to listen on 0.0.0.0:$BLOCKSCOUT_PORT"
  echo ""
  echo " For Docker containers, check docker-compose.yml:"
  echo " - Ensure PORT environment variable is set"
  echo " - Check if LISTEN_ADDRESS is set to 0.0.0.0"
  echo " - Restart Blockscout container after changes"
  echo ""
  echo " For systemd services, check service file:"
  echo " pct exec $VMID -- systemctl cat blockscout.service"
  echo ""
fi

# Step 5: Alternative - Keep nginx but simplify configuration (advisory only)
echo "=== Step 5: Alternative Solution (Keep Nginx) ==="
echo " If direct route is not possible, ensure nginx is properly configured:"
echo " pct exec $VMID -- systemctl status nginx"
echo " pct exec $VMID -- nginx -t"
echo " pct exec $VMID -- systemctl restart nginx"
echo ""

# Summary of the current vs. proposed routing topology.
echo "=========================================="
echo "SUMMARY"
echo "=========================================="
echo "Current Route:"
echo " NPMplus → $BLOCKSCOUT_IP:80 (nginx) → 127.0.0.1:$BLOCKSCOUT_PORT (Blockscout)"
echo ""
echo "Proposed Direct Route:"
if [ "$NETWORK_ACCESS" = true ]; then
  echo " NPMplus → $BLOCKSCOUT_IP:$BLOCKSCOUT_PORT (Blockscout directly) ✅"
  echo " Status: Ready to configure"
else
  echo " NPMplus → $BLOCKSCOUT_IP:$BLOCKSCOUT_PORT (Blockscout directly) ❌"
  echo " Status: Blockscout needs network access configuration"
fi
echo ""
echo "Benefits of Direct Route:"
echo " ✅ Removes nginx proxy layer (one less hop)"
echo " ✅ Reduces latency"
echo " ✅ Fewer points of failure"
echo " ✅ Simpler architecture"
echo ""
# Create update script for NPMplus
if [ "$NETWORK_ACCESS" = true ]; then
  echo "Creating NPMplus update script..."
  TARGET_JS="/home/intlc/projects/proxmox/scripts/nginx-proxy-manager/update-explorer-direct-route.js"
  # The heredoc delimiter is quoted ('SCRIPT_EOF') so JavaScript template
  # literals (`${...}`) survive unexpanded — but that also means shell
  # variables are NOT expanded here. A previous revision wrote
  # ${IP_BLOCKSCOUT} inside this quoted heredoc, which ended up verbatim in
  # the generated file and produced an invalid target URL. We now emit a
  # __BLOCKSCOUT_IP__ placeholder and substitute it with sed afterwards.
  cat > "$TARGET_JS" << 'SCRIPT_EOF'
#!/usr/bin/env node
/**
 * Update explorer.d-bis.org in NPMplus to use direct Blockscout route
 * Changes from: http://__BLOCKSCOUT_IP__:80 → http://__BLOCKSCOUT_IP__:4000
 */
import { chromium } from 'playwright';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
import { config } from 'dotenv';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const PROJECT_ROOT = join(__dirname, '../../..');
config({ path: join(PROJECT_ROOT, '.env') });
const NPM_URL = process.env.NPM_URL || 'https://192.168.0.166:81';
const NPM_EMAIL = process.env.NPM_EMAIL || 'nsatoshi2007@hotmail.com';
const NPM_PASSWORD = process.env.NPM_PASSWORD;
if (!NPM_PASSWORD) {
  throw new Error('NPM_PASSWORD environment variable is required');
}
const HEADLESS = process.env.HEADLESS !== 'false';
const DOMAIN = 'explorer.d-bis.org';
const NEW_TARGET = 'http://__BLOCKSCOUT_IP__:4000';
function log(message, type = 'info') {
  const icons = { success: '✅', error: '❌', warning: '⚠️', info: '📋' };
  console.log(`${icons[type]} ${message}`);
}
async function login(page) {
  log('Logging in to NPMplus...');
  await page.goto(NPM_URL, { waitUntil: 'domcontentloaded', timeout: 30000 });
  await page.waitForSelector('input[type="email"], input[name="email"]', { timeout: 10000 });
  await page.fill('input[type="email"]', NPM_EMAIL);
  await page.fill('input[type="password"]', NPM_PASSWORD);
  const loginButton = await page.$('button[type="submit"]');
  if (loginButton) {
    await loginButton.click();
  } else {
    await page.keyboard.press('Enter');
  }
  await page.waitForTimeout(3000);
  log('Logged in', 'success');
}
async function updateProxyHost(page) {
  log(`Updating ${DOMAIN} to direct route: ${NEW_TARGET}`);
  // Navigate to proxy hosts
  await page.goto(`${NPM_URL}/#/proxy-hosts`, { waitUntil: 'domcontentloaded' });
  await page.waitForTimeout(2000);
  // Find and click on explorer.d-bis.org
  const domainLink = await page.$(`text=${DOMAIN}`);
  if (!domainLink) {
    log(`Could not find ${DOMAIN} in proxy hosts list`, 'error');
    return false;
  }
  await domainLink.click();
  await page.waitForTimeout(2000);
  // Update forward host and port
  const url = new URL(NEW_TARGET);
  const hostname = url.hostname;
  const port = url.port || (url.protocol === 'https:' ? '443' : '80');
  const hostInput = await page.$('input[name="forward_host"], input[name="forward_hostname"]');
  if (hostInput) {
    await hostInput.fill(hostname);
    log(` Updated forward host: ${hostname}`);
  }
  const portInput = await page.$('input[name="forward_port"]');
  if (portInput) {
    await portInput.fill(port);
    log(` Updated forward port: ${port}`);
  }
  // Save
  const saveButton = await page.$('button:has-text("Save"), button[type="submit"]');
  if (saveButton) {
    await saveButton.click();
    log(` Saved changes`, 'success');
    await page.waitForTimeout(2000);
    return true;
  }
  return false;
}
async function main() {
  const browser = await chromium.launch({ headless: HEADLESS, ignoreHTTPSErrors: true });
  const context = await browser.newContext({ ignoreHTTPSErrors: true });
  const page = await context.newPage();
  try {
    await login(page);
    const success = await updateProxyHost(page);
    if (success) {
      log(`✅ ${DOMAIN} updated to use direct route`, 'success');
    } else {
      log(`❌ Failed to update ${DOMAIN}`, 'error');
      process.exit(1);
    }
  } catch (error) {
    log(`Fatal error: ${error.message}`, 'error');
    await page.screenshot({ path: '/tmp/npmplus-update-error.png' });
    process.exit(1);
  } finally {
    await browser.close();
  }
}
main().catch(console.error);
SCRIPT_EOF
  # Inject the resolved Blockscout address into the generated script.
  sed -i "s|__BLOCKSCOUT_IP__|${BLOCKSCOUT_IP}|g" "$TARGET_JS"
  chmod +x "$TARGET_JS"
  echo " ✅ Created: scripts/nginx-proxy-manager/update-explorer-direct-route.js"
  echo ""
fi
# Final guidance: which follow-up commands to run, depending on whether the
# network-reachability probe in Step 3 succeeded.
echo "=========================================="
echo "NEXT STEPS"
echo "=========================================="
if [ "$NETWORK_ACCESS" = true ]; then
  echo "1. Run the NPMplus update script:"
  echo " cd scripts/nginx-proxy-manager"
  echo " node update-explorer-direct-route.js"
  echo ""
  echo "2. Test the direct route:"
  echo " curl -I http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats"
  echo ""
else
  echo "1. Configure Blockscout to listen on 0.0.0.0:$BLOCKSCOUT_PORT"
  echo "2. Restart Blockscout service"
  echo "3. Run this script again to verify network access"
  echo "4. Then run the NPMplus update script"
  echo ""
fi
echo "=========================================="

View File

@@ -0,0 +1,327 @@
#!/usr/bin/env bash
set -euo pipefail
# Configure direct route: NPMplus → Blockscout:4000 (bypassing nginx on VMID 5000)
# This creates a more direct connection to reduce 502 errors
# Usage: ./configure-direct-blockscout-route.sh
# (Hardcoded-address variant of the configurable script above.)
VMID=5000
BLOCKSCOUT_IP="192.168.11.140"
BLOCKSCOUT_PORT=4000
PROXMOX_HOST="${1:-pve2}"

echo "=========================================="
echo "Configure Direct Blockscout Route"
echo "=========================================="
echo "VMID: $VMID ($BLOCKSCOUT_IP)"
echo "Direct Port: $BLOCKSCOUT_PORT"
echo "Bypassing: Nginx on port 80"
echo "=========================================="
echo ""

# Check if we're on Proxmox host; when `pct` is unavailable, proxy the
# container commands through ssh to the Proxmox node.
if ! command -v pct &>/dev/null; then
  echo "⚠️ pct command not available"
  echo " This script should be run on Proxmox host"
  EXEC_PREFIX="ssh root@$PROXMOX_HOST"
else
  EXEC_PREFIX=""
fi

# Step 1: Check if Blockscout is listening on port 4000.
echo "=== Step 1: Checking Blockscout Port Configuration ==="
if [ -n "$EXEC_PREFIX" ]; then
  PORT_CHECK=$($EXEC_PREFIX "pct exec $VMID -- ss -tlnp 2>/dev/null | grep :$BLOCKSCOUT_PORT || echo 'not found'")
else
  PORT_CHECK=$(pct exec $VMID -- ss -tlnp 2>/dev/null | grep :$BLOCKSCOUT_PORT || echo "not found")
fi
# Classify the listener: loopback-only needs reconfiguration; any other bind
# on the port is usable; no listener at all is fatal.
if echo "$PORT_CHECK" | grep -q "127.0.0.1:$BLOCKSCOUT_PORT"; then
  echo " ⚠️ Blockscout is listening on 127.0.0.1:$BLOCKSCOUT_PORT (localhost only)"
  echo " 💡 Need to configure it to listen on 0.0.0.0:$BLOCKSCOUT_PORT for direct access"
  NEEDS_CONFIG=true
elif echo "$PORT_CHECK" | grep -q "0.0.0.0:$BLOCKSCOUT_PORT\|:$BLOCKSCOUT_PORT.*0.0.0.0"; then
  echo " ✅ Blockscout is already listening on 0.0.0.0:$BLOCKSCOUT_PORT (network accessible)"
  NEEDS_CONFIG=false
elif echo "$PORT_CHECK" | grep -q ":$BLOCKSCOUT_PORT"; then
  echo " ✅ Blockscout is listening on port $BLOCKSCOUT_PORT"
  NEEDS_CONFIG=false
else
  echo " ❌ Blockscout is NOT listening on port $BLOCKSCOUT_PORT"
  echo " 💡 Blockscout service may not be running"
  echo ""
  echo " To start Blockscout service:"
  echo " pct exec $VMID -- systemctl start blockscout.service"
  exit 1
fi
echo ""

# Step 2: Check Blockscout environment/config to configure listening address.
# Advisory only — prints guidance, does not change anything.
if [ "$NEEDS_CONFIG" = true ]; then
  echo "=== Step 2: Configuring Blockscout to Listen on Network ==="
  echo " Checking Blockscout configuration..."
  # Check if Blockscout is running in Docker (exclude its postgres container).
  if [ -n "$EXEC_PREFIX" ]; then
    BLOCKSCOUT_CONTAINER=$($EXEC_PREFIX "pct exec $VMID -- docker ps --format '{{.Names}}' | grep blockscout | grep -v postgres | head -1" || echo "")
  else
    BLOCKSCOUT_CONTAINER=$(pct exec $VMID -- docker ps --format '{{.Names}}' | grep blockscout | grep -v postgres | head -1 || echo "")
  fi
  if [ -n "$BLOCKSCOUT_CONTAINER" ]; then
    echo " ✅ Found Blockscout container: $BLOCKSCOUT_CONTAINER"
    echo " 💡 Blockscout in Docker typically binds to 0.0.0.0 by default"
    echo " 💡 If it's only on localhost, check docker-compose.yml or environment variables"
    echo " 💡 Look for PORT or LISTEN_ADDRESS environment variables"
  else
    echo " ⚠️ Blockscout container not found"
    echo " 💡 Check if Blockscout is running as a system service instead"
  fi
  echo ""
fi

# Step 3: Test direct connection to Blockscout — once via loopback inside the
# container, once via the network IP from outside. curl "000" means the
# connection itself failed (refused/timeout).
echo "=== Step 3: Testing Direct Connection to Blockscout ==="
if [ -n "$EXEC_PREFIX" ]; then
  DIRECT_TEST=$($EXEC_PREFIX "pct exec $VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://127.0.0.1:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null" || echo "000")
  NETWORK_TEST=$($EXEC_PREFIX "curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null" || echo "000")
else
  DIRECT_TEST=$(pct exec $VMID -- curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://127.0.0.1:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null || echo "000")
  NETWORK_TEST=$(curl -s -o /dev/null -w '%{http_code}' --connect-timeout 5 http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats 2>/dev/null || echo "000")
fi
if [ "$DIRECT_TEST" = "200" ]; then
  echo " ✅ Blockscout API responding on localhost (HTTP $DIRECT_TEST)"
  DIRECT_ACCESS=true
else
  echo " ❌ Blockscout API not responding on localhost (HTTP $DIRECT_TEST)"
  DIRECT_ACCESS=false
fi
if [ "$NETWORK_TEST" = "200" ]; then
  echo " ✅ Blockscout API accessible via network IP (HTTP $NETWORK_TEST)"
  NETWORK_ACCESS=true
elif [ "$NETWORK_TEST" = "000" ]; then
  echo " ⚠️ Blockscout API not accessible via network IP (connection refused)"
  echo " 💡 Blockscout may only be listening on localhost"
  NETWORK_ACCESS=false
else
  echo " ⚠️ Blockscout API returned HTTP $NETWORK_TEST via network"
  NETWORK_ACCESS=false
fi
echo ""

# Step 4: Update NPMplus configuration (if network accessible). Prints either
# manual dashboard steps or guidance to make Blockscout network-reachable.
if [ "$NETWORK_ACCESS" = true ]; then
  echo "=== Step 4: Updating NPMplus Configuration ==="
  echo " ✅ Blockscout is network accessible"
  echo " 💡 Update NPMplus to point directly to: http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT"
  echo ""
  echo " Manual Steps:"
  echo " 1. Log into NPMplus: https://192.168.0.166:81"
  echo " 2. Find 'explorer.d-bis.org' proxy host"
  echo " 3. Update Forward Host: $BLOCKSCOUT_IP"
  echo " 4. Update Forward Port: $BLOCKSCOUT_PORT"
  echo " 5. Save changes"
  echo ""
  echo " Or run the automated script:"
  echo " cd scripts/nginx-proxy-manager"
  echo " node update-explorer-direct-route.js"
  echo ""
else
  echo "=== Step 4: Cannot Configure Direct Route ==="
  echo " ❌ Blockscout is not network accessible"
  echo " 💡 Need to configure Blockscout to listen on 0.0.0.0:$BLOCKSCOUT_PORT"
  echo ""
  echo " For Docker containers, check docker-compose.yml:"
  echo " - Ensure PORT environment variable is set"
  echo " - Check if LISTEN_ADDRESS is set to 0.0.0.0"
  echo " - Restart Blockscout container after changes"
  echo ""
  echo " For systemd services, check service file:"
  echo " pct exec $VMID -- systemctl cat blockscout.service"
  echo ""
fi

# Step 5: Alternative - Keep nginx but simplify configuration (advisory only)
echo "=== Step 5: Alternative Solution (Keep Nginx) ==="
echo " If direct route is not possible, ensure nginx is properly configured:"
echo " pct exec $VMID -- systemctl status nginx"
echo " pct exec $VMID -- nginx -t"
echo " pct exec $VMID -- systemctl restart nginx"
echo ""

# Summary of current vs. proposed routing topology.
echo "=========================================="
echo "SUMMARY"
echo "=========================================="
echo "Current Route:"
echo " NPMplus → $BLOCKSCOUT_IP:80 (nginx) → 127.0.0.1:$BLOCKSCOUT_PORT (Blockscout)"
echo ""
echo "Proposed Direct Route:"
if [ "$NETWORK_ACCESS" = true ]; then
  echo " NPMplus → $BLOCKSCOUT_IP:$BLOCKSCOUT_PORT (Blockscout directly) ✅"
  echo " Status: Ready to configure"
else
  echo " NPMplus → $BLOCKSCOUT_IP:$BLOCKSCOUT_PORT (Blockscout directly) ❌"
  echo " Status: Blockscout needs network access configuration"
fi
echo ""
echo "Benefits of Direct Route:"
echo " ✅ Removes nginx proxy layer (one less hop)"
echo " ✅ Reduces latency"
echo " ✅ Fewer points of failure"
echo " ✅ Simpler architecture"
echo ""

# Create update script for NPMplus.
# NOTE: the heredoc delimiter is quoted ('SCRIPT_EOF'), so nothing inside is
# expanded by the shell — JavaScript template literals survive verbatim, and
# all addresses below are literal strings baked into the generated file.
if [ "$NETWORK_ACCESS" = true ]; then
  echo "Creating NPMplus update script..."
  cat > /home/intlc/projects/proxmox/scripts/nginx-proxy-manager/update-explorer-direct-route.js << 'SCRIPT_EOF'
#!/usr/bin/env node
/**
 * Update explorer.d-bis.org in NPMplus to use direct Blockscout route
 * Changes from: http://192.168.11.140:80 → http://192.168.11.140:4000
 */
import { chromium } from 'playwright';
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
import { config } from 'dotenv';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const PROJECT_ROOT = join(__dirname, '../../..');
config({ path: join(PROJECT_ROOT, '.env') });
const NPM_URL = process.env.NPM_URL || 'https://192.168.0.166:81';
const NPM_EMAIL = process.env.NPM_EMAIL || 'nsatoshi2007@hotmail.com';
const NPM_PASSWORD = process.env.NPM_PASSWORD;
if (!NPM_PASSWORD) {
  throw new Error('NPM_PASSWORD environment variable is required');
}
const HEADLESS = process.env.HEADLESS !== 'false';
const DOMAIN = 'explorer.d-bis.org';
const NEW_TARGET = 'http://192.168.11.140:4000';
function log(message, type = 'info') {
  const icons = { success: '✅', error: '❌', warning: '⚠️', info: '📋' };
  console.log(`${icons[type]} ${message}`);
}
async function login(page) {
  log('Logging in to NPMplus...');
  await page.goto(NPM_URL, { waitUntil: 'domcontentloaded', timeout: 30000 });
  await page.waitForSelector('input[type="email"], input[name="email"]', { timeout: 10000 });
  await page.fill('input[type="email"]', NPM_EMAIL);
  await page.fill('input[type="password"]', NPM_PASSWORD);
  const loginButton = await page.$('button[type="submit"]');
  if (loginButton) {
    await loginButton.click();
  } else {
    await page.keyboard.press('Enter');
  }
  await page.waitForTimeout(3000);
  log('Logged in', 'success');
}
async function updateProxyHost(page) {
  log(`Updating ${DOMAIN} to direct route: ${NEW_TARGET}`);
  // Navigate to proxy hosts
  await page.goto(`${NPM_URL}/#/proxy-hosts`, { waitUntil: 'domcontentloaded' });
  await page.waitForTimeout(2000);
  // Find and click on explorer.d-bis.org
  const domainLink = await page.$(`text=${DOMAIN}`);
  if (!domainLink) {
    log(`Could not find ${DOMAIN} in proxy hosts list`, 'error');
    return false;
  }
  await domainLink.click();
  await page.waitForTimeout(2000);
  // Update forward host and port
  const url = new URL(NEW_TARGET);
  const hostname = url.hostname;
  const port = url.port || (url.protocol === 'https:' ? '443' : '80');
  const hostInput = await page.$('input[name="forward_host"], input[name="forward_hostname"]');
  if (hostInput) {
    await hostInput.fill(hostname);
    log(` Updated forward host: ${hostname}`);
  }
  const portInput = await page.$('input[name="forward_port"]');
  if (portInput) {
    await portInput.fill(port);
    log(` Updated forward port: ${port}`);
  }
  // Save
  const saveButton = await page.$('button:has-text("Save"), button[type="submit"]');
  if (saveButton) {
    await saveButton.click();
    log(` Saved changes`, 'success');
    await page.waitForTimeout(2000);
    return true;
  }
  return false;
}
async function main() {
  const browser = await chromium.launch({ headless: HEADLESS, ignoreHTTPSErrors: true });
  const context = await browser.newContext({ ignoreHTTPSErrors: true });
  const page = await context.newPage();
  try {
    await login(page);
    const success = await updateProxyHost(page);
    if (success) {
      log(`✅ ${DOMAIN} updated to use direct route`, 'success');
    } else {
      log(`❌ Failed to update ${DOMAIN}`, 'error');
      process.exit(1);
    }
  } catch (error) {
    log(`Fatal error: ${error.message}`, 'error');
    await page.screenshot({ path: '/tmp/npmplus-update-error.png' });
    process.exit(1);
  } finally {
    await browser.close();
  }
}
main().catch(console.error);
SCRIPT_EOF
  chmod +x /home/intlc/projects/proxmox/scripts/nginx-proxy-manager/update-explorer-direct-route.js
  echo " ✅ Created: scripts/nginx-proxy-manager/update-explorer-direct-route.js"
  echo ""
fi

# Final guidance, depending on the Step 3 network-reachability result.
echo "=========================================="
echo "NEXT STEPS"
echo "=========================================="
if [ "$NETWORK_ACCESS" = true ]; then
  echo "1. Run the NPMplus update script:"
  echo " cd scripts/nginx-proxy-manager"
  echo " node update-explorer-direct-route.js"
  echo ""
  echo "2. Test the direct route:"
  echo " curl -I http://$BLOCKSCOUT_IP:$BLOCKSCOUT_PORT/api/v2/stats"
  echo ""
else
  echo "1. Configure Blockscout to listen on 0.0.0.0:$BLOCKSCOUT_PORT"
  echo "2. Restart Blockscout service"
  echo "3. Run this script again to verify network access"
  echo "4. Then run the NPMplus update script"
  echo ""
fi
echo "=========================================="

View File

@@ -0,0 +1,198 @@
#!/bin/bash
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Best-effort: missing conf is tolerated, but see set -u caveat below.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Configure all 19 domains in Nginx Proxy Manager via API from inside container
# This script uses pct exec to run commands inside the NPM container
# (redundant: -e is already enabled by `set -euo pipefail` above)
set -e
# NOTE(review): under set -u this line aborts with "unbound variable" if
# ip-addresses.conf failed to load — confirm whether a :-default was intended.
PROXMOX_HOST="${PROXMOX_HOST_R630_01}"
CONTAINER_ID="105"
NPM_URL="http://127.0.0.1:81"
# SECURITY NOTE(review): admin credentials are hardcoded here (and again in
# the inner script below). Consider moving them to an env file excluded from
# version control.
EMAIL="nsatoshi2007@hotmail.com"
PASSWORD='L@ker$2010'
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔒 Nginx Proxy Manager SSL Configuration (Container)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Create the configuration script and run it inside container
# The heredoc delimiter is QUOTED, so everything below is passed literally to
# the remote bash: local variables (including any IP_* values sourced above)
# are NOT expanded here — they expand on the remote side with their fallback
# defaults. Credentials are therefore re-declared inside the inner script.
ssh root@${PROXMOX_HOST} "pct exec ${CONTAINER_ID} -- bash" << 'INNER_SCRIPT'
set -e
NPM_URL="http://127.0.0.1:81"
EMAIL="nsatoshi2007@hotmail.com"
PASSWORD='L@ker$2010'
echo "🔐 Authenticating..."
# Obtain a bearer token from the NPM API; all later calls reuse $TOKEN.
TOKEN_RESPONSE=$(curl -s -X POST "$NPM_URL/api/tokens" \
-H "Content-Type: application/json" \
-d "{\"identity\":\"$EMAIL\",\"secret\":\"$PASSWORD\"}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
# Guard against empty/"null"/whitespace-only tokens before proceeding.
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ] || [ -z "${TOKEN// }" ]; then
ERROR_MSG=$(echo "$TOKEN_RESPONSE" | jq -r '.error.message // "Unknown error"' 2>/dev/null || echo "$TOKEN_RESPONSE")
echo "❌ Authentication failed: $ERROR_MSG"
exit 1
fi
echo "✅ Authentication successful"
echo ""
# Function to create or update proxy host
#
# create_proxy_host <domain> <scheme> <hostname> <port> <websocket>
#   Idempotently ensures an NPM proxy host exists for <domain>, requests a
#   Let's Encrypt certificate if one is not already present, then attaches
#   the certificate with SSL forced.
# Globals: NPM_URL, TOKEN (bearer token from the auth step), EMAIL.
# Returns 0 on success or when the certificate is still pending;
# returns 1 only if the proxy host itself could not be created.
create_proxy_host() {
local domain=$1
local scheme=$2
local hostname=$3
local port=$4
local websocket=$5
echo "📋 Processing $domain..."
# Check if exists
# Look the domain up among existing proxy hosts; empty means "create new".
EXISTING=$(curl -s -X GET "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" | jq -r ".result[] | select(.domain_names[] == \"$domain\") | .id" 2>/dev/null || echo "")
local HOST_ID
if [ -n "$EXISTING" ] && [ "$EXISTING" != "null" ]; then
echo " Already exists (ID: $EXISTING)"
HOST_ID=$EXISTING
else
# Create new
echo " Creating proxy host..."
# certificate_id 0 means "no certificate yet"; the real one is attached below.
RESPONSE=$(curl -s -X POST "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"domain_names\": [\"$domain\"],
\"forward_scheme\": \"$scheme\",
\"forward_hostname\": \"$hostname\",
\"forward_port\": $port,
\"allow_websocket_upgrade\": $websocket,
\"block_exploits\": true,
\"cache_enabled\": false,
\"ssl_forced\": true,
\"http2_support\": true,
\"hsts_enabled\": true,
\"hsts_subdomains\": true,
\"access_list_id\": 0,
\"certificate_id\": 0
}")
HOST_ID=$(echo "$RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
if [ -z "$HOST_ID" ] || [ "$HOST_ID" = "null" ]; then
ERROR=$(echo "$RESPONSE" | jq -r '.error.message // .error // "Unknown error"' 2>/dev/null || echo "$RESPONSE")
echo " ❌ Failed: $ERROR"
return 1
fi
echo " ✅ Created (ID: $HOST_ID)"
fi
# Check if certificate already exists
# NOTE(review): proxy-hosts are read via .result[] above but certificates
# via .[] here — confirm both shapes match the NPM API version in use.
EXISTING_CERT=$(curl -s -X GET "$NPM_URL/api/nginx/certificates" \
-H "Authorization: Bearer $TOKEN" | jq -r ".[] | select(.domain_names[] == \"$domain\") | .id" 2>/dev/null || echo "")
if [ -n "$EXISTING_CERT" ] && [ "$EXISTING_CERT" != "null" ]; then
echo " ✅ Certificate already exists (ID: $EXISTING_CERT)"
CERT_ID=$EXISTING_CERT
else
# Request SSL certificate
echo " 🔒 Requesting SSL certificate..."
CERT_RESPONSE=$(curl -s -X POST "$NPM_URL/api/nginx/certificates" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"domain_names\": [\"$domain\"],
\"provider\": \"letsencrypt\",
\"letsencrypt_email\": \"$EMAIL\",
\"letsencrypt_agree\": true
}")
CERT_ID=$(echo "$CERT_RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
if [ -z "$CERT_ID" ] || [ "$CERT_ID" = "null" ]; then
ERROR=$(echo "$CERT_RESPONSE" | jq -r '.error.message // .error // "Check manually"' 2>/dev/null || echo "$CERT_RESPONSE")
echo " ⚠️ Certificate request: $ERROR"
echo " Certificate may be processing or domain may need DNS verification"
# Deliberate best-effort: a pending certificate is not a hard failure.
return 0
fi
echo " ✅ Certificate requested (ID: $CERT_ID)"
fi
# Update proxy host with certificate
# NOTE(review): this PUT sends only certificate_id/ssl_forced; some NPM
# versions require the full host object on update — confirm.
if [ -n "$CERT_ID" ] && [ "$CERT_ID" != "null" ] && [ "$CERT_ID" != "0" ]; then
UPDATE_RESPONSE=$(curl -s -X PUT "$NPM_URL/api/nginx/proxy-hosts/$HOST_ID" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"certificate_id\": $CERT_ID,
\"ssl_forced\": true
}")
echo " ✅ SSL configured for $domain"
fi
return 0
}
# Configure all domains
echo "🚀 Starting domain configuration (19 domains)..."
echo ""
SUCCESS=0
FAILED=0
# Tally wrapper around create_proxy_host.
# The previous pattern `cmd && ((SUCCESS++)) || ((FAILED++))` was broken:
# ((SUCCESS++)) exits non-zero when the pre-increment value is 0 (post-
# increment yields the old value), so the FIRST successful host also bumped
# FAILED, and under `set -e` a non-zero ((FAILED++)) aborted the whole inner
# script. Plain assignments inside if/else have no such failure mode.
tally() {
  if create_proxy_host "$@"; then
    SUCCESS=$((SUCCESS + 1))
  else
    FAILED=$((FAILED + 1))
  fi
}
# NOTE(review): this script is shipped through a QUOTED heredoc, so the
# ${IP_*:-...} expressions below expand on the remote side, where
# config/ip-addresses.conf is never sourced — the fallback defaults always
# win. Confirm whether the locally sourced conf was meant to feed them.
# sankofa.nexus (5 domains)
tally "sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}" "80" "false"
tally "www.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}" "80" "false"
tally "phoenix.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}" "80" "false"
tally "www.phoenix.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}" "80" "false"
tally "the-order.sankofa.nexus" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}" "80" "false"
# d-bis.org (9 domains)
tally "explorer.d-bis.org" "http" "${IP_BLOCKSCOUT:-${IP_DEVICE_14:-${IP_DEVICE_14:-${IP_DEVICE_14:-192.168.11.14}}}0}" "80" "false"
tally "rpc-http-pub.d-bis.org" "https" "${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-192.168.11.252}}}}" "443" "true"
tally "rpc-ws-pub.d-bis.org" "https" "${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-192.168.11.252}}}}" "443" "true"
tally "rpc-http-prv.d-bis.org" "https" "${RPC_ALI_1:-${RPC_ALI_1:-${RPC_ALI_1:-${RPC_ALI_1:-192.168.11.251}}}}" "443" "true"
tally "rpc-ws-prv.d-bis.org" "https" "${RPC_ALI_1:-${RPC_ALI_1:-${RPC_ALI_1:-${RPC_ALI_1:-192.168.11.251}}}}" "443" "true"
tally "dbis-admin.d-bis.org" "http" "${IP_DBIS_FRONTEND:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-192.168.11.13}}}0}" "80" "false"
tally "dbis-api.d-bis.org" "http" "${IP_DBIS_API:-192.168.11.155}" "3000" "false"
tally "dbis-api-2.d-bis.org" "http" "${IP_DBIS_API_2:-192.168.11.156}" "3000" "false"
tally "secure.d-bis.org" "http" "${IP_DBIS_FRONTEND:-${IP_SERVICE_13:-${IP_SERVICE_13:-${IP_SERVICE_13:-192.168.11.13}}}0}" "80" "false"
# mim4u.org (4 domains)
tally "mim4u.org" "http" "${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-192.168.11.36}}}}" "80" "false"
tally "www.mim4u.org" "http" "${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-192.168.11.36}}}}" "80" "false"
tally "secure.mim4u.org" "http" "${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-192.168.11.36}}}}" "80" "false"
tally "training.mim4u.org" "http" "${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-${IP_SERVICE_36:-192.168.11.36}}}}" "80" "false"
# defi-oracle.io (1 domain)
tally "rpc.public-0138.defi-oracle.io" "https" "${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-${RPC_ALI_2:-192.168.11.252}}}}" "443" "true"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📊 Configuration Summary"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Successful: $SUCCESS"
echo "⚠️ Failed: $FAILED"
echo "📋 Total: 19"
echo ""
echo "⏳ SSL certificates may take 1-2 minutes to be issued"
INNER_SCRIPT
echo ""
echo "✅ Configuration complete!"
echo ""
echo "🔍 To verify, run:"
echo " bash scripts/nginx-proxy-manager/verify-ssl-config.sh"

View File

@@ -0,0 +1,192 @@
#!/bin/bash
set -euo pipefail
# Configure all 19 domains in Nginx Proxy Manager via API from inside container
# This script uses pct exec to run commands inside the NPM container
# (redundant: -e is already enabled by `set -euo pipefail` above)
set -e
PROXMOX_HOST="192.168.11.11"
CONTAINER_ID="105"
NPM_URL="http://127.0.0.1:81"
# SECURITY NOTE(review): admin credentials hardcoded here (and re-declared
# in the inner script) — consider sourcing from an untracked env file.
EMAIL="nsatoshi2007@hotmail.com"
PASSWORD='L@ker$2010'
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔒 Nginx Proxy Manager SSL Configuration (Container)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Create the configuration script and run it inside container
# Quoted heredoc: the body below is passed LITERALLY to the remote bash,
# which is why credentials and NPM_URL are re-declared inside it.
ssh root@${PROXMOX_HOST} "pct exec ${CONTAINER_ID} -- bash" << 'INNER_SCRIPT'
set -e
NPM_URL="http://127.0.0.1:81"
EMAIL="nsatoshi2007@hotmail.com"
PASSWORD='L@ker$2010'
echo "🔐 Authenticating..."
# Obtain a bearer token from the NPM API; all later calls reuse $TOKEN.
TOKEN_RESPONSE=$(curl -s -X POST "$NPM_URL/api/tokens" \
-H "Content-Type: application/json" \
-d "{\"identity\":\"$EMAIL\",\"secret\":\"$PASSWORD\"}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
# Guard against empty/"null"/whitespace-only tokens before proceeding.
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ] || [ -z "${TOKEN// }" ]; then
ERROR_MSG=$(echo "$TOKEN_RESPONSE" | jq -r '.error.message // "Unknown error"' 2>/dev/null || echo "$TOKEN_RESPONSE")
echo "❌ Authentication failed: $ERROR_MSG"
exit 1
fi
echo "✅ Authentication successful"
echo ""
# Function to create or update proxy host
#
# create_proxy_host <domain> <scheme> <hostname> <port> <websocket>
#   Idempotent: reuses an existing proxy host and certificate for <domain>
#   when found; otherwise creates the host, requests a Let's Encrypt
#   certificate, and attaches it with SSL forced.
# Globals: NPM_URL, TOKEN, EMAIL.
# Returns 0 on success or pending certificate; 1 only when host creation fails.
create_proxy_host() {
local domain=$1
local scheme=$2
local hostname=$3
local port=$4
local websocket=$5
echo "📋 Processing $domain..."
# Check if exists
EXISTING=$(curl -s -X GET "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" | jq -r ".result[] | select(.domain_names[] == \"$domain\") | .id" 2>/dev/null || echo "")
local HOST_ID
if [ -n "$EXISTING" ] && [ "$EXISTING" != "null" ]; then
echo " Already exists (ID: $EXISTING)"
HOST_ID=$EXISTING
else
# Create new
echo " Creating proxy host..."
# certificate_id 0 = "no certificate yet"; the real one is attached below.
RESPONSE=$(curl -s -X POST "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"domain_names\": [\"$domain\"],
\"forward_scheme\": \"$scheme\",
\"forward_hostname\": \"$hostname\",
\"forward_port\": $port,
\"allow_websocket_upgrade\": $websocket,
\"block_exploits\": true,
\"cache_enabled\": false,
\"ssl_forced\": true,
\"http2_support\": true,
\"hsts_enabled\": true,
\"hsts_subdomains\": true,
\"access_list_id\": 0,
\"certificate_id\": 0
}")
HOST_ID=$(echo "$RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
if [ -z "$HOST_ID" ] || [ "$HOST_ID" = "null" ]; then
ERROR=$(echo "$RESPONSE" | jq -r '.error.message // .error // "Unknown error"' 2>/dev/null || echo "$RESPONSE")
echo " ❌ Failed: $ERROR"
return 1
fi
echo " ✅ Created (ID: $HOST_ID)"
fi
# Check if certificate already exists
# NOTE(review): certificates are read via .[] but proxy-hosts via .result[]
# above — confirm both shapes match the NPM API version in use.
EXISTING_CERT=$(curl -s -X GET "$NPM_URL/api/nginx/certificates" \
-H "Authorization: Bearer $TOKEN" | jq -r ".[] | select(.domain_names[] == \"$domain\") | .id" 2>/dev/null || echo "")
if [ -n "$EXISTING_CERT" ] && [ "$EXISTING_CERT" != "null" ]; then
echo " ✅ Certificate already exists (ID: $EXISTING_CERT)"
CERT_ID=$EXISTING_CERT
else
# Request SSL certificate
echo " 🔒 Requesting SSL certificate..."
CERT_RESPONSE=$(curl -s -X POST "$NPM_URL/api/nginx/certificates" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"domain_names\": [\"$domain\"],
\"provider\": \"letsencrypt\",
\"letsencrypt_email\": \"$EMAIL\",
\"letsencrypt_agree\": true
}")
CERT_ID=$(echo "$CERT_RESPONSE" | jq -r '.id // empty' 2>/dev/null || echo "")
if [ -z "$CERT_ID" ] || [ "$CERT_ID" = "null" ]; then
ERROR=$(echo "$CERT_RESPONSE" | jq -r '.error.message // .error // "Check manually"' 2>/dev/null || echo "$CERT_RESPONSE")
echo " ⚠️ Certificate request: $ERROR"
echo " Certificate may be processing or domain may need DNS verification"
# Deliberate best-effort: a pending certificate is not a hard failure.
return 0
fi
echo " ✅ Certificate requested (ID: $CERT_ID)"
fi
# Update proxy host with certificate
if [ -n "$CERT_ID" ] && [ "$CERT_ID" != "null" ] && [ "$CERT_ID" != "0" ]; then
UPDATE_RESPONSE=$(curl -s -X PUT "$NPM_URL/api/nginx/proxy-hosts/$HOST_ID" \
-H "Authorization: Bearer $TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"certificate_id\": $CERT_ID,
\"ssl_forced\": true
}")
echo " ✅ SSL configured for $domain"
fi
return 0
}
# Configure all domains
echo "🚀 Starting domain configuration (19 domains)..."
echo ""
SUCCESS=0
FAILED=0
# Tally wrapper around create_proxy_host.
# The previous pattern `cmd && ((SUCCESS++)) || ((FAILED++))` was broken:
# ((SUCCESS++)) exits non-zero when the pre-increment value is 0, so the
# FIRST successful host also bumped FAILED, and under `set -e` a non-zero
# ((FAILED++)) aborted the whole inner script. Plain assignments inside
# if/else have no such failure mode.
tally() {
  if create_proxy_host "$@"; then
    SUCCESS=$((SUCCESS + 1))
  else
    FAILED=$((FAILED + 1))
  fi
}
# sankofa.nexus (5 domains)
tally "sankofa.nexus" "http" "192.168.11.140" "80" "false"
tally "www.sankofa.nexus" "http" "192.168.11.140" "80" "false"
tally "phoenix.sankofa.nexus" "http" "192.168.11.140" "80" "false"
tally "www.phoenix.sankofa.nexus" "http" "192.168.11.140" "80" "false"
tally "the-order.sankofa.nexus" "http" "192.168.11.140" "80" "false"
# d-bis.org (9 domains)
tally "explorer.d-bis.org" "http" "192.168.11.140" "80" "false"
tally "rpc-http-pub.d-bis.org" "https" "192.168.11.252" "443" "true"
tally "rpc-ws-pub.d-bis.org" "https" "192.168.11.252" "443" "true"
tally "rpc-http-prv.d-bis.org" "https" "192.168.11.251" "443" "true"
tally "rpc-ws-prv.d-bis.org" "https" "192.168.11.251" "443" "true"
tally "dbis-admin.d-bis.org" "http" "192.168.11.130" "80" "false"
tally "dbis-api.d-bis.org" "http" "192.168.11.155" "3000" "false"
tally "dbis-api-2.d-bis.org" "http" "192.168.11.156" "3000" "false"
tally "secure.d-bis.org" "http" "192.168.11.130" "80" "false"
# mim4u.org (4 domains)
tally "mim4u.org" "http" "192.168.11.36" "80" "false"
tally "www.mim4u.org" "http" "192.168.11.36" "80" "false"
tally "secure.mim4u.org" "http" "192.168.11.36" "80" "false"
tally "training.mim4u.org" "http" "192.168.11.36" "80" "false"
# defi-oracle.io (1 domain)
tally "rpc.public-0138.defi-oracle.io" "https" "192.168.11.252" "443" "true"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📊 Configuration Summary"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "✅ Successful: $SUCCESS"
echo "⚠️ Failed: $FAILED"
echo "📋 Total: 19"
echo ""
echo "⏳ SSL certificates may take 1-2 minutes to be issued"
INNER_SCRIPT
echo ""
echo "✅ Configuration complete!"
echo ""
echo "🔍 To verify, run:"
echo " bash scripts/nginx-proxy-manager/verify-ssl-config.sh"

View File

@@ -0,0 +1,62 @@
#!/bin/bash
# Quick configuration script: writes ~/.env with Proxmox MCP connection
# settings. The API token secret must still be added manually (or via
# create-proxmox-token.sh); a placeholder value is written here.
#
# Usage: script.sh [host] [user] [token-name]
set -euo pipefail
# Load IP configuration (optional; the :- defaults below cover the common case)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
HOST="${1:-192.168.11.10}"
USER="${2:-root@pam}"   # NOTE: shadows the login $USER for the rest of this script
TOKEN_NAME="${3:-mcp-server}"
echo "Configuring .env file with Proxmox connection..."
echo "Host: $HOST"
echo "User: $USER"
echo "Token Name: $TOKEN_NAME"
echo ""
# Update .env file (overwrites any existing one). Unquoted EOF so
# $HOST/$USER/$TOKEN_NAME expand; the token value stays a placeholder.
cat > "$HOME/.env" << EOF
# Proxmox MCP Server Configuration
# Configured with: $HOST
# Proxmox Configuration
PROXMOX_HOST=$HOST
PROXMOX_USER=$USER
PROXMOX_TOKEN_NAME=$TOKEN_NAME
PROXMOX_TOKEN_VALUE=your-token-secret-here
# Security Settings
# ⚠️ WARNING: Setting PROXMOX_ALLOW_ELEVATED=true enables DESTRUCTIVE operations
PROXMOX_ALLOW_ELEVATED=false
# Optional Settings
PROXMOX_PORT=8006
EOF
echo "✅ .env file updated!"
echo ""
echo "⚠️ IMPORTANT: You need to create the API token and add it to .env"
echo ""
echo "Option 1: Via Proxmox Web UI (Recommended)"
echo " 1. Go to: https://$HOST:8006"
echo " 2. Navigate to: Datacenter → Permissions → API Tokens"
echo " 3. Click 'Add' and create token: $TOKEN_NAME"
echo " 4. Copy the secret value"
echo " 5. Update ~/.env: PROXMOX_TOKEN_VALUE=<paste-secret-here>"
echo ""
echo "Option 2: Try automated token creation"
echo " ./create-proxmox-token.sh $HOST $USER <password> $TOKEN_NAME"
echo ""
echo "Current .env contents:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Show the file with the secret line redacted. grep reads the file directly
# (was a useless `cat | grep`), and `|| true` keeps set -e/pipefail from
# aborting if grep -v filters every line (grep exits 1 when it prints nothing).
grep -v "TOKEN_VALUE=" "$HOME/.env" || true
echo "PROXMOX_TOKEN_VALUE=<needs-to-be-added>"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -0,0 +1,56 @@
#!/bin/bash
# Quick configuration script: writes ~/.env with Proxmox MCP connection
# settings. The API token secret must still be added manually (or via
# create-proxmox-token.sh); a placeholder value is written here.
#
# Usage: script.sh [host] [user] [token-name]
set -euo pipefail
HOST="${1:-192.168.11.10}"
USER="${2:-root@pam}"   # NOTE: shadows the login $USER for the rest of this script
TOKEN_NAME="${3:-mcp-server}"
echo "Configuring .env file with Proxmox connection..."
echo "Host: $HOST"
echo "User: $USER"
echo "Token Name: $TOKEN_NAME"
echo ""
# Update .env file (overwrites any existing one). Unquoted EOF so
# $HOST/$USER/$TOKEN_NAME expand; the token value stays a placeholder.
cat > "$HOME/.env" << EOF
# Proxmox MCP Server Configuration
# Configured with: $HOST
# Proxmox Configuration
PROXMOX_HOST=$HOST
PROXMOX_USER=$USER
PROXMOX_TOKEN_NAME=$TOKEN_NAME
PROXMOX_TOKEN_VALUE=your-token-secret-here
# Security Settings
# ⚠️ WARNING: Setting PROXMOX_ALLOW_ELEVATED=true enables DESTRUCTIVE operations
PROXMOX_ALLOW_ELEVATED=false
# Optional Settings
PROXMOX_PORT=8006
EOF
echo "✅ .env file updated!"
echo ""
echo "⚠️ IMPORTANT: You need to create the API token and add it to .env"
echo ""
echo "Option 1: Via Proxmox Web UI (Recommended)"
echo " 1. Go to: https://$HOST:8006"
echo " 2. Navigate to: Datacenter → Permissions → API Tokens"
echo " 3. Click 'Add' and create token: $TOKEN_NAME"
echo " 4. Copy the secret value"
echo " 5. Update ~/.env: PROXMOX_TOKEN_VALUE=<paste-secret-here>"
echo ""
echo "Option 2: Try automated token creation"
echo " ./create-proxmox-token.sh $HOST $USER <password> $TOKEN_NAME"
echo ""
echo "Current .env contents:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# Show the file with the secret line redacted. grep reads the file directly
# (was a useless `cat | grep`), and `|| true` keeps set -e/pipefail from
# aborting if grep -v filters every line (grep exits 1 when it prints nothing).
grep -v "TOKEN_VALUE=" "$HOME/.env" || true
echo "PROXMOX_TOKEN_VALUE=<needs-to-be-added>"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

View File

@@ -0,0 +1,105 @@
#!/usr/bin/env bash
# ER605 NAT Configuration Script
# Creates NAT rules for direct public IP routing to Nginx
# Note: This script generates configuration - manual application may be required
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Best-effort: env vars / :- defaults below still apply if this is missing.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Simple colored log helpers. NOTE(review): warnings/errors go to stdout,
# not stderr — confirm that is intended for this report-style script.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Configuration — overridable via environment (PUBLIC_IP / NGINX_IP).
PUBLIC_IP="${PUBLIC_IP:-76.53.10.35}"
NGINX_IP="${NGINX_IP:-192.168.11.26}"
NGINX_PORT_HTTPS=443
NGINX_PORT_HTTP=80
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔧 ER605 NAT Configuration Generator"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Public IP: $PUBLIC_IP"
log_info "Nginx Internal IP: $NGINX_IP"
log_info "Nginx HTTPS Port: $NGINX_PORT_HTTPS"
log_info "Nginx HTTP Port: $NGINX_PORT_HTTP"
echo ""
# Print one ER605 port-forwarding rule as a human-readable block.
# Args: $1 rule name, $2 external IP, $3 external port,
#       $4 internal IP, $5 internal port, $6 description.
# Output goes to stdout; no state is modified.
generate_nat_rule() {
  local rule_name="$1" external_ip="$2" external_port="$3"
  local internal_ip="$4" internal_port="$5" description="$6"
  printf '%s\n' \
    "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" \
    "Rule Name: $rule_name" \
    "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" \
    "Enabled: ✅ Yes" \
    "Interface: WAN1" \
    "External IP: $external_ip" \
    "External Port: $external_port" \
    "Internal IP: $internal_ip" \
    "Internal Port: $internal_port" \
    "Protocol: TCP" \
    "Source IP: 0.0.0.0/0" \
    "Description: $description" \
    ""
}
# Emit the two rules: a single public IP fronts everything; Nginx fans out
# by SNI/hostname, so only 443 and 80 need forwarding.
echo "📋 NAT Rules Configuration"
echo ""
echo "All services route through a single public IP to Nginx,"
echo "which then routes to backend services based on hostname."
echo ""
# Main HTTPS rule (all services)
generate_nat_rule \
"Web Services (All Domains)" \
"$PUBLIC_IP" \
"$NGINX_PORT_HTTPS" \
"$NGINX_IP" \
"$NGINX_PORT_HTTPS" \
"Routes all HTTPS traffic to Nginx for hostname-based routing (SNI)"
# HTTP rule for Let's Encrypt
generate_nat_rule \
"HTTP (Let's Encrypt)" \
"$PUBLIC_IP" \
"$NGINX_PORT_HTTP" \
"$NGINX_IP" \
"$NGINX_PORT_HTTP" \
"HTTP for Let's Encrypt validation and redirects"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "NAT Configuration Generated"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Manual steps: this script only prints the config; the ER605 has no CLI API
# used here, so the operator applies the rules via the GUI.
log_info "📝 Next Steps:"
echo " 1. Log in to Omada Controller or ER605 GUI"
echo " 2. Navigate to: NAT → Port Forwarding"
echo " 3. Add the two rules shown above"
echo " 4. Save and apply configuration"
echo ""
log_info "🔒 Firewall Rules Required:"
echo " • Allow HTTPS (443) from WAN to $NGINX_IP"
echo " • Allow HTTP (80) from WAN to $NGINX_IP (for Let's Encrypt)"
echo ""

View File

@@ -0,0 +1,99 @@
#!/usr/bin/env bash
# ER605 NAT Configuration Script
# Creates NAT rules for direct public IP routing to Nginx
# Note: This script generates configuration - manual application may be required
set -euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Simple colored log helpers. NOTE(review): warnings/errors go to stdout,
# not stderr — confirm that is intended for this report-style script.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Configuration — overridable via environment (PUBLIC_IP / NGINX_IP).
PUBLIC_IP="${PUBLIC_IP:-76.53.10.35}"
NGINX_IP="${NGINX_IP:-192.168.11.26}"
NGINX_PORT_HTTPS=443
NGINX_PORT_HTTP=80
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔧 ER605 NAT Configuration Generator"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Public IP: $PUBLIC_IP"
log_info "Nginx Internal IP: $NGINX_IP"
log_info "Nginx HTTPS Port: $NGINX_PORT_HTTPS"
log_info "Nginx HTTP Port: $NGINX_PORT_HTTP"
echo ""
# Generate NAT rule configuration
# Print one ER605 port-forwarding rule as a human-readable block.
# Args: $1 rule name, $2 external IP, $3 external port,
#       $4 internal IP, $5 internal port, $6 description.
# Output goes to stdout; no state is modified.
generate_nat_rule() {
  local rule_name="$1" external_ip="$2" external_port="$3"
  local internal_ip="$4" internal_port="$5" description="$6"
  printf '%s\n' \
    "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" \
    "Rule Name: $rule_name" \
    "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" \
    "Enabled: ✅ Yes" \
    "Interface: WAN1" \
    "External IP: $external_ip" \
    "External Port: $external_port" \
    "Internal IP: $internal_ip" \
    "Internal Port: $internal_port" \
    "Protocol: TCP" \
    "Source IP: 0.0.0.0/0" \
    "Description: $description" \
    ""
}
# Emit the two rules: a single public IP fronts everything; Nginx fans out
# by SNI/hostname, so only 443 and 80 need forwarding.
echo "📋 NAT Rules Configuration"
echo ""
echo "All services route through a single public IP to Nginx,"
echo "which then routes to backend services based on hostname."
echo ""
# Main HTTPS rule (all services)
generate_nat_rule \
"Web Services (All Domains)" \
"$PUBLIC_IP" \
"$NGINX_PORT_HTTPS" \
"$NGINX_IP" \
"$NGINX_PORT_HTTPS" \
"Routes all HTTPS traffic to Nginx for hostname-based routing (SNI)"
# HTTP rule for Let's Encrypt
generate_nat_rule \
"HTTP (Let's Encrypt)" \
"$PUBLIC_IP" \
"$NGINX_PORT_HTTP" \
"$NGINX_IP" \
"$NGINX_PORT_HTTP" \
"HTTP for Let's Encrypt validation and redirects"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "NAT Configuration Generated"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Manual steps: this script only prints the config; the operator applies the
# rules through the router GUI.
log_info "📝 Next Steps:"
echo " 1. Log in to Omada Controller or ER605 GUI"
echo " 2. Navigate to: NAT → Port Forwarding"
echo " 3. Add the two rules shown above"
echo " 4. Save and apply configuration"
echo ""
log_info "🔒 Firewall Rules Required:"
echo " • Allow HTTPS (443) from WAN to $NGINX_IP"
echo " • Allow HTTP (80) from WAN to $NGINX_IP (for Let's Encrypt)"
echo ""

View File

@@ -0,0 +1,183 @@
#!/usr/bin/env bash
# Configure all bridge destinations for CCIPWETH9Bridge and CCIPWETH10Bridge on Ethereum Mainnet
# Usage: ./configure-ethereum-mainnet-bridge-destinations.sh
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
# Secrets (.env with PRIVATE_KEY, RPC URLs, bridge addresses) live in the
# source project, not this deployment repo.
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# Load environment variables
if [ -f "$SOURCE_PROJECT/.env" ]; then
source "$SOURCE_PROJECT/.env"
else
log_error ".env file not found in $SOURCE_PROJECT"
exit 1
fi
# Required variables — validated individually so each failure gives a
# targeted remediation hint.
ETHEREUM_MAINNET_RPC="${ETHEREUM_MAINNET_RPC:-}"
WETH9_BRIDGE_MAINNET="${CCIPWETH9_BRIDGE_MAINNET:-}"
WETH10_BRIDGE_MAINNET="${CCIPWETH10_BRIDGE_MAINNET:-}"
if [ -z "$ETHEREUM_MAINNET_RPC" ]; then
log_error "ETHEREUM_MAINNET_RPC not set in .env file"
exit 1
fi
if [ -z "${PRIVATE_KEY:-}" ]; then
log_error "PRIVATE_KEY not set in .env file"
exit 1
fi
if [ -z "$WETH9_BRIDGE_MAINNET" ] || [ -z "$WETH10_BRIDGE_MAINNET" ]; then
log_error "Ethereum Mainnet bridge addresses not set in .env file"
log_error "Please deploy bridges first: bash scripts/deploy-ccipweth9bridge-ethereum-mainnet.sh"
exit 1
fi
# Destination chain configurations
# Ethereum Mainnet bridges send to: BSC, Polygon, Avalanche, Base, Arbitrum, Optimism, Chain 138
# Keys are CCIP chain selectors (decimal), values are the remote bridge address.
# NOTE(review): the "Chain 138" key below is ~60 digits long, which cannot
# fit in a uint64 — addDestination(uint64,address) will reject it. This looks
# like a paste error; confirm the correct chain selector.
declare -A WETH9_DESTINATIONS=(
["11344663589394136015"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # BSC
["4051577828743386545"]="0xa780ef19a041745d353c9432f2a7f5a241335ffe" # Polygon
["6433500567565415381"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Avalanche
["15971525489660198786"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Base
["4949039107694359620"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Arbitrum
["3734403246176062136"]="0x8078a09637e47fa5ed34f626046ea2094a5cde5e" # Optimism
["866240039685049171407962509760789466724431933144813155647626"]="0x89dd12025bfCD38A168455A44B400e913ED33BE2" # Chain 138
)
declare -A WETH10_DESTINATIONS=(
["11344663589394136015"]="0x105f8a15b819948a89153505762444ee9f324684" # BSC
["4051577828743386545"]="0xdab0591e5e89295ffad75a71dcfc30c5625c4fa2" # Polygon
["6433500567565415381"]="0x105f8a15b819948a89153505762444ee9f324684" # Avalanche
["15971525489660198786"]="0x105f8a15b819948a89153505762444ee9f324684" # Base
["4949039107694359620"]="0x105f8a15b819948a89153505762444ee9f324684" # Arbitrum
["3734403246176062136"]="0x105f8a15b819948a89153505762444ee9f324684" # Optimism
["866240039685049171407962509760789466724431933144813155647626"]="0xe0E93247376aa097dB308B92e6Ba36bA015535D0" # Chain 138
)
# Human-readable names for log output, keyed by the same selectors.
declare -A CHAIN_NAMES=(
["11344663589394136015"]="BSC"
["4051577828743386545"]="Polygon"
["6433500567565415381"]="Avalanche"
["15971525489660198786"]="Base"
["4949039107694359620"]="Arbitrum"
["3734403246176062136"]="Optimism"
["866240039685049171407962509760789466724431933144813155647626"]="Chain 138"
)
log_info "========================================="
log_info "Configure Ethereum Mainnet Bridge Destinations"
log_info "========================================="
log_info ""
log_info "WETH9 Bridge: $WETH9_BRIDGE_MAINNET"
log_info "WETH10 Bridge: $WETH10_BRIDGE_MAINNET"
# Only the first 50 chars of the RPC URL are logged (avoids leaking API keys).
log_info "RPC URL: ${ETHEREUM_MAINNET_RPC:0:50}..."
log_info ""
# check_destination <bridge-address> <chain-selector> <display-name>
#   Probes the bridge's destinations(uint64) mapping for <chain-selector>.
#   Returns 0 if it looks configured, 1 otherwise (callers then add it).
# NOTE(review): `cast call` is given only the input signature, so it prints
# the raw ABI-encoded hex return — the grep for "true|enabled" will likely
# never match, making every destination appear unconfigured (the send path
# then relies on the "destination already exists" revert). Confirm against
# the contract's return type and consider supplying the full signature,
# e.g. "destinations(uint64)(bool,address)", so cast decodes the output.
check_destination() {
local bridge="$1"
local selector="$2"
local name="$3"
log_info "Checking $name destination..."
local result=$(cast call "$bridge" "destinations(uint64)" "$selector" --rpc-url "$ETHEREUM_MAINNET_RPC" 2>/dev/null || echo "")
if echo "$result" | grep -qE "(true|enabled)" && ! echo "$result" | grep -q "0x0000000000000000000000000000000000000000$"; then
return 0 # Already configured
else
return 1 # Not configured
fi
}
# Configure WETH9 Bridge destinations: for each selector, skip if already
# configured on-chain, otherwise send addDestination(uint64,address).
log_info "Configuring WETH9 Bridge destinations..."
WETH9_COUNT=0
for selector in "${!WETH9_DESTINATIONS[@]}"; do
  chain_name="${CHAIN_NAMES[$selector]}"
  dest_address="${WETH9_DESTINATIONS[$selector]}"
  # Sanitized, quoted log path: chain names may contain spaces ("Chain 138"),
  # which previously word-split the unquoted tee/grep arguments.
  log_file="/tmp/weth9-mainnet-config-${chain_name// /_}.log"
  if check_destination "$WETH9_BRIDGE_MAINNET" "$selector" "$chain_name (WETH9)"; then
    log_success "$chain_name already configured for WETH9"
  else
    log_info "Configuring $chain_name for WETH9..."
    if cast send "$WETH9_BRIDGE_MAINNET" \
      "addDestination(uint64,address)" \
      "$selector" \
      "$dest_address" \
      --rpc-url "$ETHEREUM_MAINNET_RPC" \
      --private-key "$PRIVATE_KEY" \
      2>&1 | tee "$log_file" | grep -qE "(blockHash|transactionHash|Success)"; then
      log_success "$chain_name configured for WETH9"
      # Plain assignment: ((WETH9_COUNT++)) returns exit status 1 when the
      # pre-increment value is 0, which aborts the script under `set -e`.
      WETH9_COUNT=$((WETH9_COUNT + 1))
    else
      # A revert with "destination already exists" is a benign race, not a failure.
      if grep -q "destination already exists" "$log_file" 2>/dev/null; then
        log_success "$chain_name already configured for WETH9"
      else
        log_error "✗ Failed to configure $chain_name for WETH9"
        log_info "Check $log_file for details"
      fi
    fi
  fi
done
log_info ""
# Configure WETH10 Bridge destinations (mirror of the WETH9 loop above).
log_info "Configuring WETH10 Bridge destinations..."
WETH10_COUNT=0
for selector in "${!WETH10_DESTINATIONS[@]}"; do
  chain_name="${CHAIN_NAMES[$selector]}"
  dest_address="${WETH10_DESTINATIONS[$selector]}"
  # Sanitized, quoted log path: chain names may contain spaces ("Chain 138"),
  # which previously word-split the unquoted tee/grep arguments.
  log_file="/tmp/weth10-mainnet-config-${chain_name// /_}.log"
  if check_destination "$WETH10_BRIDGE_MAINNET" "$selector" "$chain_name (WETH10)"; then
    log_success "$chain_name already configured for WETH10"
  else
    log_info "Configuring $chain_name for WETH10..."
    if cast send "$WETH10_BRIDGE_MAINNET" \
      "addDestination(uint64,address)" \
      "$selector" \
      "$dest_address" \
      --rpc-url "$ETHEREUM_MAINNET_RPC" \
      --private-key "$PRIVATE_KEY" \
      2>&1 | tee "$log_file" | grep -qE "(blockHash|transactionHash|Success)"; then
      log_success "$chain_name configured for WETH10"
      # Plain assignment: ((WETH10_COUNT++)) returns exit status 1 when the
      # pre-increment value is 0, which aborts the script under `set -e`.
      WETH10_COUNT=$((WETH10_COUNT + 1))
    else
      # A revert with "destination already exists" is a benign race, not a failure.
      if grep -q "destination already exists" "$log_file" 2>/dev/null; then
        log_success "$chain_name already configured for WETH10"
      else
        log_error "✗ Failed to configure $chain_name for WETH10"
        log_info "Check $log_file for details"
      fi
    fi
  fi
done
log_info ""
# Final report: counts reflect only NEWLY configured destinations; hosts that
# were already configured (or failed) are not included in these totals.
log_success "========================================="
log_success "Bridge Configuration Complete!"
log_success "========================================="
log_info ""
log_info "Summary:"
log_info " WETH9 destinations configured: $WETH9_COUNT new"
log_info " WETH10 destinations configured: $WETH10_COUNT new"
log_info ""
log_info "All 7 destination chains configured for both bridges"
log_info ""
View File

@@ -0,0 +1,145 @@
#!/usr/bin/env bash
# Final script to configure Ethereum Mainnet destinations on the chain-138
# WETH9/WETH10 bridges - run after a Besu restart has cleared the mempool.
# Usage: ./configure-ethereum-mainnet-final.sh
#
# NOTE: deliberately `set -uo pipefail` WITHOUT -e, so a failed bridge
# transaction does not abort the script before verification runs.
set -uo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Load environment (PRIVATE_KEY, RPC_URL_138, bridge addresses...).
# BUGFIX: the original sourced the file unconditionally; a missing .env
# surfaced later as a confusing unbound-variable error instead of this
# clear message.
if [ -f "$SOURCE_PROJECT/.env" ]; then
  source "$SOURCE_PROJECT/.env"
else
  log_error ".env file not found in $SOURCE_PROJECT"
  exit 1
fi

RPC_URL="${RPC_URL_138:-http://${RPC_ALLTRA_1:-192.168.11.250}:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
ETHEREUM_MAINNET_SELECTOR="5009297550715157269"
WETH9_MAINNET_BRIDGE="0x8078a09637e47fa5ed34f626046ea2094a5cde5e"
WETH10_MAINNET_BRIDGE="0x105f8a15b819948a89153505762444ee9f324684"

DEPLOYER=$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null)

log_info "========================================="
log_info "Configure Ethereum Mainnet (Final)"
log_info "========================================="
log_info ""

# Use 2x the current gas price to outbid anything still pending.
CURRENT_GAS=$(cast gas-price --rpc-url "$RPC_URL" 2>/dev/null || echo "1000000000")
OPTIMAL_GAS=$(echo "$CURRENT_GAS * 2" | bc 2>/dev/null || echo "2000000000")
log_info "Current gas price: $CURRENT_GAS wei"
log_info "Using gas price: $OPTIMAL_GAS wei"
log_info ""

# Don't specify nonce - let cast determine it automatically. This avoids
# issues with ghost nonces and pending transactions we can't see.
CURRENT_NONCE=$(cast nonce "$DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
log_info "Current on-chain nonce: $CURRENT_NONCE"
log_info "Using automatic nonce (cast will determine correct nonce)"

# configure_bridge LABEL BRIDGE_ADDR DEST_ADDR
# Sends addDestination(selector, dest) on BRIDGE_ADDR and logs the outcome.
# Never aborts the script; failures are surfaced again in verification.
configure_bridge() {
  local label=$1 bridge=$2 dest=$3 tx_output hash err
  log_info "Configuring $label bridge..."
  tx_output=$(cast send "$bridge" \
    "addDestination(uint64,address)" \
    "$ETHEREUM_MAINNET_SELECTOR" \
    "$dest" \
    --rpc-url "$RPC_URL" \
    --private-key "$PRIVATE_KEY" \
    --gas-price "$OPTIMAL_GAS" \
    --gas-limit 200000 \
    2>&1 || echo "FAILED")
  if echo "$tx_output" | grep -qE "transactionHash|Success"; then
    hash=$(echo "$tx_output" | grep -oE "transactionHash[[:space:]]+0x[0-9a-fA-F]{64}" | awk '{print $2}' || echo "")
    log_success "✓ $label bridge configured: $hash"
    sleep 10  # let the tx mine before the next send / verification
  else
    err=$(echo "$tx_output" | grep -E "Error|reverted" | head -1 || echo "Unknown")
    log_error "✗ $label configuration failed: $err"
    if echo "$err" | grep -q "underpriced"; then
      log_warn "⚠ Mempool still blocking - Besu may need restart"
    fi
  fi
  log_info ""
}

# is_configured BRIDGE_ADDR -> 0 when destinations(selector) is non-zero.
# The "^0x0+$" pattern subsumes the all-zero-address case, so one check
# suffices (the original used two redundant greps, and its final combined
# check inconsistently applied only one of them).
is_configured() {
  local check
  check=$(cast call "$1" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
  [ -n "$check" ] && ! echo "$check" | grep -qE "^0x0+$"
}

configure_bridge "WETH9" "$WETH9_BRIDGE" "$WETH9_MAINNET_BRIDGE"
configure_bridge "WETH10" "$WETH10_BRIDGE" "$WETH10_MAINNET_BRIDGE"

# Verify
log_info "Verifying configuration..."
sleep 5
log_info ""
log_success "========================================="
log_success "Verification Results"
log_success "========================================="
log_info ""
WETH9_OK=false
WETH10_OK=false
if is_configured "$WETH9_BRIDGE"; then
  log_success "✓ WETH9 bridge: CONFIGURED"
  WETH9_OK=true
else
  log_error "✗ WETH9 bridge: NOT CONFIGURED"
fi
if is_configured "$WETH10_BRIDGE"; then
  log_success "✓ WETH10 bridge: CONFIGURED"
  WETH10_OK=true
else
  log_error "✗ WETH10 bridge: NOT CONFIGURED"
fi
log_info ""
if [ "$WETH9_OK" = true ] && [ "$WETH10_OK" = true ]; then
  log_success "✅ Ethereum Mainnet configuration complete!"
  log_info ""
  log_info "Run test to verify:"
  log_info " ./scripts/test-bridge-all-7-networks.sh weth9"
else
  log_warn "⚠ Configuration incomplete"
  log_info ""
  log_info "If still failing, restart Besu node:"
  log_info " sudo systemctl restart besu-rpc # On ${RPC_ALLTRA_1:-192.168.11.250}"
  log_info " Then run this script again"
fi
log_info ""

View File

@@ -0,0 +1,192 @@
#!/usr/bin/env bash
# Configure Ethereum Mainnet destinations on the chain-138 WETH9/WETH10
# bridges using the NEW deployer account. This bypasses the stuck
# transaction on the old account.
# Usage: ./configure-ethereum-mainnet-with-new-account.sh
# Requires: NEW_PRIVATE_KEY exported or set in $SOURCE_PROJECT/.env
set -euo pipefail

# Load IP configuration (optional; provides RPC_ALLTRA_1 etc.)
# BUGFIX: SCRIPT_DIR was computed twice in the original; once is enough.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Load environment variables
if [ -f "$SOURCE_PROJECT/.env" ]; then
  source "$SOURCE_PROJECT/.env"
else
  log_error ".env file not found in $SOURCE_PROJECT"
  exit 1
fi

RPC_URL="${RPC_URL_138:-http://${RPC_ALLTRA_1:-192.168.11.250}:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
ETHEREUM_MAINNET_SELECTOR="5009297550715157269"
WETH9_MAINNET_BRIDGE="0x8078a09637e47fa5ed34f626046ea2094a5cde5e"
WETH10_MAINNET_BRIDGE="0x105f8a15b819948a89153505762444ee9f324684"

# New deployer account (replaces the account with the stuck transaction)
NEW_DEPLOYER="0xC13EfAe66708C7541d2D66A2527bcBF9992e7186"

# Refuse to run without the new key rather than failing mid-flight
if [ -z "${NEW_PRIVATE_KEY:-}" ]; then
  log_warn "⚠ NEW_PRIVATE_KEY not set in .env"
  log_info "Please set NEW_PRIVATE_KEY in $SOURCE_PROJECT/.env"
  log_info "Or export it: export NEW_PRIVATE_KEY=<private-key>"
  exit 1
fi

# Verify the key actually derives the expected address before using it
VERIFIED_DEPLOYER=$(cast wallet address --private-key "$NEW_PRIVATE_KEY" 2>/dev/null || echo "")
if [ "$VERIFIED_DEPLOYER" != "$NEW_DEPLOYER" ]; then
  log_error "NEW_PRIVATE_KEY does not match expected address"
  log_info "Expected: $NEW_DEPLOYER"
  log_info "Got: $VERIFIED_DEPLOYER"
  exit 1
fi

log_info "========================================="
log_info "Configure Ethereum Mainnet (New Account)"
log_info "========================================="
log_info ""
log_info "New Deployer: $NEW_DEPLOYER"
log_info "WETH9 Bridge: $WETH9_BRIDGE"
log_info "WETH10 Bridge: $WETH10_BRIDGE"
log_info "Ethereum Mainnet Selector: $ETHEREUM_MAINNET_SELECTOR"
log_info ""

# Check balance - need gas for two transactions
BALANCE=$(cast balance "$NEW_DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
BALANCE_ETH=$(echo "scale=4; $BALANCE / 1000000000000000000" | bc 2>/dev/null || echo "0")
log_info "New deployer balance: $BALANCE_ETH ETH"
# If bc is unavailable the fallback "1" conservatively treats the balance
# as insufficient.
if (( $(echo "$BALANCE_ETH < 1" | bc -l 2>/dev/null || echo "1") )); then
  log_error "Insufficient balance. Need at least 1 ETH for gas"
  log_info "Run: ./scripts/fund-new-deployer-account.sh"
  exit 1
fi

# Use 2x the current gas price to outbid anything pending
CURRENT_GAS=$(cast gas-price --rpc-url "$RPC_URL" 2>/dev/null || echo "1000000000")
OPTIMAL_GAS=$(echo "$CURRENT_GAS * 2" | bc 2>/dev/null || echo "2000000000")
log_info "Using gas price: $(echo "scale=2; $OPTIMAL_GAS / 1000000000" | bc) gwei"
log_info ""

# configure_bridge LABEL BRIDGE_ADDR DEST_ADDR
# Sends addDestination(selector, dest) from the new account; logs outcome
# without aborting (failures are reported again by verification).
configure_bridge() {
  local label=$1 bridge=$2 dest=$3 tx_output hash err
  log_info "Configuring $label bridge for Ethereum Mainnet..."
  tx_output=$(cast send "$bridge" \
    "addDestination(uint64,address)" \
    "$ETHEREUM_MAINNET_SELECTOR" \
    "$dest" \
    --rpc-url "$RPC_URL" \
    --private-key "$NEW_PRIVATE_KEY" \
    --gas-price "$OPTIMAL_GAS" \
    --gas-limit 200000 \
    2>&1 || echo "FAILED")
  if echo "$tx_output" | grep -qE "transactionHash|Success"; then
    hash=$(echo "$tx_output" | grep -oE "transactionHash[[:space:]]+0x[0-9a-fA-F]{64}" | awk '{print $2}' || echo "")
    log_success "✓ $label bridge configured: $hash"
    sleep 10  # let the tx mine before the next send / verification
  else
    err=$(echo "$tx_output" | grep -E "Error|reverted" | head -1 || echo "Unknown")
    log_error "✗ $label configuration failed: $err"
    if echo "$err" | grep -q "underpriced"; then
      log_warn "⚠ Still blocked - may need higher gas price"
    fi
  fi
  log_info ""
}

# is_configured BRIDGE_ADDR -> 0 when destinations(selector) is non-zero
# ("^0x0+$" also covers the all-zero address form).
is_configured() {
  local check
  check=$(cast call "$1" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
  [ -n "$check" ] && ! echo "$check" | grep -qE "^0x0+$"
}

configure_bridge "WETH9" "$WETH9_BRIDGE" "$WETH9_MAINNET_BRIDGE"
configure_bridge "WETH10" "$WETH10_BRIDGE" "$WETH10_MAINNET_BRIDGE"

# Verify configuration
log_info "Verifying configuration..."
sleep 5
log_info ""
log_success "========================================="
log_success "Verification Results"
log_success "========================================="
log_info ""
WETH9_OK=false
WETH10_OK=false
if is_configured "$WETH9_BRIDGE"; then
  log_success "✓ WETH9 bridge: CONFIGURED"
  WETH9_OK=true
else
  log_error "✗ WETH9 bridge: NOT CONFIGURED"
fi
if is_configured "$WETH10_BRIDGE"; then
  log_success "✓ WETH10 bridge: CONFIGURED"
  WETH10_OK=true
else
  log_error "✗ WETH10 bridge: NOT CONFIGURED"
fi
log_info ""
if [ "$WETH9_OK" = true ] && [ "$WETH10_OK" = true ]; then
  log_success "✅ Ethereum Mainnet configuration complete!"
  log_info ""
  log_info "Run test to verify:"
  log_info " ./scripts/test-bridge-all-7-networks.sh weth9"
  log_info ""
  log_info "Note: You may need to update PRIVATE_KEY in .env to use new account for future operations"
else
  log_warn "⚠ Configuration incomplete"
  log_info ""
  log_info "If still failing, check:"
  log_info " 1. New account has sufficient balance"
  log_info " 2. Gas price is appropriate"
  log_info " 3. Bridge contracts are accessible"
fi
log_info ""

View File

@@ -0,0 +1,186 @@
#!/usr/bin/env bash
# Configure Ethereum Mainnet destinations on the chain-138 WETH9/WETH10
# bridges using the NEW deployer account. This bypasses the stuck
# transaction on the old account.
# Usage: ./configure-ethereum-mainnet-with-new-account.sh
# Requires: NEW_PRIVATE_KEY exported or set in $SOURCE_PROJECT/.env
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

# Load environment variables
if [ -f "$SOURCE_PROJECT/.env" ]; then
  source "$SOURCE_PROJECT/.env"
else
  log_error ".env file not found in $SOURCE_PROJECT"
  exit 1
fi

RPC_URL="${RPC_URL_138:-http://192.168.11.250:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
ETHEREUM_MAINNET_SELECTOR="5009297550715157269"
WETH9_MAINNET_BRIDGE="0x8078a09637e47fa5ed34f626046ea2094a5cde5e"
WETH10_MAINNET_BRIDGE="0x105f8a15b819948a89153505762444ee9f324684"

# New deployer account (replaces the account with the stuck transaction)
NEW_DEPLOYER="0xC13EfAe66708C7541d2D66A2527bcBF9992e7186"

# Refuse to run without the new key rather than failing mid-flight
if [ -z "${NEW_PRIVATE_KEY:-}" ]; then
  log_warn "⚠ NEW_PRIVATE_KEY not set in .env"
  log_info "Please set NEW_PRIVATE_KEY in $SOURCE_PROJECT/.env"
  log_info "Or export it: export NEW_PRIVATE_KEY=<private-key>"
  exit 1
fi

# Verify the key actually derives the expected address before using it
VERIFIED_DEPLOYER=$(cast wallet address --private-key "$NEW_PRIVATE_KEY" 2>/dev/null || echo "")
if [ "$VERIFIED_DEPLOYER" != "$NEW_DEPLOYER" ]; then
  log_error "NEW_PRIVATE_KEY does not match expected address"
  log_info "Expected: $NEW_DEPLOYER"
  log_info "Got: $VERIFIED_DEPLOYER"
  exit 1
fi

log_info "========================================="
log_info "Configure Ethereum Mainnet (New Account)"
log_info "========================================="
log_info ""
log_info "New Deployer: $NEW_DEPLOYER"
log_info "WETH9 Bridge: $WETH9_BRIDGE"
log_info "WETH10 Bridge: $WETH10_BRIDGE"
log_info "Ethereum Mainnet Selector: $ETHEREUM_MAINNET_SELECTOR"
log_info ""

# Check balance - need gas for two transactions
BALANCE=$(cast balance "$NEW_DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
BALANCE_ETH=$(echo "scale=4; $BALANCE / 1000000000000000000" | bc 2>/dev/null || echo "0")
log_info "New deployer balance: $BALANCE_ETH ETH"
# If bc is unavailable the fallback "1" conservatively treats the balance
# as insufficient.
if (( $(echo "$BALANCE_ETH < 1" | bc -l 2>/dev/null || echo "1") )); then
  log_error "Insufficient balance. Need at least 1 ETH for gas"
  log_info "Run: ./scripts/fund-new-deployer-account.sh"
  exit 1
fi

# Use 2x the current gas price to outbid anything pending
CURRENT_GAS=$(cast gas-price --rpc-url "$RPC_URL" 2>/dev/null || echo "1000000000")
OPTIMAL_GAS=$(echo "$CURRENT_GAS * 2" | bc 2>/dev/null || echo "2000000000")
log_info "Using gas price: $(echo "scale=2; $OPTIMAL_GAS / 1000000000" | bc) gwei"
log_info ""

# configure_bridge LABEL BRIDGE_ADDR DEST_ADDR
# Sends addDestination(selector, dest) from the new account; logs outcome
# without aborting (failures are reported again by verification).
configure_bridge() {
  local label=$1 bridge=$2 dest=$3 tx_output hash err
  log_info "Configuring $label bridge for Ethereum Mainnet..."
  tx_output=$(cast send "$bridge" \
    "addDestination(uint64,address)" \
    "$ETHEREUM_MAINNET_SELECTOR" \
    "$dest" \
    --rpc-url "$RPC_URL" \
    --private-key "$NEW_PRIVATE_KEY" \
    --gas-price "$OPTIMAL_GAS" \
    --gas-limit 200000 \
    2>&1 || echo "FAILED")
  if echo "$tx_output" | grep -qE "transactionHash|Success"; then
    hash=$(echo "$tx_output" | grep -oE "transactionHash[[:space:]]+0x[0-9a-fA-F]{64}" | awk '{print $2}' || echo "")
    log_success "✓ $label bridge configured: $hash"
    sleep 10  # let the tx mine before the next send / verification
  else
    err=$(echo "$tx_output" | grep -E "Error|reverted" | head -1 || echo "Unknown")
    log_error "✗ $label configuration failed: $err"
    if echo "$err" | grep -q "underpriced"; then
      log_warn "⚠ Still blocked - may need higher gas price"
    fi
  fi
  log_info ""
}

# is_configured BRIDGE_ADDR -> 0 when destinations(selector) is non-zero
# ("^0x0+$" also covers the all-zero address form).
is_configured() {
  local check
  check=$(cast call "$1" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
  [ -n "$check" ] && ! echo "$check" | grep -qE "^0x0+$"
}

configure_bridge "WETH9" "$WETH9_BRIDGE" "$WETH9_MAINNET_BRIDGE"
configure_bridge "WETH10" "$WETH10_BRIDGE" "$WETH10_MAINNET_BRIDGE"

# Verify configuration
log_info "Verifying configuration..."
sleep 5
log_info ""
log_success "========================================="
log_success "Verification Results"
log_success "========================================="
log_info ""
WETH9_OK=false
WETH10_OK=false
if is_configured "$WETH9_BRIDGE"; then
  log_success "✓ WETH9 bridge: CONFIGURED"
  WETH9_OK=true
else
  log_error "✗ WETH9 bridge: NOT CONFIGURED"
fi
if is_configured "$WETH10_BRIDGE"; then
  log_success "✓ WETH10 bridge: CONFIGURED"
  WETH10_OK=true
else
  log_error "✗ WETH10 bridge: NOT CONFIGURED"
fi
log_info ""
if [ "$WETH9_OK" = true ] && [ "$WETH10_OK" = true ]; then
  log_success "✅ Ethereum Mainnet configuration complete!"
  log_info ""
  log_info "Run test to verify:"
  log_info " ./scripts/test-bridge-all-7-networks.sh weth9"
  log_info ""
  log_info "Note: You may need to update PRIVATE_KEY in .env to use new account for future operations"
else
  log_warn "⚠ Configuration incomplete"
  log_info ""
  log_info "If still failing, check:"
  log_info " 1. New account has sufficient balance"
  log_info " 2. Gas price is appropriate"
  log_info " 3. Bridge contracts are accessible"
fi
log_info ""

View File

@@ -0,0 +1,155 @@
#!/usr/bin/env bash
# Configure Ethereum Mainnet destination for both chain-138 bridges
# (WETH9 and WETH10). Bridges that already have a non-zero destination
# are skipped.
# Usage: ./configure-ethereum-mainnet.sh
set -euo pipefail

# Load IP configuration (optional; provides RPC_ALLTRA_1 etc.)
# BUGFIX: SCRIPT_DIR was computed twice in the original; once is enough.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# BUGFIX: log_detail was called below but never defined. Under
# `set -euo pipefail` that made the script die with "command not found"
# precisely when a bridge was already configured.
log_detail() { echo -e "    $1"; }

# Load environment variables
if [ -f "$SOURCE_PROJECT/.env" ]; then
  source "$SOURCE_PROJECT/.env"
else
  log_error ".env file not found in $SOURCE_PROJECT"
  exit 1
fi

RPC_URL="${RPC_URL_138:-http://${RPC_ALLTRA_1:-192.168.11.250}:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
ETHEREUM_MAINNET_SELECTOR="5009297550715157269"
# Using same addresses as other chains (these should be deployed on Ethereum Mainnet)
WETH9_MAINNET_BRIDGE="0x8078a09637e47fa5ed34f626046ea2094a5cde5e"
WETH10_MAINNET_BRIDGE="0x105f8a15b819948a89153505762444ee9f324684"

if [ -z "${PRIVATE_KEY:-}" ]; then
  log_error "PRIVATE_KEY not set in .env file"
  exit 1
fi
DEPLOYER=$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null || echo "")
if [ -z "$DEPLOYER" ]; then
  log_error "Failed to get deployer address"
  exit 1
fi

log_info "========================================="
log_info "Configure Ethereum Mainnet Destination"
log_info "========================================="
log_info ""
log_info "Ethereum Mainnet Selector: $ETHEREUM_MAINNET_SELECTOR"
log_info "WETH9 Mainnet Bridge: $WETH9_MAINNET_BRIDGE"
log_info "WETH10 Mainnet Bridge: $WETH10_MAINNET_BRIDGE"
log_info "Deployer: $DEPLOYER"
log_info ""

# Check current configuration (bounded by timeout so a dead RPC can't hang us)
log_info "Checking current configuration..."
WETH9_CHECK=$(timeout 10 cast call "$WETH9_BRIDGE" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
WETH10_CHECK=$(timeout 10 cast call "$WETH10_BRIDGE" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")

# configure_bridge LABEL BRIDGE_ADDR DEST_ADDR CURRENT
# Skips when CURRENT is already a non-zero destination ("^0x0+$" also
# covers the all-zero address form); otherwise sends addDestination with
# a 10x gas price so it can replace any pending transaction.
configure_bridge() {
  local label=$1 bridge=$2 dest=$3 current=$4
  local tx_output hash err current_gas replacement_gas
  if [ -n "$current" ] && ! echo "$current" | grep -qE "^0x0+$"; then
    log_success "✓ $label bridge already configured for Ethereum Mainnet"
    log_detail "Current destination: $current"
    return 0
  fi
  log_info "Configuring $label bridge for Ethereum Mainnet..."
  log_info "Waiting for any pending transactions to clear..."
  sleep 15
  current_gas=$(cast gas-price --rpc-url "$RPC_URL" 2>/dev/null || echo "1000000000")
  replacement_gas=$(echo "$current_gas * 10" | bc 2>/dev/null || echo "100000000000")
  tx_output=$(cast send "$bridge" \
    "addDestination(uint64,address)" \
    "$ETHEREUM_MAINNET_SELECTOR" \
    "$dest" \
    --rpc-url "$RPC_URL" \
    --private-key "$PRIVATE_KEY" \
    --gas-price "$replacement_gas" \
    2>&1 || echo "FAILED")
  if echo "$tx_output" | grep -qE "transactionHash|Success"; then
    hash=$(echo "$tx_output" | grep -oE "transactionHash[[:space:]]+0x[0-9a-fA-F]{64}" | awk '{print $2}' || echo "")
    log_success "✓ $label bridge configured: $hash"
    sleep 5
  else
    err=$(echo "$tx_output" | grep -E "Error|reverted|already exists" | head -1 || echo "Unknown")
    if echo "$err" | grep -q "already exists"; then
      log_success "✓ $label bridge already configured for Ethereum Mainnet"
    else
      log_error "✗ $label configuration failed: $err"
      log_info "Full output: $tx_output"
    fi
  fi
}

configure_bridge "WETH9" "$WETH9_BRIDGE" "$WETH9_MAINNET_BRIDGE" "$WETH9_CHECK"
log_info ""
configure_bridge "WETH10" "$WETH10_BRIDGE" "$WETH10_MAINNET_BRIDGE" "$WETH10_CHECK"
log_info ""
log_success "========================================="
log_success "Ethereum Mainnet Configuration Complete!"
log_success "========================================="
log_info ""

View File

@@ -0,0 +1,149 @@
#!/usr/bin/env bash
# Configure Ethereum Mainnet destination for both chain-138 bridges
# (WETH9 and WETH10). Bridges that already have a non-zero destination
# are skipped.
# Usage: ./configure-ethereum-mainnet.sh
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SOURCE_PROJECT="/home/intlc/projects/smom-dbis-138"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
# BUGFIX: log_detail was called below but never defined. Under
# `set -euo pipefail` that made the script die with "command not found"
# precisely when a bridge was already configured.
log_detail() { echo -e "    $1"; }

# Load environment variables
if [ -f "$SOURCE_PROJECT/.env" ]; then
  source "$SOURCE_PROJECT/.env"
else
  log_error ".env file not found in $SOURCE_PROJECT"
  exit 1
fi

RPC_URL="${RPC_URL_138:-http://192.168.11.250:8545}"
WETH9_BRIDGE="${CCIPWETH9_BRIDGE_CHAIN138:-0x89dd12025bfCD38A168455A44B400e913ED33BE2}"
WETH10_BRIDGE="${CCIPWETH10_BRIDGE_CHAIN138:-0xe0E93247376aa097dB308B92e6Ba36bA015535D0}"
ETHEREUM_MAINNET_SELECTOR="5009297550715157269"
# Using same addresses as other chains (these should be deployed on Ethereum Mainnet)
WETH9_MAINNET_BRIDGE="0x8078a09637e47fa5ed34f626046ea2094a5cde5e"
WETH10_MAINNET_BRIDGE="0x105f8a15b819948a89153505762444ee9f324684"

if [ -z "${PRIVATE_KEY:-}" ]; then
  log_error "PRIVATE_KEY not set in .env file"
  exit 1
fi
DEPLOYER=$(cast wallet address --private-key "$PRIVATE_KEY" 2>/dev/null || echo "")
if [ -z "$DEPLOYER" ]; then
  log_error "Failed to get deployer address"
  exit 1
fi

log_info "========================================="
log_info "Configure Ethereum Mainnet Destination"
log_info "========================================="
log_info ""
log_info "Ethereum Mainnet Selector: $ETHEREUM_MAINNET_SELECTOR"
log_info "WETH9 Mainnet Bridge: $WETH9_MAINNET_BRIDGE"
log_info "WETH10 Mainnet Bridge: $WETH10_MAINNET_BRIDGE"
log_info "Deployer: $DEPLOYER"
log_info ""

# Check current configuration (bounded by timeout so a dead RPC can't hang us)
log_info "Checking current configuration..."
WETH9_CHECK=$(timeout 10 cast call "$WETH9_BRIDGE" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
WETH10_CHECK=$(timeout 10 cast call "$WETH10_BRIDGE" "destinations(uint64)" "$ETHEREUM_MAINNET_SELECTOR" --rpc-url "$RPC_URL" 2>/dev/null || echo "")

# configure_bridge LABEL BRIDGE_ADDR DEST_ADDR CURRENT
# Skips when CURRENT is already a non-zero destination ("^0x0+$" also
# covers the all-zero address form); otherwise sends addDestination with
# a 10x gas price so it can replace any pending transaction.
configure_bridge() {
  local label=$1 bridge=$2 dest=$3 current=$4
  local tx_output hash err current_gas replacement_gas
  if [ -n "$current" ] && ! echo "$current" | grep -qE "^0x0+$"; then
    log_success "✓ $label bridge already configured for Ethereum Mainnet"
    log_detail "Current destination: $current"
    return 0
  fi
  log_info "Configuring $label bridge for Ethereum Mainnet..."
  log_info "Waiting for any pending transactions to clear..."
  sleep 15
  current_gas=$(cast gas-price --rpc-url "$RPC_URL" 2>/dev/null || echo "1000000000")
  replacement_gas=$(echo "$current_gas * 10" | bc 2>/dev/null || echo "100000000000")
  tx_output=$(cast send "$bridge" \
    "addDestination(uint64,address)" \
    "$ETHEREUM_MAINNET_SELECTOR" \
    "$dest" \
    --rpc-url "$RPC_URL" \
    --private-key "$PRIVATE_KEY" \
    --gas-price "$replacement_gas" \
    2>&1 || echo "FAILED")
  if echo "$tx_output" | grep -qE "transactionHash|Success"; then
    hash=$(echo "$tx_output" | grep -oE "transactionHash[[:space:]]+0x[0-9a-fA-F]{64}" | awk '{print $2}' || echo "")
    log_success "✓ $label bridge configured: $hash"
    sleep 5
  else
    err=$(echo "$tx_output" | grep -E "Error|reverted|already exists" | head -1 || echo "Unknown")
    if echo "$err" | grep -q "already exists"; then
      log_success "✓ $label bridge already configured for Ethereum Mainnet"
    else
      log_error "✗ $label configuration failed: $err"
      log_info "Full output: $tx_output"
    fi
  fi
}

configure_bridge "WETH9" "$WETH9_BRIDGE" "$WETH9_MAINNET_BRIDGE" "$WETH9_CHECK"
log_info ""
configure_bridge "WETH10" "$WETH10_BRIDGE" "$WETH10_MAINNET_BRIDGE" "$WETH10_CHECK"
log_info ""
log_success "========================================="
log_success "Ethereum Mainnet Configuration Complete!"
log_success "========================================="
log_info ""

View File

@@ -0,0 +1,114 @@
#!/bin/bash
# Configure Inter-VLAN Firewall Rules
# Prints the firewall rules required for inter-VLAN communication and the
# recommended steps to apply them on a UniFi UDM Pro.
# BUGFIX: the original followed `set -euo pipefail` with a redundant
# plain `set -e`; a single strict-mode line is kept.
set -euo pipefail

echo "🔧 Inter-VLAN Firewall Rules Configuration"
echo ""
echo "This script will configure firewall rules for inter-VLAN communication."
echo ""

# Load environment variables (UNIFI_* credentials)
if [ -f ~/.env ]; then
  source ~/.env
fi

# Configuration
UDM_PRO_URL="${UNIFI_UDM_URL:-https://192.168.0.1}"
UNIFI_USERNAME="${UNIFI_USERNAME:-unifi_api}"
UNIFI_PASSWORD="${UNIFI_PASSWORD:-}"
if [ -z "$UNIFI_PASSWORD" ]; then
  echo "❌ UNIFI_PASSWORD not set. Please set it in ~/.env"
  exit 1
fi

echo "📋 Configuration:"
echo " UDM Pro URL: $UDM_PRO_URL"
echo " Username: $UNIFI_USERNAME"
echo ""

# Check if we can access UDM Pro.
# BUGFIX: the host was previously derived via an unquoted
# `$(echo $UDM_PRO_URL | sed ... | cut ...)`; use quoted parameter
# expansion instead (strip the scheme, then any :port suffix).
UDM_HOST="${UDM_PRO_URL#*://}"
UDM_HOST="${UDM_HOST%%:*}"
echo "🔍 Testing UDM Pro connectivity..."
if ! ping -c 1 -W 2 "$UDM_HOST" >/dev/null 2>&1; then
  echo " ⚠️ UDM Pro is not reachable from current network"
  echo " 💡 This script requires access to UDM Pro API"
  exit 1
fi
echo " ✅ UDM Pro is reachable"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📋 Firewall Rules to Create"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "1⃣ Management VLAN (11) → Service VLANs"
echo " ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " Allow:"
echo " • SSH (TCP 22)"
echo " • HTTPS (TCP 443)"
echo " • Database admin (PostgreSQL 5432, MySQL 3306)"
echo " • Admin consoles (Keycloak 8080, etc.)"
echo " • Monitoring (SNMP 161, Prometheus 9090, etc.)"
echo ""
echo "2⃣ Service VLANs → Management VLAN (11)"
echo " ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " Allow:"
echo " • Monitoring agents"
echo " • Logging (Syslog 514, etc.)"
echo " • Health checks"
echo ""
echo "3⃣ Sovereign Tenant Isolation"
echo " ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " Block:"
echo " • VLAN 200 ↔ VLAN 201"
echo " • VLAN 200 ↔ VLAN 202"
echo " • VLAN 200 ↔ VLAN 203"
echo " • VLAN 201 ↔ VLAN 202"
echo " • VLAN 201 ↔ VLAN 203"
echo " • VLAN 202 ↔ VLAN 203"
echo ""
echo " Allow:"
echo " • Each sovereign tenant → Management VLAN (monitoring only)"
echo " • Each sovereign tenant → External (internet)"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "💡 Implementation Options"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "Option 1: Via UDM Pro Web UI (Recommended)"
echo " • More control and visibility"
echo " • Easier to manage and troubleshoot"
echo ""
echo "Option 2: Via API (Automated)"
echo " • Can be scripted"
echo " • Requires API access and proper configuration"
echo ""
echo "📋 Manual Configuration Steps (Recommended):"
echo ""
echo "1. Access UDM Pro: $UDM_PRO_URL"
echo "2. Navigate: Settings → Firewall & Security → Firewall Rules"
echo "3. Create rules as described above"
echo ""
echo "📋 API Configuration (If needed):"
echo " See: scripts/unifi/allow-default-network-to-vlan11-node.js"
echo " for example of creating firewall rules via API"
echo ""
echo "✅ Firewall rules configuration guide complete!"
echo ""
echo "💡 Next Steps:"
echo " 1. Configure firewall rules via UDM Pro web UI"
echo " 2. Test inter-VLAN communication"
echo " 3. Adjust rules as needed"
echo ""

View File

@@ -0,0 +1,362 @@
#!/usr/bin/env bash
# Configure Nginx with JWT authentication using auth_request (no Lua required)
# This script configures VMID 2501 (Permissioned RPC) with JWT token authentication
# for rpc-http-prv.d-bis.org and rpc-ws-prv.d-bis.org
set -euo pipefail

# Resolve repository paths relative to this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Optional site-specific overrides (e.g. RPC_ALI_1, PROXMOX_HOST_ML110);
# a missing config file is fine — the defaults below apply.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Target container / endpoint identity. Environment variables take
# precedence over config-file values, which take precedence over defaults.
PROXMOX_HOST="${PROXMOX_HOST:-${PROXMOX_HOST_ML110:-192.168.11.10}}"
VMID=2501
HTTP_DOMAIN="rpc-http-prv.d-bis.org"
WS_DOMAIN="rpc-ws-prv.d-bis.org"
# One level of fallback is enough (the original nested the same variable
# four levels deep, which is equivalent but misleading).
IP="${RPC_ALI_1:-192.168.11.251}"
# NOTE: this shadows the shell's HOSTNAME variable; it is only used for the
# nginx server_name entries generated later in this script.
HOSTNAME="besu-rpc-2"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging helpers: colored one-line status messages to stdout.
info() { echo -e "${GREEN}[INFO]${NC} $1"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }

info "Configuring JWT authentication for Permissioned RPC (VMID $VMID)"
info "HTTP Domain: $HTTP_DOMAIN"
info "WS Domain: $WS_DOMAIN"
echo ""
# Check if container is running
# Any ssh/pct failure collapses to "unknown", which fails the guard below
# instead of aborting mid-pipeline under 'set -e'.
STATUS=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct status $VMID 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "unknown")
if [[ "$STATUS" != "running" ]]; then
  error "Container $VMID is not running (status: $STATUS)"
  exit 1
fi

# Install required packages
# Runs apt inside the container via 'pct exec'; PyJWT is attempted via pip
# first, then via apt as a fallback. The trailing '|| { warn ...; }' makes
# the whole step best-effort rather than fatal.
info "Installing required packages (nginx, python3, openssl)..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash -c '
export DEBIAN_FRONTEND=noninteractive
apt-get update -qq
apt-get install -y -qq nginx openssl python3 python3-pip || true
pip3 install PyJWT cryptography 2>/dev/null || {
# Fallback: install via apt if available
apt-get install -y -qq python3-jwt python3-cryptography || true
}
'" || {
  warn "Some packages may not be available, continuing..."
}

# Generate JWT secret key if it doesn't exist
# Secret lives at /etc/nginx/jwt_secret inside the container (mode 600) and
# is reused on re-runs; the command echoes it back so this host can print it.
info "Generating JWT secret key..."
JWT_SECRET=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash -c '
if [ ! -f /etc/nginx/jwt_secret ]; then
openssl rand -base64 32 > /etc/nginx/jwt_secret
chmod 600 /etc/nginx/jwt_secret
fi
cat /etc/nginx/jwt_secret
'")
if [ -z "$JWT_SECRET" ]; then
  error "Failed to generate JWT secret"
  exit 1
fi
info "✓ JWT secret generated: ${JWT_SECRET:0:20}..."
# Create the CGI validator that nginx's auth_request subrequests invoke via
# fcgiwrap. The outer heredoc delimiter is quoted, so everything below is
# shipped to the container verbatim (no local expansion).
info "Creating JWT validation script..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash" <<'PYTHON_SCRIPT_EOF'
cat > /usr/local/bin/jwt-validate.py <<'PYTHON_EOF'
#!/usr/bin/env python3
"""Validate an HS256 JWT from the Authorization header (CGI interface).

Reads the shared secret from /etc/nginx/jwt_secret and emits a CGI
response: Status 200 when the token is valid, 401 otherwise. Used by
nginx auth_request through fcgiwrap.
"""
import sys
import os
import hmac
import hashlib
import base64
import json
import time


def base64url_decode(data):
    # Add padding if needed (JWT segments strip trailing '=').
    padding = 4 - len(data) % 4
    if padding != 4:
        data += '=' * padding
    return base64.urlsafe_b64decode(data)


def verify_jwt(token, secret):
    """Return (True, payload) for a valid token, else (False, reason)."""
    try:
        parts = token.split('.')
        if len(parts) != 3:
            return False, "Invalid token format"
        header_data = base64url_decode(parts[0])
        payload_data = base64url_decode(parts[1])
        signature = parts[2]
        # Recompute the HS256 signature over "header.payload".
        message = f"{parts[0]}.{parts[1]}"
        expected_sig = hmac.new(
            secret.encode('utf-8'),
            message.encode('utf-8'),
            hashlib.sha256
        ).digest()
        expected_sig_b64 = base64.urlsafe_b64encode(expected_sig).decode('utf-8').rstrip('=')
        # Constant-time comparison avoids a timing side channel on the
        # signature check (plain != leaks match length).
        if not hmac.compare_digest(signature, expected_sig_b64):
            return False, "Invalid signature"
        # Check expiration (exp claim is optional).
        payload = json.loads(payload_data)
        if 'exp' in payload:
            if time.time() > payload['exp']:
                return False, "Token expired"
        return True, payload
    except Exception as e:
        return False, str(e)


if __name__ == '__main__':
    # Read secret
    with open('/etc/nginx/jwt_secret', 'r') as f:
        secret = f.read().strip()
    # Get token from Authorization header (forwarded by fastcgi_param).
    auth_header = os.environ.get('HTTP_AUTHORIZATION', '')
    if not auth_header.startswith('Bearer '):
        print('Status: 401 Unauthorized')
        print('Content-Type: application/json')
        print('')
        print('{"error": "Missing or invalid Authorization header"}')
        sys.exit(0)
    token = auth_header[7:]  # Remove "Bearer "
    valid, result = verify_jwt(token, secret)
    if valid:
        print('Status: 200 OK')
        print('Content-Type: application/json')
        print('')
        print('{"valid": true}')
        sys.exit(0)
    else:
        print('Status: 401 Unauthorized')
        print('Content-Type: application/json')
        print('')
        # json.dumps keeps the body valid JSON even when the failure reason
        # itself contains quotes or backslashes.
        print(json.dumps({"error": "Invalid token", "reason": str(result)}))
        sys.exit(0)
PYTHON_EOF
chmod +x /usr/local/bin/jwt-validate.py
PYTHON_SCRIPT_EOF
# Create Nginx configuration with JWT authentication using auth_request
# The outer heredoc delimiter is UNQUOTED, so ${HTTP_DOMAIN}/${WS_DOMAIN}/
# ${HOSTNAME}/${IP} expand on this host, while \$ escapes preserve nginx
# runtime variables ($host, $remote_addr, ...) for the generated config.
# Everything up to NGINX_CONFIG_EOF is data fed to bash inside the container.
info "Creating Nginx configuration with JWT authentication..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash" <<NGINX_CONFIG_EOF
cat > /etc/nginx/sites-available/rpc-perm <<'EOF'
# HTTP to HTTPS redirect
server {
listen 80;
listen [::]:80;
server_name ${HTTP_DOMAIN} ${WS_DOMAIN} ${HOSTNAME} ${IP};
return 301 https://\$host\$request_uri;
}
# Internal server for JWT validation
server {
listen 127.0.0.1:8888;
server_name _;
location /validate {
fastcgi_pass unix:/var/run/fcgiwrap.socket;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME /usr/local/bin/jwt-validate.py;
fastcgi_param HTTP_AUTHORIZATION \$http_authorization;
}
}
# HTTPS server - HTTP RPC API (Permissioned with JWT)
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${HTTP_DOMAIN} ${HOSTNAME} ${IP};
ssl_certificate /etc/nginx/ssl/rpc.crt;
ssl_certificate_key /etc/nginx/ssl/rpc.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
access_log /var/log/nginx/rpc-http-prv-access.log;
error_log /var/log/nginx/rpc-http-prv-error.log;
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
send_timeout 300s;
# JWT authentication using auth_request
location = /auth {
internal;
proxy_pass http://127.0.0.1:8888/validate;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Original-URI \$request_uri;
proxy_set_header Authorization \$http_authorization;
}
# HTTP RPC endpoint
location / {
auth_request /auth;
auth_request_set \$auth_status \$upstream_status;
# Return 401 if auth failed
error_page 401 = @auth_failed;
proxy_pass http://127.0.0.1:8545;
proxy_http_version 1.1;
proxy_set_header Host localhost;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header Connection "";
proxy_buffering off;
proxy_request_buffering off;
}
# Handle auth failures
location @auth_failed {
return 401 '{"jsonrpc":"2.0","error":{"code":-32000,"message":"Unauthorized. Missing or invalid JWT token. Use: Authorization: Bearer <token>"},"id":null}';
add_header Content-Type application/json;
}
# Health check endpoint (no JWT required)
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
# HTTPS server - WebSocket RPC API (Permissioned with JWT)
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${WS_DOMAIN};
ssl_certificate /etc/nginx/ssl/rpc.crt;
ssl_certificate_key /etc/nginx/ssl/rpc.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
access_log /var/log/nginx/rpc-ws-prv-access.log;
error_log /var/log/nginx/rpc-ws-prv-error.log;
# JWT authentication for WebSocket connections
location = /auth {
internal;
proxy_pass http://127.0.0.1:8888/validate;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header X-Original-URI \$request_uri;
proxy_set_header Authorization \$http_authorization;
}
location / {
auth_request /auth;
auth_request_set \$auth_status \$upstream_status;
error_page 401 = @auth_failed;
proxy_pass http://127.0.0.1:8546;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host localhost;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_read_timeout 86400;
proxy_send_timeout 86400;
}
location @auth_failed {
return 401 '{"error": "Unauthorized. Missing or invalid JWT token. Use: Authorization: Bearer <token>"}';
add_header Content-Type application/json;
}
# Health check endpoint (no JWT required)
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
EOF
# Disable old config if it exists
if [ -L /etc/nginx/sites-enabled/rpc ]; then
rm -f /etc/nginx/sites-enabled/rpc
echo "⚠ Disabled old rpc config"
fi
# Enable the new permissioned config
ln -sf /etc/nginx/sites-available/rpc-perm /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Install fcgiwrap for FastCGI support
apt-get install -y -qq fcgiwrap || {
# If fcgiwrap not available, use a simpler HTTP-based validation
echo "⚠ fcgiwrap not available, using alternative validation method"
}
# Test configuration
nginx -t
# Reload Nginx
systemctl enable nginx
systemctl restart nginx
NGINX_CONFIG_EOF
# NOTE(review): under 'set -euo pipefail' a failing ssh above aborts the
# script before reaching this point, so $? is always 0 here and the else
# branch is unreachable. Restructure as 'if ssh ... <<EOF' to make the
# failure path live — confirm before changing.
if [ $? -eq 0 ]; then
  info "✓ Nginx configured with JWT authentication"
else
  error "Failed to configure Nginx"
  exit 1
fi
# Wrap up: surface the shared secret the operator needs to mint tokens,
# then list the follow-up actions.
printf '\n'
info "JWT Authentication configured successfully!"
printf '\n'
warn "IMPORTANT: Save this JWT secret for token generation:"
printf '%s\n' " ${JWT_SECRET}"
printf '\n'
info "Next steps:"
printf '%s\n' \
" 1. Use the generate-jwt-token.sh script to create JWT tokens" \
" 2. Test with: curl -k -H 'Authorization: Bearer <token>' https://${HTTP_DOMAIN}" \
" 3. Update DNS records to point rpc-http-prv.d-bis.org and rpc-ws-prv.d-bis.org to ${IP}"

View File

@@ -0,0 +1,440 @@
#!/usr/bin/env bash
# Configure Nginx with JWT authentication for Permissioned RPC endpoints
# This script configures VMID 2501 (Permissioned RPC) with JWT token authentication
# for rpc-http-prv.d-bis.org and rpc-ws-prv.d-bis.org
set -euo pipefail

# Load IP configuration (optional overrides such as RPC_ALI_1); a missing
# file is tolerated and the defaults below apply.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Target container / endpoint identity (env vars take precedence).
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
VMID=2501
HTTP_DOMAIN="rpc-http-prv.d-bis.org"
WS_DOMAIN="rpc-ws-prv.d-bis.org"
# One level of fallback suffices (the original nested the same variable
# four levels deep, which is equivalent but misleading).
IP="${RPC_ALI_1:-192.168.11.251}"
# NOTE: shadows the shell's HOSTNAME; only used for nginx server_name below.
HOSTNAME="besu-rpc-2"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging helpers: colored one-line status messages to stdout.
info() { echo -e "${GREEN}[INFO]${NC} $1"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }

info "Configuring JWT authentication for Permissioned RPC (VMID $VMID)"
info "HTTP Domain: $HTTP_DOMAIN"
info "WS Domain: $WS_DOMAIN"
echo ""
# Check if container is running
# Any ssh/pct failure collapses to "unknown", which fails the guard below.
STATUS=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct status $VMID 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "unknown")
if [[ "$STATUS" != "running" ]]; then
  error "Container $VMID is not running (status: $STATUS)"
  exit 1
fi

# Install required packages
# nginx-extras ships the embedded Lua module needed for access_by_lua_block;
# the default nginx-core does not, so it is swapped out when present.
# LC_ALL/LANG=C plus the grep filters suppress perl/locale noise from apt.
# The whole remote script is a single-quoted literal passed to bash -c.
info "Installing required packages (nginx-extras for lua support, openssl)..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash -c '
export DEBIAN_FRONTEND=noninteractive
export LC_ALL=C
export LANG=C
apt-get update -qq 2>&1 | grep -vE \"(perl: warning|locale:)\" || true
# Check if nginx is installed and what version
if command -v nginx >/dev/null 2>&1; then
# Remove nginx-core if present and install nginx-extras which includes lua support
apt-get remove -y -qq nginx-core 2>&1 | grep -vE \"(perl: warning|locale:)\" || true
apt-get install -y -qq nginx-extras 2>&1 | grep -vE \"(perl: warning|locale:)\" || {
echo \"Warning: nginx-extras installation had issues\" >&2
exit 1
}
else
# Fresh install - use nginx-extras
apt-get install -y -qq nginx-extras openssl 2>&1 | grep -vE \"(perl: warning|locale:)\" || {
echo \"Error: nginx-extras installation failed\" >&2
exit 1
}
fi
# Verify nginx-extras was installed and has Lua support
if nginx -V 2>&1 | grep -q \"http_lua_module\"; then
echo \"✓ nginx-extras with Lua module installed successfully\"
else
echo \"Error: nginx Lua module not detected after installation\" >&2
echo \"nginx -V output:\" >&2
nginx -V 2>&1 | head -5 >&2
exit 1
fi
'" 2>&1 | grep -vE "(perl: warning|locale:|dpkg-preconfigure)" || {
  warn "Some packages may not be available, continuing with basic setup..."
}

# Generate JWT secret key if it doesn't exist
# Secret persists at /etc/nginx/jwt_secret (mode 600) inside the container
# and is echoed back so this host can reuse/print it.
info "Generating JWT secret key..."
JWT_SECRET=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash -c '
if [ ! -f /etc/nginx/jwt_secret ]; then
openssl rand -base64 32 > /etc/nginx/jwt_secret
chmod 600 /etc/nginx/jwt_secret
fi
cat /etc/nginx/jwt_secret
'")
if [ -z "$JWT_SECRET" ]; then
  error "Failed to generate JWT secret"
  exit 1
fi
info "✓ JWT secret generated: ${JWT_SECRET:0:20}..."

# Create Lua script for JWT validation
# Quoted heredoc delimiter -> content is shipped verbatim (no local
# expansion). NOTE(review): this standalone Lua file appears unused by the
# generated nginx config below, which inlines its own access_by_lua_block
# — confirm before removing.
info "Creating JWT validation Lua script..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash" <<'LUA_SCRIPT_EOF'
export LC_ALL=C
export LANG=C
cat > /etc/nginx/jwt-validate.lua <<'LUA_EOF'
local jwt = require "resty.jwt"
local secret = io.open("/etc/nginx/jwt_secret", "r")
if not secret then
ngx.log(ngx.ERR, "Failed to read JWT secret")
ngx.status = 500
ngx.say('{"error": "Internal server error"}')
ngx.exit(500)
end
local jwt_secret = secret:read("*all")
secret:close()
jwt_secret = jwt_secret:gsub("%s+", "")
-- Get JWT token from Authorization header
local auth_header = ngx.var.http_authorization
if not auth_header then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Missing Authorization header"}')
ngx.exit(401)
end
-- Extract Bearer token
local token = auth_header:match("Bearer%s+(.+)")
if not token then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Invalid Authorization header format. Use: Bearer <token>"}')
ngx.exit(401)
end
-- Validate JWT
local jwt_obj = jwt:verify(jwt_secret, token)
if not jwt_obj.valid then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Invalid or expired token", "reason": "' .. (jwt_obj.reason or "unknown") .. '"}')
ngx.exit(401)
end
-- Token is valid, continue
ngx.log(ngx.INFO, "JWT validated successfully for user: " .. (jwt_obj.payload.sub or "unknown"))
LUA_EOF
chmod 644 /etc/nginx/jwt-validate.lua
LUA_SCRIPT_EOF

# Install lua-resty-jwt library
# Fetched from GitHub inside the container (git clone, then zip/tarball
# fallbacks) and copied into nginx-extras' Lua 5.1 package path.
info "Installing lua-resty-jwt library..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash" <<'INSTALL_LUA_JWT_EOF'
export LC_ALL=C
export LANG=C
# Install dependencies
apt-get install -y -qq git curl unzip 2>&1 | grep -vE "(perl: warning|locale:|dpkg-preconfigure)" || true
# Install lua-resty-jwt
mkdir -p /usr/share/lua/5.1/resty
cd /tmp
rm -rf lua-resty-jwt*
# Try to clone or download
if command -v git &> /dev/null; then
git clone --depth 1 https://github.com/cdbattags/lua-resty-jwt.git 2>/dev/null || {
curl -L https://github.com/cdbattags/lua-resty-jwt/archive/refs/heads/master.zip -o lua-resty-jwt.zip
unzip -q lua-resty-jwt.zip
mv lua-resty-jwt-master lua-resty-jwt
}
else
curl -L https://github.com/cdbattags/lua-resty-jwt/archive/refs/heads/master.zip -o lua-resty-jwt.zip 2>/dev/null || {
curl -L https://github.com/cdbattags/lua-resty-jwt/archive/refs/heads/master.tar.gz | tar -xz
mv lua-resty-jwt-master lua-resty-jwt
}
unzip -q lua-resty-jwt.zip 2>/dev/null || true
fi
if [ -d lua-resty-jwt/lib/resty ]; then
cp -r lua-resty-jwt/lib/resty/* /usr/share/lua/5.1/resty/
echo "✓ lua-resty-jwt installed"
else
echo "⚠ Failed to install lua-resty-jwt, will use Python fallback"
fi
INSTALL_LUA_JWT_EOF

# Check if Lua module is available before creating config
# Hard requirement: the generated config uses access_by_lua_block, which
# fails nginx -t without the module.
info "Verifying nginx Lua module availability..."
LUA_AVAILABLE=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash -c 'nginx -V 2>&1 | grep -q \"http_lua_module\" && echo \"yes\" || echo \"no\"'" 2>/dev/null || echo "no")
if [[ "$LUA_AVAILABLE" != "yes" ]]; then
  error "Nginx Lua module is not available. nginx-extras may not be properly installed."
  error "Please run: pct exec $VMID -- apt-get install -y nginx-extras"
  error "Or use the Python-based script: ./scripts/configure-nginx-jwt-auth-simple.sh"
  exit 1
fi
info "✓ Lua module confirmed available"
# Create Nginx configuration with JWT authentication
# UNQUOTED outer heredoc: ${HTTP_DOMAIN}/${WS_DOMAIN}/${HOSTNAME}/${IP}
# expand locally; \$ escapes preserve nginx runtime variables. Everything
# up to NGINX_CONFIG_EOF is data executed by bash inside the container.
# NOTE(review): the 'warn' call near the end of the heredoc runs REMOTELY,
# where warn() is not defined — it would fail with "command not found";
# that branch is also unreachable once the Lua-module check above passed.
info "Creating Nginx configuration with JWT authentication..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash" <<NGINX_CONFIG_EOF
export LC_ALL=C
export LANG=C
cat > /etc/nginx/sites-available/rpc-perm <<'EOF'
# HTTP to HTTPS redirect
server {
listen 80;
listen [::]:80;
server_name ${HTTP_DOMAIN} ${WS_DOMAIN} ${HOSTNAME} ${IP};
return 301 https://\$host\$request_uri;
}
# HTTPS server - HTTP RPC API (Permissioned with JWT)
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${HTTP_DOMAIN} ${HOSTNAME} ${IP};
ssl_certificate /etc/nginx/ssl/rpc.crt;
ssl_certificate_key /etc/nginx/ssl/rpc.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
access_log /var/log/nginx/rpc-http-prv-access.log;
error_log /var/log/nginx/rpc-http-prv-error.log;
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
send_timeout 300s;
# JWT authentication for all requests except health check
location / {
# Validate JWT token
access_by_lua_block {
local jwt = require "resty.jwt"
local secret_file = io.open("/etc/nginx/jwt_secret", "r")
if not secret_file then
ngx.log(ngx.ERR, "Failed to read JWT secret")
ngx.status = 500
ngx.exit(500)
end
local jwt_secret = secret_file:read("*all")
secret_file:close()
jwt_secret = jwt_secret:gsub("%s+", "")
local auth_header = ngx.var.http_authorization
if not auth_header then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"jsonrpc":"2.0","error":{"code":-32000,"message":"Missing Authorization header. Use: Authorization: Bearer <token>"},"id":null}')
ngx.exit(401)
end
local token = auth_header:match("Bearer%s+(.+)")
if not token then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"jsonrpc":"2.0","error":{"code":-32000,"message":"Invalid Authorization header format. Use: Bearer <token>"},"id":null}')
ngx.exit(401)
end
local jwt_obj = jwt:verify(jwt_secret, token)
if not jwt_obj.valid then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"jsonrpc":"2.0","error":{"code":-32000,"message":"Invalid or expired token","data":"' .. (jwt_obj.reason or "unknown") .. '"},"id":null}')
ngx.exit(401)
end
}
proxy_pass http://127.0.0.1:8545;
proxy_http_version 1.1;
proxy_set_header Host localhost;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header Connection "";
proxy_buffering off;
proxy_request_buffering off;
}
# Health check endpoint (no JWT required)
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
# HTTPS server - WebSocket RPC API (Permissioned with JWT)
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${WS_DOMAIN};
ssl_certificate /etc/nginx/ssl/rpc.crt;
ssl_certificate_key /etc/nginx/ssl/rpc.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
access_log /var/log/nginx/rpc-ws-prv-access.log;
error_log /var/log/nginx/rpc-ws-prv-error.log;
# JWT authentication for WebSocket connections
location / {
# Validate JWT token before upgrading to WebSocket
access_by_lua_block {
local jwt = require "resty.jwt"
local secret_file = io.open("/etc/nginx/jwt_secret", "r")
if not secret_file then
ngx.log(ngx.ERR, "Failed to read JWT secret")
ngx.status = 500
ngx.exit(500)
end
local jwt_secret = secret_file:read("*all")
secret_file:close()
jwt_secret = jwt_secret:gsub("%s+", "")
local auth_header = ngx.var.http_authorization
if not auth_header then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Missing Authorization header. Use: Authorization: Bearer <token>"}')
ngx.exit(401)
end
local token = auth_header:match("Bearer%s+(.+)")
if not token then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Invalid Authorization header format. Use: Bearer <token>"}')
ngx.exit(401)
end
local jwt_obj = jwt:verify(jwt_secret, token)
if not jwt_obj.valid then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Invalid or expired token", "reason": "' .. (jwt_obj.reason or "unknown") .. '"}')
ngx.exit(401)
end
}
proxy_pass http://127.0.0.1:8546;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host localhost;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_read_timeout 86400;
proxy_send_timeout 86400;
}
# Health check endpoint (no JWT required)
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
EOF
# Disable old config if it exists (rpc-http-pub/rpc-ws-pub on this VMID)
if [ -L /etc/nginx/sites-enabled/rpc ]; then
rm -f /etc/nginx/sites-enabled/rpc
echo "⚠ Disabled old rpc config (rpc-http-pub/rpc-ws-pub should be on VMID 2502)"
fi
# Enable the new permissioned config
ln -sf /etc/nginx/sites-available/rpc-perm /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Update nginx.conf to include lua_package_path if lua module is available
if nginx -V 2>&1 | grep -q "http_lua_module"; then
if ! grep -q "lua_package_path" /etc/nginx/nginx.conf; then
# Use a more reliable method to add lua_package_path
# Create a temporary file with the addition
cat > /tmp/nginx_lua_add.conf <<'LUA_ADD_EOF'
# Lua package path
lua_package_path "/usr/share/lua/5.1/?.lua;/usr/share/lua/5.1/?/init.lua;;";
LUA_ADD_EOF
# Insert after "http {" line
sed -i '/^http {/r /tmp/nginx_lua_add.conf' /etc/nginx/nginx.conf
rm -f /tmp/nginx_lua_add.conf
fi
else
warn "Nginx Lua module not available - JWT validation will use alternative method"
fi
# Test configuration
nginx -t
# Reload Nginx
systemctl enable nginx
systemctl restart nginx
NGINX_CONFIG_EOF
# NOTE(review): under 'set -euo pipefail' a failing ssh above aborts the
# script before reaching this point, so $? is always 0 here and the else
# branch is unreachable. Restructure as 'if ssh ... <<EOF' to make the
# failure path live — confirm before changing.
if [ $? -eq 0 ]; then
  info "✓ Nginx configured with JWT authentication"
else
  error "Failed to configure Nginx"
  exit 1
fi
# Wrap up: surface the shared secret the operator needs to mint tokens,
# then list the follow-up actions.
printf '\n'
info "JWT Authentication configured successfully!"
printf '\n'
warn "IMPORTANT: Save this JWT secret for token generation:"
printf '%s\n' " ${JWT_SECRET}"
printf '\n'
info "Next steps:"
printf '%s\n' \
" 1. Use the generate-jwt-token.sh script to create JWT tokens" \
" 2. Test with: curl -k -H 'Authorization: Bearer <token>' https://${HTTP_DOMAIN}" \
" 3. Update DNS records to point rpc-http-prv.d-bis.org and rpc-ws-prv.d-bis.org to ${IP}"

View File

@@ -0,0 +1,434 @@
#!/usr/bin/env bash
# Configure Nginx with JWT authentication for Permissioned RPC endpoints
# This script configures VMID 2501 (Permissioned RPC) with JWT token authentication
# for rpc-http-prv.d-bis.org and rpc-ws-prv.d-bis.org
set -euo pipefail

# Directory this script lives in (kept for path-relative helpers).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Target container and endpoint identity; PROXMOX_HOST may be overridden
# from the environment, everything else is fixed for this deployment.
: "${PROXMOX_HOST:=192.168.11.10}"
VMID=2501
HTTP_DOMAIN="rpc-http-prv.d-bis.org"
WS_DOMAIN="rpc-ws-prv.d-bis.org"
IP="192.168.11.251"
HOSTNAME="besu-rpc-2"

# ANSI colors for log output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging helpers — %b interprets the escape sequences, matching echo -e output.
info() { printf '%b[INFO]%b %b\n' "$GREEN" "$NC" "$1"; }
warn() { printf '%b[WARN]%b %b\n' "$YELLOW" "$NC" "$1"; }
error() { printf '%b[ERROR]%b %b\n' "$RED" "$NC" "$1"; }
log_info() { printf '%b[INFO]%b %b\n' "$BLUE" "$NC" "$1"; }

info "Configuring JWT authentication for Permissioned RPC (VMID $VMID)"
info "HTTP Domain: $HTTP_DOMAIN"
info "WS Domain: $WS_DOMAIN"
echo ""
# Check if container is running
# Any ssh/pct failure collapses to "unknown", which fails the guard below.
STATUS=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct status $VMID 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "unknown")
if [[ "$STATUS" != "running" ]]; then
  error "Container $VMID is not running (status: $STATUS)"
  exit 1
fi

# Install required packages
# nginx-extras ships the embedded Lua module needed for access_by_lua_block;
# the default nginx-core does not, so it is swapped out when present.
# LC_ALL/LANG=C plus the grep filters suppress perl/locale noise from apt.
# The whole remote script is a single-quoted literal passed to bash -c.
info "Installing required packages (nginx-extras for lua support, openssl)..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash -c '
export DEBIAN_FRONTEND=noninteractive
export LC_ALL=C
export LANG=C
apt-get update -qq 2>&1 | grep -vE \"(perl: warning|locale:)\" || true
# Check if nginx is installed and what version
if command -v nginx >/dev/null 2>&1; then
# Remove nginx-core if present and install nginx-extras which includes lua support
apt-get remove -y -qq nginx-core 2>&1 | grep -vE \"(perl: warning|locale:)\" || true
apt-get install -y -qq nginx-extras 2>&1 | grep -vE \"(perl: warning|locale:)\" || {
echo \"Warning: nginx-extras installation had issues\" >&2
exit 1
}
else
# Fresh install - use nginx-extras
apt-get install -y -qq nginx-extras openssl 2>&1 | grep -vE \"(perl: warning|locale:)\" || {
echo \"Error: nginx-extras installation failed\" >&2
exit 1
}
fi
# Verify nginx-extras was installed and has Lua support
if nginx -V 2>&1 | grep -q \"http_lua_module\"; then
echo \"✓ nginx-extras with Lua module installed successfully\"
else
echo \"Error: nginx Lua module not detected after installation\" >&2
echo \"nginx -V output:\" >&2
nginx -V 2>&1 | head -5 >&2
exit 1
fi
'" 2>&1 | grep -vE "(perl: warning|locale:|dpkg-preconfigure)" || {
  warn "Some packages may not be available, continuing with basic setup..."
}

# Generate JWT secret key if it doesn't exist
# Secret persists at /etc/nginx/jwt_secret (mode 600) inside the container
# and is echoed back so this host can reuse/print it.
info "Generating JWT secret key..."
JWT_SECRET=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash -c '
if [ ! -f /etc/nginx/jwt_secret ]; then
openssl rand -base64 32 > /etc/nginx/jwt_secret
chmod 600 /etc/nginx/jwt_secret
fi
cat /etc/nginx/jwt_secret
'")
if [ -z "$JWT_SECRET" ]; then
  error "Failed to generate JWT secret"
  exit 1
fi
info "✓ JWT secret generated: ${JWT_SECRET:0:20}..."

# Create Lua script for JWT validation
# Quoted heredoc delimiter -> content is shipped verbatim (no local
# expansion). NOTE(review): this standalone Lua file appears unused by the
# nginx config generated later, which inlines its own access_by_lua_block
# — confirm before removing.
info "Creating JWT validation Lua script..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash" <<'LUA_SCRIPT_EOF'
export LC_ALL=C
export LANG=C
cat > /etc/nginx/jwt-validate.lua <<'LUA_EOF'
local jwt = require "resty.jwt"
local secret = io.open("/etc/nginx/jwt_secret", "r")
if not secret then
ngx.log(ngx.ERR, "Failed to read JWT secret")
ngx.status = 500
ngx.say('{"error": "Internal server error"}')
ngx.exit(500)
end
local jwt_secret = secret:read("*all")
secret:close()
jwt_secret = jwt_secret:gsub("%s+", "")
-- Get JWT token from Authorization header
local auth_header = ngx.var.http_authorization
if not auth_header then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Missing Authorization header"}')
ngx.exit(401)
end
-- Extract Bearer token
local token = auth_header:match("Bearer%s+(.+)")
if not token then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Invalid Authorization header format. Use: Bearer <token>"}')
ngx.exit(401)
end
-- Validate JWT
local jwt_obj = jwt:verify(jwt_secret, token)
if not jwt_obj.valid then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Invalid or expired token", "reason": "' .. (jwt_obj.reason or "unknown") .. '"}')
ngx.exit(401)
end
-- Token is valid, continue
ngx.log(ngx.INFO, "JWT validated successfully for user: " .. (jwt_obj.payload.sub or "unknown"))
LUA_EOF
chmod 644 /etc/nginx/jwt-validate.lua
LUA_SCRIPT_EOF

# Install lua-resty-jwt library
# Fetched from GitHub inside the container (git clone, then zip/tarball
# fallbacks) and copied into nginx-extras' Lua 5.1 package path.
info "Installing lua-resty-jwt library..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash" <<'INSTALL_LUA_JWT_EOF'
export LC_ALL=C
export LANG=C
# Install dependencies
apt-get install -y -qq git curl unzip 2>&1 | grep -vE "(perl: warning|locale:|dpkg-preconfigure)" || true
# Install lua-resty-jwt
mkdir -p /usr/share/lua/5.1/resty
cd /tmp
rm -rf lua-resty-jwt*
# Try to clone or download
if command -v git &> /dev/null; then
git clone --depth 1 https://github.com/cdbattags/lua-resty-jwt.git 2>/dev/null || {
curl -L https://github.com/cdbattags/lua-resty-jwt/archive/refs/heads/master.zip -o lua-resty-jwt.zip
unzip -q lua-resty-jwt.zip
mv lua-resty-jwt-master lua-resty-jwt
}
else
curl -L https://github.com/cdbattags/lua-resty-jwt/archive/refs/heads/master.zip -o lua-resty-jwt.zip 2>/dev/null || {
curl -L https://github.com/cdbattags/lua-resty-jwt/archive/refs/heads/master.tar.gz | tar -xz
mv lua-resty-jwt-master lua-resty-jwt
}
unzip -q lua-resty-jwt.zip 2>/dev/null || true
fi
if [ -d lua-resty-jwt/lib/resty ]; then
cp -r lua-resty-jwt/lib/resty/* /usr/share/lua/5.1/resty/
echo "✓ lua-resty-jwt installed"
else
echo "⚠ Failed to install lua-resty-jwt, will use Python fallback"
fi
INSTALL_LUA_JWT_EOF

# Check if Lua module is available before creating config
# Hard requirement: the generated config uses access_by_lua_block, which
# fails nginx -t without the module.
info "Verifying nginx Lua module availability..."
LUA_AVAILABLE=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
  "pct exec $VMID -- bash -c 'nginx -V 2>&1 | grep -q \"http_lua_module\" && echo \"yes\" || echo \"no\"'" 2>/dev/null || echo "no")
if [[ "$LUA_AVAILABLE" != "yes" ]]; then
  error "Nginx Lua module is not available. nginx-extras may not be properly installed."
  error "Please run: pct exec $VMID -- apt-get install -y nginx-extras"
  error "Or use the Python-based script: ./scripts/configure-nginx-jwt-auth-simple.sh"
  exit 1
fi
info "✓ Lua module confirmed available"
# Create Nginx configuration with JWT authentication
info "Creating Nginx configuration with JWT authentication..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- bash" <<NGINX_CONFIG_EOF
export LC_ALL=C
export LANG=C
cat > /etc/nginx/sites-available/rpc-perm <<'EOF'
# HTTP to HTTPS redirect
server {
listen 80;
listen [::]:80;
server_name ${HTTP_DOMAIN} ${WS_DOMAIN} ${HOSTNAME} ${IP};
return 301 https://\$host\$request_uri;
}
# HTTPS server - HTTP RPC API (Permissioned with JWT)
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${HTTP_DOMAIN} ${HOSTNAME} ${IP};
ssl_certificate /etc/nginx/ssl/rpc.crt;
ssl_certificate_key /etc/nginx/ssl/rpc.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
access_log /var/log/nginx/rpc-http-prv-access.log;
error_log /var/log/nginx/rpc-http-prv-error.log;
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
send_timeout 300s;
# JWT authentication for all requests except health check
location / {
# Validate JWT token
access_by_lua_block {
local jwt = require "resty.jwt"
local secret_file = io.open("/etc/nginx/jwt_secret", "r")
if not secret_file then
ngx.log(ngx.ERR, "Failed to read JWT secret")
ngx.status = 500
ngx.exit(500)
end
local jwt_secret = secret_file:read("*all")
secret_file:close()
jwt_secret = jwt_secret:gsub("%s+", "")
local auth_header = ngx.var.http_authorization
if not auth_header then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"jsonrpc":"2.0","error":{"code":-32000,"message":"Missing Authorization header. Use: Authorization: Bearer <token>"},"id":null}')
ngx.exit(401)
end
local token = auth_header:match("Bearer%s+(.+)")
if not token then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"jsonrpc":"2.0","error":{"code":-32000,"message":"Invalid Authorization header format. Use: Bearer <token>"},"id":null}')
ngx.exit(401)
end
local jwt_obj = jwt:verify(jwt_secret, token)
if not jwt_obj.valid then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"jsonrpc":"2.0","error":{"code":-32000,"message":"Invalid or expired token","data":"' .. (jwt_obj.reason or "unknown") .. '"},"id":null}')
ngx.exit(401)
end
}
proxy_pass http://127.0.0.1:8545;
proxy_http_version 1.1;
proxy_set_header Host localhost;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header Connection "";
proxy_buffering off;
proxy_request_buffering off;
}
# Health check endpoint (no JWT required)
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
# HTTPS server - WebSocket RPC API (Permissioned with JWT)
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name ${WS_DOMAIN};
ssl_certificate /etc/nginx/ssl/rpc.crt;
ssl_certificate_key /etc/nginx/ssl/rpc.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
access_log /var/log/nginx/rpc-ws-prv-access.log;
error_log /var/log/nginx/rpc-ws-prv-error.log;
# JWT authentication for WebSocket connections
location / {
# Validate JWT token before upgrading to WebSocket
access_by_lua_block {
local jwt = require "resty.jwt"
local secret_file = io.open("/etc/nginx/jwt_secret", "r")
if not secret_file then
ngx.log(ngx.ERR, "Failed to read JWT secret")
ngx.status = 500
ngx.exit(500)
end
local jwt_secret = secret_file:read("*all")
secret_file:close()
jwt_secret = jwt_secret:gsub("%s+", "")
local auth_header = ngx.var.http_authorization
if not auth_header then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Missing Authorization header. Use: Authorization: Bearer <token>"}')
ngx.exit(401)
end
local token = auth_header:match("Bearer%s+(.+)")
if not token then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Invalid Authorization header format. Use: Bearer <token>"}')
ngx.exit(401)
end
local jwt_obj = jwt:verify(jwt_secret, token)
if not jwt_obj.valid then
ngx.status = 401
ngx.header["Content-Type"] = "application/json"
ngx.say('{"error": "Invalid or expired token", "reason": "' .. (jwt_obj.reason or "unknown") .. '"}')
ngx.exit(401)
end
}
proxy_pass http://127.0.0.1:8546;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host localhost;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_read_timeout 86400;
proxy_send_timeout 86400;
}
# Health check endpoint (no JWT required)
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
EOF
# Disable old config if it exists (rpc-http-pub/rpc-ws-pub on this VMID)
if [ -L /etc/nginx/sites-enabled/rpc ]; then
rm -f /etc/nginx/sites-enabled/rpc
echo "⚠ Disabled old rpc config (rpc-http-pub/rpc-ws-pub should be on VMID 2502)"
fi
# Enable the new permissioned config
ln -sf /etc/nginx/sites-available/rpc-perm /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Update nginx.conf to include lua_package_path if lua module is available
if nginx -V 2>&1 | grep -q "http_lua_module"; then
if ! grep -q "lua_package_path" /etc/nginx/nginx.conf; then
# Use a more reliable method to add lua_package_path
# Create a temporary file with the addition
cat > /tmp/nginx_lua_add.conf <<'LUA_ADD_EOF'
# Lua package path
lua_package_path "/usr/share/lua/5.1/?.lua;/usr/share/lua/5.1/?/init.lua;;";
LUA_ADD_EOF
# Insert after "http {" line
sed -i '/^http {/r /tmp/nginx_lua_add.conf' /etc/nginx/nginx.conf
rm -f /tmp/nginx_lua_add.conf
fi
else
warn "Nginx Lua module not available - JWT validation will use alternative method"
fi
# Test configuration
nginx -t
# Reload Nginx
systemctl enable nginx
systemctl restart nginx
NGINX_CONFIG_EOF
if [ $? -eq 0 ]; then
info "✓ Nginx configured with JWT authentication"
else
error "Failed to configure Nginx"
exit 1
fi
# Display JWT secret for token generation
echo ""
info "JWT Authentication configured successfully!"
echo ""
warn "IMPORTANT: Save this JWT secret for token generation:"
echo " ${JWT_SECRET}"
echo ""
info "Next steps:"
echo " 1. Use the generate-jwt-token.sh script to create JWT tokens"
echo " 2. Test with: curl -k -H 'Authorization: Bearer <token>' https://${HTTP_DOMAIN}"
echo " 3. Update DNS records to point rpc-http-prv.d-bis.org and rpc-ws-prv.d-bis.org to ${IP}"

View File

@@ -0,0 +1,271 @@
#!/usr/bin/env bash
# Configure Nginx for Public RPC Endpoints on VMID 2500
# Adds public endpoints (rpc-http-pub.d-bis.org and rpc-ws-pub.d-bis.org) WITHOUT JWT authentication
#
# Requires: root SSH access to the Proxmox host; nginx already installed in the
# target LXC container; the referenced TLS certificate paths already populated.
set -euo pipefail

# Load IP configuration (best-effort; defaults below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

VMID=2500
IP="${RPC_ALLTRA_1:-192.168.11.250}"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

log_info "Configuring Public RPC Endpoints on VMID $VMID"
log_info "Endpoints: rpc-http-pub.d-bis.org, rpc-ws-pub.d-bis.org"
log_info "NO JWT authentication will be required"
echo ""

# Check if container is running
STATUS=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct status $VMID 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "unknown")
if [[ "$STATUS" != "running" ]]; then
    log_error "Container $VMID is not running (status: $STATUS)"
    exit 1
fi

# Create Nginx configuration for public endpoints.
# NOTE: the heredoc delimiter is quoted, so the body below is sent VERBATIM to
# bash inside the container. Local helper functions (log_info etc.) do not
# exist there — the remote section must use plain echo only.
# Capture the ssh exit status explicitly: under `set -e` a bare failure would
# abort the script before the if/else below could ever report it.
log_info "Creating Nginx configuration for public endpoints..."
SSH_RC=0
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- bash" <<'NGINX_CONFIG_EOF' || SSH_RC=$?
cat > /etc/nginx/sites-available/rpc-public <<'EOF'
# Public HTTP RPC endpoint - NO authentication required
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name rpc-http-pub.d-bis.org;
    # SSL configuration (use Let's Encrypt certificate if available, otherwise fallback)
    ssl_certificate /etc/letsencrypt/live/rpc-core.d-bis.org/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/rpc-core.d-bis.org/privkey.pem;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # Trust Cloudflare IPs for real IP
    set_real_ip_from 173.245.48.0/20;
    set_real_ip_from 103.21.244.0/22;
    set_real_ip_from 103.22.200.0/22;
    set_real_ip_from 103.31.4.0/22;
    set_real_ip_from 141.101.64.0/18;
    set_real_ip_from 108.162.192.0/18;
    set_real_ip_from 190.93.240.0/20;
    set_real_ip_from 188.114.96.0/20;
    set_real_ip_from 197.234.240.0/22;
    set_real_ip_from 198.41.128.0/17;
    set_real_ip_from 162.158.0.0/15;
    set_real_ip_from 104.16.0.0/13;
    set_real_ip_from 104.24.0.0/14;
    set_real_ip_from 172.64.0.0/13;
    set_real_ip_from 131.0.72.0/22;
    real_ip_header CF-Connecting-IP;
    # Security headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
    # Logging
    access_log /var/log/nginx/rpc-http-pub-access.log;
    error_log /var/log/nginx/rpc-http-pub-error.log;
    # Increase timeouts for RPC calls
    proxy_connect_timeout 300s;
    proxy_send_timeout 300s;
    proxy_read_timeout 300s;
    send_timeout 300s;
    client_max_body_size 10M;
    # HTTP RPC endpoint - NO JWT authentication
    location / {
        proxy_pass http://127.0.0.1:8545;
        proxy_http_version 1.1;
        # Headers
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Connection "";
        # Buffer settings (disable for RPC)
        proxy_buffering off;
        proxy_request_buffering off;
        # CORS headers (for web apps and MetaMask)
        add_header Access-Control-Allow-Origin * always;
        add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always;
        add_header Access-Control-Allow-Headers "Content-Type, Authorization" always;
        # Handle OPTIONS requests
        if ($request_method = OPTIONS) {
            return 204;
        }
        # NO JWT authentication here - this is a public endpoint!
    }
    # Health check endpoint
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}
# Public WebSocket RPC endpoint - NO authentication required
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name rpc-ws-pub.d-bis.org;
    # SSL configuration
    ssl_certificate /etc/nginx/ssl/rpc.crt;
    ssl_certificate_key /etc/nginx/ssl/rpc.key;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # Trust Cloudflare IPs for real IP
    set_real_ip_from 173.245.48.0/20;
    set_real_ip_from 103.21.244.0/22;
    set_real_ip_from 103.22.200.0/22;
    set_real_ip_from 103.31.4.0/22;
    set_real_ip_from 141.101.64.0/18;
    set_real_ip_from 108.162.192.0/18;
    set_real_ip_from 190.93.240.0/20;
    set_real_ip_from 188.114.96.0/20;
    set_real_ip_from 197.234.240.0/22;
    set_real_ip_from 198.41.128.0/17;
    set_real_ip_from 162.158.0.0/15;
    set_real_ip_from 104.16.0.0/13;
    set_real_ip_from 104.24.0.0/14;
    set_real_ip_from 172.64.0.0/13;
    set_real_ip_from 131.0.72.0/22;
    real_ip_header CF-Connecting-IP;
    # Security headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    # Logging
    access_log /var/log/nginx/rpc-ws-pub-access.log;
    error_log /var/log/nginx/rpc-ws-pub-error.log;
    # WebSocket RPC endpoint - NO JWT authentication
    location / {
        proxy_pass http://127.0.0.1:8546;
        proxy_http_version 1.1;
        # WebSocket headers
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # Long timeouts for WebSocket connections
        proxy_read_timeout 86400;
        proxy_send_timeout 86400;
        proxy_connect_timeout 300s;
        # NO JWT authentication here - this is a public endpoint!
    }
    # Health check endpoint
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}
EOF
# Enable the site
ln -sf /etc/nginx/sites-available/rpc-public /etc/nginx/sites-enabled/
# Test configuration (plain echo: local log_* helpers are not defined here)
echo "[INFO] Testing Nginx configuration..."
if nginx -t; then
    echo "✓ Nginx configuration is valid"
else
    echo "✗ Nginx configuration test failed"
    exit 1
fi
# Reload Nginx
echo "[INFO] Reloading Nginx..."
systemctl reload nginx || systemctl restart nginx
echo "✓ Public endpoints configured successfully"
NGINX_CONFIG_EOF

if [ "$SSH_RC" -eq 0 ]; then
    log_success "Nginx configuration created and enabled"
else
    log_error "Failed to create Nginx configuration"
    exit 1
fi

# Verify Nginx is running
log_info "Verifying Nginx status..."
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- systemctl is-active nginx >/dev/null 2>&1"; then
    log_success "Nginx service is active"
else
    log_error "Nginx service is not active"
    exit 1
fi

# Test the endpoint.
# The outer `|| echo 'FAILED'` keeps a dropped SSH connection from aborting
# the script under `set -e` before the diagnostic below can print.
log_info "Testing public RPC endpoint..."
sleep 2
RPC_TEST=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- timeout 5 curl -k -s -X POST https://localhost \
    -H 'Host: rpc-http-pub.d-bis.org' \
    -H 'Content-Type: application/json' \
    -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' 2>&1 || echo 'FAILED'" \
    2>/dev/null || echo 'FAILED')
# Chain ID 138 == 0x8a
if echo "$RPC_TEST" | grep -q '"result":"0x8a"'; then
    log_success "Public RPC endpoint is working correctly!"
    log_info "Response: $RPC_TEST"
else
    log_warn "RPC endpoint test had unexpected response"
    log_info "Response: $RPC_TEST"
fi

echo ""
log_success "Public RPC endpoints configuration complete!"
echo ""
log_info "Configuration Summary:"
log_info "  - Public HTTP RPC: https://rpc-http-pub.d-bis.org (port 443 → 8545, NO auth)"
log_info "  - Public WebSocket RPC: wss://rpc-ws-pub.d-bis.org (port 443 → 8546, NO auth)"
log_info "  - Both endpoints are publicly accessible without JWT tokens"
echo ""
log_info "Next steps:"
log_info "  1. Test from external: curl -X POST https://rpc-http-pub.d-bis.org -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}'"
log_info "  2. Verify MetaMask can connect without authentication"
log_info "  3. Ensure Cloudflared tunnel is routing correctly to VMID 2500"

View File

@@ -0,0 +1,265 @@
#!/usr/bin/env bash
# Configure Nginx for Public RPC Endpoints on VMID 2500
# Adds public endpoints (rpc-http-pub.d-bis.org and rpc-ws-pub.d-bis.org) WITHOUT JWT authentication
#
# NOTE(review): this appears to be an earlier revision of the same script with
# a hard-coded IP and no ip-addresses.conf sourcing — confirm which copy is
# the live one and retire the other.
set -euo pipefail

VMID=2500
IP="192.168.11.250"
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

log_info "Configuring Public RPC Endpoints on VMID $VMID"
log_info "Endpoints: rpc-http-pub.d-bis.org, rpc-ws-pub.d-bis.org"
log_info "NO JWT authentication will be required"
echo ""

# Check if container is running
STATUS=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct status $VMID 2>/dev/null | awk '{print \$2}'" 2>/dev/null || echo "unknown")
if [[ "$STATUS" != "running" ]]; then
    log_error "Container $VMID is not running (status: $STATUS)"
    exit 1
fi

# Create Nginx configuration for public endpoints.
# The quoted heredoc is sent VERBATIM to bash inside the container; local
# helpers (log_info etc.) are not defined there, so the remote section must
# use plain echo. The ssh exit status is captured explicitly because under
# `set -e` a bare failure would abort before the if/else below could run.
log_info "Creating Nginx configuration for public endpoints..."
SSH_RC=0
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- bash" <<'NGINX_CONFIG_EOF' || SSH_RC=$?
cat > /etc/nginx/sites-available/rpc-public <<'EOF'
# Public HTTP RPC endpoint - NO authentication required
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name rpc-http-pub.d-bis.org;
    # SSL configuration (use Let's Encrypt certificate if available, otherwise fallback)
    ssl_certificate /etc/letsencrypt/live/rpc-core.d-bis.org/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/rpc-core.d-bis.org/privkey.pem;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # Trust Cloudflare IPs for real IP
    set_real_ip_from 173.245.48.0/20;
    set_real_ip_from 103.21.244.0/22;
    set_real_ip_from 103.22.200.0/22;
    set_real_ip_from 103.31.4.0/22;
    set_real_ip_from 141.101.64.0/18;
    set_real_ip_from 108.162.192.0/18;
    set_real_ip_from 190.93.240.0/20;
    set_real_ip_from 188.114.96.0/20;
    set_real_ip_from 197.234.240.0/22;
    set_real_ip_from 198.41.128.0/17;
    set_real_ip_from 162.158.0.0/15;
    set_real_ip_from 104.16.0.0/13;
    set_real_ip_from 104.24.0.0/14;
    set_real_ip_from 172.64.0.0/13;
    set_real_ip_from 131.0.72.0/22;
    real_ip_header CF-Connecting-IP;
    # Security headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
    # Logging
    access_log /var/log/nginx/rpc-http-pub-access.log;
    error_log /var/log/nginx/rpc-http-pub-error.log;
    # Increase timeouts for RPC calls
    proxy_connect_timeout 300s;
    proxy_send_timeout 300s;
    proxy_read_timeout 300s;
    send_timeout 300s;
    client_max_body_size 10M;
    # HTTP RPC endpoint - NO JWT authentication
    location / {
        proxy_pass http://127.0.0.1:8545;
        proxy_http_version 1.1;
        # Headers
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Connection "";
        # Buffer settings (disable for RPC)
        proxy_buffering off;
        proxy_request_buffering off;
        # CORS headers (for web apps and MetaMask)
        add_header Access-Control-Allow-Origin * always;
        add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always;
        add_header Access-Control-Allow-Headers "Content-Type, Authorization" always;
        # Handle OPTIONS requests
        if ($request_method = OPTIONS) {
            return 204;
        }
        # NO JWT authentication here - this is a public endpoint!
    }
    # Health check endpoint
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}
# Public WebSocket RPC endpoint - NO authentication required
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name rpc-ws-pub.d-bis.org;
    # SSL configuration
    ssl_certificate /etc/nginx/ssl/rpc.crt;
    ssl_certificate_key /etc/nginx/ssl/rpc.key;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # Trust Cloudflare IPs for real IP
    set_real_ip_from 173.245.48.0/20;
    set_real_ip_from 103.21.244.0/22;
    set_real_ip_from 103.22.200.0/22;
    set_real_ip_from 103.31.4.0/22;
    set_real_ip_from 141.101.64.0/18;
    set_real_ip_from 108.162.192.0/18;
    set_real_ip_from 190.93.240.0/20;
    set_real_ip_from 188.114.96.0/20;
    set_real_ip_from 197.234.240.0/22;
    set_real_ip_from 198.41.128.0/17;
    set_real_ip_from 162.158.0.0/15;
    set_real_ip_from 104.16.0.0/13;
    set_real_ip_from 104.24.0.0/14;
    set_real_ip_from 172.64.0.0/13;
    set_real_ip_from 131.0.72.0/22;
    real_ip_header CF-Connecting-IP;
    # Security headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    # Logging
    access_log /var/log/nginx/rpc-ws-pub-access.log;
    error_log /var/log/nginx/rpc-ws-pub-error.log;
    # WebSocket RPC endpoint - NO JWT authentication
    location / {
        proxy_pass http://127.0.0.1:8546;
        proxy_http_version 1.1;
        # WebSocket headers
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        # Long timeouts for WebSocket connections
        proxy_read_timeout 86400;
        proxy_send_timeout 86400;
        proxy_connect_timeout 300s;
        # NO JWT authentication here - this is a public endpoint!
    }
    # Health check endpoint
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}
EOF
# Enable the site
ln -sf /etc/nginx/sites-available/rpc-public /etc/nginx/sites-enabled/
# Test configuration (plain echo: local log_* helpers are not defined here)
echo "[INFO] Testing Nginx configuration..."
if nginx -t; then
    echo "✓ Nginx configuration is valid"
else
    echo "✗ Nginx configuration test failed"
    exit 1
fi
# Reload Nginx
echo "[INFO] Reloading Nginx..."
systemctl reload nginx || systemctl restart nginx
echo "✓ Public endpoints configured successfully"
NGINX_CONFIG_EOF

if [ "$SSH_RC" -eq 0 ]; then
    log_success "Nginx configuration created and enabled"
else
    log_error "Failed to create Nginx configuration"
    exit 1
fi

# Verify Nginx is running
log_info "Verifying Nginx status..."
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- systemctl is-active nginx >/dev/null 2>&1"; then
    log_success "Nginx service is active"
else
    log_error "Nginx service is not active"
    exit 1
fi

# Test the endpoint.
# The outer `|| echo 'FAILED'` keeps a dropped SSH connection from aborting
# the script under `set -e` before the diagnostic below can print.
log_info "Testing public RPC endpoint..."
sleep 2
RPC_TEST=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- timeout 5 curl -k -s -X POST https://localhost \
    -H 'Host: rpc-http-pub.d-bis.org' \
    -H 'Content-Type: application/json' \
    -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}' 2>&1 || echo 'FAILED'" \
    2>/dev/null || echo 'FAILED')
# Chain ID 138 == 0x8a
if echo "$RPC_TEST" | grep -q '"result":"0x8a"'; then
    log_success "Public RPC endpoint is working correctly!"
    log_info "Response: $RPC_TEST"
else
    log_warn "RPC endpoint test had unexpected response"
    log_info "Response: $RPC_TEST"
fi

echo ""
log_success "Public RPC endpoints configuration complete!"
echo ""
log_info "Configuration Summary:"
log_info "  - Public HTTP RPC: https://rpc-http-pub.d-bis.org (port 443 → 8545, NO auth)"
log_info "  - Public WebSocket RPC: wss://rpc-ws-pub.d-bis.org (port 443 → 8546, NO auth)"
log_info "  - Both endpoints are publicly accessible without JWT tokens"
echo ""
log_info "Next steps:"
log_info "  1. Test from external: curl -X POST https://rpc-http-pub.d-bis.org -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_chainId\",\"params\":[],\"id\":1}'"
log_info "  2. Verify MetaMask can connect without authentication"
log_info "  3. Ensure Cloudflared tunnel is routing correctly to VMID 2500"

View File

@@ -0,0 +1,259 @@
#!/usr/bin/env bash
# Configure Nginx for Core RPC Node (VMID 2500)
# This configures Nginx as a reverse proxy for Besu RPC endpoints
set -euo pipefail

# Load IP configuration (best-effort; defaults below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

VMID=2500
HOSTNAME="besu-rpc-1"
IP="${RPC_ALLTRA_1:-192.168.11.250}"
# Default keeps the script usable under `set -u` when ip-addresses.conf was
# not sourced (PROXMOX_HOST_ML110 unset would otherwise abort immediately).
PROXMOX_HOST="${PROXMOX_HOST_ML110:-192.168.11.10}"
# SECURITY: hardcoded root password retained only as a backward-compatible
# default. TODO: rotate this credential and switch to key-based SSH so
# sshpass can be dropped entirely.
SSH_PASS="${PROXMOX_SSH_PASS:-L@kers2010}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }

log_info "Configuring Nginx for Core RPC Node (VMID $VMID)"
log_info "Hostname: $HOSTNAME"
log_info "IP: $IP"
echo ""

# Create Nginx configuration.
# The outer heredoc delimiter is deliberately UNQUOTED so that
# ${RPC_ALLTRA_1:-...} expands locally before the script is shipped to the
# container (with a quoted delimiter the literal text "${RPC_ALLTRA_1:-...}"
# would end up inside the nginx server_name). All nginx runtime variables
# ($host, $remote_addr, ...) are therefore escaped as \$ to survive local
# expansion, matching the convention of the sibling JWT-auth script.
# ssh exit status is captured explicitly: under `set -e` a bare failure would
# abort before the if/else below could report it.
SSH_RC=0
sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- bash" <<NGINX_CONFIG_EOF || SSH_RC=\$?
cat > /etc/nginx/sites-available/rpc-core <<'EOF'
# HTTP to HTTPS redirect
server {
    listen 80;
    listen [::]:80;
    server_name besu-rpc-1 ${RPC_ALLTRA_1:-192.168.11.250} rpc-core.besu.local rpc-core.chainid138.local;
    # Redirect all HTTP to HTTPS
    return 301 https://\$host\$request_uri;
}
# HTTPS server - HTTP RPC API (port 8545)
server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name besu-rpc-1 ${RPC_ALLTRA_1:-192.168.11.250} rpc-core.besu.local rpc-core.chainid138.local;
    # SSL configuration
    ssl_certificate /etc/nginx/ssl/rpc.crt;
    ssl_certificate_key /etc/nginx/ssl/rpc.key;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # Security headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header X-XSS-Protection "1; mode=block" always;
    # Logging
    access_log /var/log/nginx/rpc-core-http-access.log;
    error_log /var/log/nginx/rpc-core-http-error.log;
    # Increase timeouts for RPC calls
    proxy_connect_timeout 300s;
    proxy_send_timeout 300s;
    proxy_read_timeout 300s;
    send_timeout 300s;
    client_max_body_size 10M;
    # HTTP RPC endpoint (port 8545)
    location / {
        proxy_pass http://127.0.0.1:8545;
        proxy_http_version 1.1;
        # Headers
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        proxy_set_header Connection "";
        # Buffer settings (disable for RPC)
        proxy_buffering off;
        proxy_request_buffering off;
        # CORS headers (if needed for web apps)
        add_header Access-Control-Allow-Origin * always;
        add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always;
        add_header Access-Control-Allow-Headers "Content-Type, Authorization" always;
        # Handle OPTIONS requests
        if (\$request_method = OPTIONS) {
            return 204;
        }
    }
    # Health check endpoint
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
    # Metrics endpoint (if exposed)
    location /metrics {
        proxy_pass http://127.0.0.1:9545;
        proxy_http_version 1.1;
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
    }
}
# HTTPS server - WebSocket RPC API (port 8546)
server {
    listen 8443 ssl http2;
    listen [::]:8443 ssl http2;
    server_name besu-rpc-1 ${RPC_ALLTRA_1:-192.168.11.250} rpc-core-ws.besu.local rpc-core-ws.chainid138.local;
    # SSL configuration
    ssl_certificate /etc/nginx/ssl/rpc.crt;
    ssl_certificate_key /etc/nginx/ssl/rpc.key;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;
    # Security headers
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    # Logging
    access_log /var/log/nginx/rpc-core-ws-access.log;
    error_log /var/log/nginx/rpc-core-ws-error.log;
    # WebSocket RPC endpoint (port 8546)
    location / {
        proxy_pass http://127.0.0.1:8546;
        proxy_http_version 1.1;
        # WebSocket headers
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host \$host;
        proxy_set_header X-Real-IP \$remote_addr;
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto \$scheme;
        # Long timeouts for WebSocket connections
        proxy_read_timeout 86400;
        proxy_send_timeout 86400;
        proxy_connect_timeout 300s;
    }
    # Health check endpoint
    location /health {
        access_log off;
        return 200 "healthy\n";
        add_header Content-Type text/plain;
    }
}
EOF
# Enable the site
ln -sf /etc/nginx/sites-available/rpc-core /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Test configuration
nginx -t
# Reload Nginx
systemctl enable nginx
systemctl restart nginx
NGINX_CONFIG_EOF

if [ "$SSH_RC" -eq 0 ]; then
    log_success "Nginx configuration created"
else
    log_error "Failed to create Nginx configuration"
    exit 1
fi

# Verify Nginx is running
log_info "Verifying Nginx status..."
if sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- systemctl is-active nginx >/dev/null 2>&1"; then
    log_success "Nginx service is active"
else
    log_error "Nginx service is not active"
    exit 1
fi

# Check if ports are listening
log_info "Checking listening ports..."
PORTS=$(sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- ss -tlnp 2>&1 | grep -E ':80|:443|:8443' || echo ''")
if echo "$PORTS" | grep -q ':80'; then
    log_success "Port 80 is listening"
else
    log_warn "Port 80 may not be listening"
fi
if echo "$PORTS" | grep -q ':443'; then
    log_success "Port 443 is listening"
else
    log_warn "Port 443 may not be listening"
fi
if echo "$PORTS" | grep -q ':8443'; then
    log_success "Port 8443 is listening"
else
    log_warn "Port 8443 may not be listening"
fi

# Test RPC endpoint through Nginx.
# Outer `|| echo 'FAILED'` keeps an SSH failure from aborting under `set -e`
# before the diagnostic below can print.
log_info "Testing RPC endpoint through Nginx..."
RPC_TEST=$(sshpass -p "$SSH_PASS" ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
    "pct exec $VMID -- timeout 5 curl -k -s -X POST https://localhost:443 \
    -H 'Content-Type: application/json' \
    -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}' 2>&1 || echo 'FAILED'" \
    2>/dev/null || echo 'FAILED')
if echo "$RPC_TEST" | grep -q "result"; then
    # NOTE: grep -oP is GNU-only; fine on Debian/Proxmox hosts.
    BLOCK_NUM=$(echo "$RPC_TEST" | grep -oP '"result":"\K[^"]+' | head -1)
    log_success "RPC endpoint is responding through Nginx!"
    log_info "Current block: $BLOCK_NUM"
else
    log_warn "RPC endpoint test failed or needs more time"
    log_info "Response: $RPC_TEST"
fi

echo ""
log_success "Nginx configuration complete!"
echo ""
log_info "Configuration Summary:"
log_info "  - HTTP RPC: https://$IP:443 (proxies to localhost:8545)"
log_info "  - WebSocket RPC: https://$IP:8443 (proxies to localhost:8546)"
log_info "  - HTTP redirect: http://$IP:80 → https://$IP:443"
log_info "  - Health check: https://$IP:443/health"
echo ""
log_info "Next steps:"
log_info "  1. Test from external: curl -k https://$IP:443/health"
log_info "  2. Test RPC: curl -k -X POST https://$IP:443 -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}'"
log_info "  3. Replace self-signed certificate with Let's Encrypt if needed"
log_info "  4. Configure firewall rules if needed"
View File

@@ -0,0 +1,253 @@
#!/usr/bin/env bash
set -euo pipefail
# Configure Nginx for Core RPC Node (VMID 2500)
# This configures Nginx as a reverse proxy for Besu RPC endpoints
#
# Flow:
#   1. Push an Nginx site config into LXC $VMID via SSH + `pct exec`:
#      - port 80:   HTTP -> HTTPS redirect
#      - port 443:  HTTPS reverse proxy to Besu HTTP RPC (127.0.0.1:8545)
#      - port 8443: HTTPS reverse proxy to Besu WebSocket RPC (127.0.0.1:8546)
#   2. Enable the site, validate (`nginx -t`) and restart Nginx in-container.
#   3. Verify the service is active, check listening ports, and probe the
#      RPC endpoint through the proxy with an eth_blockNumber call.
#
# NOTE(review): redundant — errexit is already enabled by `set -euo pipefail`.
set -e
VMID=2500
HOSTNAME="besu-rpc-1"
IP="192.168.11.250"
PROXMOX_HOST="192.168.11.10"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "Configuring Nginx for Core RPC Node (VMID $VMID)"
log_info "Hostname: $HOSTNAME"
log_info "IP: $IP"
echo ""
# Create Nginx configuration
# SECURITY NOTE(review): plaintext SSH password embedded in the script and
# visible in `ps` output — prefer key-based auth or `sshpass -f`/SSHPASS env.
# The outer heredoc delimiter is quoted ('NGINX_CONFIG_EOF'), so the payload
# below — including the nested nginx-config heredoc — is sent verbatim and
# any $variables inside it are expanded remotely, not on this host.
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- bash" <<'NGINX_CONFIG_EOF'
cat > /etc/nginx/sites-available/rpc-core <<'EOF'
# HTTP to HTTPS redirect
server {
listen 80;
listen [::]:80;
server_name besu-rpc-1 192.168.11.250 rpc-core.besu.local rpc-core.chainid138.local;
# Redirect all HTTP to HTTPS
return 301 https://$host$request_uri;
}
# HTTPS server - HTTP RPC API (port 8545)
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name besu-rpc-1 192.168.11.250 rpc-core.besu.local rpc-core.chainid138.local;
# SSL configuration
ssl_certificate /etc/nginx/ssl/rpc.crt;
ssl_certificate_key /etc/nginx/ssl/rpc.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
# Security headers
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
# Logging
access_log /var/log/nginx/rpc-core-http-access.log;
error_log /var/log/nginx/rpc-core-http-error.log;
# Increase timeouts for RPC calls
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
send_timeout 300s;
client_max_body_size 10M;
# HTTP RPC endpoint (port 8545)
location / {
proxy_pass http://127.0.0.1:8545;
proxy_http_version 1.1;
# Headers
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Connection "";
# Buffer settings (disable for RPC)
proxy_buffering off;
proxy_request_buffering off;
# CORS headers (if needed for web apps)
add_header Access-Control-Allow-Origin * always;
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS" always;
add_header Access-Control-Allow-Headers "Content-Type, Authorization" always;
# Handle OPTIONS requests
if ($request_method = OPTIONS) {
return 204;
}
}
# Health check endpoint
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
# Metrics endpoint (if exposed)
location /metrics {
proxy_pass http://127.0.0.1:9545;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
# HTTPS server - WebSocket RPC API (port 8546)
server {
listen 8443 ssl http2;
listen [::]:8443 ssl http2;
server_name besu-rpc-1 192.168.11.250 rpc-core-ws.besu.local rpc-core-ws.chainid138.local;
# SSL configuration
ssl_certificate /etc/nginx/ssl/rpc.crt;
ssl_certificate_key /etc/nginx/ssl/rpc.key;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers 'ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384';
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
# Security headers
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
# Logging
access_log /var/log/nginx/rpc-core-ws-access.log;
error_log /var/log/nginx/rpc-core-ws-error.log;
# WebSocket RPC endpoint (port 8546)
location / {
proxy_pass http://127.0.0.1:8546;
proxy_http_version 1.1;
# WebSocket headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Long timeouts for WebSocket connections
proxy_read_timeout 86400;
proxy_send_timeout 86400;
proxy_connect_timeout 300s;
}
# Health check endpoint
location /health {
access_log off;
return 200 "healthy\n";
add_header Content-Type text/plain;
}
}
EOF
# Enable the site
ln -sf /etc/nginx/sites-available/rpc-core /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Test configuration
nginx -t
# Reload Nginx
systemctl enable nginx
systemctl restart nginx
NGINX_CONFIG_EOF
# NOTE(review): under `set -e` a failing ssh above terminates the script, so
# this else branch is effectively unreachable; kept as-is.
if [ $? -eq 0 ]; then
log_success "Nginx configuration created"
else
log_error "Failed to create Nginx configuration"
exit 1
fi
# Verify Nginx is running
log_info "Verifying Nginx status..."
if sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- systemctl is-active nginx >/dev/null 2>&1"; then
log_success "Nginx service is active"
else
log_error "Nginx service is not active"
exit 1
fi
# Check if ports are listening (best-effort: only warns, never fails)
log_info "Checking listening ports..."
PORTS=$(sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- ss -tlnp 2>&1 | grep -E ':80|:443|:8443' || echo ''")
if echo "$PORTS" | grep -q ':80'; then
log_success "Port 80 is listening"
else
log_warn "Port 80 may not be listening"
fi
if echo "$PORTS" | grep -q ':443'; then
log_success "Port 443 is listening"
else
log_warn "Port 443 may not be listening"
fi
if echo "$PORTS" | grep -q ':8443'; then
log_success "Port 8443 is listening"
else
log_warn "Port 8443 may not be listening"
fi
# Test RPC endpoint through Nginx (end-to-end eth_blockNumber probe;
# `-k` accepts the self-signed certificate installed above)
log_info "Testing RPC endpoint through Nginx..."
RPC_TEST=$(sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- timeout 5 curl -k -s -X POST https://localhost:443 \
-H 'Content-Type: application/json' \
-d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}' 2>&1 || echo 'FAILED'")
if echo "$RPC_TEST" | grep -q "result"; then
BLOCK_NUM=$(echo "$RPC_TEST" | grep -oP '"result":"\K[^"]+' | head -1)
log_success "RPC endpoint is responding through Nginx!"
log_info "Current block: $BLOCK_NUM"
else
log_warn "RPC endpoint test failed or needs more time"
log_info "Response: $RPC_TEST"
fi
echo ""
log_success "Nginx configuration complete!"
echo ""
log_info "Configuration Summary:"
log_info " - HTTP RPC: https://$IP:443 (proxies to localhost:8545)"
log_info " - WebSocket RPC: https://$IP:8443 (proxies to localhost:8546)"
log_info " - HTTP redirect: http://$IP:80 → https://$IP:443"
log_info " - Health check: https://$IP:443/health"
echo ""
log_info "Next steps:"
log_info " 1. Test from external: curl -k https://$IP:443/health"
log_info " 2. Test RPC: curl -k -X POST https://$IP:443 -H 'Content-Type: application/json' -d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}'"
log_info " 3. Replace self-signed certificate with Let's Encrypt if needed"
log_info " 4. Configure firewall rules if needed"

View File

@@ -0,0 +1,173 @@
#!/usr/bin/env bash
set -euo pipefail
# Configure additional security features for Nginx on VMID 2500
# - Rate limiting (limit_req_zone / limit_req injected into nginx.conf + site)
# - Firewall rules (iptables, best-effort, not persisted)
# - Security headers enhancement (Referrer-Policy, Permissions-Policy)
#
# All changes are applied inside LXC container $VMID via SSH + `pct exec`.
# The heredoc payloads use quoted delimiters, so their $variables are
# expanded remotely, not on this host.

# Load IP configuration (best-effort: the source may be absent, hence `|| true`)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# NOTE(review): redundant — errexit is already enabled by `set -euo pipefail`.
set -e
VMID=2500
# Fix: default the host so the script does not abort under `set -u` when
# ip-addresses.conf is missing (the `source` above is explicitly best-effort).
# Default matches the hard-coded value used by the sibling variant of this
# script.
PROXMOX_HOST="${PROXMOX_HOST_ML110:-192.168.11.10}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "Configuring additional security features for Nginx on VMID $VMID"
echo ""
# Configure rate limiting in Nginx
# SECURITY NOTE(review): plaintext SSH password embedded in the script and
# visible in `ps` output — prefer key-based auth or `sshpass -f`/SSHPASS env.
log_info "1. Configuring rate limiting..."
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- bash" <<'RATE_LIMIT_EOF'
# Add rate limiting configuration to nginx.conf
if ! grep -q "limit_req_zone" /etc/nginx/nginx.conf; then
# Add rate limiting zones before http block
sed -i '/^http {/i\\n# Rate limiting zones\nlimit_req_zone $binary_remote_addr zone=rpc_limit:10m rate=10r/s;\nlimit_req_zone $binary_remote_addr zone=rpc_burst:10m rate=50r/s;\nlimit_conn_zone $binary_remote_addr zone=conn_limit:10m;\n' /etc/nginx/nginx.conf
fi
# Update site configuration to use rate limiting
if [ -f /etc/nginx/sites-available/rpc-core ]; then
# Add rate limiting to HTTP RPC location
sed -i '/location \/ {/,/^ }/ {
/proxy_pass http:\/\/127.0.0.1:8545;/a\
\n # Rate limiting\n limit_req zone=rpc_limit burst=20 nodelay;\n limit_conn conn_limit 10;
}' /etc/nginx/sites-available/rpc-core
# Add rate limiting to WebSocket location
sed -i '/location \/ {/,/^ }/ {
/proxy_pass http:\/\/127.0.0.1:8546;/a\
\n # Rate limiting\n limit_req zone=rpc_burst burst=50 nodelay;\n limit_conn conn_limit 5;
}' /etc/nginx/sites-available/rpc-core
fi
# Test configuration
nginx -t
RATE_LIMIT_EOF
# NOTE(review): under `set -e` a failing ssh above terminates the script, so
# this else branch is effectively unreachable; kept for parity with siblings.
if [ $? -eq 0 ]; then
log_success "Rate limiting configured"
else
log_warn "Rate limiting configuration may need manual adjustment"
fi
# Configure firewall rules (if iptables is available; rules are best-effort
# and NOT persisted — see the closing note)
log_info ""
log_info "2. Configuring firewall rules..."
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- bash" <<'FIREWALL_EOF'
# Check if iptables is available
if command -v iptables >/dev/null 2>&1; then
# Allow HTTP
iptables -A INPUT -p tcp --dport 80 -j ACCEPT 2>/dev/null || true
# Allow HTTPS
iptables -A INPUT -p tcp --dport 443 -j ACCEPT 2>/dev/null || true
# Allow WebSocket HTTPS
iptables -A INPUT -p tcp --dport 8443 -j ACCEPT 2>/dev/null || true
# Allow Besu RPC (internal only)
iptables -A INPUT -p tcp -s 127.0.0.1 --dport 8545 -j ACCEPT 2>/dev/null || true
iptables -A INPUT -p tcp -s 127.0.0.1 --dport 8546 -j ACCEPT 2>/dev/null || true
# Allow Besu P2P (if needed)
iptables -A INPUT -p tcp --dport 30303 -j ACCEPT 2>/dev/null || true
# Allow Besu Metrics (internal only)
iptables -A INPUT -p tcp -s 127.0.0.1 --dport 9545 -j ACCEPT 2>/dev/null || true
echo "Firewall rules configured (may need to be persisted)"
else
echo "iptables not available, skipping firewall configuration"
fi
FIREWALL_EOF
log_success "Firewall rules configured"
# Enhance security headers (idempotent: guarded by the grep for
# Referrer-Policy already being present)
log_info ""
log_info "3. Enhancing security headers..."
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- bash" <<'SECURITY_EOF'
if [ -f /etc/nginx/sites-available/rpc-core ]; then
# Add additional security headers if not present
if ! grep -q "Referrer-Policy" /etc/nginx/sites-available/rpc-core; then
sed -i '/add_header X-XSS-Protection/a\
add_header Referrer-Policy "strict-origin-when-cross-origin" always;\
add_header Permissions-Policy "geolocation=(), microphone=(), camera=()" always;
' /etc/nginx/sites-available/rpc-core
fi
# Test configuration
nginx -t
fi
SECURITY_EOF
if [ $? -eq 0 ]; then
log_success "Security headers enhanced"
else
log_warn "Security headers may need manual adjustment"
fi
# Reload Nginx
log_info ""
log_info "4. Reloading Nginx..."
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- systemctl reload nginx"
if [ $? -eq 0 ]; then
log_success "Nginx reloaded successfully"
else
log_error "Failed to reload Nginx"
exit 1
fi
# Verify configuration
log_info ""
log_info "5. Verifying configuration..."
if sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- nginx -t 2>&1 | grep -q 'successful'"; then
log_success "Nginx configuration is valid"
else
log_error "Nginx configuration test failed"
exit 1
fi
# Test rate limiting (smoke test only: confirms the endpoint still answers)
log_info ""
log_info "6. Testing rate limiting..."
RATE_TEST=$(sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- timeout 2 curl -k -s -X POST https://localhost:443 \
-H 'Content-Type: application/json' \
-d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}' 2>&1 || echo 'TEST'")
if echo "$RATE_TEST" | grep -q "result\|jsonrpc"; then
log_success "RPC endpoint still responding (rate limiting active)"
else
log_warn "Rate limiting test inconclusive"
fi
echo ""
log_success "Security configuration complete!"
echo ""
log_info "Configuration Summary:"
log_info " ✓ Rate limiting: 10 req/s (burst: 20) for HTTP RPC"
log_info " ✓ Rate limiting: 50 req/s (burst: 50) for WebSocket RPC"
log_info " ✓ Connection limiting: 10 connections per IP (HTTP), 5 (WebSocket)"
log_info " ✓ Firewall rules: Configured for ports 80, 443, 8443"
log_info " ✓ Enhanced security headers: Added"
echo ""
log_info "Note: Firewall rules may need to be persisted (iptables-save)"

View File

@@ -0,0 +1,167 @@
#!/usr/bin/env bash
set -euo pipefail
# Configure additional security features for Nginx on VMID 2500
# - Rate limiting (limit_req_zone / limit_req injected into nginx.conf + site)
# - Firewall rules (iptables, best-effort, not persisted)
# - Security headers enhancement (Referrer-Policy, Permissions-Policy)
#
# All changes are applied inside LXC container $VMID via SSH + `pct exec`.
# The heredoc payloads use quoted delimiters, so their $variables are
# expanded remotely, not on this host.
#
# NOTE(review): redundant — errexit is already enabled by `set -euo pipefail`.
set -e
VMID=2500
PROXMOX_HOST="192.168.11.10"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_info "Configuring additional security features for Nginx on VMID $VMID"
echo ""
# Configure rate limiting in Nginx
# SECURITY NOTE(review): plaintext SSH password embedded in the script and
# visible in `ps` output — prefer key-based auth or `sshpass -f`/SSHPASS env.
log_info "1. Configuring rate limiting..."
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- bash" <<'RATE_LIMIT_EOF'
# Add rate limiting configuration to nginx.conf
if ! grep -q "limit_req_zone" /etc/nginx/nginx.conf; then
# Add rate limiting zones before http block
sed -i '/^http {/i\\n# Rate limiting zones\nlimit_req_zone $binary_remote_addr zone=rpc_limit:10m rate=10r/s;\nlimit_req_zone $binary_remote_addr zone=rpc_burst:10m rate=50r/s;\nlimit_conn_zone $binary_remote_addr zone=conn_limit:10m;\n' /etc/nginx/nginx.conf
fi
# Update site configuration to use rate limiting
if [ -f /etc/nginx/sites-available/rpc-core ]; then
# Add rate limiting to HTTP RPC location
sed -i '/location \/ {/,/^ }/ {
/proxy_pass http:\/\/127.0.0.1:8545;/a\
\n # Rate limiting\n limit_req zone=rpc_limit burst=20 nodelay;\n limit_conn conn_limit 10;
}' /etc/nginx/sites-available/rpc-core
# Add rate limiting to WebSocket location
sed -i '/location \/ {/,/^ }/ {
/proxy_pass http:\/\/127.0.0.1:8546;/a\
\n # Rate limiting\n limit_req zone=rpc_burst burst=50 nodelay;\n limit_conn conn_limit 5;
}' /etc/nginx/sites-available/rpc-core
fi
# Test configuration
nginx -t
RATE_LIMIT_EOF
# NOTE(review): under `set -e` a failing ssh above terminates the script, so
# this else branch is effectively unreachable; kept as-is.
if [ $? -eq 0 ]; then
log_success "Rate limiting configured"
else
log_warn "Rate limiting configuration may need manual adjustment"
fi
# Configure firewall rules (if iptables is available; rules are best-effort
# and NOT persisted — see the closing note)
log_info ""
log_info "2. Configuring firewall rules..."
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- bash" <<'FIREWALL_EOF'
# Check if iptables is available
if command -v iptables >/dev/null 2>&1; then
# Allow HTTP
iptables -A INPUT -p tcp --dport 80 -j ACCEPT 2>/dev/null || true
# Allow HTTPS
iptables -A INPUT -p tcp --dport 443 -j ACCEPT 2>/dev/null || true
# Allow WebSocket HTTPS
iptables -A INPUT -p tcp --dport 8443 -j ACCEPT 2>/dev/null || true
# Allow Besu RPC (internal only)
iptables -A INPUT -p tcp -s 127.0.0.1 --dport 8545 -j ACCEPT 2>/dev/null || true
iptables -A INPUT -p tcp -s 127.0.0.1 --dport 8546 -j ACCEPT 2>/dev/null || true
# Allow Besu P2P (if needed)
iptables -A INPUT -p tcp --dport 30303 -j ACCEPT 2>/dev/null || true
# Allow Besu Metrics (internal only)
iptables -A INPUT -p tcp -s 127.0.0.1 --dport 9545 -j ACCEPT 2>/dev/null || true
echo "Firewall rules configured (may need to be persisted)"
else
echo "iptables not available, skipping firewall configuration"
fi
FIREWALL_EOF
log_success "Firewall rules configured"
# Enhance security headers (idempotent: guarded by the grep for
# Referrer-Policy already being present)
log_info ""
log_info "3. Enhancing security headers..."
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- bash" <<'SECURITY_EOF'
if [ -f /etc/nginx/sites-available/rpc-core ]; then
# Add additional security headers if not present
if ! grep -q "Referrer-Policy" /etc/nginx/sites-available/rpc-core; then
sed -i '/add_header X-XSS-Protection/a\
add_header Referrer-Policy "strict-origin-when-cross-origin" always;\
add_header Permissions-Policy "geolocation=(), microphone=(), camera=()" always;
' /etc/nginx/sites-available/rpc-core
fi
# Test configuration
nginx -t
fi
SECURITY_EOF
if [ $? -eq 0 ]; then
log_success "Security headers enhanced"
else
log_warn "Security headers may need manual adjustment"
fi
# Reload Nginx
log_info ""
log_info "4. Reloading Nginx..."
sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- systemctl reload nginx"
if [ $? -eq 0 ]; then
log_success "Nginx reloaded successfully"
else
log_error "Failed to reload Nginx"
exit 1
fi
# Verify configuration
log_info ""
log_info "5. Verifying configuration..."
if sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- nginx -t 2>&1 | grep -q 'successful'"; then
log_success "Nginx configuration is valid"
else
log_error "Nginx configuration test failed"
exit 1
fi
# Test rate limiting (smoke test only: confirms the endpoint still answers)
log_info ""
log_info "6. Testing rate limiting..."
RATE_TEST=$(sshpass -p 'L@kers2010' ssh -o StrictHostKeyChecking=no root@${PROXMOX_HOST} \
"pct exec $VMID -- timeout 2 curl -k -s -X POST https://localhost:443 \
-H 'Content-Type: application/json' \
-d '{\"jsonrpc\":\"2.0\",\"method\":\"eth_blockNumber\",\"params\":[],\"id\":1}' 2>&1 || echo 'TEST'")
if echo "$RATE_TEST" | grep -q "result\|jsonrpc"; then
log_success "RPC endpoint still responding (rate limiting active)"
else
log_warn "Rate limiting test inconclusive"
fi
echo ""
log_success "Security configuration complete!"
echo ""
log_info "Configuration Summary:"
log_info " ✓ Rate limiting: 10 req/s (burst: 20) for HTTP RPC"
log_info " ✓ Rate limiting: 50 req/s (burst: 50) for WebSocket RPC"
log_info " ✓ Connection limiting: 10 connections per IP (HTTP), 5 (WebSocket)"
log_info " ✓ Firewall rules: Configured for ports 80, 443, 8443"
log_info " ✓ Enhanced security headers: Added"
echo ""
log_info "Note: Firewall rules may need to be persisted (iptables-save)"

View File

@@ -0,0 +1,177 @@
#!/usr/bin/env bash
# Configure Oracle Publisher Service on VMID 3500
# Usage: ./scripts/configure-oracle-publisher-service.sh [private-key]
#
# Writes /opt/oracle-publisher/.env inside the container, installs
# oracle_publisher.py (if present locally) and creates a systemd unit.
# The service is configured but NOT started by this script.
set -euo pipefail
# Load IP configuration (best-effort; RPC endpoints fall back to defaults)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
VMID=3500
# SECURITY NOTE(review): the key may arrive via argv ($1), which is visible in
# shell history and `ps`; prefer the DEPLOYER_PRIVATE_KEY env var path.
PRIVATE_KEY="${1:-${DEPLOYER_PRIVATE_KEY:-}}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
echo "========================================="
echo "Configure Oracle Publisher Service"
echo "========================================="
echo ""
# Check if private key is provided
if [ -z "$PRIVATE_KEY" ]; then
log_warn "No private key provided"
log_info "The service will be configured but won't start until PRIVATE_KEY is set in .env"
fi
log_info "VMID: $VMID"
log_info "Proxmox Host: $PROXMOX_HOST"
echo ""
# Test SSH connection (key-based, non-interactive)
log_info "Testing SSH connection..."
if ! ssh -o BatchMode=yes -o ConnectTimeout=5 "root@$PROXMOX_HOST" exit 2>/dev/null; then
log_error "SSH connection failed"
exit 1
fi
log_success "SSH connection successful"
# Check if container exists
log_info "Checking if container $VMID exists..."
if ! ssh "root@$PROXMOX_HOST" "pct status $VMID 2>/dev/null" >/dev/null 2>&1; then
log_error "Container $VMID does not exist"
exit 1
fi
log_success "Container $VMID exists"
echo ""
# Fix .env file
# NOTE: the outer heredoc delimiter (EOF) is UNQUOTED, so ${RPC_ALLTRA_1:-...}
# and the $(if ...) substitution expand LOCALLY before the script is sent; the
# quoted inner 'ENVEOF' only stops a second expansion on the remote side.
log_info "Configuring .env file..."
ssh "root@$PROXMOX_HOST" "pct exec $VMID -- bash" << EOF
cat > /opt/oracle-publisher/.env << 'ENVEOF'
# Oracle Publisher Configuration
RPC_URL=http://${RPC_ALLTRA_1:-192.168.11.250}:8545
WS_URL=ws://${RPC_ALLTRA_1:-192.168.11.250}:8546
CHAIN_ID=138
# Oracle Contract Addresses
AGGREGATOR_ADDRESS=0x99b3511a2d315a497c8112c1fdd8d508d4b1e506
ORACLE_ADDRESS=0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6
# Private Key (must be transmitter account)
$(if [ -n "$PRIVATE_KEY" ]; then echo "PRIVATE_KEY=$PRIVATE_KEY"; else echo "# PRIVATE_KEY=0x..."; fi)
# Update Configuration
UPDATE_INTERVAL=60
HEARTBEAT_INTERVAL=60
DEVIATION_THRESHOLD=0.5
# Data Sources (CoinGecko)
DATA_SOURCE_1_URL=https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd
DATA_SOURCE_1_PARSER=coingecko
DATA_SOURCE_2_URL=https://api.binance.com/api/v3/ticker/price?symbol=ETHUSDT
DATA_SOURCE_2_PARSER=binance
# Metrics
METRICS_PORT=8000
METRICS_ENABLED=true
ENVEOF
chown oracle:oracle /opt/oracle-publisher/.env
chmod 600 /opt/oracle-publisher/.env
echo "✓ .env file configured"
EOF
log_success ".env file configured"
# Copy oracle_publisher.py if it doesn't exist
# NOTE(review): SCRIPT_DIR/PROJECT_ROOT are recomputed here — duplicates the
# identical assignments at the top of the file.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
ORACLE_PY="$PROJECT_ROOT/smom-dbis-138/services/oracle-publisher/oracle_publisher.py"
if [ -f "$ORACLE_PY" ]; then
log_info "Copying oracle_publisher.py..."
scp "$ORACLE_PY" "root@$PROXMOX_HOST:/tmp/oracle_publisher.py" >/dev/null 2>&1
ssh "root@$PROXMOX_HOST" "pct exec $VMID -- cp /tmp/oracle_publisher.py /opt/oracle-publisher/oracle_publisher.py && chown oracle:oracle /opt/oracle-publisher/oracle_publisher.py && chmod 755 /opt/oracle-publisher/oracle_publisher.py" 2>/dev/null
log_success "oracle_publisher.py copied"
else
log_warn "oracle_publisher.py not found at $ORACLE_PY"
fi
# Create systemd service (quoted 'EOF': the unit text is sent verbatim)
log_info "Creating systemd service..."
ssh "root@$PROXMOX_HOST" "pct exec $VMID -- bash" << 'EOF'
cat > /etc/systemd/system/oracle-publisher.service << 'SERVICEEOF'
[Unit]
Description=Oracle Publisher Service
After=network.target
Wants=network-online.target
[Service]
Type=simple
User=oracle
Group=oracle
WorkingDirectory=/opt/oracle-publisher
Environment="PATH=/opt/oracle-publisher/venv/bin:/usr/local/bin:/usr/bin:/bin"
# Load environment
EnvironmentFile=-/opt/oracle-publisher/.env
# ExecStart
ExecStart=/opt/oracle-publisher/venv/bin/python /opt/oracle-publisher/oracle_publisher.py
# Restart
Restart=always
RestartSec=10
# Security
NoNewPrivileges=true
PrivateTmp=true
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=oracle-publisher
[Install]
WantedBy=multi-user.target
SERVICEEOF
systemctl daemon-reload
echo "✓ Systemd service created"
EOF
log_success "Systemd service created"
echo ""
# Show status (informational only; the service is not started here)
log_info "Service status:"
ssh "root@$PROXMOX_HOST" "pct exec $VMID -- systemctl status oracle-publisher.service --no-pager 2>&1 | head -15" 2>&1 || log_info "Service not running yet"
echo ""
log_success "========================================="
log_success "Configuration Complete!"
log_success "========================================="
echo ""
log_info "Next steps:"
if [ -z "$PRIVATE_KEY" ]; then
log_warn " 1. Set PRIVATE_KEY in /opt/oracle-publisher/.env (must be transmitter account)"
fi
log_info " 2. Start service: ssh root@$PROXMOX_HOST \"pct exec $VMID -- systemctl start oracle-publisher\""
log_info " 3. Enable service: ssh root@$PROXMOX_HOST \"pct exec $VMID -- systemctl enable oracle-publisher\""
log_info " 4. Check logs: ssh root@$PROXMOX_HOST \"pct exec $VMID -- journalctl -u oracle-publisher -f\""
echo ""

View File

@@ -0,0 +1,171 @@
#!/usr/bin/env bash
# Configure Oracle Publisher Service on VMID 3500
# Usage: ./scripts/configure-oracle-publisher-service.sh [private-key]
#
# Writes /opt/oracle-publisher/.env inside the container, installs
# oracle_publisher.py (if present locally) and creates a systemd unit.
# The service is configured but NOT started by this script.
set -euo pipefail
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.10}"
VMID=3500
# SECURITY NOTE(review): the key may arrive via argv ($1), which is visible in
# shell history and `ps`; prefer the DEPLOYER_PRIVATE_KEY env var path.
PRIVATE_KEY="${1:-${DEPLOYER_PRIVATE_KEY:-}}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
echo "========================================="
echo "Configure Oracle Publisher Service"
echo "========================================="
echo ""
# Check if private key is provided
if [ -z "$PRIVATE_KEY" ]; then
log_warn "No private key provided"
log_info "The service will be configured but won't start until PRIVATE_KEY is set in .env"
fi
log_info "VMID: $VMID"
log_info "Proxmox Host: $PROXMOX_HOST"
echo ""
# Test SSH connection (key-based, non-interactive)
log_info "Testing SSH connection..."
if ! ssh -o BatchMode=yes -o ConnectTimeout=5 "root@$PROXMOX_HOST" exit 2>/dev/null; then
log_error "SSH connection failed"
exit 1
fi
log_success "SSH connection successful"
# Check if container exists
log_info "Checking if container $VMID exists..."
if ! ssh "root@$PROXMOX_HOST" "pct status $VMID 2>/dev/null" >/dev/null 2>&1; then
log_error "Container $VMID does not exist"
exit 1
fi
log_success "Container $VMID exists"
echo ""
# Fix .env file
# NOTE: the outer heredoc delimiter (EOF) is UNQUOTED, so the $(if ...)
# substitution expands LOCALLY before the script is sent; the quoted inner
# 'ENVEOF' only stops a second expansion on the remote side.
log_info "Configuring .env file..."
ssh "root@$PROXMOX_HOST" "pct exec $VMID -- bash" << EOF
cat > /opt/oracle-publisher/.env << 'ENVEOF'
# Oracle Publisher Configuration
RPC_URL=http://192.168.11.250:8545
WS_URL=ws://192.168.11.250:8546
CHAIN_ID=138
# Oracle Contract Addresses
AGGREGATOR_ADDRESS=0x99b3511a2d315a497c8112c1fdd8d508d4b1e506
ORACLE_ADDRESS=0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6
# Private Key (must be transmitter account)
$(if [ -n "$PRIVATE_KEY" ]; then echo "PRIVATE_KEY=$PRIVATE_KEY"; else echo "# PRIVATE_KEY=0x..."; fi)
# Update Configuration
UPDATE_INTERVAL=60
HEARTBEAT_INTERVAL=60
DEVIATION_THRESHOLD=0.5
# Data Sources (CoinGecko)
DATA_SOURCE_1_URL=https://api.coingecko.com/api/v3/simple/price?ids=ethereum&vs_currencies=usd
DATA_SOURCE_1_PARSER=coingecko
DATA_SOURCE_2_URL=https://api.binance.com/api/v3/ticker/price?symbol=ETHUSDT
DATA_SOURCE_2_PARSER=binance
# Metrics
METRICS_PORT=8000
METRICS_ENABLED=true
ENVEOF
chown oracle:oracle /opt/oracle-publisher/.env
chmod 600 /opt/oracle-publisher/.env
echo "✓ .env file configured"
EOF
log_success ".env file configured"
# Copy oracle_publisher.py if it doesn't exist
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
ORACLE_PY="$PROJECT_ROOT/smom-dbis-138/services/oracle-publisher/oracle_publisher.py"
if [ -f "$ORACLE_PY" ]; then
log_info "Copying oracle_publisher.py..."
scp "$ORACLE_PY" "root@$PROXMOX_HOST:/tmp/oracle_publisher.py" >/dev/null 2>&1
ssh "root@$PROXMOX_HOST" "pct exec $VMID -- cp /tmp/oracle_publisher.py /opt/oracle-publisher/oracle_publisher.py && chown oracle:oracle /opt/oracle-publisher/oracle_publisher.py && chmod 755 /opt/oracle-publisher/oracle_publisher.py" 2>/dev/null
log_success "oracle_publisher.py copied"
else
log_warn "oracle_publisher.py not found at $ORACLE_PY"
fi
# Create systemd service (quoted 'EOF': the unit text is sent verbatim)
log_info "Creating systemd service..."
ssh "root@$PROXMOX_HOST" "pct exec $VMID -- bash" << 'EOF'
cat > /etc/systemd/system/oracle-publisher.service << 'SERVICEEOF'
[Unit]
Description=Oracle Publisher Service
After=network.target
Wants=network-online.target
[Service]
Type=simple
User=oracle
Group=oracle
WorkingDirectory=/opt/oracle-publisher
Environment="PATH=/opt/oracle-publisher/venv/bin:/usr/local/bin:/usr/bin:/bin"
# Load environment
EnvironmentFile=-/opt/oracle-publisher/.env
# ExecStart
ExecStart=/opt/oracle-publisher/venv/bin/python /opt/oracle-publisher/oracle_publisher.py
# Restart
Restart=always
RestartSec=10
# Security
NoNewPrivileges=true
PrivateTmp=true
# Logging
StandardOutput=journal
StandardError=journal
SyslogIdentifier=oracle-publisher
[Install]
WantedBy=multi-user.target
SERVICEEOF
systemctl daemon-reload
echo "✓ Systemd service created"
EOF
log_success "Systemd service created"
echo ""
# Show status (informational only; the service is not started here)
log_info "Service status:"
ssh "root@$PROXMOX_HOST" "pct exec $VMID -- systemctl status oracle-publisher.service --no-pager 2>&1 | head -15" 2>&1 || log_info "Service not running yet"
echo ""
log_success "========================================="
log_success "Configuration Complete!"
log_success "========================================="
echo ""
log_info "Next steps:"
if [ -z "$PRIVATE_KEY" ]; then
log_warn " 1. Set PRIVATE_KEY in /opt/oracle-publisher/.env (must be transmitter account)"
fi
log_info " 2. Start service: ssh root@$PROXMOX_HOST \"pct exec $VMID -- systemctl start oracle-publisher\""
log_info " 3. Enable service: ssh root@$PROXMOX_HOST \"pct exec $VMID -- systemctl enable oracle-publisher\""
log_info " 4. Check logs: ssh root@$PROXMOX_HOST \"pct exec $VMID -- journalctl -u oracle-publisher -f\""
echo ""

View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Configure service dependencies (PostgreSQL / Redis endpoints) for the
# Order-service containers on the R630-01 Proxmox node.
#
# For each Order container it rewrites DATABASE_URL / REDIS_URL / DB_HOST /
# REDIS_HOST entries in any .env file found under /opt, /home or /root.
set -euo pipefail

# Load IP configuration (best-effort: hard-coded defaults below cover a
# missing ip-addresses.conf).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Fix: default NODE_IP so the script does not abort under `set -u` when the
# config file is absent (every other IP variable already had a default).
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"

log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }

echo "Configuring Order service dependencies..."

# Order service dependency IPs
POSTGRES_IP="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
REDIS_IP="${ORDER_REDIS_IP:-192.168.11.38}"

for vmid in 10030 10040 10050 10060 10070 10080 10090 10091 10092; do
    log_info "Configuring dependencies for CT $vmid..."
    # Fix: use non-interactive `pct exec ... -- bash` (as the other deployment
    # scripts in this repo do) instead of `pct enter`, which attaches an
    # interactive console and does not reliably run a heredoc-fed script.
    # The heredoc delimiter is quoted, but the payload sits inside the outer
    # double quotes, so ${POSTGRES_IP}/${REDIS_IP}/$vmid expand locally.
    if ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct exec $vmid -- bash <<'CONFIG_EOF'
# Update .env files with the current PostgreSQL/Redis endpoints
find /opt /home /root -name \".env\" -type f 2>/dev/null | while read envfile; do
    [ -r \"\$envfile\" ] && {
        sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://order_user:order_password@${POSTGRES_IP}:5432/order_db|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${REDIS_IP}:6379|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|DB_HOST=.*|DB_HOST=${POSTGRES_IP}|g\" \"\$envfile\" 2>/dev/null || true
        sed -i \"s|REDIS_HOST=.*|REDIS_HOST=${REDIS_IP}|g\" \"\$envfile\" 2>/dev/null || true
    }
done
echo \"Dependencies configured for CT $vmid\"
CONFIG_EOF
"; then
        log_success "Dependencies configured for CT $vmid"
    else
        log_info "Configuration updated for CT $vmid"
    fi
done
# Removed: a second find/sed pass over *config* files that substituted each IP
# for itself (s|IP|IP|g) — a no-op that only added runtime.
echo "Order service dependencies configured!"

View File

@@ -0,0 +1,42 @@
#!/bin/bash
# Configure Order-service container dependencies (hardcoded-IP variant of
# the env-driven script).  SSHes to the Proxmox node and, via `pct enter`,
# rewrites DATABASE_URL/REDIS_URL/DB_HOST/REDIS_HOST in any .env files
# found inside each container so they point at the IPs below.
set -euo pipefail
# Configure Service Dependencies for Order Services
NODE_IP="192.168.11.11"
log_info() { echo -e "\033[0;32m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }
echo "Configuring Order service dependencies..."
# Order service IPs
POSTGRES_IP="192.168.11.44"
REDIS_IP="192.168.11.38"
# One pass per Order-service container VMID.
for vmid in 10030 10040 10050 10060 10070 10080 10090 10091 10092; do
log_info "Configuring dependencies for CT $vmid..."
# The heredoc delimiter is quoted, so ${POSTGRES_IP}/${REDIS_IP} expand
# locally (inside the double-quoted ssh argument) while \$envfile is left
# for the container shell to expand.
ssh -o ConnectTimeout=10 -o StrictHostKeyChecking=no root@${NODE_IP} "pct enter $vmid <<'CONFIG_EOF'
# Update .env files
find /opt /home /root -name \".env\" -type f 2>/dev/null | while read envfile; do
[ -r \"\$envfile\" ] && {
sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://order_user:order_password@${POSTGRES_IP}:5432/order_db|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${REDIS_IP}:6379|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|DB_HOST=.*|DB_HOST=${POSTGRES_IP}|g\" \"\$envfile\" 2>/dev/null || true
sed -i \"s|REDIS_HOST=.*|REDIS_HOST=${REDIS_IP}|g\" \"\$envfile\" 2>/dev/null || true
}
done
# Update config files
# NOTE(review): these substitutions replace the NEW IPs with themselves (a
# no-op) -- presumably the source pattern should be the OLD addresses;
# confirm the intended old IPs before relying on this step.
find /opt /home /root -name \"*.config.*\" -o -name \"*config*.json\" -o -name \"*config*.yaml\" -o -name \"*config*.yml\" 2>/dev/null | while read configfile; do
[ -r \"\$configfile\" ] && {
sed -i \"s|${POSTGRES_IP}|${POSTGRES_IP}|g\" \"\$configfile\" 2>/dev/null || true
sed -i \"s|${REDIS_IP}|${REDIS_IP}|g\" \"\$configfile\" 2>/dev/null || true
}
done
echo \"Dependencies configured for CT $vmid\"
CONFIG_EOF
" && log_success "Dependencies configured for CT $vmid" || log_info "Configuration updated for CT $vmid"
done
echo "Order service dependencies configured!"

View File

@@ -0,0 +1,110 @@
#!/bin/bash
# Configure persistent network using Proxmox network config + startup script
# (Method 2).  For each container below this script:
#   1. sets onboot=1 in the Proxmox config,
#   2. installs an /etc/rc.local that re-applies the static IP on boot,
#   3. best-effort writes a systemd-networkd unit with the same settings.
# Requires SSH root access to the Proxmox node.
# Env overrides: PROXMOX_HOST_R630_01, NETWORK_GATEWAY, per-service IP vars.
set -uo pipefail
# Fix: default the node IP so `set -u` does not abort when
# PROXMOX_HOST_R630_01 is not exported (expansion was unguarded).
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
# Container IP mappings (VMID -> static IP; env vars may override).
# Fix: collapsed redundant self-nested defaults like
# ${IP_SERVICE_50:-${IP_SERVICE_50:-...}} down to a single ${VAR:-default}.
declare -A container_ips=(
["10000"]="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
["10001"]="${ORDER_POSTGRES_REPLICA:-192.168.11.45}"
["10020"]="${ORDER_REDIS_IP:-192.168.11.38}"
["10030"]="192.168.11.40"
["10040"]="192.168.11.41"
["10050"]="192.168.11.49"
["10060"]="192.168.11.42"
["10070"]="${IP_SERVICE_50:-192.168.11.50}"
["10080"]="192.168.11.43"
["10090"]="${IP_SERVICE_36:-192.168.11.36}"
["10091"]="${IP_SERVICE_35:-192.168.11.35}"
["10092"]="${IP_MIM_WEB:-192.168.11.37}"
["10200"]="192.168.11.46"
["10201"]="192.168.11.47"
["10202"]="192.168.11.48"
["10210"]="192.168.11.39"
["10230"]="${IP_SERVICE_51:-192.168.11.51}"
["10232"]="192.168.11.52"
)
echo "═══════════════════════════════════════════════════════════"
echo "Configuring Persistent Network Settings (Method 2)"
echo "═══════════════════════════════════════════════════════════"
echo ""
SUCCESS=0
FAILED=0
for vmid in "${!container_ips[@]}"; do
ip="${container_ips[$vmid]}"
# Resolve the container hostname for nicer logging; fall back to CT<id>.
hostname=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct config $vmid 2>/dev/null | grep '^hostname:' | sed 's/^hostname: //'" || echo "CT$vmid")
echo "Configuring CT $vmid ($hostname) - $ip..."
# Method 1: Ensure Proxmox network config is correct and onboot=1
echo " Setting onboot=1..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct set $vmid --onboot 1" 2>&1 | grep -v "update VM" || true
# Method 2: Create network startup script in /etc/rc.local
startup_script="#!/bin/bash
# Network configuration script - auto-generated
ip link set eth0 up 2>/dev/null || true
ip addr add ${ip}/24 dev eth0 2>/dev/null || true
ip route add default via ${GATEWAY} dev eth0 2>/dev/null || true
"
# Stage the script in /tmp inside the container, then promote it.
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'echo \"${startup_script}\" > /tmp/network-setup.sh && chmod +x /tmp/network-setup.sh'" 2>&1; then
# Copy to /etc/rc.local or create systemd service
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'cat /tmp/network-setup.sh > /etc/rc.local && chmod +x /etc/rc.local && echo \"exit 0\" >> /etc/rc.local'" 2>&1; then
echo " ✅ Startup script created"
# Also try to create a systemd-networkd config.  (Fix: dropped the unused
# networkd_config variable; the file is written via the echo chain below.)
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'mkdir -p /etc/systemd/network && echo \"[Match]\" > /etc/systemd/network/10-eth0.network && echo \"Name=eth0\" >> /etc/systemd/network/10-eth0.network && echo \"\" >> /etc/systemd/network/10-eth0.network && echo \"[Network]\" >> /etc/systemd/network/10-eth0.network && echo \"Address=${ip}/24\" >> /etc/systemd/network/10-eth0.network && echo \"Gateway=${GATEWAY}\" >> /etc/systemd/network/10-eth0.network && echo \"DNS=${NETWORK_GATEWAY:-192.168.11.1}\" >> /etc/systemd/network/10-eth0.network'" 2>&1; then
echo " ✅ systemd-networkd config created"
((SUCCESS++))
else
echo " ⚠️ Startup script created but systemd-networkd config failed"
((SUCCESS++))
fi
else
echo " ❌ Failed to create startup script"
((FAILED++))
fi
else
echo " ❌ Failed to create network setup script"
((FAILED++))
fi
echo ""
sleep 1
done
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo " Success: $SUCCESS"
echo " Failed: $FAILED"
echo " Total: ${#container_ips[@]}"
echo ""
echo "Network will be configured on container boot via:"
echo " 1. Proxmox network configuration (onboot=1)"
echo " 2. /etc/rc.local startup script"
echo " 3. systemd-networkd configuration (if available)"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,110 @@
#!/bin/bash
# Configure persistent network using Proxmox network config + startup script
# (Method 2, hardcoded-IP variant).  For each container below:
#   1. sets onboot=1 in the Proxmox config,
#   2. installs an /etc/rc.local that re-applies the static IP on boot,
#   3. best-effort writes a systemd-networkd unit with the same settings.
# Requires SSH root access to the Proxmox node.
set -uo pipefail
NODE_IP="192.168.11.11"
GATEWAY="192.168.11.1"
# Container IP mappings (VMID -> static IP; a few allow env overrides)
declare -A container_ips=(
["10000"]="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
["10001"]="${ORDER_POSTGRES_REPLICA:-192.168.11.45}"
["10020"]="${ORDER_REDIS_IP:-192.168.11.38}"
["10030"]="192.168.11.40"
["10040"]="192.168.11.41"
["10050"]="192.168.11.49"
["10060"]="192.168.11.42"
["10070"]="192.168.11.50"
["10080"]="192.168.11.43"
["10090"]="192.168.11.36"
["10091"]="192.168.11.35"
["10092"]="192.168.11.37"
["10200"]="192.168.11.46"
["10201"]="192.168.11.47"
["10202"]="192.168.11.48"
["10210"]="192.168.11.39"
["10230"]="192.168.11.51"
["10232"]="192.168.11.52"
)
echo "═══════════════════════════════════════════════════════════"
echo "Configuring Persistent Network Settings (Method 2)"
echo "═══════════════════════════════════════════════════════════"
echo ""
SUCCESS=0
FAILED=0
for vmid in "${!container_ips[@]}"; do
ip="${container_ips[$vmid]}"
# Resolve the container hostname for nicer logging; fall back to CT<id>.
hostname=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct config $vmid 2>/dev/null | grep '^hostname:' | sed 's/^hostname: //'" || echo "CT$vmid")
echo "Configuring CT $vmid ($hostname) - $ip..."
# Method 1: Ensure Proxmox network config is correct and onboot=1
echo " Setting onboot=1..."
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct set $vmid --onboot 1" 2>&1 | grep -v "update VM" || true
# Method 2: Create network startup script in /etc/rc.local
startup_script="#!/bin/bash
# Network configuration script - auto-generated
ip link set eth0 up 2>/dev/null || true
ip addr add ${ip}/24 dev eth0 2>/dev/null || true
ip route add default via ${GATEWAY} dev eth0 2>/dev/null || true
"
# Stage the script in /tmp inside the container, then promote it.
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'echo \"${startup_script}\" > /tmp/network-setup.sh && chmod +x /tmp/network-setup.sh'" 2>&1; then
# Copy to /etc/rc.local or create systemd service
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'cat /tmp/network-setup.sh > /etc/rc.local && chmod +x /etc/rc.local && echo \"exit 0\" >> /etc/rc.local'" 2>&1; then
echo " ✅ Startup script created"
# Also try to create a systemd-networkd config.  (Fix: dropped the unused
# networkd_config variable; the file is written via the echo chain below.)
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'mkdir -p /etc/systemd/network && echo \"[Match]\" > /etc/systemd/network/10-eth0.network && echo \"Name=eth0\" >> /etc/systemd/network/10-eth0.network && echo \"\" >> /etc/systemd/network/10-eth0.network && echo \"[Network]\" >> /etc/systemd/network/10-eth0.network && echo \"Address=${ip}/24\" >> /etc/systemd/network/10-eth0.network && echo \"Gateway=${GATEWAY}\" >> /etc/systemd/network/10-eth0.network && echo \"DNS=192.168.11.1\" >> /etc/systemd/network/10-eth0.network'" 2>&1; then
echo " ✅ systemd-networkd config created"
((SUCCESS++))
else
echo " ⚠️ Startup script created but systemd-networkd config failed"
((SUCCESS++))
fi
else
echo " ❌ Failed to create startup script"
((FAILED++))
fi
else
echo " ❌ Failed to create network setup script"
((FAILED++))
fi
echo ""
sleep 1
done
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo " Success: $SUCCESS"
echo " Failed: $FAILED"
echo " Total: ${#container_ips[@]}"
echo ""
echo "Network will be configured on container boot via:"
echo " 1. Proxmox network configuration (onboot=1)"
echo " 2. /etc/rc.local startup script"
echo " 3. systemd-networkd configuration (if available)"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,118 @@
#!/bin/bash
# Configure persistent network using a systemd service (works with
# unprivileged containers).  For each container below:
#   1. sets onboot=1 in the Proxmox config,
#   2. installs /usr/local/bin/configure-network.sh (re-applies static IP),
#   3. enables a systemd oneshot unit for it, falling back to a crontab
#      @reboot entry when the unit cannot be installed.
# Env overrides: PROXMOX_HOST_R630_01, NETWORK_GATEWAY, per-service IP vars.
set -uo pipefail
# Fix: default the node IP so `set -u` does not abort when
# PROXMOX_HOST_R630_01 is not exported (expansion was unguarded).
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
# Container IP mappings (VMID -> static IP; env vars may override).
# Fix: collapsed redundant self-nested defaults to a single ${VAR:-default}.
declare -A container_ips=(
["10000"]="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
["10001"]="${ORDER_POSTGRES_REPLICA:-192.168.11.45}"
["10020"]="${ORDER_REDIS_IP:-192.168.11.38}"
["10030"]="192.168.11.40"
["10040"]="192.168.11.41"
["10050"]="192.168.11.49"
["10060"]="192.168.11.42"
["10070"]="${IP_SERVICE_50:-192.168.11.50}"
["10080"]="192.168.11.43"
["10090"]="${IP_SERVICE_36:-192.168.11.36}"
["10091"]="${IP_SERVICE_35:-192.168.11.35}"
["10092"]="${IP_MIM_WEB:-192.168.11.37}"
["10200"]="192.168.11.46"
["10201"]="192.168.11.47"
["10202"]="192.168.11.48"
["10210"]="192.168.11.39"
["10230"]="${IP_SERVICE_51:-192.168.11.51}"
["10232"]="192.168.11.52"
)
echo "═══════════════════════════════════════════════════════════"
echo "Configuring Persistent Network via Systemd Service"
echo "═══════════════════════════════════════════════════════════"
echo ""
SUCCESS=0
FAILED=0
for vmid in "${!container_ips[@]}"; do
ip="${container_ips[$vmid]}"
# Resolve the container hostname for nicer logging; fall back to CT<id>.
hostname=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct config $vmid 2>/dev/null | grep '^hostname:' | sed 's/^hostname: //'" || echo "CT$vmid")
echo "Configuring CT $vmid ($hostname) - $ip..."
# Ensure onboot=1
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct set $vmid --onboot 1" 2>&1 | grep -v "update VM" || true
# Create network setup script in /usr/local/bin (usually writable)
script_content="#!/bin/bash
# Auto-configure network on boot
sleep 2
ip link set eth0 up 2>/dev/null || true
ip addr add ${ip}/24 dev eth0 2>/dev/null || true
ip route add default via ${GATEWAY} dev eth0 2>/dev/null || true
"
# Try to create script in /usr/local/bin
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'echo \"${script_content}\" > /usr/local/bin/configure-network.sh && chmod +x /usr/local/bin/configure-network.sh'" 2>&1; then
echo " ✅ Network script created"
# Create systemd service
service_content="[Unit]
Description=Configure Network Interface
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/local/bin/configure-network.sh
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
"
# Fix: judge success by the ssh/pct exit status.  The previous
# `ssh ... | grep -v "Created symlink"` made the branch depend on grep's
# status, which inverted the result whenever the only output was
# systemd's "Created symlink" notice (the normal success case).
if enable_out=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'echo \"${service_content}\" > /etc/systemd/system/configure-network.service && systemctl daemon-reload && systemctl enable configure-network.service'" 2>&1); then
printf '%s\n' "$enable_out" | grep -v "Created symlink" || true
echo " ✅ Systemd service created and enabled"
((SUCCESS++))
else
# Fallback: Add to existing startup mechanism
echo " ⚠️ Service creation failed, using alternative method"
# Try to create a cron @reboot job instead
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c '(crontab -l 2>/dev/null; echo \"@reboot /usr/local/bin/configure-network.sh\") | crontab -'" 2>&1; then
echo " ✅ Added to crontab @reboot"
((SUCCESS++))
else
echo " ⚠️ Using manual script only (run on boot via Proxmox hook)"
((SUCCESS++))
fi
fi
else
echo " ❌ Failed to create network script"
((FAILED++))
fi
echo ""
sleep 1
done
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo " Success: $SUCCESS"
echo " Failed: $FAILED"
echo " Total: ${#container_ips[@]}"
echo ""
echo "Network configuration methods applied:"
echo " 1. Proxmox network config (onboot=1) ✓"
echo " 2. Network setup script in /usr/local/bin ✓"
echo " 3. Systemd service or crontab @reboot ✓"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,118 @@
#!/bin/bash
# Configure persistent network using a systemd service (works with
# unprivileged containers; hardcoded-IP variant).  For each container:
#   1. sets onboot=1 in the Proxmox config,
#   2. installs /usr/local/bin/configure-network.sh (re-applies static IP),
#   3. enables a systemd oneshot unit for it, falling back to a crontab
#      @reboot entry when the unit cannot be installed.
set -uo pipefail
NODE_IP="192.168.11.11"
GATEWAY="192.168.11.1"
# Container IP mappings (VMID -> static IP)
declare -A container_ips=(
["10000"]="192.168.11.44"
["10001"]="192.168.11.45"
["10020"]="192.168.11.38"
["10030"]="192.168.11.40"
["10040"]="192.168.11.41"
["10050"]="192.168.11.49"
["10060"]="192.168.11.42"
["10070"]="192.168.11.50"
["10080"]="192.168.11.43"
["10090"]="192.168.11.36"
["10091"]="192.168.11.35"
["10092"]="192.168.11.37"
["10200"]="192.168.11.46"
["10201"]="192.168.11.47"
["10202"]="192.168.11.48"
["10210"]="192.168.11.39"
["10230"]="192.168.11.51"
["10232"]="192.168.11.52"
)
echo "═══════════════════════════════════════════════════════════"
echo "Configuring Persistent Network via Systemd Service"
echo "═══════════════════════════════════════════════════════════"
echo ""
SUCCESS=0
FAILED=0
for vmid in "${!container_ips[@]}"; do
ip="${container_ips[$vmid]}"
# Resolve the container hostname for nicer logging; fall back to CT<id>.
hostname=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct config $vmid 2>/dev/null | grep '^hostname:' | sed 's/^hostname: //'" || echo "CT$vmid")
echo "Configuring CT $vmid ($hostname) - $ip..."
# Ensure onboot=1
ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct set $vmid --onboot 1" 2>&1 | grep -v "update VM" || true
# Create network setup script in /usr/local/bin (usually writable)
script_content="#!/bin/bash
# Auto-configure network on boot
sleep 2
ip link set eth0 up 2>/dev/null || true
ip addr add ${ip}/24 dev eth0 2>/dev/null || true
ip route add default via ${GATEWAY} dev eth0 2>/dev/null || true
"
# Try to create script in /usr/local/bin
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'echo \"${script_content}\" > /usr/local/bin/configure-network.sh && chmod +x /usr/local/bin/configure-network.sh'" 2>&1; then
echo " ✅ Network script created"
# Create systemd service
service_content="[Unit]
Description=Configure Network Interface
After=network-online.target
Wants=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/local/bin/configure-network.sh
RemainAfterExit=yes
[Install]
WantedBy=multi-user.target
"
# Fix: judge success by the ssh/pct exit status.  The previous
# `ssh ... | grep -v "Created symlink"` made the branch depend on grep's
# status, which inverted the result whenever the only output was
# systemd's "Created symlink" notice (the normal success case).
if enable_out=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'echo \"${service_content}\" > /etc/systemd/system/configure-network.service && systemctl daemon-reload && systemctl enable configure-network.service'" 2>&1); then
printf '%s\n' "$enable_out" | grep -v "Created symlink" || true
echo " ✅ Systemd service created and enabled"
((SUCCESS++))
else
# Fallback: Add to existing startup mechanism
echo " ⚠️ Service creation failed, using alternative method"
# Try to create a cron @reboot job instead
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c '(crontab -l 2>/dev/null; echo \"@reboot /usr/local/bin/configure-network.sh\") | crontab -'" 2>&1; then
echo " ✅ Added to crontab @reboot"
((SUCCESS++))
else
echo " ⚠️ Using manual script only (run on boot via Proxmox hook)"
((SUCCESS++))
fi
fi
else
echo " ❌ Failed to create network script"
((FAILED++))
fi
echo ""
sleep 1
done
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo " Success: $SUCCESS"
echo " Failed: $FAILED"
echo " Total: ${#container_ips[@]}"
echo ""
echo "Network configuration methods applied:"
echo " 1. Proxmox network config (onboot=1) ✓"
echo " 2. Network setup script in /usr/local/bin ✓"
echo " 3. Systemd service or crontab @reboot ✓"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,100 @@
#!/bin/bash
# Configure persistent network settings for all reassigned containers using
# systemd-networkd.  Writes /etc/systemd/network/10-eth0.network inside each
# container, then enables and starts systemd-networkd.
# Env overrides: PROXMOX_HOST_R630_01, NETWORK_GATEWAY, per-service IP vars.
set -uo pipefail
# Fix: default the node IP so `set -u` does not abort when
# PROXMOX_HOST_R630_01 is not exported (expansion was unguarded).
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
# Container IP mappings (VMID -> static IP; env vars may override).
# Fix: collapsed redundant self-nested defaults to a single ${VAR:-default}.
declare -A container_ips=(
["10000"]="${ORDER_POSTGRES_PRIMARY:-192.168.11.44}"
["10001"]="${ORDER_POSTGRES_REPLICA:-192.168.11.45}"
["10020"]="${ORDER_REDIS_IP:-192.168.11.38}"
["10030"]="192.168.11.40"
["10040"]="192.168.11.41"
["10050"]="192.168.11.49"
["10060"]="192.168.11.42"
["10070"]="${IP_SERVICE_50:-192.168.11.50}"
["10080"]="192.168.11.43"
["10090"]="${IP_SERVICE_36:-192.168.11.36}"
["10091"]="${IP_SERVICE_35:-192.168.11.35}"
["10092"]="${IP_MIM_WEB:-192.168.11.37}"
["10200"]="192.168.11.46"
["10201"]="192.168.11.47"
["10202"]="192.168.11.48"
["10210"]="192.168.11.39"
["10230"]="${IP_SERVICE_51:-192.168.11.51}"
["10232"]="192.168.11.52"
)
echo "═══════════════════════════════════════════════════════════"
echo "Configuring Persistent Network Settings"
echo "═══════════════════════════════════════════════════════════"
echo ""
SUCCESS=0
FAILED=0
for vmid in "${!container_ips[@]}"; do
ip="${container_ips[$vmid]}"
# Resolve the container hostname for nicer logging; fall back to CT<id>.
hostname=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct config $vmid 2>/dev/null | grep '^hostname:' | sed 's/^hostname: //'" || echo "CT$vmid")
echo "Configuring CT $vmid ($hostname) - $ip..."
# Write the systemd-networkd unit via a remote heredoc.  (Fix: dropped the
# unused config_content variable, which duplicated this content verbatim.)
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'mkdir -p /etc/systemd/network && cat > /etc/systemd/network/10-eth0.network << \"EOF\"
[Match]
Name=eth0
[Network]
Address=${ip}/24
Gateway=${GATEWAY}
DNS=${NETWORK_GATEWAY:-192.168.11.1}
DNS=8.8.8.8
DNS=1.1.1.1
EOF
'" 2>&1; then
echo " ✅ Network config file created"
# Fix: judge success by the ssh exit status.  The previous
# `ssh ... | grep -v "Created symlink"` made the branch depend on grep's
# status, which inverted the result whenever the only output was
# systemd's "Created symlink" notice (the normal success case).
if nd_out=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- systemctl enable systemd-networkd 2>&1 && pct exec $vmid -- systemctl start systemd-networkd 2>&1" 2>&1); then
printf '%s\n' "$nd_out" | grep -v "Created symlink" || true
echo " ✅ systemd-networkd enabled and started"
((SUCCESS++))
else
echo " ⚠️ Config created but systemd-networkd may not be running"
((SUCCESS++))
fi
else
echo " ❌ Failed to create network config"
((FAILED++))
fi
echo ""
sleep 1
done
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo " Success: $SUCCESS"
echo " Failed: $FAILED"
echo " Total: ${#container_ips[@]}"
echo ""
echo "Note: Network configuration will persist across container restarts"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,100 @@
#!/bin/bash
# Configure persistent network settings for all reassigned containers using
# systemd-networkd (hardcoded-IP variant).  Writes
# /etc/systemd/network/10-eth0.network inside each container, then enables
# and starts systemd-networkd.
set -uo pipefail
NODE_IP="192.168.11.11"
GATEWAY="192.168.11.1"
# Container IP mappings (VMID -> static IP)
declare -A container_ips=(
["10000"]="192.168.11.44"
["10001"]="192.168.11.45"
["10020"]="192.168.11.38"
["10030"]="192.168.11.40"
["10040"]="192.168.11.41"
["10050"]="192.168.11.49"
["10060"]="192.168.11.42"
["10070"]="192.168.11.50"
["10080"]="192.168.11.43"
["10090"]="192.168.11.36"
["10091"]="192.168.11.35"
["10092"]="192.168.11.37"
["10200"]="192.168.11.46"
["10201"]="192.168.11.47"
["10202"]="192.168.11.48"
["10210"]="192.168.11.39"
["10230"]="192.168.11.51"
["10232"]="192.168.11.52"
)
echo "═══════════════════════════════════════════════════════════"
echo "Configuring Persistent Network Settings"
echo "═══════════════════════════════════════════════════════════"
echo ""
SUCCESS=0
FAILED=0
for vmid in "${!container_ips[@]}"; do
ip="${container_ips[$vmid]}"
# Resolve the container hostname for nicer logging; fall back to CT<id>.
hostname=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct config $vmid 2>/dev/null | grep '^hostname:' | sed 's/^hostname: //'" || echo "CT$vmid")
echo "Configuring CT $vmid ($hostname) - $ip..."
# Write the systemd-networkd unit via a remote heredoc.  (Fix: dropped the
# unused config_content variable, which duplicated this content verbatim.)
if ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- sh -c 'mkdir -p /etc/systemd/network && cat > /etc/systemd/network/10-eth0.network << \"EOF\"
[Match]
Name=eth0
[Network]
Address=${ip}/24
Gateway=${GATEWAY}
DNS=192.168.11.1
DNS=8.8.8.8
DNS=1.1.1.1
EOF
'" 2>&1; then
echo " ✅ Network config file created"
# Fix: judge success by the ssh exit status.  The previous
# `ssh ... | grep -v "Created symlink"` made the branch depend on grep's
# status, which inverted the result whenever the only output was
# systemd's "Created symlink" notice (the normal success case).
if nd_out=$(ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no root@${NODE_IP} \
"pct exec $vmid -- systemctl enable systemd-networkd 2>&1 && pct exec $vmid -- systemctl start systemd-networkd 2>&1" 2>&1); then
printf '%s\n' "$nd_out" | grep -v "Created symlink" || true
echo " ✅ systemd-networkd enabled and started"
((SUCCESS++))
else
echo " ⚠️ Config created but systemd-networkd may not be running"
((SUCCESS++))
fi
else
echo " ❌ Failed to create network config"
((FAILED++))
fi
echo ""
sleep 1
done
echo "═══════════════════════════════════════════════════════════"
echo "Configuration Complete"
echo "═══════════════════════════════════════════════════════════"
echo " Success: $SUCCESS"
echo " Failed: $FAILED"
echo " Total: ${#container_ips[@]}"
echo ""
echo "Note: Network configuration will persist across container restarts"
echo "═══════════════════════════════════════════════════════════"

View File

@@ -0,0 +1,309 @@
#!/bin/bash
# Configure Phoenix Vault Cluster - Authentication, Policies, and Secret Paths
# Run this from local machine, connects via SSH to Vault container
#
# Phases:
#   1. enable AppRole authentication
#   2. write phoenix-api / phoenix-portal / phoenix-admin policies
#   3. create phoenix-api / phoenix-portal AppRole roles
#   4. enable KV v2 and seed placeholder secret paths
#   5. mint AppRole credentials and save them to a local 0600 file
#
# Required env: VAULT_TOKEN (Vault root/admin token)
# Optional env: PROXMOX_HOST, VAULT_CONTAINER, CREDS_DIR
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Configuration
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.11}"
VAULT_CONTAINER="${VAULT_CONTAINER:-8640}"
VAULT_TOKEN="${VAULT_TOKEN:-}"
if [ -z "$VAULT_TOKEN" ]; then
log_error "VAULT_TOKEN environment variable is required"
log_info "Usage: VAULT_TOKEN=<root-token> ./scripts/configure-phoenix-vault-remote.sh"
exit 1
fi
echo "═══════════════════════════════════════════════════════════"
echo " Phoenix Vault Configuration"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Function to run vault commands inside the Vault container.
# NOTE(review): the token is interpolated into the remote command line, so
# it is visible in `ps` on the node/container while the command runs --
# consider passing it via stdin or an env file if that matters here.
vault_cmd() {
ssh root@"$PROXMOX_HOST" "pct exec $VAULT_CONTAINER -- bash -c 'export VAULT_ADDR=http://127.0.0.1:8200 && export VAULT_TOKEN=$VAULT_TOKEN && $1'"
}
# Verify connection
log_info "Verifying Vault connection..."
if vault_cmd "vault status > /dev/null 2>&1"; then
log_success "Connected to Vault cluster"
else
log_error "Failed to connect to Vault"
exit 1
fi
echo ""
# Phase 1: Enable AppRole authentication
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 1: Enabling AppRole Authentication"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
if vault_cmd "vault auth list | grep -q 'approle/'"; then
log_warn "AppRole auth method already enabled"
else
log_info "Enabling AppRole authentication..."
vault_cmd "vault auth enable approle" || {
log_error "Failed to enable AppRole"
exit 1
}
log_success "AppRole authentication enabled"
fi
echo ""
# Phase 2: Create Policies
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 2: Creating Vault Policies"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Policies are fed to a remote bash on stdin: the outer heredoc expands
# $VAULT_TOKEN locally; the inner quoted heredoc keeps the policy literal.
# Phoenix API Policy
log_info "Creating phoenix-api-policy..."
ssh root@"$PROXMOX_HOST" "pct exec $VAULT_CONTAINER -- bash" << POLICY_EOF
export VAULT_ADDR=http://127.0.0.1:8200
export VAULT_TOKEN=$VAULT_TOKEN
vault policy write phoenix-api-policy - << 'POLICY_INNER_EOF'
# Phoenix API Policy
# Allows read access to Phoenix API secrets
# API secrets (JWT, API keys)
path "secret/data/phoenix/api/*" {
capabilities = ["read"]
}
# Database credentials
path "secret/data/phoenix/database/*" {
capabilities = ["read"]
}
# Keycloak secrets
path "secret/data/phoenix/keycloak/*" {
capabilities = ["read"]
}
# Service secrets
path "secret/data/phoenix/services/*" {
capabilities = ["read"]
}
# Metadata access
path "secret/metadata/phoenix/*" {
capabilities = ["list", "read"]
}
POLICY_INNER_EOF
POLICY_EOF
log_success "phoenix-api-policy created"
# Phoenix Portal Policy
log_info "Creating phoenix-portal-policy..."
ssh root@"$PROXMOX_HOST" "pct exec $VAULT_CONTAINER -- bash" << POLICY_EOF
export VAULT_ADDR=http://127.0.0.1:8200
export VAULT_TOKEN=$VAULT_TOKEN
vault policy write phoenix-portal-policy - << 'POLICY_INNER_EOF'
# Phoenix Portal Policy
# Allows read access to Phoenix Portal secrets
# JWT secrets for portal
path "secret/data/phoenix/api/jwt-secrets" {
capabilities = ["read"]
}
# Portal-specific secrets
path "secret/data/phoenix/portal/*" {
capabilities = ["read"]
}
# Metadata access
path "secret/metadata/phoenix/portal/*" {
capabilities = ["list", "read"]
}
POLICY_INNER_EOF
POLICY_EOF
log_success "phoenix-portal-policy created"
# Phoenix Admin Policy
log_info "Creating phoenix-admin-policy..."
ssh root@"$PROXMOX_HOST" "pct exec $VAULT_CONTAINER -- bash" << POLICY_EOF
export VAULT_ADDR=http://127.0.0.1:8200
export VAULT_TOKEN=$VAULT_TOKEN
vault policy write phoenix-admin-policy - << 'POLICY_INNER_EOF'
# Phoenix Admin Policy
# Full access to Phoenix secrets for administration
# All Phoenix secrets
path "secret/data/phoenix/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Metadata
path "secret/metadata/phoenix/*" {
capabilities = ["list", "read", "delete"]
}
POLICY_INNER_EOF
POLICY_EOF
log_success "phoenix-admin-policy created"
echo ""
# Phase 3: Create AppRole Roles
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 3: Creating AppRole Roles"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Phoenix API Role
log_info "Creating phoenix-api AppRole..."
vault_cmd "vault write auth/approle/role/phoenix-api \
token_policies=phoenix-api-policy \
bind_secret_id=true \
secret_id_ttl=24h \
token_ttl=1h \
token_max_ttl=4h \
token_num_uses=0" 2>&1 | grep -v "Success" || log_warn "phoenix-api role may already exist"
log_success "phoenix-api AppRole created"
# Phoenix Portal Role
log_info "Creating phoenix-portal AppRole..."
vault_cmd "vault write auth/approle/role/phoenix-portal \
token_policies=phoenix-portal-policy \
bind_secret_id=true \
secret_id_ttl=24h \
token_ttl=1h \
token_max_ttl=4h \
token_num_uses=0" 2>&1 | grep -v "Success" || log_warn "phoenix-portal role may already exist"
log_success "phoenix-portal AppRole created"
echo ""
# Phase 4: Create Secret Paths Structure
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 4: Creating Secret Paths Structure"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Enable KV v2 secrets engine if not already enabled
if ! vault_cmd "vault secrets list | grep -q 'secret/'"; then
log_info "Enabling KV v2 secrets engine..."
vault_cmd "vault secrets enable -version=2 -path=secret kv" || {
log_error "Failed to enable KV v2 secrets engine"
exit 1
}
log_success "KV v2 secrets engine enabled"
else
log_warn "KV secrets engine already enabled"
fi
# Create placeholder secrets (CHANGE_ME values to be replaced post-setup)
log_info "Creating secret path structure..."
vault_cmd "vault kv put secret/phoenix/api/jwt-secrets access-token-secret=CHANGE_ME refresh-token-secret=CHANGE_ME" > /dev/null 2>&1 || log_warn "jwt-secrets may already exist"
vault_cmd "vault kv put secret/phoenix/api/api-keys internal-api-key=CHANGE_ME external-api-key=CHANGE_ME" > /dev/null 2>&1 || log_warn "api-keys may already exist"
vault_cmd "vault kv put secret/phoenix/database/postgres username=phoenix password=CHANGE_ME host=CHANGE_ME port=5432 database=phoenix" > /dev/null 2>&1 || log_warn "postgres secrets may already exist"
vault_cmd "vault kv put secret/phoenix/database/redis password=CHANGE_ME host=CHANGE_ME port=6379" > /dev/null 2>&1 || log_warn "redis secrets may already exist"
vault_cmd "vault kv put secret/phoenix/keycloak/admin-credentials username=admin password=CHANGE_ME" > /dev/null 2>&1 || log_warn "keycloak admin credentials may already exist"
vault_cmd "vault kv put secret/phoenix/keycloak/oidc-secrets client-id=phoenix-api client-secret=CHANGE_ME" > /dev/null 2>&1 || log_warn "oidc secrets may already exist"
vault_cmd "vault kv put secret/phoenix/services/blockchain rpc-url=CHANGE_ME private-key=CHANGE_ME" > /dev/null 2>&1 || log_warn "blockchain secrets may already exist"
vault_cmd "vault kv put secret/phoenix/services/integrations cloudflare-api-token=CHANGE_ME" > /dev/null 2>&1 || log_warn "integrations secrets may already exist"
log_success "Secret path structure created"
echo ""
# Phase 5: Generate AppRole Credentials
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 5: Generating AppRole Credentials"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Get Role IDs
PHOENIX_API_ROLE_ID=$(vault_cmd "vault read -field=role_id auth/approle/role/phoenix-api/role-id")
PHOENIX_PORTAL_ROLE_ID=$(vault_cmd "vault read -field=role_id auth/approle/role/phoenix-portal/role-id")
log_info "Phoenix API Role ID: $PHOENIX_API_ROLE_ID"
log_info "Phoenix Portal Role ID: $PHOENIX_PORTAL_ROLE_ID"
# Generate Secret IDs
PHOENIX_API_SECRET_ID=$(vault_cmd "vault write -field=secret_id -f auth/approle/role/phoenix-api/secret-id")
PHOENIX_PORTAL_SECRET_ID=$(vault_cmd "vault write -field=secret_id -f auth/approle/role/phoenix-portal/secret-id")
log_success "AppRole credentials generated"
# Save credentials.
# Fix: ensure the destination directory exists before redirecting into it;
# under `set -e` the `cat > "$CREDS_FILE"` previously aborted the script on
# a machine where the directory was missing.  CREDS_DIR is overridable.
CREDS_DIR="${CREDS_DIR:-/home/intlc/projects/proxmox/.secure/vault-credentials}"
mkdir -p "$CREDS_DIR"
chmod 700 "$CREDS_DIR"
CREDS_FILE="$CREDS_DIR/phoenix-approle-credentials-$(date +%Y%m%d).txt"
cat > "$CREDS_FILE" << EOF
═══════════════════════════════════════════════════════════
Phoenix Vault AppRole Credentials
═══════════════════════════════════════════════════════════
⚠️ SAVE SECURELY - DO NOT COMMIT TO GIT ⚠️
Phoenix API AppRole:
Role ID: $PHOENIX_API_ROLE_ID
Secret ID: $PHOENIX_API_SECRET_ID
Phoenix Portal AppRole:
Role ID: $PHOENIX_PORTAL_ROLE_ID
Secret ID: $PHOENIX_PORTAL_SECRET_ID
Usage:
export VAULT_ADDR=http://10.160.0.40:8200
export VAULT_ROLE_ID=<role-id>
export VAULT_SECRET_ID=<secret-id>
# Get token
vault write auth/approle/login role_id=\$VAULT_ROLE_ID secret_id=\$VAULT_SECRET_ID
═══════════════════════════════════════════════════════════
EOF
chmod 600 "$CREDS_FILE"
log_success "Credentials saved to $CREDS_FILE"
echo ""
# Summary
echo "═══════════════════════════════════════════════════════════"
echo " Configuration Summary"
echo "═══════════════════════════════════════════════════════════"
echo ""
log_success "✅ AppRole authentication enabled"
log_success "✅ Policies created (phoenix-api, phoenix-portal, phoenix-admin)"
log_success "✅ AppRole roles created (phoenix-api, phoenix-portal)"
log_success "✅ Secret paths structure created"
log_success "✅ AppRole credentials generated and saved"
echo ""
log_info "Next Steps:"
log_info " 1. Update placeholder secrets with actual values"
log_info " 2. Configure Phoenix services to use AppRole authentication"
log_info " 3. Test authentication and secret access"
log_info " 4. Set up monitoring and alerting"
log_info " 5. Configure automated backups"
echo ""

View File

@@ -0,0 +1,303 @@
#!/bin/bash
# Configure Phoenix Vault Cluster - Authentication, Policies, and Secret Paths
# Run this from local machine, connects via SSH to Vault container
#
# Required env: VAULT_TOKEN (a Vault root/admin token).
# Optional env: PROXMOX_HOST (Proxmox node IP), VAULT_CONTAINER (LXC CTID).
set -euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers — all output goes to stdout by design (interactive script).
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Configuration
PROXMOX_HOST="${PROXMOX_HOST:-192.168.11.11}"
VAULT_CONTAINER="${VAULT_CONTAINER:-8640}"
VAULT_TOKEN="${VAULT_TOKEN:-}"
# Fail fast with usage help when no token is supplied.
if [ -z "$VAULT_TOKEN" ]; then
  log_error "VAULT_TOKEN environment variable is required"
  log_info "Usage: VAULT_TOKEN=<root-token> ./scripts/configure-phoenix-vault-remote.sh"
  exit 1
fi
echo "═══════════════════════════════════════════════════════════"
echo "  Phoenix Vault Configuration"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Function to run vault commands
# vault_cmd CMD_STRING — runs a single vault CLI command inside the Vault
# LXC container via SSH to the Proxmox host ("pct exec"). Stdout of the
# remote command is the function's stdout; the remote exit status is the
# function's return value.
# NOTE(review): both $1 and $VAULT_TOKEN are interpolated locally into the
# remote command line, so the root token is visible in `ps` output on both
# hosts, and command strings containing single quotes (e.g. 'approle/')
# only work by accident of shell-quote concatenation. Callers must pass
# trusted, simple command strings only — TODO confirm and harden.
vault_cmd() {
  ssh root@"$PROXMOX_HOST" "pct exec $VAULT_CONTAINER -- bash -c 'export VAULT_ADDR=http://127.0.0.1:8200 && export VAULT_TOKEN=$VAULT_TOKEN && $1'"
}
# Verify connection
log_info "Verifying Vault connection..."
if vault_cmd "vault status > /dev/null 2>&1"; then
  log_success "Connected to Vault cluster"
else
  log_error "Failed to connect to Vault"
  exit 1
fi
echo ""
# Phase 1: Enable AppRole authentication
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 1: Enabling AppRole Authentication"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Idempotent: skip if the approle/ mount is already listed.
if vault_cmd "vault auth list | grep -q 'approle/'"; then
  log_warn "AppRole auth method already enabled"
else
  log_info "Enabling AppRole authentication..."
  vault_cmd "vault auth enable approle" || {
    log_error "Failed to enable AppRole"
    exit 1
  }
  log_success "AppRole authentication enabled"
fi
echo ""
# Phase 2: Create Policies
# Policies are streamed through a local heredoc into `vault policy write`
# on the container. The outer (unquoted) POLICY_EOF heredoc expands
# $VAULT_TOKEN locally; the inner quoted POLICY_INNER_EOF keeps the HCL
# literal. Do not add text inside the heredocs — it becomes policy content.
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 2: Creating Vault Policies"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Phoenix API Policy
log_info "Creating phoenix-api-policy..."
ssh root@"$PROXMOX_HOST" "pct exec $VAULT_CONTAINER -- bash" << POLICY_EOF
export VAULT_ADDR=http://127.0.0.1:8200
export VAULT_TOKEN=$VAULT_TOKEN
vault policy write phoenix-api-policy - << 'POLICY_INNER_EOF'
# Phoenix API Policy
# Allows read access to Phoenix API secrets
# API secrets (JWT, API keys)
path "secret/data/phoenix/api/*" {
capabilities = ["read"]
}
# Database credentials
path "secret/data/phoenix/database/*" {
capabilities = ["read"]
}
# Keycloak secrets
path "secret/data/phoenix/keycloak/*" {
capabilities = ["read"]
}
# Service secrets
path "secret/data/phoenix/services/*" {
capabilities = ["read"]
}
# Metadata access
path "secret/metadata/phoenix/*" {
capabilities = ["list", "read"]
}
POLICY_INNER_EOF
POLICY_EOF
log_success "phoenix-api-policy created"
# Phoenix Portal Policy
log_info "Creating phoenix-portal-policy..."
ssh root@"$PROXMOX_HOST" "pct exec $VAULT_CONTAINER -- bash" << POLICY_EOF
export VAULT_ADDR=http://127.0.0.1:8200
export VAULT_TOKEN=$VAULT_TOKEN
vault policy write phoenix-portal-policy - << 'POLICY_INNER_EOF'
# Phoenix Portal Policy
# Allows read access to Phoenix Portal secrets
# JWT secrets for portal
path "secret/data/phoenix/api/jwt-secrets" {
capabilities = ["read"]
}
# Portal-specific secrets
path "secret/data/phoenix/portal/*" {
capabilities = ["read"]
}
# Metadata access
path "secret/metadata/phoenix/portal/*" {
capabilities = ["list", "read"]
}
POLICY_INNER_EOF
POLICY_EOF
log_success "phoenix-portal-policy created"
# Phoenix Admin Policy
log_info "Creating phoenix-admin-policy..."
ssh root@"$PROXMOX_HOST" "pct exec $VAULT_CONTAINER -- bash" << POLICY_EOF
export VAULT_ADDR=http://127.0.0.1:8200
export VAULT_TOKEN=$VAULT_TOKEN
vault policy write phoenix-admin-policy - << 'POLICY_INNER_EOF'
# Phoenix Admin Policy
# Full access to Phoenix secrets for administration
# All Phoenix secrets
path "secret/data/phoenix/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Metadata
path "secret/metadata/phoenix/*" {
capabilities = ["list", "read", "delete"]
}
POLICY_INNER_EOF
POLICY_EOF
log_success "phoenix-admin-policy created"
echo ""
# Phase 3: Create AppRole Roles
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 3: Creating AppRole Roles"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Phoenix API Role — short-lived tokens (1h TTL, 4h max), unlimited uses.
log_info "Creating phoenix-api AppRole..."
vault_cmd "vault write auth/approle/role/phoenix-api \
token_policies=phoenix-api-policy \
bind_secret_id=true \
secret_id_ttl=24h \
token_ttl=1h \
token_max_ttl=4h \
token_num_uses=0" 2>&1 | grep -v "Success" || log_warn "phoenix-api role may already exist"
log_success "phoenix-api AppRole created"
# Phoenix Portal Role — same lifetimes as the API role.
log_info "Creating phoenix-portal AppRole..."
vault_cmd "vault write auth/approle/role/phoenix-portal \
token_policies=phoenix-portal-policy \
bind_secret_id=true \
secret_id_ttl=24h \
token_ttl=1h \
token_max_ttl=4h \
token_num_uses=0" 2>&1 | grep -v "Success" || log_warn "phoenix-portal role may already exist"
log_success "phoenix-portal AppRole created"
echo ""
# Phase 4: Create Secret Paths Structure
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 4: Creating Secret Paths Structure"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Enable KV v2 secrets engine if not already enabled
if ! vault_cmd "vault secrets list | grep -q 'secret/'"; then
  log_info "Enabling KV v2 secrets engine..."
  vault_cmd "vault secrets enable -version=2 -path=secret kv" || {
    log_error "Failed to enable KV v2 secrets engine"
    exit 1
  }
  log_success "KV v2 secrets engine enabled"
else
  log_warn "KV secrets engine already enabled"
fi
# Create placeholder secrets (CHANGE_ME values; operators fill them in later).
log_info "Creating secret path structure..."
vault_cmd "vault kv put secret/phoenix/api/jwt-secrets access-token-secret=CHANGE_ME refresh-token-secret=CHANGE_ME" > /dev/null 2>&1 || log_warn "jwt-secrets may already exist"
vault_cmd "vault kv put secret/phoenix/api/api-keys internal-api-key=CHANGE_ME external-api-key=CHANGE_ME" > /dev/null 2>&1 || log_warn "api-keys may already exist"
vault_cmd "vault kv put secret/phoenix/database/postgres username=phoenix password=CHANGE_ME host=CHANGE_ME port=5432 database=phoenix" > /dev/null 2>&1 || log_warn "postgres secrets may already exist"
vault_cmd "vault kv put secret/phoenix/database/redis password=CHANGE_ME host=CHANGE_ME port=6379" > /dev/null 2>&1 || log_warn "redis secrets may already exist"
vault_cmd "vault kv put secret/phoenix/keycloak/admin-credentials username=admin password=CHANGE_ME" > /dev/null 2>&1 || log_warn "keycloak admin credentials may already exist"
vault_cmd "vault kv put secret/phoenix/keycloak/oidc-secrets client-id=phoenix-api client-secret=CHANGE_ME" > /dev/null 2>&1 || log_warn "oidc secrets may already exist"
vault_cmd "vault kv put secret/phoenix/services/blockchain rpc-url=CHANGE_ME private-key=CHANGE_ME" > /dev/null 2>&1 || log_warn "blockchain secrets may already exist"
vault_cmd "vault kv put secret/phoenix/services/integrations cloudflare-api-token=CHANGE_ME" > /dev/null 2>&1 || log_warn "integrations secrets may already exist"
log_success "Secret path structure created"
echo ""
# Phase 5: Generate AppRole Credentials
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 5: Generating AppRole Credentials"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Get Role IDs (non-sensitive identifiers)
PHOENIX_API_ROLE_ID=$(vault_cmd "vault read -field=role_id auth/approle/role/phoenix-api/role-id")
PHOENIX_PORTAL_ROLE_ID=$(vault_cmd "vault read -field=role_id auth/approle/role/phoenix-portal/role-id")
log_info "Phoenix API Role ID: $PHOENIX_API_ROLE_ID"
log_info "Phoenix Portal Role ID: $PHOENIX_PORTAL_ROLE_ID"
# Generate Secret IDs (sensitive: only written to the 0600 file below)
PHOENIX_API_SECRET_ID=$(vault_cmd "vault write -field=secret_id -f auth/approle/role/phoenix-api/secret-id")
PHOENIX_PORTAL_SECRET_ID=$(vault_cmd "vault write -field=secret_id -f auth/approle/role/phoenix-portal/secret-id")
log_success "AppRole credentials generated"
# Save credentials
# NOTE(review): plaintext on disk; directory must already exist or the
# redirection fails under `set -e` — consider `mkdir -p` first.
CREDS_FILE="/home/intlc/projects/proxmox/.secure/vault-credentials/phoenix-approle-credentials-$(date +%Y%m%d).txt"
cat > "$CREDS_FILE" << EOF
═══════════════════════════════════════════════════════════
Phoenix Vault AppRole Credentials
═══════════════════════════════════════════════════════════
⚠️ SAVE SECURELY - DO NOT COMMIT TO GIT ⚠️
Phoenix API AppRole:
Role ID: $PHOENIX_API_ROLE_ID
Secret ID: $PHOENIX_API_SECRET_ID
Phoenix Portal AppRole:
Role ID: $PHOENIX_PORTAL_ROLE_ID
Secret ID: $PHOENIX_PORTAL_SECRET_ID
Usage:
export VAULT_ADDR=http://10.160.0.40:8200
export VAULT_ROLE_ID=<role-id>
export VAULT_SECRET_ID=<secret-id>
# Get token
vault write auth/approle/login role_id=\$VAULT_ROLE_ID secret_id=\$VAULT_SECRET_ID
═══════════════════════════════════════════════════════════
EOF
chmod 600 "$CREDS_FILE"
log_success "Credentials saved to $CREDS_FILE"
echo ""
# Summary
echo "═══════════════════════════════════════════════════════════"
echo "  Configuration Summary"
echo "═══════════════════════════════════════════════════════════"
echo ""
log_success "✅ AppRole authentication enabled"
log_success "✅ Policies created (phoenix-api, phoenix-portal, phoenix-admin)"
log_success "✅ AppRole roles created (phoenix-api, phoenix-portal)"
log_success "✅ Secret paths structure created"
log_success "✅ AppRole credentials generated and saved"
echo ""
log_info "Next Steps:"
log_info "  1. Update placeholder secrets with actual values"
log_info "  2. Configure Phoenix services to use AppRole authentication"
log_info "  3. Test authentication and secret access"
log_info "  4. Set up monitoring and alerting"
log_info "  5. Configure automated backups"
echo ""

View File

@@ -0,0 +1,326 @@
#!/bin/bash
# Configure Phoenix Vault Cluster - Authentication, Policies, and Secret Paths
# Run this after cluster deployment to set up Phoenix-specific configuration
#
# Local variant of configure-phoenix-vault-remote.sh: talks to Vault
# directly via VAULT_ADDR instead of tunnelling through SSH/pct.
# Required env: VAULT_TOKEN. Optional env: VAULT_ADDR.
set -euo pipefail
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
# Logging helpers
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Configuration
VAULT_ADDR="${VAULT_ADDR:-http://10.160.0.40:8200}"
VAULT_TOKEN="${VAULT_TOKEN:-}"
if [ -z "$VAULT_TOKEN" ]; then
  log_error "VAULT_TOKEN environment variable is required"
  log_info "Usage: VAULT_TOKEN=<root-token> ./scripts/configure-phoenix-vault.sh"
  exit 1
fi
# Exported so every `vault` invocation below inherits them.
export VAULT_ADDR
export VAULT_TOKEN
echo "═══════════════════════════════════════════════════════════"
echo "  Phoenix Vault Configuration"
echo "═══════════════════════════════════════════════════════════"
echo ""
# Verify connection
log_info "Verifying Vault connection..."
if ! vault status > /dev/null 2>&1; then
  log_error "Failed to connect to Vault at $VAULT_ADDR"
  exit 1
fi
log_success "Connected to Vault cluster"
echo ""
# Phase 1: Enable AppRole authentication (idempotent)
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 1: Enabling AppRole Authentication"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
if vault auth list | grep -q "approle/"; then
  log_warn "AppRole auth method already enabled"
else
  log_info "Enabling AppRole authentication..."
  vault auth enable approle || {
    log_error "Failed to enable AppRole"
    exit 1
  }
  log_success "AppRole authentication enabled"
fi
echo ""
# Phase 2: Create Policies
# Quoted heredocs keep the HCL literal; do not add text inside them —
# it would become part of the stored policy.
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 2: Creating Vault Policies"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Phoenix API Policy
log_info "Creating phoenix-api-policy..."
vault policy write phoenix-api-policy - <<'POLICY_EOF'
# Phoenix API Policy
# Allows read access to Phoenix API secrets
# API secrets (JWT, API keys)
path "secret/data/phoenix/api/*" {
capabilities = ["read"]
}
# Database credentials
path "secret/data/phoenix/database/*" {
capabilities = ["read"]
}
# Keycloak secrets
path "secret/data/phoenix/keycloak/*" {
capabilities = ["read"]
}
# Service secrets
path "secret/data/phoenix/services/*" {
capabilities = ["read"]
}
# Metadata access
path "secret/metadata/phoenix/*" {
capabilities = ["list", "read"]
}
POLICY_EOF
log_success "phoenix-api-policy created"
# Phoenix Portal Policy
log_info "Creating phoenix-portal-policy..."
vault policy write phoenix-portal-policy - <<'POLICY_EOF'
# Phoenix Portal Policy
# Allows read access to Phoenix Portal secrets
# JWT secrets for portal
path "secret/data/phoenix/api/jwt-secrets" {
capabilities = ["read"]
}
# Portal-specific secrets
path "secret/data/phoenix/portal/*" {
capabilities = ["read"]
}
# Metadata access
path "secret/metadata/phoenix/portal/*" {
capabilities = ["list", "read"]
}
POLICY_EOF
log_success "phoenix-portal-policy created"
# Phoenix Admin Policy (for management)
log_info "Creating phoenix-admin-policy..."
vault policy write phoenix-admin-policy - <<'POLICY_EOF'
# Phoenix Admin Policy
# Full access to Phoenix secrets for administration
# All Phoenix secrets
path "secret/data/phoenix/*" {
capabilities = ["create", "read", "update", "delete", "list"]
}
# Metadata
path "secret/metadata/phoenix/*" {
capabilities = ["list", "read", "delete"]
}
POLICY_EOF
log_success "phoenix-admin-policy created"
echo ""
# Phase 3: Create AppRole Roles
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 3: Creating AppRole Roles"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Phoenix API Role — short-lived tokens (1h TTL, 4h max), unlimited uses.
log_info "Creating phoenix-api AppRole..."
vault write auth/approle/role/phoenix-api \
  token_policies="phoenix-api-policy" \
  bind_secret_id=true \
  secret_id_ttl=24h \
  token_ttl=1h \
  token_max_ttl=4h \
  token_num_uses=0 || {
  log_warn "phoenix-api role may already exist"
}
log_success "phoenix-api AppRole created"
# Phoenix Portal Role — same lifetimes as the API role.
log_info "Creating phoenix-portal AppRole..."
vault write auth/approle/role/phoenix-portal \
  token_policies="phoenix-portal-policy" \
  bind_secret_id=true \
  secret_id_ttl=24h \
  token_ttl=1h \
  token_max_ttl=4h \
  token_num_uses=0 || {
  log_warn "phoenix-portal role may already exist"
}
log_success "phoenix-portal AppRole created"
echo ""
# Phase 4: Create Secret Paths Structure
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 4: Creating Secret Paths Structure"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Enable KV v2 secrets engine if not already enabled
if ! vault secrets list | grep -q "secret/"; then
  log_info "Enabling KV v2 secrets engine..."
  vault secrets enable -version=2 -path=secret kv || {
    log_error "Failed to enable KV v2 secrets engine"
    exit 1
  }
  log_success "KV v2 secrets engine enabled"
else
  log_warn "KV secrets engine already enabled"
fi
# Create placeholder secrets to establish structure
log_info "Creating secret path structure..."
# Phoenix API secrets
vault kv put secret/phoenix/api/jwt-secrets \
  access-token-secret="CHANGE_ME" \
  refresh-token-secret="CHANGE_ME" \
  > /dev/null 2>&1 || log_warn "jwt-secrets may already exist"
vault kv put secret/phoenix/api/api-keys \
  internal-api-key="CHANGE_ME" \
  external-api-key="CHANGE_ME" \
  > /dev/null 2>&1 || log_warn "api-keys may already exist"
# Database secrets
vault kv put secret/phoenix/database/postgres \
  username="phoenix" \
  password="CHANGE_ME" \
  host="CHANGE_ME" \
  port="5432" \
  database="phoenix" \
  > /dev/null 2>&1 || log_warn "postgres secrets may already exist"
vault kv put secret/phoenix/database/redis \
  password="CHANGE_ME" \
  host="CHANGE_ME" \
  port="6379" \
  > /dev/null 2>&1 || log_warn "redis secrets may already exist"
# Keycloak secrets
vault kv put secret/phoenix/keycloak/admin-credentials \
  username="admin" \
  password="CHANGE_ME" \
  > /dev/null 2>&1 || log_warn "keycloak admin credentials may already exist"
vault kv put secret/phoenix/keycloak/oidc-secrets \
  client-id="phoenix-api" \
  client-secret="CHANGE_ME" \
  > /dev/null 2>&1 || log_warn "oidc secrets may already exist"
# Service secrets
vault kv put secret/phoenix/services/blockchain \
  rpc-url="CHANGE_ME" \
  private-key="CHANGE_ME" \
  > /dev/null 2>&1 || log_warn "blockchain secrets may already exist"
vault kv put secret/phoenix/services/integrations \
  cloudflare-api-token="CHANGE_ME" \
  > /dev/null 2>&1 || log_warn "integrations secrets may already exist"
log_success "Secret path structure created"
echo ""
# Phase 5: Generate AppRole Credentials
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Phase 5: Generating AppRole Credentials"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
# Get Role IDs (non-sensitive identifiers)
PHOENIX_API_ROLE_ID=$(vault read -field=role_id auth/approle/role/phoenix-api/role-id)
PHOENIX_PORTAL_ROLE_ID=$(vault read -field=role_id auth/approle/role/phoenix-portal/role-id)
log_info "Phoenix API Role ID: $PHOENIX_API_ROLE_ID"
log_info "Phoenix Portal Role ID: $PHOENIX_PORTAL_ROLE_ID"
# Generate Secret IDs (sensitive: only written to the 0600 file below)
PHOENIX_API_SECRET_ID=$(vault write -field=secret_id -f auth/approle/role/phoenix-api/secret-id)
PHOENIX_PORTAL_SECRET_ID=$(vault write -field=secret_id -f auth/approle/role/phoenix-portal/secret-id)
log_success "AppRole credentials generated"
# Save credentials
# NOTE(review): plaintext on disk; directory must already exist or the
# redirection fails under `set -e` — consider `mkdir -p` first.
CREDS_FILE="/home/intlc/projects/proxmox/.secure/vault-credentials/phoenix-approle-credentials-$(date +%Y%m%d).txt"
cat > "$CREDS_FILE" << EOF
═══════════════════════════════════════════════════════════
Phoenix Vault AppRole Credentials
═══════════════════════════════════════════════════════════
⚠️ SAVE SECURELY - DO NOT COMMIT TO GIT ⚠️
Phoenix API AppRole:
Role ID: $PHOENIX_API_ROLE_ID
Secret ID: $PHOENIX_API_SECRET_ID
Phoenix Portal AppRole:
Role ID: $PHOENIX_PORTAL_ROLE_ID
Secret ID: $PHOENIX_PORTAL_SECRET_ID
Usage:
export VAULT_ADDR=http://10.160.0.40:8200
export VAULT_ROLE_ID=<role-id>
export VAULT_SECRET_ID=<secret-id>
# Get token
vault write auth/approle/login role_id=\$VAULT_ROLE_ID secret_id=\$VAULT_SECRET_ID
═══════════════════════════════════════════════════════════
EOF
chmod 600 "$CREDS_FILE"
log_success "Credentials saved to $CREDS_FILE"
echo ""
# Summary
echo "═══════════════════════════════════════════════════════════"
echo "  Configuration Summary"
echo "═══════════════════════════════════════════════════════════"
echo ""
log_success "✅ AppRole authentication enabled"
log_success "✅ Policies created (phoenix-api, phoenix-portal, phoenix-admin)"
log_success "✅ AppRole roles created (phoenix-api, phoenix-portal)"
log_success "✅ Secret paths structure created"
log_success "✅ AppRole credentials generated and saved"
echo ""
log_info "Next Steps:"
log_info "  1. Update placeholder secrets with actual values"
log_info "  2. Configure Phoenix services to use AppRole authentication"
log_info "  3. Test authentication and secret access"
log_info "  4. Set up monitoring and alerting"
log_info "  5. Configure automated backups"
echo ""

View File

@@ -0,0 +1,190 @@
#!/usr/bin/env bash
# Configure tunnel-r630-02 to be ready for migration/token generation
# This ensures the tunnel has proper configuration before getting a token
#
# Required env (from ../../.env): CLOUDFLARE_ACCOUNT_ID, CLOUDFLARE_API_KEY,
# CLOUDFLARE_EMAIL. Optional: PROXMOX_HOST_R630_02 (from ip-addresses.conf).
set -euo pipefail
# Load IP configuration (best-effort; fallbacks below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TUNNELS_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Load .env
if [ -f "$TUNNELS_DIR/../../.env" ]; then
  source "$TUNNELS_DIR/../../.env" 2>/dev/null || true
fi
if [[ -z "${CLOUDFLARE_ACCOUNT_ID:-}" ]] || [[ -z "${CLOUDFLARE_API_KEY:-}" ]] || [[ -z "${CLOUDFLARE_EMAIL:-}" ]]; then
  log_error "Cloudflare credentials not found in .env"
  exit 1
fi
TUNNEL_ID="0876f12b-64d7-4927-9ab3-94cb6cf48af9"
HOSTNAME="r630-02.d-bis.org"
# Bug fix: ip-addresses.conf is sourced best-effort above, so an unguarded
# ${PROXMOX_HOST_R630_02} aborts under `set -u` when the file is missing.
# Use the same fallback this script already uses when writing the config file.
TARGET="https://${PROXMOX_HOST_R630_02:-192.168.11.12}:8006"
log_info "=== Configuring tunnel-r630-02 for Migration ==="
echo ""
# Function to make API request
# cf_api_request METHOD ENDPOINT [JSON_BODY]
# Performs a Cloudflare v4 API call and prints the raw response body on
# stdout. Returns non-zero (after logging the first API error message)
# unless the HTTP status is 200 or 201.
# NOTE(review): the Global API key travels in curl argv/headers and is
# briefly visible in `ps`; prefer an API token or `curl -H @file`.
cf_api_request() {
  local method="$1"
  local endpoint="$2"
  local data="${3:-}"
  local url="https://api.cloudflare.com/client/v4${endpoint}"
  # Declare and assign separately so a mktemp failure is not masked by
  # `local` (ShellCheck SC2155) and aborts the script under `set -e`.
  local temp_file
  temp_file=$(mktemp)
  local http_code
  if [[ -n "$data" ]]; then
    http_code=$(curl -s -o "$temp_file" -w "%{http_code}" \
      -X "$method" "$url" \
      -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \
      -H "X-Auth-Key: ${CLOUDFLARE_API_KEY}" \
      -H "Content-Type: application/json" \
      -d "$data" 2>/dev/null)
  else
    http_code=$(curl -s -o "$temp_file" -w "%{http_code}" \
      -X "$method" "$url" \
      -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \
      -H "X-Auth-Key: ${CLOUDFLARE_API_KEY}" \
      -H "Content-Type: application/json" 2>/dev/null)
  fi
  local response
  response=$(cat "$temp_file" 2>/dev/null || echo "")
  rm -f "$temp_file"
  if [[ "$http_code" != "200" ]] && [[ "$http_code" != "201" ]]; then
    log_error "API request failed (HTTP $http_code)"
    echo "$response" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "$response"
    return 1
  fi
  echo "$response"
}
# Step 1: Ensure tunnel configuration exists
# Build the remote ingress config with jq (avoids hand-written JSON and
# escaping bugs) and PUT it to the cfd_tunnel configurations endpoint.
log_info "Step 1: Configuring tunnel route..."
CONFIG_DATA=$(jq -n \
  --arg hostname "$HOSTNAME" \
  --arg target "$TARGET" \
  '{
    config: {
      ingress: [
        {
          hostname: $hostname,
          service: $target,
          originRequest: {
            noHappyEyeballs: true,
            connectTimeout: "30s",
            tcpKeepAlive: "30s",
            keepAliveConnections: 100,
            keepAliveTimeout: "90s",
            disableChunkedEncoding: true,
            noTLSVerify: true
          }
        },
        {
          service: "http_status:404"
        }
      ]
    }
  }')
response=$(cf_api_request "PUT" "/accounts/${CLOUDFLARE_ACCOUNT_ID}/cfd_tunnel/${TUNNEL_ID}/configurations" "$CONFIG_DATA" 2>&1)
if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
  log_success "Tunnel route configured"
else
  log_error "Failed to configure tunnel route"
  echo "$response" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null
  exit 1
fi
# Step 2: Create local config file
# Mirrors the remote ingress config for token/credentials-file installs.
# Do not add text inside the heredoc — it becomes part of the YAML file.
log_info "Step 2: Creating local config file..."
CONFIG_FILE="$TUNNELS_DIR/configs/tunnel-r630-02.yml"
cat > "$CONFIG_FILE" <<EOF
# Cloudflare Tunnel Configuration for r630-02 Proxmox Host
# Tunnel Name: tunnel-r630-02
# Domain: r630-02.d-bis.org
# Target: ${PROXMOX_HOST_R630_02:-192.168.11.12}:8006 (Proxmox UI)
tunnel: $TUNNEL_ID
credentials-file: /etc/cloudflared/credentials-r630-02.json
ingress:
# Proxmox UI - r630-02
- hostname: $HOSTNAME
  service: $TARGET
  originRequest:
    noHappyEyeballs: true
    connectTimeout: 30s
    tcpKeepAlive: 30s
    keepAliveConnections: 100
    keepAliveTimeout: 90s
    disableChunkedEncoding: true
    # Allow self-signed certificates (Proxmox uses self-signed)
    noTLSVerify: true
# Catch-all (must be last)
- service: http_status:404
# Metrics endpoint (optional, for monitoring)
metrics: 127.0.0.1:9093
# Logging
loglevel: info
# Grace period for shutdown
gracePeriod: 30s
EOF
log_success "Config file created: $CONFIG_FILE"
# Step 3: Check tunnel status
log_info "Step 3: Checking tunnel status..."
response=$(cf_api_request "GET" "/accounts/${CLOUDFLARE_ACCOUNT_ID}/cfd_tunnel/${TUNNEL_ID}" 2>&1)
if echo "$response" | jq -e '.result' >/dev/null 2>&1; then
  status=$(echo "$response" | jq -r '.result.status // "unknown"')
  remote_config=$(echo "$response" | jq -r '.result.remote_config // false')
  log_info "Tunnel status: $status"
  log_info "Remote config: $remote_config"
  if [[ "$status" == "healthy" ]] && [[ "$remote_config" == "true" ]]; then
    log_success "Tunnel is ready for migration!"
  else
    log_warn "Tunnel needs to be connected to become healthy"
    log_info "Once you install and start the tunnel, it will become healthy"
  fi
fi
echo ""
log_success "=== Configuration Complete ==="
log_info "Next steps:"
log_info "1. Get token from Cloudflare Dashboard:"
log_info "   https://one.dash.cloudflare.com/ → Zero Trust → Networks → Tunnels → tunnel-r630-02"
log_info "2. Install with: sudo cloudflared service install <token>"
log_info "3. Or use the install script once you have the token"

View File

@@ -0,0 +1,184 @@
#!/usr/bin/env bash
# Configure tunnel-r630-02 to be ready for migration/token generation
# This ensures the tunnel has proper configuration before getting a token
#
# NOTE(review): this appears to be an older duplicate of the script that
# loads ip-addresses.conf; the Proxmox target IP is hardcoded here.
# Consider deleting one copy to avoid drift.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TUNNELS_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
# Load .env
if [ -f "$TUNNELS_DIR/../../.env" ]; then
  source "$TUNNELS_DIR/../../.env" 2>/dev/null || true
fi
if [[ -z "${CLOUDFLARE_ACCOUNT_ID:-}" ]] || [[ -z "${CLOUDFLARE_API_KEY:-}" ]] || [[ -z "${CLOUDFLARE_EMAIL:-}" ]]; then
  log_error "Cloudflare credentials not found in .env"
  exit 1
fi
TUNNEL_ID="0876f12b-64d7-4927-9ab3-94cb6cf48af9"
HOSTNAME="r630-02.d-bis.org"
TARGET="https://192.168.11.12:8006"
log_info "=== Configuring tunnel-r630-02 for Migration ==="
echo ""
# Function to make API request
# cf_api_request METHOD ENDPOINT [JSON_BODY]
# Performs a Cloudflare v4 API call and prints the raw response body on
# stdout. Returns non-zero (after logging the first API error message)
# unless the HTTP status is 200 or 201.
# NOTE(review): the Global API key travels in curl argv/headers and is
# briefly visible in `ps`; prefer an API token or `curl -H @file`.
cf_api_request() {
  local method="$1"
  local endpoint="$2"
  local data="${3:-}"
  local url="https://api.cloudflare.com/client/v4${endpoint}"
  # Declare and assign separately so a mktemp failure is not masked by
  # `local` (ShellCheck SC2155) and aborts the script under `set -e`.
  local temp_file
  temp_file=$(mktemp)
  local http_code
  if [[ -n "$data" ]]; then
    http_code=$(curl -s -o "$temp_file" -w "%{http_code}" \
      -X "$method" "$url" \
      -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \
      -H "X-Auth-Key: ${CLOUDFLARE_API_KEY}" \
      -H "Content-Type: application/json" \
      -d "$data" 2>/dev/null)
  else
    http_code=$(curl -s -o "$temp_file" -w "%{http_code}" \
      -X "$method" "$url" \
      -H "X-Auth-Email: ${CLOUDFLARE_EMAIL}" \
      -H "X-Auth-Key: ${CLOUDFLARE_API_KEY}" \
      -H "Content-Type: application/json" 2>/dev/null)
  fi
  local response
  response=$(cat "$temp_file" 2>/dev/null || echo "")
  rm -f "$temp_file"
  if [[ "$http_code" != "200" ]] && [[ "$http_code" != "201" ]]; then
    log_error "API request failed (HTTP $http_code)"
    echo "$response" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null || echo "$response"
    return 1
  fi
  echo "$response"
}
# Step 1: Ensure tunnel configuration exists
# Build the remote ingress config with jq and PUT it to the cfd_tunnel
# configurations endpoint.
log_info "Step 1: Configuring tunnel route..."
CONFIG_DATA=$(jq -n \
  --arg hostname "$HOSTNAME" \
  --arg target "$TARGET" \
  '{
    config: {
      ingress: [
        {
          hostname: $hostname,
          service: $target,
          originRequest: {
            noHappyEyeballs: true,
            connectTimeout: "30s",
            tcpKeepAlive: "30s",
            keepAliveConnections: 100,
            keepAliveTimeout: "90s",
            disableChunkedEncoding: true,
            noTLSVerify: true
          }
        },
        {
          service: "http_status:404"
        }
      ]
    }
  }')
response=$(cf_api_request "PUT" "/accounts/${CLOUDFLARE_ACCOUNT_ID}/cfd_tunnel/${TUNNEL_ID}/configurations" "$CONFIG_DATA" 2>&1)
if echo "$response" | jq -e '.success' >/dev/null 2>&1; then
  log_success "Tunnel route configured"
else
  log_error "Failed to configure tunnel route"
  echo "$response" | jq -r '.errors[0].message // "Unknown error"' 2>/dev/null
  exit 1
fi
# Step 2: Create local config file
# Do not add text inside the heredoc — it becomes part of the YAML file.
log_info "Step 2: Creating local config file..."
CONFIG_FILE="$TUNNELS_DIR/configs/tunnel-r630-02.yml"
cat > "$CONFIG_FILE" <<EOF
# Cloudflare Tunnel Configuration for r630-02 Proxmox Host
# Tunnel Name: tunnel-r630-02
# Domain: r630-02.d-bis.org
# Target: 192.168.11.12:8006 (Proxmox UI)
tunnel: $TUNNEL_ID
credentials-file: /etc/cloudflared/credentials-r630-02.json
ingress:
# Proxmox UI - r630-02
- hostname: $HOSTNAME
  service: $TARGET
  originRequest:
    noHappyEyeballs: true
    connectTimeout: 30s
    tcpKeepAlive: 30s
    keepAliveConnections: 100
    keepAliveTimeout: 90s
    disableChunkedEncoding: true
    # Allow self-signed certificates (Proxmox uses self-signed)
    noTLSVerify: true
# Catch-all (must be last)
- service: http_status:404
# Metrics endpoint (optional, for monitoring)
metrics: 127.0.0.1:9093
# Logging
loglevel: info
# Grace period for shutdown
gracePeriod: 30s
EOF
log_success "Config file created: $CONFIG_FILE"
# Step 3: Check tunnel status
log_info "Step 3: Checking tunnel status..."
response=$(cf_api_request "GET" "/accounts/${CLOUDFLARE_ACCOUNT_ID}/cfd_tunnel/${TUNNEL_ID}" 2>&1)
if echo "$response" | jq -e '.result' >/dev/null 2>&1; then
  status=$(echo "$response" | jq -r '.result.status // "unknown"')
  remote_config=$(echo "$response" | jq -r '.result.remote_config // false')
  log_info "Tunnel status: $status"
  log_info "Remote config: $remote_config"
  if [[ "$status" == "healthy" ]] && [[ "$remote_config" == "true" ]]; then
    log_success "Tunnel is ready for migration!"
  else
    log_warn "Tunnel needs to be connected to become healthy"
    log_info "Once you install and start the tunnel, it will become healthy"
  fi
fi
echo ""
log_success "=== Configuration Complete ==="
log_info "Next steps:"
log_info "1. Get token from Cloudflare Dashboard:"
log_info "   https://one.dash.cloudflare.com/ → Zero Trust → Networks → Tunnels → tunnel-r630-02"
log_info "2. Install with: sudo cloudflared service install <token>"
log_info "3. Or use the install script once you have the token"

View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Configure Service Dependencies
# Rewrites DATABASE_URL / REDIS_URL in every .env under /opt inside the
# Order (CT 10030-10092) and DBIS (CT 10150-10151) containers so the
# services point at the shared PostgreSQL and Redis instances.
set -euo pipefail
# Load IP configuration (best-effort; fallbacks below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# Fallback matches the PROXMOX_HOST default used by the other deployment
# scripts; without it `set -u` aborts when ip-addresses.conf is absent.
NODE_IP="${PROXMOX_HOST_R630_01:-192.168.11.11}"
# Configure Order services
# Bug fix: `pct enter` only opens an interactive shell and does not accept
# a command to run; `pct exec` is the non-interactive form used by the
# rest of the deployment scripts.
for vmid in 10030 10040 10050 10060 10070 10080 10090 10091 10092; do
  ssh root@"${NODE_IP}" "pct exec $vmid -- bash -c '
    # Update .env with database and Redis IPs
    find /opt -name \".env\" -exec sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://order_user:order_password@${ORDER_POSTGRES_PRIMARY:-192.168.11.44}:5432/order_db|g\" {} \;
    find /opt -name \".env\" -exec sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${ORDER_REDIS_IP:-192.168.11.38}:6379|g\" {} \;
    echo \"Dependencies configured for CT $vmid\"
  '"
done
# Configure DBIS services
# NOTE(review): the DBIS database password is hardcoded here (and now in
# git history) — rotate it and source it from Vault or .env instead.
for vmid in 10150 10151; do
  ssh root@"${NODE_IP}" "pct exec $vmid -- bash -c '
    find /opt -name \".env\" -exec sed -i \"s|DATABASE_URL=.*|DATABASE_URL=postgresql://dbis:8cba649443f97436db43b34ab2c0e75b5cf15611bef9c099cee6fb22cc3d7771@${DBIS_POSTGRES_PRIMARY:-192.168.11.105}:5432/dbis_core|g\" {} \;
    find /opt -name \".env\" -exec sed -i \"s|REDIS_URL=.*|REDIS_URL=redis://${DBIS_REDIS_IP:-192.168.11.120}:6379|g\" {} \;
    echo \"Dependencies configured for CT $vmid\"
  '"
done

View File

@@ -0,0 +1,189 @@
#!/bin/bash
# Configure all VLANs on UDM Pro via Private API
# This script creates all required VLANs for the network architecture.
set -euo pipefail

# Load IP configuration (optional; the ':-' defaults below cover missing values).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# The repository root is two levels above this script; recompute PROJECT_ROOT
# and use it as the working directory from here on.
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"

# Load UniFi credentials from ~/.env (UNIFI_* entries only).
if [ -f ~/.env ]; then
  source <(grep "^UNIFI_" ~/.env | sed 's/^/export /')
fi

UDM_URL="${UNIFI_UDM_URL:-https://192.168.0.1}"
USERNAME="${UNIFI_USERNAME:-unifi_api}"
# SECURITY: hardcoded fallback credential kept for backward compatibility —
# set UNIFI_PASSWORD in the environment and remove this default.
PASSWORD="${UNIFI_PASSWORD:-L@kers2010\$\$}"
SITE_ID="${UNIFI_SITE_ID:-default}"
# Use an unpredictable temp file for the session cookie jar instead of a
# fixed world-known /tmp path.
COOKIE_FILE="$(mktemp /tmp/unifi_vlan_config_cookies.XXXXXX)"

echo "UDM Pro VLAN Configuration Script"
echo "=================================="
echo ""
echo "UDM URL: $UDM_URL"
echo "Site ID: $SITE_ID"
echo ""

# Authenticate against the UDM and store the session cookie for later calls.
echo "Authenticating..."
AUTH_RESPONSE=$(curl -k -s -X POST "$UDM_URL/api/auth/login" \
  -H 'Content-Type: application/json' \
  -d "{\"username\":\"$USERNAME\",\"password\":\"$PASSWORD\"}" \
  -c "$COOKIE_FILE" \
  -w "\n%{http_code}")
# The -w format appends the HTTP status as the final line of the response.
HTTP_CODE=$(echo "$AUTH_RESPONSE" | tail -n1)
if [ "$HTTP_CODE" != "200" ]; then
  echo "❌ Authentication failed (HTTP $HTTP_CODE)"
  exit 1
fi
echo "✅ Authentication successful"
echo ""
# create_vlan VLAN_ID NAME SUBNET GATEWAY [PURPOSE]
# Creates one VLAN network on the UniFi controller via the REST API.
#   $1 - numeric VLAN ID
#   $2 - network name shown in the UniFi UI
#   $3 - subnet in CIDR notation (e.g. 10.110.0.0/24)
#   $4 - gateway IP (accepted for call-site compatibility; the controller
#        derives the gateway itself, so it is not sent in the payload)
#   $5 - network purpose (defaults to "corporate")
# Returns 0 on success or when the VLAN already exists; 1 on API failure.
create_vlan() {
  local vlan_id=$1
  local vlan_name=$2
  local subnet=$3
  local gateway=$4  # intentionally unused — see header comment
  local purpose=${5:-corporate}

  echo "Creating VLAN $vlan_id: $vlan_name ($subnet)..."

  # Base network address without the /prefix; the DHCP pool spans .10-.250 of
  # the final octet (parameter expansion replaces the old echo|sed pipelines).
  local ip_base=${subnet%%/*}
  local dhcp_start="${ip_base%.*}.10"
  local dhcp_stop="${ip_base%.*}.250"

  # Create network configuration payload.
  local network_config
  network_config=$(cat <<EOF
{
  "name": "$vlan_name",
  "purpose": "$purpose",
  "vlan": $vlan_id,
  "ip_subnet": "$subnet",
  "ipv6_interface_type": "none",
  "dhcpd_enabled": true,
  "dhcpd_start": "$dhcp_start",
  "dhcpd_stop": "$dhcp_stop",
  "dhcpd_leasetime": 86400,
  "domain_name": "localdomain",
  "is_nat": true,
  "networkgroup": "LAN"
}
EOF
)

  # POST the network definition; -w appends the HTTP status as the last line.
  local response http_code response_body
  response=$(curl -k -s -X POST "$UDM_URL/proxy/network/api/s/$SITE_ID/rest/networkconf" \
    -b "$COOKIE_FILE" \
    -H 'Content-Type: application/json' \
    -d "$network_config" \
    -w "\n%{http_code}")
  http_code=$(echo "$response" | tail -n1)
  response_body=$(echo "$response" | sed '$d')

  if [ "$http_code" = "200" ] || echo "$response_body" | grep -q '"meta":{"rc":"ok"'; then
    echo " ✅ VLAN $vlan_id created successfully"
    return 0
  fi
  # Treat "already exists" as success so reruns stay idempotent.
  if echo "$response_body" | grep -q "already exists\|duplicate"; then
    echo " ⚠️ VLAN $vlan_id already exists (skipping)"
    return 0
  fi
  echo " ❌ Failed to create VLAN $vlan_id (HTTP $http_code)"
  echo " Response: $response_body"
  return 1
}
# Create all VLANs
echo "Creating VLANs..."
echo ""

# VLAN 11 - MGMT-LAN (special configuration with a custom DHCP range).
echo "Creating VLAN 11 (MGMT-LAN) with custom DHCP range..."
MGMT_CONFIG=$(cat <<EOF
{
  "name": "MGMT-LAN",
  "purpose": "corporate",
  "vlan": 11,
  "ip_subnet": "${NETWORK_192_168_11_0:-192.168.11.0}/24",
  "ipv6_interface_type": "none",
  "dhcpd_enabled": true,
  "dhcpd_start": "${IP_VALIDATOR_0:-192.168.11.100}",
  "dhcpd_stop": "192.168.11.200",
  "dhcpd_leasetime": 86400,
  "domain_name": "localdomain",
  "is_nat": true,
  "networkgroup": "LAN"
}
EOF
)
RESPONSE=$(curl -k -s -X POST "$UDM_URL/proxy/network/api/s/$SITE_ID/rest/networkconf" \
  -b "$COOKIE_FILE" \
  -H 'Content-Type: application/json' \
  -d "$MGMT_CONFIG" \
  -w "\n%{http_code}")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
if [ "$HTTP_CODE" = "200" ] || echo "$RESPONSE" | grep -q '"meta":{"rc":"ok"'; then
  echo "✅ VLAN 11 created"
else
  # "already exists" is fine on reruns; anything else is a real failure.
  if echo "$RESPONSE" | grep -q "already exists\|duplicate"; then
    echo "⚠️ VLAN 11 already exists"
  else
    echo "❌ Failed to create VLAN 11"
  fi
fi
echo ""

# Create remaining VLANs (id, name, subnet, gateway).
create_vlan 110 "BESU-VAL" "10.110.0.0/24" "10.110.0.1"
create_vlan 111 "BESU-SEN" "10.111.0.0/24" "10.111.0.1"
create_vlan 112 "BESU-RPC" "10.112.0.0/24" "10.112.0.1"
create_vlan 120 "BLOCKSCOUT" "10.120.0.0/24" "10.120.0.1"
create_vlan 121 "CACTI" "10.121.0.0/24" "10.121.0.1"
create_vlan 130 "CCIP-OPS" "10.130.0.0/24" "10.130.0.1"
create_vlan 132 "CCIP-COMMIT" "10.132.0.0/24" "10.132.0.1"
create_vlan 133 "CCIP-EXEC" "10.133.0.0/24" "10.133.0.1"
create_vlan 134 "CCIP-RMN" "10.134.0.0/24" "10.134.0.1"
create_vlan 140 "FABRIC" "10.140.0.0/24" "10.140.0.1"
create_vlan 141 "FIREFLY" "10.141.0.0/24" "10.141.0.1"
create_vlan 150 "INDY" "10.150.0.0/24" "10.150.0.1"
create_vlan 160 "SANKOFA-SVC" "10.160.0.0/22" "10.160.0.1"
create_vlan 200 "PHX-SOV-SMOM" "10.200.0.0/20" "10.200.0.1"
create_vlan 201 "PHX-SOV-ICCC" "10.201.0.0/20" "10.201.0.1"
create_vlan 202 "PHX-SOV-DBIS" "10.202.0.0/20" "10.202.0.1"
create_vlan 203 "PHX-SOV-AR" "10.203.0.0/20" "10.203.0.1"

echo ""
echo "✅ VLAN configuration complete!"
echo ""
echo "Verifying created networks..."
curl -k -s -X GET "$UDM_URL/proxy/network/api/s/$SITE_ID/rest/networkconf" \
  -b "$COOKIE_FILE" \
  -H 'Content-Type: application/json' | python3 -m json.tool 2>/dev/null | grep -E '"name"|"vlan"|"ip_subnet"' | head -60

# Cleanup the session cookie jar.
rm -f "$COOKIE_FILE"
echo ""
echo "✅ Configuration script completed"

View File

@@ -0,0 +1,183 @@
#!/bin/bash
set -euo pipefail
# Configure all VLANs on UDM Pro via Private API
# This script creates all required VLANs for the network architecture
# NOTE(review): redundant — '-e' is already enabled by 'set -euo pipefail' above.
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
cd "$PROJECT_ROOT"
# Load environment variables (UNIFI_* entries from ~/.env only)
if [ -f ~/.env ]; then
source <(grep "^UNIFI_" ~/.env | sed 's/^/export /')
fi
UDM_URL="${UNIFI_UDM_URL:-https://192.168.0.1}"
USERNAME="${UNIFI_USERNAME:-unifi_api}"
# SECURITY NOTE(review): hardcoded fallback credential — set UNIFI_PASSWORD
# in the environment and remove this default.
PASSWORD="${UNIFI_PASSWORD:-L@kers2010\$\$}"
SITE_ID="${UNIFI_SITE_ID:-default}"
# NOTE(review): predictable /tmp path; mktemp would be safer.
COOKIE_FILE="/tmp/unifi_vlan_config_cookies.txt"
echo "UDM Pro VLAN Configuration Script"
echo "=================================="
echo ""
echo "UDM URL: $UDM_URL"
echo "Site ID: $SITE_ID"
echo ""
# Authenticate against the UDM and store the session cookie for later calls.
echo "Authenticating..."
AUTH_RESPONSE=$(curl -k -s -X POST "$UDM_URL/api/auth/login" \
-H 'Content-Type: application/json' \
-d "{\"username\":\"$USERNAME\",\"password\":\"$PASSWORD\"}" \
-c "$COOKIE_FILE" \
-w "\n%{http_code}")
# The -w format appends the HTTP status as the last line of the response.
HTTP_CODE=$(echo "$AUTH_RESPONSE" | tail -n1)
if [ "$HTTP_CODE" != "200" ]; then
echo "❌ Authentication failed (HTTP $HTTP_CODE)"
exit 1
fi
echo "✅ Authentication successful"
echo ""
# Function to create VLAN network
# Args: $1 VLAN id, $2 name, $3 subnet (CIDR), $4 gateway, $5 purpose (optional).
# Returns 0 on success or if the VLAN already exists, 1 on API failure.
create_vlan() {
local vlan_id=$1
local vlan_name=$2
local subnet=$3
# NOTE(review): 'gateway' is accepted but never used in the payload.
local gateway=$4
local purpose=${5:-corporate}
echo "Creating VLAN $vlan_id: $vlan_name ($subnet)..."
# Parse subnet (only ip_base is used below; cidr and a/b/c/d are unused)
IFS='/' read -r ip_base cidr <<< "$subnet"
IFS='.' read -r a b c d <<< "$ip_base"
# Create network configuration; DHCP pool is .10-.250 of the last octet.
NETWORK_CONFIG=$(cat <<EOF
{
"name": "$vlan_name",
"purpose": "$purpose",
"vlan": $vlan_id,
"ip_subnet": "$subnet",
"ipv6_interface_type": "none",
"dhcpd_enabled": true,
"dhcpd_start": "$(echo $ip_base | sed 's/\.[0-9]*$/.10/')",
"dhcpd_stop": "$(echo $ip_base | sed 's/\.[0-9]*$/.250/')",
"dhcpd_leasetime": 86400,
"domain_name": "localdomain",
"is_nat": true,
"networkgroup": "LAN"
}
EOF
)
# Create network via the controller REST API.
RESPONSE=$(curl -k -s -X POST "$UDM_URL/proxy/network/api/s/$SITE_ID/rest/networkconf" \
-b "$COOKIE_FILE" \
-H 'Content-Type: application/json' \
-d "$NETWORK_CONFIG" \
-w "\n%{http_code}")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
RESPONSE_BODY=$(echo "$RESPONSE" | sed '$d')
if [ "$HTTP_CODE" = "200" ] || echo "$RESPONSE_BODY" | grep -q '"meta":{"rc":"ok"'; then
echo " ✅ VLAN $vlan_id created successfully"
return 0
else
# Check if network already exists (idempotent reruns)
if echo "$RESPONSE_BODY" | grep -q "already exists\|duplicate"; then
echo " ⚠️ VLAN $vlan_id already exists (skipping)"
return 0
else
echo " ❌ Failed to create VLAN $vlan_id (HTTP $HTTP_CODE)"
echo " Response: $RESPONSE_BODY"
return 1
fi
fi
}
# Create all VLANs
echo "Creating VLANs..."
echo ""
# VLAN 11 - MGMT-LAN (special configuration with DHCP range)
echo "Creating VLAN 11 (MGMT-LAN) with custom DHCP range..."
MGMT_CONFIG=$(cat <<EOF
{
"name": "MGMT-LAN",
"purpose": "corporate",
"vlan": 11,
"ip_subnet": "192.168.11.0/24",
"ipv6_interface_type": "none",
"dhcpd_enabled": true,
"dhcpd_start": "192.168.11.100",
"dhcpd_stop": "192.168.11.200",
"dhcpd_leasetime": 86400,
"domain_name": "localdomain",
"is_nat": true,
"networkgroup": "LAN"
}
EOF
)
RESPONSE=$(curl -k -s -X POST "$UDM_URL/proxy/network/api/s/$SITE_ID/rest/networkconf" \
-b "$COOKIE_FILE" \
-H 'Content-Type: application/json' \
-d "$MGMT_CONFIG" \
-w "\n%{http_code}")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
if [ "$HTTP_CODE" = "200" ] || echo "$RESPONSE" | grep -q '"meta":{"rc":"ok"'; then
echo "✅ VLAN 11 created"
else
if echo "$RESPONSE" | grep -q "already exists\|duplicate"; then
echo "⚠️ VLAN 11 already exists"
else
echo "❌ Failed to create VLAN 11"
fi
fi
echo ""
# Create remaining VLANs (id, name, subnet, gateway)
create_vlan 110 "BESU-VAL" "10.110.0.0/24" "10.110.0.1"
create_vlan 111 "BESU-SEN" "10.111.0.0/24" "10.111.0.1"
create_vlan 112 "BESU-RPC" "10.112.0.0/24" "10.112.0.1"
create_vlan 120 "BLOCKSCOUT" "10.120.0.0/24" "10.120.0.1"
create_vlan 121 "CACTI" "10.121.0.0/24" "10.121.0.1"
create_vlan 130 "CCIP-OPS" "10.130.0.0/24" "10.130.0.1"
create_vlan 132 "CCIP-COMMIT" "10.132.0.0/24" "10.132.0.1"
create_vlan 133 "CCIP-EXEC" "10.133.0.0/24" "10.133.0.1"
create_vlan 134 "CCIP-RMN" "10.134.0.0/24" "10.134.0.1"
create_vlan 140 "FABRIC" "10.140.0.0/24" "10.140.0.1"
create_vlan 141 "FIREFLY" "10.141.0.0/24" "10.141.0.1"
create_vlan 150 "INDY" "10.150.0.0/24" "10.150.0.1"
create_vlan 160 "SANKOFA-SVC" "10.160.0.0/22" "10.160.0.1"
create_vlan 200 "PHX-SOV-SMOM" "10.200.0.0/20" "10.200.0.1"
create_vlan 201 "PHX-SOV-ICCC" "10.201.0.0/20" "10.201.0.1"
create_vlan 202 "PHX-SOV-DBIS" "10.202.0.0/20" "10.202.0.1"
create_vlan 203 "PHX-SOV-AR" "10.203.0.0/20" "10.203.0.1"
echo ""
echo "✅ VLAN configuration complete!"
echo ""
echo "Verifying created networks..."
curl -k -s -X GET "$UDM_URL/proxy/network/api/s/$SITE_ID/rest/networkconf" \
-b "$COOKIE_FILE" \
-H 'Content-Type: application/json' | python3 -m json.tool 2>/dev/null | grep -E '"name"|"vlan"|"ip_subnet"' | head -60
# Cleanup the session cookie jar
rm -f "$COOKIE_FILE"
echo ""
echo "✅ Configuration script completed"

View File

@@ -0,0 +1,413 @@
#!/usr/bin/env bash
# Complete Bridge Deployment Script for ChainID 138
# Deploys WETH9 Bridge, WETH10 Bridge, and LINK Token (CREATE2).
# Run from a hardwired system with access to the Core RPC (192.168.11.211:8545).
set -euo pipefail

# Resolve paths once and load the optional IP configuration.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
cd "$PROJECT_ROOT"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

# Logging helpers (colorized, all write to stdout).
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"; echo -e "${CYAN}$1${NC}"; echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n"; }

# Load environment (best-effort: the .env files are optional and may contain
# lines that would trip 'set -e', hence the temporary disable).
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +e
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -e
fi
if [ -f "$PROJECT_ROOT/smom-dbis-138/.env" ]; then
  set +e
  source "$PROJECT_ROOT/smom-dbis-138/.env" 2>/dev/null || true
  set -e
fi

# Required variables
PRIVATE_KEY="${PRIVATE_KEY:-}"
# Fall back to the literal Core RPC address when ip-addresses.conf did not
# define RPC_CORE_1 (a bare ${RPC_CORE_1} would abort under 'set -u').
RPC_URL="${RPC_URL_138:-http://${RPC_CORE_1:-192.168.11.211}:8545}"
CCIP_ROUTER="${CCIP_ROUTER:-0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e}"
CCIP_FEE_TOKEN="${CCIP_FEE_TOKEN:-0x514910771AF9Ca656af840dff83E8264EcF986CA}"
WETH9_ADDRESS="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
WETH10_ADDRESS="0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"

# Mainnet bridge addresses (for destination configuration)
MAINNET_CHAIN_SELECTOR="5009297550715157269"
MAINNET_WETH9_BRIDGE="0x3304b747E565a97ec8AC220b0B6A1f6ffDB837e6"
MAINNET_WETH10_BRIDGE="0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e"

# Validate required variables
if [ -z "$PRIVATE_KEY" ]; then
  log_error "PRIVATE_KEY not found in environment"
  exit 1
fi
# Derive the deployer address from the key; empty result means a bad key or
# a missing 'cast' binary.
DEPLOYER=$(cast wallet address "$PRIVATE_KEY" 2>/dev/null || echo "")
if [ -z "$DEPLOYER" ]; then
  log_error "Failed to derive deployer address from private key"
  exit 1
fi
# Banner: print the resolved deployment parameters.
log_section "ChainID 138 Complete Bridge Deployment"
log_info "Deployer: $DEPLOYER"
log_info "RPC: $RPC_URL"
log_info "CCIP Router: $CCIP_ROUTER"
log_info "Fee Token: $CCIP_FEE_TOKEN"
# Pre-flight checks
log_section "Pre-Flight Checks"
# 1. RPC connectivity — the chain ID must be exactly 138
log_info "1. Checking RPC connectivity..."
CHAIN_ID=$(cast chain-id --rpc-url "$RPC_URL" 2>/dev/null || echo "")
if [ "$CHAIN_ID" != "138" ]; then
log_error "Cannot connect to RPC or wrong chain ID: $CHAIN_ID (expected: 138)"
exit 1
fi
log_success "RPC connected - Chain ID: $CHAIN_ID"
# 2. Block production — sample the head twice, 2s apart; stall only warns
log_info "2. Checking block production..."
BLOCK1=$(cast block-number --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
sleep 2
BLOCK2=$(cast block-number --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
if [ "$BLOCK2" -gt "$BLOCK1" ]; then
log_success "Blocks being produced (Block $BLOCK1 -> $BLOCK2)"
else
log_warn "Block production may be stalled (Block: $BLOCK1)"
fi
# 3. Deployer balance — must exceed 1 ETH (1e18 wei) to proceed
log_info "3. Checking deployer balance..."
BALANCE=$(cast balance "$DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
BALANCE_ETH=$(echo "scale=4; $BALANCE / 1000000000000000000" | bc 2>/dev/null || echo "0")
if [ "$BALANCE" -gt "1000000000000000000" ]; then
log_success "Deployer balance: $BALANCE_ETH ETH"
else
log_error "Insufficient deployer balance: $BALANCE_ETH ETH"
exit 1
fi
# 4. Calculate gas prices
log_info "4. Calculating optimal gas prices..."
# Helper script computes a chain-appropriate price; falls back to 1.1 gwei.
GAS_PRICE=$(bash "$PROJECT_ROOT/scripts/calculate-chain138-gas-price.sh" 2>/dev/null || echo "1100000000")
# Check EIP-1559: presence of baseFeePerGas in the latest block implies support.
BASE_FEE_HEX=$(cast rpc eth_getBlockByNumber latest false --rpc-url "$RPC_URL" 2>/dev/null | grep -o '"baseFeePerGas":"[^"]*"' | cut -d'"' -f4 || echo "0x0")
# NOTE(review): the "7" fallback looks like a placeholder base fee — confirm.
BASE_FEE_DEC=$(cast --to-dec "$BASE_FEE_HEX" 2>/dev/null || echo "7")
USE_EIP1559=false
if [ -n "$BASE_FEE_HEX" ] && [ "$BASE_FEE_HEX" != "0x0" ] && [ "$BASE_FEE_HEX" != "null" ]; then
USE_EIP1559=true
MAX_FEE_PER_GAS="$GAS_PRICE"
# Priority fee = 10% of the headroom above the base fee, floored at 0.01 gwei.
AVAILABLE_FEE=$((MAX_FEE_PER_GAS - BASE_FEE_DEC))
PRIORITY_FEE=$((AVAILABLE_FEE / 10))
MIN_PRIORITY="10000000"
if [ "$PRIORITY_FEE" -lt "$MIN_PRIORITY" ]; then
PRIORITY_FEE="$MIN_PRIORITY"
fi
# Clamp so base + priority never exceeds the max fee.
TOTAL_CHECK=$((BASE_FEE_DEC + PRIORITY_FEE))
if [ "$TOTAL_CHECK" -gt "$MAX_FEE_PER_GAS" ]; then
PRIORITY_FEE=$((MAX_FEE_PER_GAS - BASE_FEE_DEC - 1000000))
if [ "$PRIORITY_FEE" -lt "$MIN_PRIORITY" ]; then
PRIORITY_FEE="$MIN_PRIORITY"
fi
fi
log_success "EIP-1559 gas settings:"
log_info " Max Fee: $MAX_FEE_PER_GAS wei ($(echo "scale=2; $MAX_FEE_PER_GAS / 1000000000" | bc) gwei)"
log_info " Priority: $PRIORITY_FEE wei ($(echo "scale=9; $PRIORITY_FEE / 1000000000" | bc) gwei)"
log_info " Base: $BASE_FEE_DEC wei"
else
log_info "Using legacy gas price: $GAS_PRICE wei ($(echo "scale=2; $GAS_PRICE / 1000000000" | bc) gwei)"
fi
cd "$PROJECT_ROOT/smom-dbis-138"
# Deploy WETH9 Bridge
# forge failures are tolerated (|| true) so the address-extraction fallback
# and tx-hash diagnostics below can still run; a missing address aborts.
log_section "Deploy WETH9 Bridge"
if [ "$USE_EIP1559" = true ]; then
log_info "Deploying WETH9 Bridge with EIP-1559..."
DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH9Bridge.s.sol:DeployCCIPWETH9Bridge \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$MAX_FEE_PER_GAS" \
--priority-gas-price "$PRIORITY_FEE" \
--slow \
-vvv 2>&1 || true)
else
log_info "Deploying WETH9 Bridge with legacy gas..."
DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH9Bridge.s.sol:DeployCCIPWETH9Bridge \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$GAS_PRICE" \
--legacy \
--slow \
-vvv 2>&1 || true)
fi
echo "$DEPLOY_OUTPUT" | tail -40
# Prefer the explicit deploy log line; fall back to the last 40-hex-char
# address anywhere in the output. NOTE(review): the fallback may pick up an
# unrelated address — verify manually when it triggers.
WETH9_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "CCIPWETH9Bridge deployed at: \K0x[a-fA-F0-9]+" | tail -1 || echo "")
if [ -z "$WETH9_BRIDGE" ]; then
WETH9_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "0x[a-fA-F0-9]{40}" | tail -1 || echo "")
fi
if [ -n "$WETH9_BRIDGE" ] && [ ${#WETH9_BRIDGE} -eq 42 ]; then
log_success "WETH9 Bridge deployed at: $WETH9_BRIDGE"
# Wait for confirmation
sleep 5
# Verify deployment: bytecode size > 1000 is treated as a deployed contract
CODE_SIZE=$(cast code "$WETH9_BRIDGE" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
if [ "$CODE_SIZE" -gt 1000 ]; then
log_success "Verification: Contract deployed ($CODE_SIZE bytes)"
else
log_warn "Verification: Code size small ($CODE_SIZE bytes) - may still be deploying"
fi
else
log_error "Failed to extract WETH9 Bridge address"
# Surface the first tx hash from the output to aid manual diagnosis.
TX_HASH=$(echo "$DEPLOY_OUTPUT" | grep -oE "0x[a-fA-F0-9]{64}" | head -1 || echo "")
if [ -n "$TX_HASH" ]; then
log_info "Transaction hash: $TX_HASH"
log_info "Check status: cast tx $TX_HASH --rpc-url $RPC_URL"
fi
exit 1
fi
# Deploy WETH10 Bridge (same flow as WETH9 above)
log_section "Deploy WETH10 Bridge"
if [ "$USE_EIP1559" = true ]; then
log_info "Deploying WETH10 Bridge with EIP-1559..."
DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH10Bridge.s.sol:DeployCCIPWETH10Bridge \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$MAX_FEE_PER_GAS" \
--priority-gas-price "$PRIORITY_FEE" \
--slow \
-vvv 2>&1 || true)
else
log_info "Deploying WETH10 Bridge with legacy gas..."
DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH10Bridge.s.sol:DeployCCIPWETH10Bridge \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$GAS_PRICE" \
--legacy \
--slow \
-vvv 2>&1 || true)
fi
echo "$DEPLOY_OUTPUT" | tail -40
WETH10_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "CCIPWETH10Bridge deployed at: \K0x[a-fA-F0-9]+" | tail -1 || echo "")
if [ -z "$WETH10_BRIDGE" ]; then
WETH10_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "0x[a-fA-F0-9]{40}" | tail -1 || echo "")
fi
if [ -n "$WETH10_BRIDGE" ] && [ ${#WETH10_BRIDGE} -eq 42 ]; then
log_success "WETH10 Bridge deployed at: $WETH10_BRIDGE"
# Wait for confirmation
sleep 5
# Verify deployment
CODE_SIZE=$(cast code "$WETH10_BRIDGE" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
if [ "$CODE_SIZE" -gt 1000 ]; then
log_success "Verification: Contract deployed ($CODE_SIZE bytes)"
else
log_warn "Verification: Code size small ($CODE_SIZE bytes) - may still be deploying"
fi
else
log_error "Failed to extract WETH10 Bridge address"
TX_HASH=$(echo "$DEPLOY_OUTPUT" | grep -oE "0x[a-fA-F0-9]{64}" | head -1 || echo "")
if [ -n "$TX_HASH" ]; then
log_info "Transaction hash: $TX_HASH"
log_info "Check status: cast tx $TX_HASH --rpc-url $RPC_URL"
fi
exit 1
fi
# Configure Destinations
# Registers the mainnet peer bridge on each local bridge via
# addDestination(chainSelector, bridgeAddress). Failures only warn —
# destinations can be re-added manually after deployment.
log_section "Configure Bridge Destinations"
# Configure WETH9 Bridge with Mainnet
log_info "Configuring WETH9 Bridge with Mainnet destination..."
if [ "$USE_EIP1559" = true ]; then
CONFIG_OUTPUT=$(cast send "$WETH9_BRIDGE" \
"addDestination(uint64,address)" \
"$MAINNET_CHAIN_SELECTOR" \
"$MAINNET_WETH9_BRIDGE" \
--rpc-url "$RPC_URL" \
--private-key "$PRIVATE_KEY" \
--max-fee-per-gas "$MAX_FEE_PER_GAS" \
--priority-fee-per-gas "$PRIORITY_FEE" \
2>&1 || true)
else
CONFIG_OUTPUT=$(cast send "$WETH9_BRIDGE" \
"addDestination(uint64,address)" \
"$MAINNET_CHAIN_SELECTOR" \
"$MAINNET_WETH9_BRIDGE" \
--rpc-url "$RPC_URL" \
--private-key "$PRIVATE_KEY" \
--gas-price "$GAS_PRICE" \
--legacy \
2>&1 || true)
fi
if echo "$CONFIG_OUTPUT" | grep -q "Success\|transactionHash"; then
log_success "WETH9 Bridge destination configured"
else
log_warn "WETH9 Bridge destination configuration: $(echo "$CONFIG_OUTPUT" | head -3)"
fi
# Verify WETH9 destination (read-only call; empty output means the call failed)
DEST_CHAINS=$(cast call "$WETH9_BRIDGE" "getDestinationChains()(uint64[])" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
log_info "WETH9 Bridge destinations: $DEST_CHAINS"
# Configure WETH10 Bridge with Mainnet (same flow as WETH9 above)
log_info "Configuring WETH10 Bridge with Mainnet destination..."
if [ "$USE_EIP1559" = true ]; then
CONFIG_OUTPUT=$(cast send "$WETH10_BRIDGE" \
"addDestination(uint64,address)" \
"$MAINNET_CHAIN_SELECTOR" \
"$MAINNET_WETH10_BRIDGE" \
--rpc-url "$RPC_URL" \
--private-key "$PRIVATE_KEY" \
--max-fee-per-gas "$MAX_FEE_PER_GAS" \
--priority-fee-per-gas "$PRIORITY_FEE" \
2>&1 || true)
else
CONFIG_OUTPUT=$(cast send "$WETH10_BRIDGE" \
"addDestination(uint64,address)" \
"$MAINNET_CHAIN_SELECTOR" \
"$MAINNET_WETH10_BRIDGE" \
--rpc-url "$RPC_URL" \
--private-key "$PRIVATE_KEY" \
--gas-price "$GAS_PRICE" \
--legacy \
2>&1 || true)
fi
if echo "$CONFIG_OUTPUT" | grep -q "Success\|transactionHash"; then
log_success "WETH10 Bridge destination configured"
else
log_warn "WETH10 Bridge destination configuration: $(echo "$CONFIG_OUTPUT" | head -3)"
fi
# Verify WETH10 destination
DEST_CHAINS=$(cast call "$WETH10_BRIDGE" "getDestinationChains()(uint64[])" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
log_info "WETH10 Bridge destinations: $DEST_CHAINS"
# Deploy LINK Token (CREATE2)
# Success is judged by bytecode appearing at the canonical LINK address, not
# by the forge exit status — CREATE2 lands at a deterministic address.
log_section "Deploy LINK Token (CREATE2)"
log_info "Attempting CREATE2 deployment to canonical address..."
if [ "$USE_EIP1559" = true ]; then
LINK_OUTPUT=$(forge script script/DeployLinkToCanonicalAddress.s.sol:DeployLinkToCanonicalAddress \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$MAX_FEE_PER_GAS" \
--priority-gas-price "$PRIORITY_FEE" \
--slow \
-vvv 2>&1 || true)
else
LINK_OUTPUT=$(forge script script/DeployLinkToCanonicalAddress.s.sol:DeployLinkToCanonicalAddress \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$GAS_PRICE" \
--legacy \
--slow \
-vvv 2>&1 || true)
fi
echo "$LINK_OUTPUT" | tail -30
# Canonical mainnet LINK address; CREATE2 should reproduce it on chain 138.
LINK_ADDRESS="0x514910771AF9Ca656af840dff83E8264EcF986CA"
CODE_SIZE=$(cast code "$LINK_ADDRESS" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
if [ "$CODE_SIZE" -gt 1000 ]; then
log_success "LINK Token deployed at canonical address: $LINK_ADDRESS ($CODE_SIZE bytes)"
else
log_warn "LINK Token not deployed at canonical address (code size: $CODE_SIZE bytes)"
log_info "CREATE2 deployment may require manual intervention"
fi
# Final Summary
# Overall exit status: 0 only when both bridges deployed; LINK failure warns.
log_section "Deployment Summary"
echo "Deployed Contracts:"
if [ -n "$WETH9_BRIDGE" ] && [ ${#WETH9_BRIDGE} -eq 42 ]; then
log_success "✓ WETH9 Bridge: $WETH9_BRIDGE"
else
log_error "✗ WETH9 Bridge: Deployment failed"
fi
if [ -n "$WETH10_BRIDGE" ] && [ ${#WETH10_BRIDGE} -eq 42 ]; then
log_success "✓ WETH10 Bridge: $WETH10_BRIDGE"
else
log_error "✗ WETH10 Bridge: Deployment failed"
fi
CODE_SIZE=$(cast code "$LINK_ADDRESS" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
if [ "$CODE_SIZE" -gt 1000 ]; then
log_success "✓ LINK Token: $LINK_ADDRESS"
else
log_warn "⚠ LINK Token: Not deployed at canonical address"
fi
if [ -n "$WETH9_BRIDGE" ] && [ -n "$WETH10_BRIDGE" ] && [ ${#WETH9_BRIDGE} -eq 42 ] && [ ${#WETH10_BRIDGE} -eq 42 ]; then
log_success "\n✅ Bridge Deployment Complete!"
log_info "\nNext steps:"
log_info "1. Verify all contract addresses"
log_info "2. Update .env files with new addresses"
log_info "3. Test bidirectional transfers"
log_info "4. Update documentation"
# Save addresses to file for downstream tooling / documentation updates.
cat > /tmp/chain138-deployed-addresses.txt <<EOF
# ChainID 138 Deployed Contract Addresses
# Date: $(date)
WETH9_BRIDGE=$WETH9_BRIDGE
WETH10_BRIDGE=$WETH10_BRIDGE
LINK_TOKEN=$LINK_ADDRESS
CCIP_ROUTER=$CCIP_ROUTER
CCIP_FEE_TOKEN=$CCIP_FEE_TOKEN
# Mainnet Bridge Addresses (for reference)
MAINNET_WETH9_BRIDGE=$MAINNET_WETH9_BRIDGE
MAINNET_WETH10_BRIDGE=$MAINNET_WETH10_BRIDGE
MAINNET_CHAIN_SELECTOR=$MAINNET_CHAIN_SELECTOR
EOF
log_info "\nAddresses saved to: /tmp/chain138-deployed-addresses.txt"
exit 0
else
log_error "\n❌ Deployment Incomplete"
exit 1
fi

View File

@@ -0,0 +1,407 @@
#!/usr/bin/env bash
# Complete Bridge Deployment Script for ChainID 138
# Deploys WETH9 Bridge, WETH10 Bridge, and LINK Token (CREATE2)
# Run from hardwired system with access to Core RPC (192.168.11.211:8545)
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"; echo -e "${CYAN}$1${NC}"; echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n"; }
# Load environment
if [ -f "$PROJECT_ROOT/.env" ]; then
set +e
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -e
fi
if [ -f "$PROJECT_ROOT/smom-dbis-138/.env" ]; then
set +e
source "$PROJECT_ROOT/smom-dbis-138/.env" 2>/dev/null || true
set -e
fi
# Required variables
PRIVATE_KEY="${PRIVATE_KEY:-}"
RPC_URL="${RPC_URL_138:-http://192.168.11.211:8545}"
CCIP_ROUTER="${CCIP_ROUTER:-0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e}"
CCIP_FEE_TOKEN="${CCIP_FEE_TOKEN:-0x514910771AF9Ca656af840dff83E8264EcF986CA}"
WETH9_ADDRESS="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
WETH10_ADDRESS="0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"
# Mainnet bridge addresses (for destination configuration)
MAINNET_CHAIN_SELECTOR="5009297550715157269"
MAINNET_WETH9_BRIDGE="0x3304b747E565a97ec8AC220b0B6A1f6ffDB837e6"
MAINNET_WETH10_BRIDGE="0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e"
# Validate required variables
if [ -z "$PRIVATE_KEY" ]; then
log_error "PRIVATE_KEY not found in environment"
exit 1
fi
DEPLOYER=$(cast wallet address "$PRIVATE_KEY" 2>/dev/null || echo "")
if [ -z "$DEPLOYER" ]; then
log_error "Failed to derive deployer address from private key"
exit 1
fi
log_section "ChainID 138 Complete Bridge Deployment"
log_info "Deployer: $DEPLOYER"
log_info "RPC: $RPC_URL"
log_info "CCIP Router: $CCIP_ROUTER"
log_info "Fee Token: $CCIP_FEE_TOKEN"
# Pre-flight checks
log_section "Pre-Flight Checks"
# 1. RPC connectivity
log_info "1. Checking RPC connectivity..."
CHAIN_ID=$(cast chain-id --rpc-url "$RPC_URL" 2>/dev/null || echo "")
if [ "$CHAIN_ID" != "138" ]; then
log_error "Cannot connect to RPC or wrong chain ID: $CHAIN_ID (expected: 138)"
exit 1
fi
log_success "RPC connected - Chain ID: $CHAIN_ID"
# 2. Block production
log_info "2. Checking block production..."
BLOCK1=$(cast block-number --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
sleep 2
BLOCK2=$(cast block-number --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
if [ "$BLOCK2" -gt "$BLOCK1" ]; then
log_success "Blocks being produced (Block $BLOCK1 -> $BLOCK2)"
else
log_warn "Block production may be stalled (Block: $BLOCK1)"
fi
# 3. Deployer balance
log_info "3. Checking deployer balance..."
BALANCE=$(cast balance "$DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
BALANCE_ETH=$(echo "scale=4; $BALANCE / 1000000000000000000" | bc 2>/dev/null || echo "0")
if [ "$BALANCE" -gt "1000000000000000000" ]; then
log_success "Deployer balance: $BALANCE_ETH ETH"
else
log_error "Insufficient deployer balance: $BALANCE_ETH ETH"
exit 1
fi
# 4. Calculate gas prices
log_info "4. Calculating optimal gas prices..."
GAS_PRICE=$(bash "$PROJECT_ROOT/scripts/calculate-chain138-gas-price.sh" 2>/dev/null || echo "1100000000")
# Check EIP-1559
BASE_FEE_HEX=$(cast rpc eth_getBlockByNumber latest false --rpc-url "$RPC_URL" 2>/dev/null | grep -o '"baseFeePerGas":"[^"]*"' | cut -d'"' -f4 || echo "0x0")
BASE_FEE_DEC=$(cast --to-dec "$BASE_FEE_HEX" 2>/dev/null || echo "7")
USE_EIP1559=false
if [ -n "$BASE_FEE_HEX" ] && [ "$BASE_FEE_HEX" != "0x0" ] && [ "$BASE_FEE_HEX" != "null" ]; then
USE_EIP1559=true
MAX_FEE_PER_GAS="$GAS_PRICE"
AVAILABLE_FEE=$((MAX_FEE_PER_GAS - BASE_FEE_DEC))
PRIORITY_FEE=$((AVAILABLE_FEE / 10))
MIN_PRIORITY="10000000"
if [ "$PRIORITY_FEE" -lt "$MIN_PRIORITY" ]; then
PRIORITY_FEE="$MIN_PRIORITY"
fi
TOTAL_CHECK=$((BASE_FEE_DEC + PRIORITY_FEE))
if [ "$TOTAL_CHECK" -gt "$MAX_FEE_PER_GAS" ]; then
PRIORITY_FEE=$((MAX_FEE_PER_GAS - BASE_FEE_DEC - 1000000))
if [ "$PRIORITY_FEE" -lt "$MIN_PRIORITY" ]; then
PRIORITY_FEE="$MIN_PRIORITY"
fi
fi
log_success "EIP-1559 gas settings:"
log_info " Max Fee: $MAX_FEE_PER_GAS wei ($(echo "scale=2; $MAX_FEE_PER_GAS / 1000000000" | bc) gwei)"
log_info " Priority: $PRIORITY_FEE wei ($(echo "scale=9; $PRIORITY_FEE / 1000000000" | bc) gwei)"
log_info " Base: $BASE_FEE_DEC wei"
else
log_info "Using legacy gas price: $GAS_PRICE wei ($(echo "scale=2; $GAS_PRICE / 1000000000" | bc) gwei)"
fi
cd "$PROJECT_ROOT/smom-dbis-138"
# Deploy WETH9 Bridge
log_section "Deploy WETH9 Bridge"
if [ "$USE_EIP1559" = true ]; then
log_info "Deploying WETH9 Bridge with EIP-1559..."
DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH9Bridge.s.sol:DeployCCIPWETH9Bridge \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$MAX_FEE_PER_GAS" \
--priority-gas-price "$PRIORITY_FEE" \
--slow \
-vvv 2>&1 || true)
else
log_info "Deploying WETH9 Bridge with legacy gas..."
DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH9Bridge.s.sol:DeployCCIPWETH9Bridge \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$GAS_PRICE" \
--legacy \
--slow \
-vvv 2>&1 || true)
fi
echo "$DEPLOY_OUTPUT" | tail -40
WETH9_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "CCIPWETH9Bridge deployed at: \K0x[a-fA-F0-9]+" | tail -1 || echo "")
if [ -z "$WETH9_BRIDGE" ]; then
WETH9_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "0x[a-fA-F0-9]{40}" | tail -1 || echo "")
fi
if [ -n "$WETH9_BRIDGE" ] && [ ${#WETH9_BRIDGE} -eq 42 ]; then
log_success "WETH9 Bridge deployed at: $WETH9_BRIDGE"
# Wait for confirmation
sleep 5
# Verify deployment
CODE_SIZE=$(cast code "$WETH9_BRIDGE" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
if [ "$CODE_SIZE" -gt 1000 ]; then
log_success "Verification: Contract deployed ($CODE_SIZE bytes)"
else
log_warn "Verification: Code size small ($CODE_SIZE bytes) - may still be deploying"
fi
else
log_error "Failed to extract WETH9 Bridge address"
TX_HASH=$(echo "$DEPLOY_OUTPUT" | grep -oE "0x[a-fA-F0-9]{64}" | head -1 || echo "")
if [ -n "$TX_HASH" ]; then
log_info "Transaction hash: $TX_HASH"
log_info "Check status: cast tx $TX_HASH --rpc-url $RPC_URL"
fi
exit 1
fi
# Deploy WETH10 Bridge
# Deploy the CCIPWETH10Bridge contract, then verify bytecode landed on-chain.
log_section "Deploy WETH10 Bridge"
# Assemble the fee-mode flags once so a single forge invocation covers both
# EIP-1559 and legacy pricing; argv is identical to two separate calls.
gas_flags=()
if [ "$USE_EIP1559" = true ]; then
  log_info "Deploying WETH10 Bridge with EIP-1559..."
  gas_flags=(--with-gas-price "$MAX_FEE_PER_GAS" --priority-gas-price "$PRIORITY_FEE")
else
  log_info "Deploying WETH10 Bridge with legacy gas..."
  gas_flags=(--with-gas-price "$GAS_PRICE" --legacy)
fi
# '|| true' keeps a failing forge run from aborting under 'set -e'; the
# address-extraction check below reports the failure instead.
DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH10Bridge.s.sol:DeployCCIPWETH10Bridge \
  --rpc-url "$RPC_URL" \
  --broadcast \
  --private-key "$PRIVATE_KEY" \
  "${gas_flags[@]}" \
  --slow \
  -vvv 2>&1 || true)
echo "$DEPLOY_OUTPUT" | tail -40
# Prefer the script's explicit "deployed at:" line; otherwise fall back to
# the last 40-hex-digit address anywhere in the log (heuristic — the
# fallback could in principle pick up an unrelated address; verify).
WETH10_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "CCIPWETH10Bridge deployed at: \K0x[a-fA-F0-9]+" | tail -1 || echo "")
if [ -z "$WETH10_BRIDGE" ]; then
  WETH10_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oE "0x[a-fA-F0-9]{40}" | tail -1 || echo "")
fi
if [ -n "$WETH10_BRIDGE" ] && [ ${#WETH10_BRIDGE} -eq 42 ]; then
  log_success "WETH10 Bridge deployed at: $WETH10_BRIDGE"
  # Wait for confirmation before querying the deployed code.
  sleep 5
  # Verify deployment by bytecode size at the reported address.
  CODE_SIZE=$(cast code "$WETH10_BRIDGE" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
  if [ "$CODE_SIZE" -gt 1000 ]; then
    log_success "Verification: Contract deployed ($CODE_SIZE bytes)"
  else
    log_warn "Verification: Code size small ($CODE_SIZE bytes) - may still be deploying"
  fi
else
  log_error "Failed to extract WETH10 Bridge address"
  # Surface the first tx hash in the output so the operator can investigate.
  TX_HASH=$(echo "$DEPLOY_OUTPUT" | grep -oE "0x[a-fA-F0-9]{64}" | head -1 || echo "")
  if [ -n "$TX_HASH" ]; then
    log_info "Transaction hash: $TX_HASH"
    log_info "Check status: cast tx $TX_HASH --rpc-url $RPC_URL"
  fi
  exit 1
fi
# Configure Destinations
log_section "Configure Bridge Destinations"
# Configure WETH9 Bridge with Mainnet
# Register the Mainnet peer bridge as a CCIP destination on the freshly
# deployed WETH9 bridge; fee flags are chosen once per detected fee mode.
log_info "Configuring WETH9 Bridge with Mainnet destination..."
fee_flags=()
if [ "$USE_EIP1559" = true ]; then
  fee_flags=(--max-fee-per-gas "$MAX_FEE_PER_GAS" --priority-fee-per-gas "$PRIORITY_FEE")
else
  fee_flags=(--gas-price "$GAS_PRICE" --legacy)
fi
# '|| true': a reverted/failed send is reported below instead of aborting
# the whole script under 'set -e'.
CONFIG_OUTPUT=$(cast send "$WETH9_BRIDGE" \
  "addDestination(uint64,address)" \
  "$MAINNET_CHAIN_SELECTOR" \
  "$MAINNET_WETH9_BRIDGE" \
  --rpc-url "$RPC_URL" \
  --private-key "$PRIVATE_KEY" \
  "${fee_flags[@]}" \
  2>&1 || true)
# Heuristic: output containing "Success" or "transactionHash" is treated as
# a sent transaction; anything else is a warning, not fatal.
if echo "$CONFIG_OUTPUT" | grep -q "Success\|transactionHash"; then
  log_success "WETH9 Bridge destination configured"
else
  log_warn "WETH9 Bridge destination configuration: $(echo "$CONFIG_OUTPUT" | head -3)"
fi
# Verify WETH9 destination
# Read the configured destination selectors back for operator visibility.
DEST_CHAINS=$(cast call "$WETH9_BRIDGE" "getDestinationChains()(uint64[])" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
log_info "WETH9 Bridge destinations: $DEST_CHAINS"
# Configure WETH10 Bridge with Mainnet
# Mirrors the WETH9 configuration above: register the Mainnet peer bridge as
# a CCIP destination. '|| true' keeps a reverted/failed send from aborting
# the script under 'set -e'; the output is inspected below instead.
log_info "Configuring WETH10 Bridge with Mainnet destination..."
if [ "$USE_EIP1559" = true ]; then
CONFIG_OUTPUT=$(cast send "$WETH10_BRIDGE" \
"addDestination(uint64,address)" \
"$MAINNET_CHAIN_SELECTOR" \
"$MAINNET_WETH10_BRIDGE" \
--rpc-url "$RPC_URL" \
--private-key "$PRIVATE_KEY" \
--max-fee-per-gas "$MAX_FEE_PER_GAS" \
--priority-fee-per-gas "$PRIORITY_FEE" \
2>&1 || true)
else
CONFIG_OUTPUT=$(cast send "$WETH10_BRIDGE" \
"addDestination(uint64,address)" \
"$MAINNET_CHAIN_SELECTOR" \
"$MAINNET_WETH10_BRIDGE" \
--rpc-url "$RPC_URL" \
--private-key "$PRIVATE_KEY" \
--gas-price "$GAS_PRICE" \
--legacy \
2>&1 || true)
fi
# Heuristic: output containing "Success" or "transactionHash" is treated as
# a sent transaction; anything else is surfaced as a warning, not fatal.
if echo "$CONFIG_OUTPUT" | grep -q "Success\|transactionHash"; then
log_success "WETH10 Bridge destination configured"
else
log_warn "WETH10 Bridge destination configuration: $(echo "$CONFIG_OUTPUT" | head -3)"
fi
# Verify WETH10 destination
# Read the configured destination selectors back for operator visibility.
DEST_CHAINS=$(cast call "$WETH10_BRIDGE" "getDestinationChains()(uint64[])" --rpc-url "$RPC_URL" 2>/dev/null || echo "")
log_info "WETH10 Bridge destinations: $DEST_CHAINS"
# Deploy LINK Token (CREATE2)
log_section "Deploy LINK Token (CREATE2)"
log_info "Attempting CREATE2 deployment to canonical address..."
# '|| true': success is judged by the on-chain code-size check below, not by
# forge's exit status, so a failing run must not abort under 'set -e'.
if [ "$USE_EIP1559" = true ]; then
LINK_OUTPUT=$(forge script script/DeployLinkToCanonicalAddress.s.sol:DeployLinkToCanonicalAddress \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$MAX_FEE_PER_GAS" \
--priority-gas-price "$PRIORITY_FEE" \
--slow \
-vvv 2>&1 || true)
else
LINK_OUTPUT=$(forge script script/DeployLinkToCanonicalAddress.s.sol:DeployLinkToCanonicalAddress \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--with-gas-price "$GAS_PRICE" \
--legacy \
--slow \
-vvv 2>&1 || true)
fi
echo "$LINK_OUTPUT" | tail -30
# Canonical Ethereum-mainnet LINK address; the CREATE2 script targets the
# same address on chain 138.
LINK_ADDRESS="0x514910771AF9Ca656af840dff83E8264EcF986CA"
# Deployment is verified by bytecode presence at the canonical address.
CODE_SIZE=$(cast code "$LINK_ADDRESS" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
if [ "$CODE_SIZE" -gt 1000 ]; then
log_success "LINK Token deployed at canonical address: $LINK_ADDRESS ($CODE_SIZE bytes)"
else
log_warn "LINK Token not deployed at canonical address (code size: $CODE_SIZE bytes)"
log_info "CREATE2 deployment may require manual intervention"
fi
# Final Summary
# Report per-contract status and, on full success, persist the addresses.
log_section "Deployment Summary"
echo "Deployed Contracts:"
# A valid deployment leaves a 42-character (0x + 40 hex) address.
if [ -n "$WETH9_BRIDGE" ] && [ ${#WETH9_BRIDGE} -eq 42 ]; then
log_success "✓ WETH9 Bridge: $WETH9_BRIDGE"
else
log_error "✗ WETH9 Bridge: Deployment failed"
fi
if [ -n "$WETH10_BRIDGE" ] && [ ${#WETH10_BRIDGE} -eq 42 ]; then
log_success "✓ WETH10 Bridge: $WETH10_BRIDGE"
else
log_error "✗ WETH10 Bridge: Deployment failed"
fi
# Re-check the LINK token's bytecode at the canonical address.
CODE_SIZE=$(cast code "$LINK_ADDRESS" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
if [ "$CODE_SIZE" -gt 1000 ]; then
log_success "✓ LINK Token: $LINK_ADDRESS"
else
log_warn "⚠ LINK Token: Not deployed at canonical address"
fi
# Overall success requires both bridges; LINK is reported but not required.
if [ -n "$WETH9_BRIDGE" ] && [ -n "$WETH10_BRIDGE" ] && [ ${#WETH9_BRIDGE} -eq 42 ] && [ ${#WETH10_BRIDGE} -eq 42 ]; then
log_success "\n✅ Bridge Deployment Complete!"
log_info "\nNext steps:"
log_info "1. Verify all contract addresses"
log_info "2. Update .env files with new addresses"
log_info "3. Test bidirectional transfers"
log_info "4. Update documentation"
# Save addresses to file
cat > /tmp/chain138-deployed-addresses.txt <<EOF
# ChainID 138 Deployed Contract Addresses
# Date: $(date)
WETH9_BRIDGE=$WETH9_BRIDGE
WETH10_BRIDGE=$WETH10_BRIDGE
LINK_TOKEN=$LINK_ADDRESS
CCIP_ROUTER=$CCIP_ROUTER
CCIP_FEE_TOKEN=$CCIP_FEE_TOKEN
# Mainnet Bridge Addresses (for reference)
MAINNET_WETH9_BRIDGE=$MAINNET_WETH9_BRIDGE
MAINNET_WETH10_BRIDGE=$MAINNET_WETH10_BRIDGE
MAINNET_CHAIN_SELECTOR=$MAINNET_CHAIN_SELECTOR
EOF
log_info "\nAddresses saved to: /tmp/chain138-deployed-addresses.txt"
exit 0
else
log_error "\n❌ Deployment Incomplete"
exit 1
fi

View File

@@ -0,0 +1,315 @@
#!/usr/bin/env bash
# Standalone Bridge Deployment Script
# Run from hardwired system with Core RPC access
# Deploys: WETH9 Bridge, WETH10 Bridge, LINK Token (CREATE2), and configures destinations
set -euo pipefail

# Resolve script/project paths once (previously computed twice in a row) and
# load the optional IP-address map; the script still runs if it is absent.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
cd "$PROJECT_ROOT"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'

# Logging helpers: tagged, colorized status lines on stdout.
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
log_section() { echo -e "\n${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}"; echo -e "${CYAN}$1${NC}"; echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n"; }

# Load environment files if present; errexit is relaxed around 'source' so
# a problem inside an env file cannot abort the deployment script.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +e
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -e
fi
if [ -f "$PROJECT_ROOT/smom-dbis-138/.env" ]; then
  set +e
  source "$PROJECT_ROOT/smom-dbis-138/.env" 2>/dev/null || true
  set -e
fi
# Required variables (environment overrides win; hex values below are the
# chain-138 deployment defaults).
PRIVATE_KEY="${PRIVATE_KEY:-}"
# Bug fix: RPC_CORE_1 comes from config/ip-addresses.conf, which is sourced
# with '|| true' above — default-expand it so 'set -u' cannot abort with an
# "unbound variable" error when that config file is missing.
RPC_URL="${RPC_URL_138:-http://${RPC_CORE_1:-}:8545}"
CCIP_ROUTER="${CCIP_ROUTER:-0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e}"
CCIP_FEE_TOKEN="${CCIP_FEE_TOKEN:-0x514910771AF9Ca656af840dff83E8264EcF986CA}"
WETH9_ADDRESS="0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
WETH10_ADDRESS="0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f"
# Mainnet configuration (CCIP chain selector and peer bridge addresses)
MAINNET_CHAIN_SELECTOR="5009297550715157269"
MAINNET_WETH9_BRIDGE="0x3304b747E565a97ec8AC220b0B6A1f6ffDB837e6"
MAINNET_WETH10_BRIDGE="0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e"
# Validate required inputs before doing any on-chain work.
if [ -z "$PRIVATE_KEY" ]; then
  log_error "PRIVATE_KEY not set"
  exit 1
fi
# Fail fast when no RPC endpoint could be derived at all.
if [ "$RPC_URL" = "http://:8545" ]; then
  log_error "No RPC endpoint configured (set RPC_URL_138 or RPC_CORE_1)"
  exit 1
fi
DEPLOYER=$(cast wallet address "$PRIVATE_KEY" 2>/dev/null || echo "")
if [ -z "$DEPLOYER" ]; then
  log_error "Failed to derive deployer address"
  exit 1
fi
log_section "ChainID 138 Complete Bridge Deployment"
log_info "Deployer: $DEPLOYER"
log_info "RPC: $RPC_URL"
log_info "CCIP Router: $CCIP_ROUTER"
log_info "Fee Token: $CCIP_FEE_TOKEN"
# Pre-flight checks
log_section "Pre-Flight Checks"
# Confirm the RPC endpoint answers and reports the expected chain ID (138).
CHAIN_ID=$(cast chain-id --rpc-url "$RPC_URL" 2>/dev/null || echo "")
if [ "$CHAIN_ID" != "138" ]; then
log_error "Cannot connect to RPC or wrong chain ID: $CHAIN_ID"
exit 1
fi
log_success "RPC connected - Chain ID: $CHAIN_ID"
# Deployer balance in wei; the ETH figure is for display only.
BALANCE=$(cast balance "$DEPLOYER" --rpc-url "$RPC_URL" 2>/dev/null || echo "0")
BALANCE_ETH=$(echo "scale=4; $BALANCE / 1000000000000000000" | bc 2>/dev/null || echo "0")
# Require at least 1 ETH (expressed in wei) to cover deployment gas.
MIN_BALANCE="1000000000000000000"
# Use bc for large number comparison
# (wei amounts can exceed the shell's 64-bit integer range).
BALANCE_CHECK=$(echo "$BALANCE < $MIN_BALANCE" | bc 2>/dev/null || echo "0")
if [ "$BALANCE_CHECK" = "1" ]; then
log_error "Insufficient balance: $BALANCE_ETH ETH"
exit 1
fi
log_success "Balance: $BALANCE_ETH ETH"
# Calculate gas prices: prefer EIP-1559 when the chain reports a base fee,
# otherwise fall back to a single legacy gas price.
GAS_PRICE=$(bash "$PROJECT_ROOT/scripts/calculate-chain138-gas-price.sh" 2>/dev/null || echo "1100000000")
# NOTE(review): JSON is parsed with grep/cut, presumably because jq may be
# unavailable on the target host — confirm before swapping in jq.
BASE_FEE_HEX=$(cast rpc eth_getBlockByNumber latest false --rpc-url "$RPC_URL" 2>/dev/null | grep -o '"baseFeePerGas":"[^"]*"' | cut -d'"' -f4 || echo "0x0")
BASE_FEE_DEC=$(cast --to-dec "$BASE_FEE_HEX" 2>/dev/null || echo "7")
USE_EIP1559=false
if [ -n "$BASE_FEE_HEX" ] && [ "$BASE_FEE_HEX" != "0x0" ] && [ "$BASE_FEE_HEX" != "null" ]; then
  USE_EIP1559=true
  MAX_FEE="$GAS_PRICE"
  # Priority fee: 10% of the headroom above the base fee, floored at
  # 10,000,000 wei (0.01 gwei).
  AVAILABLE=$((MAX_FEE - BASE_FEE_DEC))
  PRIORITY=$((AVAILABLE / 10))
  if [ "$PRIORITY" -lt "10000000" ]; then
    PRIORITY="10000000"
  fi
  # Keep base + priority within the max fee.
  TOTAL=$((BASE_FEE_DEC + PRIORITY))
  if [ "$TOTAL" -gt "$MAX_FEE" ]; then
    PRIORITY=$((MAX_FEE - BASE_FEE_DEC - 1000000))
  fi
  # Bug fix: when the base fee is at/above (or within 1,000,000 wei of) the
  # max fee, the recomputed priority goes non-positive and forge/cast would
  # reject it — fall back to legacy pricing instead.
  if [ "$PRIORITY" -le 0 ]; then
    USE_EIP1559=false
    log_warn "Base fee ($BASE_FEE_DEC) leaves no EIP-1559 headroom under max fee ($MAX_FEE); using legacy gas"
  else
    log_success "EIP-1559: Max=$MAX_FEE, Priority=$PRIORITY, Base=$BASE_FEE_DEC"
  fi
fi
if [ "$USE_EIP1559" = false ]; then
  log_info "Legacy: Gas Price=$GAS_PRICE"
fi
# Enter the Foundry project that contains the deployment scripts.
cd "$PROJECT_ROOT/smom-dbis-138"
# Deploy WETH9 Bridge
log_section "Deploy WETH9 Bridge"
# Bug fix: capture forge's output even when it exits non-zero ('|| true');
# under 'set -e' a failing command substitution would abort the script
# before the explicit error reporting below could run (matches the
# established pattern in the sibling deployment script).
if [ "$USE_EIP1559" = true ]; then
  DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH9Bridge.s.sol:DeployCCIPWETH9Bridge \
    --rpc-url "$RPC_URL" \
    --broadcast \
    --private-key "$PRIVATE_KEY" \
    --with-gas-price "$MAX_FEE" \
    --priority-gas-price "$PRIORITY" \
    --slow \
    -vvv 2>&1 || true)
else
  DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH9Bridge.s.sol:DeployCCIPWETH9Bridge \
    --rpc-url "$RPC_URL" \
    --broadcast \
    --private-key "$PRIVATE_KEY" \
    --with-gas-price "$GAS_PRICE" \
    --legacy \
    --slow \
    -vvv 2>&1 || true)
fi
echo "$DEPLOY_OUTPUT" | tail -50
# Prefer the script's explicit "deployed at:" line; otherwise fall back to
# the last 40-hex-digit address in the log (heuristic — verify the result).
WETH9_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "CCIPWETH9Bridge deployed at: \K0x[a-fA-F0-9]+" | tail -1 || echo "")
if [ -z "$WETH9_BRIDGE" ]; then
  WETH9_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oE "0x[a-fA-F0-9]{40}" | tail -1 || echo "")
fi
if [ -n "$WETH9_BRIDGE" ] && [ ${#WETH9_BRIDGE} -eq 42 ]; then
  log_success "WETH9 Bridge: $WETH9_BRIDGE"
  # Allow the deployment tx to be mined before querying code.
  sleep 5
  CODE_SIZE=$(cast code "$WETH9_BRIDGE" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
  log_info "Code size: $CODE_SIZE bytes"
else
  log_error "Failed to deploy WETH9 Bridge"
  exit 1
fi
# Deploy WETH10 Bridge
log_section "Deploy WETH10 Bridge"
# Bug fix: '|| true' so a failing forge run reaches the error path below
# instead of being aborted by 'set -e' (same fix as the WETH9 step).
if [ "$USE_EIP1559" = true ]; then
  DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH10Bridge.s.sol:DeployCCIPWETH10Bridge \
    --rpc-url "$RPC_URL" \
    --broadcast \
    --private-key "$PRIVATE_KEY" \
    --with-gas-price "$MAX_FEE" \
    --priority-gas-price "$PRIORITY" \
    --slow \
    -vvv 2>&1 || true)
else
  DEPLOY_OUTPUT=$(forge script script/DeployCCIPWETH10Bridge.s.sol:DeployCCIPWETH10Bridge \
    --rpc-url "$RPC_URL" \
    --broadcast \
    --private-key "$PRIVATE_KEY" \
    --with-gas-price "$GAS_PRICE" \
    --legacy \
    --slow \
    -vvv 2>&1 || true)
fi
echo "$DEPLOY_OUTPUT" | tail -50
# Prefer the script's explicit "deployed at:" line; otherwise fall back to
# the last 40-hex-digit address in the log (heuristic — verify the result).
WETH10_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oP "CCIPWETH10Bridge deployed at: \K0x[a-fA-F0-9]+" | tail -1 || echo "")
if [ -z "$WETH10_BRIDGE" ]; then
  WETH10_BRIDGE=$(echo "$DEPLOY_OUTPUT" | grep -oE "0x[a-fA-F0-9]{40}" | tail -1 || echo "")
fi
if [ -n "$WETH10_BRIDGE" ] && [ ${#WETH10_BRIDGE} -eq 42 ]; then
  log_success "WETH10 Bridge: $WETH10_BRIDGE"
  # Allow the deployment tx to be mined before querying code.
  sleep 5
  CODE_SIZE=$(cast code "$WETH10_BRIDGE" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
  log_info "Code size: $CODE_SIZE bytes"
else
  log_error "Failed to deploy WETH10 Bridge"
  exit 1
fi
# Configure Destinations
# Register the Mainnet peer bridges as CCIP destinations on both newly
# deployed bridges.
log_section "Configure Bridge Destinations"
# Bug fix: guard each 'cast send | tail' pipeline with '|| true' — under
# 'set -eo pipefail' a reverted/failed send would otherwise abort the whole
# script here with no diagnostic.
log_info "Configuring WETH9 Bridge with Mainnet..."
if [ "$USE_EIP1559" = true ]; then
  cast send "$WETH9_BRIDGE" \
    "addDestination(uint64,address)" \
    "$MAINNET_CHAIN_SELECTOR" \
    "$MAINNET_WETH9_BRIDGE" \
    --rpc-url "$RPC_URL" \
    --private-key "$PRIVATE_KEY" \
    --max-fee-per-gas "$MAX_FEE" \
    --priority-fee-per-gas "$PRIORITY" \
    -vv 2>&1 | tail -20 || true
else
  cast send "$WETH9_BRIDGE" \
    "addDestination(uint64,address)" \
    "$MAINNET_CHAIN_SELECTOR" \
    "$MAINNET_WETH9_BRIDGE" \
    --rpc-url "$RPC_URL" \
    --private-key "$PRIVATE_KEY" \
    --gas-price "$GAS_PRICE" \
    --legacy \
    -vv 2>&1 | tail -20 || true
fi
log_info "Configuring WETH10 Bridge with Mainnet..."
if [ "$USE_EIP1559" = true ]; then
  cast send "$WETH10_BRIDGE" \
    "addDestination(uint64,address)" \
    "$MAINNET_CHAIN_SELECTOR" \
    "$MAINNET_WETH10_BRIDGE" \
    --rpc-url "$RPC_URL" \
    --private-key "$PRIVATE_KEY" \
    --max-fee-per-gas "$MAX_FEE" \
    --priority-fee-per-gas "$PRIORITY" \
    -vv 2>&1 | tail -20 || true
else
  cast send "$WETH10_BRIDGE" \
    "addDestination(uint64,address)" \
    "$MAINNET_CHAIN_SELECTOR" \
    "$MAINNET_WETH10_BRIDGE" \
    --rpc-url "$RPC_URL" \
    --private-key "$PRIVATE_KEY" \
    --gas-price "$GAS_PRICE" \
    --legacy \
    -vv 2>&1 | tail -20 || true
fi
# Deploy LINK Token
log_section "Deploy LINK Token (CREATE2)"
log_info "Attempting CREATE2 deployment to canonical address..."
# Bug fix: '|| true' keeps a failing forge run from aborting under 'set -e';
# success is judged by the on-chain code-size check below, not by forge's
# exit status.
if [ "$USE_EIP1559" = true ]; then
  LINK_OUTPUT=$(forge script script/DeployLinkToCanonicalAddress.s.sol:DeployLinkToCanonicalAddress \
    --rpc-url "$RPC_URL" \
    --broadcast \
    --private-key "$PRIVATE_KEY" \
    --with-gas-price "$MAX_FEE" \
    --priority-gas-price "$PRIORITY" \
    --slow \
    -vvv 2>&1 || true)
else
  LINK_OUTPUT=$(forge script script/DeployLinkToCanonicalAddress.s.sol:DeployLinkToCanonicalAddress \
    --rpc-url "$RPC_URL" \
    --broadcast \
    --private-key "$PRIVATE_KEY" \
    --with-gas-price "$GAS_PRICE" \
    --legacy \
    --slow \
    -vvv 2>&1 || true)
fi
echo "$LINK_OUTPUT" | tail -30
# Canonical mainnet LINK address; the CREATE2 script targets the same
# address on chain 138.
LINK_ADDRESS="0x514910771AF9Ca656af840dff83E8264EcF986CA"
# Allow the deployment tx to be mined before querying code.
sleep 5
CODE_SIZE=$(cast code "$LINK_ADDRESS" --rpc-url "$RPC_URL" 2>/dev/null | wc -c || echo "0")
if [ "$CODE_SIZE" -gt 1000 ]; then
  log_success "LINK Token deployed: $LINK_ADDRESS ($CODE_SIZE bytes)"
else
  log_warn "LINK Token not deployed (code size: $CODE_SIZE bytes)"
fi
# Final Summary
log_section "Deployment Complete"
echo "Deployed Addresses:"
log_success "WETH9 Bridge: $WETH9_BRIDGE"
log_success "WETH10 Bridge: $WETH10_BRIDGE"
# CODE_SIZE still holds the LINK token bytecode size from the previous step.
if [ "$CODE_SIZE" -gt 1000 ]; then
  log_success "LINK Token: $LINK_ADDRESS"
else
  log_warn "LINK Token: Not deployed"
fi
# Save to file
# Bug fix: capture the timestamped filename once so the redirect target is
# quoted and the closing message prints the exact path instead of a glob.
OUT_FILE="/tmp/chain138-deployed-addresses-$(date +%Y%m%d-%H%M%S).txt"
cat > "$OUT_FILE" <<EOF
# ChainID 138 Deployed Contract Addresses
# Date: $(date)
WETH9_BRIDGE=$WETH9_BRIDGE
WETH10_BRIDGE=$WETH10_BRIDGE
LINK_TOKEN=$LINK_ADDRESS
CCIP_ROUTER=$CCIP_ROUTER
CCIP_FEE_TOKEN=$CCIP_FEE_TOKEN
# Mainnet Configuration
MAINNET_CHAIN_SELECTOR=$MAINNET_CHAIN_SELECTOR
MAINNET_WETH9_BRIDGE=$MAINNET_WETH9_BRIDGE
MAINNET_WETH10_BRIDGE=$MAINNET_WETH10_BRIDGE
EOF
log_success "\n✅ All deployments complete!"
log_info "Addresses saved to: $OUT_FILE"

Some files were not shown because too many files have changed in this diff Show More