docs: Ledger Live integration, contract deploy learnings, NEXT_STEPS updates
Some checks failed
Deploy to Phoenix / deploy (push) Has been cancelled

- ADD_CHAIN138_TO_LEDGER_LIVE: Ledger form done; public code review repo bis-innovations/LedgerLive; init/push commands
- CONTRACT_DEPLOYMENT_RUNBOOK: Chain 138 gas price 1 gwei, 36-addr check, TransactionMirror workaround
- CONTRACT_*: AddressMapper, MirrorManager deployed 2026-02-12; 36-address on-chain check
- NEXT_STEPS_FOR_YOU: Ledger done; steps completable now (no LAN); run-completable-tasks-from-anywhere
- MASTER_INDEX, OPERATOR_OPTIONAL, SMART_CONTRACTS_INVENTORY_SIMPLE: updates
- LEDGER_BLOCKCHAIN_INTEGRATION_COMPLETE: bis-innovations/LedgerLive reference

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
defiQUG
2026-02-12 15:46:57 -08:00
parent cc8dcaf356
commit fbda1b4beb
5114 changed files with 498901 additions and 4567 deletions

View File

@@ -0,0 +1,122 @@
#!/bin/bash
# Fully automated HA setup for NPMplus
# Uses SSH access and .env credentials to complete all setup steps
set -euo pipefail

# Resolve paths exactly once. This script lives in <project>/scripts/npmplus/,
# so the project root is two levels up. (The previous version computed
# PROJECT_ROOT twice with conflicting values — ".." for the IP config and
# "../.." for .env — so ip-addresses.conf was loaded from the wrong directory.)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Optional IP overrides (e.g. IP_NPMPLUS); a missing file is not fatal.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

if [ ! -f "$PROJECT_ROOT/.env" ]; then
  echo "ERROR: .env file not found. Please create it from .env.example"
  exit 1
fi
# .env may reference unset variables or contain failing commands; relax
# strict mode while sourcing it, then restore.
set +euo pipefail
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -euo pipefail

# Host/VMID parameters, overridable via .env; lab defaults otherwise.
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
SECONDARY_IP="${SECONDARY_IP:-${IP_NPMPLUS:-192.168.11.167}}"
# ANSI color codes used by the log helpers below.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Leveled loggers; each prints a colored tag followed by the message.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error()   { echo -e "${RED}[✗]${NC} $1"; }
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🚀 Automated NPMplus HA Setup"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# Verify both Proxmox hosts are reachable over SSH before doing any work.
log_info "Testing SSH connectivity..."

# check_host <role-label> <address>: abort the run if the host is unreachable.
check_host() {
  local role="$1" host="$2"
  if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$host" "echo 'Connected'" >/dev/null 2>&1; then
    log_error "Cannot connect to $role host ($host)"
    exit 1
  fi
}
check_host primary "$PRIMARY_HOST"
log_success "Primary host accessible"
check_host secondary "$SECONDARY_HOST"
log_success "Secondary host accessible"
echo ""

# run_phase <number> <progress-message> <phase-script>: run one phase script
# from this directory and abort the whole setup on the first failure.
run_phase() {
  local num="$1" msg="$2" script="$3"
  log_info "Phase ${num}: ${msg}"
  bash "$SCRIPT_DIR/$script" || {
    log_error "Phase ${num} failed"
    exit 1
  }
  log_success "Phase ${num} complete"
  echo ""
}

run_phase 1 "Creating secondary NPMplus container..." automate-phase1-create-container.sh
run_phase 2 "Setting up certificate synchronization..." automate-phase2-cert-sync.sh
run_phase 3 "Setting up Keepalived..." automate-phase3-keepalived.sh
run_phase 4 "Syncing configuration to secondary..." automate-phase4-sync-config.sh
run_phase 5 "Setting up monitoring..." automate-phase5-monitoring.sh

log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "✅ HA Setup Complete!"
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Next steps:"
log_info " 1. Test failover: bash scripts/npmplus/test-failover.sh"
log_info " 2. Verify all domains: bash scripts/verify/verify-end-to-end-routing.sh"
log_info " 3. Monitor HA status: bash scripts/npmplus/monitor-ha-status.sh"

View File

@@ -0,0 +1,110 @@
#!/bin/bash
# Fully automated HA setup for NPMplus
# Uses SSH access and .env credentials to complete all setup steps
set -euo pipefail

# This script lives in <project>/scripts/npmplus/; the repo root is two up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# A project-level .env is mandatory here — it supplies host/VMID overrides.
if [[ ! -f "$PROJECT_ROOT/.env" ]]; then
  echo "ERROR: .env file not found. Please create it from .env.example"
  exit 1
fi
# Relax strict mode while sourcing the user-provided .env, then restore it.
set +euo pipefail
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -euo pipefail

# Defaults used when .env does not override them.
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
SECONDARY_IP="${SECONDARY_IP:-192.168.11.167}"
# ANSI color codes used by the log helpers below.
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'

# Leveled loggers; each prints a colored tag followed by the message.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error()   { echo -e "${RED}[✗]${NC} $1"; }
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🚀 Automated NPMplus HA Setup"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# Verify both Proxmox hosts are reachable over SSH before doing any work.
log_info "Testing SSH connectivity..."

# check_host <role-label> <address>: abort the run if the host is unreachable.
check_host() {
  local role="$1" host="$2"
  if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$host" "echo 'Connected'" >/dev/null 2>&1; then
    log_error "Cannot connect to $role host ($host)"
    exit 1
  fi
}
check_host primary "$PRIMARY_HOST"
log_success "Primary host accessible"
check_host secondary "$SECONDARY_HOST"
log_success "Secondary host accessible"
echo ""

# run_phase <number> <progress-message> <phase-script>: run one phase script
# from this directory and abort the whole setup on the first failure.
run_phase() {
  local num="$1" msg="$2" script="$3"
  log_info "Phase ${num}: ${msg}"
  bash "$SCRIPT_DIR/$script" || {
    log_error "Phase ${num} failed"
    exit 1
  }
  log_success "Phase ${num} complete"
  echo ""
}

run_phase 1 "Creating secondary NPMplus container..." automate-phase1-create-container.sh
run_phase 2 "Setting up certificate synchronization..." automate-phase2-cert-sync.sh
run_phase 3 "Setting up Keepalived..." automate-phase3-keepalived.sh
run_phase 4 "Syncing configuration to secondary..." automate-phase4-sync-config.sh
run_phase 5 "Setting up monitoring..." automate-phase5-monitoring.sh

log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
log_success "✅ HA Setup Complete!"
log_success "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Next steps:"
log_info " 1. Test failover: bash scripts/npmplus/test-failover.sh"
log_info " 2. Verify all domains: bash scripts/verify/verify-end-to-end-routing.sh"
log_info " 3. Monitor HA status: bash scripts/npmplus/monitor-ha-status.sh"

View File

@@ -0,0 +1,197 @@
#!/bin/bash
# Phase 1: Create and configure secondary NPMplus container
set -euo pipefail

# Resolve paths once: this script lives in <project>/scripts/npmplus/.
# (Previously PROJECT_ROOT was computed twice with conflicting values —
# ".." for the IP config vs "../.." for .env — so ip-addresses.conf was
# loaded from the wrong directory.)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Optional IP overrides (e.g. IP_NPMPLUS); a missing file is not fatal.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# .env is optional for this phase; relax strict mode while sourcing it.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
SECONDARY_IP="${SECONDARY_IP:-${IP_NPMPLUS:-192.168.11.167}}"
# NOTE: this intentionally shadows the shell's HOSTNAME; it is the name
# given to the new container, not this machine's hostname.
HOSTNAME="npmplus-secondary"
TZ="${TZ:-America/New_York}"
ACME_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
# ANSI color codes for the log helpers.
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly RED='\033[0;31m'
readonly NC='\033[0m'

# Leveled loggers: colored tag + message.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error()   { echo -e "${RED}[✗]${NC} $1"; }
log_info "Creating secondary NPMplus container (VMID $SECONDARY_VMID)..."
# Check if container already exists
# If the VMID is already present on the secondary host, ask the operator
# before destroying it; any answer other than y/Y leaves it untouched.
if ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct status $SECONDARY_VMID 2>/dev/null" >/dev/null 2>&1; then
log_warn "Container VMID $SECONDARY_VMID already exists"
# Interactive prompt: -n 1 reads a single key, -r keeps backslashes literal.
read -p "Delete and recreate? (y/N): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
log_info "Stopping and destroying existing container..."
# Remote '|| true': stop/destroy may legitimately fail if already stopped.
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct stop $SECONDARY_VMID 2>/dev/null || true"
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct destroy $SECONDARY_VMID 2>/dev/null || true"
else
log_info "Skipping container creation"
exit 0
fi
fi
# Check for Alpine template
log_info "Checking for Alpine template..."
# Look for any Alpine LXC template already present in 'local' storage.
# The \$1 escapes preserve awk's $1 for the REMOTE shell.
EXISTING_TEMPLATE=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam list local | grep -i 'alpine' | head -1 | awk '{print \$1}'" 2>/dev/null || echo "")
if [ -n "$EXISTING_TEMPLATE" ]; then
# Extract just the template name (remove storage prefix if present)
TEMPLATE=$(echo "$EXISTING_TEMPLATE" | sed 's|.*/||' | sed 's|^local:vztmpl/||')
log_success "Using existing template: $TEMPLATE"
else
log_info "No Alpine template found locally, checking available templates..."
# Get template name (second column, skip header)
# First preference: an Alpine 3.22 template from 'pveam available'.
TEMPLATE_NAME=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam available | grep -i 'alpine.*3.22' | head -1 | awk '{print \$2}'" 2>/dev/null || echo "")
# The != "template" guard skips the header row if grep matched it.
if [ -n "$TEMPLATE_NAME" ] && [ "$TEMPLATE_NAME" != "template" ]; then
log_info "Downloading template: $TEMPLATE_NAME to local storage"
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam download local $TEMPLATE_NAME" || {
log_error "Failed to download template"
exit 1
}
TEMPLATE="$TEMPLATE_NAME"
else
# Try any Alpine template
TEMPLATE_NAME=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam available | grep -i alpine | head -1 | awk '{print \$2}'" 2>/dev/null || echo "")
if [ -n "$TEMPLATE_NAME" ] && [ "$TEMPLATE_NAME" != "template" ]; then
log_info "Downloading Alpine template: $TEMPLATE_NAME to local storage"
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam download local $TEMPLATE_NAME" || {
log_error "Failed to download template"
exit 1
}
TEMPLATE="$TEMPLATE_NAME"
else
# No template present and none downloadable: cannot proceed.
log_error "No Alpine template found. Please download one manually."
exit 1
fi
fi
fi
# Determine storage (must be lvmthin for containers, and must be active)
# First try: any pvesm row whose type is 'lvmthin' and status is 'active'
# ('.*active' also matches 'inactive', hence the extra grep -v).
STORAGE=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pvesm status | grep -E 'lvmthin.*active' | grep -v 'inactive' | awk '{print \$1}' | head -1" 2>/dev/null || echo "")
if [ -z "$STORAGE" ]; then
# Try specific thin pools that are known to be active
# Fallback: site-specific pool names — TODO confirm these exist on new hosts.
for thin in thin1-r630-02 thin5 thin6; do
STATUS=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pvesm status | grep '^$thin' | awk '{print \$3}'" 2>/dev/null || echo "")
if [ "$STATUS" = "active" ]; then
STORAGE="$thin"
break
fi
done
fi
if [ -z "$STORAGE" ]; then
# No usable storage: show the operator what pvesm reports, then abort.
log_error "No suitable active LVM thin storage found"
log_info "Available storage:"
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pvesm status" 2>/dev/null || true
exit 1
fi
ROOTFS_STORAGE="$STORAGE"
log_info "Using storage: $ROOTFS_STORAGE"
# Create container
log_info "Creating container..."
# Ensure template path is correct
# pct expects "storage:vztmpl/<name>"; add the prefix only if missing.
if [[ "$TEMPLATE" == *"vztmpl/"* ]]; then
TEMPLATE_PATH="$TEMPLATE"
else
TEMPLATE_PATH="local:vztmpl/$TEMPLATE"
fi
# Create the unprivileged container: 1 GiB RAM, 2 cores, 5 GiB rootfs,
# static IP on vmbr0. nesting=1 is required to run Docker inside the CT.
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct create $SECONDARY_VMID \
$TEMPLATE_PATH \
--hostname $HOSTNAME \
--memory 1024 \
--cores 2 \
--rootfs $ROOTFS_STORAGE:5 \
--net0 name=eth0,bridge=vmbr0,ip=$SECONDARY_IP/24,gw=${NETWORK_GATEWAY:-192.168.11.1} \
--unprivileged 1 \
--features nesting=1" || {
log_error "Failed to create container"
exit 1
}
log_success "Container created"
# Start container
log_info "Starting container..."
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct start $SECONDARY_VMID" || {
log_error "Failed to start container"
exit 1
}
# Give the container time to boot and bring networking up before exec'ing in.
sleep 10
log_success "Container started"
# Install NPMplus
log_info "Installing NPMplus..."
# The heredoc below is expanded LOCALLY (unquoted delimiter), so $TZ and
# $ACME_EMAIL are substituted before the script is fed to ash inside the CT.
# Two fixes vs. the original:
#   1. `if ssh ...` replaces a dead `if [ $? -eq 0 ]` check — under
#      `set -e` a failing ssh exited the script before $? was ever tested.
#   2. `\$(seq 1 60)` replaces `{1..60}` — brace expansion is neither
#      performed inside a heredoc nor supported by ash, so the readiness
#      loop previously ran exactly once.
if ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct exec $SECONDARY_VMID -- ash" <<INSTALL_EOF
set -e
apk update
apk add --no-cache tzdata gawk yq docker docker-compose curl bash rsync
rc-service docker start
rc-update add docker default
sleep 5
cd /opt
curl -fsSL "https://raw.githubusercontent.com/ZoeyVid/NPMplus/refs/heads/develop/compose.yaml" -o compose.yaml
# NOTE(review): the select() below compares literal strings, not globs - confirm intent.
yq -i "
.services.npmplus.environment |=
(map(select(. != \"TZ=*\" and . != \"ACME_EMAIL=*\")) +
[\"TZ=$TZ\", \"ACME_EMAIL=$ACME_EMAIL\"])
" compose.yaml
docker compose up -d
# Wait (up to ~2 minutes) for the NPMplus container to report healthy/up.
for i in \$(seq 1 60); do
if docker ps --filter "name=npmplus" --format "{{.Status}}" | grep -qE "healthy|Up"; then
echo "NPMplus is ready"
break
fi
sleep 2
done
INSTALL_EOF
then
log_success "NPMplus installed and running"
else
log_error "NPMplus installation failed"
exit 1
fi
# Verify network
log_info "Verifying network configuration..."
# Ask the container for its first IP. NOTE(review): busybox `hostname`
# inside Alpine may not support -I — confirm, or switch to `ip -4 addr`.
ACTUAL_IP=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct exec $SECONDARY_VMID -- hostname -I | awk '{print \$1}'" || echo "")
if [ "$ACTUAL_IP" = "$SECONDARY_IP" ]; then
log_success "Network configured correctly: $ACTUAL_IP"
else
# Non-fatal: slow boot or DHCP fallback can cause a transient mismatch.
log_warn "IP mismatch: Expected $SECONDARY_IP, got $ACTUAL_IP"
fi
log_success "Phase 1 complete: Secondary container ready"

View File

@@ -0,0 +1,191 @@
#!/bin/bash
# Phase 1: Create and configure secondary NPMplus container
set -euo pipefail

# The script sits in <project>/scripts/npmplus/; project root is two up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# .env is optional here; strict mode is suspended while sourcing it so a
# sloppy .env cannot kill the script, then restored immediately after.
if [[ -f "$PROJECT_ROOT/.env" ]]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

# Target host / container parameters (overridable via .env).
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
SECONDARY_IP="${SECONDARY_IP:-192.168.11.167}"
# Shadows the shell's HOSTNAME on purpose: this is the new CT's hostname.
HOSTNAME="npmplus-secondary"
TZ="${TZ:-America/New_York}"
ACME_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
# ANSI color codes for the log helpers.
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly RED='\033[0;31m'
readonly NC='\033[0m'

# Leveled loggers: colored tag + message.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error()   { echo -e "${RED}[✗]${NC} $1"; }
log_info "Creating secondary NPMplus container (VMID $SECONDARY_VMID)..."
# Check if container already exists
# If the VMID is already present on the secondary host, ask the operator
# before destroying it; any answer other than y/Y leaves it untouched.
if ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct status $SECONDARY_VMID 2>/dev/null" >/dev/null 2>&1; then
log_warn "Container VMID $SECONDARY_VMID already exists"
# Interactive prompt: -n 1 reads a single key, -r keeps backslashes literal.
read -p "Delete and recreate? (y/N): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
log_info "Stopping and destroying existing container..."
# Remote '|| true': stop/destroy may legitimately fail if already stopped.
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct stop $SECONDARY_VMID 2>/dev/null || true"
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct destroy $SECONDARY_VMID 2>/dev/null || true"
else
log_info "Skipping container creation"
exit 0
fi
fi
# Check for Alpine template
log_info "Checking for Alpine template..."
# Look for any Alpine LXC template already present in 'local' storage.
# The \$1 escapes preserve awk's $1 for the REMOTE shell.
EXISTING_TEMPLATE=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam list local | grep -i 'alpine' | head -1 | awk '{print \$1}'" 2>/dev/null || echo "")
if [ -n "$EXISTING_TEMPLATE" ]; then
# Extract just the template name (remove storage prefix if present)
TEMPLATE=$(echo "$EXISTING_TEMPLATE" | sed 's|.*/||' | sed 's|^local:vztmpl/||')
log_success "Using existing template: $TEMPLATE"
else
log_info "No Alpine template found locally, checking available templates..."
# Get template name (second column, skip header)
# First preference: an Alpine 3.22 template from 'pveam available'.
TEMPLATE_NAME=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam available | grep -i 'alpine.*3.22' | head -1 | awk '{print \$2}'" 2>/dev/null || echo "")
# The != "template" guard skips the header row if grep matched it.
if [ -n "$TEMPLATE_NAME" ] && [ "$TEMPLATE_NAME" != "template" ]; then
log_info "Downloading template: $TEMPLATE_NAME to local storage"
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam download local $TEMPLATE_NAME" || {
log_error "Failed to download template"
exit 1
}
TEMPLATE="$TEMPLATE_NAME"
else
# Try any Alpine template
TEMPLATE_NAME=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam available | grep -i alpine | head -1 | awk '{print \$2}'" 2>/dev/null || echo "")
if [ -n "$TEMPLATE_NAME" ] && [ "$TEMPLATE_NAME" != "template" ]; then
log_info "Downloading Alpine template: $TEMPLATE_NAME to local storage"
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pveam download local $TEMPLATE_NAME" || {
log_error "Failed to download template"
exit 1
}
TEMPLATE="$TEMPLATE_NAME"
else
# No template present and none downloadable: cannot proceed.
log_error "No Alpine template found. Please download one manually."
exit 1
fi
fi
fi
# Determine storage (must be lvmthin for containers, and must be active)
# First try: any pvesm row whose type is 'lvmthin' and status is 'active'
# ('.*active' also matches 'inactive', hence the extra grep -v).
STORAGE=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pvesm status | grep -E 'lvmthin.*active' | grep -v 'inactive' | awk '{print \$1}' | head -1" 2>/dev/null || echo "")
if [ -z "$STORAGE" ]; then
# Try specific thin pools that are known to be active
# Fallback: site-specific pool names — TODO confirm these exist on new hosts.
for thin in thin1-r630-02 thin5 thin6; do
STATUS=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pvesm status | grep '^$thin' | awk '{print \$3}'" 2>/dev/null || echo "")
if [ "$STATUS" = "active" ]; then
STORAGE="$thin"
break
fi
done
fi
if [ -z "$STORAGE" ]; then
# No usable storage: show the operator what pvesm reports, then abort.
log_error "No suitable active LVM thin storage found"
log_info "Available storage:"
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pvesm status" 2>/dev/null || true
exit 1
fi
ROOTFS_STORAGE="$STORAGE"
log_info "Using storage: $ROOTFS_STORAGE"
# Create container
log_info "Creating container..."
# Ensure template path is correct
# pct expects "storage:vztmpl/<name>"; add the prefix only if missing.
if [[ "$TEMPLATE" == *"vztmpl/"* ]]; then
TEMPLATE_PATH="$TEMPLATE"
else
TEMPLATE_PATH="local:vztmpl/$TEMPLATE"
fi
# Create the unprivileged container: 1 GiB RAM, 2 cores, 5 GiB rootfs,
# static IP on vmbr0. nesting=1 is required to run Docker inside the CT.
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct create $SECONDARY_VMID \
$TEMPLATE_PATH \
--hostname $HOSTNAME \
--memory 1024 \
--cores 2 \
--rootfs $ROOTFS_STORAGE:5 \
--net0 name=eth0,bridge=vmbr0,ip=$SECONDARY_IP/24,gw=192.168.11.1 \
--unprivileged 1 \
--features nesting=1" || {
log_error "Failed to create container"
exit 1
}
log_success "Container created"
# Start container
log_info "Starting container..."
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct start $SECONDARY_VMID" || {
log_error "Failed to start container"
exit 1
}
# Give the container time to boot and bring networking up before exec'ing in.
sleep 10
log_success "Container started"
# Install NPMplus
log_info "Installing NPMplus..."
# The heredoc below is expanded LOCALLY (unquoted delimiter), so $TZ and
# $ACME_EMAIL are substituted before the script is fed to ash inside the CT.
# Two fixes vs. the original:
#   1. `if ssh ...` replaces a dead `if [ $? -eq 0 ]` check — under
#      `set -e` a failing ssh exited the script before $? was ever tested.
#   2. `\$(seq 1 60)` replaces `{1..60}` — brace expansion is neither
#      performed inside a heredoc nor supported by ash, so the readiness
#      loop previously ran exactly once.
if ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct exec $SECONDARY_VMID -- ash" <<INSTALL_EOF
set -e
apk update
apk add --no-cache tzdata gawk yq docker docker-compose curl bash rsync
rc-service docker start
rc-update add docker default
sleep 5
cd /opt
curl -fsSL "https://raw.githubusercontent.com/ZoeyVid/NPMplus/refs/heads/develop/compose.yaml" -o compose.yaml
# NOTE(review): the select() below compares literal strings, not globs - confirm intent.
yq -i "
.services.npmplus.environment |=
(map(select(. != \"TZ=*\" and . != \"ACME_EMAIL=*\")) +
[\"TZ=$TZ\", \"ACME_EMAIL=$ACME_EMAIL\"])
" compose.yaml
docker compose up -d
# Wait (up to ~2 minutes) for the NPMplus container to report healthy/up.
for i in \$(seq 1 60); do
if docker ps --filter "name=npmplus" --format "{{.Status}}" | grep -qE "healthy|Up"; then
echo "NPMplus is ready"
break
fi
sleep 2
done
INSTALL_EOF
then
log_success "NPMplus installed and running"
else
log_error "NPMplus installation failed"
exit 1
fi
# Verify network
log_info "Verifying network configuration..."
# Ask the container for its first IP. NOTE(review): busybox `hostname`
# inside Alpine may not support -I — confirm, or switch to `ip -4 addr`.
ACTUAL_IP=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct exec $SECONDARY_VMID -- hostname -I | awk '{print \$1}'" || echo "")
if [ "$ACTUAL_IP" = "$SECONDARY_IP" ]; then
log_success "Network configured correctly: $ACTUAL_IP"
else
# Non-fatal: slow boot or DHCP fallback can cause a transient mismatch.
log_warn "IP mismatch: Expected $SECONDARY_IP, got $ACTUAL_IP"
fi
log_success "Phase 1 complete: Secondary container ready"

View File

@@ -0,0 +1,52 @@
#!/bin/bash
# Phase 2: Set up certificate synchronization
set -euo pipefail

# Resolve paths once: this script lives in <project>/scripts/npmplus/.
# (Previously PROJECT_ROOT was computed twice with conflicting values, so
# ip-addresses.conf was loaded from the wrong directory.)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Optional IP overrides; a missing file is not fatal.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Optional .env; relax strict mode while sourcing user-provided content.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
# BUGFIX: log_warn was called below but never defined; under `set -e` the
# resulting "command not found" aborted the script on a best-effort branch.
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }

log_info "Setting up certificate synchronization..."

# Run one sync immediately; a failure here is tolerated because the
# certificates may simply not exist yet on the primary.
log_info "Testing certificate sync..."
bash "$SCRIPT_DIR/sync-certificates.sh" || {
  log_warn "Initial sync failed (may be expected if certificates don't exist yet)"
}

# Install a 5-minute sync cron entry on the primary host (idempotent).
log_info "Setting up automated certificate sync (cron job)..."
CRON_CMD="*/5 * * * * $PROJECT_ROOT/scripts/npmplus/sync-certificates.sh >> /var/log/npmplus-cert-sync.log 2>&1"
if ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "crontab -l 2>/dev/null | grep -q 'sync-certificates.sh'"; then
  log_info "Cron job already exists"
else
  ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "(crontab -l 2>/dev/null; echo '$CRON_CMD') | crontab -"
  log_success "Cron job added to primary host"
fi
log_success "Phase 2 complete: Certificate sync configured"

View File

@@ -0,0 +1,46 @@
#!/bin/bash
# Phase 2: Set up certificate synchronization
set -euo pipefail

# This script lives in <project>/scripts/npmplus/; repo root is two up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Optional .env; relax strict mode while sourcing user-provided content.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
# BUGFIX: log_warn was called below but never defined; under `set -e` the
# resulting "command not found" aborted the script on a best-effort branch.
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }

log_info "Setting up certificate synchronization..."

# Run one sync immediately; a failure here is tolerated because the
# certificates may simply not exist yet on the primary.
log_info "Testing certificate sync..."
bash "$SCRIPT_DIR/sync-certificates.sh" || {
  log_warn "Initial sync failed (may be expected if certificates don't exist yet)"
}

# Install a 5-minute sync cron entry on the primary host (idempotent).
log_info "Setting up automated certificate sync (cron job)..."
CRON_CMD="*/5 * * * * $PROJECT_ROOT/scripts/npmplus/sync-certificates.sh >> /var/log/npmplus-cert-sync.log 2>&1"
if ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "crontab -l 2>/dev/null | grep -q 'sync-certificates.sh'"; then
  log_info "Cron job already exists"
else
  ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "(crontab -l 2>/dev/null; echo '$CRON_CMD') | crontab -"
  log_success "Cron job added to primary host"
fi
log_success "Phase 2 complete: Certificate sync configured"

View File

@@ -0,0 +1,115 @@
#!/bin/bash
# Phase 3: Set up Keepalived
set -euo pipefail

# Resolve paths once (script lives in <project>/scripts/npmplus/).
# Previously PROJECT_ROOT was computed twice with conflicting values, so
# ip-addresses.conf was loaded from the wrong directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Optional IP overrides (e.g. IP_NPMPLUS_ETH0, used for the VIP default).
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Optional .env; relax strict mode while sourcing user-provided content.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
# Shared VRRP auth password; the timestamped fallback is expanded once here,
# so both hosts receive the same value from THIS invocation.
KEEPALIVED_AUTH_PASS="${KEEPALIVED_AUTH_PASS:-npmplus_ha_$(date +%s)}"
# ANSI colors for the loggers.
readonly GREEN='\033[0;32m'
readonly BLUE='\033[0;34m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'

# Leveled loggers: colored tag + message.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_info "Setting up Keepalived..."
# Install Keepalived on both hosts
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
log_info "Installing Keepalived on $host..."
if ssh -o StrictHostKeyChecking=no root@"$host" "command -v keepalived >/dev/null 2>&1"; then
log_info "Keepalived already installed on $host"
else
# A failed install is only a warning so the other host is still processed.
ssh -o StrictHostKeyChecking=no root@"$host" "apt update && apt install -y keepalived" || {
log_warn "Failed to install Keepalived on $host"
continue
}
log_success "Keepalived installed on $host"
fi
done
# Deploy scripts and configs
log_info "Deploying Keepalived configuration..."
# Preferred path: the dedicated deploy script. On failure fall back to a
# manual best-effort deploy — every step below tolerates failure.
bash "$SCRIPT_DIR/deploy-keepalived.sh" || {
log_warn "Deployment script failed, deploying manually..."
# Deploy health check script
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
scp -o StrictHostKeyChecking=no \
"$SCRIPT_DIR/keepalived/check-npmplus-health.sh" \
"$SCRIPT_DIR/keepalived/keepalived-notify.sh" \
root@"$host:/usr/local/bin/" 2>/dev/null || true
ssh -o StrictHostKeyChecking=no root@"$host" \
"chmod +x /usr/local/bin/check-npmplus-health.sh /usr/local/bin/keepalived-notify.sh" 2>/dev/null || true
done
# Deploy configs with auth password
scp -o StrictHostKeyChecking=no \
"$SCRIPT_DIR/keepalived/keepalived-primary.conf" \
root@"$PRIMARY_HOST:/tmp/keepalived.conf" 2>/dev/null || true
scp -o StrictHostKeyChecking=no \
"$SCRIPT_DIR/keepalived/keepalived-secondary.conf" \
root@"$SECONDARY_HOST:/tmp/keepalived.conf" 2>/dev/null || true
# Update auth_pass in configs
# Stamp the shared VRRP auth_pass into each staged config, then move it
# into place on the same host.
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
ssh -o StrictHostKeyChecking=no root@"$host" \
"sed -i 's/auth_pass.*/auth_pass $KEEPALIVED_AUTH_PASS/' /tmp/keepalived.conf && \
mv /tmp/keepalived.conf /etc/keepalived/keepalived.conf" 2>/dev/null || true
done
}
# Start and enable Keepalived
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
log_info "Starting Keepalived on $host..."
ssh -o StrictHostKeyChecking=no root@"$host" \
"systemctl enable keepalived && systemctl restart keepalived" 2>/dev/null || {
log_warn "Failed to start Keepalived on $host"
}
done
# Give VRRP a moment to elect a MASTER before verifying state.
sleep 5
# Verify Keepalived is running
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
if ssh -o StrictHostKeyChecking=no root@"$host" "systemctl is-active keepalived" 2>/dev/null | grep -q "active"; then
log_success "Keepalived running on $host"
else
log_warn "Keepalived not active on $host"
fi
done
# Check VIP ownership — the VIP should normally sit on the primary (MASTER).
VIP="${VIP:-${IP_NPMPLUS_ETH0:-192.168.11.166}}"
# grep -F "$VIP/" treats the dots literally and anchors on the CIDR slash;
# the previous unquoted `grep -q $VIP` used the dots as regex wildcards and
# could substring-match a different address.
if ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "ip addr show vmbr0 2>/dev/null | grep -qF \"$VIP/\""; then
log_success "VIP $VIP is on primary host (expected)"
elif ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "ip addr show vmbr0 2>/dev/null | grep -qF \"$VIP/\""; then
log_warn "VIP $VIP is on secondary host (unexpected, but OK)"
else
log_warn "VIP $VIP not found on either host"
fi
log_success "Phase 3 complete: Keepalived configured"
log_warn "Note: Verify Keepalived auth_pass matches on both hosts"

View File

@@ -0,0 +1,109 @@
#!/bin/bash
# Phase 3: Set up Keepalived
set -euo pipefail

# Script lives in <project>/scripts/npmplus/; repo root is two levels up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Optional .env; suspend strict mode while sourcing it, then restore.
if [[ -f "$PROJECT_ROOT/.env" ]]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
# Shared VRRP auth password; the fallback is unique per run but identical
# for both hosts, since it is expanded exactly once here.
KEEPALIVED_AUTH_PASS="${KEEPALIVED_AUTH_PASS:-npmplus_ha_$(date +%s)}"
# ANSI colors for the loggers.
readonly GREEN='\033[0;32m'
readonly BLUE='\033[0;34m'
readonly YELLOW='\033[1;33m'
readonly NC='\033[0m'

# Leveled loggers: colored tag + message.
log_info()    { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn()    { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_info "Setting up Keepalived..."
# Install Keepalived on both hosts
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
log_info "Installing Keepalived on $host..."
if ssh -o StrictHostKeyChecking=no root@"$host" "command -v keepalived >/dev/null 2>&1"; then
log_info "Keepalived already installed on $host"
else
# A failed install is only a warning so the other host is still processed.
ssh -o StrictHostKeyChecking=no root@"$host" "apt update && apt install -y keepalived" || {
log_warn "Failed to install Keepalived on $host"
continue
}
log_success "Keepalived installed on $host"
fi
done
# Deploy scripts and configs
log_info "Deploying Keepalived configuration..."
# Preferred path: the dedicated deploy script. On failure fall back to a
# manual best-effort deploy — every step below tolerates failure.
bash "$SCRIPT_DIR/deploy-keepalived.sh" || {
log_warn "Deployment script failed, deploying manually..."
# Deploy health check script
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
scp -o StrictHostKeyChecking=no \
"$SCRIPT_DIR/keepalived/check-npmplus-health.sh" \
"$SCRIPT_DIR/keepalived/keepalived-notify.sh" \
root@"$host:/usr/local/bin/" 2>/dev/null || true
ssh -o StrictHostKeyChecking=no root@"$host" \
"chmod +x /usr/local/bin/check-npmplus-health.sh /usr/local/bin/keepalived-notify.sh" 2>/dev/null || true
done
# Deploy configs with auth password
scp -o StrictHostKeyChecking=no \
"$SCRIPT_DIR/keepalived/keepalived-primary.conf" \
root@"$PRIMARY_HOST:/tmp/keepalived.conf" 2>/dev/null || true
scp -o StrictHostKeyChecking=no \
"$SCRIPT_DIR/keepalived/keepalived-secondary.conf" \
root@"$SECONDARY_HOST:/tmp/keepalived.conf" 2>/dev/null || true
# Update auth_pass in configs
# Stamp the shared VRRP auth_pass into each staged config, then move it
# into place on the same host.
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
ssh -o StrictHostKeyChecking=no root@"$host" \
"sed -i 's/auth_pass.*/auth_pass $KEEPALIVED_AUTH_PASS/' /tmp/keepalived.conf && \
mv /tmp/keepalived.conf /etc/keepalived/keepalived.conf" 2>/dev/null || true
done
}
# Start and enable Keepalived
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
log_info "Starting Keepalived on $host..."
ssh -o StrictHostKeyChecking=no root@"$host" \
"systemctl enable keepalived && systemctl restart keepalived" 2>/dev/null || {
log_warn "Failed to start Keepalived on $host"
}
done
# Give VRRP a moment to elect a MASTER before verifying state.
sleep 5
# Verify Keepalived is running
for host in "$PRIMARY_HOST" "$SECONDARY_HOST"; do
if ssh -o StrictHostKeyChecking=no root@"$host" "systemctl is-active keepalived" 2>/dev/null | grep -q "active"; then
log_success "Keepalived running on $host"
else
log_warn "Keepalived not active on $host"
fi
done
# Check VIP ownership — the VIP should normally sit on the primary (MASTER).
VIP="${VIP:-192.168.11.166}"
# grep -F "$VIP/" treats the dots literally and anchors on the CIDR slash;
# the previous unquoted `grep -q $VIP` used the dots as regex wildcards and
# could substring-match a different address.
if ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "ip addr show vmbr0 2>/dev/null | grep -qF \"$VIP/\""; then
log_success "VIP $VIP is on primary host (expected)"
elif ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "ip addr show vmbr0 2>/dev/null | grep -qF \"$VIP/\""; then
log_warn "VIP $VIP is on secondary host (unexpected, but OK)"
else
log_warn "VIP $VIP not found on either host"
fi
log_success "Phase 3 complete: Keepalived configured"
log_warn "Note: Verify Keepalived auth_pass matches on both hosts"

View File

@@ -0,0 +1,38 @@
#!/bin/bash
# Phase 5: Set up monitoring
set -euo pipefail

# Resolve paths once (script lives in <project>/scripts/npmplus/).
# Previously PROJECT_ROOT was computed twice with conflicting values, so
# ip-addresses.conf was loaded from the wrong directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Optional IP overrides; a missing file is not fatal.
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

# Optional .env; relax strict mode while sourcing user-provided content.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"

# Minimal loggers (colored tag + message).
log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }

log_info "Setting up monitoring..."
# Install an idempotent 5-minute HA status cron job on the primary host.
CRON_CMD="*/5 * * * * $PROJECT_ROOT/scripts/npmplus/monitor-ha-status.sh >> /var/log/npmplus-ha-monitor.log 2>&1"
if ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "crontab -l 2>/dev/null | grep -q 'monitor-ha-status.sh'"; then
  log_info "HA monitoring cron job already exists"
else
  ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "(crontab -l 2>/dev/null; echo '$CRON_CMD') | crontab -"
  log_success "HA monitoring cron job added"
fi
log_success "Phase 5 complete: Monitoring configured"

View File

@@ -0,0 +1,32 @@
#!/bin/bash
# Phase 5: Set up monitoring
# Installs a crontab entry on the primary Proxmox host that runs the HA
# status monitor every 5 minutes. Idempotent: an existing entry is kept.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Load operator overrides; .env may not be set -e/-u clean, so relax
# strict mode around the source and restore it afterwards.
if [ -f "$PROJECT_ROOT/.env" ]; then
  set +euo pipefail
  source "$PROJECT_ROOT/.env" 2>/dev/null || true
  set -euo pipefail
fi

PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"

log_info() { echo -e "\033[0;34m[INFO]\033[0m $1"; }
log_success() { echo -e "\033[0;32m[✓]\033[0m $1"; }

log_info "Setting up monitoring..."

# Crontab line installed on the primary host: run the HA monitor every
# 5 minutes, appending to a log file on that host.
cron_entry="*/5 * * * * $PROJECT_ROOT/scripts/npmplus/monitor-ha-status.sh >> /var/log/npmplus-ha-monitor.log 2>&1"

if ! ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "crontab -l 2>/dev/null | grep -q 'monitor-ha-status.sh'"; then
  ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "(crontab -l 2>/dev/null; echo '$cron_entry') | crontab -"
  log_success "HA monitoring cron job added"
else
  log_info "HA monitoring cron job already exists"
fi

log_success "Phase 5 complete: Monitoring configured"

View File

@@ -0,0 +1,83 @@
#!/usr/bin/env bash
# Create NPMplus Alltra/HYBX container (VMID 10235) on r630-01
# See: docs/04-configuration/NPMPLUS_ALLTRA_HYBX_MASTER_PLAN.md
#
# Idempotent: exits 0 if the container already exists. Requires SSH root
# access to the Proxmox host and a Debian/Ubuntu LXC template on it.
# Overridable via env, config/ip-addresses.conf, or .env:
#   NPMPLUS_ALLTRA_HYBX_VMID, PROXMOX_HOST_R630_01, IP_NPMPLUS_ALLTRA_HYBX,
#   TEMPLATE, STORAGE, NETWORK, NETWORK_GATEWAY
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Relax nounset while sourcing .env (it may reference unset vars), then restore.
[ -f "$PROJECT_ROOT/.env" ] && set +u && source "$PROJECT_ROOT/.env" 2>/dev/null || true && set -u
VMID="${NPMPLUS_ALLTRA_HYBX_VMID:-10235}"
HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
IP="${IP_NPMPLUS_ALLTRA_HYBX:-192.168.11.169}"
TEMPLATE="${TEMPLATE:-local:vztmpl/debian-12-standard_12.12-1_amd64.tar.zst}"
STORAGE="${STORAGE:-local-lvm}"
NETWORK="${NETWORK:-vmbr0}"
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
log() { echo -e "${BLUE}[INFO]${NC} $1"; }
success() { echo -e "${GREEN}[✓]${NC} $1"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
error() { echo -e "${RED}[ERROR]${NC} $1"; }
log "Creating NPMplus Alltra/HYBX container (VMID $VMID) on $HOST at $IP..."
# Prints "yes" if a container with this VMID appears in `pct list` on the
# host, "no" otherwise (including when SSH itself fails).
# Note: $VMID is expanded locally (outer double quotes) before SSH sends it.
exists() {
    ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$HOST" "pct list 2>/dev/null | grep -q '^[[:space:]]*$VMID[[:space:]]' && echo yes || echo no" 2>/dev/null || echo "no"
}
# Idempotence guard: never try to re-create an existing VMID.
if [[ "$(exists)" == "yes" ]]; then
    warn "Container $VMID already exists. Skipping creation."
    success "Container $VMID is ready. Proceed to Phase 3 (install NPMplus)."
    exit 0
fi
# Check SSH
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$HOST" "echo ok" >/dev/null 2>&1; then
    error "Cannot SSH to $HOST. Ensure you can reach root@$HOST."
    exit 1
fi
# Check template exists on host
if ! ssh -o StrictHostKeyChecking=no root@"$HOST" "pveam list local 2>/dev/null | grep -q 'debian-12-standard'"; then
    warn "Debian 12 template may not exist. Checking..."
    # Fallback: first Debian/Ubuntu template listed on the host. The first
    # `pveam list` column is the storage-qualified volid (e.g.
    # local:vztmpl/...), so it is usable as TEMPLATE directly.
    TEMPLATE_ALT=$(ssh -o StrictHostKeyChecking=no root@"$HOST" "pveam list local 2>/dev/null | grep -E 'debian|ubuntu' | head -1 | awk '{print \$1}'" || echo "")
    if [ -n "$TEMPLATE_ALT" ]; then
        TEMPLATE="$TEMPLATE_ALT"
        log "Using template: $TEMPLATE"
    else
        error "No Debian/Ubuntu template found. Download one: pveam download local debian-12-standard_12.12-1_amd64.tar.zst"
        exit 1
    fi
fi
log "Creating container..."
# Unprivileged container with nesting enabled (required to run Docker
# inside the LXC); starts immediately and on host boot.
ssh -o StrictHostKeyChecking=no root@"$HOST" "pct create $VMID $TEMPLATE \
    --hostname npmplus-alltra-hybx \
    --memory 2048 \
    --cores 2 \
    --rootfs $STORAGE:10 \
    --net0 name=eth0,bridge=$NETWORK,ip=$IP/24,gw=$GATEWAY \
    --description 'NPMplus Alltra/HYBX - Sentry, RPC, Cacti, Firefly, Fabric, Indy' \
    --start 1 \
    --onboot 1 \
    --unprivileged 1 \
    --features nesting=1" 2>&1 || {
    error "Failed to create container"
    exit 1
}
# Give the container a moment to register, then confirm it exists.
sleep 5
if [[ "$(exists)" == "yes" ]]; then
    success "Container $VMID created and started at $IP"
    log "Next: bash scripts/npmplus/install-npmplus-alltra-hybx.sh"
else
    error "Container creation may have failed. Check: ssh root@$HOST 'pct list'"
    exit 1
fi

View File

@@ -0,0 +1,83 @@
#!/usr/bin/env bash
# Create NPMplus Fourth container (VMID 10236) for dev/Codespaces at 192.168.11.170
# See: docs/04-configuration/DEV_CODESPACES_76_53_10_40.md, DEV_CODESPACES_NEXT_STEPS_CHECKLIST.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
[ -f "$PROJECT_ROOT/.env" ] && set +u && source "$PROJECT_ROOT/.env" 2>/dev/null || true && set -u
VMID="${NPMPLUS_FOURTH_VMID:-10236}"
HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
IP="${IP_NPMPLUS_FOURTH:-192.168.11.170}"
TEMPLATE="${TEMPLATE:-local:vztmpl/debian-12-standard_12.12-1_amd64.tar.zst}"
STORAGE="${STORAGE:-local-lvm}"
NETWORK="${NETWORK:-vmbr0}"
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
log() { echo -e "${BLUE}[INFO]${NC} $1"; }
success() { echo -e "${GREEN}[✓]${NC} $1"; }
warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
error() { echo -e "${RED}[ERROR]${NC} $1"; }
log "Creating NPMplus Fourth (dev/Codespaces) container (VMID $VMID) on $HOST at $IP..."
exists() {
ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$HOST" "pct list 2>/dev/null | grep -q '^[[:space:]]*$VMID[[:space:]]' && echo yes || echo no" 2>/dev/null || echo "no"
}
if [[ "$(exists)" == "yes" ]]; then
warn "Container $VMID already exists. Skipping creation."
success "Container $VMID is ready. Next: install NPMplus and cloudflared (see DEV_CODESPACES_NEXT_STEPS_CHECKLIST.md Phase 1.31.5)."
exit 0
fi
# Check SSH
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$HOST" "echo ok" >/dev/null 2>&1; then
error "Cannot SSH to $HOST. Ensure you can reach root@$HOST."
exit 1
fi
# Check template exists on host
if ! ssh -o StrictHostKeyChecking=no root@"$HOST" "pveam list local 2>/dev/null | grep -q 'debian-12-standard'"; then
warn "Debian 12 template may not exist. Checking..."
TEMPLATE_ALT=$(ssh -o StrictHostKeyChecking=no root@"$HOST" "pveam list local 2>/dev/null | grep -E 'debian|ubuntu' | head -1 | awk '{print \$1}'" || echo "")
if [ -n "$TEMPLATE_ALT" ]; then
TEMPLATE="$TEMPLATE_ALT"
log "Using template: $TEMPLATE"
else
error "No Debian/Ubuntu template found. Download one: pveam download local debian-12-standard_12.12-1_amd64.tar.zst"
exit 1
fi
fi
log "Creating container..."
ssh -o StrictHostKeyChecking=no root@"$HOST" "pct create $VMID $TEMPLATE \
--hostname npmplus-fourth \
--memory 2048 \
--cores 2 \
--rootfs $STORAGE:20 \
--net0 name=eth0,bridge=$NETWORK,ip=$IP/24,gw=$GATEWAY \
--description 'NPMplus Fourth - dev/Codespaces, Gitea, Proxmox admin (76.53.10.40)' \
--start 1 \
--onboot 1 \
--unprivileged 1 \
--features nesting=1" 2>&1 || {
error "Failed to create container"
exit 1
}
sleep 5
if [[ "$(exists)" == "yes" ]]; then
success "Container $VMID created and started at $IP"
log "Next: Install NPMplus (Docker + NPM) in container; then cloudflared. See docs/04-configuration/DEV_CODESPACES_NEXT_STEPS_CHECKLIST.md"
else
error "Container creation may have failed. Check: ssh root@$HOST 'pct list'"
exit 1
fi

View File

@@ -0,0 +1,66 @@
#!/usr/bin/env bash
# Create NPMplus Mifos container (VMID 10237) on r630-02 at 192.168.11.171.
# Tunnel points to this NPMplus; NPMplus proxies mifos.d-bis.org to VMID 5800 (192.168.11.85:80).
# See: docs/04-configuration/MIFOS_NPMPLUS_TUNNEL.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
[ -f "$PROJECT_ROOT/.env" ] && set +u && source "$PROJECT_ROOT/.env" 2>/dev/null || true && set -u
VMID="${NPMPLUS_MIFOS_VMID:-10237}"
HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
IP="${IP_NPMPLUS_MIFOS:-192.168.11.171}"
TEMPLATE="${TEMPLATE:-local:vztmpl/debian-12-standard_12.12-1_amd64.tar.zst}"
STORAGE="${STORAGE_R630_02_NPMPLUS_MIFOS:-thin3}"
NETWORK="${NETWORK:-vmbr0}"
GATEWAY="${NETWORK_GATEWAY:-192.168.11.1}"
log() { echo "[INFO] $1"; }
success() { echo "[OK] $1"; }
warn() { echo "[WARN] $1"; }
error() { echo "[ERROR] $1"; exit 1; }
log "Creating NPMplus Mifos container (VMID $VMID) on $HOST at $IP (tunnel origin to 5800)..."
exists() {
ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$HOST" "pct list 2>/dev/null | grep -q '^[[:space:]]*$VMID[[:space:]]' && echo yes || echo no" 2>/dev/null || echo "no"
}
if [[ "$(exists)" == "yes" ]]; then
warn "Container $VMID already exists. Skipping creation."
success "Next: install NPMplus, add proxy mifos.d-bis.org to http://192.168.11.85:80, install cloudflared. See docs/04-configuration/MIFOS_NPMPLUS_TUNNEL.md"
exit 0
fi
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$HOST" "echo ok" >/dev/null 2>&1; then
error "Cannot SSH to $HOST"
fi
TEMPLATE_ALT=$(ssh -o StrictHostKeyChecking=no root@"$HOST" "pveam list local 2>/dev/null | grep -E 'debian|ubuntu' | head -1 | awk '{print \$1}'" || echo "")
if [ -n "$TEMPLATE_ALT" ] && ! echo "$TEMPLATE" | grep -q "debian-12-standard"; then
TEMPLATE="local:$TEMPLATE_ALT"
log "Using template: $TEMPLATE"
fi
log "Creating container..."
ssh -o StrictHostKeyChecking=no root@"$HOST" "pct create $VMID $TEMPLATE \
--hostname npmplus-mifos \
--memory 2048 \
--cores 2 \
--rootfs $STORAGE:20 \
--net0 name=eth0,bridge=$NETWORK,ip=$IP/24,gw=$GATEWAY \
--description 'NPMplus Mifos - tunnel origin; proxies mifos.d-bis.org to 5800' \
--start 1 \
--onboot 1 \
--unprivileged 1 \
--features nesting=1" 2>&1 || error "Failed to create container"
sleep 5
if [[ "$(exists)" == "yes" ]]; then
success "Container $VMID created and started at $IP"
log "Next: Install NPMplus, add proxy mifos.d-bis.org to http://${MIFOS_IP:-192.168.11.85}:80, install cloudflared. See docs/04-configuration/MIFOS_NPMPLUS_TUNNEL.md"
else
error "Container creation may have failed. Check: ssh root@$HOST 'pct list'"
fi

View File

@@ -0,0 +1,149 @@
#!/bin/bash
# Export primary NPMplus configuration for HA setup.
#
# Produces a timestamped directory under /tmp containing:
#   - database.sql       SQLite dump (when sqlite3 exists inside the container)
#   - database.sqlite    raw database file (always attempted; the dump branch
#                        never creates this file, and the importer prefers it)
#   - proxy_hosts.json   proxy host config via the NPM API (needs NPM_PASSWORD)
#   - certificates.json  certificate metadata via the NPM API
#   - export-summary.txt
#
# Requires SSH root access to PRIMARY_HOST; curl + jq locally for the API
# export. Pair with import-secondary-config.sh.
set -euo pipefail

# This script lives in scripts/npmplus/, so the repo root is two levels up.
# Previously the root was first computed one level up and
# config/ip-addresses.conf sourced from scripts/config/ — a path that does
# not exist. Compute the root once and load both files from it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
if [ -f "$PROJECT_ROOT/.env" ]; then
    # .env may not be set -e/-u clean; relax strict mode while sourcing it.
    set +euo pipefail
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -euo pipefail
fi
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
# Simplified from the redundant nested default
# ${IP_NPMPLUS_ETH0:-${IP_NPMPLUS_ETH0:-...}} — same result, one expansion.
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS_ETH0:-192.168.11.166}:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR="/tmp/npmplus-config-backup-$TIMESTAMP"
mkdir -p "$BACKUP_DIR"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📤 Export Primary NPMplus Configuration"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Export directory: $BACKUP_DIR"
# 1. Export database (copy file directly since sqlite3 may not be available)
log_info "Exporting NPMplus database..."
# Try sqlite3 dump first, fallback to direct file copy
DB_DUMP=$(ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" \
    "pct exec $PRIMARY_VMID -- docker exec npmplus sqlite3 /data/database.sqlite '.dump' 2>&1" || echo "")
if [ -n "$DB_DUMP" ] && ! echo "$DB_DUMP" | grep -q "executable file not found"; then
    echo "$DB_DUMP" > "$BACKUP_DIR/database.sql"
    # stat -f%z is BSD/macOS, stat -c%s is GNU; try both.
    DB_SIZE=$(stat -f%z "$BACKUP_DIR/database.sql" 2>/dev/null || stat -c%s "$BACKUP_DIR/database.sql" 2>/dev/null || echo "0")
    if [ "$DB_SIZE" -gt 100 ]; then
        log_success "Database exported via sqlite3: $BACKUP_DIR/database.sql ($DB_SIZE bytes)"
    else
        log_warn "Database dump is very small ($DB_SIZE bytes), trying direct file copy..."
        # Fall through to direct copy
    fi
fi
# Fallback: Copy database file directly (the dump branch writes database.sql,
# never database.sqlite, so this always runs — the importer prefers the raw file).
if [ ! -f "$BACKUP_DIR/database.sqlite" ]; then
    log_info "Copying database file directly..."
    # Copy from container to Proxmox host temp, then to local. The trailing
    # "|| true" keeps set -e from aborting here — without it a failed copy
    # killed the script and the warning below was unreachable.
    ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" \
        "pct exec $PRIMARY_VMID -- docker cp npmplus:/data/database.sqlite /tmp/db-export.sqlite 2>&1" && \
        scp -o StrictHostKeyChecking=no root@"$PRIMARY_HOST:/tmp/db-export.sqlite" \
        "$BACKUP_DIR/database.sqlite" 2>&1 && \
        ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "rm -f /tmp/db-export.sqlite" 2>&1 || true
    if [ -f "$BACKUP_DIR/database.sqlite" ]; then
        DB_FILE_SIZE=$(stat -f%z "$BACKUP_DIR/database.sqlite" 2>/dev/null || stat -c%s "$BACKUP_DIR/database.sqlite" 2>/dev/null || echo "0")
        if [ "$DB_FILE_SIZE" -gt 0 ]; then
            log_success "Database file copied: $BACKUP_DIR/database.sqlite ($DB_FILE_SIZE bytes)"
        else
            log_warn "Database file is empty (0 bytes) - may be uninitialized"
            # Keep the file anyway for reference
        fi
    else
        log_warn "Database file copy failed - database may be empty or inaccessible"
    fi
fi
# 2. Export proxy hosts and certificates via API
if [ -n "$NPM_PASSWORD" ]; then
    log_info "Exporting proxy hosts and certificates via API..."
    TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
        -H "Content-Type: application/json" \
        -d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}" 2>/dev/null || echo "{}")
    TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
    if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
        # Export proxy hosts
        PROXY_HOSTS_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts" \
            -H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
        echo "$PROXY_HOSTS_JSON" | jq '.' > "$BACKUP_DIR/proxy_hosts.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/proxy_hosts.json"
        PROXY_COUNT=$(echo "$PROXY_HOSTS_JSON" | jq '. | length' 2>/dev/null || echo "0")
        log_success "Proxy hosts exported: $PROXY_COUNT hosts"
        # Export certificates
        CERTIFICATES_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/certificates" \
            -H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
        echo "$CERTIFICATES_JSON" | jq '.' > "$BACKUP_DIR/certificates.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/certificates.json"
        CERT_COUNT=$(echo "$CERTIFICATES_JSON" | jq '. | length' 2>/dev/null || echo "0")
        log_success "Certificates metadata exported: $CERT_COUNT certificates"
    else
        ERROR_MSG=$(echo "$TOKEN_RESPONSE" | jq -r '.error.message // "Unknown error"' 2>/dev/null || echo "$TOKEN_RESPONSE")
        log_error "API authentication failed: $ERROR_MSG"
        log_warn "Continuing with database export only..."
    fi
else
    log_warn "NPM_PASSWORD not set, skipping API export"
    log_warn "Only database will be exported"
fi
# Create summary
cat > "$BACKUP_DIR/export-summary.txt" <<EOF
NPMplus Configuration Export
============================
Date: $(date)
Primary Host: $PRIMARY_HOST
Primary VMID: $PRIMARY_VMID
NPM URL: $NPM_URL
Contents:
- database.sql: SQLite database dump
- proxy_hosts.json: Proxy hosts configuration (if API available)
- certificates.json: Certificates metadata (if API available)
To import to secondary:
bash scripts/npmplus/import-secondary-config.sh $BACKUP_DIR
EOF
log_success "Configuration exported to $BACKUP_DIR"
log_info "Summary: $BACKUP_DIR/export-summary.txt"
echo ""
log_info "Next step: Import to secondary NPMplus"
log_info "  bash scripts/npmplus/import-secondary-config.sh $BACKUP_DIR"

View File

@@ -0,0 +1,143 @@
#!/bin/bash
# Export primary NPMplus configuration for HA setup.
#
# Produces a timestamped directory under /tmp containing:
#   - database.sql       SQLite dump (when sqlite3 exists inside the container)
#   - database.sqlite    raw database file (always attempted; the dump branch
#                        never creates this file, and the importer prefers it)
#   - proxy_hosts.json   proxy host config via the NPM API (needs NPM_PASSWORD)
#   - certificates.json  certificate metadata via the NPM API
#   - export-summary.txt
#
# Requires SSH root access to PRIMARY_HOST; curl + jq locally for the API
# export. Pair with import-secondary-config.sh.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
if [ -f "$PROJECT_ROOT/.env" ]; then
    # .env may not be set -e/-u clean; relax strict mode while sourcing it.
    set +euo pipefail
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -euo pipefail
fi
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
NPM_URL="${NPM_URL:-https://192.168.11.166:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR="/tmp/npmplus-config-backup-$TIMESTAMP"
mkdir -p "$BACKUP_DIR"
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📤 Export Primary NPMplus Configuration"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Export directory: $BACKUP_DIR"
# 1. Export database (copy file directly since sqlite3 may not be available)
log_info "Exporting NPMplus database..."
# Try sqlite3 dump first, fallback to direct file copy
DB_DUMP=$(ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" \
    "pct exec $PRIMARY_VMID -- docker exec npmplus sqlite3 /data/database.sqlite '.dump' 2>&1" || echo "")
if [ -n "$DB_DUMP" ] && ! echo "$DB_DUMP" | grep -q "executable file not found"; then
    echo "$DB_DUMP" > "$BACKUP_DIR/database.sql"
    # stat -f%z is BSD/macOS, stat -c%s is GNU; try both.
    DB_SIZE=$(stat -f%z "$BACKUP_DIR/database.sql" 2>/dev/null || stat -c%s "$BACKUP_DIR/database.sql" 2>/dev/null || echo "0")
    if [ "$DB_SIZE" -gt 100 ]; then
        log_success "Database exported via sqlite3: $BACKUP_DIR/database.sql ($DB_SIZE bytes)"
    else
        log_warn "Database dump is very small ($DB_SIZE bytes), trying direct file copy..."
        # Fall through to direct copy
    fi
fi
# Fallback: Copy database file directly (the dump branch writes database.sql,
# never database.sqlite, so this always runs — the importer prefers the raw file).
if [ ! -f "$BACKUP_DIR/database.sqlite" ]; then
    log_info "Copying database file directly..."
    # Copy from container to Proxmox host temp, then to local. The trailing
    # "|| true" keeps set -e from aborting here — without it a failed copy
    # killed the script and the warning below was unreachable.
    ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" \
        "pct exec $PRIMARY_VMID -- docker cp npmplus:/data/database.sqlite /tmp/db-export.sqlite 2>&1" && \
        scp -o StrictHostKeyChecking=no root@"$PRIMARY_HOST:/tmp/db-export.sqlite" \
        "$BACKUP_DIR/database.sqlite" 2>&1 && \
        ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "rm -f /tmp/db-export.sqlite" 2>&1 || true
    if [ -f "$BACKUP_DIR/database.sqlite" ]; then
        DB_FILE_SIZE=$(stat -f%z "$BACKUP_DIR/database.sqlite" 2>/dev/null || stat -c%s "$BACKUP_DIR/database.sqlite" 2>/dev/null || echo "0")
        if [ "$DB_FILE_SIZE" -gt 0 ]; then
            log_success "Database file copied: $BACKUP_DIR/database.sqlite ($DB_FILE_SIZE bytes)"
        else
            log_warn "Database file is empty (0 bytes) - may be uninitialized"
            # Keep the file anyway for reference
        fi
    else
        log_warn "Database file copy failed - database may be empty or inaccessible"
    fi
fi
# 2. Export proxy hosts and certificates via API
if [ -n "$NPM_PASSWORD" ]; then
    log_info "Exporting proxy hosts and certificates via API..."
    TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
        -H "Content-Type: application/json" \
        -d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}" 2>/dev/null || echo "{}")
    TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
    if [ -n "$TOKEN" ] && [ "$TOKEN" != "null" ]; then
        # Export proxy hosts
        PROXY_HOSTS_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts" \
            -H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
        echo "$PROXY_HOSTS_JSON" | jq '.' > "$BACKUP_DIR/proxy_hosts.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/proxy_hosts.json"
        PROXY_COUNT=$(echo "$PROXY_HOSTS_JSON" | jq '. | length' 2>/dev/null || echo "0")
        log_success "Proxy hosts exported: $PROXY_COUNT hosts"
        # Export certificates
        CERTIFICATES_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/certificates" \
            -H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
        echo "$CERTIFICATES_JSON" | jq '.' > "$BACKUP_DIR/certificates.json" 2>/dev/null || echo "[]" > "$BACKUP_DIR/certificates.json"
        CERT_COUNT=$(echo "$CERTIFICATES_JSON" | jq '. | length' 2>/dev/null || echo "0")
        log_success "Certificates metadata exported: $CERT_COUNT certificates"
    else
        ERROR_MSG=$(echo "$TOKEN_RESPONSE" | jq -r '.error.message // "Unknown error"' 2>/dev/null || echo "$TOKEN_RESPONSE")
        log_error "API authentication failed: $ERROR_MSG"
        log_warn "Continuing with database export only..."
    fi
else
    log_warn "NPM_PASSWORD not set, skipping API export"
    log_warn "Only database will be exported"
fi
# Create summary
cat > "$BACKUP_DIR/export-summary.txt" <<EOF
NPMplus Configuration Export
============================
Date: $(date)
Primary Host: $PRIMARY_HOST
Primary VMID: $PRIMARY_VMID
NPM URL: $NPM_URL
Contents:
- database.sql: SQLite database dump
- proxy_hosts.json: Proxy hosts configuration (if API available)
- certificates.json: Certificates metadata (if API available)
To import to secondary:
bash scripts/npmplus/import-secondary-config.sh $BACKUP_DIR
EOF
log_success "Configuration exported to $BACKUP_DIR"
log_info "Summary: $BACKUP_DIR/export-summary.txt"
echo ""
log_info "Next step: Import to secondary NPMplus"
log_info "  bash scripts/npmplus/import-secondary-config.sh $BACKUP_DIR"

View File

@@ -0,0 +1,26 @@
#!/usr/bin/env bash
# Fix NPMplus (VMID 10233) network: set correct IP and gateway, then start container.
# Run on Proxmox host r630-01 or: ssh root@192.168.11.11 'bash -s' < scripts/npmplus/fix-npmplus-ip-and-gateway.sh
# Uses: IP 192.168.11.167 (UDM Pro), gateway 192.168.11.1.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]:-$0}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# IP overrides come from config/ip-addresses.conf when present.
[ -f "${PROJECT_ROOT}/config/ip-addresses.conf" ] && source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true

vmid="${NPMPLUS_VMID:-10233}"
npm_ip="${IP_NPMPLUS:-192.168.11.167}"
gateway="${NETWORK_GATEWAY:-192.168.11.1}"

# pct only exists on a Proxmox host — refuse to run anywhere else.
command -v pct &>/dev/null || {
    echo "Run this script on the Proxmox host (e.g. ssh root@192.168.11.11)"
    exit 1
}

echo "NPMplus VMID $vmid: set IP=$npm_ip gateway=$gateway and start"

# Best-effort reconfigure + stop (either may fail if already in that state),
# then a hard start which must succeed.
pct set "$vmid" --net0 name=eth0,bridge=vmbr0,ip="${npm_ip}/24",gw="${gateway}" 2>/dev/null || true
pct stop "$vmid" 2>/dev/null || true
sleep 2
pct start "$vmid"

echo "Started. Wait ~30s then run verify-npmplus-running-and-network.sh"

View File

@@ -0,0 +1,223 @@
#!/bin/bash
# Import configuration to secondary NPMplus.
#
# Takes a backup directory produced by export-primary-config.sh (must
# contain database.sqlite and/or database.sql), pushes the database into
# the npmplus docker container inside LXC $SECONDARY_VMID on
# $SECONDARY_HOST, then restarts NPMplus and waits for it to come up.
#
# Usage: import-secondary-config.sh <backup-directory>
set -euo pipefail

# This script lives in scripts/npmplus/, so the repo root is two levels up.
# Previously the root was first computed one level up and
# config/ip-addresses.conf sourced from scripts/config/ — a path that does
# not exist. Compute the root once and load both files from it.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
if [ -f "$PROJECT_ROOT/.env" ]; then
    # .env may not be set -e/-u clean; relax strict mode while sourcing it.
    set +euo pipefail
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -euo pipefail
fi
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
# ${1:-}: with a bare "$1", set -u aborts with "unbound variable" when the
# script is run without arguments, before the usage message below prints.
BACKUP_DIR="${1:-}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
if [ -z "$BACKUP_DIR" ] || [ ! -d "$BACKUP_DIR" ]; then
    log_error "Usage: $0 <backup-directory>"
    log_error "Backup directory must exist and contain database.sql"
    exit 1
fi
if [ ! -f "$BACKUP_DIR/database.sql" ] && [ ! -f "$BACKUP_DIR/database.sqlite" ]; then
    log_error "Neither database.sql nor database.sqlite found in $BACKUP_DIR"
    exit 1
fi
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📥 Import Configuration to Secondary NPMplus"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Secondary Host: $SECONDARY_HOST"
log_info "Secondary VMID: $SECONDARY_VMID"
log_info "Backup Directory: $BACKUP_DIR"
# Check if secondary container is running; try to start it if not.
log_info "Checking secondary container status..."
if ! ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
    "pct status $SECONDARY_VMID 2>/dev/null | grep -q running" 2>/dev/null; then
    log_error "Secondary container (VMID $SECONDARY_VMID) is not running"
    log_info "Attempting to start container..."
    ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct start $SECONDARY_VMID" || {
        log_error "Failed to start container"
        exit 1
    }
    sleep 5
fi
# Stop NPMplus container before importing database
log_info "Stopping NPMplus container for database import..."
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
    "pct exec $SECONDARY_VMID -- docker stop npmplus 2>/dev/null || true"
sleep 3
# Copy database backup to secondary host. Prefer the raw .sqlite file
# (docker cp into place) over the .sql dump (needs sqlite3 in the container).
log_info "Copying database backup to secondary host..."
if [ -f "$BACKUP_DIR/database.sqlite" ]; then
    # Use direct file copy method
    DB_FILE="database.sqlite"
    scp -o StrictHostKeyChecking=no "$BACKUP_DIR/database.sqlite" \
        root@"$SECONDARY_HOST:/tmp/npmplus-database-import.sqlite" || {
        log_error "Failed to copy database file"
        exit 1
    }
    IMPORT_METHOD="file"
elif [ -f "$BACKUP_DIR/database.sql" ]; then
    # Use SQL dump method
    DB_FILE="database.sql"
    scp -o StrictHostKeyChecking=no "$BACKUP_DIR/database.sql" \
        root@"$SECONDARY_HOST:/tmp/npmplus-database-import.sql" || {
        log_error "Failed to copy database backup"
        exit 1
    }
    IMPORT_METHOD="sql"
else
    log_error "No database file found to import"
    exit 1
fi
# Verify file was copied
if [ "$IMPORT_METHOD" = "file" ]; then
    if ! ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
        "test -f /tmp/npmplus-database-import.sqlite"; then
        log_error "Database file not found on secondary host after copy"
        exit 1
    fi
else
    if ! ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
        "test -f /tmp/npmplus-database-import.sql"; then
        log_error "Database file not found on secondary host after copy"
        exit 1
    fi
fi
# Import database. The "|| IMPORT_EXIT_CODE=$?" capture is required:
# without it, set -e aborts the script when the remote command fails and
# the error-handling branch below is unreachable dead code.
log_info "Importing database..."
IMPORT_EXIT_CODE=0
if [ "$IMPORT_METHOD" = "file" ]; then
    # Direct file copy method (heredoc delimiter unquoted so \$ escapes
    # reach the remote shell as literal $).
    IMPORT_RESULT=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct exec $SECONDARY_VMID -- bash" <<IMPORT_EOF
set -e
sleep 2
if ! docker ps -a --format '{{.Names}}' | grep -q '^npmplus$'; then
    echo "ERROR: npmplus container not found"
    exit 1
fi
# Stop container for file copy
if docker ps --format '{{.Names}}' | grep -q '^npmplus$'; then
    docker stop npmplus
    sleep 2
fi
# Copy database file directly
if [ -f /tmp/npmplus-database-import.sqlite ]; then
    docker cp /tmp/npmplus-database-import.sqlite npmplus:/data/database.sqlite
    IMPORT_EXIT=\$?
    rm -f /tmp/npmplus-database-import.sqlite
else
    echo "ERROR: Database file not found"
    exit 1
fi
exit \$IMPORT_EXIT
IMPORT_EOF
    ) || IMPORT_EXIT_CODE=$?
else
    # SQL dump method (quoted delimiter: no local expansion at all).
    IMPORT_RESULT=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct exec $SECONDARY_VMID -- bash" <<'IMPORT_EOF'
set -e
sleep 2
if ! docker ps -a --format '{{.Names}}' | grep -q '^npmplus$'; then
    echo "ERROR: npmplus container not found"
    exit 1
fi
# Start container for import
docker start npmplus
sleep 3
# Try sqlite3, fallback to direct copy if not available
if docker exec npmplus which sqlite3 >/dev/null 2>&1; then
    cat /tmp/npmplus-database-import.sql | docker exec -i npmplus sqlite3 /data/database.sqlite 2>&1
    IMPORT_EXIT=$?
else
    # sqlite3 not available, use direct file copy instead
    docker stop npmplus
    sleep 2
    docker cp /tmp/npmplus-database-import.sql npmplus:/tmp/db.sql
    # This won't work well, so we'll skip SQL import
    echo "WARNING: sqlite3 not available, skipping SQL import"
    IMPORT_EXIT=0
fi
rm -f /tmp/npmplus-database-import.sql
exit $IMPORT_EXIT
IMPORT_EOF
    ) || IMPORT_EXIT_CODE=$?
fi
if [ $IMPORT_EXIT_CODE -eq 0 ]; then
    log_success "Database imported successfully"
else
    log_error "Database import failed (exit code: $IMPORT_EXIT_CODE)"
    log_info "Import output: $IMPORT_RESULT"
    # Don't exit - try to continue
    log_warn "Continuing despite import error..."
fi
# Restart NPMplus
log_info "Restarting NPMplus container..."
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
    "pct exec $SECONDARY_VMID -- docker start npmplus" || {
    log_error "Failed to start NPMplus"
    exit 1
}
# Wait for NPMplus to be ready (poll for up to ~60s)
log_info "Waiting for NPMplus to be ready..."
sleep 10
# Verify NPMplus is running
for i in {1..30}; do
    if ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
        "pct exec $SECONDARY_VMID -- docker ps --filter 'name=npmplus' --format '{{.Status}}' 2>/dev/null" | \
        grep -q "Up\|healthy"; then
        log_success "NPMplus is running"
        break
    fi
    if [ $i -eq 30 ]; then
        log_warn "NPMplus may not be fully ready yet"
    fi
    sleep 2
done
log_success "Configuration import complete"
# Default keeps set -u from aborting when ip-addresses.conf is absent.
log_info "Secondary NPMplus URL: https://${IP_NPMPLUS:-192.168.11.167}:81"
log_warn "Note: Some configuration may need manual verification via UI"
log_warn "Note: Certificates should be synced separately using sync-certificates.sh"

View File

@@ -0,0 +1,217 @@
#!/bin/bash
# Import configuration to secondary NPMplus
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
if [ -f "$PROJECT_ROOT/.env" ]; then
set +euo pipefail
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -euo pipefail
fi
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
BACKUP_DIR="$1"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
if [ -z "$BACKUP_DIR" ] || [ ! -d "$BACKUP_DIR" ]; then
log_error "Usage: $0 <backup-directory>"
log_error "Backup directory must exist and contain database.sql"
exit 1
fi
if [ ! -f "$BACKUP_DIR/database.sql" ] && [ ! -f "$BACKUP_DIR/database.sqlite" ]; then
log_error "Neither database.sql nor database.sqlite found in $BACKUP_DIR"
exit 1
fi
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "📥 Import Configuration to Secondary NPMplus"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
log_info "Secondary Host: $SECONDARY_HOST"
log_info "Secondary VMID: $SECONDARY_VMID"
log_info "Backup Directory: $BACKUP_DIR"
# Check if secondary container is running
log_info "Checking secondary container status..."
if ! ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
"pct status $SECONDARY_VMID 2>/dev/null | grep -q running" 2>/dev/null; then
log_error "Secondary container (VMID $SECONDARY_VMID) is not running"
log_info "Attempting to start container..."
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct start $SECONDARY_VMID" || {
log_error "Failed to start container"
exit 1
}
sleep 5
fi
# Stop NPMplus container before importing database
log_info "Stopping NPMplus container for database import..."
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
"pct exec $SECONDARY_VMID -- docker stop npmplus 2>/dev/null || true"
sleep 3
# Copy database backup to secondary host
log_info "Copying database backup to secondary host..."
if [ -f "$BACKUP_DIR/database.sqlite" ]; then
# Use direct file copy method
DB_FILE="database.sqlite"
scp -o StrictHostKeyChecking=no "$BACKUP_DIR/database.sqlite" \
root@"$SECONDARY_HOST:/tmp/npmplus-database-import.sqlite" || {
log_error "Failed to copy database file"
exit 1
}
IMPORT_METHOD="file"
elif [ -f "$BACKUP_DIR/database.sql" ]; then
# Use SQL dump method
DB_FILE="database.sql"
scp -o StrictHostKeyChecking=no "$BACKUP_DIR/database.sql" \
root@"$SECONDARY_HOST:/tmp/npmplus-database-import.sql" || {
log_error "Failed to copy database backup"
exit 1
}
IMPORT_METHOD="sql"
else
log_error "No database file found to import"
exit 1
fi
# Verify file was copied
if [ "$IMPORT_METHOD" = "file" ]; then
if ! ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
"test -f /tmp/npmplus-database-import.sqlite"; then
log_error "Database file not found on secondary host after copy"
exit 1
fi
else
if ! ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
"test -f /tmp/npmplus-database-import.sql"; then
log_error "Database file not found on secondary host after copy"
exit 1
fi
fi
# Import database
log_info "Importing database..."
if [ "$IMPORT_METHOD" = "file" ]; then
# Direct file copy method
IMPORT_RESULT=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct exec $SECONDARY_VMID -- bash" <<IMPORT_EOF
set -e
sleep 2
if ! docker ps -a --format '{{.Names}}' | grep -q '^npmplus$'; then
echo "ERROR: npmplus container not found"
exit 1
fi
# Stop container for file copy
if docker ps --format '{{.Names}}' | grep -q '^npmplus$'; then
docker stop npmplus
sleep 2
fi
# Copy database file directly
if [ -f /tmp/npmplus-database-import.sqlite ]; then
docker cp /tmp/npmplus-database-import.sqlite npmplus:/data/database.sqlite
IMPORT_EXIT=\$?
rm -f /tmp/npmplus-database-import.sqlite
else
echo "ERROR: Database file not found"
exit 1
fi
exit \$IMPORT_EXIT
IMPORT_EOF
)
else
# SQL dump method
IMPORT_RESULT=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct exec $SECONDARY_VMID -- bash" <<'IMPORT_EOF'
set -e
sleep 2
if ! docker ps -a --format '{{.Names}}' | grep -q '^npmplus$'; then
echo "ERROR: npmplus container not found"
exit 1
fi
# Start container for import
docker start npmplus
sleep 3
# Try sqlite3, fallback to direct copy if not available
if docker exec npmplus which sqlite3 >/dev/null 2>&1; then
cat /tmp/npmplus-database-import.sql | docker exec -i npmplus sqlite3 /data/database.sqlite 2>&1
IMPORT_EXIT=$?
else
# sqlite3 not available, use direct file copy instead
docker stop npmplus
sleep 2
docker cp /tmp/npmplus-database-import.sql npmplus:/tmp/db.sql
# This won't work well, so we'll skip SQL import
echo "WARNING: sqlite3 not available, skipping SQL import"
IMPORT_EXIT=0
fi
rm -f /tmp/npmplus-database-import.sql
exit $IMPORT_EXIT
IMPORT_EOF
)
fi
IMPORT_EXIT_CODE=$?
if [ $IMPORT_EXIT_CODE -eq 0 ]; then
log_success "Database imported successfully"
else
log_error "Database import failed (exit code: $IMPORT_EXIT_CODE)"
log_info "Import output: $IMPORT_RESULT"
# Don't exit - try to continue
log_warn "Continuing despite import error..."
fi
# Restart NPMplus
log_info "Restarting NPMplus container..."
ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
"pct exec $SECONDARY_VMID -- docker start npmplus" || {
log_error "Failed to start NPMplus"
exit 1
}
# Wait for NPMplus to be ready
log_info "Waiting for NPMplus to be ready..."
sleep 10
# Verify NPMplus is running
for i in {1..30}; do
if ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" \
"pct exec $SECONDARY_VMID -- docker ps --filter 'name=npmplus' --format '{{.Status}}' 2>/dev/null" | \
grep -q "Up\|healthy"; then
log_success "NPMplus is running"
break
fi
if [ $i -eq 30 ]; then
log_warn "NPMplus may not be fully ready yet"
fi
sleep 2
done
log_success "Configuration import complete"
log_info "Secondary NPMplus URL: https://192.168.11.167:81"
log_warn "Note: Some configuration may need manual verification via UI"
log_warn "Note: Certificates should be synced separately using sync-certificates.sh"

View File

@@ -0,0 +1,106 @@
#!/usr/bin/env bash
# Install NPMplus (Docker + NPM) in container 10235 (NPMplus Alltra/HYBX)
# See: docs/04-configuration/NPMPLUS_ALLTRA_HYBX_MASTER_PLAN.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Load optional .env; relax nounset while sourcing since it may reference
# unset variables. (Replaces the fragile `[ -f … ] && set +u && … || true && set -u` chain.)
if [ -f "$PROJECT_ROOT/.env" ]; then
    set +u
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -u
fi
VMID="${NPMPLUS_ALLTRA_HYBX_VMID:-10235}"
HOST="${PROXMOX_HOST_R630_01:-192.168.11.11}"
TZ="${TZ:-America/New_York}"
ACME_EMAIL="${NPM_EMAIL:-admin@example.org}"
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
log() { echo -e "${BLUE}[INFO]${NC} $1"; }
success() { echo -e "${GREEN}[✓]${NC} $1"; }
error() { echo -e "${RED}[ERROR]${NC} $1"; }
log "Installing NPMplus in container $VMID on $HOST..."
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$HOST" "pct status $VMID" >/dev/null 2>&1; then
    error "Container $VMID not found or not running. Create it first: bash scripts/npmplus/create-npmplus-alltra-hybx-container.sh"
    exit 1
fi
# Run the installer inside the LXC, capturing the exit status explicitly.
# Fixed: under `set -e` the old `if [ $? -eq 0 ]` after this ssh could never
# observe a failure (the script exited first), leaving the error branch dead.
INSTALL_RC=0
ssh -o StrictHostKeyChecking=no root@"$HOST" "pct exec $VMID -- bash -s" << INSTALL_EOF || INSTALL_RC=$?
set -e
export TZ="$TZ"
export ACME_EMAIL="$ACME_EMAIL"
echo "Installing Docker and NPMplus (Debian)..."
# Debian: install Docker from the official apt repo if not already present
apt-get update -qq
apt-get install -y -qq ca-certificates curl gnupg lsb-release
if ! command -v docker >/dev/null 2>&1; then
  install -m 0755 -d /etc/apt/keyrings
  curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
  chmod a+r /etc/apt/keyrings/docker.gpg
  echo "deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \$(. /etc/os-release && echo \$VERSION_CODENAME) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
  apt-get update -qq
  apt-get install -y -qq docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
fi
systemctl start docker 2>/dev/null || true
systemctl enable docker 2>/dev/null || true
sleep 3
# Write a minimal compose file (upstream has YAML indentation issues)
mkdir -p /opt /opt/npmplus
cd /opt
cat > compose.yaml << COMPOSE_EOF
name: npmplus
services:
  npmplus:
    container_name: npmplus
    image: docker.io/zoeyvid/npmplus:latest
    restart: unless-stopped
    network_mode: host
    volumes:
      - "/opt/npmplus:/data"
    environment:
      - "TZ=$TZ"
      - "ACME_EMAIL=$ACME_EMAIL"
COMPOSE_EOF
# Start
docker compose up -d
# Wait up to ~60s for the container to report healthy/up
for i in \$(seq 1 30); do
  if docker ps --filter "name=npmplus" --format "{{.Status}}" | grep -qE "healthy|Up"; then
    echo "NPMplus ready"
    break
  fi
  sleep 2
done
# Save the generated admin password (printed once in the container logs).
# Renamed from PWD to NPM_PASS so the shell's working-directory variable
# is not clobbered.
CID=\$(docker ps --filter "name=npmplus" --format "{{.ID}}" | head -1)
if [ -n "\$CID" ]; then
  NPM_PASS=\$(docker logs "\$CID" 2>&1 | grep -i "Creating a new user" | tail -1 | grep -oP "password: \\K[^\\s]+" || echo "")
  if [ -n "\$NPM_PASS" ]; then
    echo "username: admin@example.org" > /opt/.npm_pwd
    echo "password: \$NPM_PASS" >> /opt/.npm_pwd
  fi
fi
echo "Install complete"
INSTALL_EOF
if [ "$INSTALL_RC" -eq 0 ]; then
    success "NPMplus installed in container $VMID"
    # NOTE(review): this IP is hard-coded; confirm it matches the container's
    # address in config/ip-addresses.conf
    log "Admin UI: https://192.168.11.169:81"
    log "Get password: ssh root@$HOST \"pct exec $VMID -- cat /opt/.npm_pwd 2>/dev/null\""
else
    error "Installation failed"
    exit 1
fi

View File

@@ -0,0 +1,96 @@
#!/usr/bin/env bash
# Install NPMplus (Docker + NPM) in container 10237 (NPMplus Mifos) on r630-02.
# Run after create-npmplus-mifos-container.sh. See docs/04-configuration/MIFOS_NPMPLUS_TUNNEL.md
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
source "$PROJECT_ROOT/config/ip-addresses.conf" 2>/dev/null || true
# Load optional .env; relax nounset while sourcing since it may reference
# unset variables. (Replaces the fragile `&&`/`||` one-liner.)
if [ -f "$PROJECT_ROOT/.env" ]; then
    set +u
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -u
fi
VMID="${NPMPLUS_MIFOS_VMID:-10237}"
HOST="${PROXMOX_HOST_R630_02:-192.168.11.12}"
IP="${IP_NPMPLUS_MIFOS:-192.168.11.171}"
TZ="${TZ:-America/New_York}"
ACME_EMAIL="${NPM_EMAIL:-admin@example.org}"
log() { echo "[INFO] $1"; }
success() { echo "[OK] $1"; }
error() { echo "[ERROR] $1"; exit 1; }
log "Installing NPMplus in container $VMID on $HOST ($IP)..."
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$HOST" "pct status $VMID" >/dev/null 2>&1; then
    error "Container $VMID not found or not running. Create it first: ./scripts/npmplus/create-npmplus-mifos-container.sh"
fi
# Run the installer inside the LXC, capturing the exit status explicitly.
# Fixed: under `set -e` the old `if [ $? -eq 0 ]` after this ssh could never
# observe a failure (the script exited first), leaving the error branch dead.
INSTALL_RC=0
ssh -o StrictHostKeyChecking=no root@"$HOST" "pct exec $VMID -- bash -s" << INSTALL_EOF || INSTALL_RC=$?
set -e
export TZ="$TZ"
export ACME_EMAIL="$ACME_EMAIL"
echo "Installing Docker and NPMplus (Debian)..."
apt-get update -qq
apt-get install -y -qq ca-certificates curl gnupg lsb-release
if ! command -v docker >/dev/null 2>&1; then
  install -m 0755 -d /etc/apt/keyrings
  curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
  chmod a+r /etc/apt/keyrings/docker.gpg
  echo "deb [arch=\$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \$(. /etc/os-release && echo \$VERSION_CODENAME) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
  apt-get update -qq
  apt-get install -y -qq docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
fi
systemctl start docker 2>/dev/null || true
systemctl enable docker 2>/dev/null || true
sleep 3
# Write a minimal compose file
mkdir -p /opt /opt/npmplus
cd /opt
cat > compose.yaml << COMPOSE_EOF
name: npmplus
services:
  npmplus:
    container_name: npmplus
    image: docker.io/zoeyvid/npmplus:latest
    restart: unless-stopped
    network_mode: host
    volumes:
      - "/opt/npmplus:/data"
    environment:
      - "TZ=$TZ"
      - "ACME_EMAIL=$ACME_EMAIL"
COMPOSE_EOF
docker compose up -d
# Wait up to ~60s for the container to report healthy/up
for i in \$(seq 1 30); do
  if docker ps --filter "name=npmplus" --format "{{.Status}}" | grep -qE "healthy|Up"; then
    echo "NPMplus ready"
    break
  fi
  sleep 2
done
# Save the generated admin password (printed once in the container logs).
# Renamed from PWD to NPM_PASS so the shell's working-directory variable
# is not clobbered.
CID=\$(docker ps --filter "name=npmplus" --format "{{.ID}}" | head -1)
if [ -n "\$CID" ]; then
  NPM_PASS=\$(docker logs "\$CID" 2>&1 | grep -i "Creating a new user" | tail -1 | grep -oP "password: \\K[^\\s]+" || echo "")
  if [ -n "\$NPM_PASS" ]; then
    echo "username: admin@example.org" > /opt/.npm_pwd
    echo "password: \$NPM_PASS" >> /opt/.npm_pwd
  fi
fi
echo "Install complete"
INSTALL_EOF
if [ "$INSTALL_RC" -eq 0 ]; then
    success "NPMplus installed in container $VMID"
    log "Admin UI: https://${IP}:81"
    log "Get password: ssh root@$HOST 'pct exec $VMID -- cat /opt/.npm_pwd 2>/dev/null'"
    log "Add proxy host mifos.d-bis.org -> http://${MIFOS_IP:-192.168.11.85}:80, then point tunnel Service to https://${IP}:443"
else
    error "Installation failed"
fi

View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Keepalived notify hook: record VRRP state transitions in a log file.
# Deploy to /usr/local/bin/keepalived-notify.sh on Proxmox hosts.
set -euo pipefail
# Load IP configuration (best-effort; the defaults below cover a missing file)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
STATE="${1:-unknown}"
LOGFILE="/var/log/keepalived-notify.log"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
VIP="${VIP:-${IP_NPMPLUS_ETH0:-192.168.11.166}}"
# Make sure the log directory exists before appending
mkdir -p "$(dirname "$LOGFILE")"
# Map the reported VRRP state to a log message; unknown states are still recorded.
case "$STATE" in
    "master")
        msg="Transitioned to MASTER - This node now owns VIP $VIP"
        # Optionally: Start services, send alerts, etc.
        # Example: Send alert
        # echo "NPMplus HA: Node $(hostname) is now MASTER" | mail -s "NPMplus HA Master" admin@example.com
        ;;
    "backup")
        msg="Transitioned to BACKUP - Standby mode"
        ;;
    "fault")
        msg="Transitioned to FAULT - Health check failed"
        # Optionally: Send critical alerts
        # echo "NPMplus HA: Node $(hostname) is in FAULT state!" | mail -s "NPMplus HA Fault" admin@example.com
        ;;
    *)
        msg="Unknown state: $STATE"
        ;;
esac
printf '[%s] %s\n' "$TIMESTAMP" "$msg" >> "$LOGFILE"

View File

@@ -0,0 +1,33 @@
#!/bin/bash
# Keepalived notify hook: record VRRP state transitions in a log file.
# Deploy to /usr/local/bin/keepalived-notify.sh on Proxmox hosts.
set -euo pipefail
STATE="${1:-unknown}"
LOGFILE="/var/log/keepalived-notify.log"
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
VIP="${VIP:-192.168.11.166}"
# Make sure the log directory exists before appending
mkdir -p "$(dirname "$LOGFILE")"
# Map the reported VRRP state to a log message; unknown states are still recorded.
case "$STATE" in
    "master")
        msg="Transitioned to MASTER - This node now owns VIP $VIP"
        # Optionally: Start services, send alerts, etc.
        # Example: Send alert
        # echo "NPMplus HA: Node $(hostname) is now MASTER" | mail -s "NPMplus HA Master" admin@example.com
        ;;
    "backup")
        msg="Transitioned to BACKUP - Standby mode"
        ;;
    "fault")
        msg="Transitioned to FAULT - Health check failed"
        # Optionally: Send critical alerts
        # echo "NPMplus HA: Node $(hostname) is in FAULT state!" | mail -s "NPMplus HA Fault" admin@example.com
        ;;
    *)
        msg="Unknown state: $STATE"
        ;;
esac
printf '[%s] %s\n' "$TIMESTAMP" "$msg" >> "$LOGFILE"

View File

@@ -0,0 +1,38 @@
# Keepalived Configuration for Primary Host (r630-01)
# Deploy to: /etc/keepalived/keepalived.conf on primary Proxmox host
#
# Installation:
# 1. Copy this file to /etc/keepalived/keepalived.conf on r630-01
# 2. Update auth_pass with a secure password (must match secondary)
# 3. Ensure check-npmplus-health.sh is in /usr/local/bin/ and executable
# 4. Ensure keepalived-notify.sh is in /usr/local/bin/ and executable
# 5. Start keepalived: systemctl enable keepalived && systemctl start keepalived
# Health probe: run every 5s; 2 consecutive failures (fall) mark the script
# down and apply weight -10 to this node's priority; 2 successes (rise)
# restore it.
vrrp_script chk_npmplus {
    script "/usr/local/bin/check-npmplus-health.sh"
    interval 5
    weight -10
    fall 2
    rise 2
}
vrrp_instance VI_NPMPLUS {
    # Preferred VIP owner: MASTER at priority 110
    state MASTER
    interface vmbr0
    # Must match the secondary host's virtual_router_id (51)
    virtual_router_id 51
    priority 110
    advert_int 1
    authentication {
        auth_type PASS
        # SECURITY: replace this placeholder; must be identical on both hosts
        auth_pass npmplus_ha_2024_change_me
    }
    virtual_ipaddress {
        192.168.11.166/24
    }
    track_script {
        chk_npmplus
    }
    # State-transition hooks: log/alert via keepalived-notify.sh
    notify_master "/usr/local/bin/keepalived-notify.sh master"
    notify_backup "/usr/local/bin/keepalived-notify.sh backup"
    notify_fault "/usr/local/bin/keepalived-notify.sh fault"
}

View File

@@ -0,0 +1,38 @@
# Keepalived Configuration for Secondary Host (r630-02)
# Deploy to: /etc/keepalived/keepalived.conf on secondary Proxmox host
#
# Installation:
# 1. Copy this file to /etc/keepalived/keepalived.conf on r630-02
# 2. Update auth_pass with a secure password (must match primary)
# 3. Ensure check-npmplus-health.sh is in /usr/local/bin/ and executable
# 4. Ensure keepalived-notify.sh is in /usr/local/bin/ and executable
# 5. Start keepalived: systemctl enable keepalived && systemctl start keepalived
vrrp_script chk_npmplus {
script "/usr/local/bin/check-npmplus-health.sh"
interval 5
weight -10
fall 2
rise 2
}
vrrp_instance VI_NPMPLUS {
state BACKUP
interface vmbr0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass npmplus_ha_2024_change_me
}
virtual_ipaddress {
192.168.11.166/24
}
track_script {
chk_npmplus
}
notify_master "/usr/local/bin/keepalived-notify.sh master"
notify_backup "/usr/local/bin/keepalived-notify.sh backup"
notify_fault "/usr/local/bin/keepalived-notify.sh fault"
}

View File

@@ -0,0 +1,113 @@
#!/usr/bin/env bash
# Monitor NPMplus HA health: report which host owns the VIP, whether
# keepalived is active on both hosts, and whether the NPMplus container on
# the VIP owner is running. Appends findings to $LOG_FILE and sends optional
# email/webhook alerts on failure conditions.
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# NOTE(review): PROJECT_ROOT is recomputed one level higher below for .env;
# confirm the ip-addresses.conf path above is intentional.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
if [ -f "$PROJECT_ROOT/.env" ]; then
    set +euo pipefail
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -euo pipefail
fi
VIP="${VIP:-${IP_NPMPLUS_ETH0:-192.168.11.166}}"
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
LOG_FILE="${LOG_FILE:-/tmp/npmplus-ha-monitor.log}"
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
# Best-effort alert fan-out to email and/or webhook; no-op when neither
# ALERT_EMAIL nor ALERT_WEBHOOK is configured, failures are ignored.
send_alert() {
    local msg=$1
    if [ -n "${ALERT_EMAIL:-}" ]; then
        echo "$msg" | mail -s "NPMplus HA Alert" "$ALERT_EMAIL" 2>/dev/null || true
    fi
    if [ -n "${ALERT_WEBHOOK:-}" ]; then
        curl -s -X POST "$ALERT_WEBHOOK" \
            -H "Content-Type: application/json" \
            -d "{\"text\":\"$msg\"}" 2>/dev/null || true
    fi
}
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
# Check who owns VIP. Match the fixed string " <VIP>/" so the dots in the
# address are not treated as regex wildcards and a longer address sharing the
# prefix (e.g. 192.168.11.1660) is not mistaken for the VIP.
VIP_OWNER="UNKNOWN"
if ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$PRIMARY_HOST" "ip addr show vmbr0 2>/dev/null | grep -qF ' $VIP/'" 2>/dev/null; then
    VIP_OWNER="$PRIMARY_HOST"
elif ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$SECONDARY_HOST" "ip addr show vmbr0 2>/dev/null | grep -qF ' $VIP/'" 2>/dev/null; then
    VIP_OWNER="$SECONDARY_HOST"
fi
echo "[$TIMESTAMP] VIP $VIP owner: $VIP_OWNER" >> "$LOG_FILE" 2>&1 || echo "[$TIMESTAMP] VIP $VIP owner: $VIP_OWNER"
# Check Keepalived status on both hosts
PRIMARY_STATUS=$(ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$PRIMARY_HOST" "systemctl is-active keepalived 2>/dev/null" || echo "unknown")
SECONDARY_STATUS=$(ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$SECONDARY_HOST" "systemctl is-active keepalived 2>/dev/null" || echo "unknown")
echo "[$TIMESTAMP] Primary Keepalived: $PRIMARY_STATUS" >> "$LOG_FILE" 2>&1 || echo "[$TIMESTAMP] Primary Keepalived: $PRIMARY_STATUS"
echo "[$TIMESTAMP] Secondary Keepalived: $SECONDARY_STATUS" >> "$LOG_FILE" 2>&1 || echo "[$TIMESTAMP] Secondary Keepalived: $SECONDARY_STATUS"
# Alert if both are down
if [ "$PRIMARY_STATUS" != "active" ] && [ "$SECONDARY_STATUS" != "active" ]; then
    ALERT_MSG="[$TIMESTAMP] ALERT: Both Keepalived instances are down! HA unavailable."
    echo "$ALERT_MSG" >> "$LOG_FILE" 2>&1 || echo "$ALERT_MSG"
    log_error "$ALERT_MSG"
    send_alert "$ALERT_MSG"
fi
# Alert if VIP is not owned by either host
if [ "$VIP_OWNER" = "UNKNOWN" ]; then
    ALERT_MSG="[$TIMESTAMP] ALERT: VIP $VIP is not owned by any host!"
    echo "$ALERT_MSG" >> "$LOG_FILE" 2>&1 || echo "$ALERT_MSG"
    log_error "$ALERT_MSG"
    send_alert "$ALERT_MSG"
fi
# Check NPMplus container status on the VIP owner
if [ "$VIP_OWNER" != "UNKNOWN" ]; then
    if [ "$VIP_OWNER" = "$PRIMARY_HOST" ]; then
        NPMPLUS_VMID="${PRIMARY_VMID:-10233}"
    else
        NPMPLUS_VMID="${SECONDARY_VMID:-10234}"
    fi
    CONTAINER_STATUS=$(ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$VIP_OWNER" \
        "pct status $NPMPLUS_VMID 2>/dev/null | grep -o 'running\|stopped' || echo 'unknown'" || echo "unknown")
    if [ "$CONTAINER_STATUS" != "running" ]; then
        ALERT_MSG="[$TIMESTAMP] ALERT: NPMplus container on $VIP_OWNER (VMID $NPMPLUS_VMID) is $CONTAINER_STATUS"
        echo "$ALERT_MSG" >> "$LOG_FILE" 2>&1 || echo "$ALERT_MSG"
        log_error "$ALERT_MSG"
        send_alert "$ALERT_MSG"
    fi
fi
echo "[$TIMESTAMP] HA status check complete" >> "$LOG_FILE" 2>&1 || echo "[$TIMESTAMP] HA status check complete"

View File

@@ -0,0 +1,102 @@
#!/bin/bash
# Monitor NPMplus HA health: report which host owns the VIP, whether
# keepalived is active on both hosts, and whether the NPMplus container on
# the VIP owner is running. Appends findings to $LOG_FILE and sends optional
# email/webhook alerts on failure conditions.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
if [ -f "$PROJECT_ROOT/.env" ]; then
    set +euo pipefail
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -euo pipefail
fi
VIP="${VIP:-192.168.11.166}"
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
LOG_FILE="${LOG_FILE:-/tmp/npmplus-ha-monitor.log}"
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
# Best-effort alert fan-out to email and/or webhook; no-op when neither
# ALERT_EMAIL nor ALERT_WEBHOOK is configured, failures are ignored.
send_alert() {
    local msg=$1
    if [ -n "${ALERT_EMAIL:-}" ]; then
        echo "$msg" | mail -s "NPMplus HA Alert" "$ALERT_EMAIL" 2>/dev/null || true
    fi
    if [ -n "${ALERT_WEBHOOK:-}" ]; then
        curl -s -X POST "$ALERT_WEBHOOK" \
            -H "Content-Type: application/json" \
            -d "{\"text\":\"$msg\"}" 2>/dev/null || true
    fi
}
TIMESTAMP=$(date '+%Y-%m-%d %H:%M:%S')
# Check who owns VIP. Match the fixed string " <VIP>/" so the dots in the
# address are not treated as regex wildcards and a longer address sharing the
# prefix is not mistaken for the VIP.
VIP_OWNER="UNKNOWN"
if ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$PRIMARY_HOST" "ip addr show vmbr0 2>/dev/null | grep -qF ' $VIP/'" 2>/dev/null; then
    VIP_OWNER="$PRIMARY_HOST"
elif ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$SECONDARY_HOST" "ip addr show vmbr0 2>/dev/null | grep -qF ' $VIP/'" 2>/dev/null; then
    VIP_OWNER="$SECONDARY_HOST"
fi
echo "[$TIMESTAMP] VIP $VIP owner: $VIP_OWNER" >> "$LOG_FILE" 2>&1 || echo "[$TIMESTAMP] VIP $VIP owner: $VIP_OWNER"
# Check Keepalived status on both hosts
PRIMARY_STATUS=$(ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$PRIMARY_HOST" "systemctl is-active keepalived 2>/dev/null" || echo "unknown")
SECONDARY_STATUS=$(ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$SECONDARY_HOST" "systemctl is-active keepalived 2>/dev/null" || echo "unknown")
echo "[$TIMESTAMP] Primary Keepalived: $PRIMARY_STATUS" >> "$LOG_FILE" 2>&1 || echo "[$TIMESTAMP] Primary Keepalived: $PRIMARY_STATUS"
echo "[$TIMESTAMP] Secondary Keepalived: $SECONDARY_STATUS" >> "$LOG_FILE" 2>&1 || echo "[$TIMESTAMP] Secondary Keepalived: $SECONDARY_STATUS"
# Alert if both are down
if [ "$PRIMARY_STATUS" != "active" ] && [ "$SECONDARY_STATUS" != "active" ]; then
    ALERT_MSG="[$TIMESTAMP] ALERT: Both Keepalived instances are down! HA unavailable."
    echo "$ALERT_MSG" >> "$LOG_FILE" 2>&1 || echo "$ALERT_MSG"
    log_error "$ALERT_MSG"
    send_alert "$ALERT_MSG"
fi
# Alert if VIP is not owned by either host (was a TODO; now consistent with
# the other alert paths)
if [ "$VIP_OWNER" = "UNKNOWN" ]; then
    ALERT_MSG="[$TIMESTAMP] ALERT: VIP $VIP is not owned by any host!"
    echo "$ALERT_MSG" >> "$LOG_FILE" 2>&1 || echo "$ALERT_MSG"
    log_error "$ALERT_MSG"
    send_alert "$ALERT_MSG"
fi
# Check NPMplus container status on the VIP owner
if [ "$VIP_OWNER" != "UNKNOWN" ]; then
    if [ "$VIP_OWNER" = "$PRIMARY_HOST" ]; then
        NPMPLUS_VMID="${PRIMARY_VMID:-10233}"
    else
        NPMPLUS_VMID="${SECONDARY_VMID:-10234}"
    fi
    CONTAINER_STATUS=$(ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 root@"$VIP_OWNER" \
        "pct status $NPMPLUS_VMID 2>/dev/null | grep -o 'running\|stopped' || echo 'unknown'" || echo "unknown")
    if [ "$CONTAINER_STATUS" != "running" ]; then
        ALERT_MSG="[$TIMESTAMP] ALERT: NPMplus container on $VIP_OWNER (VMID $NPMPLUS_VMID) is $CONTAINER_STATUS"
        echo "$ALERT_MSG" >> "$LOG_FILE" 2>&1 || echo "$ALERT_MSG"
        log_error "$ALERT_MSG"
        send_alert "$ALERT_MSG"
    fi
fi
echo "[$TIMESTAMP] HA status check complete" >> "$LOG_FILE" 2>&1 || echo "[$TIMESTAMP] HA status check complete"

View File

@@ -0,0 +1,12 @@
# Minimal NPMplus compose definition (written as /opt/compose.yaml inside the LXC).
name: npmplus
services:
  npmplus:
    container_name: npmplus
    # Upstream NPMplus image
    image: docker.io/zoeyvid/npmplus:latest
    restart: unless-stopped
    # Host networking: NPMplus binds its ports directly on the container's
    # network stack, so no port mappings are declared here.
    network_mode: host
    volumes:
      # Persistent NPMplus data (config, database, certificates)
      - "/opt/npmplus:/data"
    environment:
      - "TZ=America/New_York"
      # Email used for ACME certificate registration
      - "ACME_EMAIL=admin@example.org"

View File

@@ -0,0 +1,164 @@
#!/bin/bash
# Synchronize NPMplus certificates from primary to secondary
set -euo pipefail
# Load IP configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
source "${PROJECT_ROOT}/config/ip-addresses.conf" 2>/dev/null || true
# NOTE(review): SCRIPT_DIR/PROJECT_ROOT are recomputed below with a different
# depth (../.. instead of ..), so the ip-addresses.conf path above uses a
# different root than the .env lookup — confirm both paths are intentional.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load optional .env with strict mode relaxed while sourcing it
if [ -f "$PROJECT_ROOT/.env" ]; then
    set +euo pipefail
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -euo pipefail
fi
# Source/target Proxmox hosts and LXC container IDs (overridable via environment)
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
# Detect actual certificate path
# detect_cert_path HOST VMID
#   Echo the host-side directory that holds the NPMplus certbot "live"
#   certificates for the given Proxmox host / LXC container. Detection order:
#     1. `docker volume inspect` inside the container (most reliable)
#     2. Known host-filesystem locations of the npmplus_data volume
#     3. A `find` for fullchain.pem inside the running container, mapped back
#        to the volume mountpoint
#   Always returns 0 (echoing a conventional default when nothing is found) so
#   callers under `set -e` can capture the result with $(...) safely.
detect_cert_path() {
    local host=$1
    local vmid=$2
    local volume_path cert_dir rel_path path
    # Try finding via docker volume inspect (most reliable)
    volume_path=$(ssh -o StrictHostKeyChecking=no root@"$host" \
        "pct exec $vmid -- docker volume inspect npmplus_data --format '{{.Mountpoint}}' 2>/dev/null" || echo "")
    if [ -n "$volume_path" ] && [ "$volume_path" != "null" ]; then
        # Check whether a certbot/live tree exists inside the volume
        if ssh -o StrictHostKeyChecking=no root@"$host" \
            "test -d $volume_path/tls/certbot/live 2>/dev/null" 2>/dev/null; then
            echo "$volume_path/tls/certbot/live"
            return 0
        elif ssh -o StrictHostKeyChecking=no root@"$host" \
            "test -d $volume_path/certbot/live 2>/dev/null" 2>/dev/null; then
            echo "$volume_path/certbot/live"
            return 0
        fi
    fi
    # Try known container-rootfs locations of the Docker volume
    for path in \
        "/var/lib/vz/containers/$vmid/var/lib/docker/volumes/npmplus_data/_data/tls/certbot/live" \
        "/var/lib/vz/containers/$vmid/var/lib/docker/volumes/npmplus_data/_data/certbot/live" \
        "/var/lib/vz/containers/$vmid/var/lib/docker/volumes/npmplus_data/_data/letsencrypt/live"; do
        if ssh -o StrictHostKeyChecking=no root@"$host" "test -d $path 2>/dev/null" 2>/dev/null; then
            echo "$path"
            return 0
        fi
    done
    # Last resort: search for a certificate inside the running container
    cert_dir=$(ssh -o StrictHostKeyChecking=no root@"$host" \
        "pct exec $vmid -- docker exec npmplus find /data -name 'fullchain.pem' -type f 2>/dev/null | head -1 | xargs dirname 2>/dev/null" || echo "")
    if [ -n "$cert_dir" ] && [ -n "$volume_path" ]; then
        # Convert the container path (/data/...) to the host-side volume path
        rel_path=${cert_dir#/data/}
        echo "$volume_path/$rel_path"
        return 0
    fi
    # Fallback. Fixed: this used to `return 1`, which made the $(...) capture
    # at the call site fail and abort the whole script under `set -e` instead
    # of using the default path.
    echo "/var/lib/vz/containers/$vmid/var/lib/docker/volumes/npmplus_data/_data/tls/certbot/live"
    return 0
}
# Resolve certificate paths on both sides (detect_cert_path echoes a
# conventional default when detection fails)
PRIMARY_CERT_PATH=$(detect_cert_path "$PRIMARY_HOST" "$PRIMARY_VMID")
SECONDARY_CERT_PATH=$(detect_cert_path "$SECONDARY_HOST" "$SECONDARY_VMID")
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_info "Starting certificate synchronization from primary to secondary..."
# Check if primary NPMplus is accessible
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$PRIMARY_HOST" "pct status $PRIMARY_VMID 2>/dev/null | grep -q running" 2>/dev/null; then
    log_error "Primary NPMplus container (VMID $PRIMARY_VMID) is not running"
    exit 1
fi
# Check if secondary NPMplus is accessible; try to start it if stopped
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$SECONDARY_HOST" "pct status $SECONDARY_VMID 2>/dev/null | grep -q running" 2>/dev/null; then
    log_warn "Secondary NPMplus container (VMID $SECONDARY_VMID) is not running"
    log_info "Attempting to start secondary container..."
    ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct start $SECONDARY_VMID" || {
        log_error "Failed to start secondary container"
        exit 1
    }
    sleep 5
fi
# Sync certificates from primary to secondary via a local staging directory
# (rsync can't do remote-to-remote directly). mktemp replaces the predictable
# $$-based temp name.
log_info "Syncing certificates..."
TEMP_DIR=$(mktemp -d /tmp/npmplus-cert-sync-XXXXXX)
trap 'rm -rf "$TEMP_DIR"' EXIT
# Track rsync failures explicitly. Fixed: the old `[ ${PIPESTATUS[0]} -eq 0 ]`
# after the if/else inspected the wrong pipeline, and under `set -e -o
# pipefail` a failed rsync aborted the script before that check anyway.
SYNC_RC=0
# Copy from primary to local temp
log_info "Copying certificates from primary to temporary location..."
log_info "Primary certificate path: $PRIMARY_CERT_PATH"
rsync -avz --delete \
    -e "ssh -o StrictHostKeyChecking=no" \
    root@"$PRIMARY_HOST:$PRIMARY_CERT_PATH/" \
    "$TEMP_DIR/" 2>&1 | while IFS= read -r line; do
    log_info "$line"
done || SYNC_RC=$?
# Copy from local temp to secondary
if [ -d "$TEMP_DIR" ] && [ "$(ls -A "$TEMP_DIR" 2>/dev/null)" ]; then
    log_info "Copying certificates from temporary location to secondary..."
    log_info "Secondary certificate path: $SECONDARY_CERT_PATH"
    # Ensure destination directory exists
    ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "mkdir -p $SECONDARY_CERT_PATH" 2>/dev/null || true
    rsync -avz --delete \
        -e "ssh -o StrictHostKeyChecking=no" \
        "$TEMP_DIR/" \
        root@"$SECONDARY_HOST:$SECONDARY_CERT_PATH/" 2>&1 | while IFS= read -r line; do
        log_info "$line"
    done || SYNC_RC=$?
else
    log_warn "No certificates found to sync"
fi
if [ "$SYNC_RC" -eq 0 ]; then
    log_success "Certificate synchronization complete"
    # Verify sync by comparing per-domain directory counts on both sides
    PRIMARY_COUNT=$(ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "find $PRIMARY_CERT_PATH -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l" || echo "0")
    SECONDARY_COUNT=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "find $SECONDARY_CERT_PATH -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l" || echo "0")
    log_info "Primary certificates: $PRIMARY_COUNT directories"
    log_info "Secondary certificates: $SECONDARY_COUNT directories"
    if [ "$PRIMARY_COUNT" = "$SECONDARY_COUNT" ]; then
        log_success "Certificate counts match"
    else
        log_warn "Certificate counts differ - sync may be incomplete"
    fi
else
    log_error "Certificate synchronization failed"
    exit 1
fi

View File

@@ -0,0 +1,158 @@
#!/bin/bash
# Synchronize NPMplus certificates from primary to secondary
set -euo pipefail
# Resolve the project root (two levels up from this script) to locate .env
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
# Load optional .env with strict mode relaxed while sourcing it
if [ -f "$PROJECT_ROOT/.env" ]; then
    set +euo pipefail
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -euo pipefail
fi
# Source/target Proxmox hosts and LXC container IDs (overridable via environment)
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
# Detect actual certificate path
# detect_cert_path HOST VMID
#   Echo the host-side directory that holds the NPMplus certbot "live"
#   certificates for the given Proxmox host / LXC container. Detection order:
#     1. `docker volume inspect` inside the container (most reliable)
#     2. Known host-filesystem locations of the npmplus_data volume
#     3. A `find` for fullchain.pem inside the running container, mapped back
#        to the volume mountpoint
#   Always returns 0 (echoing a conventional default when nothing is found) so
#   callers under `set -e` can capture the result with $(...) safely.
detect_cert_path() {
    local host=$1
    local vmid=$2
    local volume_path cert_dir rel_path path
    # Try finding via docker volume inspect (most reliable)
    volume_path=$(ssh -o StrictHostKeyChecking=no root@"$host" \
        "pct exec $vmid -- docker volume inspect npmplus_data --format '{{.Mountpoint}}' 2>/dev/null" || echo "")
    if [ -n "$volume_path" ] && [ "$volume_path" != "null" ]; then
        # Check whether a certbot/live tree exists inside the volume
        if ssh -o StrictHostKeyChecking=no root@"$host" \
            "test -d $volume_path/tls/certbot/live 2>/dev/null" 2>/dev/null; then
            echo "$volume_path/tls/certbot/live"
            return 0
        elif ssh -o StrictHostKeyChecking=no root@"$host" \
            "test -d $volume_path/certbot/live 2>/dev/null" 2>/dev/null; then
            echo "$volume_path/certbot/live"
            return 0
        fi
    fi
    # Try known container-rootfs locations of the Docker volume
    for path in \
        "/var/lib/vz/containers/$vmid/var/lib/docker/volumes/npmplus_data/_data/tls/certbot/live" \
        "/var/lib/vz/containers/$vmid/var/lib/docker/volumes/npmplus_data/_data/certbot/live" \
        "/var/lib/vz/containers/$vmid/var/lib/docker/volumes/npmplus_data/_data/letsencrypt/live"; do
        if ssh -o StrictHostKeyChecking=no root@"$host" "test -d $path 2>/dev/null" 2>/dev/null; then
            echo "$path"
            return 0
        fi
    done
    # Last resort: search for a certificate inside the running container
    cert_dir=$(ssh -o StrictHostKeyChecking=no root@"$host" \
        "pct exec $vmid -- docker exec npmplus find /data -name 'fullchain.pem' -type f 2>/dev/null | head -1 | xargs dirname 2>/dev/null" || echo "")
    if [ -n "$cert_dir" ] && [ -n "$volume_path" ]; then
        # Convert the container path (/data/...) to the host-side volume path
        rel_path=${cert_dir#/data/}
        echo "$volume_path/$rel_path"
        return 0
    fi
    # Fallback. Fixed: this used to `return 1`, which made the $(...) capture
    # at the call site fail and abort the whole script under `set -e` instead
    # of using the default path.
    echo "/var/lib/vz/containers/$vmid/var/lib/docker/volumes/npmplus_data/_data/tls/certbot/live"
    return 0
}
# Resolve certificate paths on both sides (detect_cert_path echoes a
# conventional default when detection fails)
PRIMARY_CERT_PATH=$(detect_cert_path "$PRIMARY_HOST" "$PRIMARY_VMID")
SECONDARY_CERT_PATH=$(detect_cert_path "$SECONDARY_HOST" "$SECONDARY_VMID")
# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $1"; }
log_error() { echo -e "${RED}[ERROR]${NC} $1"; }
log_success() { echo -e "${GREEN}[SUCCESS]${NC} $1"; }
log_info "Starting certificate synchronization from primary to secondary..."
# Check if primary NPMplus is accessible
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$PRIMARY_HOST" "pct status $PRIMARY_VMID 2>/dev/null | grep -q running" 2>/dev/null; then
    log_error "Primary NPMplus container (VMID $PRIMARY_VMID) is not running"
    exit 1
fi
# Check if secondary NPMplus is accessible; try to start it if stopped
if ! ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 root@"$SECONDARY_HOST" "pct status $SECONDARY_VMID 2>/dev/null | grep -q running" 2>/dev/null; then
    log_warn "Secondary NPMplus container (VMID $SECONDARY_VMID) is not running"
    log_info "Attempting to start secondary container..."
    ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "pct start $SECONDARY_VMID" || {
        log_error "Failed to start secondary container"
        exit 1
    }
    sleep 5
fi
# Sync certificates from primary to secondary via a local staging directory
# (rsync can't do remote-to-remote directly). mktemp replaces the predictable
# $$-based temp name.
log_info "Syncing certificates..."
TEMP_DIR=$(mktemp -d /tmp/npmplus-cert-sync-XXXXXX)
trap 'rm -rf "$TEMP_DIR"' EXIT
# Track rsync failures explicitly. Fixed: the old `[ ${PIPESTATUS[0]} -eq 0 ]`
# after the if/else inspected the wrong pipeline, and under `set -e -o
# pipefail` a failed rsync aborted the script before that check anyway.
SYNC_RC=0
# Copy from primary to local temp
log_info "Copying certificates from primary to temporary location..."
log_info "Primary certificate path: $PRIMARY_CERT_PATH"
rsync -avz --delete \
    -e "ssh -o StrictHostKeyChecking=no" \
    root@"$PRIMARY_HOST:$PRIMARY_CERT_PATH/" \
    "$TEMP_DIR/" 2>&1 | while IFS= read -r line; do
    log_info "$line"
done || SYNC_RC=$?
# Copy from local temp to secondary
if [ -d "$TEMP_DIR" ] && [ "$(ls -A "$TEMP_DIR" 2>/dev/null)" ]; then
    log_info "Copying certificates from temporary location to secondary..."
    log_info "Secondary certificate path: $SECONDARY_CERT_PATH"
    # Ensure destination directory exists
    ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "mkdir -p $SECONDARY_CERT_PATH" 2>/dev/null || true
    rsync -avz --delete \
        -e "ssh -o StrictHostKeyChecking=no" \
        "$TEMP_DIR/" \
        root@"$SECONDARY_HOST:$SECONDARY_CERT_PATH/" 2>&1 | while IFS= read -r line; do
        log_info "$line"
    done || SYNC_RC=$?
else
    log_warn "No certificates found to sync"
fi
if [ "$SYNC_RC" -eq 0 ]; then
    log_success "Certificate synchronization complete"
    # Verify sync by comparing per-domain directory counts on both sides
    PRIMARY_COUNT=$(ssh -o StrictHostKeyChecking=no root@"$PRIMARY_HOST" "find $PRIMARY_CERT_PATH -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l" || echo "0")
    SECONDARY_COUNT=$(ssh -o StrictHostKeyChecking=no root@"$SECONDARY_HOST" "find $SECONDARY_CERT_PATH -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l" || echo "0")
    log_info "Primary certificates: $PRIMARY_COUNT directories"
    log_info "Secondary certificates: $SECONDARY_COUNT directories"
    if [ "$PRIMARY_COUNT" = "$SECONDARY_COUNT" ]; then
        log_success "Certificate counts match"
    else
        log_warn "Certificate counts differ - sync may be incomplete"
    fi
else
    log_error "Certificate synchronization failed"
    exit 1
fi

96
scripts/npmplus/sync-config.sh Executable file
View File

@@ -0,0 +1,96 @@
#!/bin/bash
# Sync NPMplus configuration from primary to secondary (API-based)
# Note: Full automated sync requires shared database or manual replication
#
# Required env (usually via .env): NPM_PASSWORD. Optional overrides:
# PRIMARY_HOST, SECONDARY_HOST, NPM_URL, SECONDARY_URL, NPM_EMAIL.
set -euo pipefail

# Resolve this script's directory once (the original computed it twice with
# conflicting PROJECT_ROOT values — a merge artifact).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Load IP configuration (one level up from this script); optional.
source "${SCRIPT_DIR}/../config/ip-addresses.conf" 2>/dev/null || true

# Load credentials from the repository root .env (two levels up); optional.
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
if [ -f "$PROJECT_ROOT/.env" ]; then
    # .env may contain lines that are not clean under 'set -u'; relax while sourcing.
    set +euo pipefail
    source "$PROJECT_ROOT/.env" 2>/dev/null || true
    set -euo pipefail
fi

PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
# Original nested the same default twice: ${IP_NPMPLUS_ETH0:-${IP_NPMPLUS_ETH0:-...}}.
NPM_URL="${NPM_URL:-https://${IP_NPMPLUS_ETH0:-192.168.11.166}:81}"
# Bare ${IP_NPMPLUS} would abort under 'set -u' when ip-addresses.conf is
# absent; default to the documented secondary IP.
SECONDARY_URL="${SECONDARY_URL:-https://${IP_NPMPLUS:-192.168.11.167}:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }

echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔄 NPMplus Configuration Sync"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

if [ -z "$NPM_PASSWORD" ]; then
    log_error "NPM_PASSWORD not set in .env"
    log_info "Please set NPM_PASSWORD in .env file"
    exit 1
fi

# Authenticate to primary (-k: NPMplus serves a self-signed cert on :81).
log_info "Authenticating to primary NPMplus..."
TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
    -H "Content-Type: application/json" \
    -d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}" 2>/dev/null || echo "{}")
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then
    ERROR_MSG=$(echo "$TOKEN_RESPONSE" | jq -r '.error.message // "Unknown error"' 2>/dev/null || echo "$TOKEN_RESPONSE")
    log_error "Authentication failed: $ERROR_MSG"
    exit 1
fi
log_success "Authenticated to primary NPMplus"

# Export from primary
log_info "Exporting configuration from primary..."
PROXY_HOSTS_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts" \
    -H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
PROXY_COUNT=$(echo "$PROXY_HOSTS_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Exported $PROXY_COUNT proxy hosts from primary"

# Note: Full automated sync would require:
# 1. Shared database (PostgreSQL/MariaDB migration from SQLite)
# 2. Or complex API-based replication script
# 3. Or manual replication via UI
log_warn "Full automated configuration sync is not yet implemented"
log_info "Current options:"
log_info " 1. Use shared database (requires database migration)"
log_info " 2. Manual replication via UI when configuration changes"
log_info " 3. Use export-primary-config.sh and import-secondary-config.sh for initial setup"
log_info ""
log_info "Primary URL: $NPM_URL"
log_info "Secondary URL: $SECONDARY_URL"
log_info ""
log_info "To manually sync:"
log_info " 1. Make changes on primary NPMplus"
log_info " 2. Export configuration: bash scripts/npmplus/export-primary-config.sh"
log_info " 3. Import to secondary: bash scripts/npmplus/import-secondary-config.sh <backup-dir>"

View File

@@ -0,0 +1,90 @@
#!/bin/bash
# Sync NPMplus configuration from primary to secondary (API-based)
# Note: Full automated sync requires shared database or manual replication
#
# This script authenticates to the primary NPMplus API, exports the proxy-host
# list as a sanity check, and then prints manual-sync instructions — it does
# NOT write anything to the secondary instance (see the warning below).
# Requires: curl, jq; NPM_PASSWORD must be set (usually via .env).
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Repository root is two levels above this script (scripts/npmplus/ -> repo).
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
if [ -f "$PROJECT_ROOT/.env" ]; then
# .env may contain lines that are not clean under 'set -u'; relax strict
# mode while sourcing, then restore it.
set +euo pipefail
source "$PROJECT_ROOT/.env" 2>/dev/null || true
set -euo pipefail
fi
# Defaults below are overridden by .env / environment when present.
PRIMARY_HOST="${PRIMARY_HOST:-192.168.11.11}"
PRIMARY_VMID="${PRIMARY_VMID:-10233}"
SECONDARY_HOST="${SECONDARY_HOST:-192.168.11.12}"
SECONDARY_VMID="${SECONDARY_VMID:-10234}"
# :81 is the NPMplus admin/API port on each instance.
NPM_URL="${NPM_URL:-https://192.168.11.166:81}"
SECONDARY_URL="${SECONDARY_URL:-https://192.168.11.167:81}"
NPM_EMAIL="${NPM_EMAIL:-nsatoshi2007@hotmail.com}"
NPM_PASSWORD="${NPM_PASSWORD:-}"
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'
log_info() { echo -e "${BLUE}[INFO]${NC} $1"; }
log_success() { echo -e "${GREEN}[✓]${NC} $1"; }
log_warn() { echo -e "${YELLOW}[⚠]${NC} $1"; }
log_error() { echo -e "${RED}[✗]${NC} $1"; }
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🔄 NPMplus Configuration Sync"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
if [ -z "$NPM_PASSWORD" ]; then
log_error "NPM_PASSWORD not set in .env"
log_info "Please set NPM_PASSWORD in .env file"
exit 1
fi
# Authenticate to primary
# -k: NPMplus serves a self-signed certificate on the admin port.
log_info "Authenticating to primary NPMplus..."
TOKEN_RESPONSE=$(curl -s -k -X POST "$NPM_URL/api/tokens" \
-H "Content-Type: application/json" \
-d "{\"identity\":\"$NPM_EMAIL\",\"secret\":\"$NPM_PASSWORD\"}" 2>/dev/null || echo "{}")
# '// empty' makes jq emit nothing (rather than "null") on a failed login.
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.token // empty' 2>/dev/null || echo "")
if [ -z "$TOKEN" ] || [ "$TOKEN" = "null" ]; then
ERROR_MSG=$(echo "$TOKEN_RESPONSE" | jq -r '.error.message // "Unknown error"' 2>/dev/null || echo "$TOKEN_RESPONSE")
log_error "Authentication failed: $ERROR_MSG"
exit 1
fi
log_success "Authenticated to primary NPMplus"
# Export from primary
# Only the proxy-host list is fetched; it is counted but not persisted here.
log_info "Exporting configuration from primary..."
PROXY_HOSTS_JSON=$(curl -s -k -X GET "$NPM_URL/api/nginx/proxy-hosts" \
-H "Authorization: Bearer $TOKEN" 2>/dev/null || echo "[]")
PROXY_COUNT=$(echo "$PROXY_HOSTS_JSON" | jq '. | length' 2>/dev/null || echo "0")
log_success "Exported $PROXY_COUNT proxy hosts from primary"
# Note: Full automated sync would require:
# 1. Shared database (PostgreSQL/MariaDB migration from SQLite)
# 2. Or complex API-based replication script
# 3. Or manual replication via UI
log_warn "Full automated configuration sync is not yet implemented"
log_info "Current options:"
log_info " 1. Use shared database (requires database migration)"
log_info " 2. Manual replication via UI when configuration changes"
log_info " 3. Use export-primary-config.sh and import-secondary-config.sh for initial setup"
log_info ""
log_info "Primary URL: $NPM_URL"
log_info "Secondary URL: $SECONDARY_URL"
log_info ""
log_info "To manually sync:"
log_info " 1. Make changes on primary NPMplus"
log_info " 2. Export configuration: bash scripts/npmplus/export-primary-config.sh"
log_info " 3. Import to secondary: bash scripts/npmplus/import-secondary-config.sh <backup-dir>"