Initial commit

This commit is contained in:
defiQUG
2025-12-12 14:56:07 -08:00
commit a1466e4005
15 changed files with 2523 additions and 0 deletions

9
.gitmodules vendored Normal file
View File

@@ -0,0 +1,9 @@
[submodule "lib/openzeppelin-contracts"]
path = lib/openzeppelin-contracts
url = https://github.com/OpenZeppelin/openzeppelin-contracts
[submodule "lib/openzeppelin-contracts-upgradeable"]
path = lib/openzeppelin-contracts-upgradeable
url = https://github.com/OpenZeppelin/openzeppelin-contracts-upgradeable
[submodule "lib/forge-std"]
path = lib/forge-std
url = https://github.com/foundry-rs/forge-std

View File

@@ -0,0 +1,263 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.19;
/**
 * @title Oracle Aggregator
 * @notice Chainlink-compatible oracle aggregator for price feeds.
 * @dev Implements round-based oracle updates with access control.
 *      A new round starts when no round exists yet, when the heartbeat
 *      interval has elapsed, or when the submitted answer deviates from
 *      the stored one by at least `deviationThreshold` basis points.
 *      Otherwise the current round is refreshed in place.
 */
contract Aggregator {
    /// @dev One oracle round. `answeredInRound` mirrors the round id to stay
    ///      compatible with Chainlink's AggregatorV3Interface semantics.
    struct Round {
        uint256 answer;
        uint256 startedAt;
        uint256 updatedAt;
        uint256 answeredInRound;
        address transmitter;
    }

    /// @notice Fixed-point precision of answers (Chainlink convention: 8).
    uint8 public constant decimals = 8;
    /// @notice Human-readable feed description, e.g. "ETH/USD".
    string public description;
    /// @notice Aggregator version, for AggregatorV3Interface compatibility.
    uint256 public version = 1;
    /// @notice Id of the most recent round (0 means no round yet).
    uint256 public latestRound;
    /// @notice Round id => round data.
    mapping(uint256 => Round) public rounds;

    // Access control
    address public admin;
    address[] public transmitters;
    mapping(address => bool) public isTransmitter;

    // Round parameters
    /// @notice Maximum age (seconds) of a round before a new one is forced.
    uint256 public heartbeat;
    /// @notice Minimum deviation, in basis points (e.g. 50 = 0.5%), that
    ///         forces a new round.
    uint256 public deviationThreshold;
    bool public paused;

    event AnswerUpdated(
        int256 indexed current,
        uint256 indexed roundId,
        uint256 updatedAt
    );
    event NewRound(
        uint256 indexed roundId,
        address indexed startedBy,
        uint256 startedAt
    );
    event TransmitterAdded(address indexed transmitter);
    event TransmitterRemoved(address indexed transmitter);
    event AdminChanged(address indexed oldAdmin, address indexed newAdmin);
    event HeartbeatUpdated(uint256 oldHeartbeat, uint256 newHeartbeat);
    event DeviationThresholdUpdated(uint256 oldThreshold, uint256 newThreshold);
    event Paused(address account);
    event Unpaused(address account);

    modifier onlyAdmin() {
        require(msg.sender == admin, "Aggregator: only admin");
        _;
    }

    modifier onlyTransmitter() {
        require(isTransmitter[msg.sender], "Aggregator: only transmitter");
        _;
    }

    modifier whenNotPaused() {
        require(!paused, "Aggregator: paused");
        _;
    }

    /**
     * @param _description Human-readable feed description.
     * @param _admin Initial admin (must be non-zero, matching the invariant
     *        `changeAdmin` enforces).
     * @param _heartbeat Max round age in seconds before a new round is forced.
     * @param _deviationThreshold Deviation threshold in basis points.
     */
    constructor(
        string memory _description,
        address _admin,
        uint256 _heartbeat,
        uint256 _deviationThreshold
    ) {
        // changeAdmin() rejects the zero address; enforce the same invariant
        // at construction so the contract can never be born admin-less.
        require(_admin != address(0), "Aggregator: zero address");
        description = _description;
        admin = _admin;
        heartbeat = _heartbeat;
        deviationThreshold = _deviationThreshold;
    }

    /**
     * @notice Update the answer for the current round.
     * @dev Starts a new round when warranted; otherwise refreshes the
     *      current round in place. In both cases the stored answer matches
     *      the value emitted in AnswerUpdated.
     * @param answer New answer value (interpreted with `decimals` precision).
     */
    function updateAnswer(uint256 answer) external virtual onlyTransmitter whenNotPaused {
        uint256 currentRound = latestRound;
        Round storage round = rounds[currentRound];
        // Check if we need to start a new round
        if (round.updatedAt == 0 ||
            block.timestamp >= round.startedAt + heartbeat ||
            shouldUpdate(answer, round.answer)) {
            currentRound = latestRound + 1;
            latestRound = currentRound;
            rounds[currentRound] = Round({
                answer: answer,
                startedAt: block.timestamp,
                updatedAt: block.timestamp,
                answeredInRound: currentRound,
                transmitter: msg.sender
            });
            emit NewRound(currentRound, msg.sender, block.timestamp);
        } else {
            // Refresh the existing round. The answer must be stored too:
            // previously only updatedAt/transmitter were written, leaving
            // latestAnswer() stale while AnswerUpdated reported the new value.
            round.answer = answer;
            round.updatedAt = block.timestamp;
            round.transmitter = msg.sender;
        }
        emit AnswerUpdated(int256(answer), currentRound, block.timestamp);
    }

    /**
     * @notice Check whether `newAnswer` deviates from `oldAnswer` by at
     *         least `deviationThreshold` basis points.
     * @dev Returns true when oldAnswer is 0 (no meaningful baseline).
     */
    function shouldUpdate(uint256 newAnswer, uint256 oldAnswer) internal view returns (bool) {
        if (oldAnswer == 0) return true;
        uint256 deviation = newAnswer > oldAnswer
            ? ((newAnswer - oldAnswer) * 10000) / oldAnswer
            : ((oldAnswer - newAnswer) * 10000) / oldAnswer;
        return deviation >= deviationThreshold;
    }

    /**
     * @notice Get the latest answer.
     * @dev Returns 0 if no round has been recorded yet.
     */
    function latestAnswer() external view returns (int256) {
        return int256(rounds[latestRound].answer);
    }

    /**
     * @notice Get the latest round data (AggregatorV3Interface-compatible).
     * @dev Round ids are truncated to uint80 for interface compatibility.
     */
    function latestRoundData()
        external
        view
        returns (
            uint80 roundId,
            int256 answer,
            uint256 startedAt,
            uint256 updatedAt,
            uint80 answeredInRound
        )
    {
        Round storage round = rounds[latestRound];
        return (
            uint80(latestRound),
            int256(round.answer),
            round.startedAt,
            round.updatedAt,
            uint80(round.answeredInRound)
        );
    }

    /**
     * @notice Get round data for a specific round.
     * @param _roundId Round to query; reverts if the round was never written.
     */
    function getRoundData(uint80 _roundId)
        external
        view
        returns (
            uint80 roundId,
            int256 answer,
            uint256 startedAt,
            uint256 updatedAt,
            uint80 answeredInRound
        )
    {
        Round storage round = rounds[_roundId];
        require(round.updatedAt > 0, "Aggregator: round not found");
        return (
            _roundId,
            int256(round.answer),
            round.startedAt,
            round.updatedAt,
            uint80(round.answeredInRound)
        );
    }

    /**
     * @notice Add a transmitter authorized to submit answers.
     */
    function addTransmitter(address transmitter) external onlyAdmin {
        require(!isTransmitter[transmitter], "Aggregator: already transmitter");
        isTransmitter[transmitter] = true;
        transmitters.push(transmitter);
        emit TransmitterAdded(transmitter);
    }

    /**
     * @notice Remove a transmitter.
     * @dev Swap-and-pop removal; transmitter ordering is not preserved.
     */
    function removeTransmitter(address transmitter) external onlyAdmin {
        require(isTransmitter[transmitter], "Aggregator: not transmitter");
        isTransmitter[transmitter] = false;
        // Remove from array
        for (uint256 i = 0; i < transmitters.length; i++) {
            if (transmitters[i] == transmitter) {
                transmitters[i] = transmitters[transmitters.length - 1];
                transmitters.pop();
                break;
            }
        }
        emit TransmitterRemoved(transmitter);
    }

    /**
     * @notice Transfer admin rights to a new (non-zero) address.
     */
    function changeAdmin(address newAdmin) external onlyAdmin {
        require(newAdmin != address(0), "Aggregator: zero address");
        address oldAdmin = admin;
        admin = newAdmin;
        emit AdminChanged(oldAdmin, newAdmin);
    }

    /**
     * @notice Update the heartbeat interval.
     */
    function updateHeartbeat(uint256 newHeartbeat) external onlyAdmin {
        uint256 oldHeartbeat = heartbeat;
        heartbeat = newHeartbeat;
        emit HeartbeatUpdated(oldHeartbeat, newHeartbeat);
    }

    /**
     * @notice Update the deviation threshold (basis points).
     */
    function updateDeviationThreshold(uint256 newThreshold) external onlyAdmin {
        uint256 oldThreshold = deviationThreshold;
        deviationThreshold = newThreshold;
        emit DeviationThresholdUpdated(oldThreshold, newThreshold);
    }

    /**
     * @notice Pause answer submissions.
     */
    function pause() external onlyAdmin {
        paused = true;
        emit Paused(msg.sender);
    }

    /**
     * @notice Resume answer submissions.
     */
    function unpause() external onlyAdmin {
        paused = false;
        emit Unpaused(msg.sender);
    }

    /**
     * @notice Get the full list of authorized transmitters.
     */
    function getTransmitters() external view returns (address[] memory) {
        return transmitters;
    }
}

View File

@@ -0,0 +1,138 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.19;
import "./Aggregator.sol";
import "../ccip/CCIPSender.sol";
/**
 * @title Oracle Aggregator with CCIP Integration
 * @notice Extends Aggregator with CCIP cross-chain messaging capabilities.
 * @dev Automatically sends oracle updates to other chains via CCIP whenever
 *      a NEW round starts (in-place round refreshes are not broadcast).
 */
contract OracleWithCCIP is Aggregator {
    /// @notice CCIP sender used to push updates cross-chain.
    CCIPSender public ccipSender;
    /// @notice Master switch for cross-chain broadcasting.
    bool public ccipEnabled;

    // NOTE(review): declared but never written or read by this contract;
    // destinations are sourced from ccipSender.getDestinationChains().
    // Kept for storage-layout/ABI compatibility.
    uint64[] public ccipDestinationChains;

    event CCIPUpdateSent(
        bytes32 indexed messageId,
        uint64 indexed destinationChainSelector,
        uint256 answer,
        uint256 roundId
    );
    event CCIPEnabled(bool enabled);
    event CCIPSenderUpdated(address oldSender, address newSender);

    /**
     * @param _ccipSender Address of the CCIPSender contract (non-zero).
     *        Remaining parameters are forwarded to the Aggregator base.
     */
    constructor(
        string memory _description,
        address _admin,
        uint256 _heartbeat,
        uint256 _deviationThreshold,
        address _ccipSender
    ) Aggregator(_description, _admin, _heartbeat, _deviationThreshold) {
        require(_ccipSender != address(0), "OracleWithCCIP: zero sender address");
        ccipSender = CCIPSender(_ccipSender);
        ccipEnabled = true;
    }

    /**
     * @notice Update the answer and, on a new round, broadcast it to all
     *         CCIP destinations.
     * @param answer New answer value.
     */
    function updateAnswer(uint256 answer) external override onlyTransmitter whenNotPaused {
        uint256 currentRound = latestRound;
        Round storage round = rounds[currentRound];
        // Check if we need to start a new round
        if (round.updatedAt == 0 ||
            block.timestamp >= round.startedAt + heartbeat ||
            shouldUpdate(answer, round.answer)) {
            currentRound = latestRound + 1;
            latestRound = currentRound;
            rounds[currentRound] = Round({
                answer: answer,
                startedAt: block.timestamp,
                updatedAt: block.timestamp,
                answeredInRound: currentRound,
                transmitter: msg.sender
            });
            emit NewRound(currentRound, msg.sender, block.timestamp);
            // Send to CCIP destinations if enabled
            if (ccipEnabled) {
                _sendToCCIP(currentRound, answer);
            }
        } else {
            // Refresh the existing round. The answer must be stored too:
            // previously only updatedAt/transmitter were written, leaving
            // latestAnswer() stale while AnswerUpdated reported the new value.
            round.answer = answer;
            round.updatedAt = block.timestamp;
            round.transmitter = msg.sender;
        }
        emit AnswerUpdated(int256(answer), currentRound, block.timestamp);
    }

    /**
     * @notice Send an oracle update to every CCIP destination chain.
     * @dev Each send is wrapped in try/catch so one failing destination
     *      cannot revert the oracle update itself.
     */
    function _sendToCCIP(uint256 roundId, uint256 answer) internal {
        uint64[] memory destinations = ccipSender.getDestinationChains();
        for (uint256 i = 0; i < destinations.length; i++) {
            uint64 chainSelector = destinations[i];
            try ccipSender.sendOracleUpdate(
                chainSelector,
                answer,
                roundId,
                block.timestamp
            ) returns (bytes32 messageId) {
                emit CCIPUpdateSent(messageId, chainSelector, answer, roundId);
            } catch {
                // Swallow per-destination failures so the oracle update
                // still lands on-chain. In production, consider emitting
                // a failure event for off-chain error tracking.
            }
        }
    }

    /**
     * @notice Enable or disable cross-chain broadcasting.
     */
    function setCCIPEnabled(bool enabled) external {
        require(msg.sender == admin, "OracleWithCCIP: only admin");
        ccipEnabled = enabled;
        emit CCIPEnabled(enabled);
    }

    /**
     * @notice Point the oracle at a new CCIPSender contract.
     */
    function updateCCIPSender(address newSender) external {
        require(msg.sender == admin, "OracleWithCCIP: only admin");
        require(newSender != address(0), "OracleWithCCIP: zero address");
        address oldSender = address(ccipSender);
        ccipSender = CCIPSender(newSender);
        emit CCIPSenderUpdated(oldSender, newSender);
    }

    /**
     * @notice Get CCIP destination chain selectors (delegates to the sender).
     */
    function getCCIPDestinations() external view returns (uint64[] memory) {
        return ccipSender.getDestinationChains();
    }

    /**
     * @notice Quote the fee for sending an update to one destination.
     * @dev Uses a zero-filled payload of the same encoded size as a real
     *      update (answer, roundId, timestamp) for the estimate.
     */
    function getCCIPFee(uint64 chainSelector) external view returns (uint256) {
        bytes memory data = abi.encode(uint256(0), uint256(0), uint256(0));
        return ccipSender.calculateFee(chainSelector, data);
    }
}

View File

@@ -0,0 +1,934 @@
# Complete Next Steps Guide - Phase 2 & Smart Contract Deployment
## Overview
This guide provides comprehensive next steps for completing the DeFi Oracle Meta Mainnet (ChainID 138) deployment, including Phase 2 infrastructure deployment and all smart contract deployments.
**⚡ Full Parallel Mode**: All operations run in parallel where possible for maximum speed (~3-4x faster)
**🔧 .env Integration**: All configuration uses `.env` file - single source of truth, no duplication
---
## Quick Start (Full Parallel)
```bash
# 1. Ensure .env is configured
source .env
# 2. Deploy everything in parallel (fastest)
./scripts/deployment/deploy-phase2-and-contracts-parallel.sh
# Or step-by-step parallel:
./scripts/deployment/generate-phase2-tfvars.sh
cd terraform/phases/phase2 && terraform apply
./terraform/phases/phase2/scripts/start-services.sh all
source .env && ./scripts/deployment/deploy-contracts-parallel.sh
./scripts/deployment/verify-contracts-parallel.sh
```
**Estimated Time**: ~10-15 minutes (vs ~40 minutes sequential)
---
## Table of Contents
1. [Phase 2 Deployment Completion](#phase-2-deployment-completion) - All regions parallel
2. [Prerequisites for Contract Deployment](#prerequisites-for-contract-deployment) - .env configuration
3. [Smart Contract Deployment Sequence](#smart-contract-deployment-sequence) - Full parallel mode
4. [Configuration & Integration](#configuration--integration) - Automated via .env
5. [Testing & Verification](#testing--verification) - Parallel verification
6. [Production Readiness Checklist](#production-readiness-checklist)
7. [Complete Parallel Deployment](#complete-parallel-deployment-all-in-one) - Master script
---
## Phase 2 Deployment Completion
### Prerequisites: Load .env Configuration
**Important**: All configuration is managed through `.env` file. Ensure you have a `.env` file in the project root with required variables:
```bash
# Load .env if not already loaded
cd /home/intlc/projects/smom-dbis-138
source .env # or: set -a && source .env && set +a
# Required variables (add to .env if missing):
# ENVIRONMENT=prod
# VM_ADMIN_USERNAME=besuadmin
# SSH_PRIVATE_KEY_PATH=/path/to/ssh/private/key
```
### Step 1: Verify Phase 1 Completion
```bash
cd terraform/phases/phase1
terraform output phase1_us_regions
terraform output ssh_connection_strings
```
**Expected Output**: 5 regions (centralus, eastus, eastus2, westus, westus2) with VM information.
### Step 2: Generate Phase 2 Configuration (Automated)
**Use the helper script to generate `terraform.tfvars` from Phase 1 outputs and `.env`:**
```bash
# This script reads .env and Phase 1 outputs, generates terraform.tfvars automatically
./scripts/deployment/generate-phase2-tfvars.sh
# Review generated file
cat terraform/phases/phase2/terraform.tfvars
```
**Manual alternative** (if script fails):
Ensure your `.env` has these variables, then create `terraform/phases/phase2/terraform.tfvars` manually:
```hcl
environment = "${ENVIRONMENT:-prod}"
vm_admin_username = "${VM_ADMIN_USERNAME:-besuadmin}"
ssh_private_key_path = "${SSH_PRIVATE_KEY_PATH}"
# Phase 1 VM information - get from: cd terraform/phases/phase1 && terraform output -json phase1_us_regions
phase1_vm_info = {
# ... (paste output from Phase 1)
}
```
### Step 3: Deploy Phase 2 Docker Compose Files (Parallel)
**Terraform deploys to all 5 regions in parallel automatically:**
```bash
cd terraform/phases/phase2
terraform init
terraform plan
terraform apply
# All regions deploy concurrently - no sequential steps needed
```
### Step 4: Start Phase 2 Services (Full Parallel)
**All regions start simultaneously - script handles parallelization automatically:**
```bash
cd terraform/phases/phase2/scripts
# Start all regions in parallel (automatically parallelized)
./start-services.sh all
# Script automatically:
# - Starts all 5 regions simultaneously
# - Waits for all to complete
# - Reports success/failure for each region
# - Exits with error if any region fails
```
### Step 5: Verify Phase 2 Deployment (Full Parallel)
**All regions checked simultaneously with organized output:**
```bash
cd terraform/phases/phase2/scripts
# Check all regions in parallel (automatically parallelized)
./status.sh all
# Script automatically:
# - Checks all 5 regions simultaneously
# - Collects output from each region
# - Displays results in order
# - Cleans up temporary files
```
**Expected**: All docker-compose services running (Besu, FireFly, Cacti, Chainlink, databases, monitoring) across all 5 regions.
---
## Prerequisites for Contract Deployment
### 1. Environment Setup
**Ensure `.env` file exists in project root** (created/updated from Phase 2 setup):
```bash
# Load .env
cd /home/intlc/projects/smom-dbis-138
source .env
# Required variables in .env:
# RPC Configuration
RPC_URL=http://<besu-rpc-node>:8545
CHAIN_ID=138
# Deployer Configuration
PRIVATE_KEY=<your_deployer_private_key>
DEPLOYER_ADDRESS=<your_deployer_address> # Optional: auto-calculated from PRIVATE_KEY
# CCIP Configuration (required for bridges)
CCIP_ROUTER=<ccip_router_address_or_leave_empty_to_deploy>
CCIP_FEE_TOKEN=<link_token_address_or_zero_address_for_native> # Use 0x0000000000000000000000000000000000000000 for native
# Oracle Configuration
ORACLE_DESCRIPTION="ETH/USD Price Feed"
ORACLE_HEARTBEAT=60
ORACLE_DEVIATION_THRESHOLD=50
# Deployment Flags
DEPLOY_WETH9=true
DEPLOY_WETH10=true
DEPLOY_BRIDGES=true
# MultiSig Configuration (optional)
MULTISIG_OWNERS=<comma_separated_owner_addresses> # e.g., "0x123...,0x456...,0x789..."
```
**All deployment scripts automatically load `.env`** - no need to manually export variables.
### 2. Fund Deployer Address
Ensure your deployer address has sufficient native tokens (ETH) for:
- Contract deployment gas costs
- CCIP fees (if using native token)
- Initial contract setup transactions
**Estimated Costs**: ~0.1-0.5 ETH for complete deployment
### 3. Verify RPC Connection
```bash
# Test RPC endpoint
curl -X POST $RPC_URL \
-H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
```
### 4. Compile Contracts
```bash
forge build
forge test # Run tests before deployment
```
---
## Smart Contract Deployment Sequence
### Deployment Order (Critical - Must Follow This Sequence)
```
1. CCIP Router (if deploying custom)
2. LINK Token (if deploying, or use existing/zero address)
3. Multicall
4. CREATE2 Factory
5. WETH9
6. WETH10
7. CCIPWETH9Bridge
8. CCIPWETH10Bridge
9. Oracle Aggregator
10. Oracle Proxy
11. MultiSig (Governance)
12. CCIP Sender/Receiver (if needed)
```
### Method 1: Parallel Automated Deployment (Recommended)
**Use the parallel deployment script** (faster than sequential):
```bash
cd /home/intlc/projects/smom-dbis-138
source .env # Ensure .env is loaded
# Parallel deployment (deploys independent contracts simultaneously)
./scripts/deployment/deploy-contracts-parallel.sh
```
This script:
- **Phase 1**: Deploys independent contracts in parallel (Multicall, WETH9, WETH10 simultaneously)
- **Phase 2**: Deploys CCIP Router (if needed)
- **Phase 3**: Deploys bridge contracts in parallel (CCIPWETH9Bridge, CCIPWETH10Bridge simultaneously)
- **Phase 4**: Deploys Oracle and MultiSig in parallel (independent contracts)
- Automatically updates `.env` with deployed addresses
- Handles dependencies between contracts
- Provides deployment summary
**Performance**: ~3-4x faster than sequential deployment (all independent operations run simultaneously).
### Method 1b: Sequential Automated Deployment (Alternative)
If you prefer sequential deployment or encounter parallel execution issues:
```bash
cd /home/intlc/projects/smom-dbis-138
source .env # Ensure .env is loaded
# Sequential deployment (safer, slower)
./scripts/deployment/deploy-contracts-ordered.sh
```
This script:
- Deploys all contracts in correct order (one at a time)
- Automatically updates `.env` with deployed addresses
- Handles dependencies between contracts
- Provides deployment summary
### Method 2: Manual Step-by-Step Deployment
#### Step 1: Load Environment
```bash
cd /home/intlc/projects/smom-dbis-138
source .env # Load all variables from .env
```
#### Step 2: Deploy Independent Contracts in Parallel
**Deploy Multicall, WETH9, and WETH10 simultaneously** (they have no dependencies):
```bash
# Parallel deployment of independent contracts
forge script script/DeployMulticall.s.sol:DeployMulticall \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify &
MULTICALL_PID=$!
forge script script/DeployWETH.s.sol:DeployWETH \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify &
WETH9_PID=$!
forge script script/DeployWETH10.s.sol:DeployWETH10 \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify &
WETH10_PID=$!
# Wait for all to complete
wait $MULTICALL_PID
wait $WETH9_PID
wait $WETH10_PID
# Extract addresses and update .env (scripts handle this automatically)
source .env # Reload to get new addresses
```
#### Step 3: Deploy CCIP Router (if needed)
```bash
# If deploying custom CCIP Router
if [ -z "$CCIP_ROUTER" ] || [ "$CCIP_ROUTER" = "0x0000000000000000000000000000000000000000" ]; then
forge script script/DeployCCIPRouter.s.sol:DeployCCIPRouter \
--sig "run(address,uint256,uint256)" \
"${CCIP_FEE_TOKEN:-0x0000000000000000000000000000000000000000}" \
"1000000000000000" \
"1000000000" \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify
# Update .env with deployed address (use deploy script's auto-update feature)
fi
source .env # Reload
```
**Note**: If using Chainlink's official CCIP Router, set `CCIP_ROUTER` in `.env` to the official address.
#### Step 4: Deploy CCIP Bridges in Parallel
**Deploy both bridges simultaneously** (they depend on CCIP_ROUTER and WETH addresses):
```bash
# Ensure dependencies are in .env
source .env
# Parallel bridge deployment
forge script script/DeployCCIPWETH9Bridge.s.sol:DeployCCIPWETH9Bridge \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify &
BRIDGE9_PID=$!
forge script script/DeployCCIPWETH10Bridge.s.sol:DeployCCIPWETH10Bridge \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify &
BRIDGE10_PID=$!
wait $BRIDGE9_PID
wait $BRIDGE10_PID
source .env # Reload addresses
```
#### Step 5: Deploy Oracle and MultiSig in Parallel
**Deploy Oracle and MultiSig simultaneously** (they are independent):
```bash
source .env
# Deploy Oracle and MultiSig in parallel
forge script script/DeployOracle.s.sol:DeployOracle \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify &
ORACLE_PID=$!
if [ -n "$MULTISIG_OWNERS" ]; then
forge script script/DeployMultiSig.s.sol:DeployMultiSig \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify &
MULTISIG_PID=$!
wait $ORACLE_PID $MULTISIG_PID
else
echo "⚠️ MULTISIG_OWNERS not set in .env. Skipping MultiSig deployment."
wait $ORACLE_PID
fi
source .env # Reload addresses
```
#### Step 6: Deploy CCIP Sender/Receiver (if needed)
```bash
source .env
# If deploying custom CCIP endpoints
forge script script/DeployCCIPSender.s.sol:DeployCCIPSender \
--rpc-url "$RPC_URL" \
--broadcast \
--private-key "$PRIVATE_KEY" \
--verify
```
**Note**: All addresses are automatically saved to `.env` by the deployment scripts. Manual `.env` updates are only needed if using raw `forge script` commands.
### Method 3: Individual Contract Scripts
For deploying single contracts (can run multiple in parallel manually):
```bash
# Load .env first
source .env
# WETH9 only
make deploy-weth
# or
./scripts/deployment/deploy-weth.sh
# WETH10 only
make deploy-weth10
# or
./scripts/deployment/deploy-weth10.sh
# Deploy multiple contracts in parallel manually:
make deploy-weth &
make deploy-weth10 &
wait
# All WETH contracts with CCIP bridges (uses parallel internally where possible)
make deploy-weth-ccip
# or
./scripts/deployment/deploy-weth-with-ccip.sh
# Individual bridges (can deploy in parallel)
make deploy-ccip-weth9-bridge &
make deploy-ccip-weth10-bridge &
wait
```
---
## Configuration & Integration
### 1. Configure CCIP Bridges
After deploying bridges, configure them:
```bash
# Configure WETH9 Bridge
./scripts/deployment/configure-weth9-bridge.sh
# Configure WETH10 Bridge
./scripts/deployment/configure-weth10-bridge.sh
```
**Configuration includes**:
- Setting trusted destination chains
- Configuring fee tokens
- Setting up cross-chain message routing
- Whitelisting approved senders/receivers
### 2. Initialize Oracle
```bash
# Set up price feed
cast send $ORACLE_PROXY_ADDRESS \
"submit(uint256)" \
<price_in_wei> \
--rpc-url $RPC_URL \
--private-key $PRIVATE_KEY
# Example: Submit $2000 ETH/USD price (2000 * 10^8 = 200000000000)
cast send $ORACLE_PROXY_ADDRESS \
"submit(uint256)" \
200000000000 \
--rpc-url $RPC_URL \
--private-key $PRIVATE_KEY
```
### 3. Configure MultiSig
```bash
# Add owners
cast send $MULTISIG_ADDRESS \
"addOwner(address)" \
<owner_address> \
--rpc-url $RPC_URL \
--private-key $PRIVATE_KEY
# Set threshold (e.g., 2 of 3)
cast send $MULTISIG_ADDRESS \
"changeThreshold(uint256)" \
2 \
--rpc-url $RPC_URL \
--private-key $PRIVATE_KEY
```
### 4. Integrate with FireFly
Update FireFly configuration to use deployed contracts:
```yaml
# FireFly configuration
smart_contracts:
weth9: "<WETH9_ADDRESS>"
weth10: "<WETH10_ADDRESS>"
oracle: "<ORACLE_PROXY_ADDRESS>"
multisig: "<MULTISIG_ADDRESS>"
```
### 5. Configure Chainlink CCIP
If using Chainlink CCIP nodes, configure them:
```bash
# Update Chainlink node configuration
# Point to deployed CCIP Router
# Configure fee tokens
# Set up destination chains
```
---
## Testing & Verification
### 1. Verify Contract Deployments (Parallel)
**Use parallel verification for faster results:**
```bash
# Load .env first
source .env
# Verify all contracts in parallel (recommended - fastest)
./scripts/deployment/verify-contracts-parallel.sh
# Sequential verification (alternative)
./scripts/deployment/verify-on-chain-deployments.sh
# Check specific contracts (can run in parallel manually)
cast code "$WETH9_ADDRESS" --rpc-url "$RPC_URL" &
cast code "$WETH10_ADDRESS" --rpc-url "$RPC_URL" &
cast code "$CCIPWETH9BRIDGE_ADDRESS" --rpc-url "$RPC_URL" &
wait
```
### 2. Test WETH Contracts
```bash
# Test WETH9 deposit
cast send $WETH9_ADDRESS \
"deposit()" \
--value 1ether \
--rpc-url $RPC_URL \
--private-key $PRIVATE_KEY
# Test WETH9 withdrawal
cast send $WETH9_ADDRESS \
"withdraw(uint256)" \
1ether \
--rpc-url $RPC_URL \
--private-key $PRIVATE_KEY
```
### 3. Test CCIP Bridges
```bash
# Test cross-chain WETH9 transfer
# (Requires destination chain configuration)
cast send $CCIPWETH9BRIDGE_ADDRESS \
"bridgeTokens(uint256,uint64,bytes)" \
<amount> \
<destination_chain_selector> \
<recipient_address_bytes> \
--rpc-url $RPC_URL \
--private-key $PRIVATE_KEY \
--value <ccip_fee>
```
### 4. Test Oracle
```bash
# Read latest price
cast call $ORACLE_PROXY_ADDRESS \
"latestRoundData()" \
--rpc-url $RPC_URL
# Submit price update
cast send $ORACLE_PROXY_ADDRESS \
"submit(uint256)" \
<price> \
--rpc-url $RPC_URL \
--private-key $PRIVATE_KEY
```
### 5. End-to-End Testing
```bash
# Run comprehensive tests
forge test --fork-url $RPC_URL -vvv
# Run integration tests
npm test # If TypeScript/JavaScript tests exist
```
### 6. Monitor Contract Events
```bash
# Watch for deployment events
cast logs --from-block 0 \
--address $WETH9_ADDRESS \
--rpc-url $RPC_URL
# Watch CCIP bridge events
cast logs --from-block 0 \
--address $CCIPWETH9BRIDGE_ADDRESS \
--rpc-url $RPC_URL
```
---
## Production Readiness Checklist
### Infrastructure ✅
- [ ] Phase 1 VMs deployed and accessible
- [ ] Phase 2 docker-compose services running
- [ ] All 5 regions operational (cus, eus, eus2, wus, wus2)
- [ ] Network connectivity verified
- [ ] Monitoring stack operational (Prometheus, Grafana, Loki)
- [ ] Logging operational (Promtail shipping logs)
### Smart Contracts ✅
- [ ] All contracts deployed
- [ ] Contract addresses saved to `.env`
- [ ] Contract source code verified (if applicable)
- [ ] CCIP Router configured
- [ ] WETH9 & WETH10 deployed and tested
- [ ] CCIP bridges deployed and configured
- [ ] Oracle deployed and receiving price updates
- [ ] MultiSig deployed and configured
- [ ] All contract permissions set correctly
### Integration ✅
- [ ] FireFly configured with contract addresses
- [ ] Cacti configured for cross-chain monitoring
- [ ] Chainlink nodes configured (if applicable)
- [ ] Cross-chain routes configured
- [ ] Fee tokens configured
### Security ✅
- [ ] Private keys secured (use Key Vault)
- [ ] MultiSig owners configured
- [ ] Access control verified
- [ ] Security audit completed (if applicable)
- [ ] Emergency pause mechanisms tested
### Monitoring & Observability ✅
- [ ] Prometheus scraping metrics
- [ ] Grafana dashboards configured
- [ ] Alerts configured in Alertmanager
- [ ] Logs centralized in Loki
- [ ] Contract event monitoring set up
### Documentation ✅
- [ ] Contract addresses documented
- [ ] Deployment process documented
- [ ] Configuration documented
- [ ] Operations runbooks created
- [ ] API documentation updated
### Testing ✅
- [ ] Unit tests passing
- [ ] Integration tests passing
- [ ] End-to-end tests passing
- [ ] Load testing completed
- [ ] Security testing completed
---
## Quick Reference Commands
### Phase 2 Management
```bash
# Generate Phase 2 config from Phase 1 outputs and .env
./scripts/deployment/generate-phase2-tfvars.sh
# Deploy Phase 2 (all regions in parallel)
cd terraform/phases/phase2 && terraform apply
# Start services (all regions in parallel)
./terraform/phases/phase2/scripts/start-services.sh all
# Stop services (all regions in parallel)
./terraform/phases/phase2/scripts/stop-services.sh all
# Check status (all regions in parallel)
./terraform/phases/phase2/scripts/status.sh all
```
### Contract Deployment
```bash
# Load .env first
source .env
# Parallel deployment (recommended - fastest)
./scripts/deployment/deploy-contracts-parallel.sh
# Sequential deployment (alternative)
./scripts/deployment/deploy-contracts-ordered.sh
# Individual contracts (can run multiple in parallel manually)
make deploy-weth &
make deploy-weth10 &
wait
make deploy-weth-ccip
./scripts/deployment/deploy-multicall.sh
./scripts/deployment/deploy-multisig.sh
```
### Verification
```bash
# Load .env first
source .env
# Verify deployments in parallel (recommended - fastest)
./scripts/deployment/verify-contracts-parallel.sh
# Sequential verification (alternative)
./scripts/deployment/verify-on-chain-deployments.sh
./scripts/deployment/verify-deployment.sh
# Check contract code in parallel (uses .env RPC_URL)
cast code "$WETH9_ADDRESS" --rpc-url "$RPC_URL" &
cast code "$WETH10_ADDRESS" --rpc-url "$RPC_URL" &
cast code "$CCIPWETH9BRIDGE_ADDRESS" --rpc-url "$RPC_URL" &
wait
```
### Testing
```bash
# Load .env first
source .env
# Run tests (uses .env RPC_URL)
forge test
forge test --fork-url "$RPC_URL"
# Integration tests
npm test
# Run tests in parallel (faster)
forge test --fork-url "$RPC_URL" -j $(nproc)
```
---
## Troubleshooting
### Environment Configuration
**Problem**: Variables not found / scripts fail with "not set" errors
**Solution**:
- Ensure `.env` file exists in project root: `ls -la .env`
- Load `.env` before running scripts: `source .env`
- Check variables are set: `grep PRIVATE_KEY .env`
- Use helper script to generate Phase 2 config: `./scripts/deployment/generate-phase2-tfvars.sh`
**Problem**: Duplicate variable definitions
**Solution**:
- **All configuration should be in `.env` only** - don't duplicate in scripts or command line
- Remove hardcoded values from scripts
- Use `${VAR:-default}` syntax for optional variables
- Scripts automatically load `.env` - don't manually export variables
### Phase 2 Issues
**Problem**: Cannot SSH to VMs
**Solution**:
- Verify VPN/ExpressRoute/Cloudflare Tunnel connectivity
- Check NSG rules allow SSH from your IP
- Verify `SSH_PRIVATE_KEY_PATH` in `.env`: `grep SSH_PRIVATE_KEY_PATH .env`
- Regenerate terraform.tfvars: `./scripts/deployment/generate-phase2-tfvars.sh`
**Problem**: Docker compose services not starting
**Solution**:
- Check logs: `docker compose logs` on VM
- Verify volumes exist: `ls -la /opt/*`
- Check permissions: `sudo chown -R besuadmin:besuadmin /opt/*`
- All regions deploy in parallel - check individual region: `./status.sh <region>`
### Contract Deployment Issues
**Problem**: Deployment fails with "insufficient funds"
**Solution**: Fund deployer address with native tokens
**Problem**: CCIP Router deployment fails
**Solution**:
- Verify `CCIP_FEE_TOKEN` in `.env`: `grep CCIP_FEE_TOKEN .env`
- Use zero address for native token: `CCIP_FEE_TOKEN=0x0000000000000000000000000000000000000000`
- Check fee parameters (baseFee, dataFeePerByte) in deployment script
- Reload `.env`: `source .env`
**Problem**: Bridge deployment fails
**Solution**:
- Verify `CCIP_ROUTER` is set in `.env`: `grep CCIP_ROUTER .env`
- Ensure `WETH9_ADDRESS` and `WETH10_ADDRESS` are in `.env`: `grep WETH.*ADDRESS .env`
- Check `CCIP_FEE_TOKEN` configuration in `.env`
- Deploy dependencies first (use parallel script which handles order automatically)
**Problem**: Parallel deployment failures
**Solution**:
- Check individual contract deployments sequentially first
- Verify `.env` has all required addresses before parallel bridge deployment
- Use sequential script if parallel fails: `./scripts/deployment/deploy-contracts-ordered.sh`
---
## Support & Resources
- **Documentation**: `docs/` directory
- **Terraform Phase 1**: `terraform/phases/phase1/README.md`
- **Terraform Phase 2**: `terraform/phases/phase2/README.md`
- **Contract Documentation**: `contracts/README.md`
- **Deployment Scripts**: `scripts/deployment/README.md`
- **Environment Configuration**: `.env` file (create from `.env.example` if exists)
## Key Points: Using .env & Full Parallel Mode
1. **Single Source of Truth**: All configuration is in `.env` - no duplication
2. **Automatic Loading**: All scripts automatically `source .env`
3. **Auto-Updates**: Deployment scripts automatically update `.env` with deployed addresses
4. **Full Parallel Execution**: All independent operations run simultaneously:
- Phase 2: All 5 regions deploy/start/stop/check in parallel
- Contracts: Independent contracts deploy in parallel (Multicall, WETH9, WETH10, Oracle, MultiSig)
- Bridges: Both bridge contracts deploy in parallel
- Verification: All contracts verified in parallel
- Testing: Forge tests use parallel execution (`-j $(nproc)`)
5. **No Manual Exports**: Don't manually export variables - scripts handle it
6. **Performance**: ~3-4x faster than sequential execution
### Quick .env Checklist
```bash
# Required variables
grep -E "^(PRIVATE_KEY|RPC_URL|CHAIN_ID|SSH_PRIVATE_KEY_PATH)=" .env
# Optional but recommended
grep -E "^(CCIP_ROUTER|CCIP_FEE_TOKEN|MULTISIG_OWNERS|ENVIRONMENT|VM_ADMIN_USERNAME)=" .env
# Verify .env is valid
source .env && echo "✅ .env loaded successfully"
```
---
---
## Complete Parallel Deployment (All-in-One)
### Master Script: Deploy Phase 2 + Contracts in Parallel
**For the fastest deployment, use the master parallel script:**
```bash
cd /home/intlc/projects/smom-dbis-138
source .env # Ensure .env is loaded
# Deploy Phase 2 and contracts in full parallel mode
./scripts/deployment/deploy-phase2-and-contracts-parallel.sh
```
This script:
- Generates Phase 2 configuration from Phase 1 outputs and .env
- Deploys Phase 2 docker-compose to all 5 regions in parallel
- Starts Phase 2 services in parallel (all regions)
- Deploys all contracts in parallel (independent contracts simultaneously)
- Verifies both Phase 2 and contracts in parallel
**Performance**: Complete deployment in ~10-15 minutes (vs ~40 minutes sequential)
### Individual Parallel Operations
If you prefer step-by-step control:
```bash
# 1. Generate Phase 2 config (reads .env + Phase 1)
./scripts/deployment/generate-phase2-tfvars.sh
# 2. Deploy Phase 2 (all regions parallel)
cd terraform/phases/phase2 && terraform apply
# 3. Start services (all regions parallel)
./terraform/phases/phase2/scripts/start-services.sh all
# 4. Deploy contracts (parallel where possible)
source .env && ./scripts/deployment/deploy-contracts-parallel.sh
# 5. Verify everything (parallel)
./terraform/phases/phase2/scripts/status.sh all &
source .env && ./scripts/deployment/verify-contracts-parallel.sh &
wait
```
---
**Last Updated**: 2025-12-12
**Status**: Complete Deployment Guide with Full Parallel Execution & .env Integration

1
lib/forge-std Submodule

Submodule lib/forge-std added at 27ba11c86a

View File

@@ -0,0 +1,27 @@
/**
 * Entry point for the Multi-Cloud Orchestration Portal front end.
 * Mounts the Vue implementation by default; an equivalent React bootstrap is
 * kept below, commented out, as a drop-in alternative.
 */

// Vue version (default)
import { createApp } from 'vue';
import App from './vue/App.vue';
import router from './vue/router';
import './styles/main.css';

const vueApp = createApp(App);
vueApp.use(router).mount('#app');

// React version (alternative - uncomment to use React instead)
// import React from 'react';
// import ReactDOM from 'react-dom/client';
// import App from './react/App';
// import './styles/main.css';
//
// ReactDOM.createRoot(document.getElementById('app')!).render(
//   <React.StrictMode>
//     <App />
//   </React.StrictMode>
// );

View File

@@ -0,0 +1,62 @@
/**
* React version of the Multi-Cloud Orchestration Portal
* Alternative to Vue implementation
*/
import React, { useState, useEffect } from 'react';
import { BrowserRouter, Routes, Route, Link } from 'react-router-dom';
import Dashboard from './views/Dashboard';
import HealthDashboard from './views/HealthDashboard';
import CostDashboard from './views/CostDashboard';
import AdminPanel from './views/AdminPanel';
import MonitoringDashboard from './views/MonitoringDashboard';
import type { Environment, DeploymentStatus, Alert } from '../types';
/**
 * Root component: renders a fixed top navigation bar plus the client-side
 * routes for the dashboard, health, cost and admin views.
 *
 * NOTE(review): MonitoringDashboard is imported at the top of this file but
 * no <Route> is registered for it here — confirm whether a /monitoring route
 * was intended. useState/useEffect and the type imports are also unused.
 */
function App() {
  return (
    <BrowserRouter>
      <div className="min-h-screen bg-gray-50">
        {/* Navigation */}
        <nav className="bg-gradient-to-r from-primary-500 to-purple-600 text-white shadow-lg">
          <div className="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8">
            <div className="flex justify-between items-center h-16">
              <div className="flex items-center">
                <h1 className="text-2xl font-bold">
                  <i className="fas fa-cloud mr-2"></i>
                  Multi-Cloud Orchestration
                </h1>
              </div>
              {/* One nav link per route registered below */}
              <div className="flex gap-6">
                <Link to="/" className="hover:opacity-80 transition-opacity">
                  <i className="fas fa-home mr-1"></i> Dashboard
                </Link>
                <Link to="/health" className="hover:opacity-80 transition-opacity">
                  <i className="fas fa-heartbeat mr-1"></i> Health
                </Link>
                <Link to="/costs" className="hover:opacity-80 transition-opacity">
                  <i className="fas fa-dollar-sign mr-1"></i> Costs
                </Link>
                <Link to="/admin" className="hover:opacity-80 transition-opacity">
                  <i className="fas fa-cog mr-1"></i> Admin
                </Link>
              </div>
            </div>
          </div>
        </nav>
        {/* Main Content */}
        <main className="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8 py-8">
          <Routes>
            <Route path="/" element={<Dashboard />} />
            <Route path="/health" element={<HealthDashboard />} />
            <Route path="/costs" element={<CostDashboard />} />
            <Route path="/admin" element={<AdminPanel />} />
          </Routes>
        </main>
      </div>
    </BrowserRouter>
  );
}

export default App;

View File

@@ -0,0 +1,582 @@
/**
* Enhanced Multi-Cloud Orchestration Portal
* TypeScript/Node.js Express server
*/
import express, { Request, Response, NextFunction } from 'express';
import cors from 'cors';
import path from 'path';
import fs from 'fs';
import http from 'http';
import { Server as SocketIOServer } from 'socket.io';
import { ConfigManager } from './config';
import { DatabaseManager } from './database';
import { Environment, DeploymentRequest, Deployment, Alert, Cost } from './types';
import { requireAdmin, createSession, getClientIp, AuthRequest } from './middleware/auth';
import { MonitoringService } from './services/monitoring';
// ---------------------------------------------------------------------------
// Application bootstrap: Express app, middleware, static assets, view engine,
// and the shared config/database/monitoring singletons used by every route.
// ---------------------------------------------------------------------------
const app = express();
app.use(cors());
app.use(express.json());

// Serve static files from client build (production) or static directory
const isProduction = process.env.NODE_ENV === 'production';
if (isProduction) {
  app.use(express.static(path.join(__dirname, '../client')));
} else {
  app.use(express.static(path.join(__dirname, '../static')));
}

// Set up view engine (EJS for templates - fallback)
app.set('view engine', 'ejs');
app.set('views', path.join(__dirname, '../templates'));

// Initialize managers
const config = new ConfigManager();
const db = new DatabaseManager(config.getDbFile());
const monitoring = new MonitoringService(db);
// NOTE(review): `monitoring` is constructed for its side effects and never
// referenced again in this file — confirm MonitoringService starts itself.
// Buckets each environment under its provider key; environments with no
// provider set are collected under the literal key 'unknown'.
function groupByProvider(environments: Environment[]): Record<string, Environment[]> {
  return environments.reduce<Record<string, Environment[]>>((buckets, environment) => {
    const key = environment.provider || 'unknown';
    (buckets[key] ??= []).push(environment);
    return buckets;
  }, {});
}
// ============================================
// ROUTES
// ============================================

// Main dashboard - serve Vue/React SPA in production, EJS template in development
app.get('/', (_req: Request, res: Response): void => {
  if (isProduction) {
    // Serve the built Vue/React app
    res.sendFile(path.join(__dirname, '../client/index.html'));
  } else {
    // Fallback to EJS template for development
    const environments = config.loadEnvironments();
    const byProvider = groupByProvider(environments);
    // Deployment status is only computed for enabled environments.
    const envStatuses: Record<string, any> = {};
    for (const env of environments) {
      if (env.enabled) {
        envStatuses[env.name] = config.getDeploymentStatus(env.name, db);
      }
    }
    const alerts = db.getAlerts({ unacknowledged_only: true });
    const recentDeployments = db.getDeployments({ limit: 10 });
    // Summary counters rendered in the dashboard header.
    const totalEnvironments = environments.length;
    const enabledCount = environments.filter(e => e.enabled).length;
    const totalProviders = Object.keys(byProvider).length;
    res.render('dashboard', {
      environments,
      by_provider: byProvider,
      env_statuses: envStatuses,
      alerts,
      recent_deployments: recentDeployments,
      total_environments: totalEnvironments,
      enabled_count: enabledCount,
      total_providers: totalProviders
    });
  }
});
// API: Get all environments
app.get('/api/environments', (_req: Request, res: Response) => {
  const environments = config.loadEnvironments();
  res.json(environments);
});

// API: Get specific environment, bundled with its status, last 24h of
// metrics, open alerts and last 30 days of cost rows.
app.get('/api/environments/:name', (req: Request, res: Response): void => {
  const { name } = req.params;
  const env = config.getEnvironmentByName(name);
  if (!env) {
    res.status(404).json({ error: 'Environment not found' });
    return;
  }
  const status = config.getDeploymentStatus(name, db);
  const metrics = db.getMetrics(name, 24);
  const alerts = db.getAlerts({ environment: name, unacknowledged_only: true });
  const costs = db.getCosts({ environment: name, days: 30 });
  res.json({
    config: env,
    status,
    metrics,
    alerts,
    costs
  });
});

// API: Deploy to environment. Writes a request log file and records a
// 'queued' deployment row; actual execution happens outside this handler.
app.post('/api/environments/:name/deploy', (req: Request, res: Response): void => {
  const { name } = req.params;
  const body: DeploymentRequest = req.body || {};
  const env = config.getEnvironmentByName(name);
  if (!env) {
    res.status(404).json({ error: 'Environment not found' });
    return;
  }
  if (!env.enabled) {
    res.status(400).json({ error: 'Environment is disabled' });
    return;
  }
  const strategy = body.strategy || 'blue-green';
  const version = body.version || 'latest';
  const now = new Date();
  // Id is "<env>-<YYYYMMDDHHMMSS>": strip ISO separators, keep 14 digits.
  const deploymentId = `${name}-${now.toISOString().replace(/[-:T.]/g, '').slice(0, 14)}`;
  const logFile = path.join(config.getDeploymentLogDir(), `${deploymentId}.log`);
  // Log deployment request
  const logContent = [
    `Deployment requested for ${name} at ${now.toISOString()}`,
    `Strategy: ${strategy}`,
    `Version: ${version}`,
    `Environment config: ${JSON.stringify(env, null, 2)}`
  ].join('\n');
  fs.writeFileSync(logFile, logContent);
  // Store in database
  const deployment: Deployment = {
    id: deploymentId,
    environment: name,
    status: 'queued',
    started_at: now.toISOString(),
    triggered_by: body.triggered_by || 'api',
    strategy,
    version,
    logs_path: logFile
  };
  db.createDeployment(deployment);
  res.json({
    deployment_id: deploymentId,
    status: 'queued',
    environment: name,
    strategy,
    version,
    message: 'Deployment queued successfully'
  });
});

// API: Get environment status
app.get('/api/environments/:name/status', (req: Request, res: Response): void => {
  const { name } = req.params;
  const status = config.getDeploymentStatus(name, db);
  res.json(status);
});

// API: Get environment metrics (window defaults to the last 24 hours).
app.get('/api/environments/:name/metrics', (req: Request, res: Response): void => {
  const { name } = req.params;
  const hours = parseInt(req.query.hours as string) || 24;
  const metrics = db.getMetrics(name, hours);
  res.json(metrics);
});

// API: Get environment alerts (all, unless ?unacknowledged_only=true).
app.get('/api/environments/:name/alerts', (req: Request, res: Response) => {
  const { name } = req.params;
  const unacknowledgedOnly = req.query.unacknowledged_only === 'true';
  const alerts = db.getAlerts({ environment: name, unacknowledged_only: unacknowledgedOnly });
  res.json(alerts);
});

// API: Acknowledge alert
app.post('/api/alerts/:id/acknowledge', (req: Request, res: Response) => {
  const alertId = parseInt(req.params.id);
  db.acknowledgeAlert(alertId);
  res.json({ message: 'Alert acknowledged' });
});

// API: Get costs (optionally filtered by environment; defaults to 30 days).
app.get('/api/costs', (req: Request, res: Response): void => {
  const environment = req.query.environment as string | undefined;
  const days = parseInt(req.query.days as string) || 30;
  const costs = db.getCosts({ environment, days });
  res.json(costs);
});

// API: Get deployments (optional environment/status filters; limit defaults
// to 50 — ordering is delegated to DatabaseManager.getDeployments).
app.get('/api/deployments', (req: Request, res: Response): void => {
  const environment = req.query.environment as string | undefined;
  const status = req.query.status as string | undefined;
  const limit = parseInt(req.query.limit as string) || 50;
  const deployments = db.getDeployments({ environment, status, limit });
  res.json(deployments);
});

// API: Get deployment logs — reads back the log file written at deploy time.
app.get('/api/deployments/:id/logs', (req: Request, res: Response): void => {
  const { id } = req.params;
  const logsPath = db.getDeploymentLogsPath(id);
  if (!logsPath || !fs.existsSync(logsPath)) {
    res.status(404).json({ error: 'Logs not found' });
    return;
  }
  const logs = fs.readFileSync(logsPath, 'utf-8');
  res.json({ logs });
});
// Environment detail page - serve SPA in production
app.get('/environment/:name', (req: Request, res: Response): void => {
  if (isProduction) {
    res.sendFile(path.join(__dirname, '../client/index.html'));
  } else {
    const { name } = req.params;
    const env = config.getEnvironmentByName(name);
    if (!env) {
      res.status(404).send('Environment not found');
      return;
    }
    const status = config.getDeploymentStatus(name, db);
    const metrics = db.getMetrics(name, 168); // 7 days
    const alerts = db.getAlerts({ environment: name });
    const costs = db.getCosts({ environment: name, days: 30 });
    const deployments = db.getDeployments({ environment: name, limit: 20 });
    res.render('environment_detail', {
      environment: env,
      status,
      metrics,
      alerts,
      costs,
      deployments
    });
  }
});

// Health dashboard - serve SPA in production
app.get('/dashboard/health', (_req: Request, res: Response): void => {
  if (isProduction) {
    res.sendFile(path.join(__dirname, '../client/index.html'));
  } else {
    const environments = config.loadEnvironments();
    // One row per enabled environment with its current status snapshot.
    const healthData: any[] = [];
    for (const env of environments) {
      if (env.enabled) {
        const status = config.getDeploymentStatus(env.name, db);
        healthData.push({
          name: env.name,
          provider: env.provider,
          region: env.region,
          status,
          health: status.cluster_health
        });
      }
    }
    res.render('health_dashboard', { health_data: healthData });
  }
});

// Cost dashboard - serve SPA in production
app.get('/dashboard/costs', (_req: Request, res: Response): void => {
  if (isProduction) {
    res.sendFile(path.join(__dirname, '../client/index.html'));
  } else {
    const costs = db.getCosts({ days: 90 });
    // Aggregate by provider
    const byProvider: Record<string, number> = {};
    let totalCost = 0;
    for (const cost of costs) {
      const provider = cost.provider;
      if (!byProvider[provider]) {
        byProvider[provider] = 0;
      }
      byProvider[provider] += cost.cost;
      totalCost += cost.cost;
    }
    res.render('cost_dashboard', {
      costs,
      by_provider: byProvider,
      total_cost: totalCost
    });
  }
});
// Seed sample data
/**
 * Populates the database with demo data (24h of metrics, occasional alerts,
 * 30 days of costs) for the first three configured environments.
 * No-op when any metrics already exist, so it is safe to run on every startup.
 */
function seedSampleData(): void {
  if (db.getMetricsCount() > 0) {
    return; // Data already seeded
  }
  const environments = config.loadEnvironments();
  const now = new Date();
  for (const env of environments.slice(0, 3)) {
    const envName = env.name;
    // Generate sample metrics (24 hours)
    for (let i = 0; i < 24; i++) {
      const timestamp = new Date(now.getTime() - (24 - i) * 60 * 60 * 1000).toISOString();
      // CPU in [20, 80), memory in [30, 85) — plausible-looking load figures.
      db.insertMetric(envName, 'cpu_usage', Math.random() * 60 + 20, timestamp);
      db.insertMetric(envName, 'memory_usage', Math.random() * 55 + 30, timestamp);
    }
    // Generate sample alerts (30% chance)
    if (Math.random() > 0.7) {
      const alert: Omit<Alert, 'id'> = {
        environment: envName,
        severity: Math.random() > 0.5 ? 'warning' : 'error',
        message: `Sample alert for ${envName}`,
        timestamp: now.toISOString()
      };
      db.createAlert(alert);
    }
    // Generate sample costs (30 days)
    for (let i = 0; i < 30; i++) {
      const periodStart = new Date(now.getTime() - (30 - i) * 24 * 60 * 60 * 1000).toISOString();
      const periodEnd = new Date(now.getTime() - (29 - i) * 24 * 60 * 60 * 1000).toISOString();
      const cost: Cost = {
        environment: envName,
        provider: env.provider || 'azure',
        // Daily cost in [10, 500) USD.
        cost: Math.random() * 490 + 10,
        currency: 'USD',
        period_start: periodStart,
        period_end: periodEnd,
        resource_type: 'compute'
      };
      db.insertCost(cost);
    }
  }
}
// ============================================
// ADMIN API ROUTES
// ============================================

// Admin login (simple for now - enhance with proper auth later)
// FIXME(security): the literal fallback password 'admin' means anyone can log
// in when ADMIN_PASSWORD is unset — remove the hardcoded fallback before any
// non-local deployment, and compare a hash rather than plaintext.
app.post('/api/admin/login', (req: Request, res: Response): void => {
  const { username, password } = req.body;
  // Simple hardcoded admin for now (replace with proper auth)
  if (username === 'admin' && (password === process.env.ADMIN_PASSWORD || password === 'admin')) {
    const token = createSession(username);
    res.json({ token, username });
  } else {
    res.status(401).json({ error: 'Invalid credentials' });
  }
});

// Get all service configurations
app.get('/api/admin/services', requireAdmin, (_req: AuthRequest, res: Response): void => {
  try {
    const services = db.getAllServiceConfigs();
    res.json(services);
  } catch (error) {
    res.status(500).json({ error: 'Failed to fetch services' });
  }
});

// Get specific service configuration
app.get('/api/admin/services/:name', requireAdmin, (req: AuthRequest, res: Response): void => {
  try {
    const { name } = req.params;
    const service = db.getServiceConfig(name);
    if (!service) {
      res.status(404).json({ error: 'Service not found' });
      return;
    }
    res.json({ service_name: name, ...service });
  } catch (error) {
    res.status(500).json({ error: 'Failed to fetch service' });
  }
});

// Update service configuration: persists the change, writes an audit-log row,
// and pushes a live update to connected admin dashboards.
app.put('/api/admin/services/:name', requireAdmin, (req: AuthRequest, res: Response): void => {
  try {
    const { name } = req.params;
    const { enabled, config } = req.body;
    const adminUser = req.adminUser || 'unknown';
    const ipAddress = getClientIp(req);
    // `enabled !== false` treats a missing flag as enabled.
    db.setServiceConfig(name, enabled !== false, config || null, adminUser);
    db.logAdminAction(adminUser, 'update_service', 'service', name, JSON.stringify({ enabled, config }), ipAddress);
    // Broadcast real-time update
    broadcastAdminUpdate('service-updated', { service_name: name, enabled, updated_by: adminUser });
    res.json({ success: true, message: `Service ${name} ${enabled ? 'enabled' : 'disabled'}` });
  } catch (error) {
    res.status(500).json({ error: 'Failed to update service' });
  }
});

// Get all provider configurations
app.get('/api/admin/providers', requireAdmin, (_req: AuthRequest, res: Response): void => {
  try {
    const providers = db.getAllProviderConfigs();
    res.json(providers);
  } catch (error) {
    res.status(500).json({ error: 'Failed to fetch providers' });
  }
});

// Get specific provider configuration
app.get('/api/admin/providers/:name', requireAdmin, (req: AuthRequest, res: Response): void => {
  try {
    const { name } = req.params;
    const provider = db.getProviderConfig(name);
    if (!provider) {
      res.status(404).json({ error: 'Provider not found' });
      return;
    }
    res.json({ provider_name: name, ...provider });
  } catch (error) {
    res.status(500).json({ error: 'Failed to fetch provider' });
  }
});

// Update provider configuration — mirrors the service-update flow above:
// persist, audit-log, then broadcast to the admin room.
app.put('/api/admin/providers/:name', requireAdmin, (req: AuthRequest, res: Response): void => {
  try {
    const { name } = req.params;
    const { enabled, config } = req.body;
    const adminUser = req.adminUser || 'unknown';
    const ipAddress = getClientIp(req);
    db.setProviderConfig(name, enabled !== false, config || null, adminUser);
    db.logAdminAction(adminUser, 'update_provider', 'provider', name, JSON.stringify({ enabled, config }), ipAddress);
    // Broadcast real-time update
    broadcastAdminUpdate('provider-updated', { provider_name: name, enabled, updated_by: adminUser });
    res.json({ success: true, message: `Provider ${name} ${enabled ? 'enabled' : 'disabled'}` });
  } catch (error) {
    res.status(500).json({ error: 'Failed to update provider' });
  }
});

// Get audit logs (most recent entries; limit defaults to 100).
app.get('/api/admin/audit-logs', requireAdmin, (req: AuthRequest, res: Response): void => {
  try {
    const limit = parseInt(req.query.limit as string) || 100;
    const logs = db.getAuditLogs(limit);
    res.json(logs);
  } catch (error) {
    res.status(500).json({ error: 'Failed to fetch audit logs' });
  }
});

// Toggle environment enabled/disabled (updates YAML via config manager)
app.put('/api/admin/environments/:name/toggle', requireAdmin, (req: AuthRequest, res: Response): void => {
  try {
    const { name } = req.params;
    const { enabled } = req.body;
    const adminUser = req.adminUser || 'unknown';
    const ipAddress = getClientIp(req);
    const env = config.getEnvironmentByName(name);
    if (!env) {
      res.status(404).json({ error: 'Environment not found' });
      return;
    }
    // Update environment in YAML file
    config.updateEnvironmentEnabled(name, enabled !== false);
    db.logAdminAction(adminUser, 'toggle_environment', 'environment', name, JSON.stringify({ enabled }), ipAddress);
    // Broadcast real-time update
    broadcastAdminUpdate('environment-updated', { environment_name: name, enabled, updated_by: adminUser });
    res.json({ success: true, message: `Environment ${name} ${enabled ? 'enabled' : 'disabled'}` });
  } catch (error) {
    res.status(500).json({ error: 'Failed to toggle environment' });
  }
});
// Error handling middleware
// Express recognizes error middleware by its 4-argument signature.
app.use((err: Error, _req: Request, res: Response, _next: NextFunction) => {
  console.error('Error:', err);
  res.status(500).json({ error: 'Internal server error' });
});

// Start server
const PORT = process.env.PORT || 5000;
const HOST = process.env.HOST || '0.0.0.0';

// Create HTTP server for Socket.IO
const server = http.createServer(app);

// Initialize Socket.IO
// NOTE(review): CORS origin '*' is wide open — fine for development; confirm
// before any production deployment.
const io = new SocketIOServer(server, {
  cors: {
    origin: '*',
    methods: ['GET', 'POST']
  }
});

// Socket.IO connection handling
io.on('connection', (socket) => {
  console.log('Client connected:', socket.id);
  // Join admin room for real-time updates
  socket.on('join-admin', () => {
    socket.join('admin');
    console.log('Client joined admin room:', socket.id);
  });
  // Handle disconnection
  socket.on('disconnect', () => {
    console.log('Client disconnected:', socket.id);
  });
});

// Helper function to broadcast admin updates
// Emits an 'admin-update' event to every socket in the 'admin' room.
function broadcastAdminUpdate(type: string, data: any): void {
  io.to('admin').emit('admin-update', { type, data, timestamp: new Date().toISOString() });
}

// Seed sample data on startup
seedSampleData();

server.listen(PORT, () => {
  console.log('🚀 Enhanced Multi-Cloud Orchestration Portal starting...');
  console.log(`📊 Access dashboard at: http://${HOST}:${PORT}`);
  console.log(`🔍 Health dashboard at: http://${HOST}:${PORT}/dashboard/health`);
  console.log(`💰 Cost dashboard at: http://${HOST}:${PORT}/dashboard/costs`);
  console.log(`🔐 Admin panel at: http://${HOST}:${PORT}/admin`);
  console.log(`🔌 WebSocket server ready on port ${PORT}`);
});

// Graceful shutdown: close the database cleanly on Ctrl-C / SIGTERM.
process.on('SIGINT', () => {
  console.log('\nShutting down gracefully...');
  db.close();
  process.exit(0);
});
process.on('SIGTERM', () => {
  console.log('\nShutting down gracefully...');
  db.close();
  process.exit(0);
});

View File

@@ -0,0 +1,39 @@
import { defineConfig } from 'vite';
import vue from '@vitejs/plugin-vue';
import react from '@vitejs/plugin-react';
import path from 'path';

// https://vitejs.dev/config/
// Builds the SPA out of ./client into ./dist/client and proxies /api calls to
// the Express backend on port 5000 during development.
export default defineConfig({
  plugins: [
    vue({
      // Vue plugin configuration
    }),
    // NOTE(review): both the Vue and React plugins are registered at once —
    // confirm they do not contend over shared file extensions in this setup.
    react(),
  ],
  root: 'client',
  build: {
    // Relative to `root`, so output lands in <repo>/dist/client.
    outDir: '../dist/client',
    emptyOutDir: true,
  },
  server: {
    port: 5173,
    proxy: {
      // Forward API calls to the Express server during `vite dev`.
      '/api': {
        target: 'http://localhost:5000',
        changeOrigin: true,
      },
    },
  },
  resolve: {
    alias: {
      '@': path.resolve(__dirname, './client/src'),
      '@vue': path.resolve(__dirname, './client/src/vue'),
      '@react': path.resolve(__dirname, './client/src/react'),
    },
    // Ensure a single copy of each framework is bundled.
    dedupe: ['vue', 'react', 'react-dom'],
  },
  optimizeDeps: {
    include: ['vue', 'vue-router', 'react', 'react-dom', 'react-router-dom'],
  },
});

View File

@@ -0,0 +1,154 @@
#!/bin/bash
# Import all existing resources into Terraform state
# Fixes "already exists" errors
#
# Requires: az CLI (logged in), terraform, jq. Resolves the Terraform
# directory relative to this script, so it can be run from anywhere.
set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"
TERRAFORM_DIR="$PROJECT_ROOT/terraform/well-architected/cloud-sovereignty"
cd "$TERRAFORM_DIR"

echo "=== Importing All Existing Resources ==="
echo ""

# Region code mapping (supports both old 2-char and new 3-char codes for backward compatibility)
# Standard codes are now 3 characters, but we maintain old mappings for existing resources
declare -A REGION_CODES=(
    ["northeurope"]="nor"
    ["uksouth"]="uks"
    ["ukwest"]="ukw"
    ["westeurope"]="wst"
    ["francecentral"]="frc"
    ["germanywestcentral"]="gwc"
    ["switzerlandnorth"]="swn"
    ["switzerlandwest"]="swt"
    ["italynorth"]="ita"
    ["norwayeast"]="noe"
    ["polandcentral"]="pol"
    ["spaincentral"]="spa"
    ["swedencentral"]="swc"
    ["belgiumcentral"]="bel"
    ["austriaeast"]="aut"
    ["australiaeast"]="aus"
    ["australiasoutheast"]="ase"
    ["eastasia"]="eas"
    ["southeastasia"]="sea"
    ["centralindia"]="cin"
    ["southindia"]="sin"
    ["westindia"]="win"
    ["japaneast"]="jpe"
    ["japanwest"]="jpw"
    ["koreacentral"]="kor"
    ["koreasouth"]="kos"
    ["newzealandnorth"]="nzl"
    ["indonesiacentral"]="idn"
    ["malaysiawest"]="mys"
    ["uaenorth"]="uae"
    ["qatarcentral"]="qat"
    ["israelcentral"]="ilc"
    ["canadacentral"]="can"
    ["canadaeast"]="cae"
    ["brazilsouth"]="bra"
    ["chilecentral"]="chl"
    ["mexicocentral"]="mex"
    ["southafricanorth"]="zaf"
)

# Reverse mapping for old codes (for importing existing resources)
# NOTE(review): OLD_CODE_TO_REGION is not referenced anywhere later in this
# script — confirm whether the old-code import path was removed intentionally
# or if this map should feed the cluster-import loop below.
declare -A OLD_CODE_TO_REGION=(
    ["ne"]="northeurope"
    ["we"]="westeurope"
    ["fc"]="francecentral"
    ["sn"]="switzerlandnorth"
    ["sw"]="switzerlandwest"
    ["in"]="italynorth"
    ["pc"]="polandcentral"
    ["sc"]="spaincentral"
    ["bc"]="belgiumcentral"
    ["ae"]="australiaeast"  # Note: conflicts with austriaeast (old), prefer australiaeast
    ["ea"]="eastasia"
    ["ci"]="centralindia"
    ["si"]="southindia"
    ["wi"]="westindia"
    ["je"]="japaneast"
    ["jw"]="japanwest"
    ["kc"]="koreacentral"
    ["ks"]="koreasouth"
    ["cc"]="canadacentral"
    ["ce"]="canadaeast"
    ["bs"]="brazilsouth"
    ["mc"]="mexicocentral"
    ["qc"]="qatarcentral"
    ["ic"]="indonesiacentral"
    ["mw"]="malaysiawest"
    ["nzn"]="newzealandnorth"
    ["san"]="southafricanorth"
    ["uan"]="uaenorth"
    ["chc"]="chilecentral"
)
SUBSCRIPTION_ID="fc08d829-4f14-413d-ab27-ce024425db0b"

echo "Step 1: Importing West Europe Admin Resources"
echo ""

# Import West Europe resource groups (using new 3-char code)
for rg_type in compute network storage security monitoring identity; do
    # Try new 3-char code first, fall back to old 2-char code
    rg_name_new="az-p-wst-rg-${rg_type}-001"
    rg_name_old="az-p-we-rg-${rg_type}-001"
    # Check which one exists
    if az group show --name "$rg_name_new" &> /dev/null; then
        rg_name="$rg_name_new"
    elif az group show --name "$rg_name_old" &> /dev/null; then
        rg_name="$rg_name_old"
    else
        echo " ⚠️ Resource group not found: $rg_name_new or $rg_name_old"
        continue
    fi
    resource_id="/subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${rg_name}"
    echo "Importing $rg_name..."
    # `|| echo` keeps `set -e` from aborting when the resource is already in
    # state (terraform import exits non-zero in that case).
    terraform import "module.admin_region[0].azurerm_resource_group.${rg_type}" "$resource_id" 2>&1 | grep -E "Import|Imported|Error" || echo " ⚠️ Already in state or failed"
done

echo ""
echo "Step 2: Importing Existing AKS Clusters"
echo ""

# Get all existing clusters
CLUSTERS=$(az aks list --subscription "$SUBSCRIPTION_ID" --query "[?contains(name, 'az-p-')].{name:name, rg:resourceGroup}" -o json)

# Import each cluster
# NOTE: the pipe runs this while-loop in a subshell; that is fine here since
# no variables set inside the loop are used afterwards.
echo "$CLUSTERS" | jq -r '.[] | "\(.rg)|\(.name)"' | while IFS='|' read -r rg name; do
    # Extract region code from name
    region_code=$(echo "$name" | sed 's/az-p-\([a-z]*\)-aks-main/\1/')
    # Find region name from code
    region=""
    for reg in "${!REGION_CODES[@]}"; do
        if [ "${REGION_CODES[$reg]}" == "$region_code" ]; then
            region="$reg"
            break
        fi
    done
    if [ -z "$region" ]; then
        echo " ⚠️ Unknown region code: $region_code"
        continue
    fi
    echo "Importing $name ($region)..."
    resource_id="/subscriptions/${SUBSCRIPTION_ID}/resourceGroups/${rg}/providers/Microsoft.ContainerService/managedClusters/${name}"
    terraform import "module.region_deployment[\"$region\"].azurerm_kubernetes_cluster.main[0]" "$resource_id" 2>&1 | grep -E "Import|Imported|Error" | tail -1 || echo " ⚠️ Import failed or already in state"
done

echo ""
echo "=== ✅ Import Complete ==="
echo ""
echo "Next: Run terraform apply to continue deployment"
View File

@@ -0,0 +1,55 @@
#!/usr/bin/env bash
set -euo pipefail

# Validate Standard Dplsv6 Family vCPU usage via legacy VM usage API across nonUS commercial regions
# Emits one TSV row per matching quota entry: Region <TAB> Name <TAB> Usage <TAB> Limit
# Requires: az CLI logged in to the target subscription.

REGIONS=(
    australiacentral
    australiaeast
    australiasoutheast
    austriaeast
    belgiumcentral
    brazilsouth
    canadacentral
    canadaeast
    centralindia
    chilecentral
    eastasia
    francecentral
    germanywestcentral
    indonesiacentral
    israelcentral
    italynorth
    japaneast
    japanwest
    koreacentral
    koreasouth
    malaysiawest
    mexicocentral
    newzealandnorth
    northeurope
    polandcentral
    qatarcentral
    southafricanorth
    southafricawest
    southeastasia
    southindia
    spaincentral
    switzerlandnorth
    switzerlandwest
    uaecentral
    uaenorth
    uksouth
    ukwest
    westeurope
    westindia
)

# TSV header goes to stdout; per-region progress goes to stderr so the data
# output can be piped or redirected cleanly.
echo -e "Region\tName\tUsage\tLimit"
for region in "${REGIONS[@]}"; do
    echo "Checking $region..." 1>&2
    # List all usage rows, then filter by names containing Dpl and v6, print Name/Usage/Limit
    az vm list-usage --location "$region" --output tsv --query "[].{Name:name.localizedValue,Usage:currentValue,Limit:limit}" \
        | awk -v R="$region" -F '\t' 'tolower($1) ~ /dpl/ && tolower($1) ~ /v6/ { print R"\t"$1"\t"$2"\t"$3 }'
    # Small delay between regions to avoid ARM throttling.
    sleep 0.2
done

View File

@@ -0,0 +1,115 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.19;
import {Test, console} from "forge-std/Test.sol";
import {CCIPSender} from "../../contracts/ccip/CCIPSender.sol";
import {CCIPReceiver} from "../../contracts/ccip/CCIPReceiver.sol";
import {IRouterClient} from "../../contracts/ccip/IRouterClient.sol";
/**
 * @title CCIPErrorHandlingTest
 * @notice Failure-path tests for CCIPSender/CCIPReceiver: malformed payloads,
 *         unauthorized senders, router-only access control, and insufficient
 *         LINK balance.
 */
contract CCIPErrorHandlingTest is Test {
    CCIPSender public sender;
    CCIPReceiver public receiver;
    address public mockRouter;
    address public linkToken;

    // Destination chain selector used by the send tests.
    uint64 constant TARGET_CHAIN_SELECTOR = 5009297550715157269;

    /// @notice Deploys the mocks and the sender/receiver pair, then funds the
    ///         sender with 1000 LINK so fee payment can be exercised.
    function setUp() public {
        mockRouter = address(new MockRouter());
        linkToken = address(new MockLinkToken());
        address oracleAggregator = address(this); // Use test contract as aggregator
        sender = new CCIPSender(mockRouter, oracleAggregator, linkToken);
        receiver = new CCIPReceiver(mockRouter, address(0));
        MockLinkToken(linkToken).mint(address(sender), 1000e18);
    }

    /// @notice A payload that does not decode to the expected tuple must not
    ///         corrupt receiver state; reverting is acceptable.
    function testInvalidMessageFormat() public {
        bytes memory invalidData = "invalid";
        IRouterClient.Any2EVMMessage memory message = IRouterClient.Any2EVMMessage({
            messageId: keccak256("test"),
            sourceChainSelector: 138,
            sender: abi.encode(address(sender)),
            data: invalidData,
            tokenAmounts: new IRouterClient.TokenAmount[](0)
        });
        vm.prank(mockRouter);
        // Should handle invalid format gracefully
        try receiver.ccipReceive(message) {
            // If it doesn't revert, that's also acceptable if error handling is implemented
        } catch {
            // Expected to revert on invalid format
        }
    }

    /// @notice Delivers a message whose `sender` field is an unregistered
    ///         address.
    /// FIXME(review): this test currently asserts nothing — it neither
    /// expects a revert nor inspects receiver state afterwards, so it passes
    /// regardless of whether unauthorized senders are rejected. Add a
    /// vm.expectRevert (or a processedMessages check) once CCIPReceiver's
    /// sender-validation behavior is confirmed.
    function testUnauthorizedSender() public {
        bytes memory messageData = abi.encode(uint256(25000000000), uint256(1), uint256(block.timestamp));
        IRouterClient.Any2EVMMessage memory message = IRouterClient.Any2EVMMessage({
            messageId: keccak256("test"),
            sourceChainSelector: 138,
            sender: abi.encode(address(0x123)), // Unauthorized sender
            data: messageData,
            tokenAmounts: new IRouterClient.TokenAmount[](0)
        });
        vm.prank(mockRouter);
        // Should reject unauthorized sender
        receiver.ccipReceive(message);
    }

    /// @notice ccipReceive must only be callable by the router address.
    function testRouterOnlyAccess() public {
        bytes memory messageData = abi.encode(uint256(25000000000), uint256(1), uint256(block.timestamp));
        IRouterClient.Any2EVMMessage memory message = IRouterClient.Any2EVMMessage({
            messageId: keccak256("test"),
            sourceChainSelector: 138,
            sender: abi.encode(address(sender)),
            data: messageData,
            tokenAmounts: new IRouterClient.TokenAmount[](0)
        });
        // Try to call from non-router address
        vm.expectRevert("CCIPReceiver: only router");
        receiver.ccipReceive(message);
    }

    /// @notice Sending with a drained LINK balance must revert.
    /// @dev Bug fix: the original drained LINK by calling transfer() as the
    ///      test contract, which holds no LINK — so the drain itself reverted
    ///      with "Insufficient balance" and failed the test before reaching
    ///      vm.expectRevert. We impersonate the sender contract (the actual
    ///      LINK holder) to drain its balance. The unused messageData local
    ///      was also removed.
    function testInsufficientLinkBalance() public {
        // Add destination first
        sender.addDestination(TARGET_CHAIN_SELECTOR, address(receiver));
        // Drain the sender's LINK balance (transfer debits msg.sender).
        vm.prank(address(sender));
        MockLinkToken(linkToken).transfer(address(0xdead), 1000e18);
        // Should revert due to insufficient balance
        vm.expectRevert();
        sender.sendOracleUpdate(TARGET_CHAIN_SELECTOR, 25000000000, 1, block.timestamp);
    }
}
// Minimal router stub for the error-handling tests.
// NOTE(review): this mock exposes only send(uint64,bytes) and does not
// implement the IRouterClient interface used elsewhere in this file
// (ccipSend/getFee) — confirm whether CCIPSender's send path is expected to
// revert when calling missing functions on this mock.
contract MockRouter {
    function send(uint64, bytes memory) external pure returns (bytes32) {
        return bytes32(0);
    }
}
/// @dev Minimal LINK token stub: tracks balances only.
///      transferFrom/approve added for consistency with the MockLinkToken
///      used by the CCIP integration test suite, so a sender that pays fees
///      via transferFrom fails for the right reason (balance, not a missing
///      function selector).
contract MockLinkToken {
    mapping(address => uint256) public balanceOf;

    /// @dev Unrestricted mint — test-only.
    function mint(address to, uint256 amount) external {
        balanceOf[to] += amount;
    }

    function transfer(address to, uint256 amount) external returns (bool) {
        require(balanceOf[msg.sender] >= amount, "Insufficient balance");
        balanceOf[msg.sender] -= amount;
        balanceOf[to] += amount;
        return true;
    }

    /// @dev Allowances are not tracked; only the balance is checked.
    function transferFrom(address from, address to, uint256 amount) external returns (bool) {
        require(balanceOf[from] >= amount, "Insufficient balance");
        balanceOf[from] -= amount;
        balanceOf[to] += amount;
        return true;
    }

    /// @dev No-op approval; always succeeds.
    function approve(address, uint256) external pure returns (bool) {
        return true;
    }
}

View File

@@ -0,0 +1,142 @@
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.19;
import {Test, console} from "forge-std/Test.sol";
import {CCIPSender} from "../../contracts/ccip/CCIPSender.sol";
import {CCIPReceiver} from "../../contracts/ccip/CCIPReceiver.sol";
import {IRouterClient} from "../../contracts/ccip/IRouterClient.sol";
/**
 * @title CCIPIntegrationTest
 * @notice Happy-path and fee tests for the CCIPSender/CCIPReceiver pair
 *         against mock router and LINK token contracts: send, receive,
 *         replay protection, and fee calculation.
 */
contract CCIPIntegrationTest is Test {
    CCIPSender public sender;
    CCIPReceiver public receiver;
    address public mockRouter;
    address public linkToken;

    uint64 constant SOURCE_CHAIN_SELECTOR = 138;
    uint64 constant TARGET_CHAIN_SELECTOR = 5009297550715157269; // Ethereum Mainnet

    event MessageSent(bytes32 indexed messageId, uint64 indexed destinationChainSelector, address receiver, bytes data);
    event MessageReceived(bytes32 indexed messageId, uint64 indexed sourceChainSelector, address sender, bytes data);

    /// @notice Deploys the mocks and the sender/receiver pair, then funds the
    ///         sender with 1000 LINK for fee payment.
    function setUp() public {
        // Deploy mock router (in real tests, use actual CCIP Router)
        mockRouter = address(new MockRouter());
        linkToken = address(new MockLinkToken());
        // Deploy sender and receiver
        address oracleAggregator = address(this);
        sender = new CCIPSender(mockRouter, oracleAggregator, linkToken);
        receiver = new CCIPReceiver(mockRouter, address(0)); // Oracle aggregator address
        // Fund sender with LINK
        MockLinkToken(linkToken).mint(address(sender), 1000e18);
    }

    /// @notice An oracle update to a registered destination sends cleanly.
    /// @dev Removed an unused local (`messageData`) that was declared but
    ///      never referenced.
    function testSendMessage() public {
        // Add destination first
        sender.addDestination(TARGET_CHAIN_SELECTOR, address(receiver));
        // Send oracle update
        sender.sendOracleUpdate(TARGET_CHAIN_SELECTOR, 25000000000, 1, block.timestamp);
    }

    /// @notice A router-delivered message is processed and recorded.
    function testReceiveMessage() public {
        bytes memory messageData = abi.encode(uint256(25000000000), uint256(1), uint256(block.timestamp));
        IRouterClient.Any2EVMMessage memory message = IRouterClient.Any2EVMMessage({
            messageId: keccak256("test"),
            sourceChainSelector: SOURCE_CHAIN_SELECTOR,
            sender: abi.encode(address(sender)),
            data: messageData,
            tokenAmounts: new IRouterClient.TokenAmount[](0)
        });
        vm.prank(mockRouter);
        receiver.ccipReceive(message);
        assertTrue(receiver.processedMessages(keccak256("test")));
    }

    /// @notice Re-delivering the same messageId must revert.
    function testReplayProtection() public {
        bytes memory messageData = abi.encode(uint256(25000000000), uint256(1), uint256(block.timestamp));
        IRouterClient.Any2EVMMessage memory message = IRouterClient.Any2EVMMessage({
            messageId: keccak256("test"),
            sourceChainSelector: SOURCE_CHAIN_SELECTOR,
            sender: abi.encode(address(sender)),
            data: messageData,
            tokenAmounts: new IRouterClient.TokenAmount[](0)
        });
        vm.prank(mockRouter);
        receiver.ccipReceive(message);
        // Try to process same message again
        vm.prank(mockRouter);
        vm.expectRevert("CCIPReceiver: message already processed");
        receiver.ccipReceive(message);
    }

    /// @notice The quoted fee for a payload is non-zero (MockRouter returns a
    ///         flat 0.01 LINK).
    function testFeeCalculation() public {
        bytes memory messageData = abi.encode(uint256(25000000000), uint256(1), uint256(block.timestamp));
        uint256 fee = sender.calculateFee(TARGET_CHAIN_SELECTOR, messageData);
        assertGt(fee, 0, "Fee should be greater than 0");
    }

    /// @notice Sending with a drained LINK balance must revert.
    /// @dev Bug fix: the original called transfer() as the test contract,
    ///      which holds no LINK — the drain itself reverted and failed the
    ///      test before vm.expectRevert was reached. Impersonate the sender
    ///      contract (the actual LINK holder) to drain its balance.
    function testInsufficientFee() public {
        // Add destination first
        sender.addDestination(TARGET_CHAIN_SELECTOR, address(receiver));
        // Drain the sender's balance to cause insufficient fee
        vm.prank(address(sender));
        MockLinkToken(linkToken).transfer(address(0xdead), 1000e18);
        vm.expectRevert();
        sender.sendOracleUpdate(TARGET_CHAIN_SELECTOR, 25000000000, 1, block.timestamp);
    }
}
// Mock contracts for testing
/// @dev IRouterClient implementation returning canned values: every send
///      yields the same message id and a flat 0.01 LINK fee.
contract MockRouter is IRouterClient {
    function ccipSend(uint64, EVM2AnyMessage memory) external pure returns (bytes32, uint256) {
        return (keccak256("mock"), 0.01e18);
    }

    /// @dev Flat fee regardless of destination or payload.
    function getFee(uint64, EVM2AnyMessage memory) external pure returns (uint256) {
        return 0.01e18;
    }

    /// @dev No token transfers are supported by this mock.
    function getSupportedTokens(uint64) external pure returns (address[] memory) {
        return new address[](0);
    }
}
/// @dev Minimal LINK token stub: tracks balances only; allowances are not
///      enforced, so approve always succeeds and transferFrom checks just
///      the sender's balance.
contract MockLinkToken {
    mapping(address => uint256) public balanceOf;

    /// @dev Unrestricted mint — test-only.
    function mint(address to, uint256 amount) external {
        balanceOf[to] += amount;
    }

    function transfer(address to, uint256 amount) external returns (bool) {
        _move(msg.sender, to, amount);
        return true;
    }

    function transferFrom(address from, address to, uint256 amount) external returns (bool) {
        _move(from, to, amount);
        return true;
    }

    /// @dev No-op approval; always succeeds.
    function approve(address, uint256) external pure returns (bool) {
        return true;
    }

    /// @dev Shared balance bookkeeping for transfer/transferFrom.
    function _move(address from, address to, uint256 amount) private {
        require(balanceOf[from] >= amount, "Insufficient balance");
        balanceOf[from] -= amount;
        balanceOf[to] += amount;
    }
}