- Introduced Aggregator.sol for Chainlink-compatible oracle functionality, including round-based updates and access control. - Added OracleWithCCIP.sol to extend Aggregator with CCIP cross-chain messaging capabilities. - Created .gitmodules to include OpenZeppelin contracts as a submodule. - Developed a comprehensive deployment guide in NEXT_STEPS_COMPLETE_GUIDE.md for Phase 2 and smart contract deployment. - Implemented Vite configuration for the orchestration portal, supporting both Vue and React frameworks. - Added server-side logic for the Multi-Cloud Orchestration Portal, including API endpoints for environment management and monitoring. - Created scripts for resource import and usage validation across non-US regions. - Added tests for CCIP error handling and integration to ensure robust functionality. - Included various new files and directories for the orchestration portal and deployment scripts.
382 lines
9.0 KiB
HCL
# AWS Infrastructure Module
|
|
# Creates EKS cluster, networking, and supporting resources for AWS environments
|
|
|
|
# Shared values derived from the caller-supplied environment configuration.
# Every lookup degrades gracefully (via try()) when a section is absent.
locals {
  env = var.environment_config

  # AWS-specific settings for this environment ({} when none are given).
  aws_config = try(local.env.aws, {})

  # Infrastructure sub-sections of the environment config.
  infra      = try(local.env.infrastructure, {})
  k8s_config = try(local.infra.kubernetes, {})
  net_config = try(local.infra.networking, {})

  # Prefix applied to every resource name, e.g. "myenv-prod".
  name_prefix = "${local.env.name}-${var.environment}"

  # Node pool definitions keyed by pool name (system / validators / rpc).
  node_pools = try(local.k8s_config.node_pools, {})
}
|
|
|
|
# VPC
|
|
# Single VPC hosting all subnets for this environment.
resource "aws_vpc" "main" {
  # CIDR comes from the networking config; falls back to 10.0.0.0/16.
  cidr_block           = try(local.net_config.vpc_cidr, "10.0.0.0/16")
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-vpc"
    Type = "Multi-Cloud"
  })
}
|
|
|
|
# Internet Gateway
|
|
# Internet gateway giving the VPC outbound/inbound internet access.
resource "aws_internet_gateway" "main" {
  vpc_id = aws_vpc.main.id

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-igw"
  })
}
|
|
|
|
# Subnets
|
|
# Public subnet(s) for the EKS control plane and worker nodes.
resource "aws_subnet" "eks" {
  # One subnet per configured entry named "eks", "gke", or "aks".
  # NOTE(review): matching "gke"/"aks" inside an AWS-only module looks like a
  # copy/paste from the GCP/Azure variants -- confirm whether only "eks"
  # should be selected here.
  for_each = {
    for idx, subnet in try(local.net_config.subnets, []) : subnet.name => subnet
    if subnet.name == "eks" || subnet.name == "gke" || subnet.name == "aks"
  }

  vpc_id     = aws_vpc.main.id
  cidr_block = each.value.cidr
  # NOTE(review): every subnet without an explicit availability_zone lands in
  # the first AZ, and EKS requires cluster subnets in at least two AZs --
  # verify the environment config supplies an AZ per subnet.
  availability_zone = try(each.value.availability_zone, data.aws_availability_zones.available.names[0])

  # Public subnet: instances receive public IPs at launch.
  map_public_ip_on_launch = true

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-subnet-${each.key}"
    # Lets Kubernetes place public load balancers in this subnet.
    "kubernetes.io/role/elb" = "1"
  })
}
|
|
|
|
# Subnet for validator nodes. No public IPs are assigned at launch, but
# NOTE(review): this subnet is still associated with the internet-gateway
# route table further below, so it is not fully private -- confirm intent.
resource "aws_subnet" "validators" {
  for_each = {
    for idx, subnet in try(local.net_config.subnets, []) : subnet.name => subnet
    if subnet.name == "validators"
  }

  vpc_id     = aws_vpc.main.id
  cidr_block = each.value.cidr
  # Defaults to the second AZ to spread load away from the eks/rpc subnets.
  availability_zone = try(each.value.availability_zone, data.aws_availability_zones.available.names[1])

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-subnet-validators"
  })
}
|
|
|
|
# Public subnet for RPC nodes (serves external HTTP/WebSocket traffic).
resource "aws_subnet" "rpc" {
  for_each = {
    for idx, subnet in try(local.net_config.subnets, []) : subnet.name => subnet
    if subnet.name == "rpc"
  }

  vpc_id            = aws_vpc.main.id
  cidr_block        = each.value.cidr
  availability_zone = try(each.value.availability_zone, data.aws_availability_zones.available.names[0])

  # Public subnet: instances receive public IPs at launch.
  map_public_ip_on_launch = true

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-subnet-rpc"
    # Lets Kubernetes place public load balancers in this subnet.
    "kubernetes.io/role/elb" = "1"
  })
}
|
|
|
|
# Route Table
|
|
# Route table sending all non-local traffic to the internet gateway.
resource "aws_route_table" "main" {
  vpc_id = aws_vpc.main.id

  # Default route to the internet.
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.main.id
  }

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-rt"
  })
}
|
|
|
|
# Attach every subnet to the internet-facing route table.
# NOTE(review): this includes the validators subnet, whose security group is
# described as "private" -- if validators should not have a direct internet
# route, they need their own route table (e.g. via a NAT gateway) instead.
resource "aws_route_table_association" "subnets" {
  for_each = merge(
    aws_subnet.eks,
    aws_subnet.validators,
    aws_subnet.rpc
  )

  subnet_id      = each.value.id
  route_table_id = aws_route_table.main.id
}
|
|
|
|
# Security Groups
|
|
# Security group attached to the EKS control plane ENIs.
# No explicit ingress rules here; all outbound traffic is allowed.
resource "aws_security_group" "eks_cluster" {
  name        = "${local.name_prefix}-eks-cluster-sg"
  description = "Security group for EKS cluster"
  vpc_id      = aws_vpc.main.id

  # Allow all outbound traffic.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-eks-cluster-sg"
  })
}
|
|
|
|
# Security group for validator nodes: P2P traffic only, and only from
# inside the VPC.
resource "aws_security_group" "validators" {
  name        = "${local.name_prefix}-validators-sg"
  description = "Security group for validator nodes (private)"
  vpc_id      = aws_vpc.main.id

  # P2P port (30303/tcp), restricted to the VPC CIDR.
  ingress {
    from_port   = 30303
    to_port     = 30303
    protocol    = "tcp"
    cidr_blocks = [aws_vpc.main.cidr_block]
    description = "P2P port from internal"
  }

  # Allow all outbound traffic.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-validators-sg"
  })
}
|
|
|
|
# Security group for RPC nodes: HTTP and WebSocket RPC open to the world.
# NOTE(security): 8545/8546 are exposed to 0.0.0.0/0. If these endpoints are
# not meant to be fully public, front them with a load balancer / WAF or
# restrict the CIDR ranges -- confirm the intended exposure.
resource "aws_security_group" "rpc" {
  name        = "${local.name_prefix}-rpc-sg"
  description = "Security group for RPC nodes"
  vpc_id      = aws_vpc.main.id

  # JSON-RPC over HTTP.
  ingress {
    from_port   = 8545
    to_port     = 8545
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
    description = "RPC HTTP"
  }

  # JSON-RPC over WebSocket.
  ingress {
    from_port   = 8546
    to_port     = 8546
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
    description = "RPC WebSocket"
  }

  # Allow all outbound traffic.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-rpc-sg"
  })
}
|
|
|
|
# EKS Cluster
|
|
# EKS control plane. Placed in the "eks" subnets with both private and
# public API endpoints enabled, and full control-plane logging on.
resource "aws_eks_cluster" "main" {
  name     = "${local.name_prefix}-eks"
  role_arn = aws_iam_role.eks_cluster.arn
  # Kubernetes version from config; defaults to 1.28.
  version = try(local.k8s_config.version, "1.28")

  vpc_config {
    subnet_ids              = [for s in aws_subnet.eks : s.id]
    security_group_ids      = [aws_security_group.eks_cluster.id]
    endpoint_private_access = true
    endpoint_public_access  = true
  }

  # Ship all control-plane log types to CloudWatch.
  enabled_cluster_log_types = ["api", "audit", "authenticator", "controllerManager", "scheduler"]

  # The log group must exist first so EKS does not auto-create one with
  # different settings; the cluster policy must be attached before creation.
  depends_on = [
    aws_cloudwatch_log_group.eks,
    aws_iam_role_policy_attachment.eks_cluster_policy,
  ]

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-eks"
  })
}
|
|
|
|
# EKS Node Groups
|
|
# EKS node group for cluster system workloads.
resource "aws_eks_node_group" "system" {
  # Only create the pool when the environment config asks for >0 nodes.
  count = try(local.node_pools.system.count, 0) > 0 ? 1 : 0

  cluster_name    = aws_eks_cluster.main.name
  node_group_name = "system"
  node_role_arn   = aws_iam_role.eks_node.arn
  subnet_ids      = [for s in aws_subnet.eks : s.id]

  instance_types = [try(local.node_pools.system.instance_type, "t3.medium")]
  capacity_type  = "ON_DEMAND"

  # Desired size from config; allow scaling up to twice that.
  scaling_config {
    desired_size = try(local.node_pools.system.count, 1)
    max_size     = try(local.node_pools.system.count, 1) * 2
    min_size     = 1
  }

  labels = {
    pool = "system"
    role = "system"
  }

  # Ensure the node role's policies are attached before the node group is
  # created and detached only after it is destroyed; otherwise nodes can
  # fail to join the cluster, or destroy can hang (per the AWS provider
  # documentation for aws_eks_node_group).
  depends_on = [
    aws_iam_role_policy_attachment.eks_worker_node_policy,
    aws_iam_role_policy_attachment.eks_cni_policy,
    aws_iam_role_policy_attachment.eks_container_registry_policy,
  ]

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-node-system"
    Pool = "system"
  })
}
|
|
|
|
# EKS node group dedicated to validator pods. Tainted so that only
# workloads tolerating role=validator are scheduled here.
resource "aws_eks_node_group" "validators" {
  # Only create the pool when the environment config asks for >0 nodes.
  count = try(local.node_pools.validators.count, 0) > 0 ? 1 : 0

  cluster_name    = aws_eks_cluster.main.name
  node_group_name = "validators"
  node_role_arn   = aws_iam_role.eks_node.arn
  subnet_ids      = [for s in aws_subnet.validators : s.id]

  instance_types = [try(local.node_pools.validators.instance_type, "t3.medium")]
  capacity_type  = "ON_DEMAND"

  # Desired size from config; allow scaling up to twice that.
  scaling_config {
    desired_size = try(local.node_pools.validators.count, 1)
    max_size     = try(local.node_pools.validators.count, 1) * 2
    min_size     = 1
  }

  labels = {
    pool = "validators"
    role = "validator"
  }

  # Keep general workloads off validator nodes.
  taint {
    key    = "role"
    value  = "validator"
    effect = "NO_SCHEDULE"
  }

  # Ensure the node role's policies are attached before the node group is
  # created and detached only after it is destroyed; otherwise nodes can
  # fail to join the cluster, or destroy can hang (per the AWS provider
  # documentation for aws_eks_node_group).
  depends_on = [
    aws_iam_role_policy_attachment.eks_worker_node_policy,
    aws_iam_role_policy_attachment.eks_cni_policy,
    aws_iam_role_policy_attachment.eks_container_registry_policy,
  ]

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-node-validators"
    Pool = "validators"
  })
}
|
|
|
|
# EKS node group serving external RPC traffic.
resource "aws_eks_node_group" "rpc" {
  # Only create the pool when the environment config asks for >0 nodes.
  count = try(local.node_pools.rpc.count, 0) > 0 ? 1 : 0

  cluster_name    = aws_eks_cluster.main.name
  node_group_name = "rpc"
  node_role_arn   = aws_iam_role.eks_node.arn
  subnet_ids      = [for s in aws_subnet.rpc : s.id]

  instance_types = [try(local.node_pools.rpc.instance_type, "t3.medium")]
  capacity_type  = "ON_DEMAND"

  # Desired size from config; allow scaling up to twice that.
  scaling_config {
    desired_size = try(local.node_pools.rpc.count, 1)
    max_size     = try(local.node_pools.rpc.count, 1) * 2
    min_size     = 1
  }

  labels = {
    pool = "rpc"
    role = "rpc"
  }

  # Ensure the node role's policies are attached before the node group is
  # created and detached only after it is destroyed; otherwise nodes can
  # fail to join the cluster, or destroy can hang (per the AWS provider
  # documentation for aws_eks_node_group).
  depends_on = [
    aws_iam_role_policy_attachment.eks_worker_node_policy,
    aws_iam_role_policy_attachment.eks_cni_policy,
    aws_iam_role_policy_attachment.eks_container_registry_policy,
  ]

  tags = merge(var.tags, {
    Name = "${local.name_prefix}-node-rpc"
    Pool = "rpc"
  })
}
|
|
|
|
# Data sources
|
|
# AZs currently usable in the configured region (used for subnet defaults).
data "aws_availability_zones" "available" {
  state = "available"
}

# Identity of the caller; declared for use by consumers of this module.
data "aws_caller_identity" "current" {}
|
|
|
|
# IAM Roles and Policies
|
|
# IAM role assumed by the EKS control plane.
resource "aws_iam_role" "eks_cluster" {
  name = "${local.name_prefix}-eks-cluster-role"

  # Trust policy: only the EKS service may assume this role.
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Action = "sts:AssumeRole"
      Effect = "Allow"
      Principal = {
        Service = "eks.amazonaws.com"
      }
    }]
  })

  tags = var.tags
}
|
|
|
|
# AWS-managed policy required by the EKS control plane role.
resource "aws_iam_role_policy_attachment" "eks_cluster_policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = aws_iam_role.eks_cluster.name
}
|
|
|
|
# IAM role assumed by EKS worker node EC2 instances.
resource "aws_iam_role" "eks_node" {
  name = "${local.name_prefix}-eks-node-role"

  # Trust policy: only EC2 may assume this role.
  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Action = "sts:AssumeRole"
      Effect = "Allow"
      Principal = {
        Service = "ec2.amazonaws.com"
      }
    }]
  })

  tags = var.tags
}
|
|
|
|
# AWS-managed policies required by EKS worker nodes:
# - worker node policy: lets kubelet join and operate in the cluster.
resource "aws_iam_role_policy_attachment" "eks_worker_node_policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.eks_node.name
}

# - CNI policy: lets the VPC CNI plugin manage pod networking (ENIs/IPs).
resource "aws_iam_role_policy_attachment" "eks_cni_policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.eks_node.name
}

# - ECR read-only: lets nodes pull container images from ECR.
resource "aws_iam_role_policy_attachment" "eks_container_registry_policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.eks_node.name
}
|
|
|
|
# CloudWatch Log Group
|
|
# CloudWatch log group for EKS control-plane logs. The name must follow the
# /aws/eks/<cluster-name>/cluster pattern so EKS writes into it instead of
# auto-creating its own group.
resource "aws_cloudwatch_log_group" "eks" {
  name              = "/aws/eks/${local.name_prefix}-eks/cluster"
  retention_in_days = 7

  tags = var.tags
}
|
|
|