feat: explorer API, wallet, CCIP scripts, and config refresh

- Backend REST/gateway/track routes, analytics, Blockscout proxy paths.
- Frontend wallet and liquidity surfaces; MetaMask token list alignment.
- Deployment docs, verification scripts, address inventory updates.

Check: go build ./... under backend/ (pass).
Made-with: Cursor
This commit is contained in:
defiQUG
2026-04-07 23:22:12 -07:00
parent 4044fb07e1
commit bdae5a9f6e
224 changed files with 19671 additions and 3291 deletions

3
.gitignore vendored
View File

@@ -2,6 +2,9 @@
node_modules/
vendor/
# Optional local Mermaid (see frontend/public/thirdparty/README.md)
frontend/public/thirdparty/mermaid.min.js
# Build outputs
dist/
build/

View File

@@ -24,9 +24,12 @@ If the script doesn't work, see `START_HERE.md` for step-by-step manual commands
## Frontend
- **Production (canonical):** The **SPA** (`frontend/public/index.html`) is what is deployed and served at **https://explorer.d-bis.org** (VMID 5000).
- **Next.js app** in `frontend/src/` is for **local dev and build validation only**; it is not deployed to production.
- **Deploy frontend only:** `./scripts/deploy-frontend-to-vmid5000.sh` (from repo root; copies `index.html` and assets to `/var/www/html/`)
- **Production (canonical target):** the current **Next.js standalone frontend** in `frontend/src/`, built from `frontend/` with `npm run build` and deployed to VMID 5000 as a Node service behind nginx.
- **Canonical deploy script:** `./scripts/deploy-next-frontend-to-vmid5000.sh`
- **Canonical nginx wiring:** keep `/api`, `/api/config/*`, `/explorer-api/*`, `/token-aggregation/api/v1/*`, `/snap/`, and `/health`; proxy `/` and `/_next/` to the frontend service using `deployment/common/nginx-next-frontend-proxy.conf`.
- **Legacy fallback only:** the static SPA (`frontend/public/index.html` + `explorer-spa.js`) remains in-repo for compatibility, but it is no longer the preferred deployment target.
- **Architecture command center:** `frontend/public/chain138-command-center.html` — tabbed Mermaid topology (Chain 138 hub, network, stack, flows, cross-chain, cW Mainnet, off-chain, integrations). Linked from the SPA **More → Explore → Visual Command Center**.
- **Legacy static deploy:** `./scripts/deploy-frontend-to-vmid5000.sh` (copies `index.html` and assets to `/var/www/html/`)
- **Frontend review & tasks:** [frontend/FRONTEND_REVIEW.md](frontend/FRONTEND_REVIEW.md), [frontend/FRONTEND_TASKS_AND_REVIEW.md](frontend/FRONTEND_TASKS_AND_REVIEW.md)
## Documentation

View File

@@ -3,6 +3,7 @@ package analytics
import (
"context"
"fmt"
"strings"
"time"
"github.com/jackc/pgx/v5/pgxpool"
@@ -28,60 +29,64 @@ type BridgeStats struct {
// ChainStats represents chain statistics
type ChainStats struct {
Outbound int
Inbound int
VolumeOut string
VolumeIn string
Outbound int
Inbound int
VolumeOut string
VolumeIn string
}
// TokenStats represents token statistics
type TokenStats struct {
Token string
Symbol string
Transfers int
Volume string
Token string
Symbol string
Transfers int
Volume string
}
// GetBridgeStats gets bridge statistics
func (ba *BridgeAnalytics) GetBridgeStats(ctx context.Context, chainFrom, chainTo *int, startDate, endDate *time.Time) (*BridgeStats, error) {
query := `
SELECT
COUNT(*) as transfers_24h,
SUM(amount) as volume_24h
FROM analytics_bridge_history
WHERE timestamp >= NOW() - INTERVAL '24 hours'
`
clauses := []string{"timestamp >= NOW() - INTERVAL '24 hours'"}
args := []interface{}{}
argIndex := 1
if chainFrom != nil {
query += fmt.Sprintf(" AND chain_from = $%d", argIndex)
clauses = append(clauses, fmt.Sprintf("chain_from = $%d", argIndex))
args = append(args, *chainFrom)
argIndex++
}
if chainTo != nil {
query += fmt.Sprintf(" AND chain_to = $%d", argIndex)
clauses = append(clauses, fmt.Sprintf("chain_to = $%d", argIndex))
args = append(args, *chainTo)
argIndex++
}
if startDate != nil {
query += fmt.Sprintf(" AND timestamp >= $%d", argIndex)
clauses = append(clauses, fmt.Sprintf("timestamp >= $%d", argIndex))
args = append(args, *startDate)
argIndex++
}
if endDate != nil {
query += fmt.Sprintf(" AND timestamp <= $%d", argIndex)
clauses = append(clauses, fmt.Sprintf("timestamp <= $%d", argIndex))
args = append(args, *endDate)
argIndex++
}
filteredCTE := fmt.Sprintf(`
WITH filtered AS (
SELECT chain_from, chain_to, token_contract, amount
FROM analytics_bridge_history
WHERE %s
)
`, strings.Join(clauses, " AND "))
var transfers24h int
var volume24h string
err := ba.db.QueryRow(ctx, query, args...).Scan(&transfers24h, &volume24h)
err := ba.db.QueryRow(ctx, filteredCTE+`
SELECT COUNT(*) as transfers_24h, COALESCE(SUM(amount)::text, '0') as volume_24h
FROM filtered
`, args...).Scan(&transfers24h, &volume24h)
if err != nil {
return nil, fmt.Errorf("failed to get bridge stats: %w", err)
}
@@ -93,21 +98,28 @@ func (ba *BridgeAnalytics) GetBridgeStats(ctx context.Context, chainFrom, chainT
TopTokens: []TokenStats{},
}
// Get chain stats
chainQuery := `
SELECT
chain_from,
COUNT(*) FILTER (WHERE chain_from = $1) as outbound,
COUNT(*) FILTER (WHERE chain_to = $1) as inbound,
SUM(amount) FILTER (WHERE chain_from = $1) as volume_out,
SUM(amount) FILTER (WHERE chain_to = $1) as volume_in
FROM analytics_bridge_history
WHERE (chain_from = $1 OR chain_to = $1) AND timestamp >= NOW() - INTERVAL '24 hours'
GROUP BY chain_from
`
rows, err := ba.db.Query(ctx, filteredCTE+`
SELECT
chain_id,
SUM(outbound) as outbound,
SUM(inbound) as inbound,
COALESCE(SUM(volume_out)::text, '0') as volume_out,
COALESCE(SUM(volume_in)::text, '0') as volume_in
FROM (
SELECT chain_from AS chain_id, 1 AS outbound, 0 AS inbound, amount AS volume_out, 0::numeric AS volume_in
FROM filtered
UNION ALL
SELECT chain_to AS chain_id, 0 AS outbound, 1 AS inbound, 0::numeric AS volume_out, amount AS volume_in
FROM filtered
) chain_rollup
GROUP BY chain_id
ORDER BY chain_id
`, args...)
if err != nil {
return nil, fmt.Errorf("failed to get chain breakdown: %w", err)
}
defer rows.Close()
// Simplified - in production, iterate over all chains
rows, _ := ba.db.Query(ctx, chainQuery, 138)
for rows.Next() {
var chainID, outbound, inbound int
var volumeOut, volumeIn string
@@ -120,8 +132,30 @@ func (ba *BridgeAnalytics) GetBridgeStats(ctx context.Context, chainFrom, chainT
}
}
}
rows.Close()
tokenRows, err := ba.db.Query(ctx, filteredCTE+`
SELECT
token_contract,
COUNT(*) as transfers,
COALESCE(SUM(amount)::text, '0') as volume
FROM filtered
WHERE token_contract IS NOT NULL AND token_contract <> ''
GROUP BY token_contract
ORDER BY transfers DESC, volume DESC
LIMIT 10
`, args...)
if err != nil {
return nil, fmt.Errorf("failed to get top bridge tokens: %w", err)
}
defer tokenRows.Close()
for tokenRows.Next() {
var token TokenStats
if err := tokenRows.Scan(&token.Token, &token.Transfers, &token.Volume); err != nil {
continue
}
stats.TopTokens = append(stats.TopTokens, token)
}
return stats, nil
}

View File

@@ -3,13 +3,15 @@ package analytics
import (
	"context"
	"fmt"
	"math"
	"math/big"
	"strings"

	"github.com/jackc/pgx/v5/pgxpool"
)
// TokenDistribution provides token distribution analytics
type TokenDistribution struct {
db *pgxpool.Pool
db *pgxpool.Pool
chainID int
}
@@ -23,12 +25,12 @@ func NewTokenDistribution(db *pgxpool.Pool, chainID int) *TokenDistribution {
// DistributionStats represents token distribution statistics
type DistributionStats struct {
Contract string
Symbol string
TotalSupply string
Holders int
Distribution map[string]string
TopHolders []HolderInfo
Contract string
Symbol string
TotalSupply string
Holders int
Distribution map[string]string
TopHolders []HolderInfo
}
// HolderInfo represents holder information
@@ -76,13 +78,16 @@ func (td *TokenDistribution) GetTokenDistribution(ctx context.Context, contract
defer rows.Close()
topHolders := []HolderInfo{}
totalSupplyRat, ok := parseNumericString(totalSupply)
if !ok || totalSupplyRat.Sign() <= 0 {
totalSupplyRat = big.NewRat(0, 1)
}
for rows.Next() {
var holder HolderInfo
if err := rows.Scan(&holder.Address, &holder.Balance); err != nil {
continue
}
// Calculate percentage (simplified)
holder.Percentage = "0.0" // TODO: Calculate from total supply
holder.Percentage = formatPercentage(holder.Balance, totalSupplyRat, 4)
topHolders = append(topHolders, holder)
}
@@ -94,11 +99,132 @@ func (td *TokenDistribution) GetTokenDistribution(ctx context.Context, contract
TopHolders: topHolders,
}
// Calculate distribution metrics
stats.Distribution["top_10_percent"] = "0.0" // TODO: Calculate
stats.Distribution["top_1_percent"] = "0.0" // TODO: Calculate
stats.Distribution["gini_coefficient"] = "0.0" // TODO: Calculate
balances, err := td.loadHolderBalances(ctx, contract)
if err != nil {
return nil, fmt.Errorf("failed to compute holder metrics: %w", err)
}
stats.Distribution["top_10_percent"] = concentrationPercent(balances, totalSupplyRat, 0.10)
stats.Distribution["top_1_percent"] = concentrationPercent(balances, totalSupplyRat, 0.01)
stats.Distribution["gini_coefficient"] = giniCoefficient(balances)
return stats, nil
}
// loadHolderBalances returns every positive holder balance for the given
// token contract on the configured chain, ordered largest-first. The
// descending order is relied on by concentrationPercent and giniCoefficient.
func (td *TokenDistribution) loadHolderBalances(ctx context.Context, contract string) ([]*big.Rat, error) {
	rows, err := td.db.Query(ctx, `
		SELECT balance
		FROM token_balances
		WHERE token_contract = $1 AND chain_id = $2 AND balance > 0
		ORDER BY balance DESC
	`, contract, td.chainID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	balances := make([]*big.Rat, 0)
	for rows.Next() {
		var raw string
		if err := rows.Scan(&raw); err != nil {
			// Best-effort: skip an unreadable row rather than failing the metric.
			continue
		}
		if balance, ok := parseNumericString(raw); ok && balance.Sign() > 0 {
			balances = append(balances, balance)
		}
	}
	// Surface iteration/driver errors (e.g. dropped connection) instead of
	// silently returning a truncated balance set.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return balances, nil
}
// parseNumericString parses a decimal or rational numeric string (such as a
// Postgres NUMERIC value) into a big.Rat. The boolean reports success.
func parseNumericString(raw string) (*big.Rat, bool) {
	var parsed big.Rat
	if _, ok := parsed.SetString(raw); !ok {
		return nil, false
	}
	return &parsed, true
}
// formatPercentage renders raw (a numeric string) as a percentage of total,
// using at most the given number of decimal places. Unparseable input
// renders as "0".
func formatPercentage(raw string, total *big.Rat, decimals int) string {
	if value, ok := parseNumericString(raw); ok {
		return formatRatioAsPercent(value, total, decimals)
	}
	return "0"
}
// concentrationPercent reports the share of total supply held by the top
// `percentile` fraction of holders. balances must be sorted largest-first,
// as produced by loadHolderBalances.
func concentrationPercent(balances []*big.Rat, total *big.Rat, percentile float64) string {
	if len(balances) == 0 {
		return "0"
	}
	// Clamp the holder count to [1, len(balances)].
	count := int(math.Ceil(float64(len(balances)) * percentile))
	if count < 1 {
		count = 1
	} else if count > len(balances) {
		count = len(balances)
	}
	sum := new(big.Rat)
	for _, balance := range balances[:count] {
		sum.Add(sum, balance)
	}
	return formatRatioAsPercent(sum, total, 4)
}
// formatRatioAsPercent renders value/total as a percentage string with at
// most `decimals` fractional digits. It returns "0" when either operand is
// nil or total is not strictly positive (guarding the division below).
func formatRatioAsPercent(value, total *big.Rat, decimals int) string {
	if value == nil || total == nil || total.Sign() <= 0 {
		return "0"
	}
	ratio := new(big.Rat).Quo(value, total)
	return formatRat(ratio.Mul(ratio, big.NewRat(100, 1)), decimals)
}
// giniCoefficient computes the Gini coefficient of the holder balances and
// formats it with up to six decimal places. balances must be sorted
// largest-first (as produced by loadHolderBalances); empty or zero-total
// input yields "0".
func giniCoefficient(balances []*big.Rat) string {
	n := len(balances)
	if n == 0 {
		return "0"
	}
	// G = 2*Σ(rank_i * x_i) / (n * Σx) - (n+1)/n, with rank 1-based over
	// ascending balances. For a descending slice, element i has rank n-i.
	total := new(big.Rat)
	weightedSum := new(big.Rat)
	for i, balance := range balances {
		total.Add(total, balance)
		rank := big.NewRat(int64(n-i), 1)
		weightedSum.Add(weightedSum, rank.Mul(rank, balance))
	}
	if total.Sign() <= 0 {
		return "0"
	}
	gini := new(big.Rat).Quo(
		new(big.Rat).Mul(weightedSum, big.NewRat(2, 1)),
		new(big.Rat).Mul(big.NewRat(int64(n), 1), total),
	)
	gini.Sub(gini, big.NewRat(int64(n+1), int64(n)))
	// Numeric noise can push the value fractionally below zero; clamp it.
	if gini.Sign() < 0 {
		return "0"
	}
	return formatRat(gini, 6)
}
// formatRat renders value as a fixed-point decimal string with at most
// `decimals` fractional digits, trimming trailing fractional zeros and any
// bare trailing decimal point. A nil value renders as "0".
func formatRat(value *big.Rat, decimals int) string {
	if value == nil {
		return "0"
	}
	text := new(big.Float).SetPrec(256).SetRat(value).Text('f', decimals)
	// Only trim when a fractional part exists; trimming unconditionally
	// would corrupt integers (e.g. "100" with decimals=0 became "1").
	if strings.Contains(text, ".") {
		text = strings.TrimRight(text, "0")
		text = strings.TrimSuffix(text, ".")
	}
	return text
}

View File

@@ -1,13 +1,19 @@
package gateway
import (
"crypto/subtle"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"sync"
"time"
httperrors "github.com/explorer/backend/libs/go-http-errors"
httpmiddleware "github.com/explorer/backend/libs/go-http-middleware"
)
// Gateway represents the API gateway
@@ -64,7 +70,9 @@ func (g *Gateway) handleRequest(proxy *httputil.ReverseProxy) http.HandlerFunc {
}
// Add headers
r.Header.Set("X-Forwarded-For", r.RemoteAddr)
if clientIP := httpmiddleware.ClientIP(r); clientIP != "" {
r.Header.Set("X-Forwarded-For", clientIP)
}
if apiKey := g.auth.GetAPIKey(r); apiKey != "" {
r.Header.Set("X-API-Key", apiKey)
}
@@ -92,14 +100,17 @@ func (g *Gateway) addSecurityHeaders(w http.ResponseWriter) {
// RateLimiter handles rate limiting
type RateLimiter struct {
// Simple in-memory rate limiter (should use Redis in production)
mu sync.Mutex
limits map[string]*limitEntry
}
type limitEntry struct {
count int
resetAt int64
resetAt time.Time
}
const gatewayRequestsPerMinute = 120
func NewRateLimiter() *RateLimiter {
return &RateLimiter{
limits: make(map[string]*limitEntry),
@@ -107,26 +118,62 @@ func NewRateLimiter() *RateLimiter {
}
func (rl *RateLimiter) Allow(r *http.Request) bool {
_ = r.RemoteAddr // Will be used in production for per-IP limiting
// In production, use Redis with token bucket algorithm
// For now, simple per-IP limiting
return true // Simplified - implement proper rate limiting
clientIP := httpmiddleware.ClientIP(r)
if clientIP == "" {
clientIP = r.RemoteAddr
}
now := time.Now()
rl.mu.Lock()
defer rl.mu.Unlock()
entry, ok := rl.limits[clientIP]
if !ok || now.After(entry.resetAt) {
rl.limits[clientIP] = &limitEntry{
count: 1,
resetAt: now.Add(time.Minute),
}
return true
}
if entry.count >= gatewayRequestsPerMinute {
return false
}
entry.count++
return true
}
// AuthMiddleware handles authentication
type AuthMiddleware struct {
// In production, validate against database
allowAnonymous bool
apiKeys []string
}
func NewAuthMiddleware() *AuthMiddleware {
return &AuthMiddleware{}
return &AuthMiddleware{
allowAnonymous: parseBoolEnv("GATEWAY_ALLOW_ANONYMOUS"),
apiKeys: splitNonEmptyEnv("GATEWAY_API_KEYS"),
}
}
func (am *AuthMiddleware) Authenticate(r *http.Request) bool {
// Allow anonymous access for now
// In production, validate API key
apiKey := am.GetAPIKey(r)
return apiKey != "" || true // Allow anonymous for MVP
if apiKey == "" {
return am.allowAnonymous
}
if len(am.apiKeys) == 0 {
return am.allowAnonymous
}
for _, allowedKey := range am.apiKeys {
if subtle.ConstantTimeCompare([]byte(apiKey), []byte(allowedKey)) == 1 {
return true
}
}
return false
}
func (am *AuthMiddleware) GetAPIKey(r *http.Request) string {
@@ -140,3 +187,29 @@ func (am *AuthMiddleware) GetAPIKey(r *http.Request) string {
}
return ""
}
// parseBoolEnv reports whether the environment variable named key holds a
// truthy value: "1", "true", "yes", or "on" (case-insensitive, surrounding
// whitespace ignored). Anything else, including unset, is false.
func parseBoolEnv(key string) bool {
	value := strings.TrimSpace(os.Getenv(key))
	for _, truthy := range []string{"1", "true", "yes", "on"} {
		if strings.EqualFold(value, truthy) {
			return true
		}
	}
	return false
}
// splitNonEmptyEnv reads a comma-separated environment variable and returns
// its trimmed, non-empty entries. It returns nil when the variable is unset
// or blank.
func splitNonEmptyEnv(key string) []string {
	raw := strings.TrimSpace(os.Getenv(key))
	if raw == "" {
		return nil
	}
	fields := strings.FieldsFunc(raw, func(r rune) bool { return r == ',' })
	values := make([]string, 0, len(fields))
	for _, field := range fields {
		if trimmed := strings.TrimSpace(field); trimmed != "" {
			values = append(values, trimmed)
		}
	}
	return values
}

View File

@@ -0,0 +1,78 @@
package gateway
import (
"net/http/httptest"
"testing"
"time"
)
// TestAuthMiddlewareRejectsAnonymousByDefault ensures that with no API keys
// configured and anonymous access not enabled, requests are denied.
func TestAuthMiddlewareRejectsAnonymousByDefault(t *testing.T) {
	t.Setenv("GATEWAY_ALLOW_ANONYMOUS", "")
	t.Setenv("GATEWAY_API_KEYS", "")
	middleware := NewAuthMiddleware()
	request := httptest.NewRequest("GET", "http://example.com", nil)
	if got := middleware.Authenticate(request); got {
		t.Fatal("expected anonymous request to be rejected by default")
	}
}
// TestAuthMiddlewareAllowsConfiguredAPIKey ensures a key listed in
// GATEWAY_API_KEYS is accepted when supplied via the X-API-Key header.
func TestAuthMiddlewareAllowsConfiguredAPIKey(t *testing.T) {
	t.Setenv("GATEWAY_ALLOW_ANONYMOUS", "")
	t.Setenv("GATEWAY_API_KEYS", "alpha,beta")
	middleware := NewAuthMiddleware()
	request := httptest.NewRequest("GET", "http://example.com", nil)
	request.Header.Set("X-API-Key", "beta")
	if ok := middleware.Authenticate(request); !ok {
		t.Fatal("expected configured API key to be accepted")
	}
}
// TestAuthMiddlewareAllowsAnonymousOnlyWhenEnabled ensures anonymous access
// is granted only when GATEWAY_ALLOW_ANONYMOUS is explicitly turned on.
func TestAuthMiddlewareAllowsAnonymousOnlyWhenEnabled(t *testing.T) {
	t.Setenv("GATEWAY_ALLOW_ANONYMOUS", "true")
	t.Setenv("GATEWAY_API_KEYS", "")
	middleware := NewAuthMiddleware()
	request := httptest.NewRequest("GET", "http://example.com", nil)
	if ok := middleware.Authenticate(request); !ok {
		t.Fatal("expected anonymous request to be accepted when explicitly enabled")
	}
}
// TestRateLimiterBlocksAfterWindowBudget verifies that a single client may
// issue exactly gatewayRequestsPerMinute requests within one window and that
// the next request is rejected.
func TestRateLimiterBlocksAfterWindowBudget(t *testing.T) {
	limiter := NewRateLimiter()
	req := httptest.NewRequest("GET", "http://example.com", nil)
	// Fixed RemoteAddr so every Allow call is attributed to the same client.
	req.RemoteAddr = "203.0.113.10:1234"
	for i := 0; i < gatewayRequestsPerMinute; i++ {
		if !limiter.Allow(req) {
			t.Fatalf("expected request %d to pass", i+1)
		}
	}
	// The per-minute budget is now exhausted; one more must be throttled.
	if limiter.Allow(req) {
		t.Fatal("expected request over the per-minute budget to be rejected")
	}
}
// TestRateLimiterResetsAfterWindow verifies that a client's budget is
// restored once its window expiry (resetAt) has passed.
func TestRateLimiterResetsAfterWindow(t *testing.T) {
	limiter := NewRateLimiter()
	req := httptest.NewRequest("GET", "http://example.com", nil)
	req.RemoteAddr = "203.0.113.11:1234"
	if !limiter.Allow(req) {
		t.Fatal("expected first request to pass")
	}
	// Backdate the entry's window instead of sleeping for a minute. The map
	// key is the bare IP (port stripped), matching how Allow buckets clients.
	limiter.mu.Lock()
	limiter.limits["203.0.113.11"].resetAt = time.Now().Add(-time.Second)
	limiter.mu.Unlock()
	if !limiter.Allow(req) {
		t.Fatal("expected limiter window to reset")
	}
}

View File

@@ -0,0 +1,16 @@
# Core explorer API
PORT=8080
CHAIN_ID=138
RPC_URL=https://rpc-http-pub.d-bis.org
DB_HOST=localhost
DB_NAME=explorer
# Mission-control helpers
TOKEN_AGGREGATION_BASE_URL=http://127.0.0.1:3000
BLOCKSCOUT_INTERNAL_URL=http://127.0.0.1:4000
EXPLORER_PUBLIC_BASE=https://explorer.d-bis.org
# Track 4 operator script execution
OPERATOR_SCRIPTS_ROOT=/opt/explorer/scripts
OPERATOR_SCRIPT_ALLOWLIST=check-health.sh,check-bridges.sh
OPERATOR_SCRIPT_TIMEOUT_SEC=120

View File

@@ -10,6 +10,7 @@ REST API implementation for the ChainID 138 Explorer Platform.
- `transactions.go` - Transaction-related endpoints
- `addresses.go` - Address-related endpoints
- `search.go` - Unified search endpoint
- `mission_control.go` - Mission-control bridge trace and cached liquidity helpers
- `validation.go` - Input validation utilities
- `middleware.go` - HTTP middleware (logging, compression)
- `errors.go` - Error response utilities
@@ -34,6 +35,14 @@ REST API implementation for the ChainID 138 Explorer Platform.
### Health
- `GET /health` - Health check endpoint
### Mission control
- `GET /api/v1/mission-control/stream` - SSE stream for bridge/RPC health
- `GET /api/v1/mission-control/bridge/trace?tx=0x...` - Blockscout-backed tx trace with Chain 138 contract labels
- `GET /api/v1/mission-control/liquidity/token/{address}/pools` - 30-second cached proxy to token-aggregation pools
### Track 4 operator
- `POST /api/v1/track4/operator/run-script` - Run an allowlisted script under `OPERATOR_SCRIPTS_ROOT`
## Features
- Input validation (addresses, hashes, block numbers)
@@ -66,4 +75,19 @@ Set environment variables:
- `DB_NAME` - Database name
- `PORT` - API server port (default: 8080)
- `CHAIN_ID` - Chain ID (default: 138)
- `RPC_URL` - Chain RPC used by Track 1 and mission-control health/SSE data
- `TOKEN_AGGREGATION_BASE_URL` - Upstream token-aggregation base URL for mission-control liquidity proxy
- `BLOCKSCOUT_INTERNAL_URL` - Internal Blockscout base URL for bridge trace lookups
- `EXPLORER_PUBLIC_BASE` - Public explorer base URL used in mission-control trace responses
- `CCIP_RELAY_HEALTH_URL` - Optional relay health probe URL, for example `http://192.168.11.11:9860/healthz`
- `CCIP_RELAY_HEALTH_URLS` - Optional comma-separated named relay probes, for example `mainnet=http://192.168.11.11:9860/healthz,bsc=http://192.168.11.11:9861/healthz,avax=http://192.168.11.11:9862/healthz`
- `MISSION_CONTROL_CCIP_JSON` - Optional JSON snapshot fallback when relay health is provided as a file instead of an HTTP endpoint
- `OPERATOR_SCRIPTS_ROOT` - Root directory for allowlisted Track 4 scripts
- `OPERATOR_SCRIPT_ALLOWLIST` - Comma-separated list of permitted script names or relative paths
- `OPERATOR_SCRIPT_TIMEOUT_SEC` - Optional Track 4 script timeout in seconds (max 599)
## Mission-control deployment notes
- Include `explorer-monorepo/deployment/common/nginx-mission-control-sse.conf` in the same nginx server block that proxies `/explorer-api/`.
- Keep the nginx upstream port aligned with the Go API `PORT`.
- Verify internal reachability to `BLOCKSCOUT_INTERNAL_URL` and `TOKEN_AGGREGATION_BASE_URL` from the API host before enabling the mission-control cards in production.

View File

@@ -15,9 +15,12 @@ func (s *Server) handleGetAddress(w http.ResponseWriter, r *http.Request) {
writeMethodNotAllowed(w)
return
}
if !s.requireDB(w) {
return
}
// Parse address from URL
address := r.URL.Query().Get("address")
address := normalizeAddress(r.URL.Query().Get("address"))
if address == "" {
writeValidationError(w, fmt.Errorf("address required"))
return
@@ -36,7 +39,7 @@ func (s *Server) handleGetAddress(w http.ResponseWriter, r *http.Request) {
// Get transaction count
var txCount int64
err := s.db.QueryRow(ctx,
`SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`,
`SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)`,
s.chainID, address,
).Scan(&txCount)
if err != nil {
@@ -47,7 +50,7 @@ func (s *Server) handleGetAddress(w http.ResponseWriter, r *http.Request) {
// Get token count
var tokenCount int
err = s.db.QueryRow(ctx,
`SELECT COUNT(DISTINCT token_address) FROM token_transfers WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`,
`SELECT COUNT(DISTINCT token_contract) FROM token_transfers WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)`,
s.chainID, address,
).Scan(&tokenCount)
if err != nil {
@@ -57,44 +60,42 @@ func (s *Server) handleGetAddress(w http.ResponseWriter, r *http.Request) {
// Get label
var label sql.NullString
s.db.QueryRow(ctx,
`SELECT label FROM address_labels WHERE chain_id = $1 AND address = $2 AND label_type = 'public' LIMIT 1`,
`SELECT label FROM address_labels WHERE chain_id = $1 AND LOWER(address) = $2 AND label_type = 'public' LIMIT 1`,
s.chainID, address,
).Scan(&label)
// Get tags
rows, _ := s.db.Query(ctx,
`SELECT tag FROM address_tags WHERE chain_id = $1 AND address = $2`,
rows, err := s.db.Query(ctx,
`SELECT tag FROM address_tags WHERE chain_id = $1 AND LOWER(address) = $2`,
s.chainID, address,
)
defer rows.Close()
tags := []string{}
for rows.Next() {
var tag string
if err := rows.Scan(&tag); err == nil {
tags = append(tags, tag)
if err == nil {
defer rows.Close()
for rows.Next() {
var tag string
if err := rows.Scan(&tag); err == nil {
tags = append(tags, tag)
}
}
}
// Check if contract
var isContract bool
s.db.QueryRow(ctx,
`SELECT EXISTS(SELECT 1 FROM contracts WHERE chain_id = $1 AND address = $2)`,
`SELECT EXISTS(SELECT 1 FROM contracts WHERE chain_id = $1 AND LOWER(address) = $2)`,
s.chainID, address,
).Scan(&isContract)
// Get balance (if we have RPC access, otherwise 0)
balance := "0"
// TODO: Add RPC call to get balance if needed
response := map[string]interface{}{
"address": address,
"chain_id": s.chainID,
"balance": balance,
"transaction_count": txCount,
"token_count": tokenCount,
"is_contract": isContract,
"tags": tags,
"address": address,
"chain_id": s.chainID,
"balance": nil,
"balance_unavailable": true,
"transaction_count": txCount,
"token_count": tokenCount,
"is_contract": isContract,
"tags": tags,
}
if label.Valid {

View File

@@ -0,0 +1,19 @@
package rest
import (
"net/http"
"net/http/httptest"
"testing"
)
// TestHandleGetAddressRequiresDB verifies that the address endpoint fails
// fast with 503 Service Unavailable when the server has no database pool.
func TestHandleGetAddressRequiresDB(t *testing.T) {
	// A nil pool simulates a deployment where the database is unreachable.
	server := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodGet, "/api/v1/addresses/138/0xAbCdEf1234567890ABCdef1234567890abCDef12?address=0xAbCdEf1234567890ABCdef1234567890abCDef12", nil)
	w := httptest.NewRecorder()
	server.handleGetAddress(w, req)
	if w.Code != http.StatusServiceUnavailable {
		t.Fatalf("expected 503 when db is unavailable, got %d", w.Code)
	}
}

View File

@@ -61,7 +61,7 @@ func (s *Server) handleListAddresses(w http.ResponseWriter, r *http.Request) {
MAX(seen_at) AS last_seen_at
FROM (
SELECT
t.from_address AS address,
LOWER(t.from_address) AS address,
'sent' AS direction,
b.timestamp AS seen_at
FROM transactions t
@@ -69,7 +69,7 @@ func (s *Server) handleListAddresses(w http.ResponseWriter, r *http.Request) {
WHERE t.chain_id = $1 AND t.from_address IS NOT NULL AND t.from_address <> ''
UNION ALL
SELECT
t.to_address AS address,
LOWER(t.to_address) AS address,
'received' AS direction,
b.timestamp AS seen_at
FROM transactions t
@@ -79,28 +79,28 @@ func (s *Server) handleListAddresses(w http.ResponseWriter, r *http.Request) {
GROUP BY address
),
token_activity AS (
SELECT address, COUNT(DISTINCT token_address) AS token_count
SELECT address, COUNT(DISTINCT token_contract) AS token_count
FROM (
SELECT from_address AS address, token_address
SELECT LOWER(from_address) AS address, token_contract
FROM token_transfers
WHERE chain_id = $1 AND from_address IS NOT NULL AND from_address <> ''
UNION ALL
SELECT to_address AS address, token_address
SELECT LOWER(to_address) AS address, token_contract
FROM token_transfers
WHERE chain_id = $1 AND to_address IS NOT NULL AND to_address <> ''
) tokens
GROUP BY address
),
label_activity AS (
SELECT DISTINCT ON (address)
address,
SELECT DISTINCT ON (LOWER(address))
LOWER(address) AS address,
label
FROM address_labels
WHERE chain_id = $1 AND label_type = 'public'
ORDER BY address, updated_at DESC, id DESC
ORDER BY LOWER(address), updated_at DESC, id DESC
),
contract_activity AS (
SELECT address, TRUE AS is_contract
SELECT LOWER(address) AS address, TRUE AS is_contract
FROM contracts
WHERE chain_id = $1
)

View File

@@ -222,6 +222,11 @@ func explorerAIEnabled() bool {
return strings.TrimSpace(os.Getenv("XAI_API_KEY")) != ""
}
// explorerAIOperatorToolsEnabled allows the model to discuss server-side
// operator/MCP automation. Only the literal value "1" (after trimming
// whitespace) enables it; the feature defaults to off.
func explorerAIOperatorToolsEnabled() bool {
	value := strings.TrimSpace(os.Getenv("EXPLORER_AI_OPERATOR_TOOLS_ENABLED"))
	return value == "1"
}
func explorerAIModel() string {
if model := strings.TrimSpace(os.Getenv("XAI_MODEL")); model != "" {
return model
@@ -316,7 +321,15 @@ func (s *Server) queryAIStats(ctx context.Context) (map[string]any, error) {
}
var totalAddresses int64
if err := s.db.QueryRow(ctx, `SELECT COUNT(DISTINCT from_address) + COUNT(DISTINCT to_address) FROM transactions WHERE chain_id = $1`, s.chainID).Scan(&totalAddresses); err == nil {
if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM (
SELECT from_address AS address
FROM transactions
WHERE chain_id = $1 AND from_address IS NOT NULL AND from_address <> ''
UNION
SELECT to_address AS address
FROM transactions
WHERE chain_id = $1 AND to_address IS NOT NULL AND to_address <> ''
) unique_addresses`, s.chainID).Scan(&totalAddresses); err == nil {
stats["total_addresses"] = totalAddresses
}
@@ -429,17 +442,19 @@ func (s *Server) queryAIAddress(ctx context.Context, address string) (map[string
ctx, cancel := context.WithTimeout(ctx, 4*time.Second)
defer cancel()
address = normalizeAddress(address)
result := map[string]any{
"address": address,
}
var txCount int64
if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`, s.chainID, address).Scan(&txCount); err == nil {
if err := s.db.QueryRow(ctx, `SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)`, s.chainID, address).Scan(&txCount); err == nil {
result["transaction_count"] = txCount
}
var tokenCount int64
if err := s.db.QueryRow(ctx, `SELECT COUNT(DISTINCT token_address) FROM token_transfers WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`, s.chainID, address).Scan(&tokenCount); err == nil {
if err := s.db.QueryRow(ctx, `SELECT COUNT(DISTINCT token_contract) FROM token_transfers WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)`, s.chainID, address).Scan(&tokenCount); err == nil {
result["token_count"] = tokenCount
}
@@ -447,7 +462,7 @@ func (s *Server) queryAIAddress(ctx context.Context, address string) (map[string
rows, err := s.db.Query(ctx, `
SELECT hash
FROM transactions
WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)
WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)
ORDER BY block_number DESC, transaction_index DESC
LIMIT 5
`, s.chainID, address)
@@ -884,10 +899,15 @@ func (s *Server) callXAIChatCompletions(ctx context.Context, messages []AIChatMe
contextJSON, _ := json.MarshalIndent(contextEnvelope, "", " ")
contextText := clipString(string(contextJSON), maxExplorerAIContextChars)
baseSystem := "You are the SolaceScanScout ecosystem assistant for Chain 138. Answer using the supplied indexed explorer data, route inventory, and workspace documentation. Be concise, operationally useful, and explicit about uncertainty. Never claim a route, deployment, or production status is live unless the provided context says it is live. If data is missing, say exactly what is missing."
if !explorerAIOperatorToolsEnabled() {
baseSystem += " Never instruct users to paste private keys or seed phrases. Do not direct users to run privileged mint, liquidity, or bridge execution from the public explorer UI. Operator changes belong on LAN-gated workflows and authenticated Track 4 APIs; PMM/MCP-style execution tools are disabled on this deployment unless EXPLORER_AI_OPERATOR_TOOLS_ENABLED=1."
}
input := []xAIChatMessageReq{
{
Role: "system",
Content: "You are the SolaceScanScout ecosystem assistant for Chain 138. Answer using the supplied indexed explorer data, route inventory, and workspace documentation. Be concise, operationally useful, and explicit about uncertainty. Never claim a route, deployment, or production status is live unless the provided context says it is live. If data is missing, say exactly what is missing.",
Content: baseSystem,
},
{
Role: "system",

View File

@@ -3,11 +3,12 @@ package rest
import (
"encoding/json"
"log"
"net"
"net/http"
"strings"
"sync"
"time"
httpmiddleware "github.com/explorer/backend/libs/go-http-middleware"
)
type AIRateLimiter struct {
@@ -158,22 +159,7 @@ func (m *AIMetrics) Snapshot() map[string]any {
}
func clientIPAddress(r *http.Request) string {
for _, header := range []string{"X-Forwarded-For", "X-Real-IP"} {
if raw := strings.TrimSpace(r.Header.Get(header)); raw != "" {
if header == "X-Forwarded-For" {
parts := strings.Split(raw, ",")
if len(parts) > 0 {
return strings.TrimSpace(parts[0])
}
}
return raw
}
}
host, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
if err == nil && host != "" {
return host
}
return strings.TrimSpace(r.RemoteAddr)
return httpmiddleware.ClientIP(r)
}
func explorerAIContextRateLimit() (int, time.Duration) {

View File

@@ -214,6 +214,38 @@ func TestPagination(t *testing.T) {
}
}
// TestAuthNonceRequiresDB verifies POST /api/v1/auth/nonce returns 503 with
// a structured JSON error body when wallet-auth storage is not initialized.
func TestAuthNonceRequiresDB(t *testing.T) {
	_, mux := setupTestServer(t)
	req := httptest.NewRequest("POST", "/api/v1/auth/nonce", bytes.NewBufferString(`{"address":"0x4A666F96fC8764181194447A7dFdb7d471b301C8"}`))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, req)
	assert.Equal(t, http.StatusServiceUnavailable, w.Code)
	var response map[string]interface{}
	err := json.Unmarshal(w.Body.Bytes(), &response)
	require.NoError(t, err)
	// The handler must emit a structured error payload, not an empty body.
	assert.NotNil(t, response["error"])
}
// TestAuthWalletRequiresDB verifies POST /api/v1/auth/wallet returns 503
// with a structured JSON error body when wallet-auth storage is not
// initialized, rather than attempting signature verification.
func TestAuthWalletRequiresDB(t *testing.T) {
	_, mux := setupTestServer(t)
	req := httptest.NewRequest("POST", "/api/v1/auth/wallet", bytes.NewBufferString(`{"address":"0x4A666F96fC8764181194447A7dFdb7d471b301C8","signature":"0xdeadbeef","nonce":"abc"}`))
	req.Header.Set("Content-Type", "application/json")
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, req)
	assert.Equal(t, http.StatusServiceUnavailable, w.Code)
	var response map[string]interface{}
	err := json.Unmarshal(w.Body.Bytes(), &response)
	require.NoError(t, err)
	// The handler must emit a structured error payload, not an empty body.
	assert.NotNil(t, response["error"])
}
func TestAIContextEndpoint(t *testing.T) {
_, mux := setupTestServer(t)

View File

@@ -2,6 +2,7 @@ package rest
import (
"encoding/json"
"errors"
"net/http"
"github.com/explorer/backend/auth"
@@ -13,6 +14,9 @@ func (s *Server) handleAuthNonce(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
var req auth.NonceRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -23,6 +27,10 @@ func (s *Server) handleAuthNonce(w http.ResponseWriter, r *http.Request) {
// Generate nonce
nonceResp, err := s.walletAuth.GenerateNonce(r.Context(), req.Address)
if err != nil {
if errors.Is(err, auth.ErrWalletAuthStorageNotInitialized) {
writeError(w, http.StatusServiceUnavailable, "service_unavailable", err.Error())
return
}
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
@@ -37,6 +45,9 @@ func (s *Server) handleAuthWallet(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
var req auth.WalletAuthRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -47,6 +58,10 @@ func (s *Server) handleAuthWallet(w http.ResponseWriter, r *http.Request) {
// Authenticate wallet
authResp, err := s.walletAuth.AuthenticateWallet(r.Context(), &req)
if err != nil {
if errors.Is(err, auth.ErrWalletAuthStorageNotInitialized) {
writeError(w, http.StatusServiceUnavailable, "service_unavailable", err.Error())
return
}
writeError(w, http.StatusUnauthorized, "unauthorized", err.Error())
return
}
@@ -54,4 +69,3 @@ func (s *Server) handleAuthWallet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(authResp)
}

View File

@@ -10,6 +10,10 @@ import (
// handleGetBlockByNumber handles GET /api/v1/blocks/{chain_id}/{number}
func (s *Server) handleGetBlockByNumber(w http.ResponseWriter, r *http.Request, blockNumber int64) {
if !s.requireDB(w) {
return
}
// Validate input (already validated in routes.go, but double-check)
if blockNumber < 0 {
writeValidationError(w, ErrInvalidBlockNumber)
@@ -72,6 +76,12 @@ func (s *Server) handleGetBlockByNumber(w http.ResponseWriter, r *http.Request,
// handleGetBlockByHash handles GET /api/v1/blocks/{chain_id}/hash/{hash}
func (s *Server) handleGetBlockByHash(w http.ResponseWriter, r *http.Request, hash string) {
if !s.requireDB(w) {
return
}
hash = normalizeHash(hash)
// Validate hash format (already validated in routes.go, but double-check)
if !isValidHash(hash) {
writeValidationError(w, ErrInvalidHash)

View File

@@ -1,8 +1,14 @@
package rest
import (
"crypto/sha256"
_ "embed"
"encoding/hex"
"net/http"
"os"
"path/filepath"
"strings"
"time"
)
//go:embed config/metamask/DUAL_CHAIN_NETWORKS.json
@@ -14,6 +20,111 @@ var dualChainTokenListJSON []byte
//go:embed config/metamask/CHAIN138_RPC_CAPABILITIES.json
var chain138RPCCapabilitiesJSON []byte
// configPayload is a resolved config document ready to be served over HTTP:
// the raw JSON bytes plus provenance metadata used for response headers.
type configPayload struct {
	body    []byte    // raw JSON document bytes written to the client
	source  string    // "runtime-file" when read from disk, "embedded" for the compiled-in fallback
	modTime time.Time // file mtime in UTC when source is "runtime-file"; zero value otherwise
}
// uniqueConfigPaths returns the whitespace-trimmed, non-empty entries of
// paths with duplicates removed, preserving first-seen order.
func uniqueConfigPaths(paths []string) []string {
	visited := make(map[string]struct{}, len(paths))
	result := make([]string, 0, len(paths))
	for _, raw := range paths {
		path := strings.TrimSpace(raw)
		if path == "" {
			continue
		}
		if _, dup := visited[path]; !dup {
			visited[path] = struct{}{}
			result = append(result, path)
		}
	}
	return result
}
// buildConfigCandidates expands environment-variable overrides plus default
// paths into a deduplicated, ordered list of candidate config file locations.
// Environment values come first, then working-directory resolutions of the
// defaults, then paths resolved relative to the running executable (and its
// parent and grandparent directories).
func buildConfigCandidates(envKeys []string, defaults []string) []string {
	paths := make([]string, 0, len(envKeys)+len(defaults)*4)
	for _, envKey := range envKeys {
		if override := strings.TrimSpace(os.Getenv(envKey)); override != "" {
			paths = append(paths, override)
		}
	}
	if workDir, err := os.Getwd(); err == nil {
		for _, def := range defaults {
			if filepath.IsAbs(def) {
				// Absolute defaults are taken as-is; no cwd join needed.
				paths = append(paths, def)
				continue
			}
			paths = append(paths, filepath.Join(workDir, def), def)
		}
	}
	if binPath, err := os.Executable(); err == nil {
		binDir := filepath.Dir(binPath)
		for _, def := range defaults {
			if filepath.IsAbs(def) {
				continue
			}
			paths = append(paths,
				filepath.Join(binDir, def),
				filepath.Join(binDir, "..", def),
				filepath.Join(binDir, "..", "..", def),
			)
		}
	}
	// Trim, drop empties, and deduplicate while preserving order.
	seen := make(map[string]struct{}, len(paths))
	out := make([]string, 0, len(paths))
	for _, candidate := range paths {
		trimmed := strings.TrimSpace(candidate)
		if trimmed == "" {
			continue
		}
		if _, dup := seen[trimmed]; dup {
			continue
		}
		seen[trimmed] = struct{}{}
		out = append(out, trimmed)
	}
	return out
}
// loadConfigPayload returns the first readable, non-empty config file among
// the candidate locations derived from envKeys and defaults, falling back to
// the embedded document when no runtime file is available. Runtime-backed
// payloads carry the file's UTC mtime when it can be stat'd.
func loadConfigPayload(envKeys []string, defaults []string, embedded []byte) configPayload {
	for _, path := range buildConfigCandidates(envKeys, defaults) {
		data, readErr := os.ReadFile(path)
		if readErr != nil || len(data) == 0 {
			// Unreadable or empty candidates are skipped, not fatal.
			continue
		}
		result := configPayload{body: data, source: "runtime-file"}
		if info, statErr := os.Stat(path); statErr == nil {
			result.modTime = info.ModTime().UTC()
		}
		return result
	}
	return configPayload{body: embedded, source: "embedded"}
}
// payloadETag computes a weak ETag of the form W/"<sha256-hex>" for body.
func payloadETag(body []byte) string {
	digest := sha256.Sum256(body)
	var tag strings.Builder
	tag.WriteString(`W/"`)
	tag.WriteString(hex.EncodeToString(digest[:]))
	tag.WriteString(`"`)
	return tag.String()
}
// serveJSONConfig writes a JSON config payload with caching headers.
// It sets Content-Type, Cache-Control, X-Config-Source, and a weak ETag
// (plus Last-Modified when the payload carries a file mtime), and replies
// 304 Not Modified when the client's If-None-Match header already contains
// the current ETag.
func serveJSONConfig(w http.ResponseWriter, r *http.Request, payload configPayload, cacheControl string) {
	headers := w.Header()
	headers.Set("Content-Type", "application/json")
	headers.Set("Cache-Control", cacheControl)
	headers.Set("X-Config-Source", payload.source)

	etag := payloadETag(payload.body)
	headers.Set("ETag", etag)
	if !payload.modTime.IsZero() {
		headers.Set("Last-Modified", payload.modTime.Format(http.TimeFormat))
	}

	// Conditional GET: substring match handles clients that echo the exact
	// ETag value, possibly alongside other tags, in If-None-Match.
	ifNoneMatch := strings.TrimSpace(r.Header.Get("If-None-Match"))
	if ifNoneMatch != "" && strings.Contains(ifNoneMatch, etag) {
		w.WriteHeader(http.StatusNotModified)
		return
	}
	_, _ = w.Write(payload.body)
}
// handleConfigNetworks serves GET /api/config/networks (Chain 138 + Ethereum Mainnet params for wallet_addEthereumChain).
func (s *Server) handleConfigNetworks(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
@@ -21,9 +132,17 @@ func (s *Server) handleConfigNetworks(w http.ResponseWriter, r *http.Request) {
writeMethodNotAllowed(w)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "public, max-age=3600")
w.Write(dualChainNetworksJSON)
payload := loadConfigPayload(
[]string{"CONFIG_NETWORKS_JSON_PATH", "NETWORKS_CONFIG_JSON_PATH"},
[]string{
"explorer-monorepo/backend/api/rest/config/metamask/DUAL_CHAIN_NETWORKS.json",
"backend/api/rest/config/metamask/DUAL_CHAIN_NETWORKS.json",
"api/rest/config/metamask/DUAL_CHAIN_NETWORKS.json",
"config/metamask/DUAL_CHAIN_NETWORKS.json",
},
dualChainNetworksJSON,
)
serveJSONConfig(w, r, payload, "public, max-age=0, must-revalidate")
}
// handleConfigTokenList serves GET /api/config/token-list (Uniswap token list format for MetaMask).
@@ -33,9 +152,17 @@ func (s *Server) handleConfigTokenList(w http.ResponseWriter, r *http.Request) {
writeMethodNotAllowed(w)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "public, max-age=3600")
w.Write(dualChainTokenListJSON)
payload := loadConfigPayload(
[]string{"CONFIG_TOKEN_LIST_JSON_PATH", "TOKEN_LIST_CONFIG_JSON_PATH"},
[]string{
"explorer-monorepo/backend/api/rest/config/metamask/DUAL_CHAIN_TOKEN_LIST.tokenlist.json",
"backend/api/rest/config/metamask/DUAL_CHAIN_TOKEN_LIST.tokenlist.json",
"api/rest/config/metamask/DUAL_CHAIN_TOKEN_LIST.tokenlist.json",
"config/metamask/DUAL_CHAIN_TOKEN_LIST.tokenlist.json",
},
dualChainTokenListJSON,
)
serveJSONConfig(w, r, payload, "public, max-age=0, must-revalidate")
}
// handleConfigCapabilities serves GET /api/config/capabilities (Chain 138 wallet/RPC capability matrix).
@@ -45,7 +172,15 @@ func (s *Server) handleConfigCapabilities(w http.ResponseWriter, r *http.Request
writeMethodNotAllowed(w)
return
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Cache-Control", "public, max-age=900")
w.Write(chain138RPCCapabilitiesJSON)
payload := loadConfigPayload(
[]string{"CONFIG_CAPABILITIES_JSON_PATH", "RPC_CAPABILITIES_JSON_PATH"},
[]string{
"explorer-monorepo/backend/api/rest/config/metamask/CHAIN138_RPC_CAPABILITIES.json",
"backend/api/rest/config/metamask/CHAIN138_RPC_CAPABILITIES.json",
"api/rest/config/metamask/CHAIN138_RPC_CAPABILITIES.json",
"config/metamask/CHAIN138_RPC_CAPABILITIES.json",
},
chain138RPCCapabilitiesJSON,
)
serveJSONConfig(w, r, payload, "public, max-age=0, must-revalidate")
}

View File

@@ -0,0 +1,842 @@
{
"generatedAt": "2026-04-04T16:10:52.278Z",
"summary": {
"wave1Assets": 7,
"wave1TransportActive": 0,
"wave1TransportPending": 7,
"wave1WrappedSymbols": 10,
"wave1WrappedSymbolsCoveredByPoolMatrix": 10,
"wave1WrappedSymbolsMissingFromPoolMatrix": 0,
"desiredPublicEvmTargets": 11,
"chainsWithLoadedCwSuites": 10,
"chainsMissingCwSuites": 1,
"firstTierWave1PoolsPlanned": 110,
"firstTierWave1PoolsRecordedLive": 6,
"protocolsTracked": 5,
"protocolsLive": 1
},
"assetQueue": [
{
"code": "EUR",
"name": "Euro",
"canonicalSymbols": [
"cEURC",
"cEURT"
],
"wrappedSymbols": [
"cWEURC",
"cWEURT"
],
"transportActive": false,
"canonicalDeployed": true,
"x402Ready": false,
"coveredByPoolMatrix": true,
"nextSteps": [
"enable_bridge_controls",
"set_max_outstanding",
"promote_transport_overlay",
"deploy_public_pools"
]
},
{
"code": "JPY",
"name": "Japanese Yen",
"canonicalSymbols": [
"cJPYC"
],
"wrappedSymbols": [
"cWJPYC"
],
"transportActive": false,
"canonicalDeployed": true,
"x402Ready": false,
"coveredByPoolMatrix": true,
"nextSteps": [
"enable_bridge_controls",
"set_max_outstanding",
"promote_transport_overlay",
"deploy_public_pools"
]
},
{
"code": "GBP",
"name": "Pound Sterling",
"canonicalSymbols": [
"cGBPC",
"cGBPT"
],
"wrappedSymbols": [
"cWGBPC",
"cWGBPT"
],
"transportActive": false,
"canonicalDeployed": true,
"x402Ready": false,
"coveredByPoolMatrix": true,
"nextSteps": [
"enable_bridge_controls",
"set_max_outstanding",
"promote_transport_overlay",
"deploy_public_pools"
]
},
{
"code": "AUD",
"name": "Australian Dollar",
"canonicalSymbols": [
"cAUDC"
],
"wrappedSymbols": [
"cWAUDC"
],
"transportActive": false,
"canonicalDeployed": true,
"x402Ready": false,
"coveredByPoolMatrix": true,
"nextSteps": [
"enable_bridge_controls",
"set_max_outstanding",
"promote_transport_overlay",
"deploy_public_pools"
]
},
{
"code": "CAD",
"name": "Canadian Dollar",
"canonicalSymbols": [
"cCADC"
],
"wrappedSymbols": [
"cWCADC"
],
"transportActive": false,
"canonicalDeployed": true,
"x402Ready": false,
"coveredByPoolMatrix": true,
"nextSteps": [
"enable_bridge_controls",
"set_max_outstanding",
"promote_transport_overlay",
"deploy_public_pools"
]
},
{
"code": "CHF",
"name": "Swiss Franc",
"canonicalSymbols": [
"cCHFC"
],
"wrappedSymbols": [
"cWCHFC"
],
"transportActive": false,
"canonicalDeployed": true,
"x402Ready": false,
"coveredByPoolMatrix": true,
"nextSteps": [
"enable_bridge_controls",
"set_max_outstanding",
"promote_transport_overlay",
"deploy_public_pools"
]
},
{
"code": "XAU",
"name": "Gold",
"canonicalSymbols": [
"cXAUC",
"cXAUT"
],
"wrappedSymbols": [
"cWXAUC",
"cWXAUT"
],
"transportActive": false,
"canonicalDeployed": true,
"x402Ready": false,
"coveredByPoolMatrix": true,
"nextSteps": [
"enable_bridge_controls",
"set_max_outstanding",
"promote_transport_overlay",
"deploy_public_pools"
]
}
],
"chainQueue": [
{
"chainId": 1,
"name": "Ethereum Mainnet",
"hubStable": "USDC",
"bridgeAvailable": true,
"cwTokenCount": 12,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDC",
"cWEURT/USDC",
"cWGBPC/USDC",
"cWGBPT/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC",
"cWXAUC/USDC",
"cWXAUT/USDC"
],
"recordedWave1Pairs": [
"cWEURC/USDC",
"cWGBPC/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC"
],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 10,
"name": "Optimism",
"hubStable": "USDC",
"bridgeAvailable": true,
"cwTokenCount": 12,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDC",
"cWEURT/USDC",
"cWGBPC/USDC",
"cWGBPT/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC",
"cWXAUC/USDC",
"cWXAUT/USDC"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 25,
"name": "Cronos",
"hubStable": "USDT",
"bridgeAvailable": true,
"cwTokenCount": 12,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDT",
"cWEURT/USDT",
"cWGBPC/USDT",
"cWGBPT/USDT",
"cWAUDC/USDT",
"cWJPYC/USDT",
"cWCHFC/USDT",
"cWCADC/USDT",
"cWXAUC/USDT",
"cWXAUT/USDT"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 56,
"name": "BSC",
"hubStable": "USDT",
"bridgeAvailable": true,
"cwTokenCount": 14,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDT",
"cWEURT/USDT",
"cWGBPC/USDT",
"cWGBPT/USDT",
"cWAUDC/USDT",
"cWJPYC/USDT",
"cWCHFC/USDT",
"cWCADC/USDT",
"cWXAUC/USDT",
"cWXAUT/USDT"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 100,
"name": "Gnosis",
"hubStable": "USDC",
"bridgeAvailable": true,
"cwTokenCount": 12,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDC",
"cWEURT/USDC",
"cWGBPC/USDC",
"cWGBPT/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC",
"cWXAUC/USDC",
"cWXAUT/USDC"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 137,
"name": "Polygon",
"hubStable": "USDC",
"bridgeAvailable": true,
"cwTokenCount": 13,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDC",
"cWEURT/USDC",
"cWGBPC/USDC",
"cWGBPT/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC",
"cWXAUC/USDC",
"cWXAUT/USDC"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 42161,
"name": "Arbitrum One",
"hubStable": "USDC",
"bridgeAvailable": true,
"cwTokenCount": 12,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDC",
"cWEURT/USDC",
"cWGBPC/USDC",
"cWGBPT/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC",
"cWXAUC/USDC",
"cWXAUT/USDC"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 42220,
"name": "Celo",
"hubStable": "USDC",
"bridgeAvailable": true,
"cwTokenCount": 14,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDC",
"cWEURT/USDC",
"cWGBPC/USDC",
"cWGBPT/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC",
"cWXAUC/USDC",
"cWXAUT/USDC"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 43114,
"name": "Avalanche C-Chain",
"hubStable": "USDC",
"bridgeAvailable": true,
"cwTokenCount": 14,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDC",
"cWEURT/USDC",
"cWGBPC/USDC",
"cWGBPT/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC",
"cWXAUC/USDC",
"cWXAUT/USDC"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 8453,
"name": "Base",
"hubStable": "USDC",
"bridgeAvailable": true,
"cwTokenCount": 12,
"wave1WrappedCoverage": 10,
"plannedWave1Pairs": [
"cWEURC/USDC",
"cWEURT/USDC",
"cWGBPC/USDC",
"cWGBPT/USDC",
"cWAUDC/USDC",
"cWJPYC/USDC",
"cWCHFC/USDC",
"cWCADC/USDC",
"cWXAUC/USDC",
"cWXAUT/USDC"
],
"recordedWave1Pairs": [],
"nextStep": "deploy_first_tier_wave1_pools"
},
{
"chainId": 1111,
"name": "Wemix",
"hubStable": "USDT",
"bridgeAvailable": false,
"cwTokenCount": 0,
"wave1WrappedCoverage": 0,
"plannedWave1Pairs": [
"cWEURC/USDT",
"cWEURT/USDT",
"cWGBPC/USDT",
"cWGBPT/USDT",
"cWAUDC/USDT",
"cWJPYC/USDT",
"cWCHFC/USDT",
"cWCADC/USDT",
"cWXAUC/USDT",
"cWXAUT/USDT"
],
"recordedWave1Pairs": [],
"nextStep": "complete_cw_suite_then_deploy_pools"
}
],
"protocolQueue": [
{
"key": "uniswap_v3",
"name": "Uniswap v3",
"role": "primary_public_pool_venue",
"deploymentStage": "stage1_first_tier_pools",
"activePublicPools": 0,
"currentState": "queued_not_live",
"activationDependsOn": [
"cW token suite deployed on destination chain",
"first-tier cW/hub pools created",
"pool addresses written to deployment-status.json",
"token-aggregation/indexer visibility enabled"
]
},
{
"key": "dodo_pmm",
"name": "DODO PMM",
"role": "primary_public_pmm_edge_venue",
"deploymentStage": "stage1_first_tier_pools",
"activePublicPools": 10,
"currentState": "partially_live_on_public_cw_mesh",
"activationDependsOn": [
"cW token suite deployed on destination chain",
"first-tier cW/hub pools created",
"pool addresses written to deployment-status.json",
"policy controls and MCP visibility attached"
]
},
{
"key": "balancer",
"name": "Balancer",
"role": "secondary_basket_liquidity",
"deploymentStage": "stage2_post_first_tier_liquidity",
"activePublicPools": 0,
"currentState": "queued_not_live",
"activationDependsOn": [
"first-tier Uniswap v3 or DODO PMM liquidity live",
"basket design approved for the destination chain",
"pool addresses written to deployment-status.json"
]
},
{
"key": "curve_3",
"name": "Curve 3",
"role": "secondary_stable_curve",
"deploymentStage": "stage2_post_first_tier_liquidity",
"activePublicPools": 0,
"currentState": "queued_not_live",
"activationDependsOn": [
"first-tier stable liquidity live",
"stable basket design approved for the destination chain",
"pool addresses written to deployment-status.json"
]
},
{
"key": "one_inch",
"name": "1inch",
"role": "routing_aggregation_layer",
"deploymentStage": "stage3_after_underlying_pools_live",
"activePublicPools": 0,
"currentState": "queued_not_live",
"activationDependsOn": [
"underlying public pools already live",
"router/indexer visibility enabled",
"token-aggregation/provider capability surfaced publicly"
]
}
],
"blockers": [
"Desired public EVM targets still missing cW suites: Wemix.",
"Wave 1 transport is still pending for: EUR, JPY, GBP, AUD, CAD, CHF, XAU.",
"Arbitrum bootstrap remains blocked on the current Mainnet hub leg: tx 0x97df657f0e31341ca852666766e553650531bbcc86621246d041985d7261bb07 reverted before any bridge event was emitted."
],
"resolutionMatrix": [
{
"key": "mainnet_arbitrum_hub_blocked",
"state": "open",
"blocker": "Arbitrum bootstrap remains blocked on the current Mainnet hub leg: tx 0x97df657f0e31341ca852666766e553650531bbcc86621246d041985d7261bb07 reverted from 0xc9901ce2Ddb6490FAA183645147a87496d8b20B6 before any bridge event was emitted.",
"targets": [
{
"fromChain": 138,
"viaChain": 1,
"toChain": 42161,
"currentPath": "138 -> Mainnet -> Arbitrum"
}
],
"resolution": [
"Repair or replace the current Mainnet WETH9 fan-out bridge before treating Arbitrum as an available public bootstrap target.",
"Retest 138 -> Mainnet first-hop delivery, then rerun a smaller Mainnet -> Arbitrum send and require destination bridge events before promoting the route.",
"Keep Arbitrum marked blocked in the explorer and status surfaces until the hub leg emits and completes normally."
],
"runbooks": [
"docs/07-ccip/CROSS_NETWORK_FUNDING_BOOTSTRAP_STRATEGY.md",
"docs/07-ccip/CHAIN138_PUBLIC_CHAIN_UNLOAD_ROUTES.md",
"docs/00-meta/REQUIRED_FIXES_GAPS_AND_DEPLOYMENTS_LIST.md"
],
"exitCriteria": "A fresh Mainnet -> Arbitrum WETH9 send emits bridge events and completes destination delivery successfully."
},
{
"key": "missing_public_cw_suites",
"state": "open",
"blocker": "Desired public EVM targets still missing cW suites: Wemix.",
"targets": [
{
"chainId": 1111,
"name": "Wemix",
"nextStep": "complete_cw_suite_then_deploy_pools"
}
],
"resolution": [
"Deploy the full cW core suite on each missing destination chain using the existing CW deploy-and-wire flow.",
"Grant bridge mint/burn roles and mark the corridor live in cross-chain-pmm-lps/config/deployment-status.json.",
"Update public token lists / explorer config, then rerun check-cw-evm-deployment-mesh.sh and check-cw-public-pool-status.sh."
],
"runbooks": [
"docs/07-ccip/CW_DEPLOY_AND_WIRE_RUNBOOK.md",
"docs/03-deployment/PHASE_C_CW_AND_EDGE_POOLS_RUNBOOK.md",
"scripts/deployment/run-cw-remaining-steps.sh",
"scripts/verify/check-cw-evm-deployment-mesh.sh"
],
"exitCriteria": "Wemix report non-zero cW suites and become bridgeAvailable in deployment-status.json."
},
{
"key": "wave1_transport_pending",
"state": "open",
"blocker": "Wave 1 transport is still pending for: EUR, JPY, GBP, AUD, CAD, CHF, XAU.",
"targets": [
{
"code": "EUR",
"canonicalSymbols": [
"cEURC",
"cEURT"
],
"wrappedSymbols": [
"cWEURC",
"cWEURT"
]
},
{
"code": "JPY",
"canonicalSymbols": [
"cJPYC"
],
"wrappedSymbols": [
"cWJPYC"
]
},
{
"code": "GBP",
"canonicalSymbols": [
"cGBPC",
"cGBPT"
],
"wrappedSymbols": [
"cWGBPC",
"cWGBPT"
]
},
{
"code": "AUD",
"canonicalSymbols": [
"cAUDC"
],
"wrappedSymbols": [
"cWAUDC"
]
},
{
"code": "CAD",
"canonicalSymbols": [
"cCADC"
],
"wrappedSymbols": [
"cWCADC"
]
},
{
"code": "CHF",
"canonicalSymbols": [
"cCHFC"
],
"wrappedSymbols": [
"cWCHFC"
]
},
{
"code": "XAU",
"canonicalSymbols": [
"cXAUC",
"cXAUT"
],
"wrappedSymbols": [
"cWXAUC",
"cWXAUT"
]
}
],
"resolution": [
"Enable bridge controls and supervision policy for each Wave 1 canonical asset on Chain 138.",
"Set max-outstanding / capacity controls, then promote the canonical symbols into config/gru-transport-active.json.",
"Verify the overlay promotion with check-gru-global-priority-rollout.sh and check-gru-v2-chain138-readiness.sh before attaching public liquidity."
],
"runbooks": [
"docs/04-configuration/GRU_GLOBAL_PRIORITY_CROSS_CHAIN_ROLLOUT.md",
"docs/04-configuration/GRU_TRANSPORT_ACTIVE_JSON.md",
"scripts/verify/check-gru-global-priority-rollout.sh",
"scripts/verify/check-gru-v2-chain138-readiness.sh"
],
"exitCriteria": "Wave 1 transport pending count reaches zero and the overlay reports the seven non-USD assets as live_transport."
},
{
"key": "first_tier_public_pools_not_live",
"state": "in_progress",
"blocker": "Some first-tier Wave 1 public cW pools are live, but the rollout is incomplete.",
"targets": [
{
"chainId": 1,
"name": "Ethereum Mainnet",
"hubStable": "USDC",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 6
},
{
"chainId": 10,
"name": "Optimism",
"hubStable": "USDC",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 25,
"name": "Cronos",
"hubStable": "USDT",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 56,
"name": "BSC",
"hubStable": "USDT",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 100,
"name": "Gnosis",
"hubStable": "USDC",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 137,
"name": "Polygon",
"hubStable": "USDC",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 42161,
"name": "Arbitrum One",
"hubStable": "USDC",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 42220,
"name": "Celo",
"hubStable": "USDC",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 43114,
"name": "Avalanche C-Chain",
"hubStable": "USDC",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 8453,
"name": "Base",
"hubStable": "USDC",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
},
{
"chainId": 1111,
"name": "Wemix",
"hubStable": "USDT",
"plannedWave1Pairs": 10,
"recordedWave1Pairs": 0
}
],
"resolution": [
"Deploy the first-tier cW/hub-stable pairs from pool-matrix.json on every chain with a loaded cW suite.",
"Seed the new pools with initial liquidity and record the resulting pool addresses in cross-chain-pmm-lps/config/deployment-status.json.",
"Use check-cw-public-pool-status.sh to verify the mesh is no longer empty before surfacing the venues publicly."
],
"runbooks": [
"docs/03-deployment/SINGLE_SIDED_LPS_PUBLIC_NETWORKS_RUNBOOK.md",
"docs/03-deployment/PMM_FULL_MESH_AND_PUBLIC_SINGLE_SIDED_PLAN.md",
"cross-chain-pmm-lps/config/pool-matrix.json",
"scripts/verify/check-cw-public-pool-status.sh"
],
"exitCriteria": "First-tier Wave 1 pools are recorded live in deployment-status.json and check-cw-public-pool-status.sh reports non-zero pool coverage."
},
{
"key": "public_protocols_queued",
"state": "in_progress",
"blocker": "Some tracked public protocols have begun activation, but the full protocol stack is not live yet.",
"targets": [
{
"key": "uniswap_v3",
"name": "Uniswap v3",
"deploymentStage": "stage1_first_tier_pools",
"activationDependsOn": [
"cW token suite deployed on destination chain",
"first-tier cW/hub pools created",
"pool addresses written to deployment-status.json",
"token-aggregation/indexer visibility enabled"
]
},
{
"key": "dodo_pmm",
"name": "DODO PMM",
"deploymentStage": "stage1_first_tier_pools",
"activationDependsOn": [
"cW token suite deployed on destination chain",
"first-tier cW/hub pools created",
"pool addresses written to deployment-status.json",
"policy controls and MCP visibility attached"
]
},
{
"key": "balancer",
"name": "Balancer",
"deploymentStage": "stage2_post_first_tier_liquidity",
"activationDependsOn": [
"first-tier Uniswap v3 or DODO PMM liquidity live",
"basket design approved for the destination chain",
"pool addresses written to deployment-status.json"
]
},
{
"key": "curve_3",
"name": "Curve 3",
"deploymentStage": "stage2_post_first_tier_liquidity",
"activationDependsOn": [
"first-tier stable liquidity live",
"stable basket design approved for the destination chain",
"pool addresses written to deployment-status.json"
]
},
{
"key": "one_inch",
"name": "1inch",
"deploymentStage": "stage3_after_underlying_pools_live",
"activationDependsOn": [
"underlying public pools already live",
"router/indexer visibility enabled",
"token-aggregation/provider capability surfaced publicly"
]
}
],
"resolution": [
"Stage 1: activate Uniswap v3 and DODO PMM once first-tier cW pools exist on the public mesh.",
"Stage 2: activate Balancer and Curve 3 only after first-tier stable liquidity is already live.",
"Stage 3: expose 1inch after the underlying pools, routing/indexer visibility, and public provider-capability wiring are in place."
],
"runbooks": [
"config/gru-v2-public-protocol-rollout-plan.json",
"docs/11-references/GRU_V2_PUBLIC_PROTOCOL_DEPLOYMENT_STATUS.md",
"scripts/verify/check-gru-v2-public-protocols.sh"
],
"exitCriteria": "The public protocol status surface reports non-zero active cW pools for the staged venues."
},
{
"key": "global_priority_backlog",
"state": "open",
"blocker": "The ranked GRU global rollout still has 29 backlog assets outside the live manifest.",
"targets": [
{
"backlogAssets": 29
}
],
"resolution": [
"Complete Wave 1 transport and first-tier public liquidity before promoting the remaining ranked assets.",
"For each backlog asset, add canonical + wrapped symbols to the manifest/rollout plan, deploy contracts, and extend the public pool matrix.",
"Promote each new asset through the same transport and public-liquidity gates used for Wave 1."
],
"runbooks": [
"config/gru-global-priority-currency-rollout.json",
"config/gru-iso4217-currency-manifest.json",
"docs/04-configuration/GRU_GLOBAL_PRIORITY_CROSS_CHAIN_ROLLOUT.md",
"scripts/verify/check-gru-global-priority-rollout.sh"
],
"exitCriteria": "Backlog assets count reaches zero in check-gru-global-priority-rollout.sh."
},
{
"key": "solana_non_evm_program",
"state": "planned",
"blocker": "Desired non-EVM GRU targets remain planned / relay-dependent: Solana.",
"targets": [
{
"identifier": "Solana",
"label": "Solana"
}
],
"resolution": [
"Define the destination-chain token/program model first: SPL or wrapped-account representation, authority model, and relay custody surface.",
"Implement the relay/program path and only then promote Solana from desired-target status into the active transport inventory.",
"Add dedicated verifier coverage before marking Solana live anywhere in the explorer or status docs."
],
"runbooks": [
"docs/04-configuration/ADDITIONAL_PATHS_AND_EXTENSIONS.md",
"docs/04-configuration/GRU_GLOBAL_PRIORITY_CROSS_CHAIN_ROLLOUT.md"
],
"exitCriteria": "Solana has a real relay/program surface, a verifier, and is no longer only listed as a desired non-EVM target."
}
],
"notes": [
"This queue is an operator/deployment planning surface. It does not mark queued pools or transports as live.",
"Chain 138 canonical venues remain a separate live surface from the public cW mesh."
]
}

View File

@@ -0,0 +1,348 @@
{
"generatedAt": "2026-04-04T16:10:52.261Z",
"canonicalChainId": 138,
"summary": {
"desiredPublicEvmTargets": 11,
"loadedPublicEvmChains": 10,
"loadedPublicEvmFullCoreSuite": 10,
"desiredButNotLoaded": 1,
"publicProtocolsTracked": 5,
"publicProtocolsWithActiveCwPools": 1,
"chainsWithAnyRecordedPublicCwPools": 1,
"liveTransportAssets": 1,
"wave1CanonicalOnly": 7,
"backlogAssets": 29
},
"publicEvmMesh": {
"coreCwSuite": [
"cWUSDT",
"cWUSDC",
"cWEURC",
"cWEURT",
"cWGBPC",
"cWGBPT",
"cWAUDC",
"cWJPYC",
"cWCHFC",
"cWCADC",
"cWXAUC",
"cWXAUT"
],
"desiredChains": [
{
"chainId": 1,
"name": "Ethereum Mainnet",
"cwTokenCount": 12,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 10
},
{
"chainId": 10,
"name": "Optimism",
"cwTokenCount": 12,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 25,
"name": "Cronos",
"cwTokenCount": 12,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 56,
"name": "BSC (BNB Chain)",
"cwTokenCount": 14,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 100,
"name": "Gnosis Chain",
"cwTokenCount": 12,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 137,
"name": "Polygon",
"cwTokenCount": 13,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 42161,
"name": "Arbitrum One",
"cwTokenCount": 12,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 42220,
"name": "Celo",
"cwTokenCount": 14,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 43114,
"name": "Avalanche C-Chain",
"cwTokenCount": 14,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 8453,
"name": "Base",
"cwTokenCount": 12,
"hasFullCoreSuite": true,
"bridgeAvailable": true,
"pmmPoolCount": 0
},
{
"chainId": 1111,
"name": "Wemix",
"cwTokenCount": 0,
"hasFullCoreSuite": false,
"bridgeAvailable": false,
"pmmPoolCount": 0
}
],
"desiredButNotLoaded": [
{
"chainId": 1111,
"name": "Wemix"
}
],
"wave1PoolMatrixCoverage": {
"totalWrappedSymbols": 10,
"coveredSymbols": 10,
"missingSymbols": []
},
"note": "The public EVM cW token mesh is complete on the currently loaded 10-chain set, but Wemix remains a desired target without a cW suite in deployment-status.json."
},
"transport": {
"liveTransportAssets": [
{
"code": "USD",
"name": "US Dollar"
}
],
"wave1": [
{
"code": "EUR",
"name": "Euro",
"wave": "wave1",
"manifestPresent": true,
"deployed": true,
"transportActive": false,
"x402Ready": false,
"canonicalSymbols": [
"cEURC",
"cEURT"
],
"wrappedSymbols": [
"cWEURC",
"cWEURT"
],
"currentState": "canonical_only",
"nextStep": "activate_transport_and_attach_public_liquidity"
},
{
"code": "JPY",
"name": "Japanese Yen",
"wave": "wave1",
"manifestPresent": true,
"deployed": true,
"transportActive": false,
"x402Ready": false,
"canonicalSymbols": [
"cJPYC"
],
"wrappedSymbols": [
"cWJPYC"
],
"currentState": "canonical_only",
"nextStep": "activate_transport_and_attach_public_liquidity"
},
{
"code": "GBP",
"name": "Pound Sterling",
"wave": "wave1",
"manifestPresent": true,
"deployed": true,
"transportActive": false,
"x402Ready": false,
"canonicalSymbols": [
"cGBPC",
"cGBPT"
],
"wrappedSymbols": [
"cWGBPC",
"cWGBPT"
],
"currentState": "canonical_only",
"nextStep": "activate_transport_and_attach_public_liquidity"
},
{
"code": "AUD",
"name": "Australian Dollar",
"wave": "wave1",
"manifestPresent": true,
"deployed": true,
"transportActive": false,
"x402Ready": false,
"canonicalSymbols": [
"cAUDC"
],
"wrappedSymbols": [
"cWAUDC"
],
"currentState": "canonical_only",
"nextStep": "activate_transport_and_attach_public_liquidity"
},
{
"code": "CAD",
"name": "Canadian Dollar",
"wave": "wave1",
"manifestPresent": true,
"deployed": true,
"transportActive": false,
"x402Ready": false,
"canonicalSymbols": [
"cCADC"
],
"wrappedSymbols": [
"cWCADC"
],
"currentState": "canonical_only",
"nextStep": "activate_transport_and_attach_public_liquidity"
},
{
"code": "CHF",
"name": "Swiss Franc",
"wave": "wave1",
"manifestPresent": true,
"deployed": true,
"transportActive": false,
"x402Ready": false,
"canonicalSymbols": [
"cCHFC"
],
"wrappedSymbols": [
"cWCHFC"
],
"currentState": "canonical_only",
"nextStep": "activate_transport_and_attach_public_liquidity"
},
{
"code": "XAU",
"name": "Gold",
"wave": "wave1",
"manifestPresent": true,
"deployed": true,
"transportActive": false,
"x402Ready": false,
"canonicalSymbols": [
"cXAUC",
"cXAUT"
],
"wrappedSymbols": [
"cWXAUC",
"cWXAUT"
],
"currentState": "canonical_only",
"nextStep": "activate_transport_and_attach_public_liquidity"
}
],
"note": "USD is the only live transport asset today. Wave 1 non-USD assets are deployed canonically on Chain 138 but are not yet promoted into the active transport overlay."
},
"protocols": {
"publicCwMesh": [
{
"key": "uniswap_v3",
"name": "Uniswap v3",
"activePublicCwPools": 0,
"destinationChainsWithPools": 0,
"status": "not_deployed_on_public_cw_mesh",
"notes": "No live public-chain cW* venue is recorded for this protocol in deployment-status.json yet."
},
{
"key": "balancer",
"name": "Balancer",
"activePublicCwPools": 0,
"destinationChainsWithPools": 0,
"status": "not_deployed_on_public_cw_mesh",
"notes": "No live public-chain cW* venue is recorded for this protocol in deployment-status.json yet."
},
{
"key": "curve_3",
"name": "Curve 3",
"activePublicCwPools": 0,
"destinationChainsWithPools": 0,
"status": "not_deployed_on_public_cw_mesh",
"notes": "No live public-chain cW* venue is recorded for this protocol in deployment-status.json yet."
},
{
"key": "dodo_pmm",
"name": "DODO PMM",
"activePublicCwPools": 10,
"destinationChainsWithPools": 1,
"status": "partial_live_on_public_cw_mesh",
"notes": "deployment-status.json now records live public-chain cW* DODO PMM pools on Mainnet, including recorded non-USD Wave 1 rows, and the recorded Mainnet pools now have bidirectional live execution proof. The broader public cW mesh is still partial."
},
{
"key": "one_inch",
"name": "1inch",
"activePublicCwPools": 0,
"destinationChainsWithPools": 0,
"status": "not_deployed_on_public_cw_mesh",
"notes": "No live public-chain cW* venue is recorded for this protocol in deployment-status.json yet."
}
],
"chain138CanonicalVenues": {
"note": "Chain 138 canonical routing is a separate surface: DODO PMM plus upstream-native Uniswap v3 and the funded pilot-compatible Balancer, Curve 3, and 1inch venues are live there.",
"liveProtocols": [
"DODO PMM",
"Uniswap v3",
"Balancer",
"Curve 3",
"1inch"
]
}
},
"bridgeRouteHealth": {
"arbitrumHubBlocker": {
"active": true,
"fromChain": 138,
"viaChain": 1,
"toChain": 42161,
"currentPath": "138 -> Mainnet -> Arbitrum",
"sourceBridge": "0xc9901ce2Ddb6490FAA183645147a87496d8b20B6",
"failedTxHash": "0x97df657f0e31341ca852666766e553650531bbcc86621246d041985d7261bb07",
"note": "Use Mainnet hub; direct 138 first hop to Arbitrum emitted MessageSent on 2026-04-04 without destination delivery."
}
},
"explorer": {
"tokenListApi": "https://explorer.d-bis.org/api/config/token-list",
"staticStatusPath": "https://explorer.d-bis.org/config/GRU_V2_PUBLIC_DEPLOYMENT_STATUS.json"
},
"blockers": [
"Desired public EVM targets still lack cW token suites: Wemix.",
"Wave 1 GRU assets are still canonical-only on Chain 138: EUR, JPY, GBP, AUD, CAD, CHF, XAU.",
"Public cW* protocol rollout is now partial: DODO PMM has recorded pools, while Uniswap v3, Balancer, Curve 3, and 1inch remain not live on the public cW mesh.",
"The ranked GRU global rollout still has 29 backlog assets outside the live manifest.",
"Desired non-EVM GRU targets remain planned / relay-dependent: Solana.",
"Arbitrum public-network bootstrap remains blocked on the current Mainnet hub leg: tx 0x97df657f0e31341ca852666766e553650531bbcc86621246d041985d7261bb07 reverted from 0xc9901ce2Ddb6490FAA183645147a87496d8b20B6 before any bridge event was emitted."
]
}

View File

@@ -4,6 +4,8 @@ import (
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
)
@@ -204,14 +206,100 @@ func TestConfigCapabilitiesEndpointProvidesRPCCapabilityMatrix(t *testing.T) {
if !containsString(payload.HTTP.SupportedMethods, "eth_feeHistory") {
t.Fatal("expected eth_feeHistory support to be documented")
}
if !containsString(payload.HTTP.UnsupportedMethods, "eth_maxPriorityFeePerGas") {
t.Fatal("expected missing eth_maxPriorityFeePerGas support to be documented")
if !containsString(payload.HTTP.SupportedMethods, "eth_maxPriorityFeePerGas") {
t.Fatal("expected eth_maxPriorityFeePerGas support to be documented")
}
if !containsString(payload.Tracing.SupportedMethods, "trace_block") {
t.Fatal("expected trace_block support to be documented")
}
}
// TestConfigTokenListEndpointReloadsRuntimeFileWithoutRestart verifies that
// /api/config/token-list re-reads the runtime file named by
// CONFIG_TOKEN_LIST_JSON_PATH on each request: after the file is rewritten on
// disk, the next response serves the new payload with a changed ETag, and the
// X-Config-Source header reports "runtime-file" — all without restarting the
// handler.
func TestConfigTokenListEndpointReloadsRuntimeFileWithoutRestart(t *testing.T) {
	dir := t.TempDir()
	file := filepath.Join(dir, "token-list.json")
	// Two distinct payloads so the reload (and the ETag change) is observable.
	first := `{"name":"Runtime Token List v1","tokens":[{"chainId":138,"address":"0x1111111111111111111111111111111111111111","symbol":"RT1","name":"Runtime One","decimals":6}]}`
	second := `{"name":"Runtime Token List v2","tokens":[{"chainId":138,"address":"0x2222222222222222222222222222222222222222","symbol":"RT2","name":"Runtime Two","decimals":6}]}`
	if err := os.WriteFile(file, []byte(first), 0o644); err != nil {
		t.Fatalf("failed to write initial runtime file: %v", err)
	}
	t.Setenv("CONFIG_TOKEN_LIST_JSON_PATH", file)
	handler := setupConfigHandler()
	// First request: must come from the runtime file and carry an ETag.
	req1 := httptest.NewRequest(http.MethodGet, "/api/config/token-list", nil)
	w1 := httptest.NewRecorder()
	handler.ServeHTTP(w1, req1)
	if w1.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w1.Code)
	}
	if got := w1.Header().Get("X-Config-Source"); got != "runtime-file" {
		t.Fatalf("expected runtime-file config source, got %q", got)
	}
	etag1 := w1.Header().Get("ETag")
	if etag1 == "" {
		t.Fatal("expected ETag header on runtime-backed response")
	}
	var body1 testTokenList
	if err := json.Unmarshal(w1.Body.Bytes(), &body1); err != nil {
		t.Fatalf("failed to parse runtime token list v1: %v", err)
	}
	if body1.Name != "Runtime Token List v1" {
		t.Fatalf("expected first runtime payload, got %q", body1.Name)
	}
	// Rewrite the file in place — no handler restart in between.
	if err := os.WriteFile(file, []byte(second), 0o644); err != nil {
		t.Fatalf("failed to write updated runtime file: %v", err)
	}
	// Second request: new payload and a different ETag prove the reload.
	req2 := httptest.NewRequest(http.MethodGet, "/api/config/token-list", nil)
	w2 := httptest.NewRecorder()
	handler.ServeHTTP(w2, req2)
	if w2.Code != http.StatusOK {
		t.Fatalf("expected 200 after runtime update, got %d", w2.Code)
	}
	if got := w2.Header().Get("ETag"); got == "" || got == etag1 {
		t.Fatalf("expected changed ETag after runtime update, got %q", got)
	}
	var body2 testTokenList
	if err := json.Unmarshal(w2.Body.Bytes(), &body2); err != nil {
		t.Fatalf("failed to parse runtime token list v2: %v", err)
	}
	if body2.Name != "Runtime Token List v2" {
		t.Fatalf("expected updated runtime payload, got %q", body2.Name)
	}
}
// TestConfigTokenListEndpointSupportsETagRevalidation verifies conditional
// GET support: replaying the previously returned ETag via If-None-Match must
// yield 304 Not Modified with no payload re-send.
func TestConfigTokenListEndpointSupportsETagRevalidation(t *testing.T) {
	handler := setupConfigHandler()
	req1 := httptest.NewRequest(http.MethodGet, "/api/config/token-list", nil)
	w1 := httptest.NewRecorder()
	handler.ServeHTTP(w1, req1)
	if w1.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", w1.Code)
	}
	etag := w1.Header().Get("ETag")
	if etag == "" {
		t.Fatal("expected ETag header")
	}
	// Replay the ETag; the handler should short-circuit with 304.
	req2 := httptest.NewRequest(http.MethodGet, "/api/config/token-list", nil)
	req2.Header.Set("If-None-Match", etag)
	w2 := httptest.NewRecorder()
	handler.ServeHTTP(w2, req2)
	if w2.Code != http.StatusNotModified {
		t.Fatalf("expected 304, got %d", w2.Code)
	}
}
func TestConfigEndpointsSupportOptionsPreflight(t *testing.T) {
handler := setupConfigHandler()
req := httptest.NewRequest(http.MethodOptions, "/api/config/token-list", nil)

View File

@@ -2,10 +2,13 @@ package rest
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"math/big"
"net/http"
"strconv"
"strings"
"time"
)
@@ -122,7 +125,7 @@ func (s *Server) handleEtherscanAPI(w http.ResponseWriter, r *http.Request) {
var timestamp time.Time
var transactionCount int
var gasUsed, gasLimit int64
var transactions []string
var transactions interface{}
query := `
SELECT hash, parent_hash, timestamp, miner, transaction_count, gas_used, gas_limit
@@ -142,40 +145,28 @@ func (s *Server) handleEtherscanAPI(w http.ResponseWriter, r *http.Request) {
break
}
// If boolean is true, get full transaction objects
if boolean {
txQuery := `
SELECT hash FROM transactions
WHERE chain_id = $1 AND block_number = $2
ORDER BY transaction_index
`
rows, err := s.db.Query(ctx, txQuery, s.chainID, blockNumber)
if err == nil {
defer rows.Close()
for rows.Next() {
var txHash string
if err := rows.Scan(&txHash); err == nil {
transactions = append(transactions, txHash)
}
txObjects, err := s.loadEtherscanBlockTransactions(ctx, blockNumber)
if err != nil {
response = EtherscanResponse{
Status: "0",
Message: "Error",
Result: nil,
}
break
}
transactions = txObjects
} else {
// Just get transaction hashes
txQuery := `
SELECT hash FROM transactions
WHERE chain_id = $1 AND block_number = $2
ORDER BY transaction_index
`
rows, err := s.db.Query(ctx, txQuery, s.chainID, blockNumber)
if err == nil {
defer rows.Close()
for rows.Next() {
var txHash string
if err := rows.Scan(&txHash); err == nil {
transactions = append(transactions, txHash)
}
txHashes, err := s.loadEtherscanBlockTransactionHashes(ctx, blockNumber)
if err != nil {
response = EtherscanResponse{
Status: "0",
Message: "Error",
Result: nil,
}
break
}
transactions = txHashes
}
blockResult := map[string]interface{}{
@@ -216,3 +207,92 @@ func (s *Server) handleEtherscanAPI(w http.ResponseWriter, r *http.Request) {
json.NewEncoder(w).Encode(response)
}
// loadEtherscanBlockTransactionHashes returns the hashes of every transaction
// in the given block, ordered by transaction index, for the fullTx=false
// variant of the block endpoint. An empty block yields an empty, non-nil
// slice so it serializes as [] rather than null.
func (s *Server) loadEtherscanBlockTransactionHashes(ctx context.Context, blockNumber int64) ([]string, error) {
	rows, err := s.db.Query(ctx, `
		SELECT hash FROM transactions
		WHERE chain_id = $1 AND block_number = $2
		ORDER BY transaction_index
	`, s.chainID, blockNumber)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	hashes := []string{}
	for rows.Next() {
		var hash string
		if scanErr := rows.Scan(&hash); scanErr != nil {
			return nil, scanErr
		}
		hashes = append(hashes, hash)
	}
	// rows.Err surfaces any iteration error that terminated the loop early.
	return hashes, rows.Err()
}
// loadEtherscanBlockTransactions loads the full transaction objects of a
// block, ordered by transaction index, in the JSON-RPC
// eth_getBlockByNumber(fullTx=true) shape: numeric fields are 0x-hex
// quantities and "to" is null for contract-creation rows.
func (s *Server) loadEtherscanBlockTransactions(ctx context.Context, blockNumber int64) ([]map[string]interface{}, error) {
	// value::text avoids driver-specific NUMERIC decoding; it is re-encoded
	// via decimalStringToHex below. gas_price/input_data may be NULL, hence
	// the COALESCE defaults.
	rows, err := s.db.Query(ctx, `
		SELECT hash, block_hash, transaction_index, from_address, to_address, value::text,
		COALESCE(gas_price, 0), gas_limit, nonce, COALESCE(input_data, '')
		FROM transactions
		WHERE chain_id = $1 AND block_number = $2
		ORDER BY transaction_index
	`, s.chainID, blockNumber)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	transactions := make([]map[string]interface{}, 0)
	for rows.Next() {
		var hash, blockHash, fromAddress, value, inputData string
		// to_address is NULL for contract creations.
		var toAddress sql.NullString
		var transactionIndex int
		var gasPrice, gasLimit, nonce int64
		if err := rows.Scan(&hash, &blockHash, &transactionIndex, &fromAddress, &toAddress, &value, &gasPrice, &gasLimit, &nonce, &inputData); err != nil {
			return nil, err
		}
		// Assemble the RPC-shaped object; all quantities hex-encoded.
		tx := map[string]interface{}{
			"hash":             hash,
			"blockHash":        blockHash,
			"blockNumber":      fmt.Sprintf("0x%x", blockNumber),
			"transactionIndex": fmt.Sprintf("0x%x", transactionIndex),
			"from":             fromAddress,
			"value":            decimalStringToHex(value),
			"gasPrice":         fmt.Sprintf("0x%x", gasPrice),
			"gas":              fmt.Sprintf("0x%x", gasLimit),
			"nonce":            fmt.Sprintf("0x%x", nonce),
			"input":            normalizeHexInput(inputData),
		}
		if toAddress.Valid && toAddress.String != "" {
			tx["to"] = toAddress.String
		} else {
			// Contract creation / missing recipient maps to JSON null.
			tx["to"] = nil
		}
		transactions = append(transactions, tx)
	}
	return transactions, rows.Err()
}
// decimalStringToHex converts a base-10 integer string (e.g. a Postgres
// NUMERIC rendered via value::text) into a 0x-prefixed hexadecimal quantity.
// Malformed or negative input falls back to "0x0": EVM quantities (value,
// gas, nonce) are unsigned, and big.Int.Text would otherwise emit an invalid
// "0x-..." form for negative rows.
func decimalStringToHex(value string) string {
	parsed, ok := new(big.Int).SetString(strings.TrimSpace(value), 10)
	if !ok || parsed.Sign() < 0 {
		return "0x0"
	}
	return "0x" + parsed.Text(16)
}
// normalizeHexInput coerces raw calldata from the database into JSON-RPC wire
// form: empty input becomes "0x", already-prefixed data passes through, and
// bare hex digits gain a "0x" prefix. The prefix check is case-insensitive so
// a "0X..." value is not double-prefixed into invalid "0x0X...".
func normalizeHexInput(input string) string {
	trimmed := strings.TrimSpace(input)
	if trimmed == "" {
		return "0x"
	}
	if len(trimmed) >= 2 && strings.EqualFold(trimmed[:2], "0x") {
		return trimmed
	}
	return "0x" + trimmed
}

View File

@@ -0,0 +1,24 @@
package rest
import "testing"
// TestDecimalStringToHex covers the happy path (1 ETH in wei), the zero
// quantity, and malformed input, which must degrade to "0x0".
func TestDecimalStringToHex(t *testing.T) {
	cases := map[string]string{
		"1000000000000000000": "0xde0b6b3a7640000",
		"0":                   "0x0",
		"not-a-number":        "0x0",
	}
	for input, want := range cases {
		if got := decimalStringToHex(input); got != want {
			t.Fatalf("decimalStringToHex(%q) = %q, want %q", input, got, want)
		}
	}
}
// TestNormalizeHexInput exercises the empty, bare-hex, and already-prefixed
// calldata forms.
func TestNormalizeHexInput(t *testing.T) {
	cases := []struct {
		input string
		want  string
	}{
		{"", "0x"},
		{"deadbeef", "0xdeadbeef"},
		{"0x1234", "0x1234"},
	}
	for _, tc := range cases {
		if got := normalizeHexInput(tc.input); got != tc.want {
			t.Fatalf("normalizeHexInput(%q) = %q, want %q", tc.input, got, tc.want)
		}
	}
}

View File

@@ -17,6 +17,16 @@ func (rw *responseWriter) WriteHeader(code int) {
rw.ResponseWriter.WriteHeader(code)
}
// Unwrap exposes the wrapped http.ResponseWriter so http.ResponseController
// and interface-probing middleware can reach the underlying writer's
// optional interfaces.
func (rw *responseWriter) Unwrap() http.ResponseWriter {
	return rw.ResponseWriter
}
// Flush forwards to the underlying writer's http.Flusher so streaming/SSE
// responses keep working through this wrapper; it is a no-op when the
// underlying writer does not support flushing.
func (rw *responseWriter) Flush() {
	if f, ok := rw.ResponseWriter.(http.Flusher); ok {
		f.Flush()
	}
}
// loggingMiddleware logs requests with timing
func (s *Server) loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

View File

@@ -0,0 +1,479 @@
package rest
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"sync/atomic"
"time"
)
var (
hexAddrRe = regexp.MustCompile(`(?i)^0x[0-9a-f]{40}$`)
hexTxRe = regexp.MustCompile(`(?i)^0x[0-9a-f]{64}$`)
)
type liquidityCacheEntry struct {
body []byte
until time.Time
ctype string
}
var liquidityPoolsCache sync.Map // string -> liquidityCacheEntry
var missionControlMetrics struct {
liquidityCacheHits uint64
liquidityCacheMisses uint64
liquidityUpstreamFailure uint64
bridgeTraceRequests uint64
bridgeTraceFailures uint64
}
// tokenAggregationBase resolves the token-aggregation upstream base URL from
// the environment, preferring TOKEN_AGGREGATION_BASE_URL over
// TOKEN_AGGREGATION_URL. Trailing slashes are stripped; "" means unset.
func tokenAggregationBase() string {
	candidates := []string{
		os.Getenv("TOKEN_AGGREGATION_BASE_URL"),
		os.Getenv("TOKEN_AGGREGATION_URL"),
	}
	for _, raw := range candidates {
		if base := strings.TrimSpace(raw); base != "" {
			return strings.TrimRight(base, "/")
		}
	}
	return ""
}
// blockscoutInternalBase returns the internal Blockscout base URL from
// BLOCKSCOUT_INTERNAL_URL (trailing slashes stripped), defaulting to the
// local instance on port 4000.
func blockscoutInternalBase() string {
	if base := strings.TrimSpace(os.Getenv("BLOCKSCOUT_INTERNAL_URL")); base != "" {
		return strings.TrimRight(base, "/")
	}
	return "http://127.0.0.1:4000"
}
// missionControlChainID returns the chain ID from CHAIN_ID (whitespace
// trimmed), defaulting to "138" when unset or blank.
func missionControlChainID() string {
	chain := strings.TrimSpace(os.Getenv("CHAIN_ID"))
	if chain == "" {
		return "138"
	}
	return chain
}
// rpcURL returns the JSON-RPC endpoint from RPC_URL, whitespace-trimmed;
// "" when unset (callers must treat that as not configured).
func rpcURL() string {
	return strings.TrimSpace(os.Getenv("RPC_URL"))
}
// handleMissionControlLiquidityTokenPath serves
// GET .../mission-control/liquidity/token/{addr}/pools as a caching proxy in
// front of the token-aggregation service. Successful (200) upstream responses
// are cached in-process for 30s per (address, chain) pair; refresh=1,
// noCache=1, or a Cache-Control: no-cache/no-store request header bypasses
// the cache. The X-Mission-Control-Cache response header reports
// hit/miss/bypass, and package-level counters track hits, misses, and
// upstream failures.
func (s *Server) handleMissionControlLiquidityTokenPath(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeMethodNotAllowed(w)
		return
	}
	// Expect exactly {address}/pools after the fixed route prefix.
	rest := strings.TrimPrefix(r.URL.Path, "/api/v1/mission-control/liquidity/token/")
	rest = strings.Trim(rest, "/")
	parts := strings.Split(rest, "/")
	if len(parts) < 2 || parts[1] != "pools" {
		writeError(w, http.StatusNotFound, "not_found", "expected /liquidity/token/{address}/pools")
		return
	}
	addr := strings.TrimSpace(parts[0])
	if !hexAddrRe.MatchString(addr) {
		writeError(w, http.StatusBadRequest, "bad_request", "invalid token address")
		return
	}
	base := tokenAggregationBase()
	if base == "" {
		writeError(w, http.StatusServiceUnavailable, "service_unavailable", "TOKEN_AGGREGATION_BASE_URL not configured")
		return
	}
	chain := missionControlChainID()
	// Cache key is case-insensitive on the address and scoped per chain.
	cacheKey := strings.ToLower(addr) + "|" + chain
	bypassCache := r.URL.Query().Get("refresh") == "1" ||
		r.URL.Query().Get("noCache") == "1" ||
		strings.Contains(strings.ToLower(r.Header.Get("Cache-Control")), "no-cache") ||
		strings.Contains(strings.ToLower(r.Header.Get("Cache-Control")), "no-store")
	if ent, ok := liquidityPoolsCache.Load(cacheKey); ok && !bypassCache {
		e := ent.(liquidityCacheEntry)
		if time.Now().Before(e.until) {
			// Fresh cached entry: serve it without touching the upstream.
			atomic.AddUint64(&missionControlMetrics.liquidityCacheHits, 1)
			w.Header().Set("X-Mission-Control-Cache", "hit")
			if e.ctype != "" {
				w.Header().Set("Content-Type", e.ctype)
			} else {
				w.Header().Set("Content-Type", "application/json")
			}
			w.WriteHeader(http.StatusOK)
			_, _ = w.Write(e.body)
			return
		}
		// Expired entries fall through to an upstream fetch (and get
		// overwritten on success below).
	}
	atomic.AddUint64(&missionControlMetrics.liquidityCacheMisses, 1)
	if bypassCache {
		w.Header().Set("X-Mission-Control-Cache", "bypass")
	} else {
		w.Header().Set("X-Mission-Control-Cache", "miss")
	}
	up, err := url.Parse(base + "/api/v1/tokens/" + url.PathEscape(addr) + "/pools")
	if err != nil {
		writeInternalError(w, "bad upstream URL")
		return
	}
	q := up.Query()
	q.Set("chainId", chain)
	up.RawQuery = q.Encode()
	// Bound the upstream round-trip to 25 seconds.
	ctx, cancel := context.WithTimeout(r.Context(), 25*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, up.String(), nil)
	if err != nil {
		writeInternalError(w, err.Error())
		return
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		atomic.AddUint64(&missionControlMetrics.liquidityUpstreamFailure, 1)
		log.Printf("mission_control liquidity_proxy addr=%s chain=%s cache=miss upstream_error=%v", strings.ToLower(addr), chain, err)
		writeError(w, http.StatusBadGateway, "bad_gateway", err.Error())
		return
	}
	defer resp.Body.Close()
	// Cap the upstream body at 4 MiB to bound memory per request.
	body, err := io.ReadAll(io.LimitReader(resp.Body, 4<<20))
	if err != nil {
		atomic.AddUint64(&missionControlMetrics.liquidityUpstreamFailure, 1)
		log.Printf("mission_control liquidity_proxy addr=%s chain=%s cache=miss read_error=%v", strings.ToLower(addr), chain, err)
		writeError(w, http.StatusBadGateway, "bad_gateway", "read upstream body failed")
		return
	}
	ctype := resp.Header.Get("Content-Type")
	if ctype == "" {
		ctype = "application/json"
	}
	if resp.StatusCode == http.StatusOK {
		// Only successful payloads are cached (30s TTL); error responses
		// always pass through uncached.
		liquidityPoolsCache.Store(cacheKey, liquidityCacheEntry{
			body:  body,
			until: time.Now().Add(30 * time.Second),
			ctype: ctype,
		})
		cacheMode := "miss"
		if bypassCache {
			cacheMode = "bypass-refresh"
		}
		log.Printf("mission_control liquidity_proxy addr=%s chain=%s cache=%s stored_ttl_sec=30", strings.ToLower(addr), chain, cacheMode)
	} else {
		atomic.AddUint64(&missionControlMetrics.liquidityUpstreamFailure, 1)
		log.Printf("mission_control liquidity_proxy addr=%s chain=%s cache=miss upstream_status=%d", strings.ToLower(addr), chain, resp.StatusCode)
	}
	// Relay the upstream status and body verbatim (success or error).
	w.Header().Set("Content-Type", ctype)
	w.WriteHeader(resp.StatusCode)
	_, _ = w.Write(body)
}
var (
registryOnce sync.Once
registryAddrToKey map[string]string
registryLoadErr error
)
// firstReadableFile returns the contents and path of the first candidate that
// is non-blank, readable, and non-empty. Blank entries and unreadable or
// empty files are skipped; an error is returned only when no candidate
// qualifies.
func firstReadableFile(paths []string) ([]byte, string, error) {
	for _, candidate := range paths {
		if strings.TrimSpace(candidate) == "" {
			continue
		}
		data, err := os.ReadFile(candidate)
		if err != nil || len(data) == 0 {
			continue
		}
		return data, candidate, nil
	}
	return nil, "", fmt.Errorf("no readable file found")
}
// loadAddressRegistry138 lazily builds and memoizes (sync.Once, process
// lifetime) a map of lowercase contract address -> registry key for chain
// 138. Primary source: chains."138".contracts in smart-contracts-master.json,
// located via SMART_CONTRACTS_MASTER_JSON or a small set of relative paths.
// Secondary, best-effort source: the "inventory" object of
// address-inventory.json (EXPLORER_ADDRESS_INVENTORY_FILE override first),
// which only labels addresses the master registry left unlabeled. On total
// failure the map stays empty and registryLoadErr records why.
// NOTE(review): due to sync.Once the registry never reloads without a
// process restart — confirm that is acceptable operationally.
func loadAddressRegistry138() map[string]string {
	registryOnce.Do(func() {
		registryAddrToKey = make(map[string]string)
		// Candidate locations for the master registry; the env override wins.
		var masterPaths []string
		if p := strings.TrimSpace(os.Getenv("SMART_CONTRACTS_MASTER_JSON")); p != "" {
			masterPaths = append(masterPaths, p)
		}
		masterPaths = append(masterPaths,
			"config/smart-contracts-master.json",
			"../config/smart-contracts-master.json",
			"../../config/smart-contracts-master.json",
		)
		raw, masterPath, _ := firstReadableFile(masterPaths)
		if len(raw) == 0 {
			registryLoadErr = fmt.Errorf("smart-contracts-master.json not found")
			return
		}
		var root map[string]interface{}
		if err := json.Unmarshal(raw, &root); err != nil {
			registryLoadErr = err
			return
		}
		// Tolerant descent: missing or mis-typed levels just yield nil maps,
		// leaving the contracts loop a no-op.
		chains, _ := root["chains"].(map[string]interface{})
		c138, _ := chains["138"].(map[string]interface{})
		contracts, _ := c138["contracts"].(map[string]interface{})
		for k, v := range contracts {
			s, ok := v.(string)
			if !ok || !hexAddrRe.MatchString(s) {
				continue // skip non-address entries
			}
			registryAddrToKey[strings.ToLower(s)] = k
		}
		// Secondary source: inventory file next to the master file, the env
		// override, or well-known relative paths.
		var inventoryPaths []string
		if p := strings.TrimSpace(os.Getenv("EXPLORER_ADDRESS_INVENTORY_FILE")); p != "" {
			inventoryPaths = append(inventoryPaths, p)
		}
		if masterPath != "" {
			inventoryPaths = append(inventoryPaths, filepath.Join(filepath.Dir(masterPath), "address-inventory.json"))
		}
		inventoryPaths = append(inventoryPaths,
			"explorer-monorepo/config/address-inventory.json",
			"config/address-inventory.json",
			"../config/address-inventory.json",
			"../../config/address-inventory.json",
		)
		inventoryRaw, _, invErr := firstReadableFile(inventoryPaths)
		if invErr != nil || len(inventoryRaw) == 0 {
			return // inventory is best-effort; master data alone is fine
		}
		var inventoryRoot struct {
			Inventory map[string]string `json:"inventory"`
		}
		if err := json.Unmarshal(inventoryRaw, &inventoryRoot); err != nil {
			return // malformed inventory is ignored, not fatal
		}
		for k, v := range inventoryRoot.Inventory {
			if !hexAddrRe.MatchString(v) {
				continue
			}
			addr := strings.ToLower(v)
			if _, exists := registryAddrToKey[addr]; exists {
				continue // master registry labels win over inventory labels
			}
			registryAddrToKey[addr] = k
		}
	})
	return registryAddrToKey
}
// jsonStringField returns the first non-empty string value found in m under
// the given keys, tried in order; "" when none matches (missing key,
// non-string value, or empty string).
func jsonStringField(m map[string]interface{}, keys ...string) string {
	for _, key := range keys {
		val, ok := m[key].(string)
		if ok && val != "" {
			return val
		}
	}
	return ""
}
// extractEthAddress pulls a lowercase 0x address out of a JSON value that is
// either a plain string or an object carrying the address under "hash" or
// "address" (Blockscout's shape). Anything else yields "".
func extractEthAddress(val interface{}) string {
	switch v := val.(type) {
	case string:
		candidate := strings.TrimSpace(v)
		if hexAddrRe.MatchString(candidate) {
			return strings.ToLower(candidate)
		}
	case map[string]interface{}:
		candidate := jsonStringField(v, "hash", "address")
		if candidate != "" && hexAddrRe.MatchString(candidate) {
			return strings.ToLower(candidate)
		}
	}
	return ""
}
// fetchBlockscoutTransaction GETs /api/v2/transactions/{tx} from the internal
// Blockscout instance. Transport errors, body-read errors, and 5xx responses
// are retried once with a longer timeout (15s, then 25s); any non-5xx status
// is returned immediately as (body, status, nil) — the caller inspects the
// status code. Bodies are capped at 2 MiB.
func fetchBlockscoutTransaction(ctx context.Context, tx string) ([]byte, int, error) {
	fetchURL := blockscoutInternalBase() + "/api/v2/transactions/" + url.PathEscape(tx)
	timeouts := []time.Duration{15 * time.Second, 25 * time.Second}
	var lastBody []byte
	var lastStatus int
	var lastErr error
	for idx, timeout := range timeouts {
		attemptCtx, cancel := context.WithTimeout(ctx, timeout)
		req, err := http.NewRequestWithContext(attemptCtx, http.MethodGet, fetchURL, nil)
		if err != nil {
			cancel()
			// Request construction failure is not retryable.
			return nil, 0, err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			cancel()
			lastErr = err
			if idx == len(timeouts)-1 {
				return nil, 0, err
			}
			continue // retry with the longer timeout
		}
		body, readErr := io.ReadAll(io.LimitReader(resp.Body, 2<<20))
		// Close the body and cancel the per-attempt context explicitly —
		// a defer inside this loop would pile up until function return.
		resp.Body.Close()
		cancel()
		if readErr != nil {
			lastErr = readErr
			if idx == len(timeouts)-1 {
				return nil, 0, readErr
			}
			continue
		}
		lastBody = body
		lastStatus = resp.StatusCode
		if resp.StatusCode == http.StatusOK {
			return body, resp.StatusCode, nil
		}
		if resp.StatusCode < 500 || idx == len(timeouts)-1 {
			// 4xx is authoritative (e.g. tx unknown): return it unretried.
			return body, resp.StatusCode, nil
		}
	}
	return lastBody, lastStatus, lastErr
}
// fetchTransactionViaRPC resolves a transaction's (from, to) addresses with
// eth_getTransactionByHash against RPC_URL; it is the fallback when the
// Blockscout lookup yields nothing. Addresses are returned lowercased. An
// error is returned when RPC_URL is unset, the call fails, the node does not
// know the transaction, or neither address can be extracted.
func fetchTransactionViaRPC(ctx context.Context, tx string) (string, string, error) {
	base := rpcURL()
	if base == "" {
		return "", "", fmt.Errorf("RPC_URL not configured")
	}
	payload, err := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "eth_getTransactionByHash",
		"params":  []interface{}{tx},
	})
	if err != nil {
		return "", "", err
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, base, bytes.NewReader(payload))
	if err != nil {
		return "", "", err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", "", err
	}
	defer resp.Body.Close()
	// Cap the response at 1 MiB; a single transaction object is small.
	body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
	if err != nil {
		return "", "", err
	}
	if resp.StatusCode != http.StatusOK {
		return "", "", fmt.Errorf("rpc HTTP %d", resp.StatusCode)
	}
	var rpcResp struct {
		Result map[string]interface{} `json:"result"`
		Error  map[string]interface{} `json:"error"`
	}
	if err := json.Unmarshal(body, &rpcResp); err != nil {
		return "", "", err
	}
	if rpcResp.Error != nil {
		// NOTE(review): the node's error detail is dropped here; consider
		// including it in the message if it matters for debugging.
		return "", "", fmt.Errorf("rpc error")
	}
	if rpcResp.Result == nil {
		// JSON-RPC returns result:null for unknown transactions.
		return "", "", fmt.Errorf("transaction not found")
	}
	fromAddr := extractEthAddress(jsonStringField(rpcResp.Result, "from"))
	toAddr := extractEthAddress(jsonStringField(rpcResp.Result, "to"))
	if fromAddr == "" && toAddr == "" {
		return "", "", fmt.Errorf("transaction missing from/to")
	}
	return fromAddr, toAddr, nil
}
// HandleMissionControlBridgeTrace handles GET /api/v1/mission-control/bridge/trace?tx=0x...
// It resolves the transaction's from/to addresses via the internal Blockscout
// API, falling back to eth_getTransactionByHash over RPC_URL when Blockscout
// yields nothing, labels both addresses with the chain-138 address registry,
// and responds with a {"data": {...}} envelope including a public explorer
// deep link. If both sources fail the handler responds 502.
func (s *Server) HandleMissionControlBridgeTrace(w http.ResponseWriter, r *http.Request) {
	atomic.AddUint64(&missionControlMetrics.bridgeTraceRequests, 1)
	if r.Method != http.MethodGet {
		writeMethodNotAllowed(w)
		return
	}
	tx := strings.TrimSpace(r.URL.Query().Get("tx"))
	if tx == "" {
		writeError(w, http.StatusBadRequest, "bad_request", "missing tx query parameter")
		return
	}
	if !hexTxRe.MatchString(tx) {
		writeError(w, http.StatusBadRequest, "bad_request", "invalid transaction hash")
		return
	}
	reg := loadAddressRegistry138()
	publicBase := strings.TrimRight(strings.TrimSpace(os.Getenv("EXPLORER_PUBLIC_BASE")), "/")
	if publicBase == "" {
		publicBase = "https://explorer.d-bis.org"
	}
	fromAddr := ""
	toAddr := ""
	fromLabel := ""
	toLabel := ""
	source := "blockscout"
	// Primary lookup: Blockscout's v2 transaction document.
	body, statusCode, err := fetchBlockscoutTransaction(r.Context(), tx)
	if err == nil && statusCode == http.StatusOK {
		var txDoc map[string]interface{}
		if err := json.Unmarshal(body, &txDoc); err != nil {
			err = fmt.Errorf("invalid blockscout JSON")
		} else {
			// from/to may be plain strings or {"hash": ...} objects.
			fromAddr = extractEthAddress(txDoc["from"])
			toAddr = extractEthAddress(txDoc["to"])
		}
	}
	if fromAddr == "" && toAddr == "" {
		// Fallback: raw JSON-RPC lookup; only when that also fails do we 502.
		rpcFrom, rpcTo, rpcErr := fetchTransactionViaRPC(r.Context(), tx)
		if rpcErr == nil {
			fromAddr = rpcFrom
			toAddr = rpcTo
			source = "rpc_fallback"
		} else {
			atomic.AddUint64(&missionControlMetrics.bridgeTraceFailures, 1)
			if err != nil {
				log.Printf("mission_control bridge_trace tx=%s fetch_error=%v rpc_fallback_error=%v", strings.ToLower(tx), err, rpcErr)
				writeError(w, http.StatusBadGateway, "bad_gateway", err.Error())
				return
			}
			log.Printf("mission_control bridge_trace tx=%s upstream_status=%d rpc_fallback_error=%v", strings.ToLower(tx), statusCode, rpcErr)
			writeError(w, http.StatusBadGateway, "blockscout_error",
				fmt.Sprintf("blockscout HTTP %d", statusCode))
			return
		}
	}
	// Registry labels are best-effort; unknown addresses keep empty labels.
	if fromAddr != "" {
		fromLabel = reg[fromAddr]
	}
	if toAddr != "" {
		toLabel = reg[toAddr]
	}
	out := map[string]interface{}{
		"tx_hash":        strings.ToLower(tx),
		"from":           fromAddr,
		"from_registry":  fromLabel,
		"to":             toAddr,
		"to_registry":    toLabel,
		"blockscout_url": publicBase + "/tx/" + strings.ToLower(tx),
		"source":         source,
	}
	if registryLoadErr != nil && len(reg) == 0 {
		// Surface a registry-load problem without failing the trace itself.
		out["registry_warning"] = registryLoadErr.Error()
	}
	log.Printf("mission_control bridge_trace tx=%s from=%s to=%s from_label=%s to_label=%s", strings.ToLower(tx), fromAddr, toAddr, fromLabel, toLabel)
	writeJSON(w, http.StatusOK, map[string]interface{}{"data": out})
}

View File

@@ -0,0 +1,218 @@
package rest
import (
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"github.com/stretchr/testify/require"
)
// resetMissionControlTestGlobals clears the package-level cache and registry
// state mutated by the mission-control handlers so each test starts from a
// clean slate. Tests calling it must not run in parallel, since this state
// is shared across the package.
func resetMissionControlTestGlobals() {
	liquidityPoolsCache = sync.Map{}
	registryOnce = sync.Once{}
	registryAddrToKey = nil
	registryLoadErr = nil
}
// TestHandleMissionControlLiquidityTokenPathRequiresEnv verifies that the
// liquidity proxy answers 503 with a descriptive message when neither
// TOKEN_AGGREGATION_BASE_URL nor TOKEN_AGGREGATION_URL is configured.
func TestHandleMissionControlLiquidityTokenPathRequiresEnv(t *testing.T) {
	resetMissionControlTestGlobals()
	// Explicitly blank both env keys so ambient configuration cannot leak in.
	t.Setenv("TOKEN_AGGREGATION_BASE_URL", "")
	t.Setenv("TOKEN_AGGREGATION_URL", "")
	s := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodGet, "/api/v1/mission-control/liquidity/token/0x93E66202A11B1772E55407B32B44e5Cd8eda7f22/pools", nil)
	w := httptest.NewRecorder()
	s.handleMissionControlLiquidityTokenPath(w, req)
	require.Equal(t, http.StatusServiceUnavailable, w.Code)
	require.Contains(t, w.Body.String(), "TOKEN_AGGREGATION_BASE_URL not configured")
}
// TestHandleMissionControlLiquidityTokenPathCachesSuccess verifies that a
// successful upstream response is cached in-memory and a second identical
// request is served without another upstream hit, with matching bodies.
func TestHandleMissionControlLiquidityTokenPathCachesSuccess(t *testing.T) {
	resetMissionControlTestGlobals()
	// hitCount is written from the httptest server's handler goroutines and
	// read from the test goroutine, so it must be mutex-guarded to stay
	// clean under `go test -race`.
	var mu sync.Mutex
	hitCount := 0
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		hitCount++
		mu.Unlock()
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"data":{"count":1,"pools":[]}}`))
	}))
	defer upstream.Close()
	t.Setenv("TOKEN_AGGREGATION_BASE_URL", upstream.URL)
	t.Setenv("CHAIN_ID", "138")
	s := NewServer(nil, 138)
	path := "/api/v1/mission-control/liquidity/token/0x93E66202A11B1772E55407B32B44e5Cd8eda7f22/pools"
	w1 := httptest.NewRecorder()
	s.handleMissionControlLiquidityTokenPath(w1, httptest.NewRequest(http.MethodGet, path, nil))
	require.Equal(t, http.StatusOK, w1.Code)
	require.Equal(t, "miss", w1.Header().Get("X-Mission-Control-Cache"))
	w2 := httptest.NewRecorder()
	s.handleMissionControlLiquidityTokenPath(w2, httptest.NewRequest(http.MethodGet, path, nil))
	require.Equal(t, http.StatusOK, w2.Code)
	require.Equal(t, "hit", w2.Header().Get("X-Mission-Control-Cache"))
	mu.Lock()
	finalHits := hitCount
	mu.Unlock()
	require.Equal(t, 1, finalHits, "second request should be served from the in-memory cache")
	require.JSONEq(t, w1.Body.String(), w2.Body.String())
}
// TestHandleMissionControlLiquidityTokenPathBypassesCacheWhenRequested
// verifies that refresh=1 combined with Cache-Control: no-cache forces a
// fresh upstream read even when a cached entry exists.
func TestHandleMissionControlLiquidityTokenPathBypassesCacheWhenRequested(t *testing.T) {
	resetMissionControlTestGlobals()
	// Guard the counter: the httptest handler runs on server goroutines,
	// so an unsynchronized int would trip the race detector.
	var mu sync.Mutex
	hitCount := 0
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		hitCount++
		mu.Unlock()
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"data":{"count":1,"pools":[]}}`))
	}))
	defer upstream.Close()
	t.Setenv("TOKEN_AGGREGATION_BASE_URL", upstream.URL)
	t.Setenv("CHAIN_ID", "138")
	s := NewServer(nil, 138)
	path := "/api/v1/mission-control/liquidity/token/0x93E66202A11B1772E55407B32B44e5Cd8eda7f22/pools"
	w1 := httptest.NewRecorder()
	s.handleMissionControlLiquidityTokenPath(w1, httptest.NewRequest(http.MethodGet, path, nil))
	require.Equal(t, http.StatusOK, w1.Code)
	require.Equal(t, "miss", w1.Header().Get("X-Mission-Control-Cache"))
	req2 := httptest.NewRequest(http.MethodGet, path+"?refresh=1", nil)
	req2.Header.Set("Cache-Control", "no-cache")
	w2 := httptest.NewRecorder()
	s.handleMissionControlLiquidityTokenPath(w2, req2)
	require.Equal(t, http.StatusOK, w2.Code)
	require.Equal(t, "bypass", w2.Header().Get("X-Mission-Control-Cache"))
	mu.Lock()
	finalHits := hitCount
	mu.Unlock()
	require.Equal(t, 2, finalHits, "refresh=1 should force a fresh upstream read")
}
// TestHandleMissionControlBridgeTraceLabelsFromRegistry covers the happy
// path: Blockscout returns from/to as {"hash": ...} objects, both addresses
// are labeled from chains."138".contracts of the master registry, and the
// public explorer link honors EXPLORER_PUBLIC_BASE.
func TestHandleMissionControlBridgeTraceLabelsFromRegistry(t *testing.T) {
	resetMissionControlTestGlobals()
	fromAddr := "0x1111111111111111111111111111111111111111"
	toAddr := "0x2222222222222222222222222222222222222222"
	// Minimal master registry mapping both addresses to known keys.
	registryJSON := `{
	"chains": {
	"138": {
	"contracts": {
	"CHAIN138_SOURCE_BRIDGE": "` + fromAddr + `",
	"CHAIN138_DEST_BRIDGE": "` + toAddr + `"
	}
	}
	}
	}`
	registryPath := filepath.Join(t.TempDir(), "smart-contracts-master.json")
	require.NoError(t, os.WriteFile(registryPath, []byte(registryJSON), 0o644))
	// Stub Blockscout serving the v2 transaction document shape.
	blockscout := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		require.Equal(t, "/api/v2/transactions/0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", r.URL.Path)
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{
	"from": {"hash":"` + fromAddr + `"},
	"to": {"hash":"` + toAddr + `"}
	}`))
	}))
	defer blockscout.Close()
	t.Setenv("SMART_CONTRACTS_MASTER_JSON", registryPath)
	t.Setenv("BLOCKSCOUT_INTERNAL_URL", blockscout.URL)
	t.Setenv("EXPLORER_PUBLIC_BASE", "https://explorer.example.org")
	s := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodGet, "/api/v1/mission-control/bridge/trace?tx=0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", nil)
	w := httptest.NewRecorder()
	s.HandleMissionControlBridgeTrace(w, req)
	require.Equal(t, http.StatusOK, w.Code)
	var out struct {
		Data map[string]any `json:"data"`
	}
	require.NoError(t, json.Unmarshal(w.Body.Bytes(), &out))
	require.Equal(t, strings.ToLower(fromAddr), out.Data["from"])
	require.Equal(t, strings.ToLower(toAddr), out.Data["to"])
	require.Equal(t, "CHAIN138_SOURCE_BRIDGE", out.Data["from_registry"])
	require.Equal(t, "CHAIN138_DEST_BRIDGE", out.Data["to_registry"])
	require.Equal(t, "https://explorer.example.org/tx/0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", out.Data["blockscout_url"])
}
// TestHandleMissionControlBridgeTraceFallsBackToAddressInventoryLabels
// verifies the secondary labeling source: addresses absent from the master
// registry's contracts map are labeled from the "inventory" object of
// address-inventory.json (located via EXPLORER_ADDRESS_INVENTORY_FILE).
func TestHandleMissionControlBridgeTraceFallsBackToAddressInventoryLabels(t *testing.T) {
	resetMissionControlTestGlobals()
	// Mixed-case input also exercises the lowercase normalization.
	fromAddr := "0x4A666F96fC8764181194447A7dFdb7d471b301C8"
	toAddr := "0x152ed3e9912161b76bdfd368d0c84b7c31c10de7"
	tempDir := t.TempDir()
	registryPath := filepath.Join(tempDir, "smart-contracts-master.json")
	inventoryPath := filepath.Join(tempDir, "address-inventory.json")
	// Master registry deliberately labels an unrelated address only.
	require.NoError(t, os.WriteFile(registryPath, []byte(`{
	"chains": {
	"138": {
	"contracts": {
	"CCIP_Router": "0x42DAb7b888Dd382bD5Adcf9E038dBF1fD03b4817"
	}
	}
	}
	}`), 0o644))
	require.NoError(t, os.WriteFile(inventoryPath, []byte(`{
	"inventory": {
	"DEPLOYER_ADMIN_138": "`+fromAddr+`",
	"CW_L1_BRIDGE_CHAIN138": "`+toAddr+`"
	}
	}`), 0o644))
	blockscout := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{
	"from": {"hash":"` + fromAddr + `"},
	"to": {"hash":"` + toAddr + `"}
	}`))
	}))
	defer blockscout.Close()
	t.Setenv("SMART_CONTRACTS_MASTER_JSON", registryPath)
	t.Setenv("EXPLORER_ADDRESS_INVENTORY_FILE", inventoryPath)
	t.Setenv("BLOCKSCOUT_INTERNAL_URL", blockscout.URL)
	s := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodGet, "/api/v1/mission-control/bridge/trace?tx=0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", nil)
	w := httptest.NewRecorder()
	s.HandleMissionControlBridgeTrace(w, req)
	require.Equal(t, http.StatusOK, w.Code)
	var out struct {
		Data map[string]any `json:"data"`
	}
	require.NoError(t, json.Unmarshal(w.Body.Bytes(), &out))
	require.Equal(t, strings.ToLower(fromAddr), out.Data["from"])
	require.Equal(t, strings.ToLower(toAddr), out.Data["to"])
	require.Equal(t, "DEPLOYER_ADMIN_138", out.Data["from_registry"])
	require.Equal(t, "CW_L1_BRIDGE_CHAIN138", out.Data["to_registry"])
}
// TestHandleMissionControlBridgeTraceRejectsBadHash verifies that a malformed
// tx parameter is rejected with 400 before any upstream call is attempted.
func TestHandleMissionControlBridgeTraceRejectsBadHash(t *testing.T) {
	resetMissionControlTestGlobals()
	s := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodGet, "/api/v1/mission-control/bridge/trace?tx=not-a-tx", nil)
	w := httptest.NewRecorder()
	s.HandleMissionControlBridgeTrace(w, req)
	require.Equal(t, http.StatusBadRequest, w.Code)
	require.Contains(t, w.Body.String(), "invalid transaction hash")
}

View File

@@ -104,12 +104,13 @@ func (s *Server) handleBlockDetail(w http.ResponseWriter, r *http.Request) {
if parts[1] == "hash" && len(parts) == 3 {
// Validate hash format
if !isValidHash(parts[2]) {
hash := normalizeHash(parts[2])
if !isValidHash(hash) {
writeValidationError(w, ErrInvalidHash)
return
}
// Get by hash
s.handleGetBlockByHash(w, r, parts[2])
s.handleGetBlockByHash(w, r, hash)
} else {
// Validate and parse block number
blockNumber, err := validateBlockNumber(parts[1])
@@ -143,7 +144,7 @@ func (s *Server) handleTransactionDetail(w http.ResponseWriter, r *http.Request)
}
// Validate hash format
hash := parts[1]
hash := normalizeHash(parts[1])
if !isValidHash(hash) {
writeValidationError(w, ErrInvalidHash)
return
@@ -174,13 +175,15 @@ func (s *Server) handleAddressDetail(w http.ResponseWriter, r *http.Request) {
}
// Validate address format
address := parts[1]
address := normalizeAddress(parts[1])
if !isValidAddress(address) {
writeValidationError(w, ErrInvalidAddress)
return
}
// Set address in query and call handler
r.URL.RawQuery = "address=" + address
query := r.URL.Query()
query.Set("address", address)
r.URL.RawQuery = query.Encode()
s.handleGetAddress(w, r)
}

View File

@@ -40,6 +40,12 @@ func (s *Server) proxyRouteTreeEndpoint(w http.ResponseWriter, r *http.Request,
}
proxy := httputil.NewSingleHostReverseProxy(target)
originalDirector := proxy.Director
proxy.Director = func(req *http.Request) {
originalDirector(req)
req.URL.Path = joinProxyPath(target.Path, path)
req.URL.RawPath = req.URL.Path
}
proxy.ErrorHandler = func(rw http.ResponseWriter, req *http.Request, proxyErr error) {
writeError(rw, http.StatusBadGateway, "bad_gateway", fmt.Sprintf("route tree proxy failed for %s: %v", path, proxyErr))
}
@@ -47,6 +53,17 @@ func (s *Server) proxyRouteTreeEndpoint(w http.ResponseWriter, r *http.Request,
proxy.ServeHTTP(w, r)
}
// joinProxyPath concatenates an upstream base path and a request path with
// exactly one separating slash, regardless of whether either side already
// carries one. Pre-existing extra slashes inside either argument are kept.
func joinProxyPath(basePath, path string) string {
	return strings.TrimSuffix(basePath, "/") + "/" + strings.TrimPrefix(path, "/")
}
func firstNonEmptyEnv(keys ...string) string {
for _, key := range keys {
if value := strings.TrimSpace(os.Getenv(key)); value != "" {

View File

@@ -0,0 +1,54 @@
package rest
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
)
// TestRouteProxyPreservesTargetBasePath verifies that the route-tree proxy
// keeps the base path component of TOKEN_AGGREGATION_API_BASE (here
// "/token-aggregation") when rewriting the upstream URL, and forwards the
// original query string untouched.
// NOTE(review): gotPath/gotQuery are written in the upstream handler
// goroutine and read afterwards without explicit synchronization — confirm
// this stays clean under `go test -race`.
func TestRouteProxyPreservesTargetBasePath(t *testing.T) {
	var gotPath string
	var gotQuery string
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotPath = r.URL.Path
		gotQuery = r.URL.RawQuery
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"ok":true}`))
	}))
	defer upstream.Close()
	t.Setenv("TOKEN_AGGREGATION_API_BASE", upstream.URL+"/token-aggregation")
	server := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodGet, "/api/v1/routes/tree?chainId=138&amountIn=1000000", nil)
	w := httptest.NewRecorder()
	server.handleRouteDecisionTree(w, req)
	require.Equal(t, http.StatusOK, w.Code)
	require.Equal(t, "/token-aggregation/api/v1/routes/tree", gotPath)
	require.Equal(t, "chainId=138&amountIn=1000000", gotQuery)
}
// TestRouteProxyHandlesBaseURLWithoutPath verifies that a bare
// TOKEN_AGGREGATION_API_BASE (host only, no base path) forwards the request
// path as-is, with no spurious prefix.
func TestRouteProxyHandlesBaseURLWithoutPath(t *testing.T) {
	var gotPath string
	upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotPath = r.URL.Path
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"ok":true}`))
	}))
	defer upstream.Close()
	t.Setenv("TOKEN_AGGREGATION_API_BASE", upstream.URL)
	server := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodGet, "/api/v1/routes/depth?chainId=138", nil)
	w := httptest.NewRecorder()
	server.handleRouteDepth(w, req)
	require.Equal(t, http.StatusOK, w.Code)
	require.Equal(t, "/api/v1/routes/depth", gotPath)
}

View File

@@ -38,17 +38,21 @@ func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) {
}
s.handleGetBlockByNumber(w, r, blockNumber)
case "transaction":
value = normalizeHash(value)
if !isValidHash(value) {
writeValidationError(w, ErrInvalidHash)
return
}
s.handleGetTransactionByHash(w, r, value)
case "address":
value = normalizeAddress(value)
if !isValidAddress(value) {
writeValidationError(w, ErrInvalidAddress)
return
}
r.URL.RawQuery = "address=" + value
query := r.URL.Query()
query.Set("address", value)
r.URL.RawQuery = query.Encode()
s.handleGetAddress(w, r)
default:
writeValidationError(w, fmt.Errorf("unsupported search type"))

View File

@@ -2,6 +2,7 @@ package rest
import (
"context"
"crypto/rand"
"database/sql"
"encoding/json"
"fmt"
@@ -29,11 +30,11 @@ type Server struct {
// NewServer creates a new REST API server
func NewServer(db *pgxpool.Pool, chainID int) *Server {
// Get JWT secret from environment or use default
// Get JWT secret from environment or generate an ephemeral secret.
jwtSecret := []byte(os.Getenv("JWT_SECRET"))
if len(jwtSecret) == 0 {
jwtSecret = []byte("change-me-in-production-use-strong-random-secret")
log.Println("WARNING: Using default JWT secret. Set JWT_SECRET environment variable in production!")
jwtSecret = generateEphemeralJWTSecret()
log.Println("WARNING: JWT_SECRET is unset. Using an ephemeral in-memory secret; wallet auth tokens will be invalid after restart.")
}
walletAuth := auth.NewWalletAuth(db, jwtSecret)
@@ -48,6 +49,17 @@ func NewServer(db *pgxpool.Pool, chainID int) *Server {
}
}
// generateEphemeralJWTSecret returns a 32-byte random secret used to sign
// wallet-auth JWTs when JWT_SECRET is not configured. If crypto/rand fails it
// logs a warning and falls back to a (weaker) time-derived secret so the
// server can still start.
func generateEphemeralJWTSecret() []byte {
	secret := make([]byte, 32)
	_, err := rand.Read(secret)
	if err == nil {
		return secret
	}
	log.Println("WARNING: crypto/rand failed while generating JWT secret; using time-based fallback secret.")
	return []byte(fmt.Sprintf("ephemeral-jwt-secret-%d", time.Now().UnixNano()))
}
// Start starts the HTTP server
func (s *Server) Start(port int) error {
mux := http.NewServeMux()
@@ -99,7 +111,7 @@ func (s *Server) addMiddleware(next http.Handler) http.Handler {
}
w.Header().Set("Access-Control-Allow-Origin", origin)
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, X-API-Key")
w.Header().Set("Access-Control-Allow-Headers", "Authorization, Content-Type, X-API-Key")
// Handle preflight
if r.Method == "OPTIONS" {

View File

@@ -0,0 +1,19 @@
package rest
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestNewServerUsesEphemeralJWTSecretWhenUnset checks that with JWT_SECRET
// empty, NewServer generates a non-empty secret that is neither the old
// hard-coded default nor shared between server instances.
func TestNewServerUsesEphemeralJWTSecretWhenUnset(t *testing.T) {
	t.Setenv("JWT_SECRET", "")
	first := NewServer(nil, 138)
	second := NewServer(nil, 138)
	require.NotEmpty(t, first.jwtSecret)
	require.NotEmpty(t, second.jwtSecret)
	require.NotEqual(t, []byte("change-me-in-production-use-strong-random-secret"), first.jwtSecret)
	require.NotEqual(t, string(first.jwtSecret), string(second.jwtSecret))
}

View File

@@ -3,10 +3,64 @@ package rest
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/jackc/pgx/v5"
)
// explorerStats is the aggregate-counter payload served by GET /api/v2/stats.
type explorerStats struct {
	TotalBlocks       int64 `json:"total_blocks"`
	TotalTransactions int64 `json:"total_transactions"`
	TotalAddresses    int64 `json:"total_addresses"`
	LatestBlock       int64 `json:"latest_block"`
}

// statsQueryFunc abstracts the database QueryRow call so the stats loader can
// be unit-tested without a live pool.
type statsQueryFunc func(ctx context.Context, sql string, args ...any) pgx.Row

// loadExplorerStats runs the four aggregate queries backing the stats
// endpoint and returns an error naming the first query that failed.
func loadExplorerStats(ctx context.Context, chainID int, queryRow statsQueryFunc) (explorerStats, error) {
	var stats explorerStats
	// scanCount runs one single-column count query and wraps any failure with
	// a label identifying which aggregate broke.
	scanCount := func(dest *int64, label, query string) error {
		if err := queryRow(ctx, query, chainID).Scan(dest); err != nil {
			return fmt.Errorf("query %s: %w", label, err)
		}
		return nil
	}
	if err := scanCount(&stats.TotalBlocks, "total blocks",
		`SELECT COUNT(*) FROM blocks WHERE chain_id = $1`); err != nil {
		return explorerStats{}, err
	}
	if err := scanCount(&stats.TotalTransactions, "total transactions",
		`SELECT COUNT(*) FROM transactions WHERE chain_id = $1`); err != nil {
		return explorerStats{}, err
	}
	// Distinct senders and receivers, deduplicated across both columns.
	if err := scanCount(&stats.TotalAddresses, "total addresses",
		`SELECT COUNT(*) FROM (
			SELECT from_address AS address
			FROM transactions
			WHERE chain_id = $1 AND from_address IS NOT NULL AND from_address <> ''
			UNION
			SELECT to_address AS address
			FROM transactions
			WHERE chain_id = $1 AND to_address IS NOT NULL AND to_address <> ''
		) unique_addresses`); err != nil {
		return explorerStats{}, err
	}
	if err := scanCount(&stats.LatestBlock, "latest block",
		`SELECT COALESCE(MAX(number), 0) FROM blocks WHERE chain_id = $1`); err != nil {
		return explorerStats{}, err
	}
	return stats, nil
}
// handleStats handles GET /api/v2/stats
func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
@@ -20,43 +74,12 @@ func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Get total blocks
var totalBlocks int64
err := s.db.QueryRow(ctx,
`SELECT COUNT(*) FROM blocks WHERE chain_id = $1`,
s.chainID,
).Scan(&totalBlocks)
stats, err := loadExplorerStats(ctx, s.chainID, s.db.QueryRow)
if err != nil {
totalBlocks = 0
}
// Get total transactions
var totalTransactions int64
err = s.db.QueryRow(ctx,
`SELECT COUNT(*) FROM transactions WHERE chain_id = $1`,
s.chainID,
).Scan(&totalTransactions)
if err != nil {
totalTransactions = 0
}
// Get total addresses
var totalAddresses int64
err = s.db.QueryRow(ctx,
`SELECT COUNT(DISTINCT from_address) + COUNT(DISTINCT to_address) FROM transactions WHERE chain_id = $1`,
s.chainID,
).Scan(&totalAddresses)
if err != nil {
totalAddresses = 0
}
stats := map[string]interface{}{
"total_blocks": totalBlocks,
"total_transactions": totalTransactions,
"total_addresses": totalAddresses,
writeError(w, http.StatusServiceUnavailable, "service_unavailable", "explorer stats are temporarily unavailable")
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(stats)
}

View File

@@ -0,0 +1,73 @@
package rest
import (
"context"
"errors"
"strings"
"testing"
"github.com/jackc/pgx/v5"
"github.com/stretchr/testify/require"
)
// fakeStatsRow is a pgx.Row stub whose Scan behavior is injected per test.
type fakeStatsRow struct {
	scan func(dest ...any) error
}

// Scan delegates to the injected function, satisfying pgx.Row.
func (r fakeStatsRow) Scan(dest ...any) error {
	return r.scan(dest...)
}
// TestLoadExplorerStatsReturnsValues feeds distinct values to each of the four
// aggregate queries (in call order) and checks they land in the right fields.
func TestLoadExplorerStatsReturnsValues(t *testing.T) {
	var call int
	queryRow := func(_ context.Context, _ string, _ ...any) pgx.Row {
		call++
		return fakeStatsRow{
			scan: func(dest ...any) error {
				target, ok := dest[0].(*int64)
				require.True(t, ok)
				// Query order: blocks, transactions, addresses, latest block.
				switch call {
				case 1:
					*target = 11
				case 2:
					*target = 22
				case 3:
					*target = 33
				case 4:
					*target = 44
				default:
					t.Fatalf("unexpected query call %d", call)
				}
				return nil
			},
		}
	}
	stats, err := loadExplorerStats(context.Background(), 138, queryRow)
	require.NoError(t, err)
	require.Equal(t, int64(11), stats.TotalBlocks)
	require.Equal(t, int64(22), stats.TotalTransactions)
	require.Equal(t, int64(33), stats.TotalAddresses)
	require.Equal(t, int64(44), stats.LatestBlock)
}
// TestLoadExplorerStatsReturnsErrorWhenQueryFails makes only the transactions
// count fail and asserts the returned error names that specific query.
func TestLoadExplorerStatsReturnsErrorWhenQueryFails(t *testing.T) {
	queryRow := func(_ context.Context, query string, _ ...any) pgx.Row {
		return fakeStatsRow{
			scan: func(dest ...any) error {
				if strings.Contains(query, "COUNT(*) FROM transactions") {
					return errors.New("boom")
				}
				target, ok := dest[0].(*int64)
				require.True(t, ok)
				*target = 1
				return nil
			},
		}
	}
	_, err := loadExplorerStats(context.Background(), 138, queryRow)
	require.Error(t, err)
	require.Contains(t, err.Error(), "query total transactions")
}

View File

@@ -41,6 +41,8 @@ tags:
description: Unified search endpoints
- name: Track1
description: Public RPC gateway endpoints (no auth required)
- name: MissionControl
description: Public mission-control health, bridge trace, and cached liquidity helpers
- name: Track2
description: Indexed explorer endpoints (auth required)
- name: Track3
@@ -232,6 +234,105 @@ paths:
schema:
$ref: '#/components/schemas/BlockListResponse'
/api/v1/mission-control/stream:
get:
tags:
- MissionControl
summary: Mission-control SSE stream
description: |
Server-Sent Events stream with the same inner `data` payload as `GET /api/v1/track1/bridge/status`.
Emits one event immediately, then refreshes every 20 seconds. Configure nginx with `proxy_buffering off`.
operationId: getMissionControlStream
responses:
'200':
description: SSE stream
content:
text/event-stream:
schema:
type: string
/api/v1/mission-control/liquidity/token/{address}/pools:
get:
tags:
- MissionControl
summary: Cached liquidity proxy
description: |
30-second in-memory cached proxy to the token-aggregation pools endpoint for the configured `CHAIN_ID`.
operationId: getMissionControlLiquidityPools
parameters:
- name: address
in: path
required: true
schema:
type: string
pattern: '^0x[a-fA-F0-9]{40}$'
responses:
'200':
description: Upstream JSON response
'400':
$ref: '#/components/responses/BadRequest'
'503':
description: '`TOKEN_AGGREGATION_BASE_URL` not configured'
/api/v1/mission-control/bridge/trace:
get:
tags:
- MissionControl
summary: Resolve a transaction through Blockscout and label 138-side contracts
description: |
Queries Blockscout using `BLOCKSCOUT_INTERNAL_URL` and labels the `from` and `to` addresses using Chain 138 entries from `SMART_CONTRACTS_MASTER_JSON`.
operationId: getMissionControlBridgeTrace
parameters:
- name: tx
in: query
required: true
schema:
type: string
pattern: '^0x[a-fA-F0-9]{64}$'
responses:
'200':
description: Labeled bridge trace
'400':
$ref: '#/components/responses/BadRequest'
'502':
description: Blockscout lookup failed
/api/v1/track4/operator/run-script:
post:
tags:
- Track4
summary: Run an allowlisted operator script
description: |
Track 4 endpoint. Requires authenticated wallet, IP allowlisting, `OPERATOR_SCRIPTS_ROOT`, and `OPERATOR_SCRIPT_ALLOWLIST`.
operationId: runOperatorScript
security:
- bearerAuth: []
requestBody:
required: true
content:
application/json:
schema:
type: object
required: [script]
properties:
script:
type: string
description: Path relative to `OPERATOR_SCRIPTS_ROOT`
args:
type: array
items:
type: string
maxItems: 24
responses:
'200':
description: Script execution result
'401':
$ref: '#/components/responses/Unauthorized'
'403':
$ref: '#/components/responses/Forbidden'
'503':
description: Script root or allowlist not configured
/api/v1/track2/search:
get:
tags:
@@ -427,4 +528,3 @@ components:
error:
code: "internal_error"
message: "An internal error occurred"

View File

@@ -56,20 +56,23 @@ func (s *Server) SetupTrackRoutes(mux *http.ServeMux, authMiddleware *middleware
mux.HandleFunc("/api/v1/track1/tx/", track1Server.HandleTransactionDetail)
mux.HandleFunc("/api/v1/track1/address/", track1Server.HandleAddressBalance)
mux.HandleFunc("/api/v1/track1/bridge/status", track1Server.HandleBridgeStatus)
mux.HandleFunc("/api/v1/mission-control/stream", track1Server.HandleMissionControlStream)
mux.HandleFunc("/api/v1/mission-control/liquidity/token/", s.handleMissionControlLiquidityTokenPath)
mux.HandleFunc("/api/v1/mission-control/bridge/trace", s.HandleMissionControlBridgeTrace)
// Initialize Track 2 server
track2Server := track2.NewServer(s.db, s.chainID)
// Track 2 routes (require Track 2+)
track2Middleware := authMiddleware.RequireTrack(2)
// Track 2 route handlers with auth
track2AuthHandler := func(handler http.HandlerFunc) http.HandlerFunc {
return authMiddleware.RequireAuth(track2Middleware(http.HandlerFunc(handler))).ServeHTTP
}
mux.HandleFunc("/api/v1/track2/search", track2AuthHandler(track2Server.HandleSearch))
// Address routes - need to parse path
mux.HandleFunc("/api/v1/track2/address/", track2AuthHandler(func(w http.ResponseWriter, r *http.Request) {
path := r.URL.Path
@@ -77,14 +80,19 @@ func (s *Server) SetupTrackRoutes(mux *http.ServeMux, authMiddleware *middleware
if len(parts) >= 2 {
if parts[1] == "txs" {
track2Server.HandleAddressTransactions(w, r)
return
} else if parts[1] == "tokens" {
track2Server.HandleAddressTokens(w, r)
return
} else if parts[1] == "internal-txs" {
track2Server.HandleInternalTransactions(w, r)
return
}
}
writeError(w, http.StatusBadRequest, "bad_request", "Invalid Track 2 address path")
}))
mux.HandleFunc("/api/v1/track2/token/", track2AuthHandler(track2Server.HandleTokenInfo))
// Initialize Track 3 server
@@ -95,7 +103,7 @@ func (s *Server) SetupTrackRoutes(mux *http.ServeMux, authMiddleware *middleware
track3AuthHandler := func(handler http.HandlerFunc) http.HandlerFunc {
return authMiddleware.RequireAuth(track3Middleware(http.HandlerFunc(handler))).ServeHTTP
}
mux.HandleFunc("/api/v1/track3/analytics/flows", track3AuthHandler(track3Server.HandleFlows))
mux.HandleFunc("/api/v1/track3/analytics/bridge", track3AuthHandler(track3Server.HandleBridge))
mux.HandleFunc("/api/v1/track3/analytics/token-distribution/", track3AuthHandler(track3Server.HandleTokenDistribution))
@@ -109,10 +117,10 @@ func (s *Server) SetupTrackRoutes(mux *http.ServeMux, authMiddleware *middleware
track4AuthHandler := func(handler http.HandlerFunc) http.HandlerFunc {
return authMiddleware.RequireAuth(track4Middleware(http.HandlerFunc(handler))).ServeHTTP
}
mux.HandleFunc("/api/v1/track4/operator/bridge/events", track4AuthHandler(track4Server.HandleBridgeEvents))
mux.HandleFunc("/api/v1/track4/operator/validators", track4AuthHandler(track4Server.HandleValidators))
mux.HandleFunc("/api/v1/track4/operator/contracts", track4AuthHandler(track4Server.HandleContracts))
mux.HandleFunc("/api/v1/track4/operator/protocol-state", track4AuthHandler(track4Server.HandleProtocolState))
mux.HandleFunc("/api/v1/track4/operator/run-script", track4AuthHandler(track4Server.HandleRunScript))
}

View File

@@ -52,14 +52,22 @@ func (s *Server) handleListTransactions(w http.ResponseWriter, r *http.Request)
}
if fromAddress := r.URL.Query().Get("from_address"); fromAddress != "" {
query += fmt.Sprintf(" AND from_address = $%d", argIndex)
args = append(args, fromAddress)
if !isValidAddress(fromAddress) {
writeValidationError(w, ErrInvalidAddress)
return
}
query += fmt.Sprintf(" AND LOWER(from_address) = $%d", argIndex)
args = append(args, normalizeAddress(fromAddress))
argIndex++
}
if toAddress := r.URL.Query().Get("to_address"); toAddress != "" {
query += fmt.Sprintf(" AND to_address = $%d", argIndex)
args = append(args, toAddress)
if !isValidAddress(toAddress) {
writeValidationError(w, ErrInvalidAddress)
return
}
query += fmt.Sprintf(" AND LOWER(to_address) = $%d", argIndex)
args = append(args, normalizeAddress(toAddress))
argIndex++
}
@@ -139,6 +147,12 @@ func (s *Server) handleListTransactions(w http.ResponseWriter, r *http.Request)
// handleGetTransactionByHash handles GET /api/v1/transactions/{chain_id}/{hash}
func (s *Server) handleGetTransactionByHash(w http.ResponseWriter, r *http.Request, hash string) {
if !s.requireDB(w) {
return
}
hash = normalizeHash(hash)
// Validate hash format (already validated in routes.go, but double-check)
if !isValidHash(hash) {
writeValidationError(w, ErrInvalidHash)

View File

@@ -41,6 +41,14 @@ func isValidAddress(address string) bool {
return err == nil
}
// normalizeHash canonicalizes a user-supplied hash: surrounding whitespace is
// stripped and hex digits are folded to lowercase.
func normalizeHash(hash string) string {
	trimmed := strings.TrimSpace(hash)
	return strings.ToLower(trimmed)
}
// normalizeAddress canonicalizes a user-supplied address: surrounding
// whitespace is stripped and the hex portion is folded to lowercase.
func normalizeAddress(address string) string {
	trimmed := strings.TrimSpace(address)
	return strings.ToLower(trimmed)
}
// validateBlockNumber validates and parses block number
func validateBlockNumber(blockStr string) (int64, error) {
blockNumber, err := strconv.ParseInt(blockStr, 10, 64)

View File

@@ -0,0 +1,23 @@
package rest
import "testing"
// TestNormalizeAddress checks trimming and lowercasing of a mixed-case,
// whitespace-padded address.
func TestNormalizeAddress(t *testing.T) {
	input := " 0xAbCdEf1234567890ABCdef1234567890abCDef12 "
	got := normalizeAddress(input)
	want := "0xabcdef1234567890abcdef1234567890abcdef12"
	if got != want {
		t.Fatalf("normalizeAddress() = %q, want %q", got, want)
	}
}
// TestNormalizeHash checks trimming and lowercasing of a mixed-case,
// whitespace-padded 32-byte hash.
func TestNormalizeHash(t *testing.T) {
	input := " 0xABCDEF1234567890ABCDEF1234567890ABCDEF1234567890ABCDEF1234567890 "
	got := normalizeHash(input)
	want := "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"
	if got != want {
		t.Fatalf("normalizeHash() = %q, want %q", got, want)
	}
}

View File

@@ -0,0 +1,146 @@
package track1
import (
"context"
"os"
"strings"
"time"
)
// relaySnapshotStatus derives a lowercase status string from a relay health
// signal map. URL-probe results take precedence over file snapshots; a failed
// probe or a file read error maps to "down". Returns "" when no status can be
// determined.
func relaySnapshotStatus(relay map[string]interface{}) string {
	if relay == nil {
		return ""
	}
	if probe, hasProbe := relay["url_probe"].(map[string]interface{}); hasProbe {
		// An explicit ok=false from the probe always means "down".
		if okValue, exists := probe["ok"].(bool); exists && !okValue {
			return "down"
		}
		if body, hasBody := probe["body"].(map[string]interface{}); hasBody {
			if status, hasStatus := body["status"].(string); hasStatus {
				return strings.ToLower(strings.TrimSpace(status))
			}
		}
	}
	// A file read/parse error is treated as an outage signal.
	if _, hasFileErr := relay["file_snapshot_error"].(string); hasFileErr {
		return "down"
	}
	if snapshot, hasSnap := relay["file_snapshot"].(map[string]interface{}); hasSnap {
		if status, hasStatus := snapshot["status"].(string); hasStatus {
			return strings.ToLower(strings.TrimSpace(status))
		}
	}
	return ""
}
// relayNeedsAttention reports whether a relay's derived status indicates a
// problem that should mark the overall bridge status as degraded.
func relayNeedsAttention(relay map[string]interface{}) bool {
	problemStatuses := map[string]struct{}{
		"degraded": {},
		"stale":    {},
		"stopped":  {},
		"down":     {},
	}
	_, needsAttention := problemStatuses[relaySnapshotStatus(relay)]
	return needsAttention
}
// BuildBridgeStatusData builds the inner `data` object for bridge/status and SSE payloads.
// It probes the Chain 138 RPC (RPC_URL, defaulting to a local node), optionally
// Ethereum mainnet (ETH_MAINNET_RPC_URL) and any extra configured RPCs, then
// attaches optional operator-verify JSON and CCIP relay health signals. The
// overall status starts "operational" and degrades if any probe fails or any
// relay signal needs attention.
func (s *Server) BuildBridgeStatusData(ctx context.Context) map[string]interface{} {
	// Chain 138 endpoint; fall back to the local node when unset.
	rpc138 := strings.TrimSpace(os.Getenv("RPC_URL"))
	if rpc138 == "" {
		rpc138 = "http://localhost:8545"
	}
	var probes []RPCProbeResult
	p138 := ProbeEVMJSONRPC(ctx, "chain-138", "138", rpc138)
	probes = append(probes, p138)
	if eth := strings.TrimSpace(os.Getenv("ETH_MAINNET_RPC_URL")); eth != "" {
		probes = append(probes, ProbeEVMJSONRPC(ctx, "ethereum-mainnet", "1", eth))
	}
	// Extra operator-configured probes (name, url, chain-key triples).
	for _, row := range ParseExtraRPCProbes() {
		name, u, ck := row[0], row[1], row[2]
		probes = append(probes, ProbeEVMJSONRPC(ctx, name, ck, u))
	}
	// Any failing probe degrades the overall status.
	overall := "operational"
	if !p138.OK {
		overall = "degraded"
	} else {
		for _, p := range probes {
			if !p.OK {
				overall = "degraded"
				break
			}
		}
	}
	now := time.Now().UTC().Format(time.RFC3339)
	chains := map[string]interface{}{
		"138": map[string]interface{}{
			"name":         "Defi Oracle Meta Mainnet",
			"status":       chainStatusFromProbe(p138),
			"last_sync":    now,
			"latency_ms":   p138.LatencyMs,
			"head_age_sec": p138.HeadAgeSeconds,
			"block_number": p138.BlockNumberDec,
			"endpoint":     p138.Endpoint,
			"probe_error":  p138.Error,
		},
	}
	// Surface the first probe that looks like Ethereum mainnet as chain "1".
	for _, p := range probes {
		if p.ChainKey != "1" && p.Name != "ethereum-mainnet" {
			continue
		}
		chains["1"] = map[string]interface{}{
			"name":         "Ethereum Mainnet",
			"status":       chainStatusFromProbe(p),
			"last_sync":    now,
			"latency_ms":   p.LatencyMs,
			"head_age_sec": p.HeadAgeSeconds,
			"block_number": p.BlockNumberDec,
			"endpoint":     p.Endpoint,
			"probe_error":  p.Error,
		}
		break
	}
	// Flatten every probe for the raw rpc_probe array.
	probeJSON := make([]map[string]interface{}, 0, len(probes))
	for _, p := range probes {
		probeJSON = append(probeJSON, map[string]interface{}{
			"name":           p.Name,
			"chainKey":       p.ChainKey,
			"endpoint":       p.Endpoint,
			"ok":             p.OK,
			"latencyMs":      p.LatencyMs,
			"blockNumber":    p.BlockNumber,
			"blockNumberDec": p.BlockNumberDec,
			"headAgeSeconds": p.HeadAgeSeconds,
			"error":          p.Error,
		})
	}
	data := map[string]interface{}{
		"status":     overall,
		"chains":     chains,
		"rpc_probe":  probeJSON,
		"checked_at": now,
	}
	// Optional operator verification snapshot.
	if ov := readOptionalVerifyJSON(); ov != nil {
		data["operator_verify"] = ov
	}
	// Optional CCIP relay signals; any unhealthy relay degrades the status.
	if relays := FetchCCIPRelayHealths(ctx); relays != nil {
		data["ccip_relays"] = relays
		if ccip := primaryRelayHealth(relays); ccip != nil {
			data["ccip_relay"] = ccip
		}
		for _, value := range relays {
			relay, ok := value.(map[string]interface{})
			if ok && relayNeedsAttention(relay) {
				data["status"] = "degraded"
				break
			}
		}
	}
	return data
}

View File

@@ -0,0 +1,182 @@
package track1
import (
"context"
"encoding/json"
"io"
"net/http"
"os"
"sort"
"strconv"
"strings"
"time"
)
// relayHealthTarget is one named relay health endpoint parsed from the
// CCIP_RELAY_HEALTH_URLS environment variable.
type relayHealthTarget struct {
	Name string // normalized map key for the relay
	URL  string // health endpoint to probe
}
func fetchRelayHealthURL(ctx context.Context, u string) map[string]interface{} {
out := make(map[string]interface{})
c := &http.Client{Timeout: 4 * time.Second}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
out["url_probe"] = map[string]interface{}{"ok": false, "error": err.Error()}
} else {
resp, err := c.Do(req)
if err != nil {
out["url_probe"] = map[string]interface{}{"ok": false, "error": err.Error()}
} else {
func() {
defer resp.Body.Close()
b, _ := io.ReadAll(io.LimitReader(resp.Body, 256*1024))
ok := resp.StatusCode >= 200 && resp.StatusCode < 300
var j interface{}
if json.Unmarshal(b, &j) == nil {
out["url_probe"] = map[string]interface{}{"ok": ok, "status": resp.StatusCode, "body": j}
} else {
out["url_probe"] = map[string]interface{}{"ok": ok, "status": resp.StatusCode, "raw": string(b)}
}
}()
}
}
return out
}
func fetchRelayHealthFileSnapshot(p string) map[string]interface{} {
out := make(map[string]interface{})
if p != "" {
b, err := os.ReadFile(p)
if err != nil {
out["file_snapshot_error"] = err.Error()
} else if len(b) > 512*1024 {
out["file_snapshot_error"] = "file too large"
} else {
var j interface{}
if err := json.Unmarshal(b, &j); err != nil {
out["file_snapshot_error"] = err.Error()
} else {
out["file_snapshot"] = j
}
}
}
return out
}
// buildRelayHealthSignal merges the URL-probe and file-snapshot signals for a
// single relay into one map. It returns nil when neither source is configured
// so callers can omit the relay from the payload entirely.
func buildRelayHealthSignal(ctx context.Context, url, filePath string) map[string]interface{} {
	merged := make(map[string]interface{})
	if strings.TrimSpace(url) != "" {
		for key, value := range fetchRelayHealthURL(ctx, url) {
			merged[key] = value
		}
	}
	if strings.TrimSpace(filePath) != "" {
		for key, value := range fetchRelayHealthFileSnapshot(filePath) {
			merged[key] = value
		}
	}
	if len(merged) == 0 {
		return nil
	}
	return merged
}
// normalizeRelayHealthName turns a user-supplied relay label into a stable map
// key: trimmed, lowercase, with spaces, dashes, and slashes collapsed to
// underscores. Empty labels fall back to a positional "relay_<index>" name.
func normalizeRelayHealthName(raw string, index int) string {
	name := strings.ToLower(strings.TrimSpace(raw))
	if name == "" {
		return "relay_" + strconv.Itoa(index)
	}
	name = strings.ReplaceAll(name, " ", "_")
	name = strings.ReplaceAll(name, "-", "_")
	return strings.ReplaceAll(name, "/", "_")
}
// parseRelayHealthTargets reads CCIP_RELAY_HEALTH_URLS and returns the named
// relay endpoints. Entries may be separated by commas, semicolons, or
// newlines, and each entry is either "name=url" or a bare URL (which receives
// a positional name). Blank names/URLs are skipped.
func parseRelayHealthTargets() []relayHealthTarget {
	raw := strings.TrimSpace(os.Getenv("CCIP_RELAY_HEALTH_URLS"))
	if raw == "" {
		return nil
	}
	// Collapse all accepted separators to commas before splitting.
	flattened := strings.NewReplacer("\n", ",", ";", ",").Replace(raw)
	entries := strings.Split(flattened, ",")
	targets := make([]relayHealthTarget, 0, len(entries))
	for idx, entry := range entries {
		entry = strings.TrimSpace(entry)
		if entry == "" {
			continue
		}
		name, url, hasName := strings.Cut(entry, "=")
		if hasName {
			name = normalizeRelayHealthName(name, idx+1)
			url = strings.TrimSpace(url)
		} else {
			url = entry
			name = normalizeRelayHealthName("", idx+1)
		}
		if url == "" {
			continue
		}
		targets = append(targets, relayHealthTarget{Name: name, URL: url})
	}
	return targets
}
// FetchCCIPRelayHealths returns optional named CCIP / relay signals assembled
// from URL probes and/or operator JSON files. The legacy single-relay env vars
// (CCIP_RELAY_HEALTH_URL / MISSION_CONTROL_CCIP_JSON) populate the "mainnet"
// entry and take precedence over a same-named entry from
// CCIP_RELAY_HEALTH_URLS. Safe defaults: short timeouts, small body caps.
// Returns nil when nothing is configured so the field is omitted entirely.
func FetchCCIPRelayHealths(ctx context.Context) map[string]interface{} {
	relays := make(map[string]interface{})
	legacyURL := strings.TrimSpace(os.Getenv("CCIP_RELAY_HEALTH_URL"))
	legacyFile := strings.TrimSpace(os.Getenv("MISSION_CONTROL_CCIP_JSON"))
	if legacy := buildRelayHealthSignal(ctx, legacyURL, legacyFile); legacy != nil {
		relays["mainnet"] = legacy
	}
	for _, target := range parseRelayHealthTargets() {
		if _, taken := relays[target.Name]; taken {
			continue
		}
		if signal := buildRelayHealthSignal(ctx, target.URL, ""); signal != nil {
			relays[target.Name] = signal
		}
	}
	if len(relays) == 0 {
		return nil
	}
	return relays
}
// primaryRelayHealth picks the single relay signal surfaced as the legacy
// "ccip_relay" field: well-known mainnet keys are preferred in order, then the
// alphabetically first remaining map-valued relay. Returns nil when no usable
// relay exists.
func primaryRelayHealth(relays map[string]interface{}) map[string]interface{} {
	if len(relays) == 0 {
		return nil
	}
	// Preference order for the canonical mainnet relays.
	for _, key := range []string{"mainnet_cw", "mainnet_weth", "mainnet"} {
		if relay, isMap := relays[key].(map[string]interface{}); isMap {
			return relay
		}
	}
	// Deterministic fallback: alphabetically first relay that is a map.
	keys := make([]string, 0, len(relays))
	for key := range relays {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		if relay, isMap := relays[key].(map[string]interface{}); isMap {
			return relay
		}
	}
	return nil
}
// FetchCCIPRelayHealth returns the primary relay signal for legacy callers.
// It delegates to FetchCCIPRelayHealths and selects one relay via
// primaryRelayHealth; returns nil when no relay signal is configured.
func FetchCCIPRelayHealth(ctx context.Context) map[string]interface{} {
	return primaryRelayHealth(FetchCCIPRelayHealths(ctx))
}

View File

@@ -0,0 +1,203 @@
package track1
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestFetchCCIPRelayHealthFromURL serves a healthy relay payload over HTTP and
// checks the legacy single-URL env var produces an ok url_probe with the
// parsed JSON body attached.
func TestFetchCCIPRelayHealthFromURL(t *testing.T) {
	relay := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"ok":true,"status":"operational","destination":{"chain_name":"Ethereum Mainnet"},"queue":{"size":0}}`))
	}))
	defer relay.Close()
	t.Setenv("CCIP_RELAY_HEALTH_URL", relay.URL+"/healthz")
	t.Setenv("CCIP_RELAY_HEALTH_URLS", "")
	t.Setenv("MISSION_CONTROL_CCIP_JSON", "")
	got := FetchCCIPRelayHealth(context.Background())
	require.NotNil(t, got)
	probe, ok := got["url_probe"].(map[string]interface{})
	require.True(t, ok)
	require.Equal(t, true, probe["ok"])
	body, ok := probe["body"].(map[string]interface{})
	require.True(t, ok)
	require.Equal(t, "operational", body["status"])
	dest, ok := body["destination"].(map[string]interface{})
	require.True(t, ok)
	require.Equal(t, "Ethereum Mainnet", dest["chain_name"])
}
// TestFetchCCIPRelayHealthsFromNamedURLs configures two named relays via
// CCIP_RELAY_HEALTH_URLS and checks each appears under its own key with its
// own probe body.
func TestFetchCCIPRelayHealthsFromNamedURLs(t *testing.T) {
	mainnet := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"status":"operational","destination":{"chain_name":"Ethereum Mainnet"},"queue":{"size":0}}`))
	}))
	defer mainnet.Close()
	bsc := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"status":"operational","destination":{"chain_name":"BSC"},"queue":{"size":1}}`))
	}))
	defer bsc.Close()
	t.Setenv("CCIP_RELAY_HEALTH_URL", "")
	t.Setenv("MISSION_CONTROL_CCIP_JSON", "")
	t.Setenv("CCIP_RELAY_HEALTH_URLS", "mainnet="+mainnet.URL+"/healthz,bsc="+bsc.URL+"/healthz")
	got := FetchCCIPRelayHealths(context.Background())
	require.NotNil(t, got)
	mainnetRelay, ok := got["mainnet"].(map[string]interface{})
	require.True(t, ok)
	mainnetProbe, ok := mainnetRelay["url_probe"].(map[string]interface{})
	require.True(t, ok)
	require.Equal(t, true, mainnetProbe["ok"])
	bscRelay, ok := got["bsc"].(map[string]interface{})
	require.True(t, ok)
	bscProbe, ok := bscRelay["url_probe"].(map[string]interface{})
	require.True(t, ok)
	body, ok := bscProbe["body"].(map[string]interface{})
	require.True(t, ok)
	dest, ok := body["destination"].(map[string]interface{})
	require.True(t, ok)
	require.Equal(t, "BSC", dest["chain_name"])
}
// TestFetchCCIPRelayHealthPrefersMainnetCW checks the primary-relay selection
// order: "mainnet_cw" wins over "mainnet_weth" and any other relay.
func TestFetchCCIPRelayHealthPrefersMainnetCW(t *testing.T) {
	relays := map[string]interface{}{
		"mainnet_weth": map[string]interface{}{"url_probe": map[string]interface{}{"ok": true}},
		"mainnet_cw":   map[string]interface{}{"url_probe": map[string]interface{}{"ok": true, "body": map[string]interface{}{"status": "operational"}}},
		"bsc":          map[string]interface{}{"url_probe": map[string]interface{}{"ok": true}},
	}
	got := primaryRelayHealth(relays)
	require.NotNil(t, got)
	require.Equal(t, relays["mainnet_cw"], got)
}
// TestFetchCCIPRelayHealthFromFileSnapshot checks that an operator JSON file
// configured via MISSION_CONTROL_CCIP_JSON is parsed into file_snapshot.
func TestFetchCCIPRelayHealthFromFileSnapshot(t *testing.T) {
	dir := t.TempDir()
	path := filepath.Join(dir, "relay-health.json")
	require.NoError(t, os.WriteFile(path, []byte(`{"status":"paused","queue":{"size":3}}`), 0o644))
	t.Setenv("CCIP_RELAY_HEALTH_URL", "")
	t.Setenv("CCIP_RELAY_HEALTH_URLS", "")
	t.Setenv("MISSION_CONTROL_CCIP_JSON", path)
	got := FetchCCIPRelayHealth(context.Background())
	require.NotNil(t, got)
	snapshot, ok := got["file_snapshot"].(map[string]interface{})
	require.True(t, ok)
	require.Equal(t, "paused", snapshot["status"])
	queue, ok := snapshot["queue"].(map[string]interface{})
	require.True(t, ok)
	// JSON numbers decode as float64.
	require.Equal(t, float64(3), queue["size"])
}
// TestBuildBridgeStatusDataIncludesCCIPRelay stubs a healthy RPC node and a
// healthy relay endpoint, then checks BuildBridgeStatusData surfaces both the
// ccip_relays map (with the legacy "mainnet" key) and the primary ccip_relay.
func TestBuildBridgeStatusDataIncludesCCIPRelay(t *testing.T) {
	// Minimal JSON-RPC stub answering eth_blockNumber and eth_getBlockByNumber.
	rpc := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var req struct {
			Method string `json:"method"`
		}
		require.NoError(t, json.NewDecoder(r.Body).Decode(&req))
		w.Header().Set("Content-Type", "application/json")
		switch req.Method {
		case "eth_blockNumber":
			_, _ = w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":"0x10"}`))
		case "eth_getBlockByNumber":
			ts := strconv.FormatInt(time.Now().Add(-2*time.Second).Unix(), 16)
			_, _ = w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x` + ts + `"}}`))
		default:
			http.Error(w, `{"jsonrpc":"2.0","id":1,"error":{"message":"unsupported"}}`, http.StatusBadRequest)
		}
	}))
	defer rpc.Close()
	relay := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"ok":true,"status":"operational","queue":{"size":0}}`))
	}))
	defer relay.Close()
	t.Setenv("RPC_URL", rpc.URL)
	t.Setenv("ETH_MAINNET_RPC_URL", "")
	t.Setenv("MISSION_CONTROL_EXTRA_RPCS", "")
	t.Setenv("MISSION_CONTROL_VERIFY_JSON", "")
	t.Setenv("CCIP_RELAY_HEALTH_URL", relay.URL+"/healthz")
	t.Setenv("CCIP_RELAY_HEALTH_URLS", "")
	t.Setenv("MISSION_CONTROL_CCIP_JSON", "")
	s := &Server{}
	got := s.BuildBridgeStatusData(context.Background())
	ccip, ok := got["ccip_relay"].(map[string]interface{})
	require.True(t, ok)
	relays, ok := got["ccip_relays"].(map[string]interface{})
	require.True(t, ok)
	require.Contains(t, relays, "mainnet")
	probe, ok := ccip["url_probe"].(map[string]interface{})
	require.True(t, ok)
	require.Equal(t, true, probe["ok"])
}
// TestBuildBridgeStatusDataDegradesWhenNamedRelayFails stubs a healthy RPC
// node, one healthy named relay, and one relay returning 502, and checks that
// the failing relay degrades the overall status.
func TestBuildBridgeStatusDataDegradesWhenNamedRelayFails(t *testing.T) {
	// Minimal JSON-RPC stub answering eth_blockNumber and eth_getBlockByNumber.
	rpc := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var req struct {
			Method string `json:"method"`
		}
		require.NoError(t, json.NewDecoder(r.Body).Decode(&req))
		w.Header().Set("Content-Type", "application/json")
		switch req.Method {
		case "eth_blockNumber":
			_, _ = w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":"0x10"}`))
		case "eth_getBlockByNumber":
			ts := strconv.FormatInt(time.Now().Add(-2*time.Second).Unix(), 16)
			_, _ = w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x` + ts + `"}}`))
		default:
			http.Error(w, `{"jsonrpc":"2.0","id":1,"error":{"message":"unsupported"}}`, http.StatusBadRequest)
		}
	}))
	defer rpc.Close()
	mainnet := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"status":"operational","queue":{"size":0}}`))
	}))
	defer mainnet.Close()
	// This relay answers with a non-2xx status, so its probe reports ok=false.
	bad := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, `{"status":"degraded"}`, http.StatusBadGateway)
	}))
	defer bad.Close()
	t.Setenv("RPC_URL", rpc.URL)
	t.Setenv("ETH_MAINNET_RPC_URL", "")
	t.Setenv("MISSION_CONTROL_EXTRA_RPCS", "")
	t.Setenv("MISSION_CONTROL_VERIFY_JSON", "")
	t.Setenv("CCIP_RELAY_HEALTH_URL", "")
	t.Setenv("MISSION_CONTROL_CCIP_JSON", "")
	t.Setenv("CCIP_RELAY_HEALTH_URLS", "mainnet="+mainnet.URL+"/healthz,bsc="+bad.URL+"/healthz")
	s := &Server{}
	got := s.BuildBridgeStatusData(context.Background())
	require.Equal(t, "degraded", got["status"])
}

View File

@@ -1,17 +1,22 @@
package track1
import (
"context"
"encoding/json"
"fmt"
"math/big"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/explorer/backend/libs/go-rpc-gateway"
)
var track1HashPattern = regexp.MustCompile(`^0x[a-fA-F0-9]{64}$`)
// Server handles Track 1 endpoints (uses RPC gateway from lib)
type Server struct {
rpcGateway *gateway.RPCGateway
@@ -173,7 +178,12 @@ func (s *Server) HandleBlockDetail(w http.ResponseWriter, r *http.Request) {
}
path := strings.TrimPrefix(r.URL.Path, "/api/v1/track1/block/")
blockNumStr := fmt.Sprintf("0x%x", parseBlockNumber(path))
blockNumber, err := strconv.ParseInt(strings.TrimSpace(path), 10, 64)
if err != nil || blockNumber < 0 {
writeError(w, http.StatusBadRequest, "bad_request", "Invalid block number")
return
}
blockNumStr := fmt.Sprintf("0x%x", blockNumber)
blockResp, err := s.rpcGateway.GetBlockByNumber(r.Context(), blockNumStr, false)
if err != nil {
@@ -203,7 +213,11 @@ func (s *Server) HandleTransactionDetail(w http.ResponseWriter, r *http.Request)
}
path := strings.TrimPrefix(r.URL.Path, "/api/v1/track1/tx/")
txHash := path
txHash := strings.TrimSpace(path)
if !track1HashPattern.MatchString(txHash) {
writeError(w, http.StatusBadRequest, "bad_request", "Invalid transaction hash")
return
}
txResp, err := s.rpcGateway.GetTransactionByHash(r.Context(), txHash)
if err != nil {
@@ -239,7 +253,11 @@ func (s *Server) HandleAddressBalance(w http.ResponseWriter, r *http.Request) {
return
}
address := parts[0]
address := strings.TrimSpace(parts[0])
if !common.IsHexAddress(address) {
writeError(w, http.StatusBadRequest, "bad_request", "Invalid address")
return
}
balanceResp, err := s.rpcGateway.GetBalance(r.Context(), address, "latest")
if err != nil {
writeError(w, http.StatusInternalServerError, "rpc_error", err.Error())
@@ -278,31 +296,25 @@ func (s *Server) HandleBridgeStatus(w http.ResponseWriter, r *http.Request) {
return
}
// Return bridge status (simplified - in production, query bridge contracts)
ctx, cancel := context.WithTimeout(r.Context(), 12*time.Second)
defer cancel()
data := s.BuildBridgeStatusData(ctx)
response := map[string]interface{}{
"data": map[string]interface{}{
"status": "operational",
"chains": map[string]interface{}{
"138": map[string]interface{}{
"name": "Defi Oracle Meta Mainnet",
"status": "operational",
"last_sync": time.Now().UTC().Format(time.RFC3339),
},
"1": map[string]interface{}{
"name": "Ethereum Mainnet",
"status": "operational",
"last_sync": time.Now().UTC().Format(time.RFC3339),
},
},
"total_transfers_24h": 150,
"total_volume_24h": "5000000000000000000000",
},
"data": data,
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
}
// chainStatusFromProbe maps an RPC probe outcome onto the chain status label
// used in bridge-status payloads: "operational" when the probe succeeded,
// "unreachable" otherwise.
func chainStatusFromProbe(p RPCProbeResult) string {
	if !p.OK {
		return "unreachable"
	}
	return "operational"
}
// Helper functions
func writeError(w http.ResponseWriter, statusCode int, code, message string) {
w.Header().Set("Content-Type", "application/json")
@@ -320,14 +332,6 @@ func hexToInt(hex string) (int64, error) {
return strconv.ParseInt(hex, 16, 64)
}
// parseBlockNumber parses a decimal block-number string; any value that does
// not parse as a base-10 int64 yields 0.
func parseBlockNumber(s string) int64 {
	if n, err := strconv.ParseInt(s, 10, 64); err == nil {
		return n
	}
	return 0
}
func transformBlock(blockData map[string]interface{}) map[string]interface{} {
return map[string]interface{}{
"number": parseHexField(blockData["number"]),

View File

@@ -0,0 +1,43 @@
package track1
import (
"net/http"
"net/http/httptest"
"testing"
)
// TestHandleBlockDetailRejectsInvalidBlockNumber asserts that a non-numeric
// block path segment is rejected with HTTP 400.
func TestHandleBlockDetailRejectsInvalidBlockNumber(t *testing.T) {
	srv := &Server{}
	rec := httptest.NewRecorder()
	srv.HandleBlockDetail(rec, httptest.NewRequest(http.MethodGet, "/api/v1/track1/block/not-a-number", nil))
	if got := rec.Code; got != http.StatusBadRequest {
		t.Fatalf("expected 400 for invalid block number, got %d", got)
	}
}
// TestHandleTransactionDetailRejectsInvalidHash asserts that a malformed
// transaction-hash path segment is rejected with HTTP 400.
func TestHandleTransactionDetailRejectsInvalidHash(t *testing.T) {
	srv := &Server{}
	rec := httptest.NewRecorder()
	srv.HandleTransactionDetail(rec, httptest.NewRequest(http.MethodGet, "/api/v1/track1/tx/not-a-hash", nil))
	if got := rec.Code; got != http.StatusBadRequest {
		t.Fatalf("expected 400 for invalid tx hash, got %d", got)
	}
}
// TestHandleAddressBalanceRejectsInvalidAddress asserts that a non-hex
// address path segment is rejected with HTTP 400.
func TestHandleAddressBalanceRejectsInvalidAddress(t *testing.T) {
	srv := &Server{}
	rec := httptest.NewRecorder()
	srv.HandleAddressBalance(rec, httptest.NewRequest(http.MethodGet, "/api/v1/track1/address/not-an-address/balance", nil))
	if got := rec.Code; got != http.StatusBadRequest {
		t.Fatalf("expected 400 for invalid address, got %d", got)
	}
}

View File

@@ -0,0 +1,54 @@
package track1
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
)
// HandleMissionControlStream sends periodic text/event-stream payloads with full bridge/status data (for SPA or tooling).
// GET only. An event named "mission-control" is emitted immediately on
// connect and then every 20 seconds until the client disconnects or a
// marshal/flush failure ends the stream.
func (s *Server) HandleMissionControlStream(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// ResponseController lets us flush each event through any wrapping writers.
	controller := http.NewResponseController(w)
	// Standard SSE headers; X-Accel-Buffering disables nginx response buffering.
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("X-Accel-Buffering", "no")
	tick := time.NewTicker(20 * time.Second)
	defer tick.Stop()
	// send builds one status snapshot (bounded to 12s) and writes it as a
	// single SSE event. It returns false when marshaling or flushing fails,
	// which terminates the stream.
	send := func() bool {
		ctx, cancel := context.WithTimeout(r.Context(), 12*time.Second)
		defer cancel()
		data := s.BuildBridgeStatusData(ctx)
		payload, err := json.Marshal(map[string]interface{}{"data": data})
		if err != nil {
			return false
		}
		_, _ = fmt.Fprintf(w, "event: mission-control\ndata: %s\n\n", payload)
		return controller.Flush() == nil
	}
	// Emit one event up front so clients do not wait for the first tick.
	if !send() {
		return
	}
	for {
		select {
		case <-r.Context().Done():
			// Client disconnected (or request context canceled).
			return
		case <-tick.C:
			if !send() {
				return
			}
		}
	}
}

View File

@@ -0,0 +1,72 @@
package track1
import (
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestHandleMissionControlStreamSendsInitialEvent points the server at a fake
// JSON-RPC node via env vars and verifies that the SSE handler emits its
// first "mission-control" event immediately (without waiting for the 20s
// ticker), with the expected Content-Type and payload markers.
func TestHandleMissionControlStreamSendsInitialEvent(t *testing.T) {
	// Fake JSON-RPC node returning a fixed block number and a recent head
	// timestamp so the chain probe reports healthy.
	rpc := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var req struct {
			Method string `json:"method"`
		}
		require.NoError(t, json.NewDecoder(r.Body).Decode(&req))
		w.Header().Set("Content-Type", "application/json")
		switch req.Method {
		case "eth_blockNumber":
			_, _ = w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":"0x10"}`))
		case "eth_getBlockByNumber":
			// Head timestamp 2s in the past (hex) -> small head age.
			ts := strconv.FormatInt(time.Now().Add(-2*time.Second).Unix(), 16)
			_, _ = w.Write([]byte(`{"jsonrpc":"2.0","id":1,"result":{"timestamp":"0x` + ts + `"}}`))
		default:
			http.Error(w, `{"jsonrpc":"2.0","id":1,"error":{"message":"unsupported"}}`, http.StatusBadRequest)
		}
	}))
	defer rpc.Close()
	// Hermetic env: only RPC_URL is live; every other knob is cleared.
	t.Setenv("RPC_URL", rpc.URL)
	t.Setenv("ETH_MAINNET_RPC_URL", "")
	t.Setenv("MISSION_CONTROL_EXTRA_RPCS", "")
	t.Setenv("MISSION_CONTROL_VERIFY_JSON", "")
	t.Setenv("CCIP_RELAY_HEALTH_URL", "")
	t.Setenv("CCIP_RELAY_HEALTH_URLS", "")
	t.Setenv("MISSION_CONTROL_CCIP_JSON", "")
	s := &Server{}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	req := httptest.NewRequest(http.MethodGet, "/api/v1/mission-control/stream", nil).WithContext(ctx)
	w := httptest.NewRecorder()
	done := make(chan struct{})
	// Run the (blocking) stream handler in the background.
	go func() {
		s.HandleMissionControlStream(w, req)
		close(done)
	}()
	// Poll up to 500ms for the first event to land in the recorder.
	// NOTE(review): w.Body is read here while the handler goroutine writes it;
	// httptest.ResponseRecorder is not synchronized — confirm under -race.
	deadline := time.Now().Add(500 * time.Millisecond)
	for time.Now().Before(deadline) {
		if strings.Contains(w.Body.String(), "event: mission-control") {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	// Cancel the request context to stop the stream, then wait for exit
	// before inspecting the recorder.
	cancel()
	<-done
	require.Contains(t, w.Header().Get("Content-Type"), "text/event-stream")
	require.Contains(t, w.Body.String(), "event: mission-control")
	require.Contains(t, w.Body.String(), `"status":"operational"`)
	require.Contains(t, w.Body.String(), `"chain-138"`)
}

View File

@@ -0,0 +1,204 @@
package track1
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
)
// RPCProbeResult is one JSON-RPC health check (URLs are redacted to origin only in JSON).
type RPCProbeResult struct {
	Name           string  `json:"name"`                     // human-readable probe label
	ChainKey       string  `json:"chainKey,omitempty"`       // optional chain identifier for grouping
	Endpoint       string  `json:"endpoint"`                 // redacted scheme://host origin of the RPC URL
	OK             bool    `json:"ok"`                       // true once eth_blockNumber succeeds
	LatencyMs      int64   `json:"latencyMs"`                // total round-trip time of the probe call(s), ms
	BlockNumber    string  `json:"blockNumber,omitempty"`    // latest block number as returned (hex string)
	BlockNumberDec string  `json:"blockNumberDec,omitempty"` // decimal rendering of BlockNumber, when parseable
	HeadAgeSeconds float64 `json:"headAgeSeconds,omitempty"` // age of the latest block's timestamp, seconds
	Error          string  `json:"error,omitempty"`          // failure or partial-failure detail, if any
}
// jsonRPCReq is the minimal JSON-RPC 2.0 request envelope used by probes.
type jsonRPCReq struct {
	JSONRPC string        `json:"jsonrpc"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
	ID      int           `json:"id"`
}

// jsonRPCResp captures only the response fields probes inspect: the raw
// result payload and an optional error message.
type jsonRPCResp struct {
	Result json.RawMessage `json:"result"`
	Error  *struct {
		Message string `json:"message"`
	} `json:"error"`
}
// redactRPCOrigin reduces an RPC URL to its scheme://host origin so that
// credentials, paths, and query strings (which often carry API keys) never
// appear in JSON output. Blank input yields ""; unparseable or host-less
// input yields "hidden"; a scheme-less URL yields just the host.
func redactRPCOrigin(raw string) string {
	trimmed := strings.TrimSpace(raw)
	if trimmed == "" {
		return ""
	}
	parsed, err := url.Parse(trimmed)
	if err != nil || parsed.Host == "" {
		return "hidden"
	}
	if parsed.Scheme == "" {
		return parsed.Host
	}
	return parsed.Scheme + "://" + parsed.Host
}
// postJSONRPC issues one JSON-RPC 2.0 call and returns the raw result, the
// latency of the HTTP round trip in milliseconds, and any transport,
// HTTP-status, decode, or RPC-level error. Response bodies are capped at
// 1 MiB. A nil client falls back to http.DefaultClient.
func postJSONRPC(ctx context.Context, client *http.Client, rpcURL string, method string, params []interface{}) (json.RawMessage, int64, error) {
	if client == nil {
		client = http.DefaultClient
	}
	payload, err := json.Marshal(jsonRPCReq{JSONRPC: "2.0", Method: method, Params: params, ID: 1})
	if err != nil {
		return nil, 0, err
	}
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, rpcURL, bytes.NewReader(payload))
	if err != nil {
		return nil, 0, err
	}
	httpReq.Header.Set("Content-Type", "application/json")
	started := time.Now()
	httpResp, err := client.Do(httpReq)
	elapsed := time.Since(started).Milliseconds()
	if err != nil {
		return nil, elapsed, err
	}
	defer httpResp.Body.Close()
	// Read (bounded) before checking the status so the body is always drained.
	raw, err := io.ReadAll(io.LimitReader(httpResp.Body, 1<<20))
	if err != nil {
		return nil, elapsed, err
	}
	if httpResp.StatusCode < 200 || httpResp.StatusCode >= 300 {
		return nil, elapsed, fmt.Errorf("http %d", httpResp.StatusCode)
	}
	var decoded jsonRPCResp
	if err := json.Unmarshal(raw, &decoded); err != nil {
		return nil, elapsed, err
	}
	if decoded.Error != nil && decoded.Error.Message != "" {
		return nil, elapsed, fmt.Errorf("rpc error: %s", decoded.Error.Message)
	}
	return decoded.Result, elapsed, nil
}
// ProbeEVMJSONRPC runs eth_blockNumber and eth_getBlockByNumber(latest) for head age.
// The probe is considered OK once eth_blockNumber succeeds; failures in the
// follow-up head-timestamp lookup are recorded in Error but leave OK true.
// The endpoint in the result is redacted to its scheme://host origin.
func ProbeEVMJSONRPC(ctx context.Context, name, chainKey, rpcURL string) RPCProbeResult {
	rpcURL = strings.TrimSpace(rpcURL)
	res := RPCProbeResult{
		Name:     name,
		ChainKey: chainKey,
		Endpoint: redactRPCOrigin(rpcURL),
	}
	if rpcURL == "" {
		res.Error = "empty rpc url"
		return res
	}
	client := &http.Client{Timeout: 6 * time.Second}
	// Step 1: latest block number — doubles as the liveness check.
	numRaw, lat1, err := postJSONRPC(ctx, client, rpcURL, "eth_blockNumber", []interface{}{})
	if err != nil {
		res.LatencyMs = lat1
		res.Error = err.Error()
		return res
	}
	var numHex string
	if err := json.Unmarshal(numRaw, &numHex); err != nil {
		res.LatencyMs = lat1
		res.Error = "blockNumber decode: " + err.Error()
		return res
	}
	res.BlockNumber = numHex
	// Best-effort decimal rendering of the hex block number.
	if n, err := strconv.ParseInt(strings.TrimPrefix(strings.TrimSpace(numHex), "0x"), 16, 64); err == nil {
		res.BlockNumberDec = strconv.FormatInt(n, 10)
	}
	// Step 2: fetch the latest header to derive head age. LatencyMs reports
	// the sum of both calls from here on.
	blockRaw, lat2, err := postJSONRPC(ctx, client, rpcURL, "eth_getBlockByNumber", []interface{}{"latest", false})
	res.LatencyMs = lat1 + lat2
	if err != nil {
		res.OK = true
		res.Error = "head block timestamp unavailable: " + err.Error()
		return res
	}
	var block struct {
		Timestamp string `json:"timestamp"`
	}
	if err := json.Unmarshal(blockRaw, &block); err != nil || block.Timestamp == "" {
		res.OK = true
		if err != nil {
			res.Error = "block decode: " + err.Error()
		}
		return res
	}
	tsHex := strings.TrimSpace(block.Timestamp)
	ts, err := strconv.ParseInt(strings.TrimPrefix(tsHex, "0x"), 16, 64)
	if err != nil {
		res.OK = true
		res.Error = "timestamp parse: " + err.Error()
		return res
	}
	bt := time.Unix(ts, 0)
	res.HeadAgeSeconds = time.Since(bt).Seconds()
	res.OK = true
	return res
}
// readOptionalVerifyJSON loads the JSON document referenced by the
// MISSION_CONTROL_VERIFY_JSON env var, if set.
//
// Returns nil when the variable is unset or blank. On any problem (missing
// or empty file, oversized file, invalid JSON) it returns a map describing
// the error together with the offending path, so callers can surface the
// failure in status payloads instead of aborting.
func readOptionalVerifyJSON() map[string]interface{} {
	path := strings.TrimSpace(os.Getenv("MISSION_CONTROL_VERIFY_JSON"))
	if path == "" {
		return nil
	}
	// Fix: check the size before reading. The previous guard ran only after
	// os.ReadFile, so an oversized file was fully loaded into memory before
	// being rejected. A stat failure falls through to ReadFile, which keeps
	// the original "unreadable or empty" result for missing files.
	if info, err := os.Stat(path); err == nil && info.Size() > 512*1024 {
		return map[string]interface{}{"error": "file too large", "path": path}
	}
	b, err := os.ReadFile(path)
	if err != nil || len(b) == 0 {
		return map[string]interface{}{"error": "unreadable or empty", "path": path}
	}
	// Belt-and-braces re-check in case the file grew between Stat and Read.
	if len(b) > 512*1024 {
		return map[string]interface{}{"error": "file too large", "path": path}
	}
	var v map[string]interface{}
	if err := json.Unmarshal(b, &v); err != nil {
		return map[string]interface{}{"error": err.Error(), "path": path}
	}
	return v
}
// ParseExtraRPCProbes reads MISSION_CONTROL_EXTRA_RPCS lines "name|url" or "name|url|chainKey".
// Blank lines, '#'-prefixed comment lines, and lines missing a name or URL
// are skipped. Each accepted entry is returned as [name, url, chainKey]
// with chainKey "" when absent.
func ParseExtraRPCProbes() [][3]string {
	env := strings.TrimSpace(os.Getenv("MISSION_CONTROL_EXTRA_RPCS"))
	if env == "" {
		return nil
	}
	var probes [][3]string
	for _, raw := range strings.Split(env, "\n") {
		entry := strings.TrimSpace(raw)
		if entry == "" || strings.HasPrefix(entry, "#") {
			continue
		}
		fields := strings.Split(entry, "|")
		if len(fields) < 2 {
			continue
		}
		name := strings.TrimSpace(fields[0])
		endpoint := strings.TrimSpace(fields[1])
		if name == "" || endpoint == "" {
			continue
		}
		chainKey := ""
		if len(fields) > 2 {
			chainKey = strings.TrimSpace(fields[2])
		}
		probes = append(probes, [3]string{name, endpoint, chainKey})
	}
	return probes
}

View File

@@ -1,14 +1,20 @@
package track2
import (
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/jackc/pgx/v5/pgxpool"
)
var track2HashPattern = regexp.MustCompile(`^0x[0-9a-fA-F]{64}$`)
// Server handles Track 2 endpoints
type Server struct {
db *pgxpool.Pool
@@ -29,6 +35,9 @@ func (s *Server) HandleAddressTransactions(w http.ResponseWriter, r *http.Reques
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
path := strings.TrimPrefix(r.URL.Path, "/api/v1/track2/address/")
parts := strings.Split(path, "/")
@@ -37,7 +46,11 @@ func (s *Server) HandleAddressTransactions(w http.ResponseWriter, r *http.Reques
return
}
address := strings.ToLower(parts[0])
address, err := normalizeTrack2Address(parts[0])
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
page, _ := strconv.Atoi(r.URL.Query().Get("page"))
if page < 1 {
page = 1
@@ -51,7 +64,7 @@ func (s *Server) HandleAddressTransactions(w http.ResponseWriter, r *http.Reques
query := `
SELECT hash, from_address, to_address, value, block_number, timestamp, status
FROM transactions
WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)
WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)
ORDER BY block_number DESC, timestamp DESC
LIMIT $3 OFFSET $4
`
@@ -92,7 +105,7 @@ func (s *Server) HandleAddressTransactions(w http.ResponseWriter, r *http.Reques
// Get total count
var total int
countQuery := `SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`
countQuery := `SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)`
s.db.QueryRow(r.Context(), countQuery, s.chainID, address).Scan(&total)
response := map[string]interface{}{
@@ -115,6 +128,9 @@ func (s *Server) HandleAddressTokens(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
path := strings.TrimPrefix(r.URL.Path, "/api/v1/track2/address/")
parts := strings.Split(path, "/")
@@ -123,12 +139,16 @@ func (s *Server) HandleAddressTokens(w http.ResponseWriter, r *http.Request) {
return
}
address := strings.ToLower(parts[0])
address, err := normalizeTrack2Address(parts[0])
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
query := `
SELECT token_contract, balance, last_updated_timestamp
FROM token_balances
WHERE address = $1 AND chain_id = $2 AND balance > 0
WHERE LOWER(address) = $1 AND chain_id = $2 AND balance > 0
ORDER BY balance DESC
`
@@ -151,7 +171,7 @@ func (s *Server) HandleAddressTokens(w http.ResponseWriter, r *http.Request) {
tokens = append(tokens, map[string]interface{}{
"contract": contract,
"balance": balance,
"balance_formatted": balance, // TODO: Format with decimals
"balance_formatted": nil,
"last_updated": lastUpdated,
})
}
@@ -174,14 +194,40 @@ func (s *Server) HandleTokenInfo(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
path := strings.TrimPrefix(r.URL.Path, "/api/v1/track2/token/")
contract := strings.ToLower(path)
contract, err := normalizeTrack2Address(path)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
// Get token info from token_transfers
query := `
SELECT
COUNT(DISTINCT from_address) + COUNT(DISTINCT to_address) as holders,
(
SELECT COUNT(*)
FROM (
SELECT from_address AS address
FROM token_transfers
WHERE token_contract = $1
AND chain_id = $2
AND timestamp >= NOW() - INTERVAL '24 hours'
AND from_address IS NOT NULL
AND from_address <> ''
UNION
SELECT to_address AS address
FROM token_transfers
WHERE token_contract = $1
AND chain_id = $2
AND timestamp >= NOW() - INTERVAL '24 hours'
AND to_address IS NOT NULL
AND to_address <> ''
) holder_addresses
) as holders,
COUNT(*) as transfers_24h,
SUM(value) as volume_24h
FROM token_transfers
@@ -191,7 +237,7 @@ func (s *Server) HandleTokenInfo(w http.ResponseWriter, r *http.Request) {
var holders, transfers24h int
var volume24h string
err := s.db.QueryRow(r.Context(), query, contract, s.chainID).Scan(&holders, &transfers24h, &volume24h)
err = s.db.QueryRow(r.Context(), query, contract, s.chainID).Scan(&holders, &transfers24h, &volume24h)
if err != nil {
writeError(w, http.StatusNotFound, "not_found", "Token not found")
return
@@ -216,15 +262,16 @@ func (s *Server) HandleSearch(w http.ResponseWriter, r *http.Request) {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
query := r.URL.Query().Get("q")
query := strings.TrimSpace(r.URL.Query().Get("q"))
if query == "" {
writeError(w, http.StatusBadRequest, "bad_request", "Query parameter 'q' is required")
return
}
query = strings.ToLower(strings.TrimPrefix(query, "0x"))
// Try to detect type and search
var result map[string]interface{}
@@ -241,13 +288,14 @@ func (s *Server) HandleSearch(w http.ResponseWriter, r *http.Request) {
},
}
}
} else if len(query) == 64 || len(query) == 40 {
// Could be address or transaction hash
fullQuery := "0x" + query
// Check transaction
} else if track2HashPattern.MatchString(query) {
hash, err := normalizeTrack2Hash(query)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
var txHash string
err := s.db.QueryRow(r.Context(), `SELECT hash FROM transactions WHERE chain_id = $1 AND hash = $2`, s.chainID, fullQuery).Scan(&txHash)
err = s.db.QueryRow(r.Context(), `SELECT hash FROM transactions WHERE chain_id = $1 AND LOWER(hash) = $2`, s.chainID, hash).Scan(&txHash)
if err == nil {
result = map[string]interface{}{
"type": "transaction",
@@ -255,18 +303,44 @@ func (s *Server) HandleSearch(w http.ResponseWriter, r *http.Request) {
"hash": txHash,
},
}
} else {
// Check address
}
} else if common.IsHexAddress(query) {
address, err := normalizeTrack2Address(query)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
var exists bool
existsQuery := `
SELECT EXISTS (
SELECT 1
FROM addresses
WHERE chain_id = $1 AND LOWER(address) = $2
UNION
SELECT 1
FROM transactions
WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)
UNION
SELECT 1
FROM token_balances
WHERE chain_id = $1 AND LOWER(address) = $2
)
`
err = s.db.QueryRow(r.Context(), existsQuery, s.chainID, address).Scan(&exists)
if err == nil && exists {
var balance string
err := s.db.QueryRow(r.Context(), `SELECT COALESCE(SUM(balance), '0') FROM token_balances WHERE address = $1 AND chain_id = $2`, fullQuery, s.chainID).Scan(&balance)
if err == nil {
result = map[string]interface{}{
"type": "address",
"result": map[string]interface{}{
"address": fullQuery,
"balance": balance,
},
}
err = s.db.QueryRow(r.Context(), `SELECT COALESCE(SUM(balance), '0') FROM token_balances WHERE LOWER(address) = $1 AND chain_id = $2`, address, s.chainID).Scan(&balance)
if err != nil {
balance = "0"
}
result = map[string]interface{}{
"type": "address",
"result": map[string]interface{}{
"address": address,
"balance": balance,
},
}
}
}
@@ -290,6 +364,9 @@ func (s *Server) HandleInternalTransactions(w http.ResponseWriter, r *http.Reque
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
path := strings.TrimPrefix(r.URL.Path, "/api/v1/track2/address/")
parts := strings.Split(path, "/")
@@ -298,7 +375,11 @@ func (s *Server) HandleInternalTransactions(w http.ResponseWriter, r *http.Reque
return
}
address := strings.ToLower(parts[0])
address, err := normalizeTrack2Address(parts[0])
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
page, _ := strconv.Atoi(r.URL.Query().Get("page"))
if page < 1 {
page = 1
@@ -312,7 +393,7 @@ func (s *Server) HandleInternalTransactions(w http.ResponseWriter, r *http.Reque
query := `
SELECT transaction_hash, from_address, to_address, value, block_number, timestamp
FROM internal_transactions
WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)
WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)
ORDER BY block_number DESC, timestamp DESC
LIMIT $3 OFFSET $4
`
@@ -345,7 +426,7 @@ func (s *Server) HandleInternalTransactions(w http.ResponseWriter, r *http.Reque
}
var total int
countQuery := `SELECT COUNT(*) FROM internal_transactions WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`
countQuery := `SELECT COUNT(*) FROM internal_transactions WHERE chain_id = $1 AND (LOWER(from_address) = $2 OR LOWER(to_address) = $2)`
s.db.QueryRow(r.Context(), countQuery, s.chainID, address).Scan(&total)
response := map[string]interface{}{
@@ -372,3 +453,30 @@ func writeError(w http.ResponseWriter, statusCode int, code, message string) {
},
})
}
// requireDB reports whether the server has a database pool configured.
// When it does not, a 503 service_unavailable error is written to w and
// false is returned so the caller can bail out.
func (s *Server) requireDB(w http.ResponseWriter) bool {
	if s.db != nil {
		return true
	}
	writeError(w, http.StatusServiceUnavailable, "service_unavailable", "database not configured")
	return false
}
// normalizeTrack2Address validates a hex account address and canonicalizes
// it to its lower-case 0x-prefixed form for case-insensitive DB comparisons.
func normalizeTrack2Address(value string) (string, error) {
	candidate := strings.TrimSpace(value)
	if !common.IsHexAddress(candidate) {
		return "", fmt.Errorf("invalid address format")
	}
	return strings.ToLower(common.HexToAddress(candidate).Hex()), nil
}
// normalizeTrack2Hash validates a 0x-prefixed 32-byte transaction hash and
// returns it lower-cased. The hex decode is a belt-and-braces check on top
// of the regexp match.
func normalizeTrack2Hash(value string) (string, error) {
	candidate := strings.TrimSpace(value)
	if !track2HashPattern.MatchString(candidate) {
		return "", fmt.Errorf("invalid transaction hash")
	}
	if _, err := hex.DecodeString(candidate[2:]); err != nil {
		return "", fmt.Errorf("invalid transaction hash")
	}
	return strings.ToLower(candidate), nil
}

View File

@@ -2,11 +2,13 @@ package track3
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/explorer/backend/analytics"
"github.com/jackc/pgx/v5/pgxpool"
)
@@ -35,9 +37,29 @@ func NewServer(db *pgxpool.Pool, chainID int) *Server {
// HandleFlows handles GET /api/v1/track3/analytics/flows
func (s *Server) HandleFlows(w http.ResponseWriter, r *http.Request) {
from := r.URL.Query().Get("from")
to := r.URL.Query().Get("to")
token := r.URL.Query().Get("token")
if r.Method != http.MethodGet {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
from, err := normalizeTrack3OptionalAddress(r.URL.Query().Get("from"))
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
to, err := normalizeTrack3OptionalAddress(r.URL.Query().Get("to"))
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
token, err := normalizeTrack3OptionalAddress(r.URL.Query().Get("token"))
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
if limit < 1 || limit > 200 {
limit = 50
@@ -45,14 +67,20 @@ func (s *Server) HandleFlows(w http.ResponseWriter, r *http.Request) {
var startDate, endDate *time.Time
if startStr := r.URL.Query().Get("start_date"); startStr != "" {
if t, err := time.Parse(time.RFC3339, startStr); err == nil {
startDate = &t
t, err := time.Parse(time.RFC3339, startStr)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", "invalid start_date")
return
}
startDate = &t
}
if endStr := r.URL.Query().Get("end_date"); endStr != "" {
if t, err := time.Parse(time.RFC3339, endStr); err == nil {
endDate = &t
t, err := time.Parse(time.RFC3339, endStr)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", "invalid end_date")
return
}
endDate = &t
}
flows, err := s.flowTracker.GetFlows(r.Context(), from, to, token, startDate, endDate, limit)
@@ -73,28 +101,48 @@ func (s *Server) HandleFlows(w http.ResponseWriter, r *http.Request) {
// HandleBridge handles GET /api/v1/track3/analytics/bridge
func (s *Server) HandleBridge(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
var chainFrom, chainTo *int
if cf := r.URL.Query().Get("chain_from"); cf != "" {
if c, err := strconv.Atoi(cf); err == nil {
chainFrom = &c
c, err := strconv.Atoi(cf)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", "invalid chain_from")
return
}
chainFrom = &c
}
if ct := r.URL.Query().Get("chain_to"); ct != "" {
if c, err := strconv.Atoi(ct); err == nil {
chainTo = &c
c, err := strconv.Atoi(ct)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", "invalid chain_to")
return
}
chainTo = &c
}
var startDate, endDate *time.Time
if startStr := r.URL.Query().Get("start_date"); startStr != "" {
if t, err := time.Parse(time.RFC3339, startStr); err == nil {
startDate = &t
t, err := time.Parse(time.RFC3339, startStr)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", "invalid start_date")
return
}
startDate = &t
}
if endStr := r.URL.Query().Get("end_date"); endStr != "" {
if t, err := time.Parse(time.RFC3339, endStr); err == nil {
endDate = &t
t, err := time.Parse(time.RFC3339, endStr)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", "invalid end_date")
return
}
endDate = &t
}
stats, err := s.bridgeAnalytics.GetBridgeStats(r.Context(), chainFrom, chainTo, startDate, endDate)
@@ -113,8 +161,20 @@ func (s *Server) HandleBridge(w http.ResponseWriter, r *http.Request) {
// HandleTokenDistribution handles GET /api/v1/track3/analytics/token-distribution
func (s *Server) HandleTokenDistribution(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
path := strings.TrimPrefix(r.URL.Path, "/api/v1/track3/analytics/token-distribution/")
contract := strings.ToLower(path)
contract, err := normalizeTrack3RequiredAddress(path)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
topN, _ := strconv.Atoi(r.URL.Query().Get("top_n"))
if topN < 1 || topN > 1000 {
@@ -137,8 +197,20 @@ func (s *Server) HandleTokenDistribution(w http.ResponseWriter, r *http.Request)
// HandleAddressRisk handles GET /api/v1/track3/analytics/address-risk/:addr
func (s *Server) HandleAddressRisk(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if !s.requireDB(w) {
return
}
path := strings.TrimPrefix(r.URL.Path, "/api/v1/track3/analytics/address-risk/")
address := strings.ToLower(path)
address, err := normalizeTrack3RequiredAddress(path)
if err != nil {
writeError(w, http.StatusBadRequest, "bad_request", err.Error())
return
}
analysis, err := s.riskAnalyzer.AnalyzeAddress(r.Context(), address)
if err != nil {
@@ -165,3 +237,32 @@ func writeError(w http.ResponseWriter, statusCode int, code, message string) {
})
}
// requireDB reports whether the server has a database pool configured.
// When it does not, a 503 service_unavailable error is written to w and
// false is returned so the caller can bail out.
func (s *Server) requireDB(w http.ResponseWriter) bool {
	if s.db != nil {
		return true
	}
	writeError(w, http.StatusServiceUnavailable, "service_unavailable", "database not configured")
	return false
}
// normalizeTrack3OptionalAddress returns "" with no error for blank input;
// otherwise it validates the hex address and canonicalizes it to its
// lower-case 0x-prefixed form.
func normalizeTrack3OptionalAddress(value string) (string, error) {
	candidate := strings.TrimSpace(value)
	switch {
	case candidate == "":
		return "", nil
	case !common.IsHexAddress(candidate):
		return "", fmt.Errorf("invalid address format")
	default:
		return strings.ToLower(common.HexToAddress(candidate).Hex()), nil
	}
}
// normalizeTrack3RequiredAddress behaves like normalizeTrack3OptionalAddress
// but additionally rejects blank input with an "address required" error.
func normalizeTrack3RequiredAddress(value string) (string, error) {
	normalized, err := normalizeTrack3OptionalAddress(value)
	switch {
	case err != nil:
		return "", err
	case normalized == "":
		return "", fmt.Errorf("address required")
	}
	return normalized, nil
}

View File

@@ -1,8 +1,15 @@
package track4
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/explorer/backend/auth"
@@ -11,48 +18,52 @@ import (
// Server handles Track 4 endpoints
type Server struct {
db *pgxpool.Pool
roleMgr *auth.RoleManager
chainID int
db *pgxpool.Pool
roleMgr roleManager
chainID int
}
// NewServer creates a new Track 4 server
func NewServer(db *pgxpool.Pool, chainID int) *Server {
return &Server{
db: db,
roleMgr: auth.NewRoleManager(db),
chainID: chainID,
db: db,
roleMgr: auth.NewRoleManager(db),
chainID: chainID,
}
}
// HandleBridgeEvents handles GET /api/v1/track4/operator/bridge/events
func (s *Server) HandleBridgeEvents(w http.ResponseWriter, r *http.Request) {
// Get operator address from context
operatorAddr, _ := r.Context().Value("user_address").(string)
if operatorAddr == "" {
writeError(w, http.StatusUnauthorized, "unauthorized", "Operator address required")
if r.Method != http.MethodGet {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
// Check IP whitelist
ipAddr := r.RemoteAddr
if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
operatorAddr, ipAddr, ok := s.requireOperatorAccess(w, r)
if !ok {
return
}
// Log operator event
s.roleMgr.LogOperatorEvent(r.Context(), "bridge_events_read", &s.chainID, operatorAddr, "bridge/events", "read", map[string]interface{}{}, ipAddr, r.UserAgent())
events, lastUpdate, err := s.loadBridgeEvents(r.Context(), 100)
if err != nil {
writeError(w, http.StatusInternalServerError, "database_error", err.Error())
return
}
s.roleMgr.LogOperatorEvent(r.Context(), "bridge_events_read", &s.chainID, operatorAddr, "bridge/events", "read", map[string]interface{}{"event_count": len(events)}, ipAddr, r.UserAgent())
controlState := map[string]interface{}{
"paused": nil,
"maintenance_mode": nil,
"bridge_control_unavailable": true,
}
if !lastUpdate.IsZero() {
controlState["last_update"] = lastUpdate.UTC().Format(time.RFC3339)
}
// Return bridge events (simplified)
response := map[string]interface{}{
"data": map[string]interface{}{
"events": []map[string]interface{}{},
"control_state": map[string]interface{}{
"paused": false,
"maintenance_mode": false,
"last_update": time.Now().UTC().Format(time.RFC3339),
},
"events": events,
"control_state": controlState,
},
}
@@ -62,21 +73,29 @@ func (s *Server) HandleBridgeEvents(w http.ResponseWriter, r *http.Request) {
// HandleValidators handles GET /api/v1/track4/operator/validators
func (s *Server) HandleValidators(w http.ResponseWriter, r *http.Request) {
operatorAddr, _ := r.Context().Value("user_address").(string)
ipAddr := r.RemoteAddr
if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
if r.Method != http.MethodGet {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
s.roleMgr.LogOperatorEvent(r.Context(), "validators_read", &s.chainID, operatorAddr, "validators", "read", map[string]interface{}{}, ipAddr, r.UserAgent())
operatorAddr, ipAddr, ok := s.requireOperatorAccess(w, r)
if !ok {
return
}
validators, err := s.loadValidatorStatus(r.Context())
if err != nil {
writeError(w, http.StatusInternalServerError, "database_error", err.Error())
return
}
s.roleMgr.LogOperatorEvent(r.Context(), "validators_read", &s.chainID, operatorAddr, "validators", "read", map[string]interface{}{"validator_count": len(validators)}, ipAddr, r.UserAgent())
response := map[string]interface{}{
"data": map[string]interface{}{
"validators": []map[string]interface{}{},
"total_validators": 0,
"active_validators": 0,
"validators": validators,
"total_validators": len(validators),
"active_validators": len(validators),
},
}
@@ -86,19 +105,38 @@ func (s *Server) HandleValidators(w http.ResponseWriter, r *http.Request) {
// HandleContracts handles GET /api/v1/track4/operator/contracts
func (s *Server) HandleContracts(w http.ResponseWriter, r *http.Request) {
operatorAddr, _ := r.Context().Value("user_address").(string)
ipAddr := r.RemoteAddr
if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
if r.Method != http.MethodGet {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
s.roleMgr.LogOperatorEvent(r.Context(), "contracts_read", &s.chainID, operatorAddr, "contracts", "read", map[string]interface{}{}, ipAddr, r.UserAgent())
operatorAddr, ipAddr, ok := s.requireOperatorAccess(w, r)
if !ok {
return
}
chainID := s.chainID
if raw := strings.TrimSpace(r.URL.Query().Get("chain_id")); raw != "" {
parsed, err := strconv.Atoi(raw)
if err != nil || parsed < 0 {
writeError(w, http.StatusBadRequest, "bad_request", "invalid chain_id")
return
}
chainID = parsed
}
typeFilter := strings.TrimSpace(strings.ToLower(r.URL.Query().Get("type")))
contracts, err := s.loadContractStatus(r.Context(), chainID, typeFilter)
if err != nil {
writeError(w, http.StatusInternalServerError, "database_error", err.Error())
return
}
s.roleMgr.LogOperatorEvent(r.Context(), "contracts_read", &s.chainID, operatorAddr, "contracts", "read", map[string]interface{}{"contract_count": len(contracts), "chain_id": chainID, "type": typeFilter}, ipAddr, r.UserAgent())
response := map[string]interface{}{
"data": map[string]interface{}{
"contracts": []map[string]interface{}{},
"contracts": contracts,
},
}
@@ -108,35 +146,26 @@ func (s *Server) HandleContracts(w http.ResponseWriter, r *http.Request) {
// HandleProtocolState handles GET /api/v1/track4/operator/protocol-state
func (s *Server) HandleProtocolState(w http.ResponseWriter, r *http.Request) {
operatorAddr, _ := r.Context().Value("user_address").(string)
ipAddr := r.RemoteAddr
if r.Method != http.MethodGet {
writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
return
}
if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
operatorAddr, ipAddr, ok := s.requireOperatorAccess(w, r)
if !ok {
return
}
state, err := s.loadProtocolState(r.Context())
if err != nil {
writeError(w, http.StatusInternalServerError, "database_error", err.Error())
return
}
s.roleMgr.LogOperatorEvent(r.Context(), "protocol_state_read", &s.chainID, operatorAddr, "protocol/state", "read", map[string]interface{}{}, ipAddr, r.UserAgent())
response := map[string]interface{}{
"data": map[string]interface{}{
"protocol_version": "1.0.0",
"chain_id": s.chainID,
"config": map[string]interface{}{
"bridge_enabled": true,
"max_transfer_amount": "1000000000000000000000000",
},
"state": map[string]interface{}{
"total_locked": "50000000000000000000000000",
"total_bridged": "10000000000000000000000000",
"active_bridges": 2,
},
"last_updated": time.Now().UTC().Format(time.RFC3339),
},
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
json.NewEncoder(w).Encode(map[string]interface{}{"data": state})
}
func writeError(w http.ResponseWriter, statusCode int, code, message string) {
@@ -150,3 +179,406 @@ func writeError(w http.ResponseWriter, statusCode int, code, message string) {
})
}
// requireOperatorAccess performs the shared Track-4 operator gate checks:
// database availability, an authenticated operator address in the request
// context, and IP allowlisting. On failure it writes the appropriate error
// response and returns ok=false; callers must simply return.
func (s *Server) requireOperatorAccess(w http.ResponseWriter, r *http.Request) (string, string, bool) {
	if s.db == nil {
		writeError(w, http.StatusServiceUnavailable, "service_unavailable", "database not configured")
		return "", "", false
	}
	// The auth middleware stores the verified wallet address under
	// "user_address"; a missing/blank value means the request is unauthenticated.
	addr, _ := r.Context().Value("user_address").(string)
	if addr = strings.TrimSpace(addr); addr == "" {
		writeError(w, http.StatusUnauthorized, "unauthorized", "Operator address required")
		return "", "", false
	}
	ip := clientIPAddress(r)
	switch allowed, err := s.roleMgr.IsIPWhitelisted(r.Context(), addr, ip); {
	case err != nil:
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return "", "", false
	case !allowed:
		writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
		return "", "", false
	}
	return addr, ip, true
}
// loadBridgeEvents returns up to limit bridge-related operator audit events
// for this chain (newest first) together with the timestamp of the most
// recent matching event. The zero time is returned when nothing matches.
func (s *Server) loadBridgeEvents(ctx context.Context, limit int) ([]map[string]interface{}, time.Time, error) {
	rows, err := s.db.Query(ctx, `
	SELECT event_type, operator_address, target_resource, action, details, COALESCE(ip_address::text, ''), COALESCE(user_agent, ''), timestamp
	FROM operator_events
	WHERE (chain_id = $1 OR chain_id IS NULL)
	AND (
		event_type ILIKE '%bridge%'
		OR target_resource ILIKE 'bridge%'
		OR target_resource ILIKE '%bridge%'
	)
	ORDER BY timestamp DESC
	LIMIT $2
	`, s.chainID, limit)
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("failed to query bridge events: %w", err)
	}
	defer rows.Close()

	out := make([]map[string]interface{}, 0, limit)
	var newest time.Time
	for rows.Next() {
		var (
			evType, opAddr, resource, act, ip, agent string
			rawDetails                               []byte
			ts                                       time.Time
		)
		if err := rows.Scan(&evType, &opAddr, &resource, &act, &rawDetails, &ip, &agent, &ts); err != nil {
			return nil, time.Time{}, fmt.Errorf("failed to scan bridge event: %w", err)
		}
		detail := map[string]interface{}{}
		if len(rawDetails) > 0 && string(rawDetails) != "null" {
			// Best effort: malformed JSONB details degrade to an empty map.
			_ = json.Unmarshal(rawDetails, &detail)
		}
		// Rows arrive newest-first, so the first timestamp is the latest.
		if newest.IsZero() {
			newest = ts
		}
		out = append(out, map[string]interface{}{
			"event_type":       evType,
			"operator_address": opAddr,
			"target_resource":  resource,
			"action":           act,
			"details":          detail,
			"ip_address":       ip,
			"user_agent":       agent,
			"timestamp":        ts.UTC().Format(time.RFC3339),
		})
	}
	return out, newest, rows.Err()
}
// loadValidatorStatus lists approved Track-4 operators (track_level >= 4)
// with their role names, an inferred role scope, and the most recent
// activity timestamp taken from the operator_events audit log (falling back
// to the role row's updated_at / approved_at).
//
// Stake, uptime, and last_block are not tracked in this database, so they
// are reported as nil rather than fabricated; every returned operator is
// labelled "active" because only approved rows are selected.
func (s *Server) loadValidatorStatus(ctx context.Context) ([]map[string]interface{}, error) {
	rows, err := s.db.Query(ctx, `
	SELECT r.address, COALESCE(r.roles, '{}'), COALESCE(oe.last_seen, r.updated_at, r.approved_at), r.track_level
	FROM operator_roles r
	LEFT JOIN LATERAL (
		SELECT MAX(timestamp) AS last_seen
		FROM operator_events
		WHERE operator_address = r.address
	) oe ON TRUE
	WHERE r.approved = TRUE AND r.track_level >= 4
	ORDER BY COALESCE(oe.last_seen, r.updated_at, r.approved_at) DESC NULLS LAST, r.address
	`)
	if err != nil {
		return nil, fmt.Errorf("failed to query validator status: %w", err)
	}
	defer rows.Close()
	validators := make([]map[string]interface{}, 0)
	for rows.Next() {
		var address string
		var roles []string
		// Fix: scan into *time.Time. The COALESCE result is still NULL when
		// an operator has no events and both updated_at and approved_at are
		// NULL (the ORDER BY's "NULLS LAST" shows that case is expected);
		// scanning NULL into a bare time.Time makes Scan fail and the whole
		// endpoint return 500.
		var lastSeen *time.Time
		var trackLevel int
		if err := rows.Scan(&address, &roles, &lastSeen, &trackLevel); err != nil {
			return nil, fmt.Errorf("failed to scan validator row: %w", err)
		}
		// Default scope when no role name suggests a more specific one.
		roleScope := "operator"
		if inferred := inferOperatorScope(roles); inferred != "" {
			roleScope = inferred
		}
		row := map[string]interface{}{
			"address":     address,
			"status":      "active",
			"stake":       nil,
			"uptime":      nil,
			"last_block":  nil,
			"track_level": trackLevel,
			"roles":       roles,
			"role_scope":  roleScope,
		}
		if lastSeen != nil && !lastSeen.IsZero() {
			row["last_seen"] = lastSeen.UTC().Format(time.RFC3339)
		}
		validators = append(validators, row)
	}
	return validators, rows.Err()
}
// contractRegistryEntry is one contract record sourced from the
// smart-contracts-master.json registry for a specific chain.
type contractRegistryEntry struct {
	Address string // deployed contract address exactly as written in the registry
	ChainID int    // chain the contract is deployed on
	Name    string // registry key, e.g. "CCIP_ROUTER"
	Type    string // coarse category inferred from Name (see inferContractType)
}
// loadContractStatus merges the static contract registry with verification
// rows from the contracts table for chainID, optionally filtered by a
// lowercase contract type. Registry entries not present in the DB get status
// "registry_only"; DB rows missing from the registry are included with a
// type inferred from their name. Results are sorted by name, then address.
func (s *Server) loadContractStatus(ctx context.Context, chainID int, typeFilter string) ([]map[string]interface{}, error) {
	// contractRow holds the DB-side verification metadata for one address.
	type contractRow struct {
		Name         string
		Status       string
		Compiler     string
		LastVerified *time.Time
	}
	// Keyed by lowercased address so registry/DB addresses match regardless
	// of checksum casing.
	dbRows := map[string]contractRow{}
	rows, err := s.db.Query(ctx, `
	SELECT LOWER(address), COALESCE(name, ''), verification_status, compiler_version, verified_at
	FROM contracts
	WHERE chain_id = $1
	`, chainID)
	if err != nil {
		return nil, fmt.Errorf("failed to query contracts: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var address string
		var row contractRow
		if err := rows.Scan(&address, &row.Name, &row.Status, &row.Compiler, &row.LastVerified); err != nil {
			return nil, fmt.Errorf("failed to scan contract row: %w", err)
		}
		dbRows[address] = row
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// Registry lookup is best-effort: if the JSON file is missing or broken
	// we still serve whatever the database knows about.
	registryEntries, err := loadContractRegistry(chainID)
	if err != nil {
		registryEntries = nil
	}
	seen := map[string]bool{}
	contracts := make([]map[string]interface{}, 0, len(registryEntries)+len(dbRows))
	// appendRow applies the type filter, builds one response row, and marks
	// the address as handled so the DB-only pass below can skip it.
	appendRow := func(address, name, contractType, status, version string, lastVerified *time.Time) {
		if typeFilter != "" && contractType != typeFilter {
			return
		}
		row := map[string]interface{}{
			"address":  address,
			"chain_id": chainID,
			"type":     contractType,
			"name":     name,
			"status":   status,
		}
		if version != "" {
			row["version"] = version
		}
		if lastVerified != nil && !lastVerified.IsZero() {
			row["last_verified"] = lastVerified.UTC().Format(time.RFC3339)
		}
		contracts = append(contracts, row)
		seen[address] = true
	}
	// Pass 1: registry entries, enriched with DB verification data when present.
	for _, entry := range registryEntries {
		lowerAddress := strings.ToLower(entry.Address)
		dbRow, ok := dbRows[lowerAddress]
		status := "registry_only"
		version := ""
		name := entry.Name
		var lastVerified *time.Time
		if ok {
			// Prefer the DB's human-entered name over the registry key.
			if dbRow.Name != "" {
				name = dbRow.Name
			}
			status = dbRow.Status
			version = dbRow.Compiler
			lastVerified = dbRow.LastVerified
		}
		appendRow(lowerAddress, name, entry.Type, status, version, lastVerified)
	}
	// Pass 2: DB rows that have no registry entry.
	for address, row := range dbRows {
		if seen[address] {
			continue
		}
		contractType := inferContractType(row.Name)
		appendRow(address, fallbackString(row.Name, address), contractType, row.Status, row.Compiler, row.LastVerified)
	}
	// Deterministic ordering for the API response (map iteration above is random).
	sort.Slice(contracts, func(i, j int) bool {
		left, _ := contracts[i]["name"].(string)
		right, _ := contracts[j]["name"].(string)
		if left == right {
			return contracts[i]["address"].(string) < contracts[j]["address"].(string)
		}
		return left < right
	})
	return contracts, nil
}
// loadProtocolState assembles the operator-facing protocol summary from the
// analytics_bridge_history table plus the static contract registry. Fields
// that have no backing data source (total_locked, max_transfer_amount) are
// reported as nil with explicit *_unavailable markers instead of fabricated
// values.
func (s *Server) loadProtocolState(ctx context.Context) (map[string]interface{}, error) {
	var totalBridged string
	var activeBridges int
	var lastBridgeAt *time.Time // pointer: MAX(timestamp) is NULL when no rows match
	err := s.db.QueryRow(ctx, `
	SELECT
		COALESCE(SUM(amount)::text, '0'),
		COUNT(DISTINCT CONCAT(chain_from, ':', chain_to)),
		MAX(timestamp)
	FROM analytics_bridge_history
	WHERE status ILIKE 'success%'
	AND (chain_from = $1 OR chain_to = $1)
	`, s.chainID).Scan(&totalBridged, &activeBridges, &lastBridgeAt)
	if err != nil {
		return nil, fmt.Errorf("failed to query protocol state: %w", err)
	}
	// Registry read is best-effort (errors ignored): it only serves as a
	// fallback signal for bridge_enabled below.
	registryEntries, _ := loadContractRegistry(s.chainID)
	// Bridging counts as enabled if there is observed traffic, or failing
	// that, if a bridge-typed contract is registered for this chain.
	bridgeEnabled := activeBridges > 0
	if !bridgeEnabled {
		for _, entry := range registryEntries {
			if entry.Type == "bridge" {
				bridgeEnabled = true
				break
			}
		}
	}
	// Version comes from the environment; EXPLORER_PROTOCOL_VERSION takes
	// precedence over the generic PROTOCOL_VERSION.
	protocolVersion := strings.TrimSpace(os.Getenv("EXPLORER_PROTOCOL_VERSION"))
	if protocolVersion == "" {
		protocolVersion = strings.TrimSpace(os.Getenv("PROTOCOL_VERSION"))
	}
	if protocolVersion == "" {
		protocolVersion = "unknown"
	}
	data := map[string]interface{}{
		"protocol_version": protocolVersion,
		"chain_id":         s.chainID,
		"config": map[string]interface{}{
			"bridge_enabled":                  bridgeEnabled,
			"max_transfer_amount":             nil,
			"max_transfer_amount_unavailable": true,
			"fee_structure":                   nil,
		},
		"state": map[string]interface{}{
			"total_locked":             nil,
			"total_locked_unavailable": true,
			"total_bridged":            totalBridged,
			"active_bridges":           activeBridges,
		},
	}
	// Prefer the timestamp of the latest successful bridge; fall back to "now".
	if lastBridgeAt != nil && !lastBridgeAt.IsZero() {
		data["last_updated"] = lastBridgeAt.UTC().Format(time.RFC3339)
	} else {
		data["last_updated"] = time.Now().UTC().Format(time.RFC3339)
	}
	return data, nil
}
// loadContractRegistry reads the smart-contracts master registry JSON and
// returns the contract entries for chainID, sorted by name then address.
// The file location can be overridden with SMART_CONTRACTS_MASTER_JSON;
// otherwise several conventional relative paths are probed. A nil slice
// with a nil error means the registry exists but has no section for this
// chain; an error means no readable registry file was found or it failed
// to parse.
func loadContractRegistry(chainID int) ([]contractRegistryEntry, error) {
	paths := make([]string, 0, 5)
	if override := strings.TrimSpace(os.Getenv("SMART_CONTRACTS_MASTER_JSON")); override != "" {
		paths = append(paths, override)
	}
	paths = append(paths,
		"config/smart-contracts-master.json",
		"../config/smart-contracts-master.json",
		"../../config/smart-contracts-master.json",
		filepath.Join("explorer-monorepo", "config", "smart-contracts-master.json"),
	)

	// First readable, non-empty candidate wins.
	var payload []byte
	for _, p := range paths {
		if strings.TrimSpace(p) == "" {
			continue
		}
		if data, err := os.ReadFile(p); err == nil && len(data) > 0 {
			payload = data
			break
		}
	}
	if len(payload) == 0 {
		return nil, fmt.Errorf("smart-contracts-master.json not found")
	}

	var doc struct {
		Chains map[string]struct {
			Contracts map[string]string `json:"contracts"`
		} `json:"chains"`
	}
	if err := json.Unmarshal(payload, &doc); err != nil {
		return nil, fmt.Errorf("failed to parse contract registry: %w", err)
	}

	section, found := doc.Chains[strconv.Itoa(chainID)]
	if !found {
		return nil, nil
	}

	entries := make([]contractRegistryEntry, 0, len(section.Contracts))
	for name, rawAddr := range section.Contracts {
		addr := strings.TrimSpace(rawAddr)
		if addr == "" {
			// Skip placeholder entries with no deployed address.
			continue
		}
		entries = append(entries, contractRegistryEntry{
			Address: addr,
			ChainID: chainID,
			Name:    name,
			Type:    inferContractType(name),
		})
	}
	// Stable ordering: by name, tie-broken by case-insensitive address.
	sort.Slice(entries, func(i, j int) bool {
		if entries[i].Name == entries[j].Name {
			return strings.ToLower(entries[i].Address) < strings.ToLower(entries[j].Address)
		}
		return entries[i].Name < entries[j].Name
	})
	return entries, nil
}
// inferOperatorScope maps an operator's role names onto a coarse scope
// label. Roles are examined in order; within each role, "validator"
// outranks "sequencer", which outranks "bridge". An empty string means no
// role name suggested a scope.
func inferOperatorScope(roles []string) string {
	scopes := [...]string{"validator", "sequencer", "bridge"}
	for _, role := range roles {
		lowered := strings.ToLower(role)
		for _, scope := range scopes {
			if strings.Contains(lowered, scope) {
				return scope
			}
		}
	}
	return ""
}
// inferContractType derives a coarse contract category from a registry or
// database name via case-insensitive substring matching. Rules are checked
// in priority order (bridge > router > liquidity > oracle > vault > token);
// anything unmatched falls back to "contract".
func inferContractType(name string) string {
	lowered := strings.ToLower(name)
	rules := []struct {
		kind     string
		keywords []string
	}{
		{"bridge", []string{"bridge"}},
		{"router", []string{"router"}},
		{"liquidity", []string{"pool", "pmm", "amm"}},
		{"oracle", []string{"oracle"}},
		{"vault", []string{"vault"}},
		// "cw" matches the chain's native cW token naming convention.
		{"token", []string{"token", "weth", "cw", "usdt", "usdc"}},
	}
	for _, rule := range rules {
		for _, keyword := range rule.keywords {
			if strings.Contains(lowered, keyword) {
				return rule.kind
			}
		}
	}
	return "contract"
}
// fallbackString returns value unless it is empty or all whitespace, in
// which case fallback is returned instead.
func fallbackString(value, fallback string) string {
	if strings.TrimSpace(value) != "" {
		return value
	}
	return fallback
}

View File

@@ -0,0 +1,63 @@
package track4
import (
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
)
// TestHandleValidatorsRejectsNonGET verifies the validators endpoint
// enforces the GET-only method guard before any auth or DB work.
func TestHandleValidatorsRejectsNonGET(t *testing.T) {
	server := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodPost, "/api/v1/track4/operator/validators", nil)
	w := httptest.NewRecorder()
	server.HandleValidators(w, req)
	if w.Code != http.StatusMethodNotAllowed {
		t.Fatalf("expected 405 for non-GET validators request, got %d", w.Code)
	}
}
// TestHandleContractsRequiresDatabase verifies that a server constructed
// without a database pool fails closed with 503 instead of panicking.
func TestHandleContractsRequiresDatabase(t *testing.T) {
	server := NewServer(nil, 138)
	req := httptest.NewRequest(http.MethodGet, "/api/v1/track4/operator/contracts", nil)
	w := httptest.NewRecorder()
	server.HandleContracts(w, req)
	if w.Code != http.StatusServiceUnavailable {
		t.Fatalf("expected 503 when track4 DB is missing, got %d", w.Code)
	}
}
// TestLoadContractRegistryReadsConfiguredFile verifies that the
// SMART_CONTRACTS_MASTER_JSON override is honored and that entry types
// are inferred from the registry key names.
func TestLoadContractRegistryReadsConfiguredFile(t *testing.T) {
	tempDir := t.TempDir()
	registryPath := filepath.Join(tempDir, "smart-contracts-master.json")
	err := os.WriteFile(registryPath, []byte(`{
  "chains": {
    "138": {
      "contracts": {
        "CCIP_ROUTER": "0x1111111111111111111111111111111111111111",
        "CHAIN138_BRIDGE": "0x2222222222222222222222222222222222222222"
      }
    }
  }
}`), 0o644)
	if err != nil {
		t.Fatalf("failed to write temp registry: %v", err)
	}
	// t.Setenv restores the previous value automatically after the test.
	t.Setenv("SMART_CONTRACTS_MASTER_JSON", registryPath)
	entries, err := loadContractRegistry(138)
	if err != nil {
		t.Fatalf("loadContractRegistry returned error: %v", err)
	}
	if len(entries) != 2 {
		t.Fatalf("expected 2 registry entries, got %d", len(entries))
	}
	if entries[0].Type == "" || entries[1].Type == "" {
		t.Fatal("expected contract types to be inferred")
	}
}

View File

@@ -0,0 +1,209 @@
package track4
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// runScriptRequest is the JSON request body for the run-script endpoint.
type runScriptRequest struct {
	Script string   `json:"script"` // path relative to OPERATOR_SCRIPTS_ROOT
	Args   []string `json:"args"`   // positional arguments passed through to the script
}
// HandleRunScript handles POST /api/v1/track4/operator/run-script
// Requires Track 4 auth, IP whitelist, OPERATOR_SCRIPTS_ROOT, and OPERATOR_SCRIPT_ALLOWLIST.
//
// Execution flow: method/auth/IP checks, environment validation, request
// parsing and sanitization, path containment + allowlist checks, then a
// timeout-bounded run whose stdout/stderr/exit code are returned as JSON.
// Both the attempt and the result are written to the operator audit log.
func (s *Server) HandleRunScript(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// Auth middleware places the verified wallet address in the context.
	operatorAddr, _ := r.Context().Value("user_address").(string)
	if operatorAddr == "" {
		writeError(w, http.StatusUnauthorized, "unauthorized", "Operator address required")
		return
	}
	ipAddr := clientIPAddress(r)
	// NOTE(review): the IsIPWhitelisted error is discarded; this fails closed
	// (whitelisted defaults to false) but hides DB errors as 403s — consider
	// mirroring requireOperatorAccess's error handling.
	if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
		writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
		return
	}
	// Both env vars are mandatory; the endpoint is disabled (503) otherwise.
	root := strings.TrimSpace(os.Getenv("OPERATOR_SCRIPTS_ROOT"))
	if root == "" {
		writeError(w, http.StatusServiceUnavailable, "service_unavailable", "OPERATOR_SCRIPTS_ROOT not configured")
		return
	}
	rootAbs, err := filepath.Abs(root)
	if err != nil || rootAbs == "" {
		writeError(w, http.StatusInternalServerError, "internal_error", "invalid OPERATOR_SCRIPTS_ROOT")
		return
	}
	allowRaw := strings.TrimSpace(os.Getenv("OPERATOR_SCRIPT_ALLOWLIST"))
	if allowRaw == "" {
		writeError(w, http.StatusServiceUnavailable, "service_unavailable", "OPERATOR_SCRIPT_ALLOWLIST not configured")
		return
	}
	// Comma-separated allowlist; blank entries are dropped.
	var allow []string
	for _, p := range strings.Split(allowRaw, ",") {
		p = strings.TrimSpace(p)
		if p != "" {
			allow = append(allow, p)
		}
	}
	if len(allow) == 0 {
		writeError(w, http.StatusServiceUnavailable, "service_unavailable", "OPERATOR_SCRIPT_ALLOWLIST empty")
		return
	}
	// Body is capped at 1 MiB and must not contain unknown fields.
	var reqBody runScriptRequest
	dec := json.NewDecoder(io.LimitReader(r.Body, 1<<20))
	dec.DisallowUnknownFields()
	if err := dec.Decode(&reqBody); err != nil {
		writeError(w, http.StatusBadRequest, "bad_request", "invalid JSON body")
		return
	}
	// Reject empty paths and any ".." sequence up front (coarse traversal guard;
	// the filepath.Rel containment check below is the authoritative one).
	script := strings.TrimSpace(reqBody.Script)
	if script == "" || strings.Contains(script, "..") {
		writeError(w, http.StatusBadRequest, "bad_request", "invalid script path")
		return
	}
	if len(reqBody.Args) > 24 {
		writeError(w, http.StatusBadRequest, "bad_request", "too many args (max 24)")
		return
	}
	// NUL bytes would truncate args at the execve boundary.
	for _, a := range reqBody.Args {
		if strings.Contains(a, "\x00") {
			writeError(w, http.StatusBadRequest, "bad_request", "invalid arg")
			return
		}
	}
	// Containment: the cleaned candidate path must stay under rootAbs.
	// NOTE(review): symlinks are not resolved (no filepath.EvalSymlinks), so a
	// symlink inside the root could point outside it — confirm the scripts
	// directory is symlink-free or add resolution.
	candidate := filepath.Join(rootAbs, filepath.Clean(script))
	if rel, err := filepath.Rel(rootAbs, candidate); err != nil || strings.HasPrefix(rel, "..") {
		writeError(w, http.StatusForbidden, "forbidden", "script outside OPERATOR_SCRIPTS_ROOT")
		return
	}
	relPath, _ := filepath.Rel(rootAbs, candidate)
	// Allowlist entries may match the relative path, its base name, or the
	// cleaned form of the entry.
	allowed := false
	base := filepath.Base(relPath)
	for _, a := range allow {
		if a == relPath || a == base || filepath.Clean(a) == relPath {
			allowed = true
			break
		}
	}
	if !allowed {
		writeError(w, http.StatusForbidden, "forbidden", "script not in OPERATOR_SCRIPT_ALLOWLIST")
		return
	}
	st, err := os.Stat(candidate)
	if err != nil || st.IsDir() {
		writeError(w, http.StatusNotFound, "not_found", "script not found")
		return
	}
	// .sh files are run via bash (no exec bit needed); anything else must be
	// executable so arbitrary data files cannot be invoked.
	isShell := strings.HasSuffix(strings.ToLower(candidate), ".sh")
	if !isShell && st.Mode()&0o111 == 0 {
		writeError(w, http.StatusForbidden, "forbidden", "refusing to run non-executable file (use .sh or chmod +x)")
		return
	}
	// Default 120s timeout, overridable within (0, 600) seconds.
	timeout := 120 * time.Second
	if v := strings.TrimSpace(os.Getenv("OPERATOR_SCRIPT_TIMEOUT_SEC")); v != "" {
		if sec, err := parsePositiveInt(v); err == nil && sec > 0 && sec < 600 {
			timeout = time.Duration(sec) * time.Second
		}
	}
	ctx, cancel := context.WithTimeout(r.Context(), timeout)
	defer cancel()
	// Audit the attempt before execution so it is recorded even if the run hangs.
	s.roleMgr.LogOperatorEvent(r.Context(), "operator_script_run", &s.chainID, operatorAddr, "operator/run-script", "execute",
		map[string]interface{}{
			"script": relPath,
			"argc":   len(reqBody.Args),
		}, ipAddr, r.UserAgent())
	var cmd *exec.Cmd
	if isShell {
		args := append([]string{candidate}, reqBody.Args...)
		cmd = exec.CommandContext(ctx, "/bin/bash", args...)
	} else {
		cmd = exec.CommandContext(ctx, candidate, reqBody.Args...)
	}
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	runErr := cmd.Run()
	exit := 0
	timedOut := errors.Is(ctx.Err(), context.DeadlineExceeded)
	if runErr != nil {
		var ee *exec.ExitError
		if errors.As(runErr, &ee) {
			// Script ran and exited nonzero — report its code, not a 500.
			exit = ee.ExitCode()
		} else if timedOut {
			// Killed by the deadline; -1 marks "no real exit code".
			exit = -1
		} else {
			// Failed to start (permissions, missing interpreter, ...).
			writeError(w, http.StatusInternalServerError, "internal_error", runErr.Error())
			return
		}
	}
	status := "ok"
	if timedOut {
		status = "timed_out"
	} else if exit != 0 {
		status = "nonzero_exit"
	}
	// Audit the outcome with sizes only (not the output itself).
	s.roleMgr.LogOperatorEvent(r.Context(), "operator_script_result", &s.chainID, operatorAddr, "operator/run-script", status,
		map[string]interface{}{
			"script":       relPath,
			"argc":         len(reqBody.Args),
			"exit_code":    exit,
			"timed_out":    timedOut,
			"stdout_bytes": stdout.Len(),
			"stderr_bytes": stderr.Len(),
		}, ipAddr, r.UserAgent())
	resp := map[string]interface{}{
		"data": map[string]interface{}{
			"script":    relPath,
			"exit_code": exit,
			"stdout":    strings.TrimSpace(stdout.String()),
			"stderr":    strings.TrimSpace(stderr.String()),
			"timed_out": timedOut,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(resp)
}
// parsePositiveInt parses a string of ASCII decimal digits into a positive
// int. It rejects empty input, any non-digit character, zero, and values
// exceeding 1e6 (a sanity cap for configuration values).
func parsePositiveInt(s string) (int, error) {
	total := 0
	for _, ch := range s {
		digit := int(ch - '0')
		if digit < 0 || digit > 9 {
			return 0, errors.New("not digits")
		}
		total = total*10 + digit
		if total > 1e6 {
			return 0, errors.New("too large")
		}
	}
	if total == 0 {
		return 0, errors.New("zero")
	}
	return total, nil
}

View File

@@ -0,0 +1,88 @@
package track4
import (
"bytes"
"context"
"encoding/json"
"os"
"path/filepath"
"testing"
"net/http"
"net/http/httptest"
"github.com/stretchr/testify/require"
)
// stubRoleManager is a test double for the roleManager interface. It records
// the IP address it was queried with and counts audit-log calls.
type stubRoleManager struct {
	allowed bool   // verdict returned by IsIPWhitelisted
	gotIP   string // last IP passed to IsIPWhitelisted
	logs    int    // number of LogOperatorEvent invocations
}

// IsIPWhitelisted records the queried IP and returns the configured verdict.
func (s *stubRoleManager) IsIPWhitelisted(_ context.Context, _ string, ipAddress string) (bool, error) {
	s.gotIP = ipAddress
	return s.allowed, nil
}

// LogOperatorEvent counts the call and always succeeds.
func (s *stubRoleManager) LogOperatorEvent(_ context.Context, _ string, _ *int, _ string, _ string, _ string, _ map[string]interface{}, _ string, _ string) error {
	s.logs++
	return nil
}
// TestHandleRunScriptUsesForwardedClientIPAndRunsAllowlistedScript covers the
// happy path: an allowlisted .sh script runs with its args, the whitelist
// check sees the X-Forwarded-For client IP (the proxy is trusted via
// TRUST_PROXY_CIDRS), and both the attempt and result are audit-logged.
func TestHandleRunScriptUsesForwardedClientIPAndRunsAllowlistedScript(t *testing.T) {
	root := t.TempDir()
	scriptPath := filepath.Join(root, "echo.sh")
	// 0o644 (not executable) is deliberate: .sh files run via bash.
	require.NoError(t, os.WriteFile(scriptPath, []byte("#!/usr/bin/env bash\necho hello \"$1\"\n"), 0o644))
	t.Setenv("OPERATOR_SCRIPTS_ROOT", root)
	t.Setenv("OPERATOR_SCRIPT_ALLOWLIST", "echo.sh")
	t.Setenv("OPERATOR_SCRIPT_TIMEOUT_SEC", "30")
	t.Setenv("TRUST_PROXY_CIDRS", "10.0.0.0/8")
	roleMgr := &stubRoleManager{allowed: true}
	s := &Server{roleMgr: roleMgr, chainID: 138}
	reqBody := []byte(`{"script":"echo.sh","args":["world"]}`)
	req := httptest.NewRequest(http.MethodPost, "/api/v1/track4/operator/run-script", bytes.NewReader(reqBody))
	req = req.WithContext(context.WithValue(req.Context(), "user_address", "0x4A666F96fC8764181194447A7dFdb7d471b301C8"))
	// RemoteAddr is a trusted proxy, so the first untrusted XFF hop is used.
	req.RemoteAddr = "10.0.0.10:8080"
	req.Header.Set("X-Forwarded-For", "203.0.113.9, 10.0.0.10")
	w := httptest.NewRecorder()
	s.HandleRunScript(w, req)
	require.Equal(t, http.StatusOK, w.Code)
	require.Equal(t, "203.0.113.9", roleMgr.gotIP)
	// One audit entry for the attempt, one for the result.
	require.Equal(t, 2, roleMgr.logs)
	var out struct {
		Data map[string]any `json:"data"`
	}
	require.NoError(t, json.Unmarshal(w.Body.Bytes(), &out))
	require.Equal(t, "echo.sh", out.Data["script"])
	require.Equal(t, float64(0), out.Data["exit_code"])
	require.Equal(t, "hello world", out.Data["stdout"])
	require.Equal(t, false, out.Data["timed_out"])
}
// TestHandleRunScriptRejectsNonAllowlistedScript verifies that a script
// present under OPERATOR_SCRIPTS_ROOT but absent from the allowlist is
// refused with 403.
func TestHandleRunScriptRejectsNonAllowlistedScript(t *testing.T) {
	root := t.TempDir()
	require.NoError(t, os.WriteFile(filepath.Join(root, "allowed.sh"), []byte("#!/usr/bin/env bash\necho ok\n"), 0o644))
	require.NoError(t, os.WriteFile(filepath.Join(root, "blocked.sh"), []byte("#!/usr/bin/env bash\necho blocked\n"), 0o644))
	t.Setenv("OPERATOR_SCRIPTS_ROOT", root)
	t.Setenv("OPERATOR_SCRIPT_ALLOWLIST", "allowed.sh")
	s := &Server{roleMgr: &stubRoleManager{allowed: true}, chainID: 138}
	req := httptest.NewRequest(http.MethodPost, "/api/v1/track4/operator/run-script", bytes.NewReader([]byte(`{"script":"blocked.sh"}`)))
	req = req.WithContext(context.WithValue(req.Context(), "user_address", "0x4A666F96fC8764181194447A7dFdb7d471b301C8"))
	req.RemoteAddr = "127.0.0.1:9999"
	w := httptest.NewRecorder()
	s.HandleRunScript(w, req)
	require.Equal(t, http.StatusForbidden, w.Code)
	require.Contains(t, w.Body.String(), "script not in OPERATOR_SCRIPT_ALLOWLIST")
}

View File

@@ -0,0 +1,17 @@
package track4
import (
"context"
"net/http"
httpmiddleware "github.com/explorer/backend/libs/go-http-middleware"
)
// roleManager is the subset of the role-management service that the track4
// handlers depend on; declared consumer-side so tests can substitute a stub.
type roleManager interface {
	// IsIPWhitelisted reports whether ipAddress is allowlisted for the operator.
	IsIPWhitelisted(ctx context.Context, operatorAddress string, ipAddress string) (bool, error)
	// LogOperatorEvent appends an entry to the operator audit log.
	LogOperatorEvent(ctx context.Context, eventType string, chainID *int, operatorAddress string, targetResource string, action string, details map[string]interface{}, ipAddress string, userAgent string) error
}
// clientIPAddress resolves the effective client IP for a request by
// delegating to the shared middleware helper (which applies proxy-trust
// rules — presumably TRUST_PROXY_CIDRS / X-Forwarded-For handling, as
// exercised by the run-script tests; see go-http-middleware for details).
func clientIPAddress(r *http.Request) string {
	return httpmiddleware.ClientIP(r)
}

View File

@@ -3,7 +3,11 @@ package websocket
import (
"encoding/json"
"log"
"net"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"
@@ -12,10 +16,62 @@ import (
var upgrader = websocket.Upgrader{
CheckOrigin: func(r *http.Request) bool {
return true // Allow all origins in development
return websocketOriginAllowed(r)
},
}
// websocketOriginAllowed decides whether a websocket upgrade request's
// Origin header is acceptable. Requests without an Origin header
// (non-browser clients) are always allowed. When WEBSOCKET_ALLOWED_ORIGINS
// is unset, only same-host origins pass; otherwise the origin must appear
// in the comma-separated allowlist, where "*" permits everything.
func websocketOriginAllowed(r *http.Request) bool {
	origin := strings.TrimSpace(r.Header.Get("Origin"))
	if origin == "" {
		return true
	}
	allowlist := splitAllowedOrigins(os.Getenv("WEBSOCKET_ALLOWED_ORIGINS"))
	if len(allowlist) == 0 {
		// No explicit allowlist: fall back to a same-host policy.
		return sameOriginHost(origin, r.Host)
	}
	for _, entry := range allowlist {
		if entry == "*" || strings.EqualFold(entry, origin) {
			return true
		}
	}
	return false
}
// splitAllowedOrigins parses a comma-separated origin list into trimmed,
// non-empty entries. Blank or whitespace-only input yields nil.
func splitAllowedOrigins(raw string) []string {
	if strings.TrimSpace(raw) == "" {
		return nil
	}
	pieces := strings.Split(raw, ",")
	out := make([]string, 0, len(pieces))
	for _, piece := range pieces {
		if entry := strings.TrimSpace(piece); entry != "" {
			out = append(out, entry)
		}
	}
	return out
}
// sameOriginHost reports whether the Origin header's hostname matches the
// request's Host value, case-insensitively, ignoring scheme and port.
func sameOriginHost(origin, requestHost string) bool {
	parsed, err := url.Parse(origin)
	if err != nil {
		return false
	}
	// Host headers may carry a port ("example.com:8080"); strip it when present.
	hostOnly := requestHost
	if h, _, splitErr := net.SplitHostPort(requestHost); splitErr == nil {
		hostOnly = h
	}
	return strings.EqualFold(parsed.Hostname(), hostOnly)
}
// Server represents the WebSocket server
type Server struct {
clients map[*Client]bool
@@ -27,9 +83,9 @@ type Server struct {
// Client represents a WebSocket client
type Client struct {
conn *websocket.Conn
send chan []byte
server *Server
conn *websocket.Conn
send chan []byte
server *Server
subscriptions map[string]bool
}
@@ -50,8 +106,9 @@ func (s *Server) Start() {
case client := <-s.register:
s.mu.Lock()
s.clients[client] = true
count := len(s.clients)
s.mu.Unlock()
log.Printf("Client connected. Total clients: %d", len(s.clients))
log.Printf("Client connected. Total clients: %d", count)
case client := <-s.unregister:
s.mu.Lock()
@@ -59,11 +116,12 @@ func (s *Server) Start() {
delete(s.clients, client)
close(client.send)
}
count := len(s.clients)
s.mu.Unlock()
log.Printf("Client disconnected. Total clients: %d", len(s.clients))
log.Printf("Client disconnected. Total clients: %d", count)
case message := <-s.broadcast:
s.mu.RLock()
s.mu.Lock()
for client := range s.clients {
select {
case client.send <- message:
@@ -72,7 +130,7 @@ func (s *Server) Start() {
delete(s.clients, client)
}
}
s.mu.RUnlock()
s.mu.Unlock()
}
}
}
@@ -189,7 +247,7 @@ func (c *Client) handleMessage(msg map[string]interface{}) {
channel, _ := msg["channel"].(string)
c.subscriptions[channel] = true
c.sendMessage(map[string]interface{}{
"type": "subscribed",
"type": "subscribed",
"channel": channel,
})
@@ -197,13 +255,13 @@ func (c *Client) handleMessage(msg map[string]interface{}) {
channel, _ := msg["channel"].(string)
delete(c.subscriptions, channel)
c.sendMessage(map[string]interface{}{
"type": "unsubscribed",
"type": "unsubscribed",
"channel": channel,
})
case "ping":
c.sendMessage(map[string]interface{}{
"type": "pong",
"type": "pong",
"timestamp": time.Now().Unix(),
})
}
@@ -222,4 +280,3 @@ func (c *Client) sendMessage(msg map[string]interface{}) {
close(c.send)
}
}

View File

@@ -0,0 +1,42 @@
package websocket
import (
"net/http/httptest"
"testing"
)
// TestWebsocketOriginAllowedDefaultsToSameHost verifies that with no
// allowlist configured, a same-host origin (ignoring port) is accepted.
func TestWebsocketOriginAllowedDefaultsToSameHost(t *testing.T) {
	t.Setenv("WEBSOCKET_ALLOWED_ORIGINS", "")
	req := httptest.NewRequest("GET", "http://example.com/ws", nil)
	req.Host = "example.com:8080"
	req.Header.Set("Origin", "https://example.com")
	if !websocketOriginAllowed(req) {
		t.Fatal("expected same-host websocket origin to be allowed by default")
	}
}
// TestWebsocketOriginAllowedRejectsCrossOriginByDefault verifies that with
// no allowlist configured, a foreign origin is refused (CSWSH protection).
func TestWebsocketOriginAllowedRejectsCrossOriginByDefault(t *testing.T) {
	t.Setenv("WEBSOCKET_ALLOWED_ORIGINS", "")
	req := httptest.NewRequest("GET", "http://example.com/ws", nil)
	req.Host = "example.com:8080"
	req.Header.Set("Origin", "https://attacker.example")
	if websocketOriginAllowed(req) {
		t.Fatal("expected cross-origin websocket request to be rejected by default")
	}
}
// TestWebsocketOriginAllowedHonorsExplicitAllowlist verifies that an origin
// listed in WEBSOCKET_ALLOWED_ORIGINS is accepted even when cross-host.
func TestWebsocketOriginAllowedHonorsExplicitAllowlist(t *testing.T) {
	t.Setenv("WEBSOCKET_ALLOWED_ORIGINS", "https://app.example, https://ops.example")
	req := httptest.NewRequest("GET", "http://example.com/ws", nil)
	req.Host = "example.com:8080"
	req.Header.Set("Origin", "https://ops.example")
	if !websocketOriginAllowed(req) {
		t.Fatal("expected allowlisted websocket origin to be accepted")
	}
}

View File

@@ -4,7 +4,9 @@ import (
"context"
"crypto/rand"
"encoding/hex"
"errors"
"fmt"
"strings"
"time"
"github.com/ethereum/go-ethereum/accounts"
@@ -14,6 +16,13 @@ import (
"github.com/jackc/pgx/v5/pgxpool"
)
var (
	// ErrWalletAuthStorageNotInitialized signals that the wallet_nonces table
	// is missing, i.e. migration 0010_track_schema has not been applied.
	ErrWalletAuthStorageNotInitialized = errors.New("wallet authentication storage is not initialized; run migration 0010_track_schema")
	// ErrWalletNonceNotFoundOrExpired is returned when no nonce row exists
	// for the requesting address.
	ErrWalletNonceNotFoundOrExpired = errors.New("nonce not found or expired")
	// ErrWalletNonceExpired is returned when the stored nonce exists but has
	// passed its expiry timestamp.
	ErrWalletNonceExpired = errors.New("nonce expired")
	// ErrWalletNonceInvalid is returned when the supplied nonce does not
	// match the stored one.
	ErrWalletNonceInvalid = errors.New("invalid nonce")
)
// WalletAuth handles wallet-based authentication
type WalletAuth struct {
db *pgxpool.Pool
@@ -28,6 +37,10 @@ func NewWalletAuth(db *pgxpool.Pool, jwtSecret []byte) *WalletAuth {
}
}
// isMissingWalletNonceTableError reports whether err is Postgres complaining
// that the wallet_nonces table does not exist (i.e. the track-schema
// migration has not been run). Matching is by error-message substring, since
// the driver error is not otherwise typed here.
func isMissingWalletNonceTableError(err error) bool {
	if err == nil {
		return false
	}
	return strings.Contains(err.Error(), `relation "wallet_nonces" does not exist`)
}
// NonceRequest represents a nonce request
type NonceRequest struct {
Address string `json:"address"`
@@ -84,6 +97,9 @@ func (w *WalletAuth) GenerateNonce(ctx context.Context, address string) (*NonceR
`
_, err := w.db.Exec(ctx, query, normalizedAddr, nonce, expiresAt)
if err != nil {
if isMissingWalletNonceTableError(err) {
return nil, ErrWalletAuthStorageNotInitialized
}
return nil, fmt.Errorf("failed to store nonce: %w", err)
}
@@ -110,22 +126,25 @@ func (w *WalletAuth) AuthenticateWallet(ctx context.Context, req *WalletAuthRequ
query := `SELECT nonce, expires_at FROM wallet_nonces WHERE address = $1`
err := w.db.QueryRow(ctx, query, normalizedAddr).Scan(&storedNonce, &expiresAt)
if err != nil {
return nil, fmt.Errorf("nonce not found or expired")
if isMissingWalletNonceTableError(err) {
return nil, ErrWalletAuthStorageNotInitialized
}
return nil, ErrWalletNonceNotFoundOrExpired
}
if time.Now().After(expiresAt) {
return nil, fmt.Errorf("nonce expired")
return nil, ErrWalletNonceExpired
}
if storedNonce != req.Nonce {
return nil, fmt.Errorf("invalid nonce")
return nil, ErrWalletNonceInvalid
}
// Verify signature
message := fmt.Sprintf("Sign this message to authenticate with SolaceScanScout Explorer.\n\nNonce: %s", req.Nonce)
messageHash := accounts.TextHash([]byte(message))
sigBytes, err := hex.DecodeString(req.Signature[2:]) // Remove 0x prefix
sigBytes, err := decodeWalletSignature(req.Signature)
if err != nil {
return nil, fmt.Errorf("invalid signature format: %w", err)
}
@@ -241,9 +260,45 @@ func (w *WalletAuth) ValidateJWT(tokenString string) (string, int, error) {
}
track := int(trackFloat)
if w.db == nil {
return address, track, nil
}
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
defer cancel()
currentTrack, err := w.getUserTrack(ctx, address)
if err != nil {
return "", 0, fmt.Errorf("failed to resolve current track: %w", err)
}
if currentTrack < track {
track = currentTrack
}
return address, track, nil
}
func decodeWalletSignature(signature string) ([]byte, error) {
if len(signature) < 2 || !strings.EqualFold(signature[:2], "0x") {
return nil, fmt.Errorf("signature must start with 0x")
}
raw := signature[2:]
if len(raw) != 130 {
return nil, fmt.Errorf("invalid signature length")
}
sigBytes, err := hex.DecodeString(raw)
if err != nil {
return nil, err
}
if len(sigBytes) != 65 {
return nil, fmt.Errorf("invalid signature length")
}
return sigBytes, nil
}
// getPermissionsForTrack returns permissions for a track level
func getPermissionsForTrack(track int) []string {
permissions := []string{

View File

@@ -0,0 +1,28 @@
package auth
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestDecodeWalletSignatureRejectsMalformedValues covers the two malformed
// inputs decodeWalletSignature must reject: a value without the 0x prefix and
// a hex payload that is too short.
func TestDecodeWalletSignatureRejectsMalformedValues(t *testing.T) {
	_, missingPrefixErr := decodeWalletSignature("deadbeef")
	require.ErrorContains(t, missingPrefixErr, "signature must start with 0x")

	_, shortPayloadErr := decodeWalletSignature("0x1234")
	require.ErrorContains(t, shortPayloadErr, "invalid signature length")
}
// TestValidateJWTReturnsClaimsWhenDBUnavailable verifies that ValidateJWT
// returns the address and track embedded in the token when WalletAuth was
// constructed without a database pool.
func TestValidateJWTReturnsClaimsWhenDBUnavailable(t *testing.T) {
	const (
		wallet = "0x4A666F96fC8764181194447A7dFdb7d471b301C8"
		level  = 4
	)
	walletAuth := NewWalletAuth(nil, []byte("test-secret"))

	token, _, err := walletAuth.generateJWT(wallet, level)
	require.NoError(t, err)

	gotAddress, gotTrack, err := walletAuth.ValidateJWT(token)
	require.NoError(t, err)
	require.Equal(t, wallet, gotAddress)
	require.Equal(t, level, gotTrack)
}

BIN
backend/cmd Executable file

Binary file not shown.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,60 @@
-- Migration: Track auth/operator tables for shared Blockscout database
-- Description: Creates only the explorer-owned auth/operator tables that do not
-- conflict with Blockscout's existing addresses/token_transfers schema.
-- All statements are idempotent (IF NOT EXISTS) so the migration can be re-run.
-- operator_events: log of operator actions, keyed by event type and acting
-- address, with free-form JSONB details plus the client IP and user agent.
CREATE TABLE IF NOT EXISTS operator_events (
id SERIAL PRIMARY KEY,
event_type VARCHAR(100) NOT NULL,
chain_id INTEGER,
operator_address VARCHAR(42) NOT NULL,
target_resource VARCHAR(200),
action VARCHAR(100) NOT NULL,
details JSONB,
ip_address INET,
user_agent TEXT,
timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- Indexes matching the expected filters: by type, operator, time, and chain.
CREATE INDEX IF NOT EXISTS idx_operator_events_type ON operator_events(event_type);
CREATE INDEX IF NOT EXISTS idx_operator_events_operator ON operator_events(operator_address);
CREATE INDEX IF NOT EXISTS idx_operator_events_timestamp ON operator_events(timestamp);
CREATE INDEX IF NOT EXISTS idx_operator_events_chain ON operator_events(chain_id);
-- operator_ip_whitelist: allowed source IPs per operator address; the UNIQUE
-- pair prevents duplicate entries for the same operator/IP combination.
CREATE TABLE IF NOT EXISTS operator_ip_whitelist (
id SERIAL PRIMARY KEY,
operator_address VARCHAR(42) NOT NULL,
ip_address INET NOT NULL,
description TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
UNIQUE(operator_address, ip_address)
);
CREATE INDEX IF NOT EXISTS idx_operator_whitelist_operator ON operator_ip_whitelist(operator_address);
CREATE INDEX IF NOT EXISTS idx_operator_whitelist_ip ON operator_ip_whitelist(ip_address);
-- operator_roles: one row per operator address carrying the track level
-- (default 4), role list, and an approval workflow (who approved and when).
CREATE TABLE IF NOT EXISTS operator_roles (
id SERIAL PRIMARY KEY,
address VARCHAR(42) NOT NULL UNIQUE,
track_level INTEGER NOT NULL DEFAULT 4,
roles TEXT[],
approved BOOLEAN DEFAULT FALSE,
approved_by VARCHAR(42),
approved_at TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_operator_roles_address ON operator_roles(address);
CREATE INDEX IF NOT EXISTS idx_operator_roles_approved ON operator_roles(approved);
-- wallet_nonces: at most one active sign-in nonce per address (UNIQUE) with an
-- explicit expiry; consumed by the wallet auth flow's nonce lookup by address.
CREATE TABLE IF NOT EXISTS wallet_nonces (
id SERIAL PRIMARY KEY,
address VARCHAR(42) NOT NULL UNIQUE,
nonce VARCHAR(64) NOT NULL,
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_wallet_nonces_address ON wallet_nonces(address);
CREATE INDEX IF NOT EXISTS idx_wallet_nonces_expires ON wallet_nonces(expires_at);

View File

@@ -0,0 +1,111 @@
package httpmiddleware
import (
"net"
"net/http"
"os"
"strings"
)
// ClientIP returns the best-known client IP for a request.
//
// Forwarded headers (X-Forwarded-For / X-Real-IP) are consulted only when the
// immediate peer is an explicitly trusted proxy, as configured through the
// TRUST_PROXY_IPS and/or TRUST_PROXY_CIDRS environment variables; otherwise
// the socket peer address is returned as-is.
func ClientIP(r *http.Request) string {
	peer := parseRemoteIP(r.RemoteAddr)
	if peer == "" {
		peer = strings.TrimSpace(r.RemoteAddr)
	}
	if isTrustedProxy(peer) {
		if forwarded := forwardedClientIP(r); forwarded != "" {
			return forwarded
		}
	}
	return peer
}
// parseRemoteIP extracts the bare IP from a remote address. host:port values
// are split; a plain IP is normalized via net.ParseIP; anything unparseable
// is returned trimmed but otherwise unchanged, and blank input yields "".
func parseRemoteIP(raw string) string {
	candidate := strings.TrimSpace(raw)
	if candidate == "" {
		return ""
	}
	host, _, err := net.SplitHostPort(candidate)
	if err == nil {
		return host
	}
	if parsed := net.ParseIP(candidate); parsed != nil {
		return parsed.String()
	}
	return candidate
}
func forwardedClientIP(r *http.Request) string {
for _, header := range []string{"X-Forwarded-For", "X-Real-IP"} {
raw := strings.TrimSpace(r.Header.Get(header))
if raw == "" {
continue
}
if header == "X-Forwarded-For" {
for _, part := range strings.Split(raw, ",") {
candidate := strings.TrimSpace(part)
if ip := net.ParseIP(candidate); ip != nil {
return ip.String()
}
}
continue
}
if ip := net.ParseIP(raw); ip != nil {
return ip.String()
}
}
return ""
}
func isTrustedProxy(remoteIP string) bool {
ip := net.ParseIP(strings.TrimSpace(remoteIP))
if ip == nil {
return false
}
for _, exact := range splitEnvList("TRUST_PROXY_IPS") {
if trusted := net.ParseIP(exact); trusted != nil && trusted.Equal(ip) {
return true
}
}
for _, cidr := range splitEnvList("TRUST_PROXY_CIDRS") {
_, network, err := net.ParseCIDR(cidr)
if err == nil && network.Contains(ip) {
return true
}
}
return false
}
func splitEnvList(key string) []string {
raw := strings.TrimSpace(os.Getenv(key))
if raw == "" {
return nil
}
parts := strings.Split(raw, ",")
values := make([]string, 0, len(parts))
for _, part := range parts {
trimmed := strings.TrimSpace(part)
if trimmed != "" {
values = append(values, trimmed)
}
}
return values
}

View File

@@ -0,0 +1,31 @@
package httpmiddleware
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
)
// TestClientIPFallsBackToRemoteAddrWhenProxyIsUntrusted ensures forwarded
// headers are ignored when no proxy allowlist is configured: the socket peer
// address wins even though X-Forwarded-For is present.
func TestClientIPFallsBackToRemoteAddrWhenProxyIsUntrusted(t *testing.T) {
	t.Setenv("TRUST_PROXY_IPS", "")
	t.Setenv("TRUST_PROXY_CIDRS", "")

	request := httptest.NewRequest(http.MethodGet, "/", nil)
	request.Header.Set("X-Forwarded-For", "203.0.113.9, 10.0.0.10")
	request.RemoteAddr = "10.0.0.10:8443"

	require.Equal(t, "10.0.0.10", ClientIP(request))
}
// TestClientIPUsesForwardedHeadersFromTrustedProxy ensures that when the
// immediate peer falls inside a trusted CIDR, the client IP is resolved from
// X-Forwarded-For rather than the socket address.
func TestClientIPUsesForwardedHeadersFromTrustedProxy(t *testing.T) {
	t.Setenv("TRUST_PROXY_IPS", "")
	t.Setenv("TRUST_PROXY_CIDRS", "10.0.0.0/8")

	request := httptest.NewRequest(http.MethodGet, "/", nil)
	request.Header.Set("X-Forwarded-For", "203.0.113.9, 10.0.0.10")
	request.RemoteAddr = "10.0.0.10:8443"

	require.Equal(t, "203.0.113.9", ClientIP(request))
}

View File

@@ -1,68 +1,98 @@
{
"description": "Address inventory moved out of explorer-monorepo/.env during dotenv cleanup. This file preserves the previous env-based address reference set for scripts and documentation review.",
"updated": "2026-03-27",
"updated": "2026-04-04",
"inventory": {
"LINK_TOKEN": "0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03",
"ORACLE_AGGREGATOR_ADDRESS": "0x99b3511a2d315a497c8112c1fdd8d508d4b1e506",
"CCIP_ROUTER_ADDRESS": "0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e",
"CCIPWETH9_BRIDGE": "0x89dd12025bfCD38A168455A44B400e913ED33BE2",
"CCIP_ROUTER_ADDRESS": "0x42DAb7b888Dd382bD5Adcf9E038dBF1fD03b4817",
"CCIP_ROUTER_DIRECT_LEGACY": "0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e",
"CCIPWETH9_BRIDGE": "0xcacfd227A040002e49e2e01626363071324f820a",
"CCIPWETH9_BRIDGE_DIRECT_LEGACY": "0x971cD9D156f193df8051E48043C476e53ECd4693",
"CCIPWETH10_BRIDGE": "0xe0E93247376aa097dB308B92e6Ba36bA015535D0",
"CCIP_CHAIN138_FEE_TOKEN": "0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03",
"WETH9_ADDRESS": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
"WETH10_ADDRESS": "0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f",
"COMPLIANT_USDT_V2": "0x9FBfab33882Efe0038DAa608185718b772EE5660",
"COMPLIANT_USDC_V2": "0x219522c60e83dEe01FC5b0329d6fA8fD84b9D13d",
"CUSDT_V2_ADDRESS_138": "0x9FBfab33882Efe0038DAa608185718b772EE5660",
"CUSDC_V2_ADDRESS_138": "0x219522c60e83dEe01FC5b0329d6fA8fD84b9D13d",
"CCIP_RECEIVER": "0x6C4BEE679d37629330daeF141BEd5b4eD2Ec14f6",
"CCIP_LOGGER": "0xF597ABbe5E1544845C6Ba92a6884B4D601ffa334",
"CW_L1_BRIDGE_CHAIN138": "0x152ed3e9912161b76bdfd368d0c84b7c31c10de7",
"DEPLOYER_ADMIN_138": "0x4A666F96fC8764181194447A7dFdb7d471b301C8",
"ORACLE_PROXY_ADDRESS": "0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6",
"CCIP_ROUTER_138": "0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e",
"CCIP_ROUTER_138": "0x42DAb7b888Dd382bD5Adcf9E038dBF1fD03b4817",
"CCIP_SENDER_138": "0x105F8A15b819948a89153505762444Ee9f324684",
"CCIP_RECEIVER_138": "0x6C4BEE679d37629330daeF141BEd5b4eD2Ec14f6",
"CCIP_LOGGER_138": "0xF597ABbe5E1544845C6Ba92a6884B4D601ffa334",
"CCIPWETH9_BRIDGE_138": "0x89dd12025bfCD38A168455A44B400e913ED33BE2",
"CCIPWETH9_BRIDGE_138": "0xcacfd227A040002e49e2e01626363071324f820a",
"CCIPWETH10_BRIDGE_138": "0xe0E93247376aa097dB308B92e6Ba36bA015535D0",
"LINK_TOKEN_138": "0x514910771AF9Ca656af840dff83E8264EcF986CA",
"ENHANCED_SWAP_ROUTER_V2_ADDRESS": "0xF1c93F54A5C2fc0d7766Ccb0Ad8f157DFB4C99Ce",
"INTENT_BRIDGE_COORDINATOR_V2_ADDRESS": "0x7D0022B7e8360172fd9C0bB6778113b7Ea3674E7",
"DODO_ROUTE_EXECUTOR_ADAPTER": "0x88495B3dccEA93b0633390fDE71992683121Fa62",
"DODO_V3_ROUTE_EXECUTOR_ADAPTER": "0x9Cb97adD29c52e3B81989BcA2E33D46074B530eF",
"UNISWAP_V3_ROUTE_EXECUTOR_ADAPTER": "0x960D6db4E78705f82995690548556fb2266308EA",
"BALANCER_ROUTE_EXECUTOR_ADAPTER": "0x4E1B71B69188Ab45021c797039b4887a4924157A",
"CURVE_ROUTE_EXECUTOR_ADAPTER": "0x5f0E07071c41ACcD2A1b1032D3bd49b323b9ADE6",
"ONEINCH_ROUTE_EXECUTOR_ADAPTER": "0x8168083d29b3293F215392A49D16e7FeF4a02600",
"UNISWAP_V3_ROUTER": "0xde9cD8ee2811E6E64a41D5F68Be315d33995975E",
"UNISWAP_QUOTER_ADDRESS": "0x6abbB1CEb2468e748a03A00CD6aA9BFE893AFa1f",
"CHAIN_138_UNISWAP_V3_FACTORY": "0x2f7219276e3ce367dB9ec74C1196a8ecEe67841C",
"CHAIN_138_UNISWAP_V3_ROUTER": "0xde9cD8ee2811E6E64a41D5F68Be315d33995975E",
"UNISWAP_V3_WETH_USDT_POOL": "0xa893add35aEfe6A6d858EB01828bE4592f12C9F5",
"UNISWAP_V3_WETH_USDC_POOL": "0xEC745bfb6b3cd32f102d594E5F432d8d85B19391",
"BALANCER_VAULT": "0x96423d7C1727698D8a25EbFB88131e9422d1a3C3",
"BALANCER_WETH_USDT_POOL_ID": "0x877cd220759e8c94b82f55450c85d382ae06856c426b56d93092a420facbc324",
"BALANCER_WETH_USDC_POOL_ID": "0xd8dfb18a6baf9b29d8c2dbd74639db87ac558af120df5261dab8e2a5de69013b",
"CURVE_3POOL": "0xE440Ec15805BE4C7BabCD17A63B8C8A08a492e0f",
"ONEINCH_ROUTER": "0x500B84b1Bc6F59C1898a5Fe538eA20A758757A4F",
"CROSS_CHAIN_FLASH_BRIDGE_ADAPTER": "0xBe9e0B2d4cF6A3b2994d6f2f0904D2B165eB8ffC",
"CROSS_CHAIN_FLASH_REPAY_RECEIVER": "0xD084b68cB4B1ef2cBA09CF99FB1B6552fd9b4859",
"CROSS_CHAIN_FLASH_VAULT_CREDIT_RECEIVER": "0x89F7a1fcbBe104BeE96Da4b4b6b7d3AF85f7E661",
"LINK_TOKEN_138": "0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03",
"WETH9_138": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
"WETH10_138": "0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f",
"CCIP_ROUTER_MAINNET": "0x80226fc0Ee2b096224EeAc085Bb9a8cba1146f7D",
"CCIPWETH9_BRIDGE_MAINNET": "0x3304b747E565a97ec8AC220b0B6A1f6ffDB837e6",
"CCIPWETH10_BRIDGE_MAINNET": "0x8078A09637e47Fa5Ed34F626046Ea2094a5CDE5e",
"CCIPWETH9_BRIDGE_MAINNET": "0xc9901ce2Ddb6490FAA183645147a87496d8b20B6",
"CCIPWETH10_BRIDGE_MAINNET": "0x04E1e22B0D41e99f4275bd40A50480219bc9A223",
"LINK_TOKEN_MAINNET": "0x514910771AF9Ca656af840dff83E8264EcF986CA",
"WETH9_MAINNET": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
"WETH10_MAINNET": "0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f",
"TRANSACTION_MIRROR_MAINNET": "0x4CF42c4F1dBa748601b8938be3E7ABD732E87cE9",
"MAINNET_TETHER_MAINNET": "0x15DF1D5BFDD8Aa4b380445D4e3E9B38d34283619",
"CCIP_ROUTER_BSC": "0xE1053aE1857476f36F3bAdEe8D26609d1887a44A",
"CCIPWETH9_BRIDGE_BSC": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"CCIPWETH10_BRIDGE_BSC": "0x105f8a15b819948a89153505762444ee9f324684",
"CCIPWETH9_BRIDGE_BSC": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"CCIPWETH10_BRIDGE_BSC": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"LINK_TOKEN_BSC": "0x404460C6A5EdE2D891e8297795264fDe62ADBB75",
"WETH9_BSC": "0xe0E93247376aa097dB308B92e6Ba36bA015535D0",
"WETH10_BSC": "0xAb57BF30F1354CA0590af22D8974c7f24DB2DbD7",
"CCIP_ROUTER_POLYGON": "0x3C3D92629A02a8D95D5CB9650fe49C3544f69B43",
"CCIPWETH9_BRIDGE_POLYGON": "0xa780ef19a041745d353c9432f2a7f5a241335ffe",
"CCIPWETH10_BRIDGE_POLYGON": "0xdab0591e5e89295ffad75a71dcfc30c5625c4fa2",
"CCIPWETH9_BRIDGE_POLYGON": "0xF7736443f02913e7e0773052103296CfE1637448",
"CCIPWETH10_BRIDGE_POLYGON": "0x0CA60e6f8589c540200daC9D9Cb27BC2e48eE66A",
"LINK_TOKEN_POLYGON": "0x53E0bca35eC356BD5ddDFebbD1Fc0fD03FaBad39",
"WETH9_POLYGON": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"WETH10_POLYGON": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"CCIP_ROUTER_AVALANCHE": "0xF694E193200268f9a4868e4Aa017A0118C9a8177",
"CCIPWETH9_BRIDGE_AVALANCHE": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"CCIPWETH10_BRIDGE_AVALANCHE": "0x105f8a15b819948a89153505762444ee9f324684",
"CCIPWETH9_BRIDGE_AVALANCHE": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"CCIPWETH10_BRIDGE_AVALANCHE": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"LINK_TOKEN_AVALANCHE": "0x5947BB275c521040051E823857d752Cac58008AD",
"WETH9_AVALANCHE": "0xa4B9DD039565AeD9641D45b57061f99d9cA6Df08",
"WETH10_AVALANCHE": "0x89dd12025bfCD38A168455A44B400e913ED33BE2",
"CCIP_ROUTER_BASE": "0xcc22AB6F94F1aBB4de9CCF9046f7a0AD1Ce4d716",
"CCIPWETH9_BRIDGE_BASE": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"CCIPWETH10_BRIDGE_BASE": "0x105f8a15b819948a89153505762444ee9f324684",
"CCIPWETH9_BRIDGE_BASE": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"CCIPWETH10_BRIDGE_BASE": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"LINK_TOKEN_BASE": "0x88Fb150BDc53A65fe94Dea0c9Ba0e666F144f907",
"WETH9_BASE": "0xe0E93247376aa097dB308B92e6Ba36bA015535D0",
"WETH10_BASE": "0xAb57BF30F1354CA0590af22D8974c7f24DB2DbD7",
"CCIP_ROUTER_ARBITRUM": "0x1619DE6B6B20eD217a58d00f37B9d47C7663feca",
"CCIPWETH9_BRIDGE_ARBITRUM": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"CCIPWETH10_BRIDGE_ARBITRUM": "0x105f8a15b819948a89153505762444ee9f324684",
"CCIPWETH9_BRIDGE_ARBITRUM": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"CCIPWETH10_BRIDGE_ARBITRUM": "0x73376eB92c16977B126dB9112936A20Fa0De3442",
"LINK_TOKEN_ARBITRUM": "0xf97f4df75117a78c1A5a0DBb814Af92458539FB4",
"WETH9_ARBITRUM": "0x89dd12025bfCD38A168455A44B400e913ED33BE2",
"WETH10_ARBITRUM": "0xe0E93247376aa097dB308B92e6Ba36bA015535D0",
"CCIP_ROUTER_OPTIMISM": "0x261c05167db67Be2E2dc4a347C4E6B000C677852",
"CCIPWETH9_BRIDGE_OPTIMISM": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"CCIPWETH10_BRIDGE_OPTIMISM": "0x105f8a15b819948a89153505762444ee9f324684",
"CCIPWETH9_BRIDGE_OPTIMISM": "0x6e94e53F73893b2a6784Df663920D31043A6dE07",
"CCIPWETH10_BRIDGE_OPTIMISM": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"LINK_TOKEN_OPTIMISM": "0x350a791Bfc2C21F9Ed5d10980Dad2e2638ffa7f6",
"WETH9_OPTIMISM": "0x89dd12025bfCD38A168455A44B400e913ED33BE2",
"WETH10_OPTIMISM": "0xe0E93247376aa097dB308B92e6Ba36bA015535D0",

View File

@@ -1,54 +1,54 @@
{
"description": "Canonical CCIP destination selector and bridge matrix used by explorer-monorepo operator scripts.",
"updated": "2026-03-27",
"updated": "2026-04-03",
"chains": [
{
"name": "BSC",
"selector": "11344663589394136015",
"weth9Bridge": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"weth10Bridge": "0x105f8a15b819948a89153505762444ee9f324684",
"weth9Bridge": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"weth10Bridge": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"rpcUrl": "https://bsc-dataseed.binance.org"
},
{
"name": "Polygon",
"selector": "4051577828743386545",
"weth9Bridge": "0xa780ef19a041745d353c9432f2a7f5a241335ffe",
"weth10Bridge": "0xdab0591e5e89295ffad75a71dcfc30c5625c4fa2",
"weth9Bridge": "0xF7736443f02913e7e0773052103296CfE1637448",
"weth10Bridge": "0x0CA60e6f8589c540200daC9D9Cb27BC2e48eE66A",
"rpcUrl": "https://polygon-rpc.com"
},
{
"name": "Avalanche",
"selector": "6433500567565415381",
"weth9Bridge": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"weth10Bridge": "0x105f8a15b819948a89153505762444ee9f324684",
"weth9Bridge": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"weth10Bridge": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"rpcUrl": "https://api.avax.network/ext/bc/C/rpc"
},
{
"name": "Base",
"selector": "15971525489660198786",
"weth9Bridge": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"weth10Bridge": "0x105f8a15b819948a89153505762444ee9f324684",
"weth9Bridge": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"weth10Bridge": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"rpcUrl": "https://mainnet.base.org"
},
{
"name": "Arbitrum",
"selector": "4949039107694359620",
"weth9Bridge": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"weth10Bridge": "0x105f8a15b819948a89153505762444ee9f324684",
"weth9Bridge": "0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c",
"weth10Bridge": "0x73376eB92c16977B126dB9112936A20Fa0De3442",
"rpcUrl": "https://arb1.arbitrum.io/rpc"
},
{
"name": "Optimism",
"selector": "3734403246176062136",
"weth9Bridge": "0x8078a09637e47fa5ed34f626046ea2094a5cde5e",
"weth10Bridge": "0x105f8a15b819948a89153505762444ee9f324684",
"weth9Bridge": "0x6e94e53F73893b2a6784Df663920D31043A6dE07",
"weth10Bridge": "0x24293CA562aE1100E60a4640FF49bd656cFf93B4",
"rpcUrl": "https://mainnet.optimism.io"
},
{
"name": "Ethereum",
"selector": "5009297550715157269",
"weth9Bridge": "0x2A0840e5117683b11682ac46f5CF5621E67269E3",
"weth10Bridge": "0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03",
"weth9Bridge": "0xc9901ce2Ddb6490FAA183645147a87496d8b20B6",
"weth10Bridge": "0x04E1e22B0D41e99f4275bd40A50480219bc9A223",
"rpcUrl": "https://eth.llamarpc.com"
}
]

View File

@@ -1,183 +1,40 @@
# Deployment Summary
## Complete Deployment Package
This directory contains two different kinds of deployment material:
All deployment files and scripts have been created and are ready for use.
- current production references for the live explorer stack
- older monolithic deployment scaffolding that is still useful as background, but is no longer the authoritative description of production
## 📁 File Structure
## Current Production Summary
```
deployment/
├── DEPLOYMENT_GUIDE.md # Complete step-by-step guide (1,079 lines)
├── DEPLOYMENT_TASKS.md # Detailed 71-task checklist (561 lines)
├── DEPLOYMENT_CHECKLIST.md # Interactive checklist (204 lines)
├── DEPLOYMENT_SUMMARY.md # This file
├── QUICK_DEPLOY.md # Quick command reference
├── README.md # Documentation overview
├── ENVIRONMENT_TEMPLATE.env # Environment variables template
├── nginx/
│ └── explorer.conf # Complete Nginx configuration
├── cloudflare/
│ └── tunnel-config.yml # Cloudflare Tunnel template
├── systemd/
│ ├── explorer-indexer.service
│ ├── explorer-api.service
│ ├── explorer-frontend.service
│ └── cloudflared.service
├── fail2ban/
│ ├── nginx.conf # Nginx filter
│ └── jail.local # Jail configuration
└── scripts/
├── deploy-lxc.sh # Automated LXC setup
├── install-services.sh # Install systemd services
├── setup-nginx.sh # Setup Nginx
├── setup-cloudflare-tunnel.sh # Setup Cloudflare Tunnel
├── setup-firewall.sh # Configure firewall
├── setup-fail2ban.sh # Configure Fail2ban
├── setup-backup.sh # Setup backup system
├── setup-health-check.sh # Setup health monitoring
├── build-all.sh # Build all applications
├── verify-deployment.sh # Verify deployment
└── full-deploy.sh # Full automated deployment
```
Start with [`LIVE_DEPLOYMENT_MAP.md`](./LIVE_DEPLOYMENT_MAP.md).
## 🚀 Quick Start
The live explorer is currently assembled from separate deployment paths:
### Option 1: Automated Deployment
```bash
# Run full automated deployment
sudo ./deployment/scripts/full-deploy.sh
```
| Component | Live service | Canonical deploy path |
|---|---|---|
| Next frontend | `solacescanscout-frontend.service` | [`scripts/deploy-next-frontend-to-vmid5000.sh`](../scripts/deploy-next-frontend-to-vmid5000.sh) |
| Explorer config/API | `explorer-config-api.service` | [`scripts/deploy-explorer-ai-to-vmid5000.sh`](../scripts/deploy-explorer-ai-to-vmid5000.sh) |
| Static config assets | nginx static files under `/var/www/html` | [`scripts/deploy-explorer-config-to-vmid5000.sh`](../scripts/deploy-explorer-config-to-vmid5000.sh) |
| Relay fleet | `ccip-relay*.service` on `r630-01` | host-side `config/systemd/ccip-relay*.service` |
### Option 2: Step-by-Step Manual
```bash
# 1. Read the guide
cat deployment/DEPLOYMENT_GUIDE.md
## Public Verification
# 2. Follow tasks
# Use deployment/DEPLOYMENT_TASKS.md
- [`check-explorer-health.sh`](../scripts/check-explorer-health.sh)
- [`check-explorer-e2e.sh`](../../scripts/verify/check-explorer-e2e.sh)
- `https://explorer.d-bis.org/api/config/capabilities`
- `https://explorer.d-bis.org/explorer-api/v1/track1/bridge/status`
- `https://explorer.d-bis.org/explorer-api/v1/mission-control/stream`
# 3. Track progress
# Use deployment/DEPLOYMENT_CHECKLIST.md
```
## Legacy Material In This Directory
## 📋 Deployment Phases
These files remain in the repo, but they describe an older generalized package:
1. **LXC Container Setup** (8 tasks)
- Create container
- Configure resources
- Install base packages
2. **Application Installation** (12 tasks)
- Install Go, Node.js, Docker
- Clone repository
- Build applications
3. **Database Setup** (10 tasks)
- Install PostgreSQL + TimescaleDB
- Create database
- Run migrations
4. **Infrastructure Services** (6 tasks)
- Deploy Elasticsearch
- Deploy Redis
5. **Application Services** (10 tasks)
- Configure environment
- Create systemd services
- Start services
6. **Nginx Reverse Proxy** (9 tasks)
- Install Nginx
- Configure reverse proxy
- Set up SSL
7. **Cloudflare Configuration** (18 tasks)
- Configure DNS
- Set up SSL/TLS
- Configure Tunnel
- Set up WAF
- Configure caching
8. **Security Hardening** (12 tasks)
- Configure firewall
- Set up Fail2ban
- Configure backups
- Harden SSH
9. **Monitoring** (8 tasks)
- Set up health checks
- Configure logging
- Set up alerts
## 🔧 Available Scripts
| Script | Purpose |
|--------|---------|
| `deploy-lxc.sh` | Automated LXC container setup |
| `build-all.sh` | Build all applications |
| `install-services.sh` | Install systemd service files |
| `setup-nginx.sh` | Configure Nginx |
| `setup-cloudflare-tunnel.sh` | Setup Cloudflare Tunnel |
| `setup-firewall.sh` | Configure UFW firewall |
| `setup-fail2ban.sh` | Configure Fail2ban |
| `setup-backup.sh` | Setup backup system |
| `setup-health-check.sh` | Setup health monitoring |
| `verify-deployment.sh` | Verify deployment |
| `full-deploy.sh` | Full automated deployment |
## 📝 Configuration Files
- **Nginx**: `nginx/explorer.conf`
- **Cloudflare Tunnel**: `cloudflare/tunnel-config.yml`
- **Systemd Services**: `systemd/*.service`
- **Fail2ban**: `fail2ban/*.conf`
- **Environment Template**: `ENVIRONMENT_TEMPLATE.env`
## ✅ Verification Checklist
After deployment, verify:
- [ ] All services running
- [ ] API responding: `curl http://localhost:8080/health`
- [ ] Frontend loading: `curl http://localhost:3000`
- [ ] Nginx proxying: `curl http://localhost/api/health`
- [ ] Database accessible
- [ ] DNS resolving
- [ ] SSL working (if direct connection)
- [ ] Cloudflare Tunnel connected (if using)
- [ ] Firewall configured
- [ ] Backups running
## 🆘 Troubleshooting
See `QUICK_DEPLOY.md` for:
- Common issues
- Quick fixes
- Emergency procedures
## 📊 Statistics
- **Total Tasks**: 71
- **Documentation**: 1,844+ lines
- **Scripts**: 11 automation scripts
- **Config Files**: 8 configuration templates
- **Estimated Time**: 6-8 hours (first deployment)
## 🎯 Next Steps
1. Review `DEPLOYMENT_GUIDE.md`
2. Prepare environment (Proxmox, Cloudflare)
3. Run deployment scripts
4. Verify deployment
5. Configure monitoring
---
**All deployment files are ready!**
- `DEPLOYMENT_GUIDE.md`
- `DEPLOYMENT_TASKS.md`
- `DEPLOYMENT_CHECKLIST.md`
- `QUICK_DEPLOY.md`
- `systemd/explorer-api.service`
- `systemd/explorer-frontend.service`
Treat those as scaffold or historical reference unless they have been explicitly updated to match the live split architecture.

View File

@@ -0,0 +1,94 @@
# Live Deployment Map
Current production deployment map for `explorer.d-bis.org`.
This file is the authoritative reference for the live explorer stack as of `2026-04-05`. It supersedes the older monolithic deployment notes in this directory when the question is "what is running in production right now?"
## Public Entry Point
- Public domain: `https://explorer.d-bis.org`
- Primary container: VMID `5000` (`192.168.11.140`, `blockscout-1`)
- Public edge: nginx on VMID `5000`
## VMID 5000 Internal Topology
| Surface | Internal listener | Owner | Public paths |
|---|---:|---|---|
| nginx | `80`, `443` | VMID `5000` | terminates public traffic |
| Next frontend | `127.0.0.1:3000` | `solacescanscout-frontend.service` | `/`, `/bridge`, `/routes`, `/more`, `/wallet`, `/liquidity`, `/pools`, `/analytics`, `/operator`, `/system`, `/weth` |
| Explorer config/API | `127.0.0.1:8081` | `explorer-config-api.service` | `/api/config/*`, `/explorer-api/v1/*` |
| Blockscout | `127.0.0.1:4000` | existing Blockscout stack | `/api/v2/*` and Blockscout-backed explorer data |
| Token aggregation | `127.0.0.1:3001` | token-aggregation service | `/token-aggregation/api/v1/*` |
| Static config assets | `/var/www/html/config`, `/var/www/html/token-icons` | nginx static files | `/config/*`, `/token-icons/*` |
## Canonical Deploy Scripts
| Component | Canonical deploy path | Notes |
|---|---|---|
| Next frontend | [`deploy-next-frontend-to-vmid5000.sh`](../scripts/deploy-next-frontend-to-vmid5000.sh) | Builds the Next standalone bundle and installs `solacescanscout-frontend.service` on port `3000` |
| Explorer config assets | [`deploy-explorer-config-to-vmid5000.sh`](../scripts/deploy-explorer-config-to-vmid5000.sh) | Publishes token list, networks, capabilities, topology, verification example, and token icons |
| Explorer config/API backend | [`deploy-explorer-ai-to-vmid5000.sh`](../scripts/deploy-explorer-ai-to-vmid5000.sh) | Builds and installs `explorer-config-api.service` on port `8081` and normalizes nginx `/explorer-api/v1/*` routing |
## Relay Topology
CCIP relay workers do not run inside VMID `5000`. They run on host `r630-01` and are consumed by the explorer API through relay-health probes.
| Service file | Profile | Port | Current role |
|---|---|---:|---|
| [`ccip-relay.service`](../../config/systemd/ccip-relay.service) | `mainnet-weth` | `9860` | Mainnet WETH lane, intentionally paused |
| [`ccip-relay-mainnet-cw.service`](../../config/systemd/ccip-relay-mainnet-cw.service) | `mainnet-cw` | `9863` | Mainnet cW lane |
| [`ccip-relay-bsc.service`](../../config/systemd/ccip-relay-bsc.service) | `bsc` | `9861` | BSC lane |
| [`ccip-relay-avax.service`](../../config/systemd/ccip-relay-avax.service) | `avax` | `9862` | Avalanche lane |
| [`ccip-relay-avax-cw.service`](../../config/systemd/ccip-relay-avax-cw.service) | `avax-cw` | `9864` | Avalanche cW lane |
| [`ccip-relay-avax-to-138.service`](../../config/systemd/ccip-relay-avax-to-138.service) | `avax-to-138` | `9865` | Reverse Avalanche to Chain 138 lane |
The explorer backend reads these through `CCIP_RELAY_HEALTH_URL` or `CCIP_RELAY_HEALTH_URLS`; see [`backend/api/rest/README.md`](../backend/api/rest/README.md).
## Public Verification Points
The following endpoints currently describe the live deployment contract:
- `https://explorer.d-bis.org/`
- `https://explorer.d-bis.org/bridge`
- `https://explorer.d-bis.org/routes`
- `https://explorer.d-bis.org/liquidity`
- `https://explorer.d-bis.org/api/config/capabilities`
- `https://explorer.d-bis.org/config/CHAIN138_RPC_CAPABILITIES.json`
- `https://explorer.d-bis.org/explorer-api/v1/features`
- `https://explorer.d-bis.org/explorer-api/v1/track1/bridge/status`
- `https://explorer.d-bis.org/explorer-api/v1/mission-control/stream`
- `https://explorer.d-bis.org/token-aggregation/api/v1/routes/matrix`
## Recommended Rollout Order
When a change spans multiple explorer surfaces, use this order:
1. Deploy static config assets with [`deploy-explorer-config-to-vmid5000.sh`](../scripts/deploy-explorer-config-to-vmid5000.sh).
2. Deploy the explorer config/API backend with [`deploy-explorer-ai-to-vmid5000.sh`](../scripts/deploy-explorer-ai-to-vmid5000.sh).
3. Deploy the Next frontend with [`deploy-next-frontend-to-vmid5000.sh`](../scripts/deploy-next-frontend-to-vmid5000.sh).
4. If nginx routing changed, verify the VMID `5000` nginx site before reload.
5. Run [`check-explorer-health.sh`](../scripts/check-explorer-health.sh) against the public domain.
6. Confirm relay visibility on `/explorer-api/v1/track1/bridge/status` and mission-control SSE.
When a change spans relays as well:
1. Deploy or restart the relevant `ccip-relay*.service` unit on `r630-01`.
2. Ensure the explorer backend relay probe env still matches the active host ports.
3. Recheck `/explorer-api/v1/track1/bridge/status` and `/explorer-api/v1/mission-control/stream`.
## Current Gaps And Legacy Footguns
- Older docs in this directory still describe a monolithic `explorer-api.service` plus `explorer-frontend.service` package. That is no longer the production deployment shape.
- [`ALL_VMIDS_ENDPOINTS.md`](../../docs/04-configuration/ALL_VMIDS_ENDPOINTS.md) is still correct at the public ingress level, but it intentionally compresses the explorer into `:80/:443` and Blockscout `:4000`. Use this file for the detailed internal listener split.
- There is no single one-shot script in this repo that fully deploys Blockscout, nginx, token aggregation, explorer-config-api, Next frontend, and host-side relays together. Production is currently assembled from the component deploy scripts above.
- `mainnet-weth` is deployed but intentionally paused until that bridge lane is funded again.
- `Etherlink` and `XDC Zero` remain separate bridge programs; they are not part of the current CCIP relay fleet described here.
## Source Of Truth
Use these in order:
1. This file for the live explorer deployment map.
2. [`ALL_VMIDS_ENDPOINTS.md`](../../docs/04-configuration/ALL_VMIDS_ENDPOINTS.md) for VMID, IP, and public ingress inventory.
3. The deploy scripts themselves for exact install behavior.
4. [`check-explorer-health.sh`](../scripts/check-explorer-health.sh) plus [`check-explorer-e2e.sh`](../../scripts/verify/check-explorer-e2e.sh) for public verification.

View File

@@ -1,118 +1,41 @@
# Deployment Documentation
Complete deployment documentation for the ChainID 138 Explorer Platform.
Deployment docs for the Chain 138 explorer stack.
## Documentation Files
## Read This First
### 📘 DEPLOYMENT_GUIDE.md
**Complete step-by-step guide** with detailed instructions for:
- LXC container setup
- Application installation
- Database configuration
- Nginx reverse proxy setup
- Cloudflare DNS, SSL, and Tunnel configuration
- Security hardening
- Monitoring setup
For the current production deployment shape, start with [`LIVE_DEPLOYMENT_MAP.md`](./LIVE_DEPLOYMENT_MAP.md).
**Use this for**: Full deployment walkthrough
That file reflects the live split deployment now in production:
### 📋 DEPLOYMENT_TASKS.md
**Detailed task checklist** with all 71 tasks organized by phase:
- Pre-deployment (5 tasks)
- Phase 1: LXC Setup (8 tasks)
- Phase 2: Application Installation (12 tasks)
- Phase 3: Database Setup (10 tasks)
- Phase 4: Infrastructure Services (6 tasks)
- Phase 5: Application Services (10 tasks)
- Phase 6: Nginx Reverse Proxy (9 tasks)
- Phase 7: Cloudflare Configuration (18 tasks)
- Phase 8: Security Hardening (12 tasks)
- Phase 9: Monitoring (8 tasks)
- Post-Deployment Verification (13 tasks)
- Optional Enhancements (8 tasks)
- Next frontend on `127.0.0.1:3000` via `solacescanscout-frontend.service`
- explorer config/API on `127.0.0.1:8081` via `explorer-config-api.service`
- Blockscout on `127.0.0.1:4000`
- token aggregation on `127.0.0.1:3001`
- static config assets under `/var/www/html/config`
- CCIP relay workers on host `r630-01`, outside VMID `5000`
**Use this for**: Tracking deployment progress
## Current Canonical Deployment Paths
### ✅ DEPLOYMENT_CHECKLIST.md
**Interactive checklist** for tracking deployment completion.
- Frontend deploy: [`scripts/deploy-next-frontend-to-vmid5000.sh`](../scripts/deploy-next-frontend-to-vmid5000.sh)
- Config deploy: [`scripts/deploy-explorer-config-to-vmid5000.sh`](../scripts/deploy-explorer-config-to-vmid5000.sh)
- Explorer config/API deploy: [`scripts/deploy-explorer-ai-to-vmid5000.sh`](../scripts/deploy-explorer-ai-to-vmid5000.sh)
- Public health audit: [`scripts/check-explorer-health.sh`](../scripts/check-explorer-health.sh)
- Full public smoke: [`check-explorer-e2e.sh`](../../scripts/verify/check-explorer-e2e.sh)
**Use this for**: Marking off completed items
## Legacy And Greenfield Docs
### ⚡ QUICK_DEPLOY.md
**Quick reference** with essential commands and common issues.
The rest of this directory is still useful, but it should be treated as legacy scaffold or greenfield reference unless it explicitly matches the live split architecture above.
**Use this for**: Quick command lookup during deployment
- `DEPLOYMENT_GUIDE.md`: older full-stack walkthrough
- `DEPLOYMENT_TASKS.md`: older monolithic deployment checklist
- `DEPLOYMENT_CHECKLIST.md`: older tracking checklist
- `QUICK_DEPLOY.md`: command reference for the legacy package
## Configuration Files
## Practical Rule
### nginx/explorer.conf
Complete Nginx configuration with:
- Rate limiting
- SSL/TLS settings
- Reverse proxy configuration
- Security headers
- Caching rules
- WebSocket support
### cloudflare/tunnel-config.yml
Cloudflare Tunnel configuration template.
### scripts/deploy-lxc.sh
Automated deployment script for initial setup.
## Deployment Architecture
```
Internet
Cloudflare (DNS, SSL, WAF, CDN)
Cloudflare Tunnel (optional)
LXC Container
├── Nginx (Reverse Proxy)
│ ├── → Frontend (Port 3000)
│ └── → API (Port 8080)
├── PostgreSQL + TimescaleDB
├── Elasticsearch
├── Redis
└── Application Services
├── Indexer
├── API Server
└── Frontend Server
```
## Quick Start
1. **Read the deployment guide**: `DEPLOYMENT_GUIDE.md`
2. **Use the task list**: `DEPLOYMENT_TASKS.md`
3. **Track progress**: `DEPLOYMENT_CHECKLIST.md`
4. **Quick reference**: `QUICK_DEPLOY.md`
## Prerequisites
- Proxmox VE with LXC support
- Cloudflare account with domain
- 16GB+ RAM, 4+ CPU cores, 100GB+ storage
- Ubuntu 22.04 LTS template
- SSH access to Proxmox host
## Estimated Time
- **First deployment**: 6-8 hours
- **Subsequent deployments**: 2-3 hours
- **Updates**: 30-60 minutes
## Support
For issues during deployment:
1. Check `QUICK_DEPLOY.md` for common issues
2. Review service logs: `journalctl -u <service-name> -f`
3. Check Nginx logs: `tail -f /var/log/nginx/explorer-error.log`
4. Verify Cloudflare tunnel: `systemctl status cloudflared`
## Version
**Version**: 1.0.0
**Last Updated**: 2024-12-23
If the question is "how do we update production today?", use:
1. [`LIVE_DEPLOYMENT_MAP.md`](./LIVE_DEPLOYMENT_MAP.md)
2. the specific deploy script for the component being changed
3. the public health scripts for verification

View File

@@ -0,0 +1,17 @@
# Include inside the same server block as /explorer-api/ (or equivalent Go upstream).
# SSE responses must not be buffered by nginx or clients stall until the ticker fires.
location = /explorer-api/v1/mission-control/stream {
proxy_pass http://127.0.0.1:8080;
proxy_http_version 1.1;
proxy_set_header Connection '';
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
proxy_cache off;
gzip off;
proxy_read_timeout 3600s;
add_header X-Accel-Buffering no;
}

View File

@@ -0,0 +1,36 @@
# Next.js frontend proxy locations for SolaceScanScout.
# Keep the existing higher-priority locations for:
# - /api/
# - /api/config/token-list
# - /api/config/networks
# - /api/config/capabilities
# - /explorer-api/v1/
# - /token-aggregation/api/v1/
# - /snap/
# - /health
#
# Include these locations after those API/static locations and before any legacy
# catch-all that serves /var/www/html/index.html directly.
location ^~ /_next/ {
proxy_pass http://127.0.0.1:3000;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location / {
proxy_pass http://127.0.0.1:3000;
proxy_http_version 1.1;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Connection "";
proxy_buffering off;
proxy_hide_header Cache-Control;
add_header Cache-Control "no-store, no-cache, must-revalidate" always;
add_header Content-Security-Policy "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://cdn.jsdelivr.net https://unpkg.com https://cdnjs.cloudflare.com; style-src 'self' 'unsafe-inline' https://cdnjs.cloudflare.com; img-src 'self' data: https:; font-src 'self' https://cdnjs.cloudflare.com; connect-src 'self' https://explorer.d-bis.org wss://explorer.d-bis.org https://rpc-http-pub.d-bis.org wss://rpc-ws-pub.d-bis.org http://192.168.11.221:8545 ws://192.168.11.221:8546;" always;
}

View File

@@ -13,6 +13,13 @@ Environment=PORT=8080
Environment=DB_HOST=localhost
Environment=DB_NAME=explorer
Environment=CHAIN_ID=138
Environment=RPC_URL=https://rpc-http-pub.d-bis.org
Environment=TOKEN_AGGREGATION_BASE_URL=http://127.0.0.1:3001
Environment=BLOCKSCOUT_INTERNAL_URL=http://127.0.0.1:4000
Environment=EXPLORER_PUBLIC_BASE=https://explorer.d-bis.org
Environment=OPERATOR_SCRIPTS_ROOT=/opt/explorer/scripts
Environment=OPERATOR_SCRIPT_ALLOWLIST=check-health.sh,check-bridges.sh
Environment=OPERATOR_SCRIPT_TIMEOUT_SEC=120
ExecStart=/opt/explorer/bin/api-server
Restart=on-failure
RestartSec=5

View File

@@ -93,6 +93,9 @@ services:
- PORT=8080
- CHAIN_ID=138
- REDIS_URL=redis://redis:6379
# Optional relay health for mission-control / bridge UI (see backend CCIP_RELAY_HEALTH_URLS)
- CCIP_RELAY_HEALTH_URL=${CCIP_RELAY_HEALTH_URL:-}
- CCIP_RELAY_HEALTH_URLS=${CCIP_RELAY_HEALTH_URLS:-}
ports:
- "8080:8080"
depends_on:

View File

@@ -0,0 +1,28 @@
[Unit]
Description=SolaceScanScout Next Frontend Service
After=network.target
Wants=network.target
[Service]
Type=simple
User=www-data
Group=www-data
WorkingDirectory=/opt/solacescanscout/frontend/current
Environment=NODE_ENV=production
Environment=HOSTNAME=127.0.0.1
Environment=PORT=3000
ExecStart=/usr/bin/node /opt/solacescanscout/frontend/current/server.js
Restart=always
RestartSec=5
StandardOutput=journal
StandardError=journal
SyslogIdentifier=solacescanscout-frontend
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=/opt/solacescanscout/frontend
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View File

@@ -131,25 +131,27 @@ All checks should now pass.
| Chain | Selector | Bridge Address |
|-------|----------|----------------|
| BSC | 11344663589394136015 | `0x8078a09637e47fa5ed34f626046ea2094a5cde5e` |
| Polygon | 4051577828743386545 | `0xa780ef19a041745d353c9432f2a7f5a241335ffe` |
| Avalanche | 6433500567565415381 | `0x8078a09637e47fa5ed34f626046ea2094a5cde5e` |
| Base | 15971525489660198786 | `0x8078a09637e47fa5ed34f626046ea2094a5cde5e` |
| Arbitrum | 4949039107694359620 | `0x8078a09637e47fa5ed34f626046ea2094a5cde5e` |
| Optimism | 3734403246176062136 | `0x8078a09637e47fa5ed34f626046ea2094a5cde5e` |
| Ethereum Mainnet | 5009297550715157269 | **TBD** (needs deployment/address) |
| BSC | 11344663589394136015 | `0x24293CA562aE1100E60a4640FF49bd656cFf93B4` |
| Polygon | 4051577828743386545 | `0xF7736443f02913e7e0773052103296CfE1637448` |
| Avalanche | 6433500567565415381 | `0x24293CA562aE1100E60a4640FF49bd656cFf93B4` |
| Base | 15971525489660198786 | `0x24293CA562aE1100E60a4640FF49bd656cFf93B4` |
| Arbitrum | 4949039107694359620 | `0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c` |
| Optimism | 3734403246176062136 | `0x6e94e53F73893b2a6784Df663920D31043A6dE07` |
| Ethereum Mainnet | 5009297550715157269 | `0xc9901ce2Ddb6490FAA183645147a87496d8b20B6` |
### WETH10 Bridge Destinations
| Chain | Selector | Bridge Address |
|-------|----------|----------------|
| BSC | 11344663589394136015 | `0x105f8a15b819948a89153505762444ee9f324684` |
| Polygon | 4051577828743386545 | `0xdab0591e5e89295ffad75a71dcfc30c5625c4fa2` |
| Avalanche | 6433500567565415381 | `0x105f8a15b819948a89153505762444ee9f324684` |
| Base | 15971525489660198786 | `0x105f8a15b819948a89153505762444ee9f324684` |
| Arbitrum | 4949039107694359620 | `0x105f8a15b819948a89153505762444ee9f324684` |
| Optimism | 3734403246176062136 | `0x105f8a15b819948a89153505762444ee9f324684` |
| Ethereum Mainnet | 5009297550715157269 | **TBD** (needs deployment/address) |
| BSC | 11344663589394136015 | `0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c` |
| Polygon | 4051577828743386545 | `0x0CA60e6f8589c540200daC9D9Cb27BC2e48eE66A` |
| Avalanche | 6433500567565415381 | `0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c` |
| Base | 15971525489660198786 | `0x937824f2516fa58f25aeAb92E7BFf7D74F463B4c` |
| Arbitrum | 4949039107694359620 | `0x73376eB92c16977B126dB9112936A20Fa0De3442` |
| Optimism | 3734403246176062136 | `0x24293CA562aE1100E60a4640FF49bd656cFf93B4` |
| Ethereum Mainnet | 5009297550715157269 | `0x04E1e22B0D41e99f4275bd40A50480219bc9A223` |
Note: Arbitrum remains operationally blocked on the current Mainnet hub leg until the `0xc990... -> 42161` WETH9 path is repaired, even though the destination bridge addresses are known.
---
@@ -203,4 +205,3 @@ All checks should now pass.
---
**Last Updated**: 2026-04-07

View File

@@ -7,9 +7,10 @@ All implementation steps have been completed successfully. The tiered architectu
## Completed Components
### 1. ✅ Database Schema
- Migration file: `backend/database/migrations/0010_track_schema.up.sql`
- Full migration file: `backend/database/migrations/0010_track_schema.up.sql`
- Shared-DB auth/operator migration: `backend/database/migrations/0010_track_schema.auth_only.sql`
- Rollback file: `backend/database/migrations/0010_track_schema.down.sql`
- Script: `scripts/run-migration-0010.sh`
- Helper script: `scripts/run-migration-0010.sh` (auto-detects DB mode)
### 2. ✅ JWT Secret Configuration
- Server reads `JWT_SECRET` from environment variable
@@ -65,7 +66,7 @@ bash scripts/setup-tiered-architecture.sh
export JWT_SECRET="your-strong-secret-here"
export RPC_URL="http://192.168.11.250:8545"
# 3. Run migration
# 3. Run migration helper
bash scripts/run-migration-0010.sh
# 4. Start server
@@ -94,4 +95,3 @@ The implementation is complete and ready for:
5. Production deployment
All code has been verified, linter errors resolved, and documentation completed.

View File

@@ -1,30 +1,45 @@
# Database Connection Guide
## Important: Two Different Database Users
## Supported Database Layouts
There are **two separate database systems**:
The explorer backend supports **two deployment modes**:
1. **Blockscout Database** (for Blockscout explorer)
- User: `blockscout`
- Password: `blockscout`
- Database: `blockscout`
1. **Standalone explorer DB**
- User: usually `explorer`
- Database: usually `explorer`
- Migration mode: full Track 2-4 schema
2. **Custom Explorer Backend Database** (for tiered architecture)
- User: `explorer`
- Password: `L@ker$2010`
- Database: `explorer`
2. **Shared Blockscout DB**
- User: usually `blockscout`
- Database: usually `blockscout`
- Migration mode: explorer auth/operator subset only
Use `bash scripts/run-migration-0010.sh` for both modes. The helper auto-detects whether it is connected to a standalone explorer database or a shared Blockscout database and chooses the safe migration path automatically.
## Correct Connection Command
For the **custom explorer backend** (tiered architecture), use:
For a **standalone explorer database**, use:
```bash
PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer -c "SELECT 1;"
export DB_PASSWORD='<your explorer DB password>'
PGPASSWORD="$DB_PASSWORD" psql -h localhost -U explorer -d explorer -c "SELECT 1;"
```
For a **shared Blockscout database**, use:
```bash
export DB_HOST=localhost
export DB_USER=blockscout
export DB_NAME=blockscout
export DB_PASSWORD='<your Blockscout DB password>'
PGPASSWORD="$DB_PASSWORD" psql -h "$DB_HOST" -U "$DB_USER" -d "$DB_NAME" -c "SELECT 1;"
```
Do **not** run the full `0010_track_schema.up.sql` directly against the shared Blockscout DB.
**NOT:**
```bash
# ❌ Wrong - this is for Blockscout
# ❌ Wrong - mismatched user/database pair
PGPASSWORD='blockscout' psql -h localhost -U blockscout -d explorer -c "SELECT 1;"
```
@@ -34,40 +49,32 @@ PGPASSWORD='blockscout' psql -h localhost -U blockscout -d explorer -c "SELECT 1
```bash
# Test connection to custom explorer database
PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer -c "SELECT version();"
export DB_PASSWORD='<your explorer DB password>'
PGPASSWORD="$DB_PASSWORD" psql -h localhost -U explorer -d explorer -c "SELECT version();"
```
### 2. Check if Tables Exist
```bash
# Check for track schema tables
PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer -c "
SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name IN ('wallet_nonces', 'operator_roles', 'addresses', 'token_transfers')
ORDER BY table_name;
"
# Check the database mode and required tables
bash scripts/check-database-connection.sh
```
### 3. Run Migration (if tables don't exist)
```bash
cd explorer-monorepo
PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer \
-f backend/database/migrations/0010_track_schema.up.sql
export DB_PASSWORD='<your explorer DB password>'
bash scripts/run-migration-0010.sh
```
### 4. Verify Migration
```bash
# Should return 4 or more
PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer -c "
SELECT COUNT(*) as table_count
FROM information_schema.tables
WHERE table_schema = 'public'
AND table_name IN ('wallet_nonces', 'operator_roles', 'addresses', 'token_transfers', 'analytics_flows', 'operator_events');
"
# Standalone explorer DB should include Track 2-4 tables plus auth/operator tables.
# Shared Blockscout DB should include at least:
# wallet_nonces, operator_roles, operator_events, operator_ip_whitelist
bash scripts/check-database-connection.sh
```
## Troubleshooting
@@ -94,10 +101,10 @@ AND table_name IN ('wallet_nonces', 'operator_roles', 'addresses', 'token_transf
You should see both `blockscout` and `explorer` databases.
4. **Create user and database if missing:**
4. **Create a standalone explorer user and database if you want a dedicated backend DB:**
```bash
sudo -u postgres psql << EOF
CREATE USER explorer WITH PASSWORD 'L@ker\$2010';
CREATE USER explorer WITH PASSWORD '<set-a-strong-password>';
CREATE DATABASE explorer OWNER explorer;
GRANT ALL PRIVILEGES ON DATABASE explorer TO explorer;
\q
@@ -106,9 +113,10 @@ AND table_name IN ('wallet_nonces', 'operator_roles', 'addresses', 'token_transf
### If Password Authentication Fails
1. **Verify password is correct:**
- Custom explorer: `L@ker$2010`
- Blockscout: `blockscout`
1. **Verify the correct password is exported in `DB_PASSWORD`**
2. **Confirm you are connecting with the right mode pair**
- standalone explorer DB: `explorer` / `explorer`
- shared Blockscout DB: `blockscout` / `blockscout`
3. **Check pg_hba.conf:**
```bash
@@ -128,7 +136,7 @@ Use the provided script:
```bash
cd explorer-monorepo
export DB_PASSWORD='L@ker$2010'
export DB_PASSWORD='<your explorer DB password>'
bash scripts/fix-database-connection.sh
```
@@ -144,7 +152,7 @@ This script will:
```bash
pkill -f api-server
cd explorer-monorepo/backend
export DB_PASSWORD='L@ker$2010'
export DB_PASSWORD='<your explorer DB password>'
export JWT_SECRET='your-secret-here'
./bin/api-server
```
@@ -162,10 +170,10 @@ This script will:
-H 'Content-Type: application/json' \
-d '{"address":"0x1234567890123456789012345678901234567890"}'
```
If the response mentions `wallet_nonces`, returns `service_unavailable`, or the wallet popup shows `Nonce: undefined`, rerun `bash scripts/run-migration-0010.sh`, restart the backend, and retry.
## Summary
- **Custom Explorer Backend:** Use `explorer` user with password `L@ker$2010`
- **Blockscout:** Use `blockscout` user with password `blockscout`
- **They are separate systems** with separate databases
- **Standalone explorer DB:** use the `explorer` user/database pair and the full Track 2-4 schema
- **Shared Blockscout DB:** use the Blockscout credentials and let `scripts/run-migration-0010.sh` apply only the auth/operator subset
- **Do not** apply `0010_track_schema.up.sql` directly to the shared Blockscout DB

View File

@@ -10,6 +10,25 @@
### Quick Deploy
For the current frontend, use the Next standalone deploy path:
```bash
# From explorer-monorepo root
./scripts/deploy-next-frontend-to-vmid5000.sh
```
This builds `frontend/`, uploads the standalone bundle, installs the
`solacescanscout-frontend.service` unit, and starts the frontend on
`127.0.0.1:3000` inside VMID 5000.
Nginx should keep the existing explorer API routes and proxy `/` plus `/_next/`
to the frontend service. Use
[nginx-next-frontend-proxy.conf](../deployment/common/nginx-next-frontend-proxy.conf)
inside the explorer server block after `/api`, `/api/config/*`, `/explorer-api/*`,
`/token-aggregation/api/v1/*`, `/snap/`, and `/health`.
### Legacy Static Deploy
```bash
# From explorer-monorepo root
./scripts/deploy.sh
@@ -18,7 +37,10 @@
### Manual Deploy
```bash
# Copy files manually
# Canonical Next deployment:
./scripts/deploy-next-frontend-to-vmid5000.sh
# Legacy static fallback only:
scp frontend/public/index.html root@192.168.11.140:/var/www/html/index.html
```
@@ -34,6 +56,33 @@ The deployment script uses these environment variables:
IP=192.168.11.140 DOMAIN=explorer.d-bis.org ./scripts/deploy.sh
```
## Mission-control and Track 4 runtime wiring
If you are deploying the Go explorer API with the mission-control additions enabled, set these backend env vars as well:
- `RPC_URL` - Chain 138 RPC for Track 1 and mission-control status/SSE data
- `TOKEN_AGGREGATION_BASE_URL` - used by `GET /api/v1/mission-control/liquidity/token/{address}/pools`
- `BLOCKSCOUT_INTERNAL_URL` - used by `GET /api/v1/mission-control/bridge/trace`
- `EXPLORER_PUBLIC_BASE` - public base URL returned in bridge trace links
- `CCIP_RELAY_HEALTH_URL` - optional relay probe URL, for example `http://192.168.11.11:9860/healthz`
- `CCIP_RELAY_HEALTH_URLS` - optional comma-separated named relay probes, for example `mainnet=http://192.168.11.11:9860/healthz,bsc=http://192.168.11.11:9861/healthz,avax=http://192.168.11.11:9862/healthz`
- `MISSION_CONTROL_CCIP_JSON` - optional JSON-file fallback for relay health snapshots
- `OPERATOR_SCRIPTS_ROOT` - root directory for Track 4 script execution
- `OPERATOR_SCRIPT_ALLOWLIST` - comma-separated allowlist for `POST /api/v1/track4/operator/run-script`
- `OPERATOR_SCRIPT_TIMEOUT_SEC` - optional per-script timeout in seconds
For nginx, include [nginx-mission-control-sse.conf](../deployment/common/nginx-mission-control-sse.conf) inside the same server block that proxies `/explorer-api/`, and update the `proxy_pass` target if your Go API is not listening on `127.0.0.1:8080`.
### Quick verification
```bash
curl -N https://explorer.d-bis.org/explorer-api/v1/mission-control/stream
curl "https://explorer.d-bis.org/explorer-api/v1/mission-control/bridge/trace?tx=0x..."
curl "https://explorer.d-bis.org/explorer-api/v1/mission-control/liquidity/token/0x93E66202A11B1772E55407B32B44e5Cd8eda7f22/pools"
# Optional relay probe from the explorer host:
curl http://192.168.11.11:9860/healthz
```
## Rollback
If deployment fails, rollback to previous version:
@@ -43,6 +92,10 @@ ssh root@192.168.11.140
cp /var/www/html/index.html.backup.* /var/www/html/index.html
```
For the Next standalone path, restart the previous release by repointing
`/opt/solacescanscout/frontend/current` to the prior release and restarting
`solacescanscout-frontend`.
## Testing
After deployment, test the explorer:
@@ -56,4 +109,3 @@ Or manually:
```bash
curl -k -I https://explorer.d-bis.org/
```

View File

@@ -25,7 +25,7 @@
-`/api/v1/auth/nonce` - Endpoint active
-`/api/v1/auth/wallet` - Endpoint active
- ✅ JWT token generation configured
- ⚠️ Requires database for nonce storage
- ⚠️ Requires database plus the `run-migration-0010.sh` helper for nonce storage
### 4. Feature Flags
-`/api/v1/features` - Working
@@ -120,7 +120,7 @@ DB_NAME=explorer
# Set correct password
export DB_PASSWORD='your-actual-password'
# Run migration
# Run migration helper
bash scripts/run-migration-0010.sh
# Restart server
@@ -143,6 +143,8 @@ curl -X POST http://localhost:8080/api/v1/auth/wallet \
-d '{"address":"...","signature":"...","nonce":"..."}'
```
If the nonce request returns `service_unavailable`, mentions `wallet_nonces`, or the wallet signature popup shows `Nonce: undefined`, the backend is still missing the wallet-auth schema. Run `bash scripts/run-migration-0010.sh`, restart the backend, and retry. The helper auto-detects standalone explorer DB vs shared Blockscout DB and uses the safe migration path for each.
### 3. Approve Users
```bash
# Approve for Track 2
@@ -217,4 +219,3 @@ The tiered architecture has been **successfully deployed and tested**. The API s
5. Production deployment
**Deployment Status: ✅ COMPLETE**

View File

@@ -34,7 +34,7 @@
1. **Database Connection**
- Status: ⚠️ Not connected
- Impact: Track 1 endpoints work (use RPC), Track 2-4 require database
- Solution: Set `DB_PASSWORD` environment variable and run migration
- Solution: Set `DB_PASSWORD` environment variable and run `bash scripts/run-migration-0010.sh`
2. **Health Endpoint**
- Status: ⚠️ Returns degraded status (due to database)
@@ -49,7 +49,7 @@
| `/api/v1/features` | ✅ Working | Returns track level and features |
| `/api/v1/track1/blocks/latest` | ✅ Working | HTTP 200 |
| `/api/v1/track1/bridge/status` | ✅ Working | Returns bridge status |
| `/api/v1/auth/nonce` | ⚠️ HTTP 400 | Requires valid address format |
| `/api/v1/auth/nonce` | ⚠️ DB-backed | Requires both a valid address and the `wallet_nonces` table created by `scripts/run-migration-0010.sh` |
| `/api/v1/track2/search` | ✅ Working | Correctly requires auth (401) |
### Environment Configuration
@@ -94,6 +94,7 @@ DB_NAME=explorer
-H 'Content-Type: application/json' \
-d '{"address":"...","signature":"...","nonce":"..."}'
```
If the response mentions `wallet_nonces` or the wallet popup shows `Nonce: undefined`, rerun `bash scripts/run-migration-0010.sh` and restart the backend before retrying.
### Production Deployment
@@ -104,7 +105,7 @@ DB_NAME=explorer
2. **Configure Database**
- Set proper `DB_PASSWORD`
- Run migration: `bash scripts/run-migration-0010.sh`
- Run migration helper: `bash scripts/run-migration-0010.sh`
- Verify connection: `bash scripts/check-database-connection.sh`
3. **Start as Service**
@@ -156,4 +157,3 @@ curl http://localhost:8080/api/v1/features
The tiered architecture has been successfully deployed. The API server is running and responding to requests. Track 1 endpoints (public RPC gateway) are fully functional. Track 2-4 endpoints are configured but require database connectivity for full functionality.
**Deployment Status: ✅ SUCCESSFUL**

View File

@@ -63,8 +63,8 @@
| Contract | Address | In .env | Variable Name | Status |
|----------|---------|---------|---------------|--------|
| **CCIPWETH9Bridge** | `0x2A0840e5117683b11682ac46f5CF5621E67269E3` | ✅ | `CCIPWETH9_BRIDGE_MAINNET` | ✅ Verified |
| **CCIPWETH10Bridge** | `0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03` | ✅ | `CCIPWETH10_BRIDGE_MAINNET` | ✅ Verified |
| **CCIPWETH9Bridge** | `0xc9901ce2Ddb6490FAA183645147a87496d8b20B6` | ✅ | `CCIPWETH9_BRIDGE_MAINNET` | ✅ Verified |
| **CCIPWETH10Bridge** | `0x04E1e22B0D41e99f4275bd40A50480219bc9A223` | ✅ | `CCIPWETH10_BRIDGE_MAINNET` | ✅ Verified |
### Cross-Chain Contracts

View File

@@ -156,8 +156,8 @@ PRICEFEED_KEEPER_138=0xD3AD6831aacB5386B8A25BB8D8176a6C8a026f04
### Ethereum Mainnet Variables
```bash
# Bridges
CCIPWETH9_BRIDGE_MAINNET=0x2A0840e5117683b11682ac46f5CF5621E67269E3
CCIPWETH10_BRIDGE_MAINNET=0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03
CCIPWETH9_BRIDGE_MAINNET=0xc9901ce2Ddb6490FAA183645147a87496d8b20B6
CCIPWETH10_BRIDGE_MAINNET=0x04E1e22B0D41e99f4275bd40A50480219bc9A223
# Cross-Chain
TRANSACTION_MIRROR_MAINNET=0x4CF42c4F1dBa748601b8938be3E7ABD732E87cE9

View File

@@ -13,6 +13,50 @@ The frontend is reachable at **https://explorer.d-bis.org** (FQDN) or by **VM IP
2. **Same-origin /api** When the site is served from the explorer host (FQDN `https://explorer.d-bis.org` or VM IP `http://192.168.11.140` / `https://192.168.11.140`), the frontend uses relative `/api` so all requests go through the same nginx proxy. If you open the frontend from elsewhere, the code falls back to the full Blockscout URL (CORS must allow it).
- If the API returns **200** but the UI still shows no data, check the browser console for JavaScript errors (e.g. CSP or network errors).
## Wallet connect fails with “nonce not found or expired”
If the explorer shows a MetaMask sign-in failure such as:
```text
Authentication failed: nonce not found or expired
```
or the wallet signature request itself shows:
```text
Nonce: undefined
```
check the nonce endpoint directly:
```bash
curl -sS -H 'Content-Type: application/json' \
-X POST https://explorer.d-bis.org/explorer-api/v1/auth/nonce \
--data '{"address":"0x4A666F96fC8764181194447A7dFdb7d471b301C8"}'
```
If that returns an error mentioning:
```text
relation "wallet_nonces" does not exist
```
then the explorer backend is running without the wallet-auth schema migration. Run:
```bash
cd explorer-monorepo
bash scripts/run-migration-0010.sh
```
`scripts/run-migration-0010.sh` now auto-detects the database layout:
- **Standalone explorer DB**: applies the full Track 2-4 schema from `0010_track_schema.up.sql`
- **Shared Blockscout DB**: applies only the explorer-owned auth/operator tables from `0010_track_schema.auth_only.sql`
Do **not** pipe `0010_track_schema.up.sql` directly into the shared Blockscout database on VMID 5000; its `addresses` and `token_transfers` tables already exist with Blockscout's schema and the full migration will collide with them.
The `Nonce: undefined` popup means the frontend asked for a nonce, got back an error instead of a nonce, and the old deployed frontend still opened the signature request anyway. After the helper migration, retry the nonce request and then retry wallet connect in the browser.
### Frontend env contract
For the Next frontend in `frontend/`, keep the runtime base URL at the **host origin**, not the `/api` subpath:
@@ -340,6 +384,11 @@ The script checks:
- HTTP 200 on `/api/v2/stats`, `/api/v2/blocks`, `/api/v2/transactions`.
- Explorer frontend at `/` returns 200.
- Chain 138 Snap companion site at `/snap/` returns 200 or 301 and contains expected content when 200.
- The static Visual Command Center at `/chain138-command-center.html` returns 200 and contains expected architecture text.
- Mission Control endpoints return healthy responses:
- `/explorer-api/v1/mission-control/stream`
- `/explorer-api/v1/mission-control/bridge/trace`
- `/explorer-api/v1/mission-control/liquidity/token/{address}/pools`
**Full verification (single place for all checks — API, explorer, Snap):**
@@ -347,7 +396,7 @@ The script checks:
bash scripts/verify-vmid5000-all.sh [BASE_URL]
```
Run this after every deploy or nginx change to confirm explorer and Snap site are reachable and correct.
Run this after every deploy or nginx change to confirm explorer, Snap site, Visual Command Center, and Mission Control are reachable and correct.
---

View File

@@ -23,9 +23,10 @@ The SolaceScanScout tiered architecture has been successfully deployed and teste
- ✅ RPC integration working
3. **Authentication System**
- ✅ Nonce endpoint active
- ✅ Nonce endpoint wired
- ✅ Wallet authentication configured
- ✅ JWT token generation ready
- ⚠️ Wallet sign-in requires database connectivity plus the `run-migration-0010.sh` helper (`wallet_nonces`)
4. **Feature Flags**
- ✅ Endpoint operational
@@ -39,8 +40,8 @@ The SolaceScanScout tiered architecture has been successfully deployed and teste
### ⚠️ Database Connection
**Status:** Password authentication issue
**Impact:** Track 2-4 endpoints require database for full functionality
**Status:** Password authentication or schema issue
**Impact:** Track 2-4 endpoints and wallet sign-in require database for full functionality
**Workaround:** Track 1 endpoints work without database
**To Fix:**
@@ -48,14 +49,16 @@ The SolaceScanScout tiered architecture has been successfully deployed and teste
# Verify PostgreSQL is running
systemctl status postgresql
# Test connection with password
PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer -c "SELECT 1;"
# Test connection with the configured explorer DB password
export DB_PASSWORD='<your explorer DB password>'
PGPASSWORD="$DB_PASSWORD" psql -h localhost -U explorer -d explorer -c "SELECT 1;"
# If connection works, run migration
PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer \
-f backend/database/migrations/0010_track_schema.up.sql
# If connection works, run the migration helper
bash scripts/run-migration-0010.sh
```
The helper auto-detects standalone explorer DB vs shared Blockscout DB and picks the safe migration path.
## Test Results
### ✅ Passing Tests
@@ -68,7 +71,7 @@ PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer \
| Track 1 | Blocks | ✅ PASS |
| Track 1 | Transactions | ✅ PASS |
| Track 1 | Bridge | ✅ PASS |
| Auth | Nonce | PASS |
| Auth | Nonce | ⚠️ PASS only when DB is reachable and `wallet_nonces` exists |
| Track 2 | Auth Check | ✅ PASS (401) |
| Track 3 | Auth Check | ✅ PASS (401) |
| Track 4 | Auth Check | ✅ PASS (401) |
@@ -100,7 +103,7 @@ CHAIN_ID=138
PORT=8080
DB_HOST=localhost
DB_USER=explorer
DB_PASSWORD=L@ker$2010
DB_PASSWORD=<set in environment>
DB_NAME=explorer
```
@@ -120,7 +123,8 @@ sudo systemctl start postgresql
**Option B: Verify Credentials**
```bash
# Test connection
PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer -c "SELECT 1;"
export DB_PASSWORD='<your explorer DB password>'
PGPASSWORD="$DB_PASSWORD" psql -h localhost -U explorer -d explorer -c "SELECT 1;"
# If this fails, check:
# 1. User exists: psql -U postgres -c "\du"
@@ -128,12 +132,11 @@ PGPASSWORD='L@ker$2010' psql -h localhost -U explorer -d explorer -c "SELECT 1;"
# 3. Password is correct
```
**Option C: Run Migration**
**Option C: Run Migration Helper**
```bash
cd explorer-monorepo
export DB_PASSWORD='<redacted>'
PGPASSWORD='<redacted>' psql -h localhost -U explorer -d explorer \
-f backend/database/migrations/0010_track_schema.up.sql
export DB_PASSWORD='<your explorer DB password>'
bash scripts/run-migration-0010.sh
```
### 2. Restart Server with Database
@@ -144,7 +147,7 @@ pkill -f api-server
# Start with database
cd backend
export DB_PASSWORD='<redacted>'
export DB_PASSWORD='<your explorer DB password>'
export JWT_SECRET='your-secret-here'
./bin/api-server
```
@@ -169,10 +172,12 @@ curl http://localhost:8080/api/v1/track2/search?q=test \
```bash
# After database is connected
export DB_PASSWORD='<redacted>'
export DB_PASSWORD='<your explorer DB password>'
bash scripts/approve-user.sh <address> <track_level>
```
If the nonce request mentions `wallet_nonces`, returns `service_unavailable`, or the wallet popup shows `Nonce: undefined`, rerun `bash scripts/run-migration-0010.sh`, restart the backend, and retry. On the shared VMID 5000 Blockscout database, this helper applies only the auth/operator subset and avoids colliding with Blockscout's existing `addresses` schema.
## Monitoring
### Server Logs
@@ -213,4 +218,3 @@ The tiered architecture deployment is **complete and operational**. Track 1 (pub
- User authentication testing
- User approval workflow
- Indexer startup

View File

@@ -9,7 +9,9 @@ The explorer (SolaceScanScout) provides add-to-MetaMask and token list discovery
- **Path:** `/api/config/token-list`
- **Full URL:** `{EXPLORER_API_BASE}/api/config/token-list` (e.g. `https://explorer.d-bis.org/api/config/token-list` if the API is on the same origin).
Add this URL in MetaMask **Settings → Token lists** so tokens for Chain 138 and Mainnet appear automatically.
As of April 3, 2026, the public explorer token list exposes `190` entries, including the full Mainnet `cW*` suite.
- **Networks config:** `/api/config/networks` returns the same chain params (Chain 138 + Ethereum Mainnet) in JSON for programmatic use.
- **GRU v2 public rollout status:** the explorer also publishes static status surfaces at `/config/GRU_V2_PUBLIC_DEPLOYMENT_STATUS.json` and `/config/GRU_V2_DEPLOYMENT_QUEUE.json`. Together they summarize the public EVM `cW*` mesh, Wave 1 transport posture, the current public-protocol truth for `Uniswap v3`, `Balancer`, `Curve 3`, `DODO PMM`, and `1inch`, and the remaining operator queue by asset/chain/protocol.
## Provider and feature parity
@@ -32,8 +34,11 @@ Discovery is via **token list** (hosted at the explorer token list URL above), *
- **Wallet page:** https://explorer.d-bis.org/wallet
- **Token list URL:** https://explorer.d-bis.org/api/config/token-list
- **Networks config:** https://explorer.d-bis.org/api/config/networks
- **GRU v2 public rollout status:** https://explorer.d-bis.org/config/GRU_V2_PUBLIC_DEPLOYMENT_STATUS.json
- **GRU v2 deployment queue:** https://explorer.d-bis.org/config/GRU_V2_DEPLOYMENT_QUEUE.json
For backend deployment and integration tests, see [EXPLORER_D_BIS_ORG_INTEGRATION.md](../../docs/04-configuration/metamask/EXPLORER_D_BIS_ORG_INTEGRATION.md).
For token-list publishing, use `explorer-monorepo/scripts/deploy-explorer-config-to-vmid5000.sh`; it now falls back through the Proxmox host automatically when local `pct` is not installed.
## Related

View File

@@ -10,11 +10,12 @@ Before running the Explorer API and indexer in production:
See `deployment/ENVIRONMENT_TEMPLATE.env` for all required variables.
2. **Run database migrations**
Apply migrations before starting the API and indexer, e.g.:
Apply migrations before starting the API and indexer:
```bash
psql -U explorer -d explorer -f backend/database/migrations/0010_track_schema.up.sql
export DB_PASSWORD='<your DB password>'
bash scripts/run-migration-0010.sh
```
Or use your migration runner (e.g. `go run backend/database/migrations/migrate.go --up` if applicable).
`scripts/run-migration-0010.sh` auto-detects standalone explorer DB vs shared Blockscout DB. Do **not** apply `backend/database/migrations/0010_track_schema.up.sql` directly to a shared Blockscout database.
3. **Configure DB and RPC**
Ensure `DB_*`, `RPC_URL`, `WS_URL`, and `CHAIN_ID` are set correctly for the target environment.

View File

@@ -18,16 +18,24 @@ Overview of documentation for the ChainID 138 Explorer (SolaceScanScout).
| Doc | Description |
|-----|-------------|
| **[../frontend/FRONTEND_REVIEW.md](../frontend/FRONTEND_REVIEW.md)** | Frontend code review (SPA + React) |
| **[../frontend/FRONTEND_REVIEW.md](../frontend/FRONTEND_REVIEW.md)** | Frontend code review (Next app + legacy SPA) |
| **[../frontend/FRONTEND_TASKS_AND_REVIEW.md](../frontend/FRONTEND_TASKS_AND_REVIEW.md)** | Task list C1L4 and detail review |
**Deploy the live SPA (VMID 5000):**
**Deploy the current Next standalone frontend (VMID 5000):**
```bash
./scripts/deploy-next-frontend-to-vmid5000.sh
```
Nginx should preserve `/api`, `/api/config/*`, `/explorer-api/*`, `/token-aggregation/api/v1/*`, `/snap/`, and `/health`, then proxy `/` and `/_next/` using [deployment/common/nginx-next-frontend-proxy.conf](/home/intlc/projects/proxmox/explorer-monorepo/deployment/common/nginx-next-frontend-proxy.conf).
**Legacy static SPA deploy (fallback only):**
```bash
./scripts/deploy-frontend-to-vmid5000.sh
```
**Full fix (Blockscout + nginx + frontend):**
**Full explorer/API fix (Blockscout + nginx + frontend):**
```bash
./scripts/complete-explorer-api-access.sh

View File

@@ -30,7 +30,7 @@ All components have been implemented according to the plan:
### ✅ Phase 4: Track 2 (Full Indexed Explorer)
- **Indexers**: Block, transaction, and token indexers (`backend/indexer/track2/`)
- **Track 2 API**: All endpoints implemented (`backend/api/track2/endpoints.go`)
- **Database Schema**: Complete schema for indexed data (`backend/database/migrations/0010_track_schema.up.sql`)
- **Database Schema**: Full Track 2-4 schema plus shared-DB-safe auth/operator subset (`backend/database/migrations/0010_track_schema.up.sql`, `backend/database/migrations/0010_track_schema.auth_only.sql`)
### ✅ Phase 5: Track 3 (Analytics)
- **Analytics Engine**: Flow tracking, bridge analytics, token distribution (`backend/analytics/`)
@@ -80,7 +80,8 @@ Backend
- `backend/analytics/` - Analytics engine
### Database
- `backend/database/migrations/0010_track_schema.up.sql` - Track 2-4 schema
- `backend/database/migrations/0010_track_schema.up.sql` - full Track 2-4 schema
- `backend/database/migrations/0010_track_schema.auth_only.sql` - shared Blockscout DB auth/operator subset
### Frontend
- Updated `frontend/public/index.html` with feature gating
@@ -89,9 +90,10 @@ Backend
1. **Run Database Migrations**:
```bash
cd explorer-monorepo/backend/database/migrations
# Run migration 0010_track_schema.up.sql
cd explorer-monorepo
bash scripts/run-migration-0010.sh
```
The helper auto-detects standalone explorer DB vs shared Blockscout DB and chooses the safe migration path automatically.
2. **Configure JWT Secret**:
- Update `backend/api/rest/auth.go` to use environment variable for JWT secret
@@ -126,4 +128,3 @@ Test each track level:
- JWT secret is hardcoded in auth.go - move to environment variable
- Track routes are commented in routes.go - uncomment and wire up middleware when ready
- Frontend feature gating is implemented but needs testing with actual API responses

View File

@@ -26,9 +26,9 @@ cd backend
### Required
- `DB_HOST` - PostgreSQL host (default: localhost)
- `DB_USER` - Database user (default: explorer)
- `DB_USER` - Database user (default: explorer; use `blockscout` for the shared Blockscout DB mode)
- `DB_PASSWORD` - Database password (default: changeme)
- `DB_NAME` - Database name (default: explorer)
- `DB_NAME` - Database name (default: explorer; use `blockscout` for the shared Blockscout DB mode)
### Recommended (Production)
- `JWT_SECRET` - Strong random secret for JWT signing (required for production)
@@ -38,17 +38,18 @@ cd backend
## Database Migration
Run the Track 2-4 schema migration:
Run the migration helper:
```bash
bash scripts/run-migration-0010.sh
```
This creates:
- Track 2 tables: `addresses`, `token_transfers`, `token_balances`, `internal_transactions`
- Track 3 tables: `analytics_flows`, `analytics_bridge_history`, `token_distribution` (materialized view)
- Track 4 tables: `operator_events`, `operator_ip_whitelist`, `operator_roles`
- Auth table: `wallet_nonces`
The helper auto-detects the database layout:
- **Standalone explorer DB**: creates the full Track 2-4 schema
- **Shared Blockscout DB**: creates only `operator_events`, `operator_ip_whitelist`, `operator_roles`, and `wallet_nonces`
Do **not** apply `backend/database/migrations/0010_track_schema.up.sql` directly to the shared Blockscout DB.
## User Management
@@ -152,11 +153,13 @@ The indexers will:
### Migration Fails
- Check database connection: `psql -h $DB_HOST -U $DB_USER -d $DB_NAME -c "SELECT 1"`
- Verify user has CREATE TABLE permissions
- If you are using the shared Blockscout DB, keep `DB_USER` and `DB_NAME` aligned to `blockscout`
### Authentication Fails
- Check JWT_SECRET is set
- Verify wallet_nonces table exists
- Check database connection in auth handlers
- If the wallet popup shows `Nonce: undefined`, the nonce request failed before signing. Run `bash scripts/run-migration-0010.sh`, restart the backend, and retry.
### Track Routes Not Working
- Verify user is approved: Check `operator_roles` table
@@ -179,4 +182,3 @@ The indexers will:
7. ✅ Verify feature gating
For detailed API documentation, see: `docs/api/track-api-contracts.md`

View File

@@ -0,0 +1,104 @@
openapi: 3.0.3
info:
title: Explorer mission-control API
version: "1.0"
description: |
Public health, liquidity proxy, and bridge-trace helpers on the Go REST service.
SSE for `/mission-control/stream` should be proxied with **proxy_buffering off** so chunks flush (see `deployment/common/nginx-mission-control-sse.conf`).
servers:
- url: /explorer-api/v1
paths:
/mission-control/stream:
get:
summary: Server-Sent Events stream of bridge/RPC health
description: |
`Content-Type: text/event-stream`. Emits `event: mission-control` with JSON `{"data":{...}}` immediately, then every 20s.
Same inner `data` shape as `GET /track1/bridge/status`.
responses:
"200":
description: SSE stream
content:
text/event-stream:
schema:
type: string
/mission-control/liquidity/token/{address}/pools:
get:
summary: Cached proxy to token-aggregation pools
parameters:
- name: address
in: path
required: true
schema:
type: string
pattern: '^0x[a-fA-F0-9]{40}$'
responses:
"200":
description: Upstream JSON (pass-through)
"503":
description: TOKEN_AGGREGATION_BASE_URL not set
/mission-control/bridge/trace:
get:
summary: Resolve tx `to`/`from` via Blockscout and label with smart-contracts-master
parameters:
- name: tx
in: query
required: true
schema:
type: string
pattern: '^0x[a-fA-F0-9]{64}$'
responses:
"200":
description: Labeled trace
content:
application/json:
schema:
type: object
properties:
data:
type: object
properties:
tx_hash:
type: string
from:
type: string
to:
type: string
from_registry:
type: string
to_registry:
type: string
blockscout_url:
type: string
/track4/operator/run-script:
post:
summary: Run an allowlisted script under OPERATOR_SCRIPTS_ROOT (Track 4 + IP whitelist)
security:
- bearerAuth: []
requestBody:
required: true
content:
application/json:
schema:
type: object
required: [script]
properties:
script:
type: string
description: Path relative to OPERATOR_SCRIPTS_ROOT (no ..)
args:
type: array
items:
type: string
maxItems: 24
responses:
"200":
description: stdout/stderr and exit code
"403":
description: Not allowlisted or not whitelisted IP
"503":
description: Root or allowlist not configured
components:
securitySchemes:
bearerAuth:
type: http
scheme: bearer

View File

@@ -1,7 +1,7 @@
# Explorer API base URL (used for blocks, transactions, addresses, and /api/config/token-list).
# Production at https://explorer.d-bis.org: leave empty or set to https://explorer.d-bis.org (same origin).
# Local dev: http://localhost:8080 (or your API port).
NEXT_PUBLIC_API_URL=https://explorer.d-bis.org
# Production behind the nginx proxy: leave empty to use same-origin automatically.
# Local dev against a standalone backend: http://localhost:8080 (or your API port).
NEXT_PUBLIC_API_URL=
# Chain ID for the explorer (default: Chain 138 - DeFi Oracle Meta Mainnet).
NEXT_PUBLIC_CHAIN_ID=138

View File

@@ -1,2 +1,2 @@
NEXT_PUBLIC_API_URL=https://explorer.d-bis.org
NEXT_PUBLIC_API_URL=
NEXT_PUBLIC_CHAIN_ID=138

View File

@@ -0,0 +1,31 @@
import { describe, expect, it } from 'vitest'
import { resolveExplorerApiBase } from './api-base'
// Unit tests for resolveExplorerApiBase's three-tier base-URL resolution:
// explicit env value > browser same-origin > server-side local backend.
describe('resolveExplorerApiBase', () => {
// An explicit env value (e.g. NEXT_PUBLIC_API_URL) wins over the browser
// origin; the trailing slash is stripped from the returned base.
it('prefers an explicit env value when present', () => {
expect(
resolveExplorerApiBase({
envValue: 'https://explorer.d-bis.org/',
browserOrigin: 'http://127.0.0.1:3000',
})
).toBe('https://explorer.d-bis.org')
})
// With no env value, the browser origin becomes the API base (same-origin
// behind the nginx proxy); a trailing slash is stripped here as well.
it('falls back to same-origin in the browser when env is empty', () => {
expect(
resolveExplorerApiBase({
envValue: '',
browserOrigin: 'http://127.0.0.1:3000/',
})
).toBe('http://127.0.0.1:3000')
})
// With neither env value nor browser origin (server-side rendering),
// the resolver defaults to the local standalone backend.
it('falls back to the local backend on the server when no other base is available', () => {
expect(
resolveExplorerApiBase({
envValue: '',
browserOrigin: '',
})
).toBe('http://localhost:8080')
})
})

View File

@@ -1,4 +1,5 @@
import axios, { AxiosInstance, AxiosRequestConfig, AxiosResponse } from 'axios'
import { resolveExplorerApiBase } from './api-base'
export interface ApiResponse<T> {
data: T
@@ -21,9 +22,9 @@ export interface ApiError {
}
}
export function createApiClient(baseURL: string = process.env.NEXT_PUBLIC_API_URL || 'http://localhost:8080', getApiKey?: () => string | null) {
export function createApiClient(baseURL?: string, getApiKey?: () => string | null) {
const client = axios.create({
baseURL,
baseURL: baseURL || resolveExplorerApiBase(),
timeout: 30000,
headers: { 'Content-Type': 'application/json' },
})

View File

@@ -25,24 +25,51 @@ export function Address({
: address
const handleCopy = async () => {
await navigator.clipboard.writeText(address)
setCopied(true)
setTimeout(() => setCopied(false), 2000)
try {
await navigator.clipboard.writeText(address)
setCopied(true)
setTimeout(() => setCopied(false), 2000)
} catch {
setCopied(false)
}
}
return (
<div className={clsx('flex items-center gap-2', className)}>
<span className="font-mono text-sm">{displayAddress}</span>
<div
className={clsx(
'flex min-w-0 items-start gap-2',
truncate ? 'flex-nowrap' : 'flex-wrap',
className
)}
>
<span
className={clsx(
'min-w-0 font-mono text-sm leading-6 text-gray-900 dark:text-gray-100',
truncate ? 'truncate' : 'break-all'
)}
>
{displayAddress}
</span>
{showCopy && (
<button
type="button"
onClick={handleCopy}
className="text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-200"
className="shrink-0 rounded-md p-1 text-gray-500 transition hover:bg-gray-100 hover:text-gray-700 dark:text-gray-400 dark:hover:bg-gray-800 dark:hover:text-gray-200"
title="Copy address"
aria-label="Copy address"
>
{copied ? '✓' : '📋'}
{copied ? (
<svg className="h-4 w-4" viewBox="0 0 20 20" fill="currentColor" aria-hidden>
<path fillRule="evenodd" d="M16.704 5.29a1 1 0 0 1 .006 1.414l-7.25 7.313a1 1 0 0 1-1.42 0L4.79 10.766a1 1 0 1 1 1.414-1.414l2.546 2.546 6.544-6.602a1 1 0 0 1 1.41-.006Z" clipRule="evenodd" />
</svg>
) : (
<svg className="h-4 w-4" viewBox="0 0 20 20" fill="currentColor" aria-hidden>
<path d="M6 2.75A2.25 2.25 0 0 0 3.75 5v8A2.25 2.25 0 0 0 6 15.25h1.25V14H6A1 1 0 0 1 5 13V5a1 1 0 0 1 1-1h5a1 1 0 0 1 1 1v1.25h1.25V5A2.25 2.25 0 0 0 11 2.75H6Z" />
<path d="M9 6.75A2.25 2.25 0 0 0 6.75 9v6A2.25 2.25 0 0 0 9 17.25h5A2.25 2.25 0 0 0 16.25 15V9A2.25 2.25 0 0 0 14 6.75H9Zm0 1.25h5a1 1 0 0 1 1 1v6a1 1 0 0 1-1 1H9a1 1 0 0 1-1-1V9a1 1 0 0 1 1-1Z" />
</svg>
)}
</button>
)}
</div>
)
}

View File

@@ -17,7 +17,7 @@ export function Button({
return (
<button
className={clsx(
'font-medium rounded-lg transition-colors',
'font-medium rounded-lg transition-colors disabled:cursor-not-allowed disabled:opacity-50',
{
'bg-primary-600 text-white hover:bg-primary-700': variant === 'primary',
'bg-gray-200 text-gray-900 hover:bg-gray-300': variant === 'secondary',
@@ -34,4 +34,3 @@ export function Button({
</button>
)
}

View File

@@ -11,12 +11,12 @@ export function Card({ children, className, title }: CardProps) {
return (
<div
className={clsx(
'bg-white dark:bg-gray-800 rounded-lg shadow-md p-6',
'rounded-xl bg-white p-4 shadow-md dark:bg-gray-800 sm:p-6',
className
)}
>
{title && (
<h3 className="text-xl font-semibold mb-4 text-gray-900 dark:text-white">
<h3 className="mb-3 text-lg font-semibold text-gray-900 dark:text-white sm:mb-4 sm:text-xl">
{title}
</h3>
)}
@@ -24,4 +24,3 @@ export function Card({ children, className, title }: CardProps) {
</div>
)
}

View File

@@ -11,48 +11,91 @@ interface TableProps<T> {
columns: Column<T>[]
data: T[]
className?: string
emptyMessage?: string
/** Stable key for each row (e.g. row => row.id or row => row.hash). Falls back to index if not provided. */
keyExtractor?: (row: T) => string | number
}
export function Table<T>({ columns, data, className, keyExtractor }: TableProps<T>) {
export function Table<T>({
columns,
data,
className,
emptyMessage = 'No data available right now.',
keyExtractor,
}: TableProps<T>) {
if (data.length === 0) {
return (
<div
className={clsx(
'rounded-xl border border-dashed border-gray-300 bg-white px-4 py-6 text-sm text-gray-600 dark:border-gray-700 dark:bg-gray-900 dark:text-gray-400',
className,
)}
>
{emptyMessage}
</div>
)
}
return (
<div className={clsx('overflow-x-auto', className)}>
<table className="min-w-full divide-y divide-gray-200 dark:divide-gray-700">
<thead className="bg-gray-50 dark:bg-gray-800">
<tr>
{columns.map((column, index) => (
<th
key={index}
className={clsx(
'px-6 py-3 text-left text-xs font-medium text-gray-500 dark:text-gray-400 uppercase tracking-wider',
column.className
)}
>
{column.header}
</th>
))}
</tr>
</thead>
<tbody className="bg-white dark:bg-gray-900 divide-y divide-gray-200 dark:divide-gray-700">
{data.map((row, rowIndex) => (
<tr key={keyExtractor ? keyExtractor(row) : rowIndex} className="hover:bg-gray-50 dark:hover:bg-gray-800">
<div className={clsx('space-y-3', className)}>
<div className="grid gap-3 md:hidden">
{data.map((row, rowIndex) => (
<div
key={keyExtractor ? keyExtractor(row) : rowIndex}
className="rounded-xl border border-gray-200 bg-white p-4 shadow-sm dark:border-gray-700 dark:bg-gray-900"
>
<dl className="space-y-3">
{columns.map((column, colIndex) => (
<td
key={colIndex}
<div key={colIndex} className="space-y-1">
<dt className="text-[11px] font-semibold uppercase tracking-wide text-gray-500 dark:text-gray-400">
{column.header}
</dt>
<dd className={clsx('min-w-0 text-sm text-gray-900 dark:text-gray-100', column.className)}>
{column.accessor(row)}
</dd>
</div>
))}
</dl>
</div>
))}
</div>
<div className="hidden overflow-x-auto md:block">
<table className="min-w-full divide-y divide-gray-200 dark:divide-gray-700">
<thead className="bg-gray-50 dark:bg-gray-800">
<tr>
{columns.map((column, index) => (
<th
key={index}
className={clsx(
'px-6 py-4 whitespace-nowrap text-sm text-gray-900 dark:text-gray-100',
'px-4 py-3 text-left text-xs font-medium uppercase tracking-wider text-gray-500 dark:text-gray-400 lg:px-6',
column.className
)}
>
{column.accessor(row)}
</td>
{column.header}
</th>
))}
</tr>
))}
</tbody>
</table>
</thead>
<tbody className="divide-y divide-gray-200 bg-white dark:divide-gray-700 dark:bg-gray-900">
{data.map((row, rowIndex) => (
<tr key={keyExtractor ? keyExtractor(row) : rowIndex} className="hover:bg-gray-50 dark:hover:bg-gray-800">
{columns.map((column, colIndex) => (
<td
key={colIndex}
className={clsx(
'px-4 py-4 align-top text-sm text-gray-900 dark:text-gray-100 lg:px-6',
column.className
)}
>
{column.accessor(row)}
</td>
))}
</tr>
))}
</tbody>
</table>
</div>
</div>
)
}

View File

@@ -4,8 +4,8 @@ const nextConfig = {
output: 'standalone',
// If you see a workspace lockfile warning: align on one package manager (npm or pnpm) in frontend, or ignore for dev/build.
env: {
NEXT_PUBLIC_API_URL: process.env.NEXT_PUBLIC_API_URL || 'http://localhost:8080',
NEXT_PUBLIC_CHAIN_ID: process.env.NEXT_PUBLIC_CHAIN_ID || '138',
NEXT_PUBLIC_API_URL: process.env.NEXT_PUBLIC_API_URL ?? '',
NEXT_PUBLIC_CHAIN_ID: process.env.NEXT_PUBLIC_CHAIN_ID ?? '138',
},
}

View File

@@ -8,7 +8,7 @@
"build": "next build",
"build:check": "npm run lint && npm run type-check && npm run build",
"smoke:routes": "node ./scripts/smoke-routes.mjs",
"start": "PORT=${PORT:-3000} node .next/standalone/server.js",
"start": "PORT=${PORT:-3000} node ./scripts/start-standalone.mjs",
"start:next": "next start",
"lint": "next lint",
"type-check": "tsc --noEmit -p tsconfig.check.json",

View File

@@ -0,0 +1,696 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Chain 138 — Visual Command Center</title>
<!-- Mermaid: local copy (vendor via explorer-monorepo/scripts/vendor-mermaid-for-command-center.sh). CDN fallback: jsdelivr mermaid@10 -->
<script src="/thirdparty/mermaid.min.js"></script>
<style>
:root {
--bg: #0b0f14;
--panel: #0f172a;
--header: #111827;
--border: #1f2937;
--text: #e6edf3;
--muted: #94a3b8;
--accent: #2563eb;
--accent-hover: #1d4ed8;
}
* { box-sizing: border-box; }
body {
margin: 0;
font-family: system-ui, -apple-system, Segoe UI, Roboto, Ubuntu, sans-serif;
background: var(--bg);
color: var(--text);
min-height: 100vh;
}
header {
padding: 1rem 1.25rem;
background: var(--header);
border-bottom: 1px solid var(--border);
}
header h1 {
margin: 0;
font-size: 1.25rem;
font-weight: 700;
}
header p {
margin: 0.35rem 0 0;
font-size: 0.875rem;
color: var(--muted);
max-width: 52rem;
line-height: 1.45;
}
.toolbar {
display: flex;
flex-wrap: wrap;
align-items: center;
gap: 0.5rem;
padding: 0.65rem 1rem;
border-bottom: 1px solid var(--border);
background: rgba(17, 24, 39, 0.85);
position: sticky;
top: 0;
z-index: 10;
}
.tabs {
display: flex;
flex-wrap: wrap;
gap: 0.25rem;
}
.tab {
padding: 0.5rem 0.85rem;
cursor: pointer;
border: 1px solid transparent;
border-radius: 8px;
font-size: 0.8125rem;
font-weight: 600;
color: var(--muted);
background: transparent;
}
.tab:hover {
color: var(--text);
background: var(--border);
}
.tab.active {
color: #fff;
background: var(--accent);
border-color: var(--accent-hover);
}
.toolbar a.back {
margin-left: auto;
font-size: 0.8125rem;
color: #93c5fd;
text-decoration: none;
}
.toolbar a.back:hover { text-decoration: underline; }
.content {
display: none;
padding: 1.25rem;
max-width: 120rem;
margin: 0 auto;
}
.content.active { display: block; }
.panel-desc {
color: var(--muted);
font-size: 0.875rem;
line-height: 1.5;
margin-bottom: 1rem;
max-width: 56rem;
}
.mermaid-wrap {
background: var(--panel);
padding: 1.25rem;
border-radius: 12px;
border: 1px solid var(--border);
overflow-x: auto;
margin-bottom: 1.25rem;
}
.mermaid-wrap h3 {
margin: 0 0 0.75rem;
font-size: 0.95rem;
font-weight: 700;
color: #cbd5e1;
}
.mermaid-wrap + .mermaid-wrap { margin-top: 0.5rem; }
footer {
padding: 1.5rem;
border-top: 1px solid var(--border);
font-size: 0.75rem;
color: var(--muted);
text-align: center;
}
footer code { color: #a5b4fc; }
</style>
</head>
<body>
<header>
<h1>Chain 138 — deployment and liquidity topology</h1>
<p>Operator-style view of the architecture in <code>docs/02-architecture/SMOM_DBIS_138_FULL_DEPLOYMENT_FLOW_MAP.md</code>. Diagrams are informational only; contract addresses live in explorer config and repo references. The live Mission Control visual surfaces remain in the main explorer SPA. Deep links: <code>?tab=mission-control</code> or numeric <code>?tab=0</code>&ndash;<code>?tab=8</code> (one slug per tab).</p>
</header>
<div class="toolbar">
<div class="tabs" role="tablist" aria-label="Topology panels">
<button type="button" id="tab-0" class="tab active" role="tab" aria-selected="true" aria-controls="panel-0" data-tab="0" tabindex="0">Master map</button>
<button type="button" id="tab-1" class="tab" role="tab" aria-selected="false" aria-controls="panel-1" data-tab="1" tabindex="-1">Network</button>
<button type="button" id="tab-2" class="tab" role="tab" aria-selected="false" aria-controls="panel-2" data-tab="2" tabindex="-1">Stack</button>
<button type="button" id="tab-3" class="tab" role="tab" aria-selected="false" aria-controls="panel-3" data-tab="3" tabindex="-1">Flows</button>
<button type="button" id="tab-4" class="tab" role="tab" aria-selected="false" aria-controls="panel-4" data-tab="4" tabindex="-1">Cross-chain</button>
<button type="button" id="tab-5" class="tab" role="tab" aria-selected="false" aria-controls="panel-5" data-tab="5" tabindex="-1">Public cW</button>
<button type="button" id="tab-6" class="tab" role="tab" aria-selected="false" aria-controls="panel-6" data-tab="6" tabindex="-1">Off-chain</button>
<button type="button" id="tab-7" class="tab" role="tab" aria-selected="false" aria-controls="panel-7" data-tab="7" tabindex="-1">Integrations</button>
<button type="button" id="tab-8" class="tab" role="tab" aria-selected="false" aria-controls="panel-8" data-tab="8" tabindex="-1">Mission Control</button>
</div>
<a class="back" href="/more">Back to More</a>
</div>
<!-- 0 Master -->
<div class="content active" id="panel-0" role="tabpanel" aria-labelledby="tab-0">
<p class="panel-desc">Hub, leaf endings, CCIP destinations, Alltra, the dedicated Avalanche cW corridor, the public cW mesh, and pending programs. Mainnet cW mint corridors and the optional TRUU rail are summarized under the Ethereum anchor.</p>
<div class="mermaid-wrap"><div class="mermaid" id="g-master">
flowchart TB
subgraph LEAF_INGRESS["Leaves — access to 138"]
WU[Wallets · MetaMask Snaps · Ledger · Chainlist · SDKs · ethers.js]
OPS[Operators · Foundry scripts · relay · systemd · deploy hooks]
RPCPUB[Public RPC FQDNs · thirdweb mirrors]
FB[Fireblocks Web3 RPC]
end
subgraph LEAF_EDGE["Leaves — services that index or front 138"]
EXP[Explorer · Blockscout · token-aggregation]
INFO[info.defi-oracle.io]
DAPP[dapp.d-bis.org bridge UI]
DBIS[dbis-api Core hosts]
X402[x402 payment API]
MCP[MCP PMM controller]
end
subgraph HUB["CHAIN 138 — origin hub"]
C138["Besu EVM · tokens core · DODO PMM V2/V3 · RouterV2 · UniV3 / Balancer / Curve / 1inch pilots · CCIP bridges + router · AlltraAdapter · BridgeVault · ISO channels · mirror reserve vault settlement · Lockbox · Truth / Tron / Solana adapters"]
end
subgraph CCIP_ETH["Ethereum 1 — CCIP anchor"]
ETH1["WETH9 / WETH10 bridges · CCIPRelayRouter · RelayBridge · Logger · optional trustless stack"]
LEAF_ETH["Leaf — Mainnet native DEX venues · Li.Fi touchpoints on other chains · first-wave cW DODO pools · optional TRUU PMM rail"]
end
subgraph CCIP_L2["Other live CCIP EVM destinations"]
L2CLU["OP 10 · Base 8453 · Arb 42161 · Polygon 137 · BSC 56 · Avax 43114 · Gnosis 100 · Celo 42220 · Cronos 25"]
LEAF_L2["Leaf — per-chain native DEX · cW token transport · partial edge pools"]
end
subgraph ALLTRA["ALL Mainnet 651940"]
A651["AlltraAdapter peer · AUSDT · WETH · WALL · HYDX · DEX env placeholders"]
LEAF_651["Leaf — ALL native venues when configured"]
end
subgraph SPECIAL["Dedicated corridor from 138"]
AVAXCW["138 cUSDT to Avax cWUSDT mint path"]
LEAF_AVAX["Leaf — recipient on 43114"]
end
subgraph CW_MESH["Public cW GRU mesh"]
CW["Cross-public-EVM token matrix · pool design · Mainnet DODO concentration"]
end
subgraph PENDING["Pending separate scaffold"]
WEMIX[Wemix 1111 CCIP pending]
XDC[XDC Zero parallel program]
SCAFF[Etherlink Tezos OP L2 design]
PNON[Truth pointer · Tron adapter · Solana partial]
end
WU --> RPCPUB
RPCPUB --> C138
WU --> C138
OPS --> C138
EXP --> C138
INFO --> C138
DAPP --> C138
DBIS --> C138
X402 --> C138
MCP --> C138
FB --> C138
C138 <--> ETH1
C138 <--> L2CLU
C138 <--> A651
C138 --> AVAXCW
AVAXCW --> LEAF_AVAX
ETH1 <--> L2CLU
ETH1 --> LEAF_ETH
L2CLU --> LEAF_L2
A651 --> LEAF_651
CW -.->|pool and peg design| LEAF_ETH
CW -.->|token mesh| L2CLU
C138 -.-> WEMIX
C138 -.-> XDC
C138 -.-> SCAFF
C138 -.-> PNON
</div></div>
</div>
<!-- 1 Network -->
<div class="content" id="panel-1" role="tabpanel" aria-labelledby="tab-1" hidden>
<p class="panel-desc">Chain 138 to the public EVM mesh, Alltra, pending or scaffold targets, Avalanche cW minting, and the separate Mainnet cW mint corridor that sits alongside the standard WETH-class CCIP rail.</p>
<div class="mermaid-wrap"><div class="mermaid">
flowchart TB
subgraph C138["Chain 138 — primary"]
CORE[Core registry vault oracle ISO router]
PMM[DODO PMM V2 DVM + pools]
R2[EnhancedSwapRouterV2]
D3[D3MM pilot]
CCIPB[CCIP WETH9 WETH10 bridges]
ALLA[AlltraAdapter]
ADP[Truth Tron Solana adapters partial]
end
subgraph PUB["Public EVM mesh (cW*)"]
E1[Ethereum 1]
E10[Optimism 10]
E25[Cronos 25]
E56[BSC 56]
E100[Gnosis 100]
E137[Polygon 137]
E42161[Arbitrum 42161]
E43114[Avalanche 43114]
E8453[Base 8453]
E42220[Celo 42220]
end
subgraph PEND["Pending or separate"]
WEMIX[Wemix 1111 CCIP pending]
XDC[XDC Zero parallel program]
SCAFF[Etherlink Tezos OP L2 scaffold design]
end
subgraph A651["ALL Mainnet 651940"]
ALLTOK[AUSDT USDC WETH WALL HYDX]
end
C138 -->|CCIP WETH| PUB
C138 -->|CCIP WETH| E1
C138 -->|mainnet cW mint corridor| E1
C138 -->|AlltraAdapter| A651
PUB -->|CCIP return| C138
E1 -->|CCIP return| C138
C138 -.->|operator completion| WEMIX
C138 -.->|not CCIP matrix row| XDC
C138 -.->|future gated| SCAFF
C138 -->|avax cw corridor| E43114
</div></div>
<p class="panel-desc">Topology note: Mainnet now runs two Ethereum-facing patterns in production: the standard WETH-class CCIP rail and the dedicated <code>cUSDC/cUSDT -&gt; cWUSDC/cWUSDT</code> mint corridor.</p>
</div>
<!-- 2 Stack -->
<div class="content" id="panel-2" role="tabpanel" aria-labelledby="tab-2" hidden>
<p class="panel-desc">On-chain layers: tokens, core, liquidity, cross-domain, reserve and settlement.</p>
<div class="mermaid-wrap"><div class="mermaid">
flowchart TB
subgraph L1["Tokens and compliance"]
CT[cUSDT · cUSDC · cEUR* · cXAU* · mirrors · USDT · USDC]
GEN[WETH WETH10 LINK]
end
subgraph L2["Core infrastructure"]
REG[Compliance TokenFactory TokenRegistry BridgeVault]
POL[PolicyManager DebtRegistry FeeCollector]
ISO[ISO20022Router]
end
subgraph L3["Liquidity and execution"]
DVM[DVMFactory VendingMachine DODOPMMIntegration]
PRV[DODOPMMProvider PrivatePoolRegistry]
R2[EnhancedSwapRouterV2]
VEN[Uniswap v3 lane Balancer Curve 1inch pilots]
D3[D3Oracle D3Vault D3Proxy D3MMFactory]
end
subgraph L4["Cross-domain"]
CCIP[CCIP Router CCIPWETH9 CCIPWETH10]
ALL[AlltraAdapter]
LBX[Lockbox138]
CH[PaymentChannel Mirror AddressMapper]
end
subgraph L5["Reserve vault settlement"]
RS[ReserveSystem OraclePriceFeed]
VF[VaultFactory Ledger Liquidation XAUOracle]
MSR[MerchantSettlementRegistry WithdrawalEscrow]
end
L1 --> L2
L2 --> L3
L3 --> R2
R2 --> VEN
L2 --> L4
L2 --> L5
DVM --> PRV
</div></div>
</div>
<!-- 3 Flows -->
<div class="content" id="panel-3" role="tabpanel" aria-labelledby="tab-3" hidden>
<p class="panel-desc">Same-chain 138: PMM pools, RouterV2 venues, D3 pilot.</p>
<div class="mermaid-wrap"><div class="mermaid">
flowchart LR
subgraph inputs["Typical inputs"]
U1[cUSDT]
U2[cUSDC]
U3[USDT mirror]
U4[USDC mirror]
U5[cEURT]
U6[cXAUC]
end
subgraph path_pmm["DODO PMM"]
INT[DODOPMMIntegration]
POOL[Stable pools XAU public pools Private XAU registry]
end
subgraph path_r2["Router v2"]
R2[EnhancedSwapRouterV2]
UV3[Uniswap v3 WETH stable]
PILOT[Balancer Curve 1inch]
end
subgraph path_d3["Pilot"]
D3[D3MM WETH10 pilot pool]
end
inputs --> INT
INT --> POOL
inputs --> R2
R2 --> UV3
R2 --> PILOT
GEN2[WETH WETH10] --> R2
GEN2 --> D3
</div></div>
</div>
<!-- 4 Cross-chain -->
<div class="content" id="panel-4" role="tabpanel" aria-labelledby="tab-4" hidden>
<p class="panel-desc">CCIP transport, Alltra round-trip, the dedicated c-to-cW mint corridors, and the orchestrated swap-bridge-swap target.</p>
<div class="mermaid-wrap">
<h3>CCIP — WETH primary transport</h3>
<div class="mermaid">
sequenceDiagram
participant U as User or bot
participant C138 as Chain 138
participant BR as CCIPWETH9 or WETH10 bridge
participant R as CCIP Router
participant D as Destination EVM
U->>C138: Fund WETH bridge fee LINK
U->>BR: Initiate cross-chain WETH transfer
BR->>R: CCIP message
R->>D: Deliver WETH class asset
Note over D: Native DEX or cW pools where deployed
D->>R: Optional return leg
R->>C138: Inbound to receiver bridge
</div>
</div>
<div class="mermaid-wrap">
<h3>Alltra — 138 to ALL Mainnet</h3>
<div class="mermaid">
flowchart LR
A[Chain 138] -->|AlltraAdapter| B[ALL 651940]
B -->|AlltraAdapter| A
</div>
</div>
<div class="mermaid-wrap">
<h3>Special corridors — c* to cW* mint</h3>
<div class="mermaid">
flowchart LR
S1[cUSDT on 138] -->|avax cw relay mint| T1[cWUSDT on Avalanche]
S2[cUSDC on 138] -->|mainnet relay mint| T2[cWUSDC on Mainnet]
S3[cUSDT on 138] -->|mainnet relay mint| T3[cWUSDT on Mainnet]
</div>
</div>
<div class="mermaid-wrap">
<h3>Orchestrated swap-bridge-swap (design target)</h3>
<div class="mermaid">
flowchart LR
Q[QuoteService POST api bridge quote] --> S1[Source leg e.g. 138 PMM]
S1 --> BR[Bridge CCIP Alltra or special]
BR --> S2[Destination leg DEX or cW pool]
</div>
</div>
</div>
<!-- 5 Public cW -->
<div class="content" id="panel-5" role="tabpanel" aria-labelledby="tab-5" hidden>
<p class="panel-desc">Ethereum Mainnet first-wave cW DODO mesh, plus the separate optional TRUU PMM rail. See PMM_DEX_ROUTING_STATUS and cross-chain-pmm-lps deployment-status for live detail.</p>
<div class="mermaid-wrap"><div class="mermaid">
flowchart TB
subgraph ETH["Ethereum Mainnet"]
CW[cWUSDT cWUSDC cWEURC cWGBPC cWAUDC cWCADC cWJPYC cWCHFC]
HUB[USDC USDT]
DODO[DODO PMM Wave 1 pools]
end
CW <--> DODO
HUB <--> DODO
</div></div>
<p class="panel-desc">TRUU note: the optional Mainnet Truth rail is a separate volatile PMM lane and is not part of the default cW stable mesh.</p>
<div class="mermaid-wrap">
<h3>Mainnet TRUU PMM (volatile, optional)</h3>
<div class="mermaid">
flowchart LR
subgraph TRUUmesh["Mainnet TRUU rail optional"]
CWu[cWUSDT or cWUSDC]
TRUU[TRUU ERC-20]
PMM[DODO PMM integration]
end
CWu <--> PMM
TRUU <--> PMM
</div>
</div>
</div>
<!-- 6 Off-chain -->
<div class="content" id="panel-6" role="tabpanel" aria-labelledby="tab-6" hidden>
<p class="panel-desc">Wallets, edge FQDNs, APIs, operators feeding Chain 138 RPC, plus the explorer-hosted Mission Control visual surfaces.</p>
<div class="mermaid-wrap"><div class="mermaid">
flowchart TB
subgraph users["Wallets and tools"]
MM[MetaMask custom network Snaps]
MCP[MCP PMM controller allowlist 138]
end
subgraph edge["Public edge"]
EXP[explorer.d-bis.org Blockscout token-aggregation]
MC[Mission Control visual panels]
INFO[info.defi-oracle.io]
DAPP[dapp.d-bis.org bridge UI]
RPC[rpc-http-pub.d-bis.org public RPC]
end
subgraph api["APIs"]
TA[token-aggregation v1 v2 quote pools bridge routes]
DBIS[dbis-api Core runtime]
X402[x402-api readiness surface]
end
subgraph ops["Operator"]
REL[CCIP relay systemd]
SCR[smom-dbis-138 forge scripts]
end
users --> edge
EXP --> MC
edge --> api
MC --> api
api --> C138[Chain 138 RPC]
ops --> C138
</div></div>
<p class="panel-desc">Mission Control note: the live visual display lives in the main explorer SPA, especially the bridge-monitoring and operator surfaces. This command center stays focused on the static architecture view.</p>
</div>
<!-- 7 Integrations -->
<div class="content" id="panel-7" role="tabpanel" aria-labelledby="tab-7" hidden>
<p class="panel-desc">Contract families vs wallet/client integrations not spelled out in every zoom diagram. Wormhole remains docs/MCP scope, not canonical 138 addresses.</p>
<div class="mermaid-wrap"><div class="mermaid">
flowchart LR
subgraph chain138_tech["Chain 138 contract families"]
A[Besu EVM]
B[ERC-20 core registries]
C[DODO V2 V3]
D[UniV3 Bal Curve 1inch pilots]
E[CCIP bridges router]
F[Alltra Vault ISO channels]
end
subgraph public_integrations["Wallet and client integrations"]
L[Ledger]
CL[Chainlist]
TW[thirdweb RPC]
ETH[ethers.js]
MM[MetaMask Snaps]
end
chain138_tech --> public_integrations
</div></div>
</div>
<!-- 8 Mission Control -->
<div class="content" id="panel-8" role="tabpanel" aria-labelledby="tab-8" hidden>
<p class="panel-desc">Mission Control is the live explorer surface for SSE health, labeled bridge traces, cached liquidity proxy results, and operator-facing API references. The interactive controls live in the main explorer SPA; this tab is the architecture companion with direct entry points.</p>
<div class="mermaid-wrap">
<h3>Mission Control visual flow</h3>
<div class="mermaid">
flowchart LR
UI[Explorer SPA Mission Control panels]
SSE[SSE stream]
TRACE[Bridge trace]
LIQ[Liquidity proxy]
T4[Track 4 script API]
API[Explorer Go API]
UP[Blockscout and token-aggregation upstreams]
UI --> SSE
UI --> TRACE
UI --> LIQ
UI -.->|operator-only| T4
SSE --> API
TRACE --> API
LIQ --> API
T4 --> API
TRACE --> UP
LIQ --> UP
</div>
</div>
<div class="mermaid-wrap">
<h3>Live entry points</h3>
<p class="panel-desc">Use the main explorer UI for the visual Mission Control experience, then open the raw APIs when you need direct payloads or verification.</p>
<div style="display:grid; grid-template-columns:repeat(auto-fit, minmax(220px, 1fr)); gap:0.75rem;">
<a href="/operator" style="display:block; text-decoration:none; color:inherit; border:1px solid var(--border); border-radius:14px; padding:1rem; background:var(--panel);"><div style="font-weight:700; margin-bottom:0.3rem;">Operator hub</div><div style="color:var(--muted); line-height:1.5;">Explorer SPA surface with Mission Control and operator-facing API references.</div></a>
<a href="/bridge" style="display:block; text-decoration:none; color:inherit; border:1px solid var(--border); border-radius:14px; padding:1rem; background:var(--panel);"><div style="font-weight:700; margin-bottom:0.3rem;">Bridge monitoring</div><div style="color:var(--muted); line-height:1.5;">Includes the visible Mission Control bridge-trace card and SSE stream entry point.</div></a>
<a href="/explorer-api/v1/mission-control/stream" target="_blank" rel="noopener noreferrer" style="display:block; text-decoration:none; color:inherit; border:1px solid var(--border); border-radius:14px; padding:1rem; background:var(--panel);"><div style="font-weight:700; margin-bottom:0.3rem;">SSE stream</div><div style="color:var(--muted); line-height:1.5;"><code>GET /explorer-api/v1/mission-control/stream</code></div></a>
<a href="/explorer-api/v1/mission-control/bridge/trace?tx=0x2f31d4f9a97be754b800f4af1a9eedf3b107d353bfa1a19e81417497a76c05c2" target="_blank" rel="noopener noreferrer" style="display:block; text-decoration:none; color:inherit; border:1px solid var(--border); border-radius:14px; padding:1rem; background:var(--panel);"><div style="font-weight:700; margin-bottom:0.3rem;">Bridge trace example</div><div style="color:var(--muted); line-height:1.5;"><code>GET /explorer-api/v1/mission-control/bridge/trace</code></div></a>
<a href="/explorer-api/v1/mission-control/liquidity/token/0x93E66202A11B1772E55407B32B44e5Cd8eda7f22/pools" target="_blank" rel="noopener noreferrer" style="display:block; text-decoration:none; color:inherit; border:1px solid var(--border); border-radius:14px; padding:1rem; background:var(--panel);"><div style="font-weight:700; margin-bottom:0.3rem;">Liquidity example</div><div style="color:var(--muted); line-height:1.5;"><code>GET /explorer-api/v1/mission-control/liquidity/token/{address}/pools</code></div></a>
<div style="border:1px solid var(--border); border-radius:14px; padding:1rem; background:var(--panel);"><div style="font-weight:700; margin-bottom:0.3rem;">Track 4 script API</div><div style="color:var(--muted); line-height:1.5;"><code>POST /explorer-api/v1/track4/operator/run-script</code><br>Requires wallet auth, IP allowlisting, and backend allowlist config.</div></div>
</div>
</div>
</div>
<footer>
Source: <code>proxmox/docs/02-architecture/SMOM_DBIS_138_FULL_DEPLOYMENT_FLOW_MAP.md</code> — addresses: <code>config/smart-contracts-master.json</code> and CONTRACT_ADDRESSES_REFERENCE.
</footer>
<script>
(function () {
// Configure Mermaid once up front. startOnLoad is off so diagrams are
// rendered lazily per-tab via mermaid.run(); rendering inside a hidden
// panel would measure at zero width.
mermaid.initialize({
startOnLoad: false,
theme: 'dark',
securityLevel: 'loose',
flowchart: { curve: 'basis', padding: 12 },
sequence: { actorMargin: 24, boxMargin: 8 }
});
// Slug order must match the document order of .tab buttons / panel-N ids.
var TAB_SLUGS = ['master', 'network', 'stack', 'flows', 'cross-chain', 'public-cw', 'off-chain', 'integrations', 'mission-control'];
// Reverse lookup: slug -> tab index, built from TAB_SLUGS.
var TAB_BY_NAME = {};
TAB_SLUGS.forEach(function (s, i) { TAB_BY_NAME[s] = i; });
// Shared DOM handles used by every function below.
var tabs = document.querySelectorAll('.tab');
var panels = document.querySelectorAll('.content[role="tabpanel"]');
var tablist = document.querySelector('[role="tablist"]');
// Tracks which panels have already had their Mermaid diagrams rendered.
var done = {};
// Resolve the initial tab index from the ?tab= query parameter.
// Accepts a numeric index or a slug (case/whitespace tolerant);
// anything missing or unrecognized falls back to tab 0.
function parseInitialTab() {
  var raw = new URLSearchParams(window.location.search).get('tab');
  if (raw == null || raw === '') return 0;
  var asIndex = parseInt(raw, 10);
  if (!isNaN(asIndex) && asIndex >= 0 && asIndex < tabs.length) {
    return asIndex;
  }
  var slug = String(raw).toLowerCase().trim().replace(/\s+/g, '-');
  return Object.prototype.hasOwnProperty.call(TAB_BY_NAME, slug) ? TAB_BY_NAME[slug] : 0;
}
// Reflect the active tab in the URL as ?tab=<slug> (or the raw index when
// no slug exists) without creating a new history entry.
function syncUrl(index) {
  var slug = TAB_SLUGS[index] == null ? String(index) : TAB_SLUGS[index];
  try {
    var url = new URL(window.location.href);
    url.searchParams.set('tab', slug);
    history.replaceState(null, '', url.pathname + url.search + url.hash);
  } catch (err) {
    // URL/history APIs can throw under file:// or restricted contexts; ignore.
  }
}
// Toggle the active tab button and its panel, keeping ARIA state,
// roving tabindex, and the hidden attribute in sync.
// Out-of-range indices are clamped; NaN (e.g. from a missing or
// malformed data-tab attribute) is normalized to 0 — previously NaN
// slipped past both range checks and deselected every tab and panel.
function setActive(index) {
if (isNaN(index) || index < 0) index = 0;
if (index >= tabs.length) index = tabs.length - 1;
tabs.forEach(function (t, i) {
var on = i === index;
t.classList.toggle('active', on);
t.setAttribute('aria-selected', on ? 'true' : 'false');
// Roving tabindex: only the active tab is in the tab order.
t.setAttribute('tabindex', on ? '0' : '-1');
});
panels.forEach(function (p, i) {
var on = i === index;
p.classList.toggle('active', on);
// hidden must track the active class so assistive tech agrees with CSS.
if (on) p.removeAttribute('hidden');
else p.setAttribute('hidden', '');
});
}
// Render the Mermaid diagrams in panel-<index> exactly once; subsequent
// calls are no-ops thanks to the done map.
async function renderPanel(index) {
  if (done[index]) return;
  var panel = document.getElementById('panel-' + index);
  if (!panel) return;
  done[index] = true;
  var diagrams = panel.querySelectorAll('.mermaid');
  if (!diagrams.length) return;
  try {
    await mermaid.run({ nodes: diagrams });
  } catch (err) {
    // A broken diagram must not break tab switching; log and continue.
    console.error('Mermaid render failed for panel', index, err);
  }
}
// Activate a tab: flip the active state, lazily render its diagrams, and
// (unless opts.skipUrl) mirror the selection into the URL.
async function showTab(index, opts) {
  var options = opts || {};
  setActive(index);
  await renderPanel(index);
  if (!options.skipUrl) {
    syncUrl(index);
  }
}
// Pointer activation: each tab button carries its index in data-tab.
tabs.forEach(function (tabEl) {
  tabEl.addEventListener('click', function () {
    showTab(parseInt(tabEl.getAttribute('data-tab'), 10));
  });
});
if (tablist) {
  // Keyboard support per the WAI-ARIA tabs pattern:
  // ArrowLeft/ArrowRight cycle with wraparound, Home/End jump to the ends.
  tablist.addEventListener('keydown', function (e) {
    var current = -1;
    tabs.forEach(function (t, idx) {
      if (t.getAttribute('aria-selected') === 'true') current = idx;
    });
    if (current < 0) return;
    var target;
    switch (e.key) {
      case 'ArrowRight':
        target = (current + 1) % tabs.length;
        break;
      case 'ArrowLeft':
        target = (current - 1 + tabs.length) % tabs.length;
        break;
      case 'Home':
        target = 0;
        break;
      case 'End':
        target = tabs.length - 1;
        break;
      default:
        return; // Unhandled key: let the browser have it.
    }
    e.preventDefault();
    // Move focus only after the tab has rendered so focus lands on a
    // visible, active control.
    showTab(target).then(function () {
      tabs[target].focus();
    });
  });
}
// First paint: honor ?tab= for the initial selection, but only rewrite the
// URL when the parameter was already present (keeps clean links clean).
async function boot() {
  var initial = parseInitialTab();
  await showTab(initial, { skipUrl: true });
  try {
    if (new URL(window.location.href).searchParams.has('tab')) {
      syncUrl(initial);
    }
  } catch (err) {
    // new URL() can throw in restricted contexts; nothing to sync then.
  }
}
// Defer boot until the DOM is parsed; if this script executes after
// DOMContentLoaded has already fired, run boot immediately.
if (document.readyState === 'loading') {
document.addEventListener('DOMContentLoaded', boot);
} else {
boot();
}
})();
</script>
</body>
</html>

Some files were not shown because too many files have changed in this diff Show More