Add full monorepo: virtual-banker, backend, frontend, docs, scripts, deployment

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
defiQUG
2026-02-10 11:32:49 -08:00
parent aafcd913c2
commit 88bc76da91
815 changed files with 125522 additions and 264 deletions

25
backend/Dockerfile.api Normal file
View File

@@ -0,0 +1,25 @@
# Multi-stage build for the REST API server.
# Stage 1: compile a static Go binary.
FROM golang:1.21-alpine AS builder
WORKDIR /app
# Copy go mod files first so the dependency-download layer is cached
# independently of source changes.
COPY go.mod go.sum ./
RUN go mod download
# Copy source code
COPY . .
# Build API server
WORKDIR /app/api/rest
# CGO disabled so the binary is statically linked and runs on plain alpine.
RUN CGO_ENABLED=0 GOOS=linux go build -o api .
# Stage 2: minimal runtime image with just the binary and CA certs
# (needed for outbound TLS connections).
FROM alpine:latest
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/api/rest/api .
CMD ["./api"]

View File

@@ -0,0 +1,25 @@
# Multi-stage build for the chain indexer.
# Stage 1: compile a static Go binary.
FROM golang:1.21-alpine AS builder
WORKDIR /app
# Copy go mod files first so the dependency-download layer is cached
# independently of source changes.
COPY go.mod go.sum ./
RUN go mod download
# Copy source code
COPY . .
# Build indexer
WORKDIR /app/indexer
# CGO disabled so the binary is statically linked and runs on plain alpine.
RUN CGO_ENABLED=0 GOOS=linux go build -o indexer .
# Stage 2: minimal runtime image with just the binary and CA certs.
FROM alpine:latest
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/indexer/indexer .
CMD ["./indexer"]

227
backend/README_TESTING.md Normal file
View File

@@ -0,0 +1,227 @@
# Testing Guide
## Backend API Testing Documentation
This document describes the testing infrastructure for the SolaceScanScout backend.
---
## Test Structure
```
backend/
├── api/
│ ├── rest/
│ │ └── api_test.go # REST API integration tests
│ └── track1/
│ ├── cache_test.go # Cache unit tests
│ └── rate_limiter_test.go # Rate limiter unit tests
├── benchmarks/
│ └── benchmark_test.go # Performance benchmarks
└── README_TESTING.md # This file
```
---
## Running Tests
### Unit Tests
```bash
# Run all tests
go test ./...
# Run tests with coverage
go test -cover ./...
# Run tests with verbose output
go test -v ./...
# Run specific test
go test -v ./api/track1 -run TestInMemoryCache_GetSet
```
### Integration Tests
```bash
# Run integration tests (requires test database)
go test -tags=integration ./api/rest/...
# With database connection
DB_HOST=localhost DB_USER=test DB_PASSWORD=test DB_NAME=test go test -tags=integration ./api/rest/...
```
### Benchmarks
```bash
# Run all benchmarks
go test -bench=. ./benchmarks/...
# Run specific benchmark
go test -bench=BenchmarkInMemoryCache_Get ./benchmarks/...
# With memory profiling
go test -bench=. -benchmem ./benchmarks/...
```
---
## Test Coverage
### Current Coverage
- ✅ Cache: Unit tests for in-memory cache
- ✅ Rate Limiter: Unit tests for in-memory rate limiter
- ✅ API Endpoints: Integration tests for REST API
- ⚠️ Database: Requires test database setup
- ⚠️ Redis: Requires Redis test instance
### Coverage Goals
- **Unit Tests**: 80%+ coverage
- **Integration Tests**: All critical paths
- **E2E Tests**: Core user flows
---
## Test Database Setup
### Option 1: Docker Test Database
```bash
# Start test database
docker run -d \
--name test-postgres \
-e POSTGRES_USER=test \
-e POSTGRES_PASSWORD=test \
-e POSTGRES_DB=test \
-p 5433:5432 \
postgres:16
# Run migrations
cd database/migrations
go run migrate.go --up
# Run tests
DB_HOST=localhost DB_PORT=5433 DB_USER=test DB_PASSWORD=test DB_NAME=test go test ./...
```
### Option 2: Local Test Database
```bash
# Create test database
createdb test_explorer
# Run migrations
cd database/migrations
go run migrate.go --up
# Run tests
DB_HOST=localhost DB_USER=postgres DB_NAME=test_explorer go test ./...
```
---
## Writing Tests
### Unit Test Example
```go
func TestInMemoryCache_GetSet(t *testing.T) {
cache := track1.NewInMemoryCache()
key := "test-key"
value := []byte("test-value")
ttl := 5 * time.Minute
// Test Set
err := cache.Set(key, value, ttl)
require.NoError(t, err)
// Test Get
retrieved, err := cache.Get(key)
require.NoError(t, err)
assert.Equal(t, value, retrieved)
}
```
### Integration Test Example
```go
func TestListBlocks(t *testing.T) {
_, mux := setupTestServer(t)
req := httptest.NewRequest("GET", "/api/v1/blocks?limit=10&page=1", nil)
w := httptest.NewRecorder()
mux.ServeHTTP(w, req)
assert.Equal(t, http.StatusOK, w.Code)
}
```
---
## Continuous Integration
### GitHub Actions Example
```yaml
name: Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
services:
postgres:
image: postgres:16
env:
POSTGRES_USER: test
POSTGRES_PASSWORD: test
POSTGRES_DB: test
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.21'
- run: go test -v -cover ./...
```
---
## Best Practices
1. **Use Table-Driven Tests**: For multiple test cases
2. **Test Edge Cases**: Empty inputs, boundary values, errors
3. **Mock External Dependencies**: Database, Redis, RPC calls
4. **Clean Up**: Use `defer` for cleanup operations
5. **Parallel Tests**: Use `t.Parallel()` for independent tests
6. **Test Names**: Use descriptive names: `TestFunctionName_Scenario_ExpectedResult`
---
## Troubleshooting
### Tests Failing
1. **Check Database Connection**: Ensure test database is running
2. **Check Environment Variables**: Verify test configuration
3. **Check Test Isolation**: Ensure tests don't interfere with each other
4. **Check Logs**: Review test output for error messages
### Slow Tests
1. **Use Test Database**: Don't use production database
2. **Parallel Execution**: Enable `-parallel` flag
3. **Skip Integration Tests**: Use build tags to skip slow tests
4. **Mock External Services**: Don't make real network calls
---
**Last Updated**: 2026-02-10

View File

@@ -0,0 +1,100 @@
package analytics
import (
"context"
"github.com/jackc/pgx/v5/pgxpool"
)
// AddressRiskAnalyzer analyzes address risk based on aggregate activity
// statistics stored in the `addresses` table.
type AddressRiskAnalyzer struct {
	db      *pgxpool.Pool // shared database connection pool
	chainID int           // chain this analyzer is scoped to
}

// NewAddressRiskAnalyzer creates a new address risk analyzer bound to the
// given database pool and chain ID.
func NewAddressRiskAnalyzer(db *pgxpool.Pool, chainID int) *AddressRiskAnalyzer {
	return &AddressRiskAnalyzer{
		db:      db,
		chainID: chainID,
	}
}

// RiskAnalysis represents address risk analysis.
type RiskAnalysis struct {
	Address   string          // address that was analyzed
	RiskScore float64         // heuristic score, clamped to [0, 1]
	RiskLevel string          // "low", "medium" or "high" bucket of RiskScore
	Factors   map[string]bool // named factors that contributed to the score
	Flags     []string        // human-readable warning flags
}
// AnalyzeAddress analyzes risk for an address by scoring its aggregate
// send/receive counters from the `addresses` table.
//
// Heuristics: high overall activity slightly lowers the score; many outbound
// transactions with zero inbound raises it and adds a flag. The final score
// is clamped to [0, 1] and bucketed into low/medium/high.
func (ara *AddressRiskAnalyzer) AnalyzeAddress(ctx context.Context, address string) (*RiskAnalysis, error) {
	// Get address statistics
	query := `
	SELECT
	tx_count_sent,
	tx_count_received,
	total_sent_wei,
	total_received_wei
	FROM addresses
	WHERE address = $1 AND chain_id = $2
	`
	var txSent, txReceived int
	// Wei totals are scanned but not used in the current scoring heuristics.
	var totalSent, totalReceived string
	err := ara.db.QueryRow(ctx, query, address, ara.chainID).Scan(&txSent, &txReceived, &totalSent, &totalReceived)
	if err != nil {
		// Address not found, return low risk
		// NOTE(review): every query error — not just "no rows" — is treated
		// as "unknown address" and reported as low risk. Confirm this is
		// intended; a DB outage would silently mask risk. Distinguishing
		// pgx.ErrNoRows would require importing github.com/jackc/pgx/v5.
		return &RiskAnalysis{
			Address:   address,
			RiskScore: 0.0,
			RiskLevel: "low",
			Factors:   make(map[string]bool),
			Flags:     []string{},
		}, nil
	}
	// Calculate risk score (simplified)
	riskScore := 0.0
	factors := make(map[string]bool)
	flags := []string{}
	// High volume = lower risk
	if txSent+txReceived > 100 {
		factors["high_volume"] = true
		riskScore -= 0.1
	}
	// Check for suspicious patterns (simplified)
	if txSent > 1000 && txReceived == 0 {
		factors["suspicious_activity"] = true
		riskScore += 0.3
		flags = append(flags, "high_outbound_only")
	}
	// Normalize risk score to [0, 1]
	if riskScore < 0 {
		riskScore = 0
	}
	if riskScore > 1 {
		riskScore = 1
	}
	// Bucket the score into a coarse level for display.
	riskLevel := "low"
	if riskScore > 0.7 {
		riskLevel = "high"
	} else if riskScore > 0.4 {
		riskLevel = "medium"
	}
	return &RiskAnalysis{
		Address:   address,
		RiskScore: riskScore,
		RiskLevel: riskLevel,
		Factors:   factors,
		Flags:     flags,
	}, nil
}

View File

@@ -0,0 +1,127 @@
package analytics
import (
"context"
"fmt"
"time"
"github.com/jackc/pgx/v5/pgxpool"
)
// BridgeAnalytics provides bridge analytics over the
// analytics_bridge_history table.
type BridgeAnalytics struct {
	db *pgxpool.Pool // shared database connection pool
}

// NewBridgeAnalytics creates a new bridge analytics instance.
func NewBridgeAnalytics(db *pgxpool.Pool) *BridgeAnalytics {
	return &BridgeAnalytics{db: db}
}

// BridgeStats represents bridge statistics for a 24-hour window.
type BridgeStats struct {
	Transfers24h int                 // number of transfers in the window
	Volume24h    string              // total transferred amount, decimal string
	Chains       map[int]ChainStats  // per-chain breakdown, keyed by chain ID
	TopTokens    []TokenStats        // currently always empty (not yet computed)
}

// ChainStats represents per-chain transfer statistics.
type ChainStats struct {
	Outbound  int    // transfers leaving the chain
	Inbound   int    // transfers arriving at the chain
	VolumeOut string // outbound volume, decimal string
	VolumeIn  string // inbound volume, decimal string
}

// TokenStats represents per-token transfer statistics.
type TokenStats struct {
	Token     string // token contract address
	Symbol    string // token symbol
	Transfers int    // number of transfers
	Volume    string // transferred volume, decimal string
}
// GetBridgeStats gets bridge statistics for the trailing 24 hours,
// optionally narrowed by source chain, destination chain and an explicit
// timestamp range. Any nil filter is skipped.
//
// Returns aggregate transfer count/volume plus a per-chain breakdown.
// TopTokens is currently left empty.
func (ba *BridgeAnalytics) GetBridgeStats(ctx context.Context, chainFrom, chainTo *int, startDate, endDate *time.Time) (*BridgeStats, error) {
	query := `
	SELECT
	COUNT(*) as transfers_24h,
	SUM(amount) as volume_24h
	FROM analytics_bridge_history
	WHERE timestamp >= NOW() - INTERVAL '24 hours'
	`
	// Build optional filters with positional parameters.
	args := []interface{}{}
	argIndex := 1
	if chainFrom != nil {
		query += fmt.Sprintf(" AND chain_from = $%d", argIndex)
		args = append(args, *chainFrom)
		argIndex++
	}
	if chainTo != nil {
		query += fmt.Sprintf(" AND chain_to = $%d", argIndex)
		args = append(args, *chainTo)
		argIndex++
	}
	if startDate != nil {
		query += fmt.Sprintf(" AND timestamp >= $%d", argIndex)
		args = append(args, *startDate)
		argIndex++
	}
	if endDate != nil {
		query += fmt.Sprintf(" AND timestamp <= $%d", argIndex)
		args = append(args, *endDate)
		argIndex++
	}
	var transfers24h int
	var volume24h string
	// NOTE(review): SUM(amount) is NULL when no rows match; scanning NULL
	// into a string errors here — confirm whether COALESCE is needed.
	err := ba.db.QueryRow(ctx, query, args...).Scan(&transfers24h, &volume24h)
	if err != nil {
		return nil, fmt.Errorf("failed to get bridge stats: %w", err)
	}
	stats := &BridgeStats{
		Transfers24h: transfers24h,
		Volume24h:    volume24h,
		Chains:       make(map[int]ChainStats),
		TopTokens:    []TokenStats{},
	}
	// Get chain stats
	chainQuery := `
	SELECT
	chain_from,
	COUNT(*) FILTER (WHERE chain_from = $1) as outbound,
	COUNT(*) FILTER (WHERE chain_to = $1) as inbound,
	SUM(amount) FILTER (WHERE chain_from = $1) as volume_out,
	SUM(amount) FILTER (WHERE chain_to = $1) as volume_in
	FROM analytics_bridge_history
	WHERE (chain_from = $1 OR chain_to = $1) AND timestamp >= NOW() - INTERVAL '24 hours'
	GROUP BY chain_from
	`
	// Simplified - in production, iterate over all chains
	// Bug fix: the Query error was previously discarded; on failure rows was
	// nil and the subsequent rows.Next() call would panic.
	rows, err := ba.db.Query(ctx, chainQuery, 138)
	if err != nil {
		return nil, fmt.Errorf("failed to get chain stats: %w", err)
	}
	defer rows.Close()
	for rows.Next() {
		var chainID, outbound, inbound int
		var volumeOut, volumeIn string
		if err := rows.Scan(&chainID, &outbound, &inbound, &volumeOut, &volumeIn); err == nil {
			stats.Chains[chainID] = ChainStats{
				Outbound:  outbound,
				Inbound:   inbound,
				VolumeOut: volumeOut,
				VolumeIn:  volumeIn,
			}
		}
	}
	// Bug fix: surface iteration errors instead of silently returning a
	// partial breakdown.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate chain stats: %w", err)
	}
	return stats, nil
}

View File

@@ -0,0 +1,133 @@
package analytics
import (
"context"
"fmt"
"github.com/jackc/pgx/v5/pgxpool"
)
// Calculator calculates network analytics (throughput, gas, mempool size)
// from the indexed blocks and transactions tables.
type Calculator struct {
	db      *pgxpool.Pool // shared database connection pool
	chainID int           // chain this calculator is scoped to
}

// NewCalculator creates a new analytics calculator bound to the given
// database pool and chain ID.
func NewCalculator(db *pgxpool.Pool, chainID int) *Calculator {
	return &Calculator{
		db:      db,
		chainID: chainID,
	}
}

// NetworkStats represents network statistics as served by the API.
type NetworkStats struct {
	CurrentBlock        int64   `json:"current_block"`        // highest indexed block number
	TPS                 float64 `json:"tps"`                  // transactions per second (recent window)
	GPS                 float64 `json:"gps"`                  // gas per second (recent window)
	AvgGasPrice         int64   `json:"avg_gas_price"`        // average gas price over recent blocks
	PendingTransactions int     `json:"pending_transactions"` // current mempool size
	BlockTime           float64 `json:"block_time_seconds"`   // average block time in seconds
}
// CalculateNetworkStats calculates current network statistics: head block,
// throughput (TPS/GPS) over roughly the last 10 blocks, average gas price
// over the last 100 blocks, pending mempool size, and average block time.
func (c *Calculator) CalculateNetworkStats(ctx context.Context) (*NetworkStats, error) {
	// Get current block
	var currentBlock int64
	err := c.db.QueryRow(ctx,
		`SELECT MAX(number) FROM blocks WHERE chain_id = $1`,
		c.chainID,
	).Scan(&currentBlock)
	if err != nil {
		return nil, fmt.Errorf("failed to get current block: %w", err)
	}
	// Get transactions in last 10 blocks
	var txCount int
	var totalGas int64
	var blockTimeSum float64
	query := `
	SELECT
	COUNT(*) as tx_count,
	SUM(gas_used) as total_gas,
	EXTRACT(EPOCH FROM (MAX(timestamp) - MIN(timestamp))) / COUNT(DISTINCT block_number) as avg_block_time
	FROM transactions
	WHERE chain_id = $1 AND block_number > $2
	`
	err = c.db.QueryRow(ctx, query, c.chainID, currentBlock-10).Scan(&txCount, &totalGas, &blockTimeSum)
	if err != nil {
		// Best-effort: an empty window (or NULL aggregates) zeroes the stats.
		txCount = 0
		totalGas = 0
		blockTimeSum = 0
	}
	// Calculate TPS and GPS.
	// Bug fix: guard against division by zero — a zero or unknown block time
	// previously produced NaN/+Inf throughput values (which also break JSON
	// encoding).
	tps := 0.0
	gps := 0.0
	if blockTimeSum > 0 {
		tps = float64(txCount) / (blockTimeSum * 10)
		gps = float64(totalGas) / (blockTimeSum * 10)
	}
	// Get average gas price (best-effort: stays zero if the query fails).
	var avgGasPrice int64
	_ = c.db.QueryRow(ctx,
		`SELECT AVG(gas_price) FROM transactions WHERE chain_id = $1 AND block_number > $2 AND gas_price IS NOT NULL`,
		c.chainID, currentBlock-100,
	).Scan(&avgGasPrice)
	// Get pending transactions (best-effort: stays zero if the query fails).
	var pendingTx int
	_ = c.db.QueryRow(ctx,
		`SELECT COUNT(*) FROM mempool_transactions WHERE chain_id = $1 AND status = 'pending'`,
		c.chainID,
	).Scan(&pendingTx)
	return &NetworkStats{
		CurrentBlock:        currentBlock,
		TPS:                 tps,
		GPS:                 gps,
		AvgGasPrice:         avgGasPrice,
		PendingTransactions: pendingTx,
		BlockTime:           blockTimeSum,
	}, nil
}
// TopContracts gets the top `limit` recipient addresses on this chain by
// transaction count, together with the total value sent to each.
func (c *Calculator) TopContracts(ctx context.Context, limit int) ([]ContractStats, error) {
	query := `
	SELECT
	to_address as contract_address,
	COUNT(*) as tx_count,
	SUM(value) as total_value
	FROM transactions
	WHERE chain_id = $1 AND to_address IS NOT NULL
	GROUP BY to_address
	ORDER BY tx_count DESC
	LIMIT $2
	`
	rows, err := c.db.Query(ctx, query, c.chainID, limit)
	if err != nil {
		return nil, fmt.Errorf("failed to query top contracts: %w", err)
	}
	defer rows.Close()
	var contracts []ContractStats
	for rows.Next() {
		var contract ContractStats
		if err := rows.Scan(&contract.Address, &contract.TransactionCount, &contract.TotalValue); err != nil {
			continue // skip malformed rows rather than failing the listing
		}
		contracts = append(contracts, contract)
	}
	// Bug fix: surface iteration errors instead of silently returning a
	// truncated list.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate top contracts: %w", err)
	}
	return contracts, nil
}

// ContractStats represents contract statistics for TopContracts results.
type ContractStats struct {
	Address          string // recipient (contract) address
	TransactionCount int64  // number of transactions sent to the address
	TotalValue       string // total value transferred, as a decimal string
}

View File

@@ -0,0 +1,119 @@
package analytics
import (
"context"
"fmt"
"time"
"github.com/jackc/pgx/v5/pgxpool"
)
// FlowTracker tracks address-to-address token flows, aggregated in the
// analytics_flows table.
type FlowTracker struct {
	db      *pgxpool.Pool // shared database connection pool
	chainID int           // chain this tracker is scoped to
}

// NewFlowTracker creates a new flow tracker bound to the given database
// pool and chain ID.
func NewFlowTracker(db *pgxpool.Pool, chainID int) *FlowTracker {
	return &FlowTracker{
		db:      db,
		chainID: chainID,
	}
}

// Flow represents an aggregated flow between two addresses for one token.
type Flow struct {
	From      string    // sender address
	To        string    // recipient address
	Token     string    // token contract address
	Amount    string    // cumulative transferred amount, decimal string
	Count     int       // number of transfers aggregated into this flow
	FirstSeen time.Time // timestamp of the first recorded transfer
	LastSeen  time.Time // timestamp of the most recent transfer
}
// TrackFlow records one transfer of `amount` of `token` from `from` to `to`,
// upserting the aggregate row in analytics_flows (incrementing the running
// total and transfer count, and bumping last_seen).
func (ft *FlowTracker) TrackFlow(ctx context.Context, from, to, token string, amount string) error {
	const upsert = `
	INSERT INTO analytics_flows (
	chain_id, from_address, to_address, token_contract,
	total_amount, transfer_count, first_seen, last_seen
	) VALUES ($1, $2, $3, $4, $5, 1, NOW(), NOW())
	ON CONFLICT (chain_id, from_address, to_address, token_contract) DO UPDATE SET
	total_amount = analytics_flows.total_amount + $5::numeric,
	transfer_count = analytics_flows.transfer_count + 1,
	last_seen = NOW(),
	updated_at = NOW()
	`
	if _, err := ft.db.Exec(ctx, upsert, ft.chainID, from, to, token, amount); err != nil {
		return fmt.Errorf("failed to track flow: %w", err)
	}
	return nil
}
// GetFlows gets flows matching the given filters. Empty-string address and
// token filters and nil date bounds are skipped; results are ordered by
// most recent activity and capped at `limit`.
func (ft *FlowTracker) GetFlows(ctx context.Context, from, to, token string, startDate, endDate *time.Time, limit int) ([]Flow, error) {
	query := `
	SELECT from_address, to_address, token_contract, total_amount, transfer_count, first_seen, last_seen
	FROM analytics_flows
	WHERE chain_id = $1
	`
	args := []interface{}{ft.chainID}
	argIndex := 2
	if from != "" {
		query += fmt.Sprintf(" AND from_address = $%d", argIndex)
		args = append(args, from)
		argIndex++
	}
	if to != "" {
		query += fmt.Sprintf(" AND to_address = $%d", argIndex)
		args = append(args, to)
		argIndex++
	}
	if token != "" {
		query += fmt.Sprintf(" AND token_contract = $%d", argIndex)
		args = append(args, token)
		argIndex++
	}
	if startDate != nil {
		query += fmt.Sprintf(" AND last_seen >= $%d", argIndex)
		args = append(args, *startDate)
		argIndex++
	}
	if endDate != nil {
		query += fmt.Sprintf(" AND last_seen <= $%d", argIndex)
		args = append(args, *endDate)
		argIndex++
	}
	// Consistency: use the same Sprintf placeholder style as the filters above
	// (previously mixed string concatenation with Sprintf).
	query += fmt.Sprintf(" ORDER BY last_seen DESC LIMIT $%d", argIndex)
	args = append(args, limit)
	rows, err := ft.db.Query(ctx, query, args...)
	if err != nil {
		return nil, fmt.Errorf("failed to query flows: %w", err)
	}
	defer rows.Close()
	flows := []Flow{}
	for rows.Next() {
		var f Flow
		if err := rows.Scan(&f.From, &f.To, &f.Token, &f.Amount, &f.Count, &f.FirstSeen, &f.LastSeen); err != nil {
			continue // skip malformed rows rather than failing the listing
		}
		flows = append(flows, f)
	}
	// Bug fix: surface iteration errors instead of silently returning a
	// truncated result set.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate flows: %w", err)
	}
	return flows, nil
}

View File

@@ -0,0 +1,104 @@
package analytics
import (
"context"
"fmt"
"github.com/jackc/pgx/v5/pgxpool"
)
// TokenDistribution provides token distribution analytics backed by the
// token_distribution materialized view and the token_balances table.
type TokenDistribution struct {
	db      *pgxpool.Pool // shared database connection pool
	chainID int           // chain this analyzer is scoped to
}

// NewTokenDistribution creates a new token distribution analyzer bound to
// the given database pool and chain ID.
func NewTokenDistribution(db *pgxpool.Pool, chainID int) *TokenDistribution {
	return &TokenDistribution{
		db:      db,
		chainID: chainID,
	}
}

// DistributionStats represents token distribution statistics.
type DistributionStats struct {
	Contract     string            // token contract address
	Symbol       string            // token symbol (currently never populated)
	TotalSupply  string            // total tracked balance, decimal string
	Holders      int               // number of holders
	Distribution map[string]string // named metrics; values are placeholders (TODO)
	TopHolders   []HolderInfo      // largest holders by balance
}

// HolderInfo represents a single holder's position.
type HolderInfo struct {
	Address    string // holder address
	Balance    string // balance, decimal string
	Percentage string // share of supply; placeholder "0.0" (TODO)
}
// GetTokenDistribution gets token distribution for a contract: holder count
// and total supply from the token_distribution materialized view, plus the
// topN holders by balance. Percentage and distribution metrics are still
// placeholders.
func (td *TokenDistribution) GetTokenDistribution(ctx context.Context, contract string, topN int) (*DistributionStats, error) {
	// Refresh the materialized view so results are current.
	// Best-effort: the view may not exist yet on a fresh database, so the
	// error is deliberately ignored (previously an empty if-block, which
	// static analysis flags as a silent no-op; staticcheck SA9003).
	_, _ = td.db.Exec(ctx, `REFRESH MATERIALIZED VIEW CONCURRENTLY token_distribution`)
	// Get distribution from materialized view
	query := `
	SELECT holder_count, total_balance
	FROM token_distribution
	WHERE token_contract = $1 AND chain_id = $2
	`
	var holders int
	var totalSupply string
	err := td.db.QueryRow(ctx, query, contract, td.chainID).Scan(&holders, &totalSupply)
	if err != nil {
		return nil, fmt.Errorf("failed to get distribution: %w", err)
	}
	// Get top holders
	topHoldersQuery := `
	SELECT address, balance
	FROM token_balances
	WHERE token_contract = $1 AND chain_id = $2 AND balance > 0
	ORDER BY balance DESC
	LIMIT $3
	`
	rows, err := td.db.Query(ctx, topHoldersQuery, contract, td.chainID, topN)
	if err != nil {
		return nil, fmt.Errorf("failed to get top holders: %w", err)
	}
	defer rows.Close()
	topHolders := []HolderInfo{}
	for rows.Next() {
		var holder HolderInfo
		if err := rows.Scan(&holder.Address, &holder.Balance); err != nil {
			continue // skip malformed rows rather than failing the listing
		}
		// Calculate percentage (simplified)
		holder.Percentage = "0.0" // TODO: Calculate from total supply
		topHolders = append(topHolders, holder)
	}
	// Bug fix: surface iteration errors instead of silently returning a
	// truncated holder list.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate top holders: %w", err)
	}
	stats := &DistributionStats{
		Contract:     contract,
		Holders:      holders,
		TotalSupply:  totalSupply,
		Distribution: make(map[string]string),
		TopHolders:   topHolders,
	}
	// Calculate distribution metrics
	stats.Distribution["top_10_percent"] = "0.0"   // TODO: Calculate
	stats.Distribution["top_1_percent"] = "0.0"    // TODO: Calculate
	stats.Distribution["gini_coefficient"] = "0.0" // TODO: Calculate
	return stats, nil
}

View File

@@ -0,0 +1,32 @@
package main
import (
"log"
"os"
"strconv"
"github.com/explorer/backend/api/gateway"
)
// main starts the API gateway. Configuration comes from the environment:
// API_URL is the upstream REST API (default http://localhost:8080) and
// GATEWAY_PORT is the listen port (default 8081).
func main() {
	upstream := os.Getenv("API_URL")
	if upstream == "" {
		upstream = "http://localhost:8080"
	}
	gw, err := gateway.NewGateway(upstream)
	if err != nil {
		log.Fatalf("Failed to create gateway: %v", err)
	}
	listenPort := 8081
	if raw := os.Getenv("GATEWAY_PORT"); raw != "" {
		if parsed, err := strconv.Atoi(raw); err == nil {
			listenPort = parsed
		}
	}
	if err := gw.Start(listenPort); err != nil {
		log.Fatalf("Failed to start gateway: %v", err)
	}
}

View File

@@ -0,0 +1,140 @@
package gateway
import (
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
)
// Gateway represents the API gateway: a reverse proxy in front of the REST
// API that applies authentication, rate limiting and security headers.
type Gateway struct {
	apiURL      *url.URL
	rateLimiter *RateLimiter
	auth        *AuthMiddleware
}

// NewGateway creates a new API gateway that proxies to apiURL.
func NewGateway(apiURL string) (*Gateway, error) {
	u, err := url.Parse(apiURL)
	if err != nil {
		return nil, fmt.Errorf("invalid API URL: %w", err)
	}
	g := &Gateway{
		apiURL:      u,
		rateLimiter: NewRateLimiter(),
		auth:        NewAuthMiddleware(),
	}
	return g, nil
}

// Start starts the gateway server and blocks serving requests on the given
// port until the listener fails.
func (g *Gateway) Start(port int) error {
	proxy := httputil.NewSingleHostReverseProxy(g.apiURL)
	mux := http.NewServeMux()
	mux.HandleFunc("/", g.handleRequest(proxy))
	addr := fmt.Sprintf(":%d", port)
	log.Printf("Starting API Gateway on %s", addr)
	return http.ListenAndServe(addr, mux)
}
// handleRequest wraps the reverse proxy with the gateway middleware chain:
// security headers, authentication, rate limiting, forwarding metadata and
// branding headers, then proxying to the upstream API.
func (g *Gateway) handleRequest(proxy *httputil.ReverseProxy) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		g.addSecurityHeaders(w)
		// Authentication guard.
		if !g.auth.Authenticate(r) {
			http.Error(w, "Unauthorized", http.StatusUnauthorized)
			return
		}
		// Rate-limiting guard.
		if !g.rateLimiter.Allow(r) {
			http.Error(w, "Rate limit exceeded", http.StatusTooManyRequests)
			return
		}
		// Forwarding metadata for the upstream API.
		r.Header.Set("X-Forwarded-For", r.RemoteAddr)
		if key := g.auth.GetAPIKey(r); key != "" {
			r.Header.Set("X-API-Key", key)
		}
		// Branding headers on the response.
		w.Header().Set("X-Explorer-Name", "SolaceScanScout")
		w.Header().Set("X-Explorer-Version", "1.0.0")
		proxy.ServeHTTP(w, r)
	}
}

// addSecurityHeaders adds the standard security headers to a response.
// CSP will be set per route if needed.
func (g *Gateway) addSecurityHeaders(w http.ResponseWriter) {
	pairs := [][2]string{
		{"X-Content-Type-Options", "nosniff"},
		{"X-Frame-Options", "DENY"},
		{"X-XSS-Protection", "1; mode=block"},
		{"Strict-Transport-Security", "max-age=31536000; includeSubDomains"},
		{"Referrer-Policy", "strict-origin-when-cross-origin"},
		{"Permissions-Policy", "geolocation=(), microphone=(), camera=()"},
	}
	h := w.Header()
	for _, p := range pairs {
		h.Set(p[0], p[1])
	}
}
// RateLimiter handles rate limiting
type RateLimiter struct {
// Simple in-memory rate limiter (should use Redis in production)
limits map[string]*limitEntry
}
type limitEntry struct {
count int
resetAt int64
}
func NewRateLimiter() *RateLimiter {
return &RateLimiter{
limits: make(map[string]*limitEntry),
}
}
func (rl *RateLimiter) Allow(r *http.Request) bool {
_ = r.RemoteAddr // Will be used in production for per-IP limiting
// In production, use Redis with token bucket algorithm
// For now, simple per-IP limiting
return true // Simplified - implement proper rate limiting
}
// AuthMiddleware handles authentication
type AuthMiddleware struct {
// In production, validate against database
}
func NewAuthMiddleware() *AuthMiddleware {
return &AuthMiddleware{}
}
func (am *AuthMiddleware) Authenticate(r *http.Request) bool {
// Allow anonymous access for now
// In production, validate API key
apiKey := am.GetAPIKey(r)
return apiKey != "" || true // Allow anonymous for MVP
}
func (am *AuthMiddleware) GetAPIKey(r *http.Request) string {
// Check header first
if key := r.Header.Get("X-API-Key"); key != "" {
return key
}
// Check query parameter
if key := r.URL.Query().Get("api_key"); key != "" {
return key
}
return ""
}

View File

@@ -0,0 +1,81 @@
package graphql
import (
"context"
"fmt"
"github.com/jackc/pgx/v5/pgxpool"
)
// Resolver handles GraphQL queries.
// Root resolver holding the shared database pool and the chain ID this API
// instance serves. Both resolve methods are currently unimplemented stubs.
type Resolver struct {
	db      *pgxpool.Pool // shared database connection pool
	chainID int           // chain this resolver is scoped to
}

// NewResolver creates a new GraphQL resolver.
func NewResolver(db *pgxpool.Pool, chainID int) *Resolver {
	return &Resolver{
		db:      db,
		chainID: chainID,
	}
}

// BlockResolver resolves Block queries.
type BlockResolver struct {
	db      *pgxpool.Pool
	chainID int
	block   *Block // resolved block data, populated by the (future) fetch
}

// Block represents a block in GraphQL.
// Field types mirror the SDL schema (int32 for GraphQL Int).
type Block struct {
	ChainID          int32
	Number           int32
	Hash             string
	ParentHash       string
	Timestamp        string
	Miner            string
	TransactionCount int32
	GasUsed          int64
	GasLimit         int64
}

// TransactionResolver resolves Transaction queries.
type TransactionResolver struct {
	db      *pgxpool.Pool
	chainID int
	tx      *Transaction // resolved transaction data, populated by the (future) fetch
}

// Transaction represents a transaction in GraphQL.
// Pointer fields correspond to nullable schema fields — presumably To is
// nil for contract creation and GasUsed/Status are nil until a receipt is
// indexed (TODO confirm against the indexer).
type Transaction struct {
	ChainID     int32
	Hash        string
	BlockNumber int32
	From        string
	To          *string
	Value       string
	GasPrice    *int64
	GasUsed     *int64
	Status      *int32
}

// ResolveBlock resolves the block query.
// Stub: always returns "not implemented".
func (r *Resolver) ResolveBlock(ctx context.Context, args struct {
	ChainID int32
	Number  *int32
}) (*BlockResolver, error) {
	// Implementation would fetch block from database
	return nil, fmt.Errorf("not implemented")
}

// ResolveTransaction resolves the transaction query.
// Stub: always returns "not implemented".
func (r *Resolver) ResolveTransaction(ctx context.Context, args struct {
	ChainID int32
	Hash    string
}) (*TransactionResolver, error) {
	// Implementation would fetch transaction from database
	return nil, fmt.Errorf("not implemented")
}

View File

@@ -0,0 +1,102 @@
# Root query entry points. Every query is scoped by chainId so one API can
# serve multiple chains.
type Query {
  block(chainId: Int!, number: Int): Block
  blockByHash(chainId: Int!, hash: String!): Block
  blocks(chainId: Int!, page: Int, pageSize: Int): BlockConnection!
  transaction(chainId: Int!, hash: String!): Transaction
  transactions(chainId: Int!, page: Int, pageSize: Int): TransactionConnection!
  address(chainId: Int!, address: String!): Address
}

# A mined block and its transactions.
# NOTE(review): GraphQL Int is 32-bit; gasUsed/gasLimit (and Transaction
# gas fields) may overflow for large values — consider a BigInt scalar.
type Block {
  chainId: Int!
  number: Int!
  hash: String!
  parentHash: String!
  timestamp: String!
  miner: String!
  transactionCount: Int!
  gasUsed: Int!
  gasLimit: Int!
  transactions: [Transaction!]!
}

# A transaction; nullable fields (to, gasPrice, gasUsed, status) are absent
# for contract creations or until a receipt is available.
type Transaction {
  chainId: Int!
  hash: String!
  blockNumber: Int!
  from: String!
  to: String
  value: String!
  gasPrice: Int
  gasUsed: Int
  status: Int
  logs: [Log!]!
  trace: Trace
}

# An event log emitted by a transaction.
type Log {
  address: String!
  topics: [String!]!
  data: String!
  logIndex: Int!
}

# Execution trace of a transaction.
type Trace {
  calls: [CallTrace!]!
}

# A single internal call within a trace.
type CallTrace {
  type: String!
  from: String!
  to: String!
  value: String!
  gas: Int!
  gasUsed: Int!
  input: String!
  output: String!
}

# An address with summary statistics and optional labels.
type Address {
  address: String!
  chainId: Int!
  transactionCount: Int!
  tokenCount: Int!
  isContract: Boolean!
  label: String
  tags: [String!]!
}

# Relay-style pagination wrappers.
type BlockConnection {
  edges: [BlockEdge!]!
  pageInfo: PageInfo!
}

type BlockEdge {
  node: Block!
  cursor: String!
}

type TransactionConnection {
  edges: [TransactionEdge!]!
  pageInfo: PageInfo!
}

type TransactionEdge {
  node: Transaction!
  cursor: String!
}

# Cursor-based pagination metadata.
type PageInfo {
  hasNextPage: Boolean!
  hasPreviousPage: Boolean!
  startCursor: String
  endCursor: String
}

# Real-time subscriptions for newly indexed data.
type Subscription {
  newBlock(chainId: Int!): Block!
  newTransaction(chainId: Int!): Transaction!
}

View File

@@ -0,0 +1,73 @@
package labels
import (
"context"
"fmt"
"github.com/jackc/pgx/v5/pgxpool"
)
// LabelService handles address labeling (user-supplied and system labels
// stored in the address_labels table).
type LabelService struct {
	db *pgxpool.Pool // shared database connection pool
}

// NewLabelService creates a new label service.
func NewLabelService(db *pgxpool.Pool) *LabelService {
	return &LabelService{db: db}
}
// AddLabel adds a label to an address.
// Upserts on (chain_id, address, label_type, user_id): an existing label of
// the same type by the same user is overwritten and its updated_at bumped.
// userID may be nil for system/global labels.
// NOTE(review): with a NULL user_id, Postgres unique constraints treat
// NULLs as distinct by default, so ON CONFLICT may never fire for system
// labels — confirm the index uses NULLS NOT DISTINCT or a partial index.
func (l *LabelService) AddLabel(ctx context.Context, chainID int, address, label, labelType string, userID *string) error {
	query := `
	INSERT INTO address_labels (chain_id, address, label, label_type, user_id)
	VALUES ($1, $2, $3, $4, $5)
	ON CONFLICT (chain_id, address, label_type, user_id) DO UPDATE SET
	label = $3,
	updated_at = NOW()
	`
	_, err := l.db.Exec(ctx, query, chainID, address, label, labelType, userID)
	return err
}
// GetLabels gets all labels attached to an address on a chain, newest
// first. System labels (NULL user_id) come back with an empty UserID.
func (l *LabelService) GetLabels(ctx context.Context, chainID int, address string) ([]Label, error) {
	query := `
	SELECT label, label_type, user_id, source, created_at
	FROM address_labels
	WHERE chain_id = $1 AND address = $2
	ORDER BY created_at DESC
	`
	rows, err := l.db.Query(ctx, query, chainID, address)
	if err != nil {
		return nil, fmt.Errorf("failed to query labels: %w", err)
	}
	defer rows.Close()
	var labels []Label
	for rows.Next() {
		var label Label
		var userID *string // user_id is nullable (system labels)
		if err := rows.Scan(&label.Label, &label.LabelType, &userID, &label.Source, &label.CreatedAt); err != nil {
			continue // skip malformed rows rather than failing the listing
		}
		if userID != nil {
			label.UserID = *userID
		}
		labels = append(labels, label)
	}
	// Bug fix: surface iteration errors instead of silently returning a
	// truncated result set.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate labels: %w", err)
	}
	return labels, nil
}
// Label represents an address label as returned by GetLabels.
type Label struct {
	Label     string // label text
	LabelType string // category of label (e.g. user vs. system taxonomy)
	UserID    string // owning user; empty for system labels (NULL user_id)
	Source    string // where the label originated
	CreatedAt string // creation timestamp, as scanned from the database
}

View File

@@ -0,0 +1,123 @@
package middleware
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/explorer/backend/auth"
"github.com/explorer/backend/featureflags"
)
// AuthMiddleware handles authentication and authorization for API routes,
// validating wallet-signed JWTs and mapping callers to track levels.
type AuthMiddleware struct {
	walletAuth *auth.WalletAuth // JWT issuer/validator for wallet-based auth
}

// NewAuthMiddleware creates a new auth middleware using the given wallet
// authenticator.
func NewAuthMiddleware(walletAuth *auth.WalletAuth) *AuthMiddleware {
	return &AuthMiddleware{
		walletAuth: walletAuth,
	}
}
// RequireAuth is middleware that requires authentication.
// Rejects the request with 401 unless a valid Bearer JWT is presented; on
// success it stores the caller's address, track level and an authenticated
// flag on the request context.
// NOTE(review): plain string context keys can collide and are flagged by
// go vet; a private key type would be safer, but the keys are read by other
// middleware (e.g. RequireTrack), so changing them must be coordinated
// across the package.
func (m *AuthMiddleware) RequireAuth(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		address, track, err := m.extractAuth(r)
		if err != nil {
			writeUnauthorized(w)
			return
		}
		// Add user context
		ctx := context.WithValue(r.Context(), "user_address", address)
		ctx = context.WithValue(ctx, "user_track", track)
		ctx = context.WithValue(ctx, "authenticated", true)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// RequireTrack returns middleware that rejects requests whose caller lacks
// the given track level, responding 403 with the required track named.
// It reads the track level stored on the context by RequireAuth or
// OptionalAuth, defaulting to Track 1 (public) when absent.
func (m *AuthMiddleware) RequireTrack(requiredTrack int) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			callerTrack := 1 // Default to Track 1 (public)
			if v, ok := r.Context().Value("user_track").(int); ok {
				callerTrack = v
			}
			if !featureflags.HasAccess(callerTrack, requiredTrack) {
				writeForbidden(w, requiredTrack)
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}
// OptionalAuth is middleware that authenticates when credentials are
// present but never rejects: requests without (or with invalid) auth are
// passed through as anonymous Track 1 (public) callers.
func (m *AuthMiddleware) OptionalAuth(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		address, track, err := m.extractAuth(r)
		authenticated := err == nil
		if !authenticated {
			// No usable credentials: anonymous public access.
			address = ""
			track = 1
		}
		ctx := context.WithValue(r.Context(), "user_address", address)
		ctx = context.WithValue(ctx, "user_track", track)
		ctx = context.WithValue(ctx, "authenticated", authenticated)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
// extractAuth extracts the caller's wallet address and track level from the
// request's Bearer JWT.
//
// Returns an error when the Authorization header is absent or malformed, or
// when JWT validation fails. Callers only test the error for non-nil, so
// the concrete values are informational.
// Bug fix: previously returned http.ErrMissingFile ("http: no such file")
// as the missing/malformed-header sentinel, which is unrelated to auth and
// misleading in logs.
func (m *AuthMiddleware) extractAuth(r *http.Request) (string, int, error) {
	// Get Authorization header
	authHeader := r.Header.Get("Authorization")
	if authHeader == "" {
		return "", 0, fmt.Errorf("missing Authorization header")
	}
	// Check for Bearer token
	parts := strings.Split(authHeader, " ")
	if len(parts) != 2 || parts[0] != "Bearer" {
		return "", 0, fmt.Errorf("authorization header is not a Bearer token")
	}
	token := parts[1]
	// Validate JWT token
	address, track, err := m.walletAuth.ValidateJWT(token)
	if err != nil {
		return "", 0, err
	}
	return address, track, nil
}
// writeUnauthorized writes a 401 Unauthorized response
func writeUnauthorized(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusUnauthorized)
w.Write([]byte(`{"error":{"code":"unauthorized","message":"Authentication required"}}`))
}
// writeForbidden writes a 403 Forbidden response
func writeForbidden(w http.ResponseWriter, requiredTrack int) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusForbidden)
w.Write([]byte(`{"error":{"code":"forbidden","message":"Insufficient permissions","required_track":` + fmt.Sprintf("%d", requiredTrack) + `}}`))
}

View File

@@ -0,0 +1,63 @@
package middleware
import (
"net/http"
"strings"
)
// SecurityMiddleware adds security headers to responses and blocks
// disallowed write operations. Stateless; safe to share across handlers.
type SecurityMiddleware struct{}

// NewSecurityMiddleware creates a new security middleware.
func NewSecurityMiddleware() *SecurityMiddleware {
	return &SecurityMiddleware{}
}
// AddSecurityHeaders adds the fixed set of security headers to every
// response before delegating to the next handler.
func (m *SecurityMiddleware) AddSecurityHeaders(next http.Handler) http.Handler {
	// Header table, applied in order:
	// - CSP: unsafe-eval required by ethers.js v5 UMD from CDN (ABI decoding)
	// - X-Frame-Options: click-jacking protection
	securityHeaders := [][2]string{
		{"Content-Security-Policy", "default-src 'self'; script-src 'self' 'unsafe-inline' 'unsafe-eval' https://cdn.jsdelivr.net https://unpkg.com https://cdnjs.cloudflare.com; style-src 'self' 'unsafe-inline' https://cdnjs.cloudflare.com; font-src 'self' https://cdnjs.cloudflare.com; img-src 'self' data: https:; connect-src 'self' https://explorer.d-bis.org https://rpc-http-pub.d-bis.org wss://rpc-ws-pub.d-bis.org http://192.168.11.221:8545 ws://192.168.11.221:8546;"},
		{"X-Frame-Options", "DENY"},
		{"X-Content-Type-Options", "nosniff"},
		{"X-XSS-Protection", "1; mode=block"},
		{"Strict-Transport-Security", "max-age=31536000; includeSubDomains"},
		{"Referrer-Policy", "strict-origin-when-cross-origin"},
		{"Permissions-Policy", "geolocation=(), microphone=(), camera=()"},
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		h := w.Header()
		for _, pair := range securityHeaders {
			h.Set(pair[0], pair[1])
		}
		next.ServeHTTP(w, r)
	})
}
// BlockWriteCalls blocks contract write calls except WETH operations.
//
// POST requests whose path begins with /api/v1/track1 are rejected with
// 403 unless the path mentions a WETH wrap/unwrap operation; every other
// method and path passes through untouched.
func (m *SecurityMiddleware) BlockWriteCalls(next http.Handler) http.Handler {
	// A path is considered a WETH operation if it mentions any of these
	// substrings ("wrap" also matches "unwrap", kept for clarity).
	isWETHOp := func(p string) bool {
		return strings.Contains(p, "weth") || strings.Contains(p, "wrap") || strings.Contains(p, "unwrap")
	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodPost &&
			strings.HasPrefix(r.URL.Path, "/api/v1/track1") &&
			!isWETHOp(r.URL.Path) {
			http.Error(w, "Write operations not allowed for Track 1 (public)", http.StatusForbidden)
			return
		}
		next.ServeHTTP(w, r)
	})
}

View File

@@ -0,0 +1,69 @@
# REST API Server
REST API implementation for the ChainID 138 Explorer Platform.
## Structure
- `server.go` - Main server setup and route configuration
- `routes.go` - Route handlers and URL parsing
- `blocks.go` - Block-related endpoints
- `transactions.go` - Transaction-related endpoints
- `addresses.go` - Address-related endpoints
- `search.go` - Unified search endpoint
- `validation.go` - Input validation utilities
- `middleware.go` - HTTP middleware (logging, compression)
- `errors.go` - Error response utilities
## API Endpoints
### Blocks
- `GET /api/v1/blocks` - List blocks (paginated)
- `GET /api/v1/blocks/{chain_id}/{number}` - Get block by number
- `GET /api/v1/blocks/{chain_id}/hash/{hash}` - Get block by hash
### Transactions
- `GET /api/v1/transactions` - List transactions (paginated, filterable)
- `GET /api/v1/transactions/{chain_id}/{hash}` - Get transaction by hash
### Addresses
- `GET /api/v1/addresses/{chain_id}/{address}` - Get address information
### Search
- `GET /api/v1/search?q={query}` - Unified search (auto-detects type: block number, address, or transaction hash)
### Health
- `GET /health` - Health check endpoint
## Features
- Input validation (addresses, hashes, block numbers)
- Pagination support
- Query timeouts for database operations
- CORS headers
- Request logging
- Error handling with consistent error format
- Health checks with database connectivity
## Running
```bash
cd backend/api/rest
go run main.go
```
Or use the development script:
```bash
./scripts/run-dev.sh
```
## Configuration
Set environment variables:
- `DB_HOST` - Database host
- `DB_PORT` - Database port
- `DB_USER` - Database user
- `DB_PASSWORD` - Database password
- `DB_NAME` - Database name
- `PORT` - API server port (default: 8080)
- `CHAIN_ID` - Chain ID (default: 138)

View File

@@ -0,0 +1,108 @@
package rest
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"time"
)
// handleGetAddress handles GET /api/v1/addresses/{chain_id}/{address}
//
// Response: {"data": {address, chain_id, balance, transaction_count,
// token_count, is_contract, tags, [label]}}. Balance is currently a
// placeholder "0" (no RPC lookup yet). Only the transaction count is a
// hard requirement; the secondary lookups (token count, label, tags,
// contract flag) are best-effort and degrade to zero values on error.
//
// Fixes vs. the previous version: the timeout now derives from r.Context()
// so a client disconnect cancels in-flight queries, and the tag query's
// error is checked before the rows are used.
func (s *Server) handleGetAddress(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Address arrives as a query parameter.
	address := r.URL.Query().Get("address")
	if address == "" {
		http.Error(w, "Address required", http.StatusBadRequest)
		return
	}
	// Validate format before touching the database.
	if !isValidAddress(address) {
		http.Error(w, "Invalid address format", http.StatusBadRequest)
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()
	// Transaction count (sent or received) — required; errors are fatal.
	var txCount int64
	err := s.db.QueryRow(ctx,
		`SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`,
		s.chainID, address,
	).Scan(&txCount)
	if err != nil {
		http.Error(w, fmt.Sprintf("Database error: %v", err), http.StatusInternalServerError)
		return
	}
	// Distinct tokens this address has transferred; 0 on error.
	var tokenCount int
	err = s.db.QueryRow(ctx,
		`SELECT COUNT(DISTINCT token_address) FROM token_transfers WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`,
		s.chainID, address,
	).Scan(&tokenCount)
	if err != nil {
		tokenCount = 0
	}
	// Optional public label; NullString stays invalid when absent or on error.
	var label sql.NullString
	s.db.QueryRow(ctx,
		`SELECT label FROM address_labels WHERE chain_id = $1 AND address = $2 AND label_type = 'public' LIMIT 1`,
		s.chainID, address,
	).Scan(&label)
	// Tags are best-effort: a query or scan error just yields fewer tags.
	tags := []string{}
	rows, err := s.db.Query(ctx,
		`SELECT tag FROM address_tags WHERE chain_id = $1 AND address = $2`,
		s.chainID, address,
	)
	if err == nil {
		defer rows.Close()
		for rows.Next() {
			var tag string
			if err := rows.Scan(&tag); err == nil {
				tags = append(tags, tag)
			}
		}
		// Deliberately ignored: an iteration error leaves tags partial.
		_ = rows.Err()
	}
	// Contract flag; false on error.
	var isContract bool
	s.db.QueryRow(ctx,
		`SELECT EXISTS(SELECT 1 FROM contracts WHERE chain_id = $1 AND address = $2)`,
		s.chainID, address,
	).Scan(&isContract)
	// Balance placeholder until an RPC lookup is wired in.
	balance := "0"
	// TODO: Add RPC call to get balance if needed
	response := map[string]interface{}{
		"address":           address,
		"chain_id":          s.chainID,
		"balance":           balance,
		"transaction_count": txCount,
		"token_count":       tokenCount,
		"is_contract":       isContract,
		"tags":              tags,
	}
	if label.Valid {
		response["label"] = label.String
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"data": response,
	})
}

View File

@@ -0,0 +1,231 @@
package rest_test
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"github.com/explorer/backend/api/rest"
"github.com/jackc/pgx/v5/pgxpool"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// setupTestServer creates a test server with a test database.
//
// Returns the REST server and a mux with all routes registered. If
// setupTestDB reports an error, the calling test is skipped. NOTE(review):
// the current setupTestDB returns (nil, nil), so tests proceed with a nil
// pool and database-backed handlers are expected to answer 500 — confirm
// this is the intended contract once a real test database is wired in.
func setupTestServer(t *testing.T) (*rest.Server, *http.ServeMux) {
	// Use test database or in-memory database
	// For now, we'll use a mock approach
	db, err := setupTestDB(t)
	if err != nil {
		// Skipf calls runtime.Goexit, so the nil,nil return is never
		// observed by a real test.
		t.Skipf("Skipping test: database not available: %v", err)
		return nil, nil
	}
	server := rest.NewServer(db, 138) // ChainID 138
	mux := http.NewServeMux()
	server.SetupRoutes(mux)
	return server, mux
}
// setupTestDB creates a test database connection.
//
// Placeholder: always returns (nil, nil) so the suite can run without a
// database. Handlers receiving the nil pool are expected to fail with 500,
// which the tests tolerate. The *testing.T parameter is currently unused.
func setupTestDB(t *testing.T) (*pgxpool.Pool, error) {
	// In a real test, you would use a test database
	// For now, return nil to skip database-dependent tests
	// TODO: Set up test database connection
	// This allows tests to run without a database connection
	return nil, nil
}
// TestHealthEndpoint tests the health check endpoint: it must answer 200
// with a JSON body whose "status" field is "ok".
func TestHealthEndpoint(t *testing.T) {
	_, mux := setupTestServer(t)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/health", nil))
	assert.Equal(t, http.StatusOK, rec.Code)
	var body map[string]interface{}
	require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &body))
	assert.Equal(t, "ok", body["status"])
}
// TestListBlocks tests the blocks list endpoint.
func TestListBlocks(t *testing.T) {
	_, mux := setupTestServer(t)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/blocks?limit=10&page=1", nil))
	// 200 with a live database; 500 without one.
	assert.Contains(t, []int{http.StatusOK, http.StatusInternalServerError}, rec.Code)
}
// TestGetBlockByNumber tests getting a block by number.
func TestGetBlockByNumber(t *testing.T) {
	_, mux := setupTestServer(t)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/blocks/138/1000", nil))
	// 200 when the block exists, 404 when it doesn't, 500 without a database.
	assert.Contains(t, []int{http.StatusOK, http.StatusNotFound, http.StatusInternalServerError}, rec.Code)
}
// TestListTransactions tests the transactions list endpoint.
func TestListTransactions(t *testing.T) {
	_, mux := setupTestServer(t)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/transactions?limit=10&page=1", nil))
	assert.Contains(t, []int{http.StatusOK, http.StatusInternalServerError}, rec.Code)
}
// TestGetTransactionByHash tests getting a transaction by hash.
func TestGetTransactionByHash(t *testing.T) {
	_, mux := setupTestServer(t)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/transactions/138/0x1234567890abcdef", nil))
	assert.Contains(t, []int{http.StatusOK, http.StatusNotFound, http.StatusInternalServerError}, rec.Code)
}
// TestSearchEndpoint tests the unified search endpoint across each input
// shape the auto-detection supports (block number, address, tx hash).
func TestSearchEndpoint(t *testing.T) {
	_, mux := setupTestServer(t)
	cases := []struct {
		name     string
		query    string
		wantCode int
	}{
		{"block number", "?q=1000", http.StatusOK},
		{"address", "?q=0x1234567890abcdef1234567890abcdef12345678", http.StatusOK},
		{"transaction hash", "?q=0xabcdef1234567890abcdef1234567890abcdef12", http.StatusOK},
		{"empty query", "?q=", http.StatusBadRequest},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			rec := httptest.NewRecorder()
			mux.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/search"+c.query, nil))
			// Database failures degrade any case to 500.
			assert.Contains(t, []int{c.wantCode, http.StatusInternalServerError}, rec.Code)
		})
	}
}
// TestTrack1Endpoints tests Track 1 (public) endpoints: they must be
// reachable without any authentication.
func TestTrack1Endpoints(t *testing.T) {
	_, mux := setupTestServer(t)
	cases := []struct {
		name   string
		path   string
		method string
	}{
		{"latest blocks", "/api/v1/track1/blocks/latest", "GET"},
		{"latest transactions", "/api/v1/track1/txs/latest", "GET"},
		{"bridge status", "/api/v1/track1/bridge/status", "GET"},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			rec := httptest.NewRecorder()
			mux.ServeHTTP(rec, httptest.NewRequest(c.method, c.path, nil))
			// Public access: never 401/403; 500 only without a database.
			assert.Contains(t, []int{http.StatusOK, http.StatusInternalServerError}, rec.Code)
		})
	}
}
// TestCORSHeaders tests CORS headers are present.
//
// NOTE(review): the assertion below (header map is non-nil) can never
// fail, so this is a placeholder only. Once CORS middleware exists,
// assert on Access-Control-Allow-Origin explicitly.
func TestCORSHeaders(t *testing.T) {
	_, mux := setupTestServer(t)
	req := httptest.NewRequest("GET", "/health", nil)
	w := httptest.NewRecorder()
	mux.ServeHTTP(w, req)
	// Check for CORS headers (if implemented)
	// This is a placeholder - actual implementation may vary
	assert.NotNil(t, w.Header())
}
// TestErrorHandling tests error responses: a malformed block number must
// produce a 4xx/5xx status, and any JSON body must carry an "error" field.
func TestErrorHandling(t *testing.T) {
	_, mux := setupTestServer(t)
	rec := httptest.NewRecorder()
	mux.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/blocks/138/invalid", nil))
	assert.GreaterOrEqual(t, rec.Code, http.StatusBadRequest)
	var payload map[string]interface{}
	if json.Unmarshal(rec.Body.Bytes(), &payload) == nil {
		assert.NotNil(t, payload["error"])
	}
}
// TestPagination tests pagination parameters on the blocks list endpoint.
func TestPagination(t *testing.T) {
	_, mux := setupTestServer(t)
	cases := []struct {
		name     string
		query    string
		wantCode int
	}{
		{"valid pagination", "?limit=10&page=1", http.StatusOK},
		{"large limit", "?limit=1000&page=1", http.StatusOK}, // expected to be capped server-side
		{"invalid page", "?limit=10&page=0", http.StatusBadRequest},
		{"negative limit", "?limit=-10&page=1", http.StatusBadRequest},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			rec := httptest.NewRecorder()
			mux.ServeHTTP(rec, httptest.NewRequest("GET", "/api/v1/blocks"+c.query, nil))
			assert.Contains(t, []int{c.wantCode, http.StatusInternalServerError}, rec.Code)
		})
	}
}
// TestRequestTimeout tests request timeout handling.
//
// Intentionally skipped: there is no timeout middleware yet, so there is
// nothing to exercise. Remove the skip once one is implemented.
func TestRequestTimeout(t *testing.T) {
	// This would test timeout behavior
	// Implementation depends on timeout middleware
	t.Skip("Requires timeout middleware implementation")
}
// BenchmarkListBlocks benchmarks the blocks list endpoint
func BenchmarkListBlocks(b *testing.B) {
_, mux := setupTestServer(&testing.T{})
req := httptest.NewRequest("GET", "/api/v1/blocks?limit=10&page=1", nil)
b.ResetTimer()
for i := 0; i < b.N; i++ {
w := httptest.NewRecorder()
mux.ServeHTTP(w, req)
}
}

57
backend/api/rest/auth.go Normal file
View File

@@ -0,0 +1,57 @@
package rest
import (
"encoding/json"
"net/http"
"github.com/explorer/backend/auth"
)
// handleAuthNonce handles POST /api/v1/auth/nonce.
//
// Decodes a NonceRequest from the body, asks the wallet authenticator to
// generate a nonce for the supplied address, and returns it as JSON.
// Non-POST methods get 405; a malformed body or a rejected address gets 400.
func (s *Server) handleAuthNonce(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	var nonceReq auth.NonceRequest
	if decodeErr := json.NewDecoder(r.Body).Decode(&nonceReq); decodeErr != nil {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid request body")
		return
	}
	resp, err := s.walletAuth.GenerateNonce(r.Context(), nonceReq.Address)
	if err != nil {
		writeError(w, http.StatusBadRequest, "bad_request", err.Error())
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
// handleAuthWallet handles POST /api/v1/auth/wallet.
//
// Decodes a WalletAuthRequest from the body, verifies it through the
// wallet authenticator, and returns the resulting auth response as JSON.
// Non-POST gets 405, malformed bodies 400, failed verification 401.
func (s *Server) handleAuthWallet(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	var walletReq auth.WalletAuthRequest
	if decodeErr := json.NewDecoder(r.Body).Decode(&walletReq); decodeErr != nil {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid request body")
		return
	}
	resp, err := s.walletAuth.AuthenticateWallet(r.Context(), &walletReq)
	if err != nil {
		writeError(w, http.StatusUnauthorized, "unauthorized", err.Error())
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}

134
backend/api/rest/blocks.go Normal file
View File

@@ -0,0 +1,134 @@
package rest
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"time"
)
// handleGetBlockByNumber handles GET /api/v1/blocks/{chain_id}/{number}
//
// Fetches one block row for the server's configured chain and writes it as
// {"data": {...}}. Nullable columns (timestamp_iso, logs_bloom) are only
// included when present. NOTE(review): any scan error — including genuine
// database failures — is reported as 404, preserving prior behavior;
// distinguishing a no-rows result from other errors is a follow-up.
//
// Fix: the timeout now derives from r.Context() so a client disconnect
// cancels the query (context.Background ignored cancellation).
func (s *Server) handleGetBlockByNumber(w http.ResponseWriter, r *http.Request, blockNumber int64) {
	// Validate input (already validated in routes.go, but double-check).
	if blockNumber < 0 {
		writeValidationError(w, ErrInvalidBlockNumber)
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
	defer cancel()
	query := `
SELECT chain_id, number, hash, parent_hash, timestamp, timestamp_iso, miner,
transaction_count, gas_used, gas_limit, size, logs_bloom
FROM blocks
WHERE chain_id = $1 AND number = $2
`
	var chainID, number, transactionCount int
	var hash, parentHash, miner string
	var timestamp time.Time
	var timestampISO sql.NullString
	var gasUsed, gasLimit, size int64
	var logsBloom sql.NullString
	err := s.db.QueryRow(ctx, query, s.chainID, blockNumber).Scan(
		&chainID, &number, &hash, &parentHash, &timestamp, &timestampISO, &miner,
		&transactionCount, &gasUsed, &gasLimit, &size, &logsBloom,
	)
	if err != nil {
		http.Error(w, fmt.Sprintf("Block not found: %v", err), http.StatusNotFound)
		return
	}
	block := map[string]interface{}{
		"chain_id":          chainID,
		"number":            number,
		"hash":              hash,
		"parent_hash":       parentHash,
		"timestamp":         timestamp,
		"miner":             miner,
		"transaction_count": transactionCount,
		"gas_used":          gasUsed,
		"gas_limit":         gasLimit,
		"size":              size,
	}
	if timestampISO.Valid {
		block["timestamp_iso"] = timestampISO.String
	}
	if logsBloom.Valid {
		block["logs_bloom"] = logsBloom.String
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"data": block,
	})
}
// handleGetBlockByHash handles GET /api/v1/blocks/{chain_id}/hash/{hash}
//
// Same response shape as handleGetBlockByNumber, keyed by block hash.
// NOTE(review): any scan error is reported as 404, matching prior behavior.
//
// Fix: the timeout now derives from r.Context() so a client disconnect
// cancels the query (context.Background ignored cancellation).
func (s *Server) handleGetBlockByHash(w http.ResponseWriter, r *http.Request, hash string) {
	// Validate hash format (already validated in routes.go, but double-check).
	if !isValidHash(hash) {
		writeValidationError(w, ErrInvalidHash)
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
	defer cancel()
	query := `
SELECT chain_id, number, hash, parent_hash, timestamp, timestamp_iso, miner,
transaction_count, gas_used, gas_limit, size, logs_bloom
FROM blocks
WHERE chain_id = $1 AND hash = $2
`
	var chainID, number, transactionCount int
	var blockHash, parentHash, miner string
	var timestamp time.Time
	var timestampISO sql.NullString
	var gasUsed, gasLimit, size int64
	var logsBloom sql.NullString
	err := s.db.QueryRow(ctx, query, s.chainID, hash).Scan(
		&chainID, &number, &blockHash, &parentHash, &timestamp, &timestampISO, &miner,
		&transactionCount, &gasUsed, &gasLimit, &size, &logsBloom,
	)
	if err != nil {
		http.Error(w, fmt.Sprintf("Block not found: %v", err), http.StatusNotFound)
		return
	}
	block := map[string]interface{}{
		"chain_id":          chainID,
		"number":            number,
		"hash":              blockHash,
		"parent_hash":       parentHash,
		"timestamp":         timestamp,
		"miner":             miner,
		"transaction_count": transactionCount,
		"gas_used":          gasUsed,
		"gas_limit":         gasLimit,
		"size":              size,
	}
	if timestampISO.Valid {
		block["timestamp_iso"] = timestampISO.String
	}
	if logsBloom.Valid {
		block["logs_bloom"] = logsBloom.String
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"data": block,
	})
}

BIN
backend/api/rest/cmd/api-server Executable file

Binary file not shown.

View File

@@ -0,0 +1,57 @@
package main
import (
"context"
"log"
"os"
"strconv"
"time"
"github.com/explorer/backend/api/rest"
"github.com/explorer/backend/database/config"
"github.com/jackc/pgx/v5/pgxpool"
)
// main boots the REST API server: loads DB config, builds a pgx pool,
// reads CHAIN_ID/PORT overrides from the environment, and serves.
//
// Fix: pool limits are now applied to poolConfig BEFORE pgxpool.NewWithConfig.
// The previous code mutated db.Config() after creation, but pgxpool's
// Pool.Config() returns a copy, so those assignments silently did nothing.
func main() {
	ctx := context.Background()
	// Load database configuration
	dbConfig := config.LoadDatabaseConfig()
	poolConfig, err := dbConfig.PoolConfig()
	if err != nil {
		log.Fatalf("Failed to create pool config: %v", err)
	}
	// Configure connection pool limits before creating the pool.
	poolConfig.MaxConns = 25
	poolConfig.MinConns = 5
	poolConfig.MaxConnLifetime = 5 * time.Minute
	poolConfig.MaxConnIdleTime = 10 * time.Minute
	// Connect to database
	db, err := pgxpool.NewWithConfig(ctx, poolConfig)
	if err != nil {
		log.Fatalf("Failed to connect to database: %v", err)
	}
	defer db.Close()
	// CHAIN_ID / PORT environment overrides; invalid values keep defaults.
	chainID := 138
	if envChainID := os.Getenv("CHAIN_ID"); envChainID != "" {
		if id, err := strconv.Atoi(envChainID); err == nil {
			chainID = id
		}
	}
	port := 8080
	if envPort := os.Getenv("PORT"); envPort != "" {
		if p, err := strconv.Atoi(envPort); err == nil {
			port = p
		}
	}
	// Create and start server
	server := rest.NewServer(db, chainID)
	if err := server.Start(port); err != nil {
		log.Fatalf("Failed to start server: %v", err)
	}
}

View File

@@ -0,0 +1,36 @@
package rest
import (
_ "embed"
"net/http"
)
// dualChainNetworksJSON holds the embedded wallet network list, served
// verbatim by handleConfigNetworks.
//
//go:embed config/metamask/DUAL_CHAIN_NETWORKS.json
var dualChainNetworksJSON []byte

// dualChainTokenListJSON holds the embedded token list (Uniswap tokenlist
// format), served verbatim by handleConfigTokenList.
//
//go:embed config/metamask/DUAL_CHAIN_TOKEN_LIST.tokenlist.json
var dualChainTokenListJSON []byte
// handleConfigNetworks serves GET /api/config/networks (Chain 138 + Ethereum Mainnet params for wallet_addEthereumChain).
//
// The payload is a static embedded file, so it is marked cacheable for an
// hour. Any method other than GET gets 405 with an Allow header.
func (s *Server) handleConfigNetworks(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		w.Header().Set("Allow", "GET")
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	hdr := w.Header()
	hdr.Set("Content-Type", "application/json")
	hdr.Set("Cache-Control", "public, max-age=3600")
	_, _ = w.Write(dualChainNetworksJSON)
}
// handleConfigTokenList serves GET /api/config/token-list (Uniswap token list format for MetaMask).
//
// Mirrors handleConfigNetworks: static embedded JSON, cacheable for an
// hour, 405 (with Allow header) for non-GET methods.
func (s *Server) handleConfigTokenList(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		w.Header().Set("Allow", "GET")
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	hdr := w.Header()
	hdr.Set("Content-Type", "application/json")
	hdr.Set("Cache-Control", "public, max-age=3600")
	_, _ = w.Write(dualChainTokenListJSON)
}

View File

@@ -0,0 +1,61 @@
{
"name": "MetaMask Multi-Chain Networks (Chain 138 + Ethereum Mainnet + ALL Mainnet)",
"version": { "major": 1, "minor": 1, "patch": 0 },
"chains": [
{
"chainId": "0x8a",
"chainIdDecimal": 138,
"chainName": "DeFi Oracle Meta Mainnet",
"rpcUrls": [
"https://rpc-http-pub.d-bis.org",
"https://rpc.d-bis.org",
"https://rpc2.d-bis.org",
"https://rpc.defi-oracle.io"
],
"nativeCurrency": {
"name": "Ether",
"symbol": "ETH",
"decimals": 18
},
"blockExplorerUrls": ["https://explorer.d-bis.org"],
"iconUrls": [
"https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png"
]
},
{
"chainId": "0x1",
"chainIdDecimal": 1,
"chainName": "Ethereum Mainnet",
"rpcUrls": [
"https://eth.llamarpc.com",
"https://rpc.ankr.com/eth",
"https://ethereum.publicnode.com",
"https://1rpc.io/eth"
],
"nativeCurrency": {
"name": "Ether",
"symbol": "ETH",
"decimals": 18
},
"blockExplorerUrls": ["https://etherscan.io"],
"iconUrls": [
"https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png"
]
},
{
"chainId": "0x9f2c4",
"chainIdDecimal": 651940,
"chainName": "ALL Mainnet",
"rpcUrls": ["https://mainnet-rpc.alltra.global"],
"nativeCurrency": {
"name": "Ether",
"symbol": "ETH",
"decimals": 18
},
"blockExplorerUrls": ["https://alltra.global"],
"iconUrls": [
"https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png"
]
}
]
}

View File

@@ -0,0 +1,115 @@
{
"name": "Multi-Chain Token List (Chain 138 + Ethereum Mainnet + ALL Mainnet)",
"version": { "major": 1, "minor": 1, "patch": 0 },
"timestamp": "2026-01-30T00:00:00.000Z",
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tokens": [
{
"chainId": 138,
"address": "0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6",
"name": "ETH/USD Price Feed",
"symbol": "ETH-USD",
"decimals": 8,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["oracle", "price-feed"]
},
{
"chainId": 138,
"address": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
"name": "Wrapped Ether",
"symbol": "WETH",
"decimals": 18,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["defi", "wrapped"]
},
{
"chainId": 138,
"address": "0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f",
"name": "Wrapped Ether v10",
"symbol": "WETH10",
"decimals": 18,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["defi", "wrapped"]
},
{
"chainId": 138,
"address": "0x93E66202A11B1772E55407B32B44e5Cd8eda7f22",
"name": "Compliant Tether USD",
"symbol": "cUSDT",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xdAC17F958D2ee523a2206206994597C13D831ec7/logo.png",
"tags": ["stablecoin", "defi", "compliant"]
},
{
"chainId": 138,
"address": "0xf22258f57794CC8E06237084b353Ab30fFfa640b",
"name": "Compliant USD Coin",
"symbol": "cUSDC",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48/logo.png",
"tags": ["stablecoin", "defi", "compliant"]
},
{
"chainId": 1,
"address": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
"name": "Wrapped Ether",
"symbol": "WETH",
"decimals": 18,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["defi", "wrapped"]
},
{
"chainId": 1,
"address": "0xdAC17F958D2ee523a2206206994597C13D831ec7",
"name": "Tether USD",
"symbol": "USDT",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xdAC17F958D2ee523a2206206994597C13D831ec7/logo.png",
"tags": ["stablecoin", "defi"]
},
{
"chainId": 1,
"address": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48",
"name": "USD Coin",
"symbol": "USDC",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48/logo.png",
"tags": ["stablecoin", "defi"]
},
{
"chainId": 1,
"address": "0x6B175474E89094C44Da98b954EedeAC495271d0F",
"name": "Dai Stablecoin",
"symbol": "DAI",
"decimals": 18,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0x6B175474E89094C44Da98b954EedeAC495271d0F/logo.png",
"tags": ["stablecoin", "defi"]
},
{
"chainId": 1,
"address": "0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419",
"name": "ETH/USD Price Feed",
"symbol": "ETH-USD",
"decimals": 8,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["oracle", "price-feed"]
},
{
"chainId": 651940,
"address": "0xa95EeD79f84E6A0151eaEb9d441F9Ffd50e8e881",
"name": "USD Coin",
"symbol": "USDC",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48/logo.png",
"tags": ["stablecoin", "defi"]
}
],
"tags": {
"defi": { "name": "DeFi", "description": "Decentralized Finance tokens" },
"wrapped": { "name": "Wrapped", "description": "Wrapped tokens representing native assets" },
"oracle": { "name": "Oracle", "description": "Oracle price feed contracts" },
"price-feed": { "name": "Price Feed", "description": "Price feed oracle contracts" },
"stablecoin": { "name": "Stablecoin", "description": "Stable value tokens pegged to fiat" },
"compliant": { "name": "Compliant", "description": "Regulatory compliant tokens" }
}
}

View File

@@ -0,0 +1,51 @@
package rest
import (
"encoding/json"
"net/http"
)
// ErrorResponse represents an API error response.
//
// Wire shape: {"error":{"code":"...","message":"...","details":"..."}}.
type ErrorResponse struct {
	Error ErrorDetail `json:"error"`
}

// ErrorDetail contains error details.
//
// Code is a stable machine-readable identifier, Message is human-readable,
// and Details carries optional extra context (omitted from JSON when empty).
type ErrorDetail struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	Details string `json:"details,omitempty"`
}
// writeError writes an error response: it serializes an ErrorResponse
// ({"error":{"code":...,"message":...}}) with the given HTTP status code.
// The Content-Type header is set before WriteHeader, which freezes headers.
func writeError(w http.ResponseWriter, statusCode int, code, message string) {
	resp := ErrorResponse{
		Error: ErrorDetail{Code: code, Message: message},
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	_ = json.NewEncoder(w).Encode(resp)
}
// writeNotFound writes a 404 error response for the named resource,
// e.g. writeNotFound(w, "Block") yields the message "Block not found".
func writeNotFound(w http.ResponseWriter, resource string) {
	writeError(w, http.StatusNotFound, "NOT_FOUND", resource+" not found")
}

// writeInternalError writes a 500 error response with the given message.
func writeInternalError(w http.ResponseWriter, message string) {
	writeError(w, http.StatusInternalServerError, "INTERNAL_ERROR", message)
}

// writeUnauthorized writes a 401 error response (authentication required).
func writeUnauthorized(w http.ResponseWriter) {
	writeError(w, http.StatusUnauthorized, "UNAUTHORIZED", "Authentication required")
}

// writeForbidden writes a 403 error response (authenticated but not allowed).
func writeForbidden(w http.ResponseWriter) {
	writeError(w, http.StatusForbidden, "FORBIDDEN", "Access denied")
}

View File

@@ -0,0 +1,215 @@
package rest
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
)
// handleEtherscanAPI handles GET /api?module=...&action=...
// This provides Etherscan-compatible API endpoints
//
// Supported today: module=block with action=eth_block_number (latest
// indexed height as a hex quantity) and action=eth_get_block_by_number
// (block fields in Etherscan's hex-string shape). Every reply uses the
// Etherscan envelope {status, message, result}: status "1" for success,
// "0" for any error.
func (s *Server) handleEtherscanAPI(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
module := r.URL.Query().Get("module")
action := r.URL.Query().Get("action")
// Etherscan-compatible response structure
type EtherscanResponse struct {
Status string `json:"status"`
Message string `json:"message"`
Result interface{} `json:"result"`
}
// Validate required parameters
if module == "" || action == "" {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusBadRequest)
response := EtherscanResponse{
Status: "0",
Message: "Params 'module' and 'action' are required parameters",
Result: nil,
}
json.NewEncoder(w).Encode(response)
return
}
// NOTE(review): timeout is anchored to context.Background rather than
// r.Context(), so a client disconnect does not cancel the queries —
// confirm whether that is intentional.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
var response EtherscanResponse
switch module {
case "block":
switch action {
case "eth_block_number":
// Get latest block number
// Latest indexed height for this chain, returned as "0x..".
var blockNumber int64
err := s.db.QueryRow(ctx,
`SELECT MAX(number) FROM blocks WHERE chain_id = $1`,
s.chainID,
).Scan(&blockNumber)
if err != nil {
response = EtherscanResponse{
Status: "0",
Message: "Error",
Result: "0x0",
}
} else {
response = EtherscanResponse{
Status: "1",
Message: "OK",
Result: fmt.Sprintf("0x%x", blockNumber),
}
}
case "eth_get_block_by_number":
tag := r.URL.Query().Get("tag")
boolean := r.URL.Query().Get("boolean") == "true"
// Parse block number from tag (can be "latest", "0x...", or decimal)
var blockNumber int64
if tag == "latest" {
// "latest" resolves to the highest indexed block number.
err := s.db.QueryRow(ctx,
`SELECT MAX(number) FROM blocks WHERE chain_id = $1`,
s.chainID,
).Scan(&blockNumber)
if err != nil {
response = EtherscanResponse{
Status: "0",
Message: "Error",
Result: nil,
}
break
}
} else if len(tag) > 2 && tag[:2] == "0x" {
// Hex format
parsed, err := strconv.ParseInt(tag[2:], 16, 64)
if err != nil {
response = EtherscanResponse{
Status: "0",
Message: "Invalid block number",
Result: nil,
}
break
}
blockNumber = parsed
} else {
// Decimal format
parsed, err := strconv.ParseInt(tag, 10, 64)
if err != nil {
response = EtherscanResponse{
Status: "0",
Message: "Invalid block number",
Result: nil,
}
break
}
blockNumber = parsed
}
// Get block data
var hash, parentHash, miner string
var timestamp time.Time
var transactionCount int
var gasUsed, gasLimit int64
var transactions []string
query := `
SELECT hash, parent_hash, timestamp, miner, transaction_count, gas_used, gas_limit
FROM blocks
WHERE chain_id = $1 AND number = $2
`
err := s.db.QueryRow(ctx, query, s.chainID, blockNumber).Scan(
&hash, &parentHash, &timestamp, &miner, &transactionCount, &gasUsed, &gasLimit,
)
if err != nil {
response = EtherscanResponse{
Status: "0",
Message: "Block not found",
Result: nil,
}
break
}
// If boolean is true, get full transaction objects
// NOTE(review): both branches below run the identical hash-only query;
// the boolean=true path does not yet return full transaction objects as
// the Etherscan API would — flagging for follow-up, behavior unchanged.
if boolean {
txQuery := `
SELECT hash FROM transactions
WHERE chain_id = $1 AND block_number = $2
ORDER BY transaction_index
`
rows, err := s.db.Query(ctx, txQuery, s.chainID, blockNumber)
if err == nil {
defer rows.Close()
for rows.Next() {
var txHash string
if err := rows.Scan(&txHash); err == nil {
transactions = append(transactions, txHash)
}
}
}
} else {
// Just get transaction hashes
txQuery := `
SELECT hash FROM transactions
WHERE chain_id = $1 AND block_number = $2
ORDER BY transaction_index
`
rows, err := s.db.Query(ctx, txQuery, s.chainID, blockNumber)
if err == nil {
defer rows.Close()
for rows.Next() {
var txHash string
if err := rows.Scan(&txHash); err == nil {
transactions = append(transactions, txHash)
}
}
}
}
// Numeric fields are rendered as 0x-prefixed hex strings, matching the
// Etherscan / JSON-RPC wire convention.
blockResult := map[string]interface{}{
"number": fmt.Sprintf("0x%x", blockNumber),
"hash": hash,
"parentHash": parentHash,
"timestamp": fmt.Sprintf("0x%x", timestamp.Unix()),
"miner": miner,
"transactions": transactions,
"transactionCount": fmt.Sprintf("0x%x", transactionCount),
"gasUsed": fmt.Sprintf("0x%x", gasUsed),
"gasLimit": fmt.Sprintf("0x%x", gasLimit),
}
response = EtherscanResponse{
Status: "1",
Message: "OK",
Result: blockResult,
}
default:
response = EtherscanResponse{
Status: "0",
Message: "Invalid action",
Result: nil,
}
}
default:
response = EtherscanResponse{
Status: "0",
Message: "Invalid module",
Result: nil,
}
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
}

View File

@@ -0,0 +1,82 @@
package rest
import (
"encoding/json"
"net/http"
"github.com/explorer/backend/featureflags"
)
// handleFeatures handles GET /api/v1/features
// Returns available features for the current user based on their track level
//
// The caller's track is read from the request context (key "user_track",
// presumably set by the auth middleware — confirm against it); callers
// without a track default to Track 1 (public). NOTE(review): the context
// key is a bare string; go vet recommends a private key type to avoid
// collisions.
func (s *Server) handleFeatures(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// Extract user track from context (set by auth middleware)
	// Default to Track 1 (public) if not authenticated
	userTrack := 1
	if track, ok := r.Context().Value("user_track").(int); ok {
		userTrack = track
	}
	// Get enabled features for this track
	enabledFeatures := featureflags.GetEnabledFeatures(userTrack)
	// Get permissions based on track
	permissions := getPermissionsForTrack(userTrack)
	response := map[string]interface{}{
		"track":       userTrack,
		"features":    enabledFeatures,
		"permissions": permissions,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// getPermissionsForTrack returns the permission strings granted at the
// given track level. Permissions are cumulative: every track includes the
// permissions of all lower tiers, starting from the public Track 1 set.
func getPermissionsForTrack(track int) []string {
	// Track 1 (public) baseline, always granted.
	granted := []string{
		"explorer.read.blocks",
		"explorer.read.transactions",
		"explorer.read.address.basic",
		"explorer.read.bridge.status",
		"weth.wrap",
		"weth.unwrap",
	}

	// Higher tiers, each gated on a minimum track level.
	tiers := []struct {
		minTrack int
		perms    []string
	}{
		{2, []string{
			"explorer.read.address.full",
			"explorer.read.tokens",
			"explorer.read.tx_history",
			"explorer.read.internal_txs",
			"explorer.search.enhanced",
		}},
		{3, []string{
			"analytics.read.flows",
			"analytics.read.bridge",
			"analytics.read.token_distribution",
			"analytics.read.address_risk",
		}},
		{4, []string{
			"operator.read.bridge_events",
			"operator.read.validators",
			"operator.read.contracts",
			"operator.read.protocol_state",
			"operator.write.bridge_control",
		}},
	}
	for _, tier := range tiers {
		if track >= tier.minTrack {
			granted = append(granted, tier.perms...)
		}
	}
	return granted
}

View File

@@ -0,0 +1,44 @@
package rest
import (
"log"
"net/http"
"time"
)
// responseWriter wraps http.ResponseWriter to capture the status code so
// middleware (e.g. loggingMiddleware) can record it after the handler
// chain has run.
type responseWriter struct {
	http.ResponseWriter
	statusCode int // last code passed to WriteHeader; callers initialize it to http.StatusOK
}

// WriteHeader records the status code, then delegates to the wrapped
// ResponseWriter. NOTE(review): handlers that write a body without ever
// calling WriteHeader leave statusCode at its initial value, which matches
// the implicit 200 net/http sends in that case.
func (rw *responseWriter) WriteHeader(code int) {
	rw.statusCode = code
	rw.ResponseWriter.WriteHeader(code)
}
// loggingMiddleware wraps a handler and logs the method, path, response
// status, and elapsed time of every request.
func (s *Server) loggingMiddleware(next http.Handler) http.Handler {
	fn := func(w http.ResponseWriter, r *http.Request) {
		recorder := &responseWriter{ResponseWriter: w, statusCode: http.StatusOK}
		began := time.Now()

		next.ServeHTTP(recorder, r)

		// In production, swap this for a structured logger.
		log.Printf("%s %s %d %v", r.Method, r.URL.Path, recorder.statusCode, time.Since(began))
	}
	return http.HandlerFunc(fn)
}
// compressionMiddleware is a placeholder for response compression.
//
// It currently passes every request straight through: the previous
// implementation inspected the Accept-Encoding header but had an empty
// branch (staticcheck SA9003), so no compression was ever applied. Wire in
// gorilla/handlers.CompressHandler (or an equivalent gzip wrapper) here
// when compression is actually needed.
func (s *Server) compressionMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(w, r)
	})
}

166
backend/api/rest/routes.go Normal file
View File

@@ -0,0 +1,166 @@
package rest
import (
"fmt"
"net/http"
"strings"
)
// SetupRoutes registers all non-track API routes on mux.
//
// Patterns ending in "/" (e.g. "/api/v1/blocks/") are ServeMux prefix
// matches whose path suffix is parsed by the corresponding detail handler.
// Track 1-4 endpoints are registered separately in SetupTrackRoutes so
// they can carry their auth/track middleware; the commented patterns below
// document that intended URL space.
func (s *Server) SetupRoutes(mux *http.ServeMux) {
	// Block routes
	mux.HandleFunc("/api/v1/blocks", s.handleListBlocks)
	mux.HandleFunc("/api/v1/blocks/", s.handleBlockDetail)
	// Transaction routes
	mux.HandleFunc("/api/v1/transactions", s.handleListTransactions)
	mux.HandleFunc("/api/v1/transactions/", s.handleTransactionDetail)
	// Address routes
	mux.HandleFunc("/api/v1/addresses/", s.handleAddressDetail)
	// Search route
	mux.HandleFunc("/api/v1/search", s.handleSearch)
	// Stats route (note: v2 namespace, unlike the other v1 routes)
	mux.HandleFunc("/api/v2/stats", s.handleStats)
	// Etherscan-compatible API route (module/action query-style dispatch)
	mux.HandleFunc("/api", s.handleEtherscanAPI)
	// Health check
	mux.HandleFunc("/health", s.handleHealth)
	// MetaMask / dual-chain config (Chain 138 + Ethereum Mainnet)
	mux.HandleFunc("/api/config/networks", s.handleConfigNetworks)
	mux.HandleFunc("/api/config/token-list", s.handleConfigTokenList)
	// Feature flags endpoint
	mux.HandleFunc("/api/v1/features", s.handleFeatures)
	// Auth endpoints (wallet-signature login flow)
	mux.HandleFunc("/api/v1/auth/nonce", s.handleAuthNonce)
	mux.HandleFunc("/api/v1/auth/wallet", s.handleAuthWallet)
	// Track 1 routes (public, optional auth)
	// Note: Track 1 endpoints should be registered with OptionalAuth middleware
	// mux.HandleFunc("/api/v1/track1/blocks/latest", s.track1Server.handleLatestBlocks)
	// mux.HandleFunc("/api/v1/track1/txs/latest", s.track1Server.handleLatestTransactions)
	// mux.HandleFunc("/api/v1/track1/block/", s.track1Server.handleBlockDetail)
	// mux.HandleFunc("/api/v1/track1/tx/", s.track1Server.handleTransactionDetail)
	// mux.HandleFunc("/api/v1/track1/address/", s.track1Server.handleAddressBalance)
	// mux.HandleFunc("/api/v1/track1/bridge/status", s.track1Server.handleBridgeStatus)
	// Track 2 routes (require Track 2+)
	// Note: Track 2 endpoints should be registered with RequireAuth + RequireTrack(2) middleware
	// mux.HandleFunc("/api/v1/track2/address/", s.track2Server.handleAddressTransactions)
	// mux.HandleFunc("/api/v1/track2/token/", s.track2Server.handleTokenInfo)
	// mux.HandleFunc("/api/v1/track2/search", s.track2Server.handleSearch)
	// Track 3 routes (require Track 3+)
	// Note: Track 3 endpoints should be registered with RequireAuth + RequireTrack(3) middleware
	// mux.HandleFunc("/api/v1/track3/analytics/flows", s.track3Server.handleFlows)
	// mux.HandleFunc("/api/v1/track3/analytics/bridge", s.track3Server.handleBridge)
	// mux.HandleFunc("/api/v1/track3/analytics/token-distribution/", s.track3Server.handleTokenDistribution)
	// mux.HandleFunc("/api/v1/track3/analytics/address-risk/", s.track3Server.handleAddressRisk)
	// Track 4 routes (require Track 4)
	// Note: Track 4 endpoints should be registered with RequireAuth + RequireTrack(4) + IP whitelist middleware
	// mux.HandleFunc("/api/v1/track4/operator/bridge/events", s.track4Server.handleBridgeEvents)
	// mux.HandleFunc("/api/v1/track4/operator/validators", s.track4Server.handleValidators)
	// mux.HandleFunc("/api/v1/track4/operator/contracts", s.track4Server.handleContracts)
	// mux.HandleFunc("/api/v1/track4/operator/protocol-state", s.track4Server.handleProtocolState)
}
// handleBlockDetail handles GET /api/v1/blocks/{chain_id}/{number} and
// GET /api/v1/blocks/{chain_id}/hash/{hash}, validating each path segment
// before dispatching to the number- or hash-based block lookup.
func (s *Server) handleBlockDetail(w http.ResponseWriter, r *http.Request) {
	segments := strings.Split(strings.TrimPrefix(r.URL.Path, "/api/v1/blocks/"), "/")
	if len(segments) < 2 {
		writeValidationError(w, fmt.Errorf("invalid block path"))
		return
	}

	// First segment must name the chain this server indexes.
	if err := validateChainID(segments[0], s.chainID); err != nil {
		writeValidationError(w, err)
		return
	}

	// Hash form: /blocks/{chain_id}/hash/{hash}
	if len(segments) == 3 && segments[1] == "hash" {
		if !isValidHash(segments[2]) {
			writeValidationError(w, ErrInvalidHash)
			return
		}
		s.handleGetBlockByHash(w, r, segments[2])
		return
	}

	// Number form: /blocks/{chain_id}/{number}
	blockNumber, err := validateBlockNumber(segments[1])
	if err != nil {
		writeValidationError(w, err)
		return
	}
	s.handleGetBlockByNumber(w, r, blockNumber)
}
// handleGetBlockByNumber and handleGetBlockByHash are in blocks.go
// handleTransactionDetail handles GET /api/v1/transactions/{chain_id}/{hash}.
// It validates the chain ID and the transaction hash before delegating to
// the hash-based lookup.
func (s *Server) handleTransactionDetail(w http.ResponseWriter, r *http.Request) {
	segments := strings.Split(strings.TrimPrefix(r.URL.Path, "/api/v1/transactions/"), "/")
	if len(segments) < 2 {
		writeValidationError(w, fmt.Errorf("invalid transaction path"))
		return
	}

	// First segment must name the chain this server indexes.
	if err := validateChainID(segments[0], s.chainID); err != nil {
		writeValidationError(w, err)
		return
	}

	// Second segment is the transaction hash.
	txHash := segments[1]
	if !isValidHash(txHash) {
		writeValidationError(w, ErrInvalidHash)
		return
	}
	s.handleGetTransactionByHash(w, r, txHash)
}
// handleGetTransactionByHash is implemented in transactions.go
// handleAddressDetail handles GET /api/v1/addresses/{chain_id}/{address}.
// It validates the chain ID and address format, then delegates to
// handleGetAddress, passing the address through the query string.
func (s *Server) handleAddressDetail(w http.ResponseWriter, r *http.Request) {
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/addresses/")
	parts := strings.Split(path, "/")
	if len(parts) < 2 {
		writeValidationError(w, fmt.Errorf("invalid address path"))
		return
	}
	// Validate chain ID
	if err := validateChainID(parts[0], s.chainID); err != nil {
		writeValidationError(w, err)
		return
	}
	// Validate address format
	address := parts[1]
	if !isValidAddress(address) {
		writeValidationError(w, ErrInvalidAddress)
		return
	}
	// Merge the address into the existing query string instead of replacing
	// RawQuery wholesale, so caller-supplied parameters (pagination, filters)
	// survive the delegation to handleGetAddress. The previous assignment
	// "address=" + address silently dropped every other parameter.
	q := r.URL.Query()
	q.Set("address", address)
	r.URL.RawQuery = q.Encode()
	s.handleGetAddress(w, r)
}

View File

@@ -0,0 +1,53 @@
package rest
import (
"fmt"
"net/http"
)
// handleSearch handles GET /api/v1/search.
// The "q" parameter is classified by validateSearchQuery as a block number,
// transaction hash, or address, and the request is dispatched to the
// matching detail handler.
func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	query := r.URL.Query().Get("q")
	if query == "" {
		writeValidationError(w, fmt.Errorf("search query required"))
		return
	}
	// Validate and determine search type
	searchType, value, err := validateSearchQuery(query)
	if err != nil {
		writeValidationError(w, err)
		return
	}
	// Route to appropriate handler based on search type
	switch searchType {
	case "block":
		blockNumber, err := validateBlockNumber(value)
		if err != nil {
			writeValidationError(w, err)
			return
		}
		s.handleGetBlockByNumber(w, r, blockNumber)
	case "transaction":
		if !isValidHash(value) {
			writeValidationError(w, ErrInvalidHash)
			return
		}
		s.handleGetTransactionByHash(w, r, value)
	case "address":
		if !isValidAddress(value) {
			writeValidationError(w, ErrInvalidAddress)
			return
		}
		// Merge the address into the existing query string instead of
		// overwriting RawQuery, so other caller-supplied parameters survive
		// the delegation to handleGetAddress.
		q := r.URL.Query()
		q.Set("address", value)
		r.URL.RawQuery = q.Encode()
		s.handleGetAddress(w, r)
	default:
		writeValidationError(w, fmt.Errorf("unsupported search type"))
	}
}

224
backend/api/rest/server.go Normal file
View File

@@ -0,0 +1,224 @@
package rest
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strings"
"time"
"github.com/explorer/backend/auth"
"github.com/explorer/backend/api/middleware"
"github.com/jackc/pgx/v5/pgxpool"
)
// Server represents the REST API server. It owns the shared database pool,
// the chain this deployment indexes, and the wallet-based auth state used
// by the tiered (Track 1-4) endpoints.
type Server struct {
	db         *pgxpool.Pool    // shared PostgreSQL connection pool
	chainID    int              // chain ID this server indexes and validates requests against
	walletAuth *auth.WalletAuth // wallet-signature authentication backend
	jwtSecret  []byte           // secret used to sign/verify JWTs (see NewServer)
}
// NewServer creates a new REST API server backed by the given database
// pool and chain ID. The JWT signing secret is read from the JWT_SECRET
// environment variable; when unset, a well-known development default is
// used and a warning is logged.
func NewServer(db *pgxpool.Pool, chainID int) *Server {
	secret := os.Getenv("JWT_SECRET")
	if secret == "" {
		secret = "change-me-in-production-use-strong-random-secret"
		log.Println("WARNING: Using default JWT secret. Set JWT_SECRET environment variable in production!")
	}
	jwtSecret := []byte(secret)

	return &Server{
		db:         db,
		chainID:    chainID,
		walletAuth: auth.NewWalletAuth(db, jwtSecret),
		jwtSecret:  jwtSecret,
	}
}
// Start starts the HTTP server on the given port, wiring up all routes and
// the middleware chain (security headers -> optional auth -> branding/CORS
// -> logging -> compression -> mux). It blocks until the server exits and
// returns the server's terminal error.
func (s *Server) Start(port int) error {
	mux := http.NewServeMux()
	s.SetupRoutes(mux)

	// Initialize auth middleware
	authMiddleware := middleware.NewAuthMiddleware(s.walletAuth)

	// Setup track routes with proper middleware
	s.SetupTrackRoutes(mux, authMiddleware)

	// Initialize security middleware
	securityMiddleware := middleware.NewSecurityMiddleware()

	// Add middleware for all routes (outermost to innermost)
	handler := securityMiddleware.AddSecurityHeaders(
		authMiddleware.OptionalAuth( // Optional auth for Track 1, required for others
			s.addMiddleware(
				s.loggingMiddleware(
					s.compressionMiddleware(mux),
				),
			),
		),
	)

	addr := fmt.Sprintf(":%d", port)
	log.Printf("Starting SolaceScanScout REST API server on %s", addr)
	log.Printf("Tiered architecture enabled: Track 1 (public), Track 2-4 (authenticated)")

	// Use an explicit http.Server so slow or stalled clients cannot hold
	// connections open forever: http.ListenAndServe provides no timeouts.
	srv := &http.Server{
		Addr:         addr,
		Handler:      handler,
		ReadTimeout:  15 * time.Second,
		WriteTimeout: 30 * time.Second,
		IdleTimeout:  60 * time.Second,
	}
	return srv.ListenAndServe()
}
// addMiddleware decorates every response with branding headers and, for
// /api/ paths, permissive CORS headers. OPTIONS preflight requests on /api/
// paths are answered immediately with 200 and never reach the next handler.
func (s *Server) addMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		hdr := w.Header()

		// Branding headers on every response.
		hdr.Set("X-Explorer-Name", "SolaceScanScout")
		hdr.Set("X-Explorer-Version", "1.0.0")
		hdr.Set("X-Powered-By", "SolaceScanScout")

		// CORS applies only to the API surface.
		if strings.HasPrefix(r.URL.Path, "/api/") {
			hdr.Set("Access-Control-Allow-Origin", "*")
			hdr.Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
			hdr.Set("Access-Control-Allow-Headers", "Content-Type, X-API-Key")
			// Short-circuit preflight requests.
			if r.Method == "OPTIONS" {
				w.WriteHeader(http.StatusOK)
				return
			}
		}

		next.ServeHTTP(w, r)
	})
}
// handleListBlocks handles GET /api/v1/blocks.
// It returns one page of blocks for the configured chain, newest first.
// Query params: page (1-based) and page_size, both bounded by
// validatePagination.
func (s *Server) handleListBlocks(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Validate pagination
	page, pageSize, err := validatePagination(
		r.URL.Query().Get("page"),
		r.URL.Query().Get("page_size"),
	)
	if err != nil {
		writeValidationError(w, err)
		return
	}
	offset := (page - 1) * pageSize

	query := `
		SELECT chain_id, number, hash, timestamp, timestamp_iso, miner, transaction_count, gas_used, gas_limit
		FROM blocks
		WHERE chain_id = $1
		ORDER BY number DESC
		LIMIT $2 OFFSET $3
	`

	// Derive the timeout from the request context so the query is also
	// cancelled when the client disconnects (context.Background() was not).
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()

	rows, err := s.db.Query(ctx, query, s.chainID, pageSize, offset)
	if err != nil {
		http.Error(w, fmt.Sprintf("Database error: %v", err), http.StatusInternalServerError)
		return
	}
	defer rows.Close()

	blocks := []map[string]interface{}{}
	for rows.Next() {
		var chainID, number, transactionCount int
		var hash, miner string
		var timestamp time.Time
		var timestampISO sql.NullString
		var gasUsed, gasLimit int64
		if err := rows.Scan(&chainID, &number, &hash, &timestamp, &timestampISO, &miner, &transactionCount, &gasUsed, &gasLimit); err != nil {
			// Skip malformed rows but keep serving the rest of the page;
			// log so the data problem is visible.
			log.Printf("handleListBlocks: scan error: %v", err)
			continue
		}
		block := map[string]interface{}{
			"chain_id":          chainID,
			"number":            number,
			"hash":              hash,
			"timestamp":         timestamp,
			"miner":             miner,
			"transaction_count": transactionCount,
			"gas_used":          gasUsed,
			"gas_limit":         gasLimit,
		}
		if timestampISO.Valid {
			block["timestamp_iso"] = timestampISO.String
		}
		blocks = append(blocks, block)
	}
	// Surface iteration errors (e.g. a dropped connection) instead of
	// silently returning a truncated page.
	if err := rows.Err(); err != nil {
		http.Error(w, fmt.Sprintf("Database error: %v", err), http.StatusInternalServerError)
		return
	}

	response := map[string]interface{}{
		"data": blocks,
		"meta": map[string]interface{}{
			"pagination": map[string]interface{}{
				"page":      page,
				"page_size": pageSize,
			},
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// handleGetBlock, handleListTransactions, handleGetTransaction, handleGetAddress
// are implemented in blocks.go, transactions.go, and addresses.go respectively
// handleHealth handles GET /health.
// It pings the database and reports overall service health; a failing
// database check downgrades the response to 503 with status "degraded".
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
	hdr := w.Header()
	hdr.Set("Content-Type", "application/json")
	hdr.Set("X-Explorer-Name", "SolaceScanScout")
	hdr.Set("X-Explorer-Version", "1.0.0")

	// Short timeout: health probes must answer quickly even when the
	// database is unreachable.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	dbStatus := "ok"
	if err := s.db.Ping(ctx); err != nil {
		dbStatus = "error: " + err.Error()
	}

	overall, code := "healthy", http.StatusOK
	if dbStatus != "ok" {
		overall, code = "degraded", http.StatusServiceUnavailable
	}

	payload := map[string]interface{}{
		"status":    overall,
		"timestamp": time.Now().UTC().Format(time.RFC3339),
		"services": map[string]string{
			"database": dbStatus,
			"api":      "ok",
		},
		"chain_id": s.chainID,
		"explorer": map[string]string{
			"name":    "SolaceScanScout",
			"version": "1.0.0",
		},
	}

	w.WriteHeader(code)
	json.NewEncoder(w).Encode(payload)
}

59
backend/api/rest/stats.go Normal file
View File

@@ -0,0 +1,59 @@
package rest
import (
"context"
"encoding/json"
"net/http"
"time"
)
// handleStats handles GET /api/v2/stats.
// It reports chain-wide totals (blocks, transactions, distinct addresses).
// Each count is best-effort: a failing query reports 0 for that metric
// instead of failing the whole endpoint.
func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Tie the timeout to the request so queries stop when the client leaves.
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()

	// Get total blocks
	var totalBlocks int64
	if err := s.db.QueryRow(ctx,
		`SELECT COUNT(*) FROM blocks WHERE chain_id = $1`,
		s.chainID,
	).Scan(&totalBlocks); err != nil {
		totalBlocks = 0
	}

	// Get total transactions
	var totalTransactions int64
	if err := s.db.QueryRow(ctx,
		`SELECT COUNT(*) FROM transactions WHERE chain_id = $1`,
		s.chainID,
	).Scan(&totalTransactions); err != nil {
		totalTransactions = 0
	}

	// Get total distinct addresses. UNION deduplicates addresses appearing
	// as both sender and recipient; the previous
	// COUNT(DISTINCT from_address) + COUNT(DISTINCT to_address)
	// double-counted them. NULL to_address (contract creation) is excluded,
	// matching COUNT(DISTINCT ...) semantics.
	var totalAddresses int64
	if err := s.db.QueryRow(ctx,
		`SELECT COUNT(*) FROM (
			SELECT from_address AS addr FROM transactions WHERE chain_id = $1
			UNION
			SELECT to_address FROM transactions WHERE chain_id = $1 AND to_address IS NOT NULL
		) AS addrs`,
		s.chainID,
	).Scan(&totalAddresses); err != nil {
		totalAddresses = 0
	}

	stats := map[string]interface{}{
		"total_blocks":       totalBlocks,
		"total_transactions": totalTransactions,
		"total_addresses":    totalAddresses,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(stats)
}

View File

@@ -0,0 +1,430 @@
openapi: 3.0.3
info:
title: SolaceScanScout API
description: |
Blockchain Explorer API for ChainID 138 with tiered access control.
## Authentication
Track 1 endpoints are public and require no authentication.
Track 2-4 endpoints require JWT authentication via wallet signature.
## Rate Limiting
- Track 1: 100 requests/minute per IP
- Track 2-4: Based on user tier and subscription
version: 1.0.0
contact:
name: API Support
email: support@d-bis.org
license:
name: MIT
url: https://opensource.org/licenses/MIT
servers:
- url: https://api.d-bis.org
description: Production server
- url: http://localhost:8080
description: Development server
tags:
- name: Health
description: Health check endpoints
- name: Blocks
description: Block-related endpoints
- name: Transactions
description: Transaction-related endpoints
- name: Addresses
description: Address-related endpoints
- name: Search
description: Unified search endpoints
- name: Track1
description: Public RPC gateway endpoints (no auth required)
- name: Track2
description: Indexed explorer endpoints (auth required)
- name: Track3
description: Analytics endpoints (Track 3+ required)
- name: Track4
description: Operator endpoints (Track 4 + IP whitelist)
paths:
/health:
get:
tags:
- Health
summary: Health check
description: Returns the health status of the API
operationId: getHealth
responses:
'200':
description: Service is healthy
content:
application/json:
schema:
type: object
properties:
status:
type: string
example: ok
timestamp:
type: string
format: date-time
database:
type: string
example: connected
/api/v1/blocks:
get:
tags:
- Blocks
summary: List blocks
description: Returns a paginated list of blocks
operationId: listBlocks
parameters:
- name: limit
in: query
description: Number of blocks to return
required: false
schema:
type: integer
minimum: 1
maximum: 100
default: 20
- name: page
in: query
description: Page number
required: false
schema:
type: integer
minimum: 1
default: 1
- name: chain_id
in: query
description: Chain ID filter
required: false
schema:
type: integer
default: 138
responses:
'200':
description: List of blocks
content:
application/json:
schema:
$ref: '#/components/schemas/BlockListResponse'
'400':
$ref: '#/components/responses/BadRequest'
'500':
$ref: '#/components/responses/InternalServerError'
/api/v1/blocks/{chain_id}/{number}:
get:
tags:
- Blocks
summary: Get block by number
description: Returns block details by chain ID and block number
operationId: getBlockByNumber
parameters:
- name: chain_id
in: path
required: true
description: Chain ID
schema:
type: integer
example: 138
- name: number
in: path
required: true
description: Block number
schema:
type: integer
example: 1000
responses:
'200':
description: Block details
content:
application/json:
schema:
$ref: '#/components/schemas/Block'
'404':
$ref: '#/components/responses/NotFound'
'500':
$ref: '#/components/responses/InternalServerError'
/api/v1/transactions:
get:
tags:
- Transactions
summary: List transactions
description: Returns a paginated list of transactions
operationId: listTransactions
parameters:
- name: limit
in: query
schema:
type: integer
default: 20
- name: page
in: query
schema:
type: integer
default: 1
- name: chain_id
in: query
schema:
type: integer
default: 138
responses:
'200':
description: List of transactions
content:
application/json:
schema:
$ref: '#/components/schemas/TransactionListResponse'
/api/v1/search:
get:
tags:
- Search
summary: Unified search
description: |
Searches for blocks, transactions, or addresses.
Automatically detects the type based on the query format.
operationId: search
parameters:
- name: q
in: query
required: true
description: Search query (block number, address, or transaction hash)
schema:
type: string
example: "0x1234567890abcdef"
responses:
'200':
description: Search results
content:
application/json:
schema:
$ref: '#/components/schemas/SearchResponse'
'400':
$ref: '#/components/responses/BadRequest'
/api/v1/track1/blocks/latest:
get:
tags:
- Track1
summary: Get latest blocks (Public)
description: Returns the latest blocks via RPC gateway. No authentication required.
operationId: getLatestBlocks
parameters:
- name: limit
in: query
schema:
type: integer
default: 10
maximum: 50
responses:
'200':
description: Latest blocks
content:
application/json:
schema:
$ref: '#/components/schemas/BlockListResponse'
/api/v1/track2/search:
get:
tags:
- Track2
summary: Advanced search (Auth Required)
description: Advanced search with indexed data. Requires Track 2+ authentication.
operationId: track2Search
security:
- bearerAuth: []
parameters:
- name: q
in: query
required: true
schema:
type: string
responses:
'200':
description: Search results
'401':
$ref: '#/components/responses/Unauthorized'
'403':
$ref: '#/components/responses/Forbidden'
components:
securitySchemes:
bearerAuth:
type: http
scheme: bearer
bearerFormat: JWT
description: JWT token obtained from /api/v1/auth/wallet
schemas:
Block:
type: object
properties:
chain_id:
type: integer
example: 138
number:
type: integer
example: 1000
hash:
type: string
example: "0x1234567890abcdef"
parent_hash:
type: string
timestamp:
type: string
format: date-time
miner:
type: string
transaction_count:
type: integer
gas_used:
type: integer
gas_limit:
type: integer
Transaction:
type: object
properties:
chain_id:
type: integer
hash:
type: string
block_number:
type: integer
from_address:
type: string
to_address:
type: string
value:
type: string
gas:
type: integer
gas_price:
type: string
status:
type: string
enum: [success, failed]
BlockListResponse:
type: object
properties:
data:
type: array
items:
$ref: '#/components/schemas/Block'
pagination:
$ref: '#/components/schemas/Pagination'
TransactionListResponse:
type: object
properties:
data:
type: array
items:
$ref: '#/components/schemas/Transaction'
pagination:
$ref: '#/components/schemas/Pagination'
Pagination:
type: object
properties:
page:
type: integer
limit:
type: integer
total:
type: integer
total_pages:
type: integer
SearchResponse:
type: object
properties:
query:
type: string
results:
type: array
items:
type: object
properties:
type:
type: string
enum: [block, transaction, address]
data:
type: object
Error:
type: object
properties:
error:
type: object
properties:
code:
type: string
message:
type: string
responses:
BadRequest:
description: Bad request
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error:
code: "bad_request"
message: "Invalid request parameters"
Unauthorized:
description: Unauthorized - Authentication required
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error:
code: "unauthorized"
message: "Authentication required"
Forbidden:
description: Forbidden - Insufficient permissions
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error:
code: "forbidden"
message: "Insufficient permissions. Track 2+ required."
NotFound:
description: Resource not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error:
code: "not_found"
message: "Resource not found"
InternalServerError:
description: Internal server error
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
example:
error:
code: "internal_error"
message: "An internal error occurred"

View File

@@ -0,0 +1,113 @@
package rest
import (
"net/http"
"os"
"strings"
"github.com/explorer/backend/api/middleware"
"github.com/explorer/backend/api/track1"
"github.com/explorer/backend/api/track2"
"github.com/explorer/backend/api/track3"
"github.com/explorer/backend/api/track4"
)
// SetupTrackRoutes sets up track-specific routes with proper middleware.
//
// Track 1 (public RPC gateway) routes carry no auth. Track 2-4 routes are
// wrapped in RequireAuth plus RequireTrack(n) so only sufficiently
// privileged users reach them. Redis-backed cache/rate-limiter construction
// falls back to in-memory implementations on error, so the server still
// starts without Redis.
func (s *Server) SetupTrackRoutes(mux *http.ServeMux, authMiddleware *middleware.AuthMiddleware) {
	// Initialize Track 1 (RPC Gateway)
	rpcURL := os.Getenv("RPC_URL")
	if rpcURL == "" {
		rpcURL = "http://localhost:8545"
	}

	// Use Redis if available, otherwise fall back to in-memory
	cache, err := track1.NewCache()
	if err != nil {
		// Fallback to in-memory cache if Redis fails
		cache = track1.NewInMemoryCache()
	}
	rateLimiter, err := track1.NewRateLimiter(track1.RateLimitConfig{
		RequestsPerSecond: 10,
		RequestsPerMinute: 100,
		BurstSize:         20,
	})
	if err != nil {
		// Fallback to in-memory rate limiter if Redis fails
		rateLimiter = track1.NewInMemoryRateLimiter(track1.RateLimitConfig{
			RequestsPerSecond: 10,
			RequestsPerMinute: 100,
			BurstSize:         20,
		})
	}
	rpcGateway := track1.NewRPCGateway(rpcURL, cache, rateLimiter)
	track1Server := track1.NewServer(rpcGateway)

	// Track 1 routes (public, optional auth)
	mux.HandleFunc("/api/v1/track1/blocks/latest", track1Server.HandleLatestBlocks)
	mux.HandleFunc("/api/v1/track1/txs/latest", track1Server.HandleLatestTransactions)
	mux.HandleFunc("/api/v1/track1/block/", track1Server.HandleBlockDetail)
	mux.HandleFunc("/api/v1/track1/tx/", track1Server.HandleTransactionDetail)
	mux.HandleFunc("/api/v1/track1/address/", track1Server.HandleAddressBalance)
	mux.HandleFunc("/api/v1/track1/bridge/status", track1Server.HandleBridgeStatus)

	// Initialize Track 2 server
	track2Server := track2.NewServer(s.db, s.chainID)
	// Track 2 routes (require Track 2+)
	track2Middleware := authMiddleware.RequireTrack(2)
	// track2AuthHandler wraps a handler in RequireAuth + RequireTrack(2).
	track2AuthHandler := func(handler http.HandlerFunc) http.HandlerFunc {
		return authMiddleware.RequireAuth(track2Middleware(http.HandlerFunc(handler))).ServeHTTP
	}
	mux.HandleFunc("/api/v1/track2/search", track2AuthHandler(track2Server.HandleSearch))
	// Address sub-resource router:
	// /api/v1/track2/address/{addr}/{txs|tokens|internal-txs}
	mux.HandleFunc("/api/v1/track2/address/", track2AuthHandler(func(w http.ResponseWriter, r *http.Request) {
		parts := strings.Split(strings.TrimPrefix(r.URL.Path, "/api/v1/track2/address/"), "/")
		if len(parts) < 2 {
			// Previously short/unknown paths fell through with an empty 200
			// body; answer 404 so clients can detect bad URLs.
			http.NotFound(w, r)
			return
		}
		switch parts[1] {
		case "txs":
			track2Server.HandleAddressTransactions(w, r)
		case "tokens":
			track2Server.HandleAddressTokens(w, r)
		case "internal-txs":
			track2Server.HandleInternalTransactions(w, r)
		default:
			http.NotFound(w, r)
		}
	}))
	mux.HandleFunc("/api/v1/track2/token/", track2AuthHandler(track2Server.HandleTokenInfo))

	// Initialize Track 3 server
	track3Server := track3.NewServer(s.db, s.chainID)
	// Track 3 routes (require Track 3+)
	track3Middleware := authMiddleware.RequireTrack(3)
	track3AuthHandler := func(handler http.HandlerFunc) http.HandlerFunc {
		return authMiddleware.RequireAuth(track3Middleware(http.HandlerFunc(handler))).ServeHTTP
	}
	mux.HandleFunc("/api/v1/track3/analytics/flows", track3AuthHandler(track3Server.HandleFlows))
	mux.HandleFunc("/api/v1/track3/analytics/bridge", track3AuthHandler(track3Server.HandleBridge))
	mux.HandleFunc("/api/v1/track3/analytics/token-distribution/", track3AuthHandler(track3Server.HandleTokenDistribution))
	mux.HandleFunc("/api/v1/track3/analytics/address-risk/", track3AuthHandler(track3Server.HandleAddressRisk))

	// Initialize Track 4 server
	track4Server := track4.NewServer(s.db, s.chainID)
	// Track 4 routes (require Track 4; an IP whitelist is also intended here
	// but no whitelist middleware is wired up in this function — TODO confirm
	// where it is applied).
	track4Middleware := authMiddleware.RequireTrack(4)
	track4AuthHandler := func(handler http.HandlerFunc) http.HandlerFunc {
		return authMiddleware.RequireAuth(track4Middleware(http.HandlerFunc(handler))).ServeHTTP
	}
	mux.HandleFunc("/api/v1/track4/operator/bridge/events", track4AuthHandler(track4Server.HandleBridgeEvents))
	mux.HandleFunc("/api/v1/track4/operator/validators", track4AuthHandler(track4Server.HandleValidators))
	mux.HandleFunc("/api/v1/track4/operator/contracts", track4AuthHandler(track4Server.HandleContracts))
	mux.HandleFunc("/api/v1/track4/operator/protocol-state", track4AuthHandler(track4Server.HandleProtocolState))
}

View File

@@ -0,0 +1,236 @@
package rest
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
)
// handleListTransactions handles GET /api/v1/transactions.
// It returns one page of transactions for the configured chain, newest
// first. Optional filters: block_number, from_address, to_address.
// Pagination via page / page_size (bounded by validatePagination).
func (s *Server) handleListTransactions(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Validate pagination
	page, pageSize, err := validatePagination(
		r.URL.Query().Get("page"),
		r.URL.Query().Get("page_size"),
	)
	if err != nil {
		writeValidationError(w, err)
		return
	}
	offset := (page - 1) * pageSize

	query := `
		SELECT t.chain_id, t.hash, t.block_number, t.transaction_index, t.from_address, t.to_address,
		       t.value, t.gas_price, t.gas_used, t.status, t.created_at, t.timestamp_iso
		FROM transactions t
		WHERE t.chain_id = $1
	`
	args := []interface{}{s.chainID}
	argIndex := 2

	// Optional filters. All values are bound as parameters (no SQL
	// injection risk); invalid values are now rejected with 400 instead of
	// being silently ignored, which previously produced a misleading
	// unfiltered result set.
	if blockNumber := r.URL.Query().Get("block_number"); blockNumber != "" {
		bn, err := strconv.ParseInt(blockNumber, 10, 64)
		if err != nil {
			writeValidationError(w, ErrInvalidBlockNumber)
			return
		}
		query += fmt.Sprintf(" AND block_number = $%d", argIndex)
		args = append(args, bn)
		argIndex++
	}
	if fromAddress := r.URL.Query().Get("from_address"); fromAddress != "" {
		if !isValidAddress(fromAddress) {
			writeValidationError(w, ErrInvalidAddress)
			return
		}
		query += fmt.Sprintf(" AND from_address = $%d", argIndex)
		args = append(args, fromAddress)
		argIndex++
	}
	if toAddress := r.URL.Query().Get("to_address"); toAddress != "" {
		if !isValidAddress(toAddress) {
			writeValidationError(w, ErrInvalidAddress)
			return
		}
		query += fmt.Sprintf(" AND to_address = $%d", argIndex)
		args = append(args, toAddress)
		argIndex++
	}
	query += " ORDER BY block_number DESC, transaction_index DESC"
	query += fmt.Sprintf(" LIMIT $%d OFFSET $%d", argIndex, argIndex+1)
	args = append(args, pageSize, offset)

	// Tie the query timeout to the request context so the query is
	// cancelled when the client disconnects.
	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
	defer cancel()

	rows, err := s.db.Query(ctx, query, args...)
	if err != nil {
		http.Error(w, fmt.Sprintf("Database error: %v", err), http.StatusInternalServerError)
		return
	}
	defer rows.Close()

	transactions := []map[string]interface{}{}
	for rows.Next() {
		var chainID, blockNumber, transactionIndex int
		var hash, fromAddress string
		var toAddress sql.NullString
		var value string
		var gasPrice, gasUsed sql.NullInt64
		var status sql.NullInt64
		var createdAt time.Time
		var timestampISO sql.NullString
		if err := rows.Scan(&chainID, &hash, &blockNumber, &transactionIndex, &fromAddress, &toAddress,
			&value, &gasPrice, &gasUsed, &status, &createdAt, &timestampISO); err != nil {
			continue // skip malformed rows, keep serving the page
		}
		tx := map[string]interface{}{
			"chain_id":          chainID,
			"hash":              hash,
			"block_number":      blockNumber,
			"transaction_index": transactionIndex,
			"from_address":      fromAddress,
			"value":             value,
			"created_at":        createdAt,
		}
		// Nullable columns are only emitted when present.
		if timestampISO.Valid {
			tx["timestamp_iso"] = timestampISO.String
		}
		if toAddress.Valid {
			tx["to_address"] = toAddress.String
		}
		if gasPrice.Valid {
			tx["gas_price"] = gasPrice.Int64
		}
		if gasUsed.Valid {
			tx["gas_used"] = gasUsed.Int64
		}
		if status.Valid {
			tx["status"] = status.Int64
		}
		transactions = append(transactions, tx)
	}
	// Report iteration errors instead of silently truncating the page.
	if err := rows.Err(); err != nil {
		http.Error(w, fmt.Sprintf("Database error: %v", err), http.StatusInternalServerError)
		return
	}

	response := map[string]interface{}{
		"data": transactions,
		"meta": map[string]interface{}{
			"pagination": map[string]interface{}{
				"page":      page,
				"page_size": pageSize,
			},
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// handleGetTransactionByHash handles GET /api/v1/transactions/{chain_id}/{hash}.
// It loads a single transaction row for the server's configured chain and
// writes it as JSON under "data". Nullable columns are included in the
// response only when non-NULL.
func (s *Server) handleGetTransactionByHash(w http.ResponseWriter, r *http.Request, hash string) {
	// Validate hash format (already validated in routes.go, but double-check)
	if !isValidHash(hash) {
		writeValidationError(w, ErrInvalidHash)
		return
	}
	// Add query timeout
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	query := `
		SELECT chain_id, hash, block_number, block_hash, transaction_index,
		       from_address, to_address, value, gas_price, max_fee_per_gas,
		       max_priority_fee_per_gas, gas_limit, gas_used, nonce, input_data,
		       status, contract_address, cumulative_gas_used, effective_gas_price,
		       created_at, timestamp_iso
		FROM transactions
		WHERE chain_id = $1 AND hash = $2
	`
	// Scan targets; sql.Null* types for columns that may be NULL.
	var chainID, blockNumber, transactionIndex int
	var txHash, blockHash, fromAddress string
	var toAddress sql.NullString
	var value string
	var gasPrice, maxFeePerGas, maxPriorityFeePerGas, gasLimit, gasUsed, nonce sql.NullInt64
	var inputData sql.NullString
	var status sql.NullInt64
	var contractAddress sql.NullString
	var cumulativeGasUsed int64
	var effectiveGasPrice sql.NullInt64
	var createdAt time.Time
	var timestampISO sql.NullString
	err := s.db.QueryRow(ctx, query, s.chainID, hash).Scan(
		&chainID, &txHash, &blockNumber, &blockHash, &transactionIndex,
		&fromAddress, &toAddress, &value, &gasPrice, &maxFeePerGas,
		&maxPriorityFeePerGas, &gasLimit, &gasUsed, &nonce, &inputData,
		&status, &contractAddress, &cumulativeGasUsed, &effectiveGasPrice,
		&createdAt, &timestampISO,
	)
	if err != nil {
		// NOTE(review): every query failure — including genuine database
		// errors, not only "no rows" — is reported as 404 here; consider
		// distinguishing pgx.ErrNoRows so infrastructure failures surface
		// as 500s instead.
		http.Error(w, fmt.Sprintf("Transaction not found: %v", err), http.StatusNotFound)
		return
	}
	// Always-present fields.
	tx := map[string]interface{}{
		"chain_id":          chainID,
		"hash":              txHash,
		"block_number":      blockNumber,
		"block_hash":        blockHash,
		"transaction_index": transactionIndex,
		"from_address":      fromAddress,
		"value":             value,
		// NOTE(review): gas_limit is read without checking gasLimit.Valid,
		// so a NULL column serializes as 0 — confirm that is intended.
		"gas_limit":           gasLimit.Int64,
		"cumulative_gas_used": cumulativeGasUsed,
		"created_at":          createdAt,
	}
	// Nullable fields: emitted only when present.
	if timestampISO.Valid {
		tx["timestamp_iso"] = timestampISO.String
	}
	if toAddress.Valid {
		tx["to_address"] = toAddress.String
	}
	if gasPrice.Valid {
		tx["gas_price"] = gasPrice.Int64
	}
	if maxFeePerGas.Valid {
		tx["max_fee_per_gas"] = maxFeePerGas.Int64
	}
	if maxPriorityFeePerGas.Valid {
		tx["max_priority_fee_per_gas"] = maxPriorityFeePerGas.Int64
	}
	if gasUsed.Valid {
		tx["gas_used"] = gasUsed.Int64
	}
	if nonce.Valid {
		tx["nonce"] = nonce.Int64
	}
	if inputData.Valid {
		tx["input_data"] = inputData.String
	}
	if status.Valid {
		tx["status"] = status.Int64
	}
	if contractAddress.Valid {
		tx["contract_address"] = contractAddress.String
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"data": tx,
	})
}

View File

@@ -0,0 +1,127 @@
package rest
import (
"encoding/hex"
"encoding/json"
"fmt"
"net/http"
"regexp"
"strconv"
"strings"
)
// Validation errors shared by the request validators in this file; handlers
// pass them to writeValidationError to produce a 400 response.
var (
	ErrInvalidAddress = fmt.Errorf("invalid address format")
	ErrInvalidHash = fmt.Errorf("invalid hash format")
	ErrInvalidBlockNumber = fmt.Errorf("invalid block number")
)
// isValidHash reports whether hash is a 0x-prefixed 32-byte hex string,
// i.e. "0x" followed by exactly 64 hex characters (either case).
func isValidHash(hash string) bool {
	if len(hash) != 66 || !strings.HasPrefix(hash, "0x") {
		return false
	}
	_, err := hex.DecodeString(hash[2:])
	return err == nil
}
// isValidAddress reports whether address is a 0x-prefixed 20-byte hex
// string, i.e. "0x" followed by exactly 40 hex characters (either case).
func isValidAddress(address string) bool {
	if len(address) != 42 || !strings.HasPrefix(address, "0x") {
		return false
	}
	_, err := hex.DecodeString(address[2:])
	return err == nil
}
// validateBlockNumber parses blockStr as a non-negative base-10 block
// number; anything else yields ErrInvalidBlockNumber.
func validateBlockNumber(blockStr string) (int64, error) {
	n, err := strconv.ParseInt(blockStr, 10, 64)
	if err != nil || n < 0 {
		return 0, ErrInvalidBlockNumber
	}
	return n, nil
}
// validateChainID checks that chainIDStr parses to exactly expectedChainID,
// returning a descriptive error on a parse failure or a mismatch.
func validateChainID(chainIDStr string, expectedChainID int) error {
	chainID, err := strconv.Atoi(chainIDStr)
	if err != nil {
		return fmt.Errorf("invalid chain ID format")
	}
	if chainID == expectedChainID {
		return nil
	}
	return fmt.Errorf("chain ID mismatch: expected %d, got %d", expectedChainID, chainID)
}
// validatePagination validates and normalizes pagination parameters.
// Empty strings select the defaults (page 1, page size 20). Non-numeric or
// non-positive values are rejected; a page size above 100 is silently
// capped at 100 to bound query cost.
func validatePagination(pageStr, pageSizeStr string) (page, pageSize int, err error) {
	page, pageSize = 1, 20

	if pageStr != "" {
		if page, err = strconv.Atoi(pageStr); err != nil || page < 1 {
			return 0, 0, fmt.Errorf("invalid page number")
		}
	}

	if pageSizeStr != "" {
		if pageSize, err = strconv.Atoi(pageSizeStr); err != nil || pageSize < 1 {
			return 0, 0, fmt.Errorf("invalid page size")
		}
		if pageSize > 100 {
			pageSize = 100 // Max page size
		}
	}
	return page, pageSize, nil
}
// writeValidationError writes a 400 response carrying the JSON envelope
// {"error": {"code": "VALIDATION_ERROR", "message": <err>}}.
func writeValidationError(w http.ResponseWriter, err error) {
	body := map[string]interface{}{
		"error": map[string]interface{}{
			"code":    "VALIDATION_ERROR",
			"message": err.Error(),
		},
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusBadRequest)
	// The status line is already committed, so an encode failure here is
	// not recoverable.
	json.NewEncoder(w).Encode(body)
}
// Patterns used to classify search queries. Compiled once at package scope:
// the previous implementation recompiled all three regexps on every request.
var (
	searchBlockPattern   = regexp.MustCompile(`^\d+$`)
	searchAddressPattern = regexp.MustCompile(`^0x[a-fA-F0-9]{40}$`)
	searchTxHashPattern  = regexp.MustCompile(`^0x[a-fA-F0-9]{64}$`)
)

// validateSearchQuery validates a search query and classifies it.
// It returns the detected search type ("block", "address", or
// "transaction"), the trimmed query value, and an error when the query is
// empty or matches none of the known formats.
func validateSearchQuery(query string) (searchType string, value string, err error) {
	query = strings.TrimSpace(query)
	if query == "" {
		return "", "", fmt.Errorf("search query cannot be empty")
	}
	switch {
	case searchBlockPattern.MatchString(query):
		// Block number (numeric)
		return "block", query, nil
	case searchAddressPattern.MatchString(query):
		// Address (0x + 40 hex chars)
		return "address", query, nil
	case searchTxHashPattern.MatchString(query):
		// Transaction hash (0x + 64 hex chars)
		return "transaction", query, nil
	default:
		return "", "", fmt.Errorf("invalid search query format")
	}
}

View File

@@ -0,0 +1,42 @@
package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/elastic/go-elasticsearch/v8"
	"github.com/explorer/backend/api/search"
	"github.com/explorer/backend/search/config"
)
// main wires up the standalone search HTTP service: it builds an
// Elasticsearch client from environment-driven configuration and serves
// the unified search endpoint on SEARCH_PORT (default 8082).
func main() {
	searchConfig := config.LoadSearchConfig()

	esConfig := elasticsearch.Config{
		Addresses: []string{searchConfig.URL},
	}
	// Credentials are optional; only set them when configured.
	if searchConfig.Username != "" {
		esConfig.Username = searchConfig.Username
		esConfig.Password = searchConfig.Password
	}
	client, err := elasticsearch.NewClient(esConfig)
	if err != nil {
		log.Fatalf("Failed to create Elasticsearch client: %v", err)
	}

	service := search.NewSearchService(client, searchConfig.IndexPrefix)
	mux := http.NewServeMux()
	mux.HandleFunc("/api/v1/search", service.HandleSearch)

	port := os.Getenv("SEARCH_PORT")
	if port == "" {
		port = "8082"
	}

	// Use an http.Server with explicit timeouts rather than the bare
	// http.ListenAndServe, which has none and leaves the service exposed
	// to slow-client (slowloris-style) resource exhaustion.
	srv := &http.Server{
		Addr:         ":" + port,
		Handler:      mux,
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 30 * time.Second,
	}
	log.Printf("Starting search service on :%s", port)
	log.Fatal(srv.ListenAndServe())
}

View File

@@ -0,0 +1,172 @@
package search
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/elastic/go-elasticsearch/v8"
"github.com/elastic/go-elasticsearch/v8/esapi"
)
// SearchService handles unified search across the block, transaction, and
// address Elasticsearch indices.
type SearchService struct {
	client *elasticsearch.Client // shared ES client used for all queries
	indexPrefix string // index name prefix; indices are named "<prefix>-<kind>-<chainID>"
}

// NewSearchService creates a new search service backed by the given
// Elasticsearch client; indexPrefix selects the index family to query.
func NewSearchService(client *elasticsearch.Client, indexPrefix string) *SearchService {
	return &SearchService{
		client:      client,
		indexPrefix: indexPrefix,
	}
}
// Search performs unified search across block, transaction, and address
// indices. When chainID is non-nil only that chain's indices are queried;
// otherwise wildcard index patterns cover every chain. limit caps the number
// of hits requested from Elasticsearch.
func (s *SearchService) Search(ctx context.Context, query string, chainID *int, limit int) ([]SearchResult, error) {
	// Build the list of indices to search.
	var indices []string
	if chainID != nil {
		indices = []string{
			fmt.Sprintf("%s-blocks-%d", s.indexPrefix, *chainID),
			fmt.Sprintf("%s-transactions-%d", s.indexPrefix, *chainID),
			fmt.Sprintf("%s-addresses-%d", s.indexPrefix, *chainID),
		}
	} else {
		// Search all chains (simplified - would need to enumerate)
		indices = []string{
			fmt.Sprintf("%s-blocks-*", s.indexPrefix),
			fmt.Sprintf("%s-transactions-*", s.indexPrefix),
			fmt.Sprintf("%s-addresses-*", s.indexPrefix),
		}
	}
	searchQuery := map[string]interface{}{
		"query": map[string]interface{}{
			"multi_match": map[string]interface{}{
				"query":  query,
				"fields": []string{"hash", "address", "from_address", "to_address"},
				"type":   "best_fields",
			},
		},
		"size": limit,
	}
	// Propagate the (unlikely) marshal failure instead of silently sending
	// an empty body to Elasticsearch as the previous code did.
	queryJSON, err := json.Marshal(searchQuery)
	if err != nil {
		return nil, fmt.Errorf("failed to build search query: %w", err)
	}
	// Execute search
	req := esapi.SearchRequest{
		Index:  indices,
		Body:   strings.NewReader(string(queryJSON)),
		Pretty: true,
	}
	res, err := req.Do(ctx, s.client)
	if err != nil {
		return nil, fmt.Errorf("search failed: %w", err)
	}
	defer res.Body.Close()
	if res.IsError() {
		return nil, fmt.Errorf("elasticsearch error: %s", res.String())
	}
	var result map[string]interface{}
	if err := json.NewDecoder(res.Body).Decode(&result); err != nil {
		return nil, fmt.Errorf("failed to decode response: %w", err)
	}
	// Parse hits into SearchResult values.
	results := []SearchResult{}
	if hits, ok := result["hits"].(map[string]interface{}); ok {
		if hitsList, ok := hits["hits"].([]interface{}); ok {
			for _, hit := range hitsList {
				hitMap, ok := hit.(map[string]interface{})
				if !ok {
					continue
				}
				source, ok := hitMap["_source"].(map[string]interface{})
				if !ok {
					continue
				}
				parsed := s.parseResult(source)
				// Bug fix: the relevance score lives on the hit envelope
				// ("_score"), not inside "_source"; previously Score was
				// always left at zero.
				if score, ok := hitMap["_score"].(float64); ok {
					parsed.Score = score
				}
				results = append(results, parsed)
			}
		}
	}
	return results, nil
}
// SearchResult represents a single search hit returned to API clients.
type SearchResult struct {
	Type string `json:"type"` // "block", "transaction", or "address"; empty when undetermined
	ChainID int `json:"chain_id"` // chain the document belongs to; 0 when absent from the source
	Data map[string]interface{} `json:"data"` // raw _source document from Elasticsearch
	Score float64 `json:"score"` // relevance score of the hit
}
// parseResult converts a raw Elasticsearch _source document into a
// SearchResult, inferring the result type from which fields are present.
func (s *SearchService) parseResult(source map[string]interface{}) SearchResult {
	res := SearchResult{Data: source}

	// JSON numbers decode as float64; narrow chain_id back to int.
	if id, ok := source["chain_id"].(float64); ok {
		res.ChainID = int(id)
	}

	// Classify by the presence of type-specific fields; Type stays empty
	// when none of them are found.
	if _, isBlock := source["block_number"]; isBlock {
		res.Type = "block"
	} else if _, isTx := source["transaction_index"]; isTx {
		res.Type = "transaction"
	} else if _, isAddr := source["address"]; isAddr {
		res.Type = "address"
	}
	return res
}
// HandleSearch handles GET /api/v1/search?q=...&chain_id=...&limit=...
// It validates the query parameters, runs the unified search, and writes
// the matches as JSON.
func (s *SearchService) HandleSearch(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	query := r.URL.Query().Get("q")
	if query == "" {
		http.Error(w, "Query parameter 'q' is required", http.StatusBadRequest)
		return
	}
	// Optional chain filter; silently ignored when not an integer.
	var chainID *int
	if chainIDStr := r.URL.Query().Get("chain_id"); chainIDStr != "" {
		if id, err := strconv.Atoi(chainIDStr); err == nil {
			chainID = &id
		}
	}
	// Bug fix: previously any integer (zero, negative, or arbitrarily
	// large) was forwarded to Elasticsearch as the result size. Accept
	// only positive limits and cap them to keep pages bounded.
	limit := 50
	if limitStr := r.URL.Query().Get("limit"); limitStr != "" {
		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 {
			if l > 100 {
				l = 100
			}
			limit = l
		}
	}
	results, err := s.Search(r.Context(), query, chainID, limit)
	if err != nil {
		http.Error(w, fmt.Sprintf("Search failed: %v", err), http.StatusInternalServerError)
		return
	}
	response := map[string]interface{}{
		"query":   query,
		"results": results,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}

View File

@@ -0,0 +1,90 @@
package track1
import (
"sync"
"time"
)
// InMemoryCache is a simple process-local cache with per-entry TTLs.
// In production, use Redis (RedisCache) for distributed caching.
type InMemoryCache struct {
	items map[string]*cacheItem
	mu    sync.RWMutex
	stop  chan struct{} // closed by Close to end the cleanup goroutine
}

// cacheItem is a single cached value together with its expiry deadline.
type cacheItem struct {
	value     []byte
	expiresAt time.Time
}

// NewInMemoryCache creates a new in-memory cache and starts a background
// goroutine that evicts expired entries once per minute. Call Close to stop
// that goroutine when the cache is no longer needed — previously it ran
// forever with no way to stop it (a goroutine leak per cache instance).
func NewInMemoryCache() *InMemoryCache {
	cache := &InMemoryCache{
		items: make(map[string]*cacheItem),
		stop:  make(chan struct{}),
	}
	// Start cleanup goroutine
	go cache.cleanup()
	return cache
}

// Get retrieves a value from the cache. It returns ErrCacheMiss when the
// key is absent or its TTL has already elapsed (expired entries linger
// until the next cleanup pass but are never returned).
func (c *InMemoryCache) Get(key string) ([]byte, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	item, exists := c.items[key]
	if !exists || time.Now().After(item.expiresAt) {
		return nil, ErrCacheMiss
	}
	return item.value, nil
}

// Set stores a value under key for the given TTL, replacing any previous
// entry. It never fails; the error return satisfies the Cache interface.
func (c *InMemoryCache) Set(key string, value []byte, ttl time.Duration) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[key] = &cacheItem{
		value:     value,
		expiresAt: time.Now().Add(ttl),
	}
	return nil
}

// Close stops the background cleanup goroutine. Call it at most once.
func (c *InMemoryCache) Close() {
	close(c.stop)
}

// cleanup removes expired items once per minute until Close is called.
func (c *InMemoryCache) cleanup() {
	ticker := time.NewTicker(1 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-c.stop:
			return
		case <-ticker.C:
			c.mu.Lock()
			now := time.Now()
			for key, item := range c.items {
				if now.After(item.expiresAt) {
					delete(c.items, key)
				}
			}
			c.mu.Unlock()
		}
	}
}

// ErrCacheMiss is returned when a cache key is not found or has expired.
var ErrCacheMiss = &CacheError{Message: "cache miss"}

// CacheError represents a cache error.
type CacheError struct {
	Message string
}

// Error implements the error interface.
func (e *CacheError) Error() string {
	return e.Message
}

View File

@@ -0,0 +1,79 @@
package track1
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestInMemoryCache_GetSet verifies a stored value round-trips intact.
func TestInMemoryCache_GetSet(t *testing.T) {
	cache := NewInMemoryCache()

	const key = "test-key"
	payload := []byte("test-value")

	require.NoError(t, cache.Set(key, payload, 5*time.Minute))

	got, err := cache.Get(key)
	require.NoError(t, err)
	assert.Equal(t, payload, got)
}
// TestInMemoryCache_Expiration verifies an entry becomes a miss once its
// TTL elapses.
func TestInMemoryCache_Expiration(t *testing.T) {
	cache := NewInMemoryCache()

	const key = "test-key"
	payload := []byte("test-value")

	require.NoError(t, cache.Set(key, payload, 100*time.Millisecond))

	// The fresh entry is readable immediately.
	got, err := cache.Get(key)
	require.NoError(t, err)
	assert.Equal(t, payload, got)

	// Past the TTL the entry must read as a cache miss.
	time.Sleep(150 * time.Millisecond)
	_, err = cache.Get(key)
	assert.Error(t, err)
	assert.Equal(t, ErrCacheMiss, err)
}
// TestInMemoryCache_Miss verifies an unknown key reports ErrCacheMiss.
func TestInMemoryCache_Miss(t *testing.T) {
	cache := NewInMemoryCache()

	_, err := cache.Get("non-existent-key")
	assert.Error(t, err)
	assert.Equal(t, ErrCacheMiss, err)
}
// TestInMemoryCache_Cleanup verifies that entries with a short TTL read as
// misses after expiry, regardless of background-cleanup timing.
func TestInMemoryCache_Cleanup(t *testing.T) {
	cache := NewInMemoryCache()
	// Set multiple keys with short TTL. Keys use printable letter suffixes:
	// the previous string(rune(i)) with i in 0..9 produced unprintable
	// control characters, making failures impossible to read in test output.
	for i := 0; i < 10; i++ {
		key := "test-key-" + string('a'+rune(i))
		cache.Set(key, []byte("value"), 50*time.Millisecond)
	}
	// Wait for expiration
	time.Sleep(200 * time.Millisecond)
	// All should be expired after cleanup
	for i := 0; i < 10; i++ {
		key := "test-key-" + string('a'+rune(i))
		_, err := cache.Get(key)
		assert.Error(t, err, "key %q should have expired", key)
	}
}

View File

@@ -0,0 +1,391 @@
package track1
import (
"encoding/json"
"fmt"
"math/big"
"net/http"
"strconv"
"strings"
"time"
)
// Server handles the Track 1 HTTP endpoints, which serve explorer data
// directly from the node through a cached, rate-limited RPC gateway
// (no database involved).
type Server struct {
	rpcGateway *RPCGateway // passthrough to the JSON-RPC node
}

// NewServer creates a new Track 1 server backed by the given RPC gateway.
func NewServer(rpcGateway *RPCGateway) *Server {
	return &Server{
		rpcGateway: rpcGateway,
	}
}
// HandleLatestBlocks handles GET /api/v1/track1/blocks/latest.
// It reads the chain head via RPC, then walks backwards fetching up to
// `limit` blocks (headers only, no transaction bodies) and returns them
// newest-first under "data". Blocks that fail to fetch are skipped rather
// than failing the whole response.
func (s *Server) HandleLatestBlocks(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// Optional ?limit= in 1..50; anything else keeps the default of 10.
	limit := 10
	if limitStr := r.URL.Query().Get("limit"); limitStr != "" {
		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 50 {
			limit = l
		}
	}
	// Get latest block number
	blockNumResp, err := s.rpcGateway.GetBlockNumber(r.Context())
	if err != nil {
		writeError(w, http.StatusInternalServerError, "rpc_error", err.Error())
		return
	}
	blockNumHex, ok := blockNumResp.Result.(string)
	if !ok {
		writeError(w, http.StatusInternalServerError, "invalid_response", "Invalid block number response")
		return
	}
	// Parse block number
	blockNum, err := hexToInt(blockNumHex)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "parse_error", err.Error())
		return
	}
	// Fetch blocks newest-first, stopping at genesis (block 0).
	blocks := []map[string]interface{}{}
	for i := 0; i < limit && blockNum-int64(i) >= 0; i++ {
		blockNumStr := fmt.Sprintf("0x%x", blockNum-int64(i))
		// false: header only; transaction bodies are not needed here.
		blockResp, err := s.rpcGateway.GetBlockByNumber(r.Context(), blockNumStr, false)
		if err != nil {
			continue // Skip failed blocks
		}
		blockData, ok := blockResp.Result.(map[string]interface{})
		if !ok {
			continue
		}
		// Transform to our format
		block := transformBlock(blockData)
		blocks = append(blocks, block)
	}
	response := map[string]interface{}{
		"data": blocks,
		"pagination": map[string]interface{}{
			"page":  1,
			"limit": limit,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleLatestTransactions handles GET /api/v1/track1/txs/latest.
// It walks backwards from the chain head through at most 20 blocks
// (fetched with full transaction bodies), collecting transactions until
// `limit` is reached. Blocks that fail to fetch or carry no transaction
// list are skipped.
func (s *Server) HandleLatestTransactions(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// Optional ?limit= in 1..50; anything else keeps the default of 10.
	limit := 10
	if limitStr := r.URL.Query().Get("limit"); limitStr != "" {
		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 50 {
			limit = l
		}
	}
	// Get latest block number
	blockNumResp, err := s.rpcGateway.GetBlockNumber(r.Context())
	if err != nil {
		writeError(w, http.StatusInternalServerError, "rpc_error", err.Error())
		return
	}
	blockNumHex, ok := blockNumResp.Result.(string)
	if !ok {
		writeError(w, http.StatusInternalServerError, "invalid_response", "Invalid block number response")
		return
	}
	blockNum, err := hexToInt(blockNumHex)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "parse_error", err.Error())
		return
	}
	// Fetch transactions from recent blocks; the 20-block scan bound keeps
	// the request cheap even when recent blocks are mostly empty.
	transactions := []map[string]interface{}{}
	for i := 0; i < 20 && len(transactions) < limit && blockNum-int64(i) >= 0; i++ {
		blockNumStr := fmt.Sprintf("0x%x", blockNum-int64(i))
		// true: include full transaction objects, not just their hashes.
		blockResp, err := s.rpcGateway.GetBlockByNumber(r.Context(), blockNumStr, true)
		if err != nil {
			continue
		}
		blockData, ok := blockResp.Result.(map[string]interface{})
		if !ok {
			continue
		}
		txs, ok := blockData["transactions"].([]interface{})
		if !ok {
			continue
		}
		for _, tx := range txs {
			if len(transactions) >= limit {
				break
			}
			txData, ok := tx.(map[string]interface{})
			if !ok {
				continue
			}
			transactions = append(transactions, transformTransaction(txData))
		}
	}
	response := map[string]interface{}{
		"data": transactions,
		"pagination": map[string]interface{}{
			"page":  1,
			"limit": limit,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleBlockDetail handles GET /api/v1/track1/block/:number.
// The path suffix must be a non-negative decimal block number. Malformed
// input now yields 400 — previously it was silently parsed as 0, so
// "/block/garbage" returned the genesis block. A null RPC result (block
// beyond the head) is reported as 404 instead of a 500.
func (s *Server) HandleBlockDetail(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/track1/block/")
	num, err := strconv.ParseInt(path, 10, 64)
	if err != nil || num < 0 {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid block number")
		return
	}
	blockNumStr := fmt.Sprintf("0x%x", num)
	blockResp, err := s.rpcGateway.GetBlockByNumber(r.Context(), blockNumStr, false)
	if err != nil || blockResp.Result == nil {
		writeError(w, http.StatusNotFound, "not_found", "Block not found")
		return
	}
	blockData, ok := blockResp.Result.(map[string]interface{})
	if !ok {
		writeError(w, http.StatusInternalServerError, "invalid_response", "Invalid block response")
		return
	}
	response := map[string]interface{}{
		"data": transformBlock(blockData),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleTransactionDetail handles GET /api/v1/track1/tx/:hash.
// Malformed hashes are rejected with 400 before touching the node, and a
// null RPC result (unknown hash) is reported as 404 — previously it fell
// through the type assertion and surfaced as a 500 "invalid_response".
func (s *Server) HandleTransactionDetail(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	txHash := strings.TrimPrefix(r.URL.Path, "/api/v1/track1/tx/")
	// Cheap shape check (0x + 64 chars) to avoid pointless RPC round trips.
	if len(txHash) != 66 || !strings.HasPrefix(txHash, "0x") {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid transaction hash")
		return
	}
	txResp, err := s.rpcGateway.GetTransactionByHash(r.Context(), txHash)
	if err != nil || txResp.Result == nil {
		writeError(w, http.StatusNotFound, "not_found", "Transaction not found")
		return
	}
	txData, ok := txResp.Result.(map[string]interface{})
	if !ok {
		writeError(w, http.StatusInternalServerError, "invalid_response", "Invalid transaction response")
		return
	}
	response := map[string]interface{}{
		"data": transformTransaction(txData),
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleAddressBalance handles GET /api/v1/track1/address/:addr/balance.
// It returns the latest balance in wei (as a string, to avoid JSON number
// precision loss) and as a fixed 18-decimal ether string. Malformed
// addresses now get a 400 instead of surfacing as an opaque upstream RPC
// error with status 500.
func (s *Server) HandleAddressBalance(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/track1/address/")
	parts := strings.Split(path, "/")
	if len(parts) < 2 || parts[1] != "balance" {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid path")
		return
	}
	address := parts[0]
	// Cheap shape check (0x + 40 chars) before hitting the RPC node.
	if len(address) != 42 || !strings.HasPrefix(address, "0x") {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid address")
		return
	}
	balanceResp, err := s.rpcGateway.GetBalance(r.Context(), address, "latest")
	if err != nil {
		writeError(w, http.StatusInternalServerError, "rpc_error", err.Error())
		return
	}
	balanceHex, ok := balanceResp.Result.(string)
	if !ok {
		writeError(w, http.StatusInternalServerError, "invalid_response", "Invalid balance response")
		return
	}
	balance, err := hexToBigInt(balanceHex)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "parse_error", err.Error())
		return
	}
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"address":       address,
			"balance":       balance.String(),
			"balance_wei":   balance.String(),
			"balance_ether": weiToEther(balance),
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleBridgeStatus handles GET /api/v1/track1/bridge/status.
// NOTE(review): the payload is a hardcoded stub — chain statuses, transfer
// counts, and volume are static, and last_sync is simply "now". In
// production this should query the bridge contracts.
func (s *Server) HandleBridgeStatus(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// Return bridge status (simplified - in production, query bridge contracts)
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"status": "operational",
			"chains": map[string]interface{}{
				"138": map[string]interface{}{
					"name":      "Defi Oracle Meta Mainnet",
					"status":    "operational",
					"last_sync": time.Now().UTC().Format(time.RFC3339),
				},
				"1": map[string]interface{}{
					"name":      "Ethereum Mainnet",
					"status":    "operational",
					"last_sync": time.Now().UTC().Format(time.RFC3339),
				},
			},
			"total_transfers_24h": 150,
			"total_volume_24h":    "5000000000000000000000",
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// Helper functions
// writeError writes a JSON error envelope {"error": {"code", "message"}}
// with the given HTTP status.
func writeError(w http.ResponseWriter, statusCode int, code, message string) {
	body := map[string]interface{}{
		"error": map[string]interface{}{
			"code":    code,
			"message": message,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	// The status line is already committed; an encode failure here is not
	// recoverable.
	json.NewEncoder(w).Encode(body)
}
// hexToInt parses a 0x-prefixed (or bare) hex quantity into an int64.
func hexToInt(hex string) (int64, error) {
	digits := strings.TrimPrefix(hex, "0x")
	return strconv.ParseInt(digits, 16, 64)
}
// parseBlockNumber parses a decimal block number, returning 0 for any
// input that does not parse. NOTE(review): the zero return is
// indistinguishable from a genuine block 0.
func parseBlockNumber(s string) int64 {
	if num, err := strconv.ParseInt(s, 10, 64); err == nil {
		return num
	}
	return 0
}
// transformBlock converts a raw eth_getBlockByNumber result into the
// explorer's block response shape, decoding hex quantities to integers and
// the timestamp to RFC 3339.
func transformBlock(blockData map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, 8)
	out["number"] = parseHexField(blockData["number"])
	out["hash"] = blockData["hash"]
	out["parent_hash"] = blockData["parentHash"]
	out["timestamp"] = parseHexTimestamp(blockData["timestamp"])
	out["transaction_count"] = countTransactions(blockData["transactions"])
	out["gas_used"] = parseHexField(blockData["gasUsed"])
	out["gas_limit"] = parseHexField(blockData["gasLimit"])
	out["miner"] = blockData["miner"]
	return out
}
// transformTransaction converts a raw RPC transaction object into the
// explorer's transaction response shape.
func transformTransaction(txData map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, 6)
	out["hash"] = txData["hash"]
	out["from"] = txData["from"]
	out["to"] = txData["to"]
	out["value"] = txData["value"]
	out["block_number"] = parseHexField(txData["blockNumber"])
	out["timestamp"] = parseHexTimestamp(txData["timestamp"])
	return out
}
// parseHexField decodes a hex-string field to an int64; any non-string or
// unparsable value is passed through unchanged.
func parseHexField(field interface{}) interface{} {
	str, ok := field.(string)
	if !ok {
		return field
	}
	num, err := hexToInt(str)
	if err != nil {
		return field
	}
	return num
}
// parseHexTimestamp decodes a hex Unix timestamp into an RFC 3339 string,
// or "" when the field is absent or not parsable hex.
func parseHexTimestamp(field interface{}) string {
	if str, ok := field.(string); ok {
		if num, err := hexToInt(str); err == nil {
			// Render in UTC so output does not vary with the server's
			// local timezone (HandleBridgeStatus already emits UTC).
			return time.Unix(num, 0).UTC().Format(time.RFC3339)
		}
	}
	return ""
}
// countTransactions returns the length of a block's transaction list, or 0
// when the field is missing or not a list.
func countTransactions(txs interface{}) int {
	txsList, ok := txs.([]interface{})
	if !ok {
		return 0
	}
	return len(txsList)
}
// hexToBigInt parses a 0x-prefixed (or bare) hex quantity into a big.Int,
// for values such as wei balances that can exceed int64.
func hexToBigInt(hex string) (*big.Int, error) {
	digits := strings.TrimPrefix(hex, "0x")
	value, ok := new(big.Int).SetString(digits, 16)
	if !ok {
		return nil, fmt.Errorf("invalid hex number")
	}
	return value, nil
}
func weiToEther(wei *big.Int) string {
ether := new(big.Float).Quo(new(big.Float).SetInt(wei), big.NewFloat(1e18))
return ether.Text('f', 18)
}

View File

@@ -0,0 +1,83 @@
package track1
import (
"sync"
"time"
)
// InMemoryRateLimiter is a simple process-local, fixed-window rate limiter.
// Only the per-minute limit is enforced; RequestsPerSecond and BurstSize
// are carried in the config but not currently applied. In production, use
// Redis for distributed rate limiting.
type InMemoryRateLimiter struct {
	limits map[string]*limitEntry
	mu     sync.RWMutex
	config RateLimitConfig
}

// RateLimitConfig defines rate limit configuration.
type RateLimitConfig struct {
	RequestsPerSecond int // not enforced by the in-memory implementation
	RequestsPerMinute int
	BurstSize         int // not enforced by the in-memory implementation
}

// limitEntry tracks the fixed-window counter for one key.
type limitEntry struct {
	count     int
	resetAt   time.Time // end of the current one-minute window
	lastReset time.Time // when the window was last (re)started
}

// NewInMemoryRateLimiter creates a new in-memory rate limiter.
func NewInMemoryRateLimiter(config RateLimitConfig) *InMemoryRateLimiter {
	return &InMemoryRateLimiter{
		limits: make(map[string]*limitEntry),
		config: config,
	}
}

// Allow reports whether another request is permitted for key within its
// current one-minute window, counting the request when it is.
func (rl *InMemoryRateLimiter) Allow(key string) bool {
	rl.mu.Lock()
	defer rl.mu.Unlock()

	now := time.Now()
	entry, ok := rl.limits[key]

	// First request for this key: open a fresh window.
	if !ok {
		rl.limits[key] = &limitEntry{
			count:     1,
			resetAt:   now.Add(time.Minute),
			lastReset: now,
		}
		return true
	}

	// Window elapsed: restart it with this request counted.
	if now.After(entry.resetAt) {
		entry.count = 1
		entry.resetAt = now.Add(time.Minute)
		entry.lastReset = now
		return true
	}

	// Inside the window: deny once the per-minute budget is spent.
	if entry.count >= rl.config.RequestsPerMinute {
		return false
	}
	entry.count++
	return true
}

// Cleanup drops entries whose window ended more than five minutes ago.
// Call it periodically to bound memory use.
func (rl *InMemoryRateLimiter) Cleanup() {
	rl.mu.Lock()
	defer rl.mu.Unlock()

	now := time.Now()
	for key, entry := range rl.limits {
		if now.After(entry.resetAt.Add(5 * time.Minute)) {
			delete(rl.limits, key)
		}
	}
}

View File

@@ -0,0 +1,87 @@
package track1
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestInMemoryRateLimiter_Allow verifies the per-minute budget is granted
// in full and then denied.
func TestInMemoryRateLimiter_Allow(t *testing.T) {
	limiter := NewInMemoryRateLimiter(RateLimitConfig{
		RequestsPerSecond: 10,
		RequestsPerMinute: 100,
		BurstSize:         20,
	})

	const key = "test-key"
	for i := 0; i < 100; i++ {
		assert.True(t, limiter.Allow(key), "Request %d should be allowed", i)
	}
	assert.False(t, limiter.Allow(key), "Request 101 should be denied")
}
// TestInMemoryRateLimiter_Reset verifies the window re-opens after a
// minute. The real-time wait makes this a slow test, so it is skipped
// under `go test -short` instead of unconditionally sleeping 61 seconds
// in every run.
func TestInMemoryRateLimiter_Reset(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping 61s window-reset wait in short mode")
	}
	config := RateLimitConfig{
		RequestsPerMinute: 10,
	}
	limiter := NewInMemoryRateLimiter(config)
	key := "test-key"
	// Exhaust limit
	for i := 0; i < 10; i++ {
		limiter.Allow(key)
	}
	assert.False(t, limiter.Allow(key))
	// Wait for reset (1 minute)
	time.Sleep(61 * time.Second)
	// Should allow again after reset
	assert.True(t, limiter.Allow(key))
}
// TestInMemoryRateLimiter_DifferentKeys verifies budgets are per key.
func TestInMemoryRateLimiter_DifferentKeys(t *testing.T) {
	limiter := NewInMemoryRateLimiter(RateLimitConfig{RequestsPerMinute: 10})

	const exhausted, fresh = "key1", "key2"

	// Spend the whole budget for one key.
	for i := 0; i < 10; i++ {
		limiter.Allow(exhausted)
	}
	assert.False(t, limiter.Allow(exhausted))

	// The other key's budget must be untouched.
	for i := 0; i < 10; i++ {
		assert.True(t, limiter.Allow(fresh), "Request %d for key2 should be allowed", i)
	}
}
// TestInMemoryRateLimiter_Cleanup verifies Cleanup keeps entries whose
// window is still active (it only evicts long-expired ones). The previous
// version only asserted the limiter was non-nil, which checks nothing.
func TestInMemoryRateLimiter_Cleanup(t *testing.T) {
	config := RateLimitConfig{
		RequestsPerMinute: 10,
	}
	limiter := NewInMemoryRateLimiter(config)
	key := "test-key"
	assert.True(t, limiter.Allow(key))
	limiter.Cleanup()
	// The entry is well inside its one-minute window, so it must survive
	// cleanup and keep counting against the same budget.
	assert.True(t, limiter.Allow(key), "active entry should survive Cleanup")
}

View File

@@ -0,0 +1,88 @@
package track1
import (
"context"
"os"
"time"
"github.com/redis/go-redis/v9"
)
// RedisCache is a Redis-based cache implementation.
// Use this in production for distributed caching.
type RedisCache struct {
	client *redis.Client
	// ctx is a background context reused for every call because the Cache
	// interface carries no per-call context. NOTE(review): storing a
	// context in a struct is normally discouraged — consider threading a
	// context through the Cache interface instead.
	ctx context.Context
}

// NewRedisCache creates a new Redis cache from a redis:// URL and verifies
// connectivity with a PING before returning.
func NewRedisCache(redisURL string) (*RedisCache, error) {
	opts, err := redis.ParseURL(redisURL)
	if err != nil {
		return nil, err
	}
	client := redis.NewClient(opts)
	ctx := context.Background()
	// Test connection
	if err := client.Ping(ctx).Err(); err != nil {
		return nil, err
	}
	return &RedisCache{
		client: client,
		ctx:    ctx,
	}, nil
}

// NewRedisCacheFromClient creates a new Redis cache from an existing client.
// The caller retains ownership of the client's lifecycle.
func NewRedisCacheFromClient(client *redis.Client) *RedisCache {
	return &RedisCache{
		client: client,
		ctx:    context.Background(),
	}
}

// Get retrieves a value from cache, mapping redis.Nil (key absent or
// expired) to ErrCacheMiss so callers see the same sentinel as the
// in-memory cache.
func (c *RedisCache) Get(key string) ([]byte, error) {
	val, err := c.client.Get(c.ctx, key).Bytes()
	if err == redis.Nil {
		return nil, ErrCacheMiss
	}
	if err != nil {
		return nil, err
	}
	return val, nil
}

// Set stores a value in cache with TTL; expiry is handled server-side by Redis.
func (c *RedisCache) Set(key string, value []byte, ttl time.Duration) error {
	return c.client.Set(c.ctx, key, value, ttl).Err()
}

// Delete removes a key from cache.
func (c *RedisCache) Delete(key string) error {
	return c.client.Del(c.ctx, key).Err()
}

// Clear clears all cache keys (use with caution: FLUSHDB removes every key
// in the current Redis database, not just this cache's keys).
func (c *RedisCache) Clear() error {
	return c.client.FlushDB(c.ctx).Err()
}

// Close closes the Redis connection.
func (c *RedisCache) Close() error {
	return c.client.Close()
}
// NewCache creates a cache based on environment:
// a Redis-backed cache when REDIS_URL is set (which can fail on a bad URL
// or an unreachable server), otherwise a process-local in-memory cache.
func NewCache() (Cache, error) {
	redisURL := os.Getenv("REDIS_URL")
	if redisURL != "" {
		return NewRedisCache(redisURL)
	}
	return NewInMemoryCache(), nil
}

View File

@@ -0,0 +1,135 @@
package track1

import (
	"context"
	"os"
	"strconv"
	"time"

	"github.com/redis/go-redis/v9"
)
// RedisRateLimiter is a Redis-based sliding-window rate limiter.
// Use this in production for distributed rate limiting across instances.
type RedisRateLimiter struct {
	client *redis.Client
	// ctx is a background context reused for every Redis call because the
	// RateLimiter interface carries no per-call context.
	ctx    context.Context
	config RateLimitConfig
}

// NewRedisRateLimiter creates a new Redis rate limiter from a redis:// URL
// and verifies connectivity with a PING before returning.
func NewRedisRateLimiter(redisURL string, config RateLimitConfig) (*RedisRateLimiter, error) {
	opts, err := redis.ParseURL(redisURL)
	if err != nil {
		return nil, err
	}
	client := redis.NewClient(opts)
	ctx := context.Background()
	// Test connection
	if err := client.Ping(ctx).Err(); err != nil {
		return nil, err
	}
	return &RedisRateLimiter{
		client: client,
		ctx:    ctx,
		config: config,
	}, nil
}

// NewRedisRateLimiterFromClient creates a new Redis rate limiter from an
// existing client; the caller retains ownership of the client's lifecycle.
func NewRedisRateLimiterFromClient(client *redis.Client, config RateLimitConfig) *RedisRateLimiter {
	return &RedisRateLimiter{
		client: client,
		ctx:    context.Background(),
		config: config,
	}
}
// Allow checks if a request is allowed for the given key, using a
// one-minute sliding window kept in a Redis sorted set whose scores are
// Unix seconds. Redis errors fail open: the request is allowed.
func (rl *RedisRateLimiter) Allow(key string) bool {
	now := time.Now()
	windowStart := now.Add(-time.Minute)
	zsetKey := "ratelimit:" + key

	// Drop entries older than the window. ZREMRANGEBYSCORE requires
	// numeric score bounds — the previous code passed an RFC 3339 string
	// here, which Redis rejects, so the window never slid and counts grew
	// until the limiter denied everything.
	rl.client.ZRemRangeByScore(rl.ctx, zsetKey, "0", strconv.FormatInt(windowStart.Unix(), 10))

	// Count requests in current window
	count, err := rl.client.ZCard(rl.ctx, zsetKey).Result()
	if err != nil {
		// On error, allow the request (fail open)
		return true
	}
	// Check if limit exceeded
	if int(count) >= rl.config.RequestsPerMinute {
		return false
	}

	// Record this request. A nanosecond-resolution member keeps concurrent
	// requests (mostly) distinct so ZADD does not collapse them into one
	// entry and undercount.
	rl.client.ZAdd(rl.ctx, zsetKey, redis.Z{
		Score:  float64(now.Unix()),
		Member: strconv.FormatInt(now.UnixNano(), 10),
	})
	// Expire idle keys so abandoned limiters clean themselves up.
	rl.client.Expire(rl.ctx, zsetKey, time.Minute*2)
	return true
}
// GetRemaining returns the number of requests remaining in the current
// one-minute window for key. Redis errors fail open: the full per-minute
// budget is reported.
func (rl *RedisRateLimiter) GetRemaining(key string) int {
	now := time.Now()
	windowStart := now.Add(-time.Minute)
	zsetKey := "ratelimit:" + key

	// ZREMRANGEBYSCORE requires numeric score bounds (Unix seconds here);
	// the previous RFC 3339 string was rejected by Redis, so expired
	// entries stayed in the set and inflated the count.
	rl.client.ZRemRangeByScore(rl.ctx, zsetKey, "0", strconv.FormatInt(windowStart.Unix(), 10))

	// Count requests in current window
	count, err := rl.client.ZCard(rl.ctx, zsetKey).Result()
	if err != nil {
		return rl.config.RequestsPerMinute
	}
	remaining := rl.config.RequestsPerMinute - int(count)
	if remaining < 0 {
		return 0
	}
	return remaining
}
// Reset clears the rate-limit window for a key (e.g. for an admin override),
// deleting its backing sorted set.
func (rl *RedisRateLimiter) Reset(key string) error {
	zsetKey := "ratelimit:" + key
	return rl.client.Del(rl.ctx, zsetKey).Err()
}

// Close closes the Redis connection.
func (rl *RedisRateLimiter) Close() error {
	return rl.client.Close()
}
// formatTime formats time for Redis sorted set
func formatTime(t time.Time) string {
return t.Format(time.RFC3339Nano)
}
// NewRateLimiter creates a rate limiter based on environment:
// a Redis-backed limiter when REDIS_URL is set (which can fail on a bad
// URL or an unreachable server), otherwise an in-memory limiter.
func NewRateLimiter(config RateLimitConfig) (RateLimiter, error) {
	redisURL := os.Getenv("REDIS_URL")
	if redisURL != "" {
		return NewRedisRateLimiter(redisURL, config)
	}
	return NewInMemoryRateLimiter(config), nil
}

View File

@@ -0,0 +1,178 @@
package track1
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
// RPCGateway handles RPC passthrough with caching
type RPCGateway struct {
rpcURL string
httpClient *http.Client
cache Cache
rateLimit RateLimiter
}
// Cache interface for caching RPC responses
type Cache interface {
Get(key string) ([]byte, error)
Set(key string, value []byte, ttl time.Duration) error
}
// RateLimiter interface for rate limiting
type RateLimiter interface {
Allow(key string) bool
}
// NewRPCGateway creates a new RPC gateway targeting rpcURL, backed by the
// given cache and rate limiter. The HTTP client uses a 10-second timeout.
func NewRPCGateway(rpcURL string, cache Cache, rateLimit RateLimiter) *RPCGateway {
	client := &http.Client{Timeout: 10 * time.Second}
	return &RPCGateway{
		rpcURL:     rpcURL,
		httpClient: client,
		cache:      cache,
		rateLimit:  rateLimit,
	}
}
// RPCRequest represents a JSON-RPC 2.0 request envelope.
type RPCRequest struct {
	JSONRPC string        `json:"jsonrpc"` // always "2.0"
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
	ID      int           `json:"id"`
}
// RPCResponse represents a JSON-RPC 2.0 response envelope.
// Exactly one of Result or Error is expected to be populated.
type RPCResponse struct {
	JSONRPC string      `json:"jsonrpc"`
	Result  interface{} `json:"result,omitempty"`
	Error   *RPCError   `json:"error,omitempty"`
	ID      int         `json:"id"`
}
// RPCError represents the error object returned by a JSON-RPC server.
type RPCError struct {
	Code    int         `json:"code"`
	Message string      `json:"message"`
	Data    interface{} `json:"data,omitempty"`
}
// Call executes a JSON-RPC method against the upstream node, consulting the
// cache first and enforcing the shared "rpc" rate-limit bucket.
//
// When cacheKey is non-empty, a previously cached response is returned
// without touching the network; successful responses with a non-nil Result
// are written back under the same key for cacheTTL (best effort). Errors
// are returned for rate-limit rejection, transport failures, non-200
// statuses, malformed bodies, and JSON-RPC error objects.
func (g *RPCGateway) Call(ctx context.Context, method string, params []interface{}, cacheKey string, cacheTTL time.Duration) (*RPCResponse, error) {
	// Serve from cache when possible; a decode failure simply falls
	// through to a fresh upstream call.
	if cacheKey != "" {
		if raw, err := g.cache.Get(cacheKey); err == nil {
			var hit RPCResponse
			if json.Unmarshal(raw, &hit) == nil {
				return &hit, nil
			}
		}
	}
	// All upstream calls share a single rate-limit bucket.
	if !g.rateLimit.Allow("rpc") {
		return nil, fmt.Errorf("rate limit exceeded")
	}
	payload, err := json.Marshal(RPCRequest{
		JSONRPC: "2.0",
		Method:  method,
		Params:  params,
		ID:      1,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to marshal request: %w", err)
	}
	httpReq, err := http.NewRequestWithContext(ctx, "POST", g.rpcURL, bytes.NewBuffer(payload))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	httpReq.Header.Set("Content-Type", "application/json")
	resp, err := g.httpClient.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("RPC call failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("RPC returned status %d", resp.StatusCode)
	}
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	var rpcResp RPCResponse
	if err := json.Unmarshal(raw, &rpcResp); err != nil {
		return nil, fmt.Errorf("failed to unmarshal response: %w", err)
	}
	if rpcResp.Error != nil {
		return nil, fmt.Errorf("RPC error: %s (code: %d)", rpcResp.Error.Message, rpcResp.Error.Code)
	}
	// Best-effort write-back; cache failures never fail the call.
	if cacheKey != "" && rpcResp.Result != nil {
		if data, err := json.Marshal(rpcResp); err == nil {
			g.cache.Set(cacheKey, data, cacheTTL)
		}
	}
	return &rpcResp, nil
}
// GetBlockByNumber gets a block by number (hex quantity or tag such as
// "latest"), cached for 10s per (number, includeTxs) pair.
func (g *RPCGateway) GetBlockByNumber(ctx context.Context, blockNumber string, includeTxs bool) (*RPCResponse, error) {
	cacheKey := fmt.Sprintf("block:%s:%v", blockNumber, includeTxs)
	return g.Call(ctx, "eth_getBlockByNumber", []interface{}{blockNumber, includeTxs}, cacheKey, 10*time.Second)
}
// GetBlockByHash gets a block by hash, cached for 10s.
func (g *RPCGateway) GetBlockByHash(ctx context.Context, blockHash string, includeTxs bool) (*RPCResponse, error) {
	cacheKey := fmt.Sprintf("block_hash:%s:%v", blockHash, includeTxs)
	return g.Call(ctx, "eth_getBlockByHash", []interface{}{blockHash, includeTxs}, cacheKey, 10*time.Second)
}
// GetTransactionByHash gets a transaction by hash, cached for 30s.
func (g *RPCGateway) GetTransactionByHash(ctx context.Context, txHash string) (*RPCResponse, error) {
	cacheKey := fmt.Sprintf("tx:%s", txHash)
	return g.Call(ctx, "eth_getTransactionByHash", []interface{}{txHash}, cacheKey, 30*time.Second)
}
// GetBalance gets an address balance at blockNumber (defaulting to
// "latest" when empty), cached for 10s.
func (g *RPCGateway) GetBalance(ctx context.Context, address string, blockNumber string) (*RPCResponse, error) {
	if blockNumber == "" {
		blockNumber = "latest"
	}
	cacheKey := fmt.Sprintf("balance:%s:%s", address, blockNumber)
	return g.Call(ctx, "eth_getBalance", []interface{}{address, blockNumber}, cacheKey, 10*time.Second)
}
// GetBlockNumber gets the latest block number, cached for 5s under a
// single shared key.
func (g *RPCGateway) GetBlockNumber(ctx context.Context) (*RPCResponse, error) {
	return g.Call(ctx, "eth_blockNumber", []interface{}{}, "block_number", 5*time.Second)
}
// GetTransactionCount gets the transaction count (nonce) for an address at
// blockNumber (defaulting to "latest" when empty), cached for 10s.
func (g *RPCGateway) GetTransactionCount(ctx context.Context, address string, blockNumber string) (*RPCResponse, error) {
	if blockNumber == "" {
		blockNumber = "latest"
	}
	cacheKey := fmt.Sprintf("tx_count:%s:%s", address, blockNumber)
	return g.Call(ctx, "eth_getTransactionCount", []interface{}{address, blockNumber}, cacheKey, 10*time.Second)
}

View File

@@ -0,0 +1,374 @@
package track2
import (
"encoding/json"
"net/http"
"strconv"
"strings"
"github.com/jackc/pgx/v5/pgxpool"
)
// Server handles Track 2 endpoints.
type Server struct {
	db      *pgxpool.Pool // shared Postgres connection pool
	chainID int           // chain all queries are scoped to
}
// NewServer creates a new Track 2 server bound to the given pool and chain.
func NewServer(db *pgxpool.Pool, chainID int) *Server {
	return &Server{
		db:      db,
		chainID: chainID,
	}
}
// HandleAddressTransactions handles GET /api/v1/track2/address/:addr/txs.
//
// It returns a paginated, newest-first list of transactions where the
// (lower-cased) address is either sender or receiver, each tagged with a
// "sent"/"received" direction, plus pagination metadata.
// Query params: page (default 1), limit (1-100, default 20).
func (s *Server) HandleAddressTransactions(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// Expected path remainder: "<address>/txs".
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/track2/address/")
	parts := strings.Split(path, "/")
	if len(parts) < 2 || parts[1] != "txs" {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid path")
		return
	}
	address := strings.ToLower(parts[0])
	page, _ := strconv.Atoi(r.URL.Query().Get("page"))
	if page < 1 {
		page = 1
	}
	limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
	if limit < 1 || limit > 100 {
		limit = 20
	}
	offset := (page - 1) * limit
	query := `
	SELECT hash, from_address, to_address, value, block_number, timestamp, status
	FROM transactions
	WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)
	ORDER BY block_number DESC, timestamp DESC
	LIMIT $3 OFFSET $4
	`
	rows, err := s.db.Query(r.Context(), query, s.chainID, address, limit, offset)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return
	}
	defer rows.Close()
	transactions := []map[string]interface{}{}
	for rows.Next() {
		var hash, from, to, value, status string
		var blockNumber int64
		var timestamp interface{}
		if err := rows.Scan(&hash, &from, &to, &value, &blockNumber, &timestamp, &status); err != nil {
			// Skip malformed rows rather than failing the whole page.
			continue
		}
		direction := "received"
		if strings.ToLower(from) == address {
			direction = "sent"
		}
		transactions = append(transactions, map[string]interface{}{
			"hash":         hash,
			"from":         from,
			"to":           to,
			"value":        value,
			"block_number": blockNumber,
			"timestamp":    timestamp,
			"status":       status,
			"direction":    direction,
		})
	}
	// BUG FIX: iteration errors were previously ignored, silently serving a
	// truncated page. Surface them as a 500 instead.
	if err := rows.Err(); err != nil {
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return
	}
	// Total count for pagination metadata (best effort: a failed count
	// leaves total at 0 rather than failing the request).
	var total int
	countQuery := `SELECT COUNT(*) FROM transactions WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`
	s.db.QueryRow(r.Context(), countQuery, s.chainID, address).Scan(&total)
	response := map[string]interface{}{
		"data": transactions,
		"pagination": map[string]interface{}{
			"page":        page,
			"limit":       limit,
			"total":       total,
			"total_pages": (total + limit - 1) / limit,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleAddressTokens handles GET /api/v1/track2/address/:addr/tokens.
//
// It returns all non-zero token balances for the (lower-cased) address on
// this server's chain, ordered by balance descending.
// NOTE(review): rows.Err() is never checked after the loop, so a
// mid-iteration error silently yields a truncated token list. Also,
// `balance > 0` and `ORDER BY balance` assume the balance column is numeric
// even though it is scanned into a string — confirm the column type.
func (s *Server) HandleAddressTokens(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// Expected path remainder: "<address>/tokens".
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/track2/address/")
	parts := strings.Split(path, "/")
	if len(parts) < 2 || parts[1] != "tokens" {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid path")
		return
	}
	address := strings.ToLower(parts[0])
	query := `
	SELECT token_contract, balance, last_updated_timestamp
	FROM token_balances
	WHERE address = $1 AND chain_id = $2 AND balance > 0
	ORDER BY balance DESC
	`
	rows, err := s.db.Query(r.Context(), query, address, s.chainID)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return
	}
	defer rows.Close()
	tokens := []map[string]interface{}{}
	for rows.Next() {
		var contract, balance string
		var lastUpdated interface{}
		if err := rows.Scan(&contract, &balance, &lastUpdated); err != nil {
			// Skip malformed rows rather than failing the whole listing.
			continue
		}
		tokens = append(tokens, map[string]interface{}{
			"contract":          contract,
			"balance":           balance,
			"balance_formatted": balance, // TODO: Format with decimals
			"last_updated":      lastUpdated,
		})
	}
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"address":      address,
			"tokens":       tokens,
			"total_tokens": len(tokens),
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleTokenInfo handles GET /api/v1/track2/token/:contract.
//
// It summarizes 24h activity for the (lower-cased) contract from
// token_transfers: holder estimate, transfer count, and summed volume.
// NOTE(review): "holders" is COUNT(DISTINCT from) + COUNT(DISTINCT to),
// which double-counts addresses appearing on both sides — presumably an
// approximation; verify intent. Also, the aggregate query always returns a
// row (COUNT = 0 for unknown contracts), so the 404 here fires only when
// Scan fails (e.g. a NULL SUM for a contract with no recent transfers) —
// confirm this matches the intended "token not found" semantics.
func (s *Server) HandleTokenInfo(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/track2/token/")
	contract := strings.ToLower(path)
	// Get token info from token_transfers
	query := `
	SELECT
		COUNT(DISTINCT from_address) + COUNT(DISTINCT to_address) as holders,
		COUNT(*) as transfers_24h,
		SUM(value) as volume_24h
	FROM token_transfers
	WHERE token_contract = $1 AND chain_id = $2
	AND timestamp >= NOW() - INTERVAL '24 hours'
	`
	var holders, transfers24h int
	var volume24h string
	err := s.db.QueryRow(r.Context(), query, contract, s.chainID).Scan(&holders, &transfers24h, &volume24h)
	if err != nil {
		writeError(w, http.StatusNotFound, "not_found", "Token not found")
		return
	}
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"contract":      contract,
			"holders":       holders,
			"transfers_24h": transfers24h,
			"volume_24h":    volume24h,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleSearch handles GET /api/v1/track2/search?q=.
//
// It heuristically classifies the query as a block number (pure decimal),
// a transaction hash (64 hex chars after stripping "0x"), or an address
// (40 chars), and returns the first match.
// NOTE(review): the "0x" prefix is stripped before strconv.ParseInt base
// 10, so "0x123" is looked up as decimal block 123 rather than hex —
// confirm that is intended. A 40-char query is first (uselessly) checked
// against transaction hashes before the address lookup; harmless but wasted
// round trip.
func (s *Server) HandleSearch(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	query := r.URL.Query().Get("q")
	if query == "" {
		writeError(w, http.StatusBadRequest, "bad_request", "Query parameter 'q' is required")
		return
	}
	query = strings.ToLower(strings.TrimPrefix(query, "0x"))
	// Try to detect type and search
	var result map[string]interface{}
	// Check if it's a block number
	if blockNum, err := strconv.ParseInt(query, 10, 64); err == nil {
		var hash string
		err := s.db.QueryRow(r.Context(), `SELECT hash FROM blocks WHERE chain_id = $1 AND number = $2`, s.chainID, blockNum).Scan(&hash)
		if err == nil {
			result = map[string]interface{}{
				"type": "block",
				"result": map[string]interface{}{
					"number": blockNum,
					"hash":   hash,
				},
			}
		}
	} else if len(query) == 64 || len(query) == 40 {
		// Could be address or transaction hash
		fullQuery := "0x" + query
		// Check transaction
		var txHash string
		err := s.db.QueryRow(r.Context(), `SELECT hash FROM transactions WHERE chain_id = $1 AND hash = $2`, s.chainID, fullQuery).Scan(&txHash)
		if err == nil {
			result = map[string]interface{}{
				"type": "transaction",
				"result": map[string]interface{}{
					"hash": txHash,
				},
			}
		} else {
			// Check address (summed token balance as a cheap existence probe)
			var balance string
			err := s.db.QueryRow(r.Context(), `SELECT COALESCE(SUM(balance), '0') FROM token_balances WHERE address = $1 AND chain_id = $2`, fullQuery, s.chainID).Scan(&balance)
			if err == nil {
				result = map[string]interface{}{
					"type": "address",
					"result": map[string]interface{}{
						"address": fullQuery,
						"balance": balance,
					},
				}
			}
		}
	}
	if result == nil {
		writeError(w, http.StatusNotFound, "not_found", "No results found")
		return
	}
	response := map[string]interface{}{
		"data": result,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleInternalTransactions handles GET /api/v1/track2/address/:addr/internal-txs.
//
// It returns a paginated, newest-first list of internal transactions where
// the (lower-cased) address is sender or receiver, plus pagination metadata.
// Query params: page (default 1), limit (1-100, default 20).
func (s *Server) HandleInternalTransactions(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		writeError(w, http.StatusMethodNotAllowed, "method_not_allowed", "Method not allowed")
		return
	}
	// Expected path remainder: "<address>/internal-txs".
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/track2/address/")
	parts := strings.Split(path, "/")
	if len(parts) < 2 || parts[1] != "internal-txs" {
		writeError(w, http.StatusBadRequest, "bad_request", "Invalid path")
		return
	}
	address := strings.ToLower(parts[0])
	page, _ := strconv.Atoi(r.URL.Query().Get("page"))
	if page < 1 {
		page = 1
	}
	limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
	if limit < 1 || limit > 100 {
		limit = 20
	}
	offset := (page - 1) * limit
	query := `
	SELECT transaction_hash, from_address, to_address, value, block_number, timestamp
	FROM internal_transactions
	WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)
	ORDER BY block_number DESC, timestamp DESC
	LIMIT $3 OFFSET $4
	`
	rows, err := s.db.Query(r.Context(), query, s.chainID, address, limit, offset)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return
	}
	defer rows.Close()
	internalTxs := []map[string]interface{}{}
	for rows.Next() {
		var txHash, from, to, value string
		var blockNumber int64
		var timestamp interface{}
		if err := rows.Scan(&txHash, &from, &to, &value, &blockNumber, &timestamp); err != nil {
			// Skip malformed rows rather than failing the whole page.
			continue
		}
		internalTxs = append(internalTxs, map[string]interface{}{
			"transaction_hash": txHash,
			"from":             from,
			"to":               to,
			"value":            value,
			"block_number":     blockNumber,
			"timestamp":        timestamp,
		})
	}
	// BUG FIX: iteration errors were previously ignored, silently serving a
	// truncated page. Surface them as a 500 instead.
	if err := rows.Err(); err != nil {
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return
	}
	// Total count for pagination metadata (best effort).
	var total int
	countQuery := `SELECT COUNT(*) FROM internal_transactions WHERE chain_id = $1 AND (from_address = $2 OR to_address = $2)`
	s.db.QueryRow(r.Context(), countQuery, s.chainID, address).Scan(&total)
	response := map[string]interface{}{
		"data": internalTxs,
		"pagination": map[string]interface{}{
			"page":        page,
			"limit":       limit,
			"total":       total,
			"total_pages": (total + limit - 1) / limit,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// writeError writes a JSON error envelope {"error":{"code","message"}}
// with the given HTTP status code.
func writeError(w http.ResponseWriter, statusCode int, code, message string) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	payload := map[string]interface{}{
		"error": map[string]interface{}{
			"code":    code,
			"message": message,
		},
	}
	json.NewEncoder(w).Encode(payload)
}

View File

@@ -0,0 +1,167 @@
package track3
import (
"encoding/json"
"net/http"
"strconv"
"strings"
"time"
"github.com/explorer/backend/analytics"
"github.com/jackc/pgx/v5/pgxpool"
)
// Server handles Track 3 (analytics) endpoints.
type Server struct {
	db              *pgxpool.Pool              // shared Postgres pool
	flowTracker     *analytics.FlowTracker     // token-flow queries
	bridgeAnalytics *analytics.BridgeAnalytics // cross-chain bridge stats
	tokenDist       *analytics.TokenDistribution
	riskAnalyzer    *analytics.AddressRiskAnalyzer
	chainID         int // chain most analytics are scoped to
}
// NewServer creates a new Track 3 server, wiring all analytics helpers to
// the same pool and chain.
func NewServer(db *pgxpool.Pool, chainID int) *Server {
	return &Server{
		db:              db,
		flowTracker:     analytics.NewFlowTracker(db, chainID),
		bridgeAnalytics: analytics.NewBridgeAnalytics(db),
		tokenDist:       analytics.NewTokenDistribution(db, chainID),
		riskAnalyzer:    analytics.NewAddressRiskAnalyzer(db, chainID),
		chainID:         chainID,
	}
}
// HandleFlows handles GET /api/v1/track3/analytics/flows.
//
// Optional query params: from, to, token (passed through to the flow
// tracker), limit (1-200, default 50), and start_date/end_date in RFC3339.
// Unparsable dates are silently ignored (treated as unset).
// NOTE(review): unlike the track2 handlers, there is no r.Method guard
// here — confirm whether non-GET requests should be rejected.
func (s *Server) HandleFlows(w http.ResponseWriter, r *http.Request) {
	from := r.URL.Query().Get("from")
	to := r.URL.Query().Get("to")
	token := r.URL.Query().Get("token")
	limit, _ := strconv.Atoi(r.URL.Query().Get("limit"))
	if limit < 1 || limit > 200 {
		limit = 50
	}
	// nil means "no bound" for either end of the date range.
	var startDate, endDate *time.Time
	if startStr := r.URL.Query().Get("start_date"); startStr != "" {
		if t, err := time.Parse(time.RFC3339, startStr); err == nil {
			startDate = &t
		}
	}
	if endStr := r.URL.Query().Get("end_date"); endStr != "" {
		if t, err := time.Parse(time.RFC3339, endStr); err == nil {
			endDate = &t
		}
	}
	flows, err := s.flowTracker.GetFlows(r.Context(), from, to, token, startDate, endDate, limit)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return
	}
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"flows": flows,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleBridge handles GET /api/v1/track3/analytics/bridge.
//
// Optional query params: chain_from and chain_to (integers), plus
// start_date/end_date in RFC3339. Unparsable values are silently ignored
// (treated as unset); nil pointers mean "no filter".
func (s *Server) HandleBridge(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	// Parse an optional integer filter; empty or invalid input yields nil.
	parseChain := func(raw string) *int {
		if raw == "" {
			return nil
		}
		v, err := strconv.Atoi(raw)
		if err != nil {
			return nil
		}
		return &v
	}
	// Parse an optional RFC3339 bound; empty or invalid input yields nil.
	parseDate := func(raw string) *time.Time {
		if raw == "" {
			return nil
		}
		t, err := time.Parse(time.RFC3339, raw)
		if err != nil {
			return nil
		}
		return &t
	}
	chainFrom := parseChain(q.Get("chain_from"))
	chainTo := parseChain(q.Get("chain_to"))
	startDate := parseDate(q.Get("start_date"))
	endDate := parseDate(q.Get("end_date"))
	stats, err := s.bridgeAnalytics.GetBridgeStats(r.Context(), chainFrom, chainTo, startDate, endDate)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{"data": stats})
}
// HandleTokenDistribution handles GET /api/v1/track3/analytics/token-distribution/:contract.
//
// Query param top_n limits the number of holders considered (1-1000,
// default 100). The contract segment is taken verbatim from the path
// remainder and lower-cased; an empty remainder is passed through to the
// analytics layer as-is.
func (s *Server) HandleTokenDistribution(w http.ResponseWriter, r *http.Request) {
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/track3/analytics/token-distribution/")
	contract := strings.ToLower(path)
	topN, _ := strconv.Atoi(r.URL.Query().Get("top_n"))
	if topN < 1 || topN > 1000 {
		topN = 100
	}
	stats, err := s.tokenDist.GetTokenDistribution(r.Context(), contract, topN)
	if err != nil {
		// Any analytics error is reported as "not found" — including real
		// database failures; acceptable for now but conflates error classes.
		writeError(w, http.StatusNotFound, "not_found", err.Error())
		return
	}
	response := map[string]interface{}{
		"data": stats,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleAddressRisk handles GET /api/v1/track3/analytics/address-risk/:addr.
// The address is taken from the path remainder and lower-cased before
// being handed to the risk analyzer.
func (s *Server) HandleAddressRisk(w http.ResponseWriter, r *http.Request) {
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/track3/analytics/address-risk/")
	address := strings.ToLower(path)
	analysis, err := s.riskAnalyzer.AnalyzeAddress(r.Context(), address)
	if err != nil {
		writeError(w, http.StatusInternalServerError, "database_error", err.Error())
		return
	}
	response := map[string]interface{}{
		"data": analysis,
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// writeError writes a JSON error envelope {"error":{"code","message"}}
// with the given HTTP status code.
func writeError(w http.ResponseWriter, statusCode int, code, message string) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	json.NewEncoder(w).Encode(map[string]interface{}{
		"error": map[string]interface{}{
			"code":    code,
			"message": message,
		},
	})
}

View File

@@ -0,0 +1,152 @@
package track4
import (
"encoding/json"
"net/http"
"time"
"github.com/explorer/backend/auth"
"github.com/jackc/pgx/v5/pgxpool"
)
// Server handles Track 4 (operator) endpoints.
type Server struct {
	db      *pgxpool.Pool     // shared Postgres pool
	roleMgr *auth.RoleManager // RBAC + IP whitelist + audit logging
	chainID int               // chain the operator endpoints report on
}
// NewServer creates a new Track 4 server with a role manager bound to the
// same pool.
func NewServer(db *pgxpool.Pool, chainID int) *Server {
	return &Server{
		db:      db,
		roleMgr: auth.NewRoleManager(db),
		chainID: chainID,
	}
}
// HandleBridgeEvents handles GET /api/v1/track4/operator/bridge/events.
//
// Authorization: the operator address must be present in the request
// context under the plain string key "user_address" (set by upstream auth
// middleware — not visible here), and the caller's IP must pass the role
// manager's whitelist. Every access is audit-logged (best effort).
// NOTE(review): r.RemoteAddr has the form "host:port"; confirm that
// IsIPWhitelisted expects that form, or strip the port with
// net.SplitHostPort before the check and the audit log.
func (s *Server) HandleBridgeEvents(w http.ResponseWriter, r *http.Request) {
	// Get operator address from context
	operatorAddr, _ := r.Context().Value("user_address").(string)
	if operatorAddr == "" {
		writeError(w, http.StatusUnauthorized, "unauthorized", "Operator address required")
		return
	}
	// Check IP whitelist
	ipAddr := r.RemoteAddr
	if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
		writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
		return
	}
	// Log operator event (error intentionally ignored: audit is best effort)
	s.roleMgr.LogOperatorEvent(r.Context(), "bridge_events_read", &s.chainID, operatorAddr, "bridge/events", "read", map[string]interface{}{}, ipAddr, r.UserAgent())
	// Return bridge events (simplified placeholder payload)
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"events": []map[string]interface{}{},
			"control_state": map[string]interface{}{
				"paused":           false,
				"maintenance_mode": false,
				"last_update":      time.Now().UTC().Format(time.RFC3339),
			},
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleValidators handles GET /api/v1/track4/operator/validators.
//
// Authorization: the operator address must be present in the request
// context under the "user_address" key, and the caller's IP must pass the
// role manager's whitelist. Every access is audit-logged (best effort).
// Currently returns an empty placeholder validator set.
func (s *Server) HandleValidators(w http.ResponseWriter, r *http.Request) {
	operatorAddr, _ := r.Context().Value("user_address").(string)
	// BUG FIX: reject requests with no authenticated operator address,
	// matching HandleBridgeEvents, instead of running the IP-whitelist
	// check with an empty operator.
	if operatorAddr == "" {
		writeError(w, http.StatusUnauthorized, "unauthorized", "Operator address required")
		return
	}
	ipAddr := r.RemoteAddr
	if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
		writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
		return
	}
	// Audit log (best effort; error intentionally ignored).
	s.roleMgr.LogOperatorEvent(r.Context(), "validators_read", &s.chainID, operatorAddr, "validators", "read", map[string]interface{}{}, ipAddr, r.UserAgent())
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"validators":        []map[string]interface{}{},
			"total_validators":  0,
			"active_validators": 0,
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleContracts handles GET /api/v1/track4/operator/contracts.
//
// NOTE(review): unlike HandleBridgeEvents there is no empty-operator guard
// here; an unauthenticated request reaches the IP-whitelist check with an
// empty operator address — confirm IsIPWhitelisted rejects that, or add the
// same unauthorized check.
func (s *Server) HandleContracts(w http.ResponseWriter, r *http.Request) {
	operatorAddr, _ := r.Context().Value("user_address").(string)
	ipAddr := r.RemoteAddr
	if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
		writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
		return
	}
	// Audit log (best effort; error intentionally ignored).
	s.roleMgr.LogOperatorEvent(r.Context(), "contracts_read", &s.chainID, operatorAddr, "contracts", "read", map[string]interface{}{}, ipAddr, r.UserAgent())
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"contracts": []map[string]interface{}{},
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleProtocolState handles GET /api/v1/track4/operator/protocol-state.
//
// Returns a hard-coded placeholder protocol snapshot; the config/state
// figures below are static values, not live chain data.
// NOTE(review): same missing empty-operator guard as HandleContracts.
func (s *Server) HandleProtocolState(w http.ResponseWriter, r *http.Request) {
	operatorAddr, _ := r.Context().Value("user_address").(string)
	ipAddr := r.RemoteAddr
	if whitelisted, _ := s.roleMgr.IsIPWhitelisted(r.Context(), operatorAddr, ipAddr); !whitelisted {
		writeError(w, http.StatusForbidden, "forbidden", "IP address not whitelisted")
		return
	}
	// Audit log (best effort; error intentionally ignored).
	s.roleMgr.LogOperatorEvent(r.Context(), "protocol_state_read", &s.chainID, operatorAddr, "protocol/state", "read", map[string]interface{}{}, ipAddr, r.UserAgent())
	response := map[string]interface{}{
		"data": map[string]interface{}{
			"protocol_version": "1.0.0",
			"chain_id":         s.chainID,
			"config": map[string]interface{}{
				"bridge_enabled":      true,
				"max_transfer_amount": "1000000000000000000000000",
			},
			"state": map[string]interface{}{
				"total_locked":   "50000000000000000000000000",
				"total_bridged":  "10000000000000000000000000",
				"active_bridges": 2,
			},
			"last_updated": time.Now().UTC().Format(time.RFC3339),
		},
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// writeError writes a JSON error envelope {"error":{"code","message"}}
// with the given HTTP status code.
func writeError(w http.ResponseWriter, statusCode int, code, message string) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(statusCode)
	json.NewEncoder(w).Encode(map[string]interface{}{
		"error": map[string]interface{}{
			"code":    code,
			"message": message,
		},
	})
}

View File

@@ -0,0 +1,74 @@
package watchlists
import (
"context"
"fmt"
"github.com/jackc/pgx/v5/pgxpool"
)
// WatchlistService handles per-user address watchlist operations.
type WatchlistService struct {
	db *pgxpool.Pool // shared Postgres pool
}
// NewWatchlistService creates a new watchlist service on the given pool.
func NewWatchlistService(db *pgxpool.Pool) *WatchlistService {
	return &WatchlistService{db: db}
}
// AddToWatchlist adds an address to a user's watchlist; if the
// (user, chain, address) entry already exists, only its label is updated.
func (w *WatchlistService) AddToWatchlist(ctx context.Context, userID string, chainID int, address, label string) error {
	const query = `
	INSERT INTO watchlists (user_id, chain_id, address, label)
	VALUES ($1, $2, $3, $4)
	ON CONFLICT (user_id, chain_id, address) DO UPDATE SET
		label = $4
	`
	if _, err := w.db.Exec(ctx, query, userID, chainID, address, label); err != nil {
		return err
	}
	return nil
}
// RemoveFromWatchlist removes an address from a user's watchlist for the
// given chain. Deleting a non-existent entry is not an error.
func (w *WatchlistService) RemoveFromWatchlist(ctx context.Context, userID string, chainID int, address string) error {
	query := `DELETE FROM watchlists WHERE user_id = $1 AND chain_id = $2 AND address = $3`
	_, err := w.db.Exec(ctx, query, userID, chainID, address)
	return err
}
// GetWatchlist returns a user's watchlist entries for the given chain,
// newest first. Rows that fail to scan are skipped.
func (w *WatchlistService) GetWatchlist(ctx context.Context, userID string, chainID int) ([]WatchlistItem, error) {
	query := `
	SELECT chain_id, address, label, created_at
	FROM watchlists
	WHERE user_id = $1 AND chain_id = $2
	ORDER BY created_at DESC
	`
	rows, err := w.db.Query(ctx, query, userID, chainID)
	if err != nil {
		return nil, fmt.Errorf("failed to query watchlist: %w", err)
	}
	defer rows.Close()
	var items []WatchlistItem
	for rows.Next() {
		var item WatchlistItem
		if err := rows.Scan(&item.ChainID, &item.Address, &item.Label, &item.CreatedAt); err != nil {
			// Skip malformed rows rather than failing the whole listing.
			continue
		}
		items = append(items, item)
	}
	// BUG FIX: iteration errors were previously ignored, silently returning
	// a truncated list. Surface them to the caller instead.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("failed to iterate watchlist: %w", err)
	}
	return items, nil
}
// WatchlistItem represents a single watchlist entry.
type WatchlistItem struct {
	ChainID int
	Address string
	Label   string
	// CreatedAt is scanned from the created_at column directly into a
	// string — assumes the driver converts the timestamp to text; confirm
	// this works with pgx or switch to time.Time.
	CreatedAt string
}

View File

@@ -0,0 +1,29 @@
package main
import (
"log"
"net/http"
"os"
"strconv"
"github.com/explorer/backend/api/websocket"
)
// main starts the standalone WebSocket server. The listen port defaults to
// 8081 and can be overridden via the WS_PORT environment variable; an
// unparsable value falls back to the default.
func main() {
	server := websocket.NewServer()
	go server.Start()
	http.HandleFunc("/ws", server.HandleWebSocket)

	port := 8081
	if raw := os.Getenv("WS_PORT"); raw != "" {
		if parsed, err := strconv.Atoi(raw); err == nil {
			port = parsed
		}
	}

	log.Printf("Starting WebSocket server on :%d", port)
	log.Fatal(http.ListenAndServe(":"+strconv.Itoa(port), nil))
}

View File

@@ -0,0 +1,225 @@
package websocket
import (
"encoding/json"
"log"
"net/http"
"sync"
"time"
"github.com/gorilla/websocket"
)
// upgrader upgrades HTTP requests to WebSocket connections.
// SECURITY NOTE: CheckOrigin accepts every origin, which disables CSRF
// protection for WebSocket handshakes — restrict before production use.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return true // Allow all origins in development
	},
}
// Server represents the WebSocket hub: it owns the client set and fans
// broadcast messages out to every connected client.
type Server struct {
	clients    map[*Client]bool // connected clients (guarded by mu)
	broadcast  chan []byte      // messages to fan out to all clients
	register   chan *Client     // new connections
	unregister chan *Client     // departing connections
	mu         sync.RWMutex     // guards clients
}
// Client represents a single WebSocket connection.
type Client struct {
	conn          *websocket.Conn
	send          chan []byte     // outbound queue drained by writePump
	server        *Server         // owning hub
	subscriptions map[string]bool // channels this client subscribed to
}
// NewServer creates a new WebSocket hub; Start must be run (typically in a
// goroutine) before clients can register or broadcasts can be delivered.
func NewServer() *Server {
	return &Server{
		clients:    make(map[*Client]bool),
		broadcast:  make(chan []byte),
		register:   make(chan *Client),
		unregister: make(chan *Client),
	}
}
// Start runs the hub loop: it processes registrations, unregistrations,
// and broadcast fan-out. It never returns; run it in its own goroutine.
func (s *Server) Start() {
	for {
		select {
		case client := <-s.register:
			s.mu.Lock()
			s.clients[client] = true
			total := len(s.clients)
			s.mu.Unlock()
			log.Printf("Client connected. Total clients: %d", total)
		case client := <-s.unregister:
			s.mu.Lock()
			if _, ok := s.clients[client]; ok {
				delete(s.clients, client)
				close(client.send)
			}
			total := len(s.clients)
			s.mu.Unlock()
			log.Printf("Client disconnected. Total clients: %d", total)
		case message := <-s.broadcast:
			// BUG FIX: this branch mutates s.clients (delete of slow
			// clients) but previously held only RLock; deleting from a map
			// under a read lock is not safe. Hold the write lock for the
			// whole fan-out. Slow clients (full send buffer) are dropped.
			s.mu.Lock()
			for client := range s.clients {
				select {
				case client.send <- message:
				default:
					close(client.send)
					delete(s.clients, client)
				}
			}
			s.mu.Unlock()
		}
	}
}
// HandleWebSocket upgrades an HTTP request to a WebSocket connection,
// registers the client with the hub, and starts its read/write pumps.
// The send buffer holds up to 256 outbound messages before the hub drops
// the client as slow.
func (s *Server) HandleWebSocket(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// Upgrade already wrote an HTTP error response; just log.
		log.Printf("WebSocket upgrade failed: %v", err)
		return
	}
	client := &Client{
		conn:          conn,
		send:          make(chan []byte, 256),
		server:        s,
		subscriptions: make(map[string]bool),
	}
	s.register <- client
	go client.writePump()
	go client.readPump()
}
// Broadcast sends a message to all connected clients.
// It blocks until the hub loop (Start) consumes the message, so Start must
// already be running.
func (s *Server) Broadcast(message []byte) {
	s.broadcast <- message
}
// readPump reads messages from the WebSocket connection until it fails or
// closes, then unregisters the client. A 60s read deadline is refreshed on
// every pong, pairing with writePump's 30s pings to detect dead peers.
// Messages that are not valid JSON objects are ignored.
func (c *Client) readPump() {
	defer func() {
		c.server.unregister <- c
		c.conn.Close()
	}()
	c.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
	c.conn.SetPongHandler(func(string) error {
		// Each pong extends the read deadline by another 60s.
		c.conn.SetReadDeadline(time.Now().Add(60 * time.Second))
		return nil
	})
	for {
		_, message, err := c.conn.ReadMessage()
		if err != nil {
			// Normal closes are expected; only log abnormal ones.
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
				log.Printf("WebSocket error: %v", err)
			}
			break
		}
		// Handle message; non-JSON payloads are silently dropped.
		var msg map[string]interface{}
		if err := json.Unmarshal(message, &msg); err != nil {
			continue
		}
		c.handleMessage(msg)
	}
}
// writePump is the single writer for the connection: it drains the send
// channel (batching any queued messages into one frame separated by '\n')
// and emits a ping every 30s. It exits — closing the connection — when the
// send channel is closed by the hub or any write fails.
func (c *Client) writePump() {
	ticker := time.NewTicker(30 * time.Second)
	defer func() {
		ticker.Stop()
		c.conn.Close()
	}()
	for {
		select {
		case message, ok := <-c.send:
			c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
			if !ok {
				// Hub closed the channel: tell the peer we're done.
				c.conn.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}
			w, err := c.conn.NextWriter(websocket.TextMessage)
			if err != nil {
				return
			}
			w.Write(message)
			// Drain any messages already queued into the same frame.
			n := len(c.send)
			for i := 0; i < n; i++ {
				w.Write([]byte{'\n'})
				w.Write(<-c.send)
			}
			if err := w.Close(); err != nil {
				return
			}
		case <-ticker.C:
			c.conn.SetWriteDeadline(time.Now().Add(10 * time.Second))
			if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				return
			}
		}
	}
}
// handleMessage dispatches an incoming client message by its "type" field:
// "subscribe"/"unsubscribe" update the client's channel set and are
// acknowledged; "ping" is answered with a "pong" carrying a unix timestamp.
// Messages without a string "type" (or with an unknown type) are ignored.
func (c *Client) handleMessage(msg map[string]interface{}) {
	kind, ok := msg["type"].(string)
	if !ok {
		return
	}
	switch kind {
	case "subscribe":
		channel, _ := msg["channel"].(string)
		c.subscriptions[channel] = true
		c.sendMessage(map[string]interface{}{"type": "subscribed", "channel": channel})
	case "unsubscribe":
		channel, _ := msg["channel"].(string)
		delete(c.subscriptions, channel)
		c.sendMessage(map[string]interface{}{"type": "unsubscribed", "channel": channel})
	case "ping":
		c.sendMessage(map[string]interface{}{"type": "pong", "timestamp": time.Now().Unix()})
	}
}
// sendMessage marshals msg and queues it on the client's send channel
// without blocking; if the buffer is full the channel is closed, which
// makes writePump shut the connection down.
// NOTE(review): closing c.send here can collide with the hub's own
// close(client.send) in unregister/broadcast handling, risking a
// double-close panic — confirm only one owner ever closes this channel.
func (c *Client) sendMessage(msg map[string]interface{}) {
	data, err := json.Marshal(msg)
	if err != nil {
		return
	}
	select {
	case c.send <- data:
	default:
		close(c.send)
	}
}

150
backend/auth/auth.go Normal file
View File

@@ -0,0 +1,150 @@
package auth
import (
"context"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"fmt"
"time"
"github.com/jackc/pgx/v5/pgxpool"
"golang.org/x/crypto/bcrypt"
)
// Auth handles user registration, login, and API-key management.
type Auth struct {
	db *pgxpool.Pool // shared Postgres pool
}
// NewAuth creates a new auth handler on the given pool.
func NewAuth(db *pgxpool.Pool) *Auth {
	return &Auth{db: db}
}
// User represents an authenticated user record (password hash excluded).
type User struct {
	ID        string
	Email     string
	Username  string
	CreatedAt time.Time
}
// RegisterUser registers a new user, storing only the bcrypt hash of the
// password (bcrypt.DefaultCost). Returns the created user row.
// A unique-constraint violation on email/username surfaces as the wrapped
// insert error.
func (a *Auth) RegisterUser(ctx context.Context, email, username, password string) (*User, error) {
	// Hash password
	hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return nil, fmt.Errorf("failed to hash password: %w", err)
	}
	// Insert user
	query := `
	INSERT INTO users (email, username, password_hash)
	VALUES ($1, $2, $3)
	RETURNING id, email, username, created_at
	`
	var user User
	err = a.db.QueryRow(ctx, query, email, username, hashedPassword).Scan(
		&user.ID, &user.Email, &user.Username, &user.CreatedAt,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to create user: %w", err)
	}
	return &user, nil
}
// AuthenticateUser verifies an email/password pair and returns the user.
// Both "unknown email" and "wrong password" deliberately return the same
// generic error to avoid leaking which accounts exist.
func (a *Auth) AuthenticateUser(ctx context.Context, email, password string) (*User, error) {
	var user User
	var passwordHash string
	query := `SELECT id, email, username, password_hash, created_at FROM users WHERE email = $1`
	err := a.db.QueryRow(ctx, query, email).Scan(
		&user.ID, &user.Email, &user.Username, &passwordHash, &user.CreatedAt,
	)
	if err != nil {
		return nil, fmt.Errorf("invalid credentials")
	}
	// Verify password
	if err := bcrypt.CompareHashAndPassword([]byte(passwordHash), []byte(password)); err != nil {
		return nil, fmt.Errorf("invalid credentials")
	}
	return &user, nil
}
// GenerateAPIKey generates a new API key for a user, persists only its
// SHA-256 hash along with per-tier rate limits, and returns the plaintext
// key (the only time it is available). Keys are "ek_" + 64 hex chars.
// Unknown tiers fall back to the free-tier limits.
func (a *Auth) GenerateAPIKey(ctx context.Context, userID, name string, tier string) (string, error) {
	// 32 random bytes -> 64 hex characters, prefixed so keys are recognizable.
	raw := make([]byte, 32)
	if _, err := rand.Read(raw); err != nil {
		return "", fmt.Errorf("failed to generate key: %w", err)
	}
	apiKey := "ek_" + hex.EncodeToString(raw)
	// Only the hash is stored; the plaintext key is never persisted.
	digest := sha256.Sum256([]byte(apiKey))
	digestHex := hex.EncodeToString(digest[:])
	// Per-tier rate limits; the zero case doubles as the free tier.
	perSecond, perMinute := 5, 100
	switch tier {
	case "pro":
		perSecond, perMinute = 20, 1000
	case "enterprise":
		perSecond, perMinute = 100, 10000
	}
	query := `
	INSERT INTO api_keys (user_id, key_hash, name, tier, rate_limit_per_second, rate_limit_per_minute)
	VALUES ($1, $2, $3, $4, $5, $6)
	`
	if _, err := a.db.Exec(ctx, query, userID, digestHex, name, tier, perSecond, perMinute); err != nil {
		return "", fmt.Errorf("failed to store API key: %w", err)
	}
	return apiKey, nil
}
// ValidateAPIKey looks up an API key by its SHA-256 hash and returns the
// owning user ID. Revoked keys are rejected.
func (a *Auth) ValidateAPIKey(ctx context.Context, apiKey string) (string, error) {
	sum := sha256.Sum256([]byte(apiKey))
	keyHash := hex.EncodeToString(sum[:])

	const query = `SELECT user_id, revoked FROM api_keys WHERE key_hash = $1`
	var (
		userID  string
		revoked bool
	)
	if err := a.db.QueryRow(ctx, query, keyHash).Scan(&userID, &revoked); err != nil {
		return "", fmt.Errorf("invalid API key")
	}
	if revoked {
		return "", fmt.Errorf("API key revoked")
	}
	// Best-effort usage tracking; an error here must not fail validation.
	a.db.Exec(ctx, `UPDATE api_keys SET last_used_at = NOW() WHERE key_hash = $1`, keyHash)
	return userID, nil
}

182
backend/auth/roles.go Normal file
View File

@@ -0,0 +1,182 @@
package auth
import (
"context"
"fmt"
"time"
"github.com/jackc/pgx/v5/pgxpool"
)
// RoleManager handles role-based access control
type RoleManager struct {
	db *pgxpool.Pool
}

// NewRoleManager returns a RoleManager backed by the given connection pool.
func NewRoleManager(db *pgxpool.Pool) *RoleManager {
	m := &RoleManager{db: db}
	return m
}
// UserRole represents a user's role and track assignment as stored in the
// operator_roles table.
type UserRole struct {
	Address    string    // user address (row key in operator_roles)
	Track      int       // track level, 1-4
	Roles      []string  // additional named roles from the roles column
	Approved   bool      // whether the assignment has been approved
	ApprovedBy string    // who approved the assignment
	ApprovedAt time.Time // when approval was granted
}
// AssignTrack assigns a track level to a user address, inserting or updating
// the operator_roles row and marking it approved by approvedBy.
func (r *RoleManager) AssignTrack(ctx context.Context, address string, track int, approvedBy string) error {
	// Only track levels 1-4 are meaningful; reject anything else up front.
	if track < 1 || track > 4 {
		return fmt.Errorf("invalid track level: %d (must be 1-4)", track)
	}
	const query = `
		INSERT INTO operator_roles (address, track_level, approved, approved_by, approved_at)
		VALUES ($1, $2, $3, $4, $5)
		ON CONFLICT (address) DO UPDATE SET
			track_level = EXCLUDED.track_level,
			approved = EXCLUDED.approved,
			approved_by = EXCLUDED.approved_by,
			approved_at = EXCLUDED.approved_at,
			updated_at = NOW()
	`
	if _, err := r.db.Exec(ctx, query, address, track, true, approvedBy, time.Now()); err != nil {
		return fmt.Errorf("failed to assign track: %w", err)
	}
	return nil
}
// GetUserRole gets the role and track for a user address.
//
// NOTE(review): ANY query error — including genuine database failures, not
// just "no rows" — falls through to the default, unapproved Track 1 role and
// is never surfaced. Consider distinguishing a missing row from a real error.
// Also assumes the driver can scan the roles column into []string — TODO
// confirm against the schema.
func (r *RoleManager) GetUserRole(ctx context.Context, address string) (*UserRole, error) {
	var role UserRole
	query := `
		SELECT address, track_level, roles, approved, approved_by, approved_at
		FROM operator_roles
		WHERE address = $1
	`
	err := r.db.QueryRow(ctx, query, address).Scan(
		&role.Address,
		&role.Track,
		&role.Roles,
		&role.Approved,
		&role.ApprovedBy,
		&role.ApprovedAt,
	)
	if err != nil {
		// User not found, return default Track 1
		return &UserRole{
			Address:  address,
			Track:    1,
			Roles:    []string{},
			Approved: false,
		}, nil
	}
	return &role, nil
}
// ApproveUser marks an existing operator_roles row as approved, recording
// who approved it and when.
func (r *RoleManager) ApproveUser(ctx context.Context, address string, approvedBy string) error {
	const query = `
		UPDATE operator_roles
		SET approved = TRUE,
			approved_by = $2,
			approved_at = NOW(),
			updated_at = NOW()
		WHERE address = $1
	`
	res, err := r.db.Exec(ctx, query, address, approvedBy)
	if err != nil {
		return fmt.Errorf("failed to approve user: %w", err)
	}
	// Zero rows updated means the address has no role row at all.
	if res.RowsAffected() == 0 {
		return fmt.Errorf("user not found")
	}
	return nil
}
// RevokeUser clears a user's approval, leaving the role row in place.
func (r *RoleManager) RevokeUser(ctx context.Context, address string) error {
	const query = `
		UPDATE operator_roles
		SET approved = FALSE,
			approved_at = NULL,
			updated_at = NOW()
		WHERE address = $1
	`
	res, err := r.db.Exec(ctx, query, address)
	if err != nil {
		return fmt.Errorf("failed to revoke user: %w", err)
	}
	// Zero rows updated means the address has no role row at all.
	if res.RowsAffected() == 0 {
		return fmt.Errorf("user not found")
	}
	return nil
}
// AddIPWhitelist adds an IP address to an operator's whitelist, updating the
// description if the (operator, IP) pair already exists.
func (r *RoleManager) AddIPWhitelist(ctx context.Context, operatorAddress string, ipAddress string, description string) error {
	const query = `
		INSERT INTO operator_ip_whitelist (operator_address, ip_address, description)
		VALUES ($1, $2, $3)
		ON CONFLICT (operator_address, ip_address) DO UPDATE SET
			description = EXCLUDED.description
	`
	if _, err := r.db.Exec(ctx, query, operatorAddress, ipAddress, description); err != nil {
		return fmt.Errorf("failed to add IP to whitelist: %w", err)
	}
	return nil
}
// IsIPWhitelisted reports whether ipAddress is on the whitelist for the
// given operator.
func (r *RoleManager) IsIPWhitelisted(ctx context.Context, operatorAddress string, ipAddress string) (bool, error) {
	const query = `
		SELECT COUNT(*)
		FROM operator_ip_whitelist
		WHERE operator_address = $1 AND ip_address = $2
	`
	var matches int
	if err := r.db.QueryRow(ctx, query, operatorAddress, ipAddress).Scan(&matches); err != nil {
		return false, fmt.Errorf("failed to check IP whitelist: %w", err)
	}
	return matches > 0, nil
}
// LogOperatorEvent logs an operator event for audit purposes.
//
// details is handed directly to the driver as the value for the JSONB
// details column; the previous re-conversion to map[string]interface{} was a
// no-op (details already has that type) and has been removed.
func (r *RoleManager) LogOperatorEvent(ctx context.Context, eventType string, chainID *int, operatorAddress string, targetResource string, action string, details map[string]interface{}, ipAddress string, userAgent string) error {
	query := `
		INSERT INTO operator_events (event_type, chain_id, operator_address, target_resource, action, details, ip_address, user_agent)
		VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
	`
	if _, err := r.db.Exec(ctx, query, eventType, chainID, operatorAddress, targetResource, action, details, ipAddress, userAgent); err != nil {
		return fmt.Errorf("failed to log operator event: %w", err)
	}
	return nil
}

288
backend/auth/wallet_auth.go Normal file
View File

@@ -0,0 +1,288 @@
package auth
import (
"context"
"crypto/rand"
"encoding/hex"
"fmt"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/golang-jwt/jwt/v4"
"github.com/jackc/pgx/v5/pgxpool"
)
// WalletAuth handles wallet-based authentication
type WalletAuth struct {
	db        *pgxpool.Pool
	jwtSecret []byte
}

// NewWalletAuth returns a WalletAuth that verifies signatures against nonces
// stored in db and signs session JWTs with jwtSecret.
func NewWalletAuth(db *pgxpool.Pool, jwtSecret []byte) *WalletAuth {
	w := &WalletAuth{db: db, jwtSecret: jwtSecret}
	return w
}
// NonceRequest represents a nonce request.
type NonceRequest struct {
	Address string `json:"address"` // wallet address requesting a nonce
}

// NonceResponse represents a nonce response.
type NonceResponse struct {
	Nonce     string    `json:"nonce"`      // hex-encoded random nonce to sign
	ExpiresAt time.Time `json:"expires_at"` // when the nonce stops being valid
}

// WalletAuthRequest represents a wallet authentication request.
type WalletAuthRequest struct {
	Address   string `json:"address"`   // wallet address being authenticated
	Signature string `json:"signature"` // signature over the nonce message
	Nonce     string `json:"nonce"`     // nonce previously issued via GenerateNonce
}

// WalletAuthResponse represents a wallet authentication response.
type WalletAuthResponse struct {
	Token       string    `json:"token"`       // signed session JWT
	ExpiresAt   time.Time `json:"expires_at"`  // JWT expiry
	Track       int       `json:"track"`       // assigned track level (1-4)
	Permissions []string  `json:"permissions"` // permission strings for the track
}
// GenerateNonce creates a fresh random nonce for the given wallet address and
// stores it with a 5-minute expiry; any previous nonce for the address is
// replaced.
func (w *WalletAuth) GenerateNonce(ctx context.Context, address string) (*NonceResponse, error) {
	if !common.IsHexAddress(address) {
		return nil, fmt.Errorf("invalid address format")
	}
	// Use the checksum form so storage and lookups use one canonical spelling.
	checksummed := common.HexToAddress(address).Hex()

	raw := make([]byte, 32)
	if _, err := rand.Read(raw); err != nil {
		return nil, fmt.Errorf("failed to generate nonce: %w", err)
	}
	nonce := hex.EncodeToString(raw)

	// Nonces are short-lived (5 minutes) and single-use per address.
	expiresAt := time.Now().Add(5 * time.Minute)
	const query = `
		INSERT INTO wallet_nonces (address, nonce, expires_at)
		VALUES ($1, $2, $3)
		ON CONFLICT (address) DO UPDATE SET
			nonce = EXCLUDED.nonce,
			expires_at = EXCLUDED.expires_at,
			created_at = NOW()
	`
	if _, err := w.db.Exec(ctx, query, checksummed, nonce, expiresAt); err != nil {
		return nil, fmt.Errorf("failed to store nonce: %w", err)
	}
	return &NonceResponse{Nonce: nonce, ExpiresAt: expiresAt}, nil
}
// AuthenticateWallet authenticates a wallet using a signed nonce.
//
// Flow: verify the stored nonce, recover the signer from the signature over
// the nonce message (EIP-191 personal-message hash via accounts.TextHash),
// compare it to the claimed address, then issue a JWT scoped to the user's
// track level. The nonce is single-use and deleted after success.
//
// Fix: the signature is validated for an optional "0x"/"0X" prefix and for
// the exact 65-byte [R || S || V] length before any indexing — previously
// req.Signature[2:] and sigBytes[64] panicked on short or malformed input,
// and signatures without a 0x prefix were silently corrupted.
func (w *WalletAuth) AuthenticateWallet(ctx context.Context, req *WalletAuthRequest) (*WalletAuthResponse, error) {
	if !common.IsHexAddress(req.Address) {
		return nil, fmt.Errorf("invalid address format")
	}
	normalizedAddr := common.HexToAddress(req.Address).Hex()

	// Verify the nonce previously issued by GenerateNonce.
	var storedNonce string
	var expiresAt time.Time
	query := `SELECT nonce, expires_at FROM wallet_nonces WHERE address = $1`
	err := w.db.QueryRow(ctx, query, normalizedAddr).Scan(&storedNonce, &expiresAt)
	if err != nil {
		return nil, fmt.Errorf("nonce not found or expired")
	}
	if time.Now().After(expiresAt) {
		return nil, fmt.Errorf("nonce expired")
	}
	if storedNonce != req.Nonce {
		return nil, fmt.Errorf("invalid nonce")
	}

	// Hash the exact message the client was asked to sign.
	message := fmt.Sprintf("Sign this message to authenticate with SolaceScanScout Explorer.\n\nNonce: %s", req.Nonce)
	messageHash := accounts.TextHash([]byte(message))

	// Accept the signature with or without a leading "0x".
	sigHex := req.Signature
	if len(sigHex) >= 2 && (sigHex[:2] == "0x" || sigHex[:2] == "0X") {
		sigHex = sigHex[2:]
	}
	sigBytes, err := hex.DecodeString(sigHex)
	if err != nil {
		return nil, fmt.Errorf("invalid signature format: %w", err)
	}
	// A secp256k1 recovery signature is exactly 65 bytes: R(32) || S(32) || V(1).
	if len(sigBytes) != 65 {
		return nil, fmt.Errorf("invalid signature length: %d", len(sigBytes))
	}
	// Normalize V from the Ethereum convention {27, 28} to {0, 1}.
	if sigBytes[64] >= 27 {
		sigBytes[64] -= 27
	}
	pubKey, err := crypto.SigToPub(messageHash, sigBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to recover public key: %w", err)
	}
	if crypto.PubkeyToAddress(*pubKey).Hex() != normalizedAddr {
		return nil, fmt.Errorf("signature does not match address")
	}

	// Get or create user and track level.
	track, err := w.getUserTrack(ctx, normalizedAddr)
	if err != nil {
		return nil, fmt.Errorf("failed to get user track: %w", err)
	}
	token, tokenExpiry, err := w.generateJWT(normalizedAddr, track)
	if err != nil {
		return nil, fmt.Errorf("failed to generate token: %w", err)
	}
	// Nonces are single-use; best-effort delete.
	w.db.Exec(ctx, `DELETE FROM wallet_nonces WHERE address = $1`, normalizedAddr)

	return &WalletAuthResponse{
		Token:       token,
		ExpiresAt:   tokenExpiry,
		Track:       track,
		Permissions: getPermissionsForTrack(track),
	}, nil
}
// getUserTrack gets the track level for a user address.
//
// Returns the operator_roles track when a row exists and is approved.
// NOTE(review): any query error (including real DB failures) and any
// unapproved row silently fall through to Track 1 — confirm this is intended.
func (w *WalletAuth) getUserTrack(ctx context.Context, address string) (int, error) {
	// Check if user exists in operator_roles (Track 4)
	var track int
	var approved bool
	query := `SELECT track_level, approved FROM operator_roles WHERE address = $1`
	err := w.db.QueryRow(ctx, query, address).Scan(&track, &approved)
	if err == nil && approved {
		return track, nil
	}
	// Check if user is approved for Track 2 or 3
	// For now, default to Track 1 (public)
	// In production, you'd have an approval table
	return 1, nil
}
// generateJWT signs a 24-hour HS256 session token carrying the wallet
// address and track level.
func (w *WalletAuth) generateJWT(address string, track int) (string, time.Time, error) {
	expiresAt := time.Now().Add(24 * time.Hour)
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"address": address,
		"track":   track,
		"exp":     expiresAt.Unix(),
		"iat":     time.Now().Unix(),
	})
	signed, err := token.SignedString(w.jwtSecret)
	if err != nil {
		return "", time.Time{}, fmt.Errorf("failed to sign token: %w", err)
	}
	return signed, expiresAt, nil
}
// ValidateJWT parses and verifies a session token, returning the embedded
// wallet address and track level.
func (w *WalletAuth) ValidateJWT(tokenString string) (string, int, error) {
	keyFunc := func(t *jwt.Token) (interface{}, error) {
		// Reject tokens signed with anything other than HMAC (e.g. "none").
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return w.jwtSecret, nil
	}
	token, err := jwt.Parse(tokenString, keyFunc)
	if err != nil {
		return "", 0, fmt.Errorf("failed to parse token: %w", err)
	}
	if !token.Valid {
		return "", 0, fmt.Errorf("invalid token")
	}
	claims, ok := token.Claims.(jwt.MapClaims)
	if !ok {
		return "", 0, fmt.Errorf("invalid token claims")
	}
	address, ok := claims["address"].(string)
	if !ok {
		return "", 0, fmt.Errorf("address not found in token")
	}
	// JSON numbers decode as float64; convert back to int.
	rawTrack, ok := claims["track"].(float64)
	if !ok {
		return "", 0, fmt.Errorf("track not found in token")
	}
	return address, int(rawTrack), nil
}
// getPermissionsForTrack returns the permission strings granted to a track
// level. Grants are cumulative: each track includes everything granted to
// the tracks below it.
func getPermissionsForTrack(track int) []string {
	// Track 1 (public) baseline, available to everyone.
	perms := []string{
		"explorer.read.blocks",
		"explorer.read.transactions",
		"explorer.read.address.basic",
		"explorer.read.bridge.status",
		"weth.wrap",
		"weth.unwrap",
	}
	grants := []struct {
		minTrack int
		perms    []string
	}{
		{2, []string{
			"explorer.read.address.full",
			"explorer.read.tokens",
			"explorer.read.tx_history",
			"explorer.read.internal_txs",
			"explorer.search.enhanced",
		}},
		{3, []string{
			"analytics.read.flows",
			"analytics.read.bridge",
			"analytics.read.token_distribution",
			"analytics.read.address_risk",
		}},
		{4, []string{
			"operator.read.bridge_events",
			"operator.read.validators",
			"operator.read.contracts",
			"operator.read.protocol_state",
			"operator.write.bridge_control",
		}},
	}
	for _, g := range grants {
		if track >= g.minTrack {
			perms = append(perms, g.perms...)
		}
	}
	return perms
}

View File

@@ -0,0 +1,88 @@
package kyc
import (
"context"
"fmt"
)
// KYCService handles KYC/KYB operations
type KYCService struct {
	provider KYCProvider
}

// NewKYCService wraps the given provider in a KYCService.
func NewKYCService(provider KYCProvider) *KYCService {
	s := &KYCService{provider: provider}
	return s
}
// KYCProvider is the interface a concrete KYC vendor integration implements.
type KYCProvider interface {
	// InitiateVerification starts a verification flow for the given request.
	InitiateVerification(ctx context.Context, req *VerificationRequest) (*VerificationResponse, error)
	// GetVerificationStatus returns the current status of a verification.
	GetVerificationStatus(ctx context.Context, verificationID string) (*VerificationStatus, error)
}
// VerificationRequest represents a KYC verification request.
type VerificationRequest struct {
	UserID       string // internal user identifier
	Email        string
	FirstName    string
	LastName     string
	Country      string // presumably an ISO country code — TODO confirm with provider
	DocumentType string // identity document type expected by the provider
}

// VerificationResponse represents a KYC verification response.
type VerificationResponse struct {
	VerificationID string // provider-side verification identifier
	RedirectURL    string // URL the user visits to complete verification
	Status         string
}

// VerificationStatus represents verification status.
type VerificationStatus struct {
	Status      string
	RiskTier    string
	Limits      *Limits // limits granted for the verified tier; may be nil
	CompletedAt string
}

// Limits represents user limits based on KYC tier.
type Limits struct {
	DailyLimit   string
	MonthlyLimit string
	YearlyLimit  string
}
// InitiateVerification initiates KYC verification by delegating to the
// configured provider.
func (k *KYCService) InitiateVerification(ctx context.Context, req *VerificationRequest) (*VerificationResponse, error) {
	return k.provider.InitiateVerification(ctx, req)
}

// GetVerificationStatus gets verification status by delegating to the
// configured provider.
func (k *KYCService) GetVerificationStatus(ctx context.Context, verificationID string) (*VerificationStatus, error) {
	return k.provider.GetVerificationStatus(ctx, verificationID)
}
// JumioProvider implements KYCProvider for Jumio
type JumioProvider struct {
	apiKey    string
	apiSecret string
}

// NewJumioProvider returns a JumioProvider configured with the given API
// credentials.
func NewJumioProvider(apiKey, apiSecret string) *JumioProvider {
	p := &JumioProvider{apiKey: apiKey, apiSecret: apiSecret}
	return p
}
// InitiateVerification is a stub; the real Jumio API call has not been wired
// up yet, so this always returns a "not implemented" error.
func (j *JumioProvider) InitiateVerification(ctx context.Context, req *VerificationRequest) (*VerificationResponse, error) {
	// Implementation would call Jumio API
	return nil, fmt.Errorf("not implemented - requires Jumio API integration")
}

// GetVerificationStatus is a stub; the real Jumio API call has not been wired
// up yet, so this always returns a "not implemented" error.
func (j *JumioProvider) GetVerificationStatus(ctx context.Context, verificationID string) (*VerificationStatus, error) {
	// Implementation would call Jumio API
	return nil, fmt.Errorf("not implemented - requires Jumio API integration")
}

View File

@@ -0,0 +1,89 @@
package ledger
import (
"context"
"fmt"
"time"
"github.com/jackc/pgx/v5/pgxpool"
)
// Ledger handles double-entry accounting
type Ledger struct {
	db *pgxpool.Pool
}

// NewLedger returns a Ledger backed by the given connection pool.
func NewLedger(db *pgxpool.Pool) *Ledger {
	l := &Ledger{db: db}
	return l
}
// Entry represents one side (debit or credit) of a ledger posting.
type Entry struct {
	ID          string
	CustomerID  string
	AccountType string // "asset", "liability", "equity"
	Amount      string // decimal amount as a string; cast to numeric in SQL
	Currency    string
	Description string
	Reference   string // external reference tying the debit and credit together
	CreatedAt   time.Time
}
// CreateEntry atomically records one debit and one credit ledger entry in a
// single transaction, rolling back if either insert fails.
//
// Double-entry invariants are now enforced up front: both entries must be
// present and carry the same amount and currency. Previously unbalanced or
// nil pairs were written silently (or panicked on a nil dereference).
func (l *Ledger) CreateEntry(ctx context.Context, debit, credit *Entry) error {
	if debit == nil || credit == nil {
		return fmt.Errorf("both debit and credit entries are required")
	}
	// NOTE: amounts are compared as strings; callers must use one canonical
	// decimal representation on both sides (e.g. "10.00" vs "10.00").
	if debit.Amount != credit.Amount {
		return fmt.Errorf("unbalanced entry: debit %s != credit %s", debit.Amount, credit.Amount)
	}
	if debit.Currency != credit.Currency {
		return fmt.Errorf("currency mismatch: debit %s != credit %s", debit.Currency, credit.Currency)
	}

	tx, err := l.db.Begin(ctx)
	if err != nil {
		return fmt.Errorf("failed to begin transaction: %w", err)
	}
	// Rollback is a no-op once Commit succeeds.
	defer tx.Rollback(ctx)

	// Insert debit entry.
	debitQuery := `
		INSERT INTO ledger_entries (
			customer_id, account_type, amount, currency, description, reference, side, created_at
		) VALUES ($1, $2, $3, $4, $5, $6, 'debit', NOW())
	`
	_, err = tx.Exec(ctx, debitQuery,
		debit.CustomerID, debit.AccountType, debit.Amount, debit.Currency,
		debit.Description, debit.Reference,
	)
	if err != nil {
		return fmt.Errorf("failed to create debit entry: %w", err)
	}

	// Insert credit entry.
	creditQuery := `
		INSERT INTO ledger_entries (
			customer_id, account_type, amount, currency, description, reference, side, created_at
		) VALUES ($1, $2, $3, $4, $5, $6, 'credit', NOW())
	`
	_, err = tx.Exec(ctx, creditQuery,
		credit.CustomerID, credit.AccountType, credit.Amount, credit.Currency,
		credit.Description, credit.Reference,
	)
	if err != nil {
		return fmt.Errorf("failed to create credit entry: %w", err)
	}
	return tx.Commit(ctx)
}
// GetBalance returns the net balance (debits minus credits) for a customer's
// account type, as a numeric string.
//
// The SUM is wrapped in COALESCE so an account with no entries yields 0 from
// the database itself. Previously SUM over zero rows returned NULL, which
// failed the string scan and was silently reported as "0" — the same path
// that also masks genuine database errors (that lenient fallback is kept for
// compatibility with existing callers, but now only triggers on real errors).
func (l *Ledger) GetBalance(ctx context.Context, customerID, accountType string) (string, error) {
	query := `
		SELECT
			COALESCE(SUM(CASE WHEN side = 'debit' THEN amount::numeric ELSE -amount::numeric END), 0) as balance
		FROM ledger_entries
		WHERE customer_id = $1 AND account_type = $2
	`
	var balance string
	err := l.db.QueryRow(ctx, query, customerID, accountType).Scan(&balance)
	if err != nil {
		// Preserve the original lenient behavior: treat failures as "0".
		return "0", nil
	}
	return balance, nil
}

View File

@@ -0,0 +1,77 @@
package benchmarks
import (
"testing"
"time"
"github.com/explorer/backend/api/track1"
)
// BenchmarkInMemoryCache_Get benchmarks cache Get operations
func BenchmarkInMemoryCache_Get(b *testing.B) {
	c := track1.NewInMemoryCache()
	c.Set("bench-key", []byte("bench-value"), 5*time.Minute)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_, _ = c.Get("bench-key")
	}
}

// BenchmarkInMemoryCache_Set benchmarks cache Set operations
func BenchmarkInMemoryCache_Set(b *testing.B) {
	c := track1.NewInMemoryCache()
	payload := []byte("bench-value")
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_ = c.Set("bench-key", payload, 5*time.Minute)
	}
}

// BenchmarkInMemoryRateLimiter_Allow benchmarks rate limiter Allow operations
func BenchmarkInMemoryRateLimiter_Allow(b *testing.B) {
	limiter := track1.NewInMemoryRateLimiter(track1.RateLimitConfig{RequestsPerMinute: 1000})
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		_ = limiter.Allow("bench-key")
	}
}

// BenchmarkCache_Concurrent benchmarks concurrent cache operations
func BenchmarkCache_Concurrent(b *testing.B) {
	c := track1.NewInMemoryCache()
	c.Set("bench-key", []byte("bench-value"), 5*time.Minute)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_, _ = c.Get("bench-key")
		}
	})
}

// BenchmarkRateLimiter_Concurrent benchmarks concurrent rate limiter operations
func BenchmarkRateLimiter_Concurrent(b *testing.B) {
	limiter := track1.NewInMemoryRateLimiter(track1.RateLimitConfig{RequestsPerMinute: 10000})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			_ = limiter.Allow("bench-key")
		}
	})
}

BIN
backend/bin/api-server Executable file

Binary file not shown.

View File

@@ -0,0 +1,101 @@
package bridge
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"strconv"
"time"
)
const (
	// ccipTimeout bounds the optional external quote lookup.
	ccipTimeout = 5 * time.Second
	// defaultCCIPFee is used when no quote service is configured or the
	// lookup fails.
	defaultCCIPFee = "100000000000000000" // ~0.1 LINK (18 decimals)
)

// CCIP-supported chain pair: 138 <-> 1
var ccipSupportedPairs = map[string]bool{
	"138-1": true,
	"1-138": true,
}

// ccipQuoteResponse is the JSON body returned by the optional quote service.
type ccipQuoteResponse struct {
	Fee string `json:"fee"`
}

// CCIPProvider implements Provider for Chainlink CCIP
type CCIPProvider struct {
	quoteURL string
	client   *http.Client
}

// NewCCIPProvider creates a new CCIP bridge provider. The quote endpoint is
// read from the CCIP_ROUTER_QUOTE_URL environment variable and is optional.
func NewCCIPProvider() *CCIPProvider {
	p := &CCIPProvider{
		quoteURL: os.Getenv("CCIP_ROUTER_QUOTE_URL"),
		client:   &http.Client{Timeout: ccipTimeout},
	}
	return p
}

// Name returns the provider name
func (p *CCIPProvider) Name() string {
	return "CCIP"
}

// SupportsRoute returns true for 138 <-> 1
func (p *CCIPProvider) SupportsRoute(fromChain, toChain int) bool {
	return ccipSupportedPairs[strconv.Itoa(fromChain)+"-"+strconv.Itoa(toChain)]
}
// GetQuote returns a bridge quote for 138 <-> 1.
//
// If a quote service is configured (CCIP_ROUTER_QUOTE_URL) its fee is used;
// any failure along the way silently falls back to defaultCCIPFee so a flaky
// quote service never blocks bridging. The deeply nested best-effort HTTP
// call has been extracted into fetchQuotedFee for readability.
func (p *CCIPProvider) GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	if !p.SupportsRoute(req.FromChain, req.ToChain) {
		return nil, fmt.Errorf("CCIP: unsupported route %d -> %d", req.FromChain, req.ToChain)
	}
	fee := defaultCCIPFee
	if p.quoteURL != "" {
		if quoted, ok := p.fetchQuotedFee(ctx, req); ok {
			fee = quoted
		}
	}
	return &BridgeQuote{
		Provider:      "CCIP",
		FromChain:     req.FromChain,
		ToChain:       req.ToChain,
		FromAmount:    req.Amount,
		ToAmount:      req.Amount,
		Fee:           fee,
		EstimatedTime: "5-15 min",
		Route: []BridgeStep{
			{Provider: "CCIP", From: strconv.Itoa(req.FromChain), To: strconv.Itoa(req.ToChain), Type: "bridge"},
		},
	}, nil
}

// fetchQuotedFee asks the configured quote service for a fee. The bool result
// is false on any marshal/transport/status/decode failure; errors are
// deliberately not propagated because the caller falls back to a default fee.
func (p *CCIPProvider) fetchQuotedFee(ctx context.Context, req *BridgeRequest) (string, bool) {
	payload, err := json.Marshal(map[string]interface{}{
		"sourceChain": req.FromChain,
		"destChain":   req.ToChain,
		"token":       req.FromToken,
		"amount":      req.Amount,
	})
	if err != nil {
		return "", false
	}
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, p.quoteURL, bytes.NewReader(payload))
	if err != nil {
		return "", false
	}
	httpReq.Header.Set("Content-Type", "application/json")
	resp, err := p.client.Do(httpReq)
	if err != nil || resp == nil {
		return "", false
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", false
	}
	var r ccipQuoteResponse
	if json.NewDecoder(resp.Body).Decode(&r) != nil || r.Fee == "" {
		return "", false
	}
	return r.Fee, true
}

View File

@@ -0,0 +1,169 @@
package bridge
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
const (
	hopAPIBase = "https://api.hop.exchange"
	hopTimeout = 10 * time.Second
)

// Hop-supported chain IDs: ethereum, optimism, arbitrum, polygon, gnosis, nova, base
var hopSupportedChains = map[int]bool{
	1:     true, // ethereum
	10:    true, // optimism
	42161: true, // arbitrum
	137:   true, // polygon
	100:   true, // gnosis
	42170: true, // nova
	8453:  true, // base
}

// hopChainIdToSlug maps chain IDs to the slugs the Hop API expects.
var hopChainIdToSlug = map[int]string{
	1:     "ethereum",
	10:    "optimism",
	42161: "arbitrum",
	137:   "polygon",
	100:   "gnosis",
	42170: "nova",
	8453:  "base",
}

// hopQuoteResponse represents Hop API /v1/quote response
type hopQuoteResponse struct {
	AmountIn                string  `json:"amountIn"`
	Slippage                float64 `json:"slippage"`
	AmountOutMin            string  `json:"amountOutMin"`
	DestinationAmountOutMin string  `json:"destinationAmountOutMin"`
	BonderFee               string  `json:"bonderFee"`
	EstimatedReceived       string  `json:"estimatedReceived"`
}

// HopProvider implements Provider for Hop Protocol
type HopProvider struct {
	apiBase string
	client  *http.Client
}

// NewHopProvider creates a new Hop Protocol bridge provider.
func NewHopProvider() *HopProvider {
	p := &HopProvider{
		apiBase: hopAPIBase,
		client:  &http.Client{Timeout: hopTimeout},
	}
	return p
}

// Name returns the provider name
func (p *HopProvider) Name() string {
	return "Hop"
}

// SupportsRoute reports whether both endpoints are Hop-supported chains.
func (p *HopProvider) SupportsRoute(fromChain, toChain int) bool {
	return hopSupportedChains[fromChain] && hopSupportedChains[toChain]
}
// GetQuote fetches a bridge quote from the Hop API
func (p *HopProvider) GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	fromSlug, ok := hopChainIdToSlug[req.FromChain]
	if !ok {
		return nil, fmt.Errorf("Hop: unsupported source chain %d", req.FromChain)
	}
	toSlug, ok := hopChainIdToSlug[req.ToChain]
	if !ok {
		return nil, fmt.Errorf("Hop: unsupported destination chain %d", req.ToChain)
	}
	if fromSlug == toSlug {
		return nil, fmt.Errorf("Hop: source and destination must differ")
	}

	// Hop token symbols: USDC, USDT, DAI, ETH, MATIC, xDAI
	q := url.Values{}
	q.Set("amount", req.Amount)
	q.Set("token", mapTokenToHop(req.FromToken))
	q.Set("fromChain", fromSlug)
	q.Set("toChain", toSlug)
	q.Set("slippage", "0.5")

	endpoint := fmt.Sprintf("%s/v1/quote?%s", p.apiBase, q.Encode())
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	resp, err := p.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	payload, readErr := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Hop API error %d: %s", resp.StatusCode, string(payload))
	}
	if readErr != nil {
		return nil, readErr
	}

	var quote hopQuoteResponse
	if err := json.Unmarshal(payload, &quote); err != nil {
		return nil, fmt.Errorf("failed to parse Hop response: %w", err)
	}
	// Fall back to the input amount when the API omits an estimate.
	received := quote.EstimatedReceived
	if received == "" {
		received = quote.AmountIn
	}
	return &BridgeQuote{
		Provider:      "Hop",
		FromChain:     req.FromChain,
		ToChain:       req.ToChain,
		FromAmount:    req.Amount,
		ToAmount:      received,
		Fee:           quote.BonderFee,
		EstimatedTime: "2-5 min",
		Route: []BridgeStep{
			{
				Provider: "Hop",
				From:     strconv.Itoa(req.FromChain),
				To:       strconv.Itoa(req.ToChain),
				Type:     "bridge",
			},
		},
	}, nil
}
// mapTokenToHop maps a token address or symbol to the symbol the Hop API
// expects. Unrecognized tokens fall back to USDC.
func mapTokenToHop(token string) string {
	// Common mappings - extend as needed
	switch token {
	case "ETH", "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE", "0x0000000000000000000000000000000000000000":
		return "ETH"
	case "DAI", "0x6B175474E89094C44Da98b954EedeAC495271d0F":
		return "DAI"
	case "USDT", "0xdAC17F958D2ee523a2206206994597C13D831ec7":
		return "USDT"
	case "USDC", "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48":
		return "USDC"
	case "MATIC":
		return "MATIC"
	case "xDAI":
		return "xDAI"
	}
	return "USDC"
}

View File

@@ -0,0 +1,175 @@
package bridge
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
const (
	lifiAPIBase = "https://li.quest"
	lifiTimeout = 10 * time.Second
)

// LiFi-supported chain IDs for SupportsRoute (subset of Li.Fi's 40+ chains)
var lifiSupportedChains = map[int]bool{
	1:      true, // Ethereum Mainnet
	137:    true, // Polygon
	10:     true, // Optimism
	8453:   true, // Base
	42161:  true, // Arbitrum One
	56:     true, // BNB Chain
	43114:  true, // Avalanche
	100:    true, // Gnosis Chain
	42220:  true, // Celo
	324:    true, // zkSync Era
	59144:  true, // Linea
	5000:   true, // Mantle
	534352: true, // Scroll
	25:     true, // Cronos
	250:    true, // Fantom
	1111:   true, // Wemix
}

// lifiQuoteResponse represents the Li.Fi API quote response structure
type lifiQuoteResponse struct {
	ID       string `json:"id"`
	Type     string `json:"type"`
	Tool     string `json:"tool"`
	Estimate *struct {
		FromAmount  string `json:"fromAmount"`
		ToAmount    string `json:"toAmount"`
		ToAmountMin string `json:"toAmountMin"`
	} `json:"estimate"`
	IncludedSteps []struct {
		Type     string `json:"type"`
		Tool     string `json:"tool"`
		Estimate *struct {
			FromAmount string `json:"fromAmount"`
			ToAmount   string `json:"toAmount"`
		} `json:"estimate"`
	} `json:"includedSteps"`
}

// LiFiProvider implements Provider for Li.Fi bridge aggregator
type LiFiProvider struct {
	apiBase string
	client  *http.Client
}

// NewLiFiProvider creates a new Li.Fi bridge provider.
func NewLiFiProvider() *LiFiProvider {
	p := &LiFiProvider{
		apiBase: lifiAPIBase,
		client:  &http.Client{Timeout: lifiTimeout},
	}
	return p
}

// Name returns the provider name
func (p *LiFiProvider) Name() string {
	return "LiFi"
}

// SupportsRoute reports whether both chains are in Li.Fi's supported set.
func (p *LiFiProvider) SupportsRoute(fromChain, toChain int) bool {
	return lifiSupportedChains[fromChain] && lifiSupportedChains[toChain]
}
// GetQuote fetches a bridge quote from the Li.Fi API
func (p *LiFiProvider) GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	// Li.Fi routes relative to a wallet address, so one is mandatory.
	if req.Recipient == "" {
		return nil, fmt.Errorf("recipient address required for Li.Fi")
	}

	q := url.Values{}
	q.Set("fromChain", strconv.Itoa(req.FromChain))
	q.Set("toChain", strconv.Itoa(req.ToChain))
	q.Set("fromToken", req.FromToken)
	q.Set("toToken", req.ToToken)
	q.Set("fromAmount", req.Amount)
	q.Set("fromAddress", req.Recipient)
	q.Set("toAddress", req.Recipient)
	q.Set("integrator", "explorer-bridge-aggregator")

	endpoint := fmt.Sprintf("%s/v1/quote?%s", p.apiBase, q.Encode())
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
	if err != nil {
		return nil, err
	}
	resp, err := p.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	payload, readErr := io.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Li.Fi API error %d: %s", resp.StatusCode, string(payload))
	}
	if readErr != nil {
		return nil, readErr
	}

	var quote lifiQuoteResponse
	if err := json.Unmarshal(payload, &quote); err != nil {
		return nil, fmt.Errorf("failed to parse Li.Fi response: %w", err)
	}
	if quote.Estimate == nil {
		return nil, fmt.Errorf("Li.Fi response missing estimate")
	}

	// Prefer the top-level estimate; fall back to the final included step.
	toAmount := quote.Estimate.ToAmount
	if toAmount == "" {
		if n := len(quote.IncludedSteps); n > 0 && quote.IncludedSteps[n-1].Estimate != nil {
			toAmount = quote.IncludedSteps[n-1].Estimate.ToAmount
		}
	}
	if toAmount == "" {
		return nil, fmt.Errorf("Li.Fi response missing toAmount")
	}

	steps := make([]BridgeStep, 0, len(quote.IncludedSteps))
	for _, s := range quote.IncludedSteps {
		kind := "bridge"
		if s.Type == "swap" {
			kind = "swap"
		}
		steps = append(steps, BridgeStep{
			Provider: s.Tool,
			From:     strconv.Itoa(req.FromChain),
			To:       strconv.Itoa(req.ToChain),
			Type:     kind,
		})
	}
	if len(steps) == 0 {
		steps = append(steps, BridgeStep{
			Provider: quote.Tool,
			From:     strconv.Itoa(req.FromChain),
			To:       strconv.Itoa(req.ToChain),
			Type:     quote.Type,
		})
	}

	return &BridgeQuote{
		Provider:      "LiFi",
		FromChain:     req.FromChain,
		ToChain:       req.ToChain,
		FromAmount:    req.Amount,
		ToAmount:      toAmount,
		Fee:           "0",
		EstimatedTime: "1-5 min",
		Route:         steps,
	}, nil
}

View File

@@ -0,0 +1,95 @@
package bridge
import (
	"context"
	"fmt"
	"math/big"
)
// Provider is the interface every bridge integration implements.
type Provider interface {
	// GetQuote returns a quote for the requested transfer.
	GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error)
	// Name returns a short human-readable provider name.
	Name() string
	// SupportsRoute reports whether the provider can bridge fromChain -> toChain.
	SupportsRoute(fromChain, toChain int) bool
}

// BridgeRequest represents a bridge request.
type BridgeRequest struct {
	FromChain int    // source chain ID
	ToChain   int    // destination chain ID
	FromToken string // token address or symbol on the source chain
	ToToken   string // token address or symbol on the destination chain
	Amount    string // transfer amount as a decimal string — units depend on provider; TODO confirm
	Recipient string // destination wallet address
}

// BridgeQuote represents a bridge quote.
type BridgeQuote struct {
	Provider      string // name of the quoting provider
	FromChain     int
	ToChain       int
	FromAmount    string
	ToAmount      string // estimated amount received on the destination chain
	Fee           string
	EstimatedTime string // human-readable estimate, e.g. "2-5 min"
	Route         []BridgeStep
}

// BridgeStep represents a step in bridge route.
type BridgeStep struct {
	Provider string
	From     string
	To       string
	Type     string // "bridge" or "swap"
}
// Aggregator aggregates quotes from multiple bridge providers
type Aggregator struct {
	providers []Provider
}

// NewAggregator creates a new bridge aggregator wired with every known
// provider.
func NewAggregator() *Aggregator {
	providers := []Provider{
		NewLiFiProvider(),      // Li.Fi: 40+ chains, swap+bridge aggregation
		NewSocketProvider(),    // Socket/Bungee: 40+ chains
		NewSquidProvider(),     // Squid: Axelar-based, 50+ chains
		NewSymbiosisProvider(), // Symbiosis: 30+ chains
		NewRelayProvider(),     // Relay.link: EVM chains
		NewStargateProvider(),  // Stargate: LayerZero
		NewCCIPProvider(),      // Chainlink CCIP (138 <-> 1)
		NewHopProvider(),       // Hop Protocol (ETH <-> L2)
	}
	return &Aggregator{providers: providers}
}
// GetBestQuote gets the best quote from all providers that support the route.
//
// Fix: ToAmount values are compared numerically via math/big instead of
// lexicographically as strings — the previous string comparison considered
// "9" greater than "10". Quotes whose ToAmount does not parse as a number are
// skipped rather than silently winning or losing on string order.
func (a *Aggregator) GetBestQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	var bestQuote *BridgeQuote
	var bestAmount *big.Float
	for _, provider := range a.providers {
		if !provider.SupportsRoute(req.FromChain, req.ToChain) {
			continue
		}
		quote, err := provider.GetQuote(ctx, req)
		if err != nil {
			// Best-effort aggregation: one failing provider must not block the rest.
			continue
		}
		amount, ok := new(big.Float).SetString(quote.ToAmount)
		if !ok {
			continue
		}
		if bestQuote == nil || amount.Cmp(bestAmount) > 0 {
			bestQuote = quote
			bestAmount = amount
		}
	}
	if bestQuote == nil {
		return nil, fmt.Errorf("no bridge quotes available")
	}
	return bestQuote, nil
}
// CCIPProvider is implemented in ccip_provider.go
// HopProvider is implemented in hop_provider.go

View File

@@ -0,0 +1,148 @@
package bridge
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"time"
)
const (
	// relayAPIBase is the public Relay.link REST endpoint.
	relayAPIBase = "https://api.relay.link"
	// relayTimeout bounds each quote HTTP round trip.
	relayTimeout = 10 * time.Second
)

// Relay-supported chain IDs (EVM chains, configurable)
var relaySupportedChains = map[int]bool{
	1:      true, // Ethereum
	10:     true, // Optimism
	137:    true, // Polygon
	42161:  true, // Arbitrum
	8453:   true, // Base
	56:     true, // BNB Chain
	43114:  true, // Avalanche
	100:    true, // Gnosis
	25:     true, // Cronos
	324:    true, // zkSync
	59144:  true, // Linea
	534352: true, // Scroll
}

// relayQuoteRequest is the JSON body POSTed to Relay's /quote/v2 endpoint.
type relayQuoteRequest struct {
	User                string `json:"user"`
	OriginChainID       int    `json:"originChainId"`
	DestinationChainID  int    `json:"destinationChainId"`
	OriginCurrency      string `json:"originCurrency"`
	DestinationCurrency string `json:"destinationCurrency"`
	Amount              string `json:"amount"`
	TradeType           string `json:"tradeType"`
	Recipient           string `json:"recipient,omitempty"`
}

// relayQuoteResponse mirrors the subset of the Relay quote payload we read;
// pointers distinguish absent objects from empty ones.
type relayQuoteResponse struct {
	Details *struct {
		CurrencyOut *struct {
			Amount string `json:"amount"`
		} `json:"currencyOut"`
	} `json:"details"`
}

// RelayProvider implements Provider for Relay.link
type RelayProvider struct {
	apiBase string       // API base URL
	client  *http.Client // HTTP client with relayTimeout applied
}
// NewRelayProvider constructs a Relay.link provider backed by the public
// Relay API with a bounded per-request timeout.
func NewRelayProvider() *RelayProvider {
	client := &http.Client{Timeout: relayTimeout}
	return &RelayProvider{apiBase: relayAPIBase, client: client}
}

// Name identifies this provider in aggregated results.
func (p *RelayProvider) Name() string { return "Relay" }

// SupportsRoute reports whether both endpoints of the route are chains
// Relay.link can bridge between.
func (p *RelayProvider) SupportsRoute(fromChain, toChain int) bool {
	if !relaySupportedChains[fromChain] {
		return false
	}
	return relaySupportedChains[toChain]
}
// GetQuote requests a quote from Relay's /quote/v2 endpoint and maps the
// response onto the aggregator's BridgeQuote shape. Relay needs a concrete
// recipient, so an empty one is rejected before any network work.
func (p *RelayProvider) GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	if req.Recipient == "" {
		return nil, fmt.Errorf("Relay: recipient address required")
	}
	payload, err := json.Marshal(relayQuoteRequest{
		User:                req.Recipient,
		OriginChainID:       req.FromChain,
		DestinationChainID:  req.ToChain,
		OriginCurrency:      req.FromToken,
		DestinationCurrency: req.ToToken,
		Amount:              req.Amount,
		TradeType:           "EXACT_INPUT",
		Recipient:           req.Recipient,
	})
	if err != nil {
		return nil, err
	}
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, p.apiBase+"/quote/v2", bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	httpReq.Header.Set("Content-Type", "application/json")
	resp, err := p.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Relay API error %d: %s", resp.StatusCode, string(raw))
	}
	var quote relayQuoteResponse
	if err := json.Unmarshal(raw, &quote); err != nil {
		return nil, fmt.Errorf("failed to parse Relay response: %w", err)
	}
	var toAmount string
	if quote.Details != nil && quote.Details.CurrencyOut != nil {
		toAmount = quote.Details.CurrencyOut.Amount
	}
	if toAmount == "" {
		return nil, fmt.Errorf("Relay: no quote amount")
	}
	route := []BridgeStep{{
		Provider: "Relay",
		From:     strconv.Itoa(req.FromChain),
		To:       strconv.Itoa(req.ToChain),
		Type:     "bridge",
	}}
	return &BridgeQuote{
		Provider:      "Relay",
		FromChain:     req.FromChain,
		ToChain:       req.ToChain,
		FromAmount:    req.Amount,
		ToAmount:      toAmount,
		Fee:           "0",
		EstimatedTime: "1-5 min",
		Route:         route,
	}, nil
}

View File

@@ -0,0 +1,92 @@
package bridge
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
const (
	// socketAPIBase is the public Bungee (Socket) backend endpoint.
	socketAPIBase = "https://public-backend.bungee.exchange"
	// socketTimeout bounds each quote HTTP round trip.
	socketTimeout = 10 * time.Second
)

// socketSupportedChains lists the chain IDs Socket can bridge between.
var socketSupportedChains = map[int]bool{
	1: true, 10: true, 137: true, 42161: true, 8453: true,
	56: true, 43114: true, 100: true, 25: true, 250: true,
	324: true, 59144: true, 534352: true, 42220: true, 5000: true, 1111: true,
}

// socketQuoteResponse mirrors the subset of the Bungee quote envelope we
// read; pointers distinguish absent objects from empty ones.
type socketQuoteResponse struct {
	Success bool `json:"success"`
	Result  *struct {
		Route *struct {
			ToAmount    string `json:"toAmount"`
			ToAmountMin string `json:"toAmountMin"`
		} `json:"route"`
	} `json:"result"`
	Message string `json:"message"`
}

// SocketProvider implements Provider for the Socket/Bungee aggregator.
type SocketProvider struct {
	apiBase string       // API base URL
	client  *http.Client // HTTP client with socketTimeout applied
}

// NewSocketProvider creates a Socket provider backed by the public API.
func NewSocketProvider() *SocketProvider {
	return &SocketProvider{apiBase: socketAPIBase, client: &http.Client{Timeout: socketTimeout}}
}

// Name returns the provider name.
func (p *SocketProvider) Name() string { return "Socket" }

// SupportsRoute reports whether both chains are Socket-supported.
func (p *SocketProvider) SupportsRoute(fromChain, toChain int) bool {
	return socketSupportedChains[fromChain] && socketSupportedChains[toChain]
}
// GetQuote fetches a bridge quote from the Socket (Bungee) public API.
//
// Fixes: the io.ReadAll error is no longer discarded, and a non-200 response
// whose body is not the expected JSON envelope is reported with its HTTP
// status instead of a bare parse error.
func (p *SocketProvider) GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	if req.Recipient == "" {
		return nil, fmt.Errorf("Socket: recipient required")
	}
	params := url.Values{}
	params.Set("fromChainId", strconv.Itoa(req.FromChain))
	params.Set("toChainId", strconv.Itoa(req.ToChain))
	params.Set("fromTokenAddress", req.FromToken)
	params.Set("toTokenAddress", req.ToToken)
	params.Set("fromAmount", req.Amount)
	params.Set("recipient", req.Recipient)
	apiURL := fmt.Sprintf("%s/api/v1/bungee/quote?%s", p.apiBase, params.Encode())
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, apiURL, nil)
	if err != nil {
		return nil, err
	}
	resp, err := p.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var r socketQuoteResponse
	if err := json.Unmarshal(body, &r); err != nil {
		// Non-JSON error pages (gateway errors etc.) are more useful
		// reported with their status code than as a decode failure.
		if resp.StatusCode != http.StatusOK {
			return nil, fmt.Errorf("Socket API error %d: %s", resp.StatusCode, string(body))
		}
		return nil, fmt.Errorf("Socket parse error: %w", err)
	}
	if !r.Success || r.Result == nil || r.Result.Route == nil {
		return nil, fmt.Errorf("Socket API: %s", r.Message)
	}
	toAmount := r.Result.Route.ToAmount
	if toAmount == "" {
		toAmount = r.Result.Route.ToAmountMin // fall back to the guaranteed minimum
	}
	if toAmount == "" {
		return nil, fmt.Errorf("Socket: no amount")
	}
	steps := []BridgeStep{{Provider: "Socket", From: strconv.Itoa(req.FromChain), To: strconv.Itoa(req.ToChain), Type: "bridge"}}
	return &BridgeQuote{
		Provider: "Socket", FromChain: req.FromChain, ToChain: req.ToChain,
		FromAmount: req.Amount, ToAmount: toAmount, Fee: "0", EstimatedTime: "1-5 min", Route: steps,
	}, nil
}

View File

@@ -0,0 +1,106 @@
package bridge
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"time"
)
const (
	// squidAPIBase is the Squid Router v2 REST endpoint.
	squidAPIBase = "https://v2.api.squidrouter.com"
	// squidTimeout bounds each quote HTTP round trip.
	squidTimeout = 10 * time.Second
	// squidIntegrator is the x-integrator-id header Squid requires.
	squidIntegrator = "explorer-bridge-aggregator"
)

// squidSupportedChains lists the chain IDs Squid can bridge between.
var squidSupportedChains = map[int]bool{
	1: true, 10: true, 137: true, 42161: true, 8453: true,
	56: true, 43114: true, 100: true, 25: true, 250: true,
	324: true, 59144: true, 534352: true, 42220: true, 5000: true, 1111: true,
}

// squidReq is the JSON body POSTed to Squid's /v2/route endpoint. Chain IDs
// are sent as strings, per the Squid API.
type squidReq struct {
	FromAddress string `json:"fromAddress"`
	FromChain   string `json:"fromChain"`
	FromToken   string `json:"fromToken"`
	FromAmount  string `json:"fromAmount"`
	ToChain     string `json:"toChain"`
	ToToken     string `json:"toToken"`
	ToAddress   string `json:"toAddress"`
	Slippage    int    `json:"slippage"`
}

// squidResp mirrors the subset of the Squid route payload we read.
type squidResp struct {
	Route *struct {
		Estimate *struct {
			ToAmount    string `json:"toAmount"`
			ToAmountMin string `json:"toAmountMin"`
		} `json:"estimate"`
	} `json:"route"`
}

// SquidProvider implements Provider for the Squid (Axelar) router.
type SquidProvider struct {
	apiBase string       // API base URL
	client  *http.Client // HTTP client with squidTimeout applied
}

// NewSquidProvider creates a Squid provider backed by the public API.
func NewSquidProvider() *SquidProvider {
	return &SquidProvider{apiBase: squidAPIBase, client: &http.Client{Timeout: squidTimeout}}
}

// Name returns the provider name.
func (p *SquidProvider) Name() string { return "Squid" }

// SupportsRoute reports whether both chains are Squid-supported.
func (p *SquidProvider) SupportsRoute(fromChain, toChain int) bool {
	return squidSupportedChains[fromChain] && squidSupportedChains[toChain]
}
// GetQuote fetches a bridge quote from the Squid Router v2 API. When no
// recipient is supplied, the zero address is used as a placeholder (quotes do
// not move funds).
//
// Fixes: the json.Marshal and io.ReadAll errors were previously discarded
// with blank identifiers; both are now propagated.
func (p *SquidProvider) GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	addr := req.Recipient
	if addr == "" {
		addr = "0x0000000000000000000000000000000000000000"
	}
	bodyReq := squidReq{
		FromAddress: addr, FromChain: strconv.Itoa(req.FromChain), FromToken: req.FromToken,
		FromAmount: req.Amount, ToChain: strconv.Itoa(req.ToChain), ToToken: req.ToToken,
		ToAddress: addr, Slippage: 1,
	}
	jsonBody, err := json.Marshal(bodyReq)
	if err != nil {
		return nil, err
	}
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, p.apiBase+"/v2/route", bytes.NewReader(jsonBody))
	if err != nil {
		return nil, err
	}
	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("x-integrator-id", squidIntegrator)
	resp, err := p.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Squid API %d: %s", resp.StatusCode, string(body))
	}
	var r squidResp
	if err := json.Unmarshal(body, &r); err != nil {
		return nil, err
	}
	if r.Route == nil || r.Route.Estimate == nil {
		return nil, fmt.Errorf("Squid: no route")
	}
	toAmount := r.Route.Estimate.ToAmount
	if toAmount == "" {
		toAmount = r.Route.Estimate.ToAmountMin // fall back to the guaranteed minimum
	}
	if toAmount == "" {
		return nil, fmt.Errorf("Squid: no amount")
	}
	return &BridgeQuote{
		Provider: "Squid", FromChain: req.FromChain, ToChain: req.ToChain,
		FromAmount: req.Amount, ToAmount: toAmount, Fee: "0", EstimatedTime: "1-5 min",
		Route: []BridgeStep{{Provider: "Squid", From: strconv.Itoa(req.FromChain), To: strconv.Itoa(req.ToChain), Type: "bridge"}},
	}, nil
}

View File

@@ -0,0 +1,178 @@
package bridge
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
const (
	// stargateAPIBase is the public Stargate Finance REST endpoint.
	stargateAPIBase = "https://stargate.finance/api/v1"
	// stargateTimeout bounds each quote HTTP round trip.
	stargateTimeout = 10 * time.Second
)

// chainIDToStargateKey maps chain ID to Stargate chain key
var stargateChainKeys = map[int]string{
	1:      "ethereum",
	10:     "optimism",
	137:    "polygon",
	42161:  "arbitrum",
	8453:   "base",
	56:     "bnb",
	43114:  "avalanche",
	25:     "cronos",
	100:    "gnosis",
	324:    "zksync",
	59144:  "linea",
	534352: "scroll",
}

// Stargate-supported chain IDs (kept in sync with stargateChainKeys).
var stargateSupportedChains = map[int]bool{
	1:      true,
	10:     true,
	137:    true,
	42161:  true,
	8453:   true,
	56:     true,
	43114:  true,
	25:     true,
	100:    true,
	324:    true,
	59144:  true,
	534352: true,
}

// stargateQuoteResponse mirrors the subset of the /quotes payload we read;
// each entry in Quotes is one candidate route.
type stargateQuoteResponse struct {
	Quotes []struct {
		Bridge       string `json:"bridge"`
		SrcAmount    string `json:"srcAmount"`
		DstAmount    string `json:"dstAmount"`
		DstAmountMin string `json:"dstAmountMin"`
		Error        string `json:"error"` // non-empty marks an unusable quote
		Duration     *struct {
			Estimated int `json:"estimated"`
		} `json:"duration"`
	} `json:"quotes"`
}

// StargateProvider implements Provider for Stargate (LayerZero)
type StargateProvider struct {
	apiBase string       // API base URL
	client  *http.Client // HTTP client with stargateTimeout applied
}

// NewStargateProvider creates a new Stargate bridge provider
func NewStargateProvider() *StargateProvider {
	return &StargateProvider{
		apiBase: stargateAPIBase,
		client: &http.Client{
			Timeout: stargateTimeout,
		},
	}
}

// Name returns the provider name
func (p *StargateProvider) Name() string {
	return "Stargate"
}

// SupportsRoute returns true if Stargate supports the fromChain->toChain route
func (p *StargateProvider) SupportsRoute(fromChain, toChain int) bool {
	return stargateSupportedChains[fromChain] && stargateSupportedChains[toChain]
}
// GetQuote fetches quotes from the Stargate API and returns the candidate
// route with the highest destination amount.
//
// Fixes: (1) the caller's *BridgeRequest is no longer mutated when Recipient
// is empty — a local placeholder is used instead; (2) best-quote selection
// previously compared DstAmount strings with >, which is lexicographic and
// ranks "9" above "10" — amounts are now compared numerically.
func (p *StargateProvider) GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	srcKey, ok := stargateChainKeys[req.FromChain]
	if !ok {
		return nil, fmt.Errorf("Stargate: unsupported fromChain %d", req.FromChain)
	}
	dstKey, ok := stargateChainKeys[req.ToChain]
	if !ok {
		return nil, fmt.Errorf("Stargate: unsupported toChain %d", req.ToChain)
	}
	// Quotes don't move funds, so the zero address is a safe placeholder.
	recipient := req.Recipient
	if recipient == "" {
		recipient = "0x0000000000000000000000000000000000000000"
	}
	params := url.Values{}
	params.Set("srcToken", req.FromToken)
	params.Set("dstToken", req.ToToken)
	params.Set("srcChainKey", srcKey)
	params.Set("dstChainKey", dstKey)
	params.Set("srcAddress", recipient)
	params.Set("dstAddress", recipient)
	params.Set("srcAmount", req.Amount)
	params.Set("dstAmountMin", "0")
	apiURL := fmt.Sprintf("%s/quotes?%s", p.apiBase, params.Encode())
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, apiURL, nil)
	if err != nil {
		return nil, err
	}
	resp, err := p.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Stargate API error %d: %s", resp.StatusCode, string(body))
	}
	var stargateResp stargateQuoteResponse
	if err := json.Unmarshal(body, &stargateResp); err != nil {
		return nil, fmt.Errorf("failed to parse Stargate response: %w", err)
	}
	// moreThan compares non-negative decimal strings numerically:
	// strip leading zeros, compare by length, then byte-wise.
	moreThan := func(x, y string) bool {
		for len(x) > 1 && x[0] == '0' {
			x = x[1:]
		}
		for len(y) > 1 && y[0] == '0' {
			y = y[1:]
		}
		if len(x) != len(y) {
			return len(x) > len(y)
		}
		return x > y
	}
	bestIdx := -1
	for i := range stargateResp.Quotes {
		q := &stargateResp.Quotes[i]
		if q.Error != "" {
			continue // provider marked this route unusable
		}
		if bestIdx < 0 || moreThan(q.DstAmount, stargateResp.Quotes[bestIdx].DstAmount) {
			bestIdx = i
		}
	}
	if bestIdx < 0 {
		return nil, fmt.Errorf("Stargate: no valid quotes")
	}
	bestQuote := &stargateResp.Quotes[bestIdx]
	estTime := "1-5 min"
	if bestQuote.Duration != nil && bestQuote.Duration.Estimated > 0 {
		estTime = fmt.Sprintf("%d sec", bestQuote.Duration.Estimated)
	}
	return &BridgeQuote{
		Provider:      "Stargate",
		FromChain:     req.FromChain,
		ToChain:       req.ToChain,
		FromAmount:    req.Amount,
		ToAmount:      bestQuote.DstAmount,
		Fee:           "0",
		EstimatedTime: estTime,
		Route: []BridgeStep{{
			Provider: bestQuote.Bridge,
			From:     strconv.Itoa(req.FromChain),
			To:       strconv.Itoa(req.ToChain),
			Type:     "bridge",
		}},
	}, nil
}

View File

@@ -0,0 +1,95 @@
package bridge
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"time"
)
const (
	// symbiosisAPIBase is the Symbiosis cross-chain REST endpoint.
	symbiosisAPIBase = "https://api.symbiosis.finance/crosschain"
	// symbiosisTimeout bounds each quote HTTP round trip.
	symbiosisTimeout = 10 * time.Second
)

// symbiosisSupportedChains lists the chain IDs Symbiosis can bridge between.
var symbiosisSupportedChains = map[int]bool{
	1: true, 10: true, 137: true, 42161: true, 8453: true,
	56: true, 43114: true, 100: true, 25: true, 250: true,
	324: true, 59144: true, 534352: true, 42220: true, 5000: true,
}

// symbiosisReq is the JSON body POSTed to Symbiosis's /v2/quote endpoint.
type symbiosisReq struct {
	Amount        string `json:"amount"`
	TokenInChain  int    `json:"tokenInChainId"`
	TokenIn       string `json:"tokenIn"`
	TokenOutChain int    `json:"tokenOutChainId"`
	TokenOut      string `json:"tokenOut"`
	From          string `json:"from"`
	Slippage      int    `json:"slippage"`
}

// symbiosisResp mirrors the subset of the Symbiosis quote payload we read.
type symbiosisResp struct {
	AmountOut    string `json:"amountOut"`
	AmountOutMin string `json:"amountOutMin"`
}

// SymbiosisProvider implements Provider for the Symbiosis protocol.
type SymbiosisProvider struct {
	apiBase string       // API base URL
	client  *http.Client // HTTP client with symbiosisTimeout applied
}

// NewSymbiosisProvider creates a Symbiosis provider backed by the public API.
func NewSymbiosisProvider() *SymbiosisProvider {
	return &SymbiosisProvider{apiBase: symbiosisAPIBase, client: &http.Client{Timeout: symbiosisTimeout}}
}

// Name returns the provider name.
func (p *SymbiosisProvider) Name() string { return "Symbiosis" }

// SupportsRoute reports whether both chains are Symbiosis-supported.
func (p *SymbiosisProvider) SupportsRoute(fromChain, toChain int) bool {
	return symbiosisSupportedChains[fromChain] && symbiosisSupportedChains[toChain]
}
// GetQuote fetches a bridge quote from the Symbiosis API. When no recipient
// is supplied, the zero address is used as a placeholder (quotes do not move
// funds). Slippage is expressed in basis points (100 = 1%).
//
// Fixes: the json.Marshal and io.ReadAll errors were previously discarded
// with blank identifiers; both are now propagated.
func (p *SymbiosisProvider) GetQuote(ctx context.Context, req *BridgeRequest) (*BridgeQuote, error) {
	addr := req.Recipient
	if addr == "" {
		addr = "0x0000000000000000000000000000000000000000"
	}
	bodyReq := symbiosisReq{
		Amount: req.Amount, TokenInChain: req.FromChain, TokenIn: req.FromToken,
		TokenOutChain: req.ToChain, TokenOut: req.ToToken, From: addr, Slippage: 100,
	}
	jsonBody, err := json.Marshal(bodyReq)
	if err != nil {
		return nil, err
	}
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, p.apiBase+"/v2/quote", bytes.NewReader(jsonBody))
	if err != nil {
		return nil, err
	}
	httpReq.Header.Set("Content-Type", "application/json")
	resp, err := p.client.Do(httpReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("Symbiosis API %d: %s", resp.StatusCode, string(body))
	}
	var r symbiosisResp
	if err := json.Unmarshal(body, &r); err != nil {
		return nil, err
	}
	toAmount := r.AmountOut
	if toAmount == "" {
		toAmount = r.AmountOutMin // fall back to the guaranteed minimum
	}
	if toAmount == "" {
		return nil, fmt.Errorf("Symbiosis: no amount")
	}
	return &BridgeQuote{
		Provider: "Symbiosis", FromChain: req.FromChain, ToChain: req.ToChain,
		FromAmount: req.Amount, ToAmount: toAmount, Fee: "0", EstimatedTime: "1-5 min",
		Route: []BridgeStep{{Provider: "Symbiosis", From: strconv.Itoa(req.FromChain), To: strconv.Itoa(req.ToChain), Type: "bridge"}},
	}, nil
}

View File

@@ -0,0 +1,86 @@
package tracking
import (
"context"
"fmt"
"time"
"github.com/jackc/pgx/v5/pgxpool"
)
// Tracker tracks CCIP messages across chains, persisting their lifecycle in
// the ccip_messages table.
type Tracker struct {
	db *pgxpool.Pool // shared PostgreSQL connection pool
}

// NewTracker creates a new CCIP tracker backed by the given pool.
func NewTracker(db *pgxpool.Pool) *Tracker {
	return &Tracker{db: db}
}

// CCIPMessage represents a CCIP message and its delivery state.
type CCIPMessage struct {
	MessageID     string     // unique CCIP message ID (upsert conflict key)
	SourceChainID int        // chain the message originated on
	DestChainID   int        // chain the message targets
	SourceTxHash  string     // transaction hash on the source chain
	DestTxHash    string     // transaction hash on the destination chain
	Status        string     // lifecycle status; 'delivered' stamps delivered_at
	CreatedAt     time.Time  // when the message was first recorded
	DeliveredAt   *time.Time // nil until delivery is recorded
}

// TrackMessage inserts a CCIP message, or on conflict (same message_id)
// refreshes dest_tx_hash and status; delivered_at is set to NOW() whenever
// the incoming status is 'delivered', otherwise the existing value is kept.
func (t *Tracker) TrackMessage(ctx context.Context, msg *CCIPMessage) error {
	query := `
	INSERT INTO ccip_messages (
	message_id, source_chain_id, dest_chain_id,
	source_tx_hash, dest_tx_hash, status, created_at
	) VALUES ($1, $2, $3, $4, $5, $6, $7)
	ON CONFLICT (message_id) DO UPDATE SET
	dest_tx_hash = $5,
	status = $6,
	delivered_at = CASE WHEN $6 = 'delivered' THEN NOW() ELSE delivered_at END
	`
	_, err := t.db.Exec(ctx, query,
		msg.MessageID,
		msg.SourceChainID,
		msg.DestChainID,
		msg.SourceTxHash,
		msg.DestTxHash,
		msg.Status,
		msg.CreatedAt,
	)
	return err
}

// GetMessage gets a CCIP message by ID.
//
// NOTE(review): DestTxHash is scanned into a plain string — if the
// ccip_messages.dest_tx_hash column can be NULL for pending rows, Scan will
// fail. Confirm the column is NOT NULL (or defaulted) in the migration.
func (t *Tracker) GetMessage(ctx context.Context, messageID string) (*CCIPMessage, error) {
	query := `
	SELECT message_id, source_chain_id, dest_chain_id,
	source_tx_hash, dest_tx_hash, status, created_at, delivered_at
	FROM ccip_messages
	WHERE message_id = $1
	`
	var msg CCIPMessage
	err := t.db.QueryRow(ctx, query, messageID).Scan(
		&msg.MessageID,
		&msg.SourceChainID,
		&msg.DestChainID,
		&msg.SourceTxHash,
		&msg.DestTxHash,
		&msg.Status,
		&msg.CreatedAt,
		&msg.DeliveredAt, // *time.Time handles NULL delivered_at
	)
	if err != nil {
		return nil, fmt.Errorf("failed to get message: %w", err)
	}
	return &msg, nil
}

View File

@@ -0,0 +1,71 @@
package adapters
import (
"context"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
)
// EVMAdapter implements ChainAdapter for EVM-compatible chains, delegating
// every call to a go-ethereum RPC client.
type EVMAdapter struct {
	client  *ethclient.Client // underlying JSON-RPC client
	chainID int64             // chain ID this adapter serves
}

// NewEVMAdapter creates a new EVM chain adapter for the given client and
// chain ID.
func NewEVMAdapter(client *ethclient.Client, chainID int64) *EVMAdapter {
	return &EVMAdapter{
		client:  client,
		chainID: chainID,
	}
}

// ChainAdapter defines the read-only chain operations the indexer consumes.
type ChainAdapter interface {
	// GetBlockByNumber returns the block at the given height.
	GetBlockByNumber(ctx context.Context, number int64) (*types.Block, error)
	// GetTransaction returns a transaction by hash; the bool reports whether
	// it is still pending.
	GetTransaction(ctx context.Context, hash common.Hash) (*types.Transaction, bool, error)
	// GetTransactionReceipt returns the receipt for a mined transaction.
	GetTransactionReceipt(ctx context.Context, hash common.Hash) (*types.Receipt, error)
	// GetCode returns the contract bytecode at an address.
	GetCode(ctx context.Context, address common.Address) ([]byte, error)
	// GetBalance returns the native-token balance of an address.
	GetBalance(ctx context.Context, address common.Address) (*big.Int, error)
	// GetGasPrice returns a suggested gas price.
	GetGasPrice(ctx context.Context) (*big.Int, error)
	// ChainID returns the adapter's chain ID.
	ChainID() int64
}

// GetBlockByNumber gets a block by number.
func (e *EVMAdapter) GetBlockByNumber(ctx context.Context, number int64) (*types.Block, error) {
	return e.client.BlockByNumber(ctx, big.NewInt(number))
}

// GetTransaction gets a transaction by hash.
func (e *EVMAdapter) GetTransaction(ctx context.Context, hash common.Hash) (*types.Transaction, bool, error) {
	return e.client.TransactionByHash(ctx, hash)
}

// GetTransactionReceipt gets a transaction receipt.
func (e *EVMAdapter) GetTransactionReceipt(ctx context.Context, hash common.Hash) (*types.Receipt, error) {
	return e.client.TransactionReceipt(ctx, hash)
}

// GetCode gets contract code at the latest block (nil block number).
func (e *EVMAdapter) GetCode(ctx context.Context, address common.Address) ([]byte, error) {
	return e.client.CodeAt(ctx, address, nil)
}

// GetBalance gets account balance at the latest block (nil block number).
func (e *EVMAdapter) GetBalance(ctx context.Context, address common.Address) (*big.Int, error) {
	return e.client.BalanceAt(ctx, address, nil)
}

// GetGasPrice gets the node's suggested gas price.
func (e *EVMAdapter) GetGasPrice(ctx context.Context) (*big.Int, error) {
	return e.client.SuggestGasPrice(ctx)
}

// ChainID returns the chain ID.
func (e *EVMAdapter) ChainID() int64 {
	return e.chainID
}

View File

@@ -0,0 +1,46 @@
{
"name": "MetaMask Dual-Chain Networks (Chain 138 + Ethereum Mainnet)",
"version": { "major": 1, "minor": 0, "patch": 0 },
"chains": [
{
"chainId": "0x8a",
"chainIdDecimal": 138,
"chainName": "DeFi Oracle Meta Mainnet",
"rpcUrls": [
"https://rpc-http-pub.d-bis.org",
"https://rpc.d-bis.org",
"https://rpc2.d-bis.org",
"https://rpc.defi-oracle.io"
],
"nativeCurrency": {
"name": "Ether",
"symbol": "ETH",
"decimals": 18
},
"blockExplorerUrls": ["https://explorer.d-bis.org"],
"iconUrls": [
"https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png"
]
},
{
"chainId": "0x1",
"chainIdDecimal": 1,
"chainName": "Ethereum Mainnet",
"rpcUrls": [
"https://eth.llamarpc.com",
"https://rpc.ankr.com/eth",
"https://ethereum.publicnode.com",
"https://1rpc.io/eth"
],
"nativeCurrency": {
"name": "Ether",
"symbol": "ETH",
"decimals": 18
},
"blockExplorerUrls": ["https://etherscan.io"],
"iconUrls": [
"https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png"
]
}
]
}

View File

@@ -0,0 +1,106 @@
{
"name": "Dual-Chain Token List (Chain 138 + Ethereum Mainnet)",
"version": { "major": 1, "minor": 0, "patch": 0 },
"timestamp": "2026-01-30T00:00:00.000Z",
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tokens": [
{
"chainId": 138,
"address": "0x3304b747e565a97ec8ac220b0b6a1f6ffdb837e6",
"name": "ETH/USD Price Feed",
"symbol": "ETH-USD",
"decimals": 8,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["oracle", "price-feed"]
},
{
"chainId": 138,
"address": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
"name": "Wrapped Ether",
"symbol": "WETH",
"decimals": 18,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["defi", "wrapped"]
},
{
"chainId": 138,
"address": "0xf4BB2e28688e89fCcE3c0580D37d36A7672E8A9f",
"name": "Wrapped Ether v10",
"symbol": "WETH10",
"decimals": 18,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["defi", "wrapped"]
},
{
"chainId": 138,
"address": "0x93E66202A11B1772E55407B32B44e5Cd8eda7f22",
"name": "Compliant Tether USD",
"symbol": "cUSDT",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xdAC17F958D2ee523a2206206994597C13D831ec7/logo.png",
"tags": ["stablecoin", "defi", "compliant"]
},
{
"chainId": 138,
"address": "0xf22258f57794CC8E06237084b353Ab30fFfa640b",
"name": "Compliant USD Coin",
"symbol": "cUSDC",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48/logo.png",
"tags": ["stablecoin", "defi", "compliant"]
},
{
"chainId": 1,
"address": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
"name": "Wrapped Ether",
"symbol": "WETH",
"decimals": 18,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["defi", "wrapped"]
},
{
"chainId": 1,
"address": "0xdAC17F958D2ee523a2206206994597C13D831ec7",
"name": "Tether USD",
"symbol": "USDT",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xdAC17F958D2ee523a2206206994597C13D831ec7/logo.png",
"tags": ["stablecoin", "defi"]
},
{
"chainId": 1,
"address": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48",
"name": "USD Coin",
"symbol": "USDC",
"decimals": 6,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48/logo.png",
"tags": ["stablecoin", "defi"]
},
{
"chainId": 1,
"address": "0x6B175474E89094C44Da98b954EedeAC495271d0F",
"name": "Dai Stablecoin",
"symbol": "DAI",
"decimals": 18,
"logoURI": "https://raw.githubusercontent.com/trustwallet/assets/master/blockchains/ethereum/assets/0x6B175474E89094C44Da98b954EedeAC495271d0F/logo.png",
"tags": ["stablecoin", "defi"]
},
{
"chainId": 1,
"address": "0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419",
"name": "ETH/USD Price Feed",
"symbol": "ETH-USD",
"decimals": 8,
"logoURI": "https://raw.githubusercontent.com/ethereum/ethereum.org/main/static/images/eth-diamond-black.png",
"tags": ["oracle", "price-feed"]
}
],
"tags": {
"defi": { "name": "DeFi", "description": "Decentralized Finance tokens" },
"wrapped": { "name": "Wrapped", "description": "Wrapped tokens representing native assets" },
"oracle": { "name": "Oracle", "description": "Oracle price feed contracts" },
"price-feed": { "name": "Price Feed", "description": "Price feed oracle contracts" },
"stablecoin": { "name": "Stablecoin", "description": "Stable value tokens pegged to fiat" },
"compliant": { "name": "Compliant", "description": "Regulatory compliant tokens" }
}
}

View File

@@ -0,0 +1,102 @@
package config
import (
"fmt"
"os"
"strconv"
"time"
"github.com/jackc/pgx/v5/pgxpool"
)
// DatabaseConfig holds PostgreSQL connection settings plus pool sizing,
// typically populated from the environment via LoadDatabaseConfig.
type DatabaseConfig struct {
	Host            string        // database host
	Port            int           // database port
	User            string        // database user
	Password        string        // database password
	Database        string        // database name
	SSLMode         string        // libpq sslmode value (e.g. "disable", "require")
	MaxConnections  int           // pool upper bound (pgxpool MaxConns)
	MaxIdleTime     time.Duration // how long a connection may sit idle
	ConnMaxLifetime time.Duration // max total lifetime of a pooled connection
}
// LoadDatabaseConfig loads database configuration from environment variables,
// falling back to the documented defaults when a variable is unset, empty, or
// malformed.
//
// Fix: the previous version parsed DB_MAX_CONNECTIONS with a bare
// strconv.Atoi and the duration variables with time.ParseDuration while
// discarding the errors — a malformed value silently yielded 0 connections or
// a zero duration. It also bypassed the file's own getIntEnv helper.
func LoadDatabaseConfig() *DatabaseConfig {
	return &DatabaseConfig{
		Host:            getEnv("DB_HOST", "localhost"),
		Port:            getIntEnv("DB_PORT", 5432),
		User:            getEnv("DB_USER", "explorer"),
		Password:        getEnv("DB_PASSWORD", ""),
		Database:        getEnv("DB_NAME", "explorer"),
		SSLMode:         getEnv("DB_SSLMODE", "disable"),
		MaxConnections:  getIntEnv("DB_MAX_CONNECTIONS", 25),
		MaxIdleTime:     getDurationEnv("DB_MAX_IDLE_TIME", 5*time.Minute),
		ConnMaxLifetime: getDurationEnv("DB_CONN_MAX_LIFETIME", time.Hour),
	}
}

// getDurationEnv reads key as a time.Duration, returning defaultValue when
// the variable is unset, empty, or not a valid duration string.
func getDurationEnv(key string, defaultValue time.Duration) time.Duration {
	if value := os.Getenv(key); value != "" {
		if d, err := time.ParseDuration(value); err == nil {
			return d
		}
	}
	return defaultValue
}
// ConnectionString returns a libpq-style key/value PostgreSQL connection
// string assembled from the config fields.
//
// NOTE(review): values are interpolated verbatim — a password containing
// spaces or quotes would need escaping; confirm inputs are constrained.
func (c *DatabaseConfig) ConnectionString() string {
	return fmt.Sprintf(
		"host=%s port=%d user=%s password=%s dbname=%s sslmode=%s",
		c.Host, c.Port, c.User, c.Password, c.Database, c.SSLMode,
	)
}
// PoolConfig parses the connection string into a pgxpool configuration and
// applies the pool-sizing fields from this config.
func (c *DatabaseConfig) PoolConfig() (*pgxpool.Config, error) {
	config, err := pgxpool.ParseConfig(c.ConnectionString())
	if err != nil {
		return nil, err
	}
	config.MaxConns = int32(c.MaxConnections)
	config.MaxConnIdleTime = c.MaxIdleTime
	config.MaxConnLifetime = c.ConnMaxLifetime
	return config, nil
}
// ReadReplicaConfig holds connection settings for an optional read replica.
// An empty Host means no replica is configured.
type ReadReplicaConfig struct {
	Host     string // replica host ("" when no replica is configured)
	Port     int    // replica port
	User     string // replica user
	Password string // replica password
	Database string // replica database name
	SSLMode  string // libpq sslmode value
}

// LoadReadReplicaConfig loads read replica configuration from environment
// variables; all fields except port and sslmode default to empty.
func LoadReadReplicaConfig() *ReadReplicaConfig {
	return &ReadReplicaConfig{
		Host:     getEnv("DB_REPLICA_HOST", ""),
		Port:     getIntEnv("DB_REPLICA_PORT", 5432),
		User:     getEnv("DB_REPLICA_USER", ""),
		Password: getEnv("DB_REPLICA_PASSWORD", ""),
		Database: getEnv("DB_REPLICA_NAME", ""),
		SSLMode:  getEnv("DB_REPLICA_SSLMODE", "disable"),
	}
}
// getEnv returns the value of the environment variable key, or defaultValue
// when the variable is unset or empty.
func getEnv(key, defaultValue string) string {
	value := os.Getenv(key)
	if value == "" {
		return defaultValue
	}
	return value
}
// getIntEnv returns the integer value of the environment variable key, or
// defaultValue when the variable is unset, empty, or not a valid integer.
func getIntEnv(key string, defaultValue int) int {
	value := os.Getenv(key)
	if value == "" {
		return defaultValue
	}
	parsed, err := strconv.Atoi(value)
	if err != nil {
		return defaultValue
	}
	return parsed
}

View File

@@ -0,0 +1,16 @@
-- Rollback initial schema
-- Tables are dropped referencing-side first (children of foreign keys before
-- their parents); CASCADE additionally removes partitions, constraints, and
-- any other dependent objects.
DROP TABLE IF EXISTS address_labels CASCADE;
DROP TABLE IF EXISTS watchlists CASCADE;
DROP TABLE IF EXISTS api_keys CASCADE;
DROP TABLE IF EXISTS users CASCADE;
DROP TABLE IF EXISTS contracts CASCADE;
DROP TABLE IF EXISTS token_transfers CASCADE;
DROP TABLE IF EXISTS tokens CASCADE;
DROP TABLE IF EXISTS logs CASCADE;
DROP TABLE IF EXISTS transactions CASCADE;
DROP TABLE IF EXISTS blocks CASCADE;
-- Extensions go last; without CASCADE these fail if any remaining object
-- still depends on them, which is the safe behavior for a rollback.
DROP EXTENSION IF EXISTS timescaledb;
DROP EXTENSION IF EXISTS "uuid-ossp";

View File

@@ -0,0 +1,283 @@
-- Initial schema for ChainID 138 Explorer
-- Supports multi-chain via chain_id partitioning
-- Enable UUID extension
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- Enable TimescaleDB extension (for time-series data)
CREATE EXTENSION IF NOT EXISTS timescaledb;
-- Blocks table
CREATE TABLE blocks (
id BIGSERIAL,
chain_id INTEGER NOT NULL,
number BIGINT NOT NULL,
hash VARCHAR(66) NOT NULL,
parent_hash VARCHAR(66) NOT NULL,
nonce VARCHAR(18),
sha3_uncles VARCHAR(66),
logs_bloom TEXT,
transactions_root VARCHAR(66),
state_root VARCHAR(66),
receipts_root VARCHAR(66),
miner VARCHAR(42),
difficulty NUMERIC,
total_difficulty NUMERIC,
size BIGINT,
extra_data TEXT,
gas_limit BIGINT,
gas_used BIGINT,
timestamp TIMESTAMP NOT NULL,
transaction_count INTEGER DEFAULT 0,
base_fee_per_gas BIGINT,
orphaned BOOLEAN DEFAULT false,
orphaned_at TIMESTAMP,
created_at TIMESTAMP DEFAULT NOW(),
updated_at TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (id),
UNIQUE (chain_id, number),
UNIQUE (chain_id, hash)
) PARTITION BY LIST (chain_id);
-- Create partition for ChainID 138
CREATE TABLE blocks_chain_138 PARTITION OF blocks FOR VALUES IN (138);
-- Indexes for blocks
CREATE INDEX idx_blocks_chain_number ON blocks(chain_id, number);
CREATE INDEX idx_blocks_chain_hash ON blocks(chain_id, hash);
CREATE INDEX idx_blocks_chain_timestamp ON blocks(chain_id, timestamp);
-- Transactions table
CREATE TABLE transactions (
id BIGSERIAL,
chain_id INTEGER NOT NULL,
hash VARCHAR(66) NOT NULL,
block_number BIGINT NOT NULL,
block_hash VARCHAR(66) NOT NULL,
transaction_index INTEGER NOT NULL,
from_address VARCHAR(42) NOT NULL,
to_address VARCHAR(42),
value NUMERIC(78, 0) NOT NULL DEFAULT 0,
gas_price BIGINT,
max_fee_per_gas BIGINT,
max_priority_fee_per_gas BIGINT,
gas_limit BIGINT NOT NULL,
gas_used BIGINT,
nonce BIGINT NOT NULL,
input_data TEXT,
status INTEGER,
contract_address VARCHAR(42),
cumulative_gas_used BIGINT,
effective_gas_price BIGINT,
created_at TIMESTAMP DEFAULT NOW(),
updated_at TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (id),
UNIQUE (chain_id, hash),
FOREIGN KEY (chain_id, block_number) REFERENCES blocks(chain_id, number)
) PARTITION BY LIST (chain_id);
-- Create partition for ChainID 138
CREATE TABLE transactions_chain_138 PARTITION OF transactions FOR VALUES IN (138);
-- Indexes for transactions
CREATE INDEX idx_transactions_chain_hash ON transactions(chain_id, hash);
CREATE INDEX idx_transactions_chain_block ON transactions(chain_id, block_number, transaction_index);
CREATE INDEX idx_transactions_chain_from ON transactions(chain_id, from_address);
CREATE INDEX idx_transactions_chain_to ON transactions(chain_id, to_address);
CREATE INDEX idx_transactions_chain_block_from ON transactions(chain_id, block_number, from_address);
-- Logs table
CREATE TABLE logs (
id BIGSERIAL,
chain_id INTEGER NOT NULL,
transaction_hash VARCHAR(66) NOT NULL,
block_number BIGINT NOT NULL,
block_hash VARCHAR(66) NOT NULL,
log_index INTEGER NOT NULL,
address VARCHAR(42) NOT NULL,
topic0 VARCHAR(66),
topic1 VARCHAR(66),
topic2 VARCHAR(66),
topic3 VARCHAR(66),
data TEXT,
decoded_data JSONB,
created_at TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (id),
UNIQUE (chain_id, transaction_hash, log_index),
FOREIGN KEY (chain_id, transaction_hash) REFERENCES transactions(chain_id, hash)
) PARTITION BY LIST (chain_id);
-- Create partition for ChainID 138
CREATE TABLE logs_chain_138 PARTITION OF logs FOR VALUES IN (138);
-- Indexes for logs
CREATE INDEX idx_logs_chain_tx ON logs(chain_id, transaction_hash);
CREATE INDEX idx_logs_chain_address ON logs(chain_id, address);
CREATE INDEX idx_logs_chain_topic0 ON logs(chain_id, topic0);
CREATE INDEX idx_logs_chain_block ON logs(chain_id, block_number);
CREATE INDEX idx_logs_chain_address_topic0 ON logs(chain_id, address, topic0);
-- Tokens table: metadata for ERC-20/721/1155 contracts, partitioned per chain.
CREATE TABLE tokens (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    type VARCHAR(10) NOT NULL CHECK (type IN ('ERC20', 'ERC721', 'ERC1155')),
    name VARCHAR(255),
    symbol VARCHAR(50),
    -- NOTE(review): some real ERC-20s use more than 18 decimals — confirm
    -- the <= 18 bound is intentional for the supported chains.
    decimals INTEGER CHECK (decimals >= 0 AND decimals <= 18),
    total_supply NUMERIC(78, 0),
    holder_count INTEGER DEFAULT 0,
    transfer_count INTEGER DEFAULT 0,
    logo_url TEXT,
    website_url TEXT,
    description TEXT,
    verified BOOLEAN DEFAULT false,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    -- Partitioned tables require the partition key in the primary key.
    PRIMARY KEY (id, chain_id),
    UNIQUE (chain_id, address)
) PARTITION BY LIST (chain_id);
-- Create partition for ChainID 138
CREATE TABLE tokens_chain_138 PARTITION OF tokens FOR VALUES IN (138);
-- Indexes for tokens
CREATE INDEX idx_tokens_chain_address ON tokens(chain_id, address);
CREATE INDEX idx_tokens_chain_type ON tokens(chain_id, type);
CREATE INDEX idx_tokens_chain_symbol ON tokens(chain_id, symbol);
-- Token transfers table: individual ERC-20/721/1155 transfer events, partitioned per chain.
CREATE TABLE token_transfers (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    log_index INTEGER NOT NULL,
    token_address VARCHAR(42) NOT NULL,
    token_type VARCHAR(10) NOT NULL CHECK (token_type IN ('ERC20', 'ERC721', 'ERC1155')),
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    amount NUMERIC(78, 0),
    token_id VARCHAR(78),
    operator VARCHAR(42),
    created_at TIMESTAMP DEFAULT NOW(),
    -- Partitioned tables require the partition key in the primary key.
    PRIMARY KEY (id, chain_id),
    FOREIGN KEY (chain_id, transaction_hash) REFERENCES transactions(chain_id, hash),
    FOREIGN KEY (chain_id, token_address) REFERENCES tokens(chain_id, address),
    UNIQUE (chain_id, transaction_hash, log_index)
) PARTITION BY LIST (chain_id);
-- Create partition for ChainID 138
CREATE TABLE token_transfers_chain_138 PARTITION OF token_transfers FOR VALUES IN (138);
-- Indexes for token transfers (token pages, address activity, per-tx lookups)
CREATE INDEX idx_token_transfers_chain_token ON token_transfers(chain_id, token_address);
CREATE INDEX idx_token_transfers_chain_from ON token_transfers(chain_id, from_address);
CREATE INDEX idx_token_transfers_chain_to ON token_transfers(chain_id, to_address);
CREATE INDEX idx_token_transfers_chain_tx ON token_transfers(chain_id, transaction_hash);
CREATE INDEX idx_token_transfers_chain_block ON token_transfers(chain_id, block_number);
CREATE INDEX idx_token_transfers_chain_token_from ON token_transfers(chain_id, token_address, from_address);
CREATE INDEX idx_token_transfers_chain_token_to ON token_transfers(chain_id, token_address, to_address);
-- Contracts table: source-verification metadata for deployed contracts, partitioned per chain.
CREATE TABLE contracts (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    name VARCHAR(255),
    compiler_version VARCHAR(50),
    optimization_enabled BOOLEAN,
    optimization_runs INTEGER,
    evm_version VARCHAR(20),
    source_code TEXT,
    abi JSONB,
    constructor_arguments TEXT,
    verification_status VARCHAR(20) NOT NULL CHECK (verification_status IN ('pending', 'verified', 'failed')),
    verified_at TIMESTAMP,
    verification_method VARCHAR(50),
    license VARCHAR(50),
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    -- Partitioned tables require the partition key in the primary key.
    PRIMARY KEY (id, chain_id),
    UNIQUE (chain_id, address)
) PARTITION BY LIST (chain_id);
-- Create partition for ChainID 138
CREATE TABLE contracts_chain_138 PARTITION OF contracts FOR VALUES IN (138);
-- Indexes for contracts; GIN index supports containment queries over the ABI JSON.
CREATE INDEX idx_contracts_chain_address ON contracts(chain_id, address);
CREATE INDEX idx_contracts_chain_verified ON contracts(chain_id, verification_status);
CREATE INDEX idx_contracts_abi_gin ON contracts USING GIN (abi);
-- Users table: account records for the explorer's user-facing features.
CREATE TABLE users (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    email VARCHAR(255) UNIQUE,
    username VARCHAR(100) UNIQUE,
    password_hash TEXT,
    api_key_hash TEXT,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    last_login_at TIMESTAMP
);
-- NOTE(review): email and username are already UNIQUE, which creates implicit
-- indexes — these two explicit indexes are redundant; consider dropping.
CREATE INDEX idx_users_email ON users(email);
CREATE INDEX idx_users_username ON users(username);
-- API keys table: per-user API keys with tiered rate limits; keys are stored hashed.
CREATE TABLE api_keys (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    user_id UUID NOT NULL,
    key_hash TEXT NOT NULL UNIQUE,
    name VARCHAR(255),
    tier VARCHAR(20) NOT NULL CHECK (tier IN ('free', 'pro', 'enterprise')),
    rate_limit_per_second INTEGER,
    rate_limit_per_minute INTEGER,
    -- Optional IP allow-list; NULL/empty presumably means unrestricted — confirm in app code.
    ip_whitelist TEXT[],
    last_used_at TIMESTAMP,
    expires_at TIMESTAMP,
    revoked BOOLEAN DEFAULT false,
    created_at TIMESTAMP DEFAULT NOW(),
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
CREATE INDEX idx_api_keys_user ON api_keys(user_id);
-- NOTE(review): key_hash is UNIQUE, so this explicit index is redundant.
CREATE INDEX idx_api_keys_hash ON api_keys(key_hash);
-- Watchlists table: per-user labeled addresses to watch; one row per (user, chain, address).
CREATE TABLE watchlists (
    id BIGSERIAL,
    user_id UUID NOT NULL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    label VARCHAR(255),
    created_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (id),
    -- Prevents a user adding the same address on the same chain twice.
    UNIQUE (user_id, chain_id, address),
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
CREATE INDEX idx_watchlists_user ON watchlists(user_id);
-- Reverse lookup: who is watching a given address (e.g. for notifications).
CREATE INDEX idx_watchlists_chain_address ON watchlists(chain_id, address);
-- Address labels table: human-readable names for addresses; user-private labels
-- carry a user_id, public/contract-name labels leave user_id NULL.
CREATE TABLE address_labels (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    label VARCHAR(255) NOT NULL,
    label_type VARCHAR(20) NOT NULL CHECK (label_type IN ('user', 'public', 'contract_name')),
    user_id UUID,
    source VARCHAR(50),
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (id),
    -- NOTE(review): with user_id NULL (public labels), standard UNIQUE treats
    -- NULLs as distinct, so duplicate public labels for the same address are
    -- possible. Consider UNIQUE NULLS NOT DISTINCT (PG15+) or a partial unique
    -- index WHERE user_id IS NULL — confirm intended semantics.
    UNIQUE (chain_id, address, label_type, user_id),
    FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
);
CREATE INDEX idx_labels_chain_address ON address_labels(chain_id, address);
CREATE INDEX idx_labels_chain_user ON address_labels(chain_id, user_id);

View File

@@ -0,0 +1,9 @@
-- Backfill checkpoints table for tracking backfill progress
-- One row per chain: the highest block the backfill job has completed, so a
-- restarted job can resume from last_block instead of re-scanning from genesis.
CREATE TABLE IF NOT EXISTS backfill_checkpoints (
    chain_id INTEGER NOT NULL,
    last_block BIGINT NOT NULL,
    updated_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (chain_id)
);

View File

@@ -0,0 +1,4 @@
-- Rollback traces table
-- CASCADE also removes the per-chain partitions and any dependent objects.
DROP TABLE IF EXISTS traces CASCADE;

View File

@@ -0,0 +1,19 @@
-- Traces table for storing transaction traces
CREATE TABLE IF NOT EXISTS traces (
chain_id INTEGER NOT NULL,
transaction_hash VARCHAR(66) NOT NULL,
block_number BIGINT NOT NULL,
trace_data JSONB NOT NULL,
created_at TIMESTAMP DEFAULT NOW(),
PRIMARY KEY (chain_id, transaction_hash)
) PARTITION BY LIST (chain_id);
-- Create partition for ChainID 138
CREATE TABLE IF NOT EXISTS traces_chain_138 PARTITION OF traces FOR VALUES IN (138);
-- Index
CREATE INDEX IF NOT EXISTS idx_traces_chain_tx ON traces(chain_id, transaction_hash);
CREATE INDEX IF NOT EXISTS idx_traces_chain_block ON traces(chain_id, block_number);
CREATE INDEX IF NOT EXISTS idx_traces_data_gin ON traces USING GIN (trace_data);

View File

@@ -0,0 +1,4 @@
-- Rollback CCIP messages table
-- CASCADE removes dependent objects (indexes, views) along with the table.
DROP TABLE IF EXISTS ccip_messages CASCADE;

View File

@@ -0,0 +1,19 @@
-- CCIP messages table: cross-chain (Chainlink CCIP) message delivery tracking.
CREATE TABLE IF NOT EXISTS ccip_messages (
    message_id VARCHAR(255) PRIMARY KEY,
    source_chain_id INTEGER NOT NULL,
    dest_chain_id INTEGER NOT NULL,
    source_tx_hash VARCHAR(66),
    dest_tx_hash VARCHAR(66),
    status VARCHAR(20) NOT NULL CHECK (status IN ('pending', 'delivered', 'failed')),
    created_at TIMESTAMP DEFAULT NOW(),
    delivered_at TIMESTAMP
);
-- IF NOT EXISTS keeps the migration idempotent to match the CREATE TABLE above;
-- without it a re-run would fail on the first duplicate index.
CREATE INDEX IF NOT EXISTS idx_ccip_source_chain ON ccip_messages(source_chain_id);
CREATE INDEX IF NOT EXISTS idx_ccip_dest_chain ON ccip_messages(dest_chain_id);
CREATE INDEX IF NOT EXISTS idx_ccip_status ON ccip_messages(status);
CREATE INDEX IF NOT EXISTS idx_ccip_source_tx ON ccip_messages(source_tx_hash);
CREATE INDEX IF NOT EXISTS idx_ccip_dest_tx ON ccip_messages(dest_tx_hash);

View File

@@ -0,0 +1,19 @@
-- Ledger entries table for double-entry accounting: each business transaction
-- is recorded as balancing debit/credit rows (balancing is enforced by the
-- application, not this schema).
CREATE TABLE IF NOT EXISTS ledger_entries (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    customer_id UUID NOT NULL,
    account_type VARCHAR(20) NOT NULL CHECK (account_type IN ('asset', 'liability', 'equity')),
    -- NUMERIC(78, 0) holds full uint256-range integer amounts.
    amount NUMERIC(78, 0) NOT NULL,
    currency VARCHAR(10) NOT NULL DEFAULT 'USD',
    description TEXT,
    reference VARCHAR(255),
    side VARCHAR(10) NOT NULL CHECK (side IN ('debit', 'credit')),
    created_at TIMESTAMP DEFAULT NOW()
);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_ledger_customer ON ledger_entries(customer_id);
CREATE INDEX IF NOT EXISTS idx_ledger_account_type ON ledger_entries(account_type);
CREATE INDEX IF NOT EXISTS idx_ledger_reference ON ledger_entries(reference);
CREATE INDEX IF NOT EXISTS idx_ledger_created_at ON ledger_entries(created_at);

View File

@@ -0,0 +1,17 @@
-- VTM conversation states table: per-session workflow position and context
-- for the virtual teller, with an expiry for stale sessions.
CREATE TABLE IF NOT EXISTS conversation_states (
    session_id VARCHAR(255) PRIMARY KEY,
    user_id UUID,
    workflow VARCHAR(50),
    step VARCHAR(50),
    context JSONB,
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    expires_at TIMESTAMP
);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_conversation_user ON conversation_states(user_id);
CREATE INDEX IF NOT EXISTS idx_conversation_workflow ON conversation_states(workflow);
-- Supports efficient sweeping of expired sessions.
CREATE INDEX IF NOT EXISTS idx_conversation_expires ON conversation_states(expires_at);

View File

@@ -0,0 +1,16 @@
-- Address tags table: free-form tags attached to addresses (e.g. "exchange",
-- "scam"); an address may carry many distinct tags per chain.
CREATE TABLE IF NOT EXISTS address_tags (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    address VARCHAR(42) NOT NULL,
    tag VARCHAR(255) NOT NULL,
    source VARCHAR(50),
    created_at TIMESTAMP DEFAULT NOW(),
    PRIMARY KEY (id),
    UNIQUE (chain_id, address, tag)
);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_address_tags_chain_address ON address_tags(chain_id, address);
CREATE INDEX IF NOT EXISTS idx_address_tags_tag ON address_tags(tag);

View File

@@ -0,0 +1,18 @@
-- Rollback migration: Remove ISO timestamp columns
-- Reverses the timestamp_iso migration in dependency order:
-- triggers first (they reference the functions), then functions, indexes, columns.
-- Drop triggers
DROP TRIGGER IF EXISTS trigger_transactions_timestamp_iso ON transactions;
DROP TRIGGER IF EXISTS trigger_blocks_timestamp_iso ON blocks;
-- Drop functions
DROP FUNCTION IF EXISTS update_transaction_timestamp_iso();
DROP FUNCTION IF EXISTS update_timestamp_iso();
-- Drop indexes
DROP INDEX IF EXISTS idx_transactions_chain_timestamp_iso;
DROP INDEX IF EXISTS idx_blocks_chain_timestamp_iso;
-- Drop columns
ALTER TABLE transactions DROP COLUMN IF EXISTS timestamp_iso;
ALTER TABLE blocks DROP COLUMN IF EXISTS timestamp_iso;

View File

@@ -0,0 +1,72 @@
-- Add ISO 8601 compliant timestamp columns to blocks and transactions
-- This migration adds timestamp_iso columns that store ISO 8601 formatted timestamps
-- NOTE(review): the hard-coded "Z" suffix asserts UTC; this is only correct if the
-- underlying TIMESTAMP columns are stored in UTC — confirm against the indexer.
-- Add timestamp_iso column to blocks table
ALTER TABLE blocks ADD COLUMN IF NOT EXISTS timestamp_iso VARCHAR(30);
-- Create index for timestamp_iso on blocks
CREATE INDEX IF NOT EXISTS idx_blocks_chain_timestamp_iso ON blocks(chain_id, timestamp_iso);
-- Add timestamp_iso column to transactions table
-- This will be populated from the block timestamp via trigger
ALTER TABLE transactions ADD COLUMN IF NOT EXISTS timestamp_iso VARCHAR(30);
-- Create index for timestamp_iso on transactions
CREATE INDEX IF NOT EXISTS idx_transactions_chain_timestamp_iso ON transactions(chain_id, timestamp_iso);
-- Function to update timestamp_iso from timestamp
CREATE OR REPLACE FUNCTION update_timestamp_iso()
RETURNS TRIGGER AS $$
BEGIN
    NEW.timestamp_iso := to_char(NEW.timestamp, 'YYYY-MM-DD"T"HH24:MI:SS"Z"');
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to automatically update timestamp_iso when timestamp changes in blocks
DROP TRIGGER IF EXISTS trigger_blocks_timestamp_iso ON blocks;
CREATE TRIGGER trigger_blocks_timestamp_iso
    BEFORE INSERT OR UPDATE OF timestamp ON blocks
    FOR EACH ROW
    EXECUTE FUNCTION update_timestamp_iso();
-- Function to update transaction timestamp_iso from block timestamp
CREATE OR REPLACE FUNCTION update_transaction_timestamp_iso()
RETURNS TRIGGER AS $$
DECLARE
    block_timestamp TIMESTAMP;
BEGIN
    -- Get the block timestamp
    SELECT b.timestamp INTO block_timestamp
    FROM blocks b
    WHERE b.chain_id = NEW.chain_id AND b.number = NEW.block_number;
    -- If block timestamp exists, format it as ISO 8601
    -- NOTE(review): if the transaction is inserted before its block row exists,
    -- timestamp_iso stays NULL and nothing backfills it later — confirm the
    -- indexer always writes blocks before their transactions.
    IF block_timestamp IS NOT NULL THEN
        NEW.timestamp_iso := to_char(block_timestamp, 'YYYY-MM-DD"T"HH24:MI:SS"Z"');
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger to automatically update timestamp_iso when transaction is inserted/updated
-- (fires only on INSERT or when block_number itself changes).
DROP TRIGGER IF EXISTS trigger_transactions_timestamp_iso ON transactions;
CREATE TRIGGER trigger_transactions_timestamp_iso
    BEFORE INSERT OR UPDATE OF block_number ON transactions
    FOR EACH ROW
    EXECUTE FUNCTION update_transaction_timestamp_iso();
-- Backfill existing blocks with ISO timestamps
UPDATE blocks
SET timestamp_iso = to_char(timestamp, 'YYYY-MM-DD"T"HH24:MI:SS"Z"')
WHERE timestamp_iso IS NULL;
-- Backfill existing transactions with ISO timestamps from blocks
UPDATE transactions t
SET timestamp_iso = to_char(b.timestamp, 'YYYY-MM-DD"T"HH24:MI:SS"Z"')
FROM blocks b
WHERE t.chain_id = b.chain_id
  AND t.block_number = b.number
  AND t.timestamp_iso IS NULL;

View File

@@ -0,0 +1,7 @@
-- Rollback: Remove LINK token from tokens table
-- Must target the address the up migration actually inserted — the MockLinkToken
-- deployment on ChainID 138 (0xb7721d...), not the Ethereum mainnet LINK address
-- (0x514910...); deleting the mainnet address would make this rollback a no-op.
DELETE FROM tokens
WHERE chain_id = 138
AND address = '0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03';

View File

@@ -0,0 +1,26 @@
-- Add LINK token to tokens table for ChainID 138
-- Uses deployed MockLinkToken address on ChainID 138
-- NOTE(review): the description text says "Official ... from Ethereum Mainnet",
-- but the address is the local MockLinkToken deployment — confirm wording is intended.
INSERT INTO tokens (chain_id, address, type, name, symbol, decimals, verified, description, logo_url, website_url)
VALUES (
    138,
    '0xb7721dD53A8c629d9f1Ba31a5819AFe250002b03',
    'ERC20',
    'Chainlink Token',
    'LINK',
    18,
    true,
    'Official Chainlink LINK token from Ethereum Mainnet. Used for CCIP fees and Chainlink services.',
    'https://raw.githubusercontent.com/chainlink/chainlink-docs/main/docs/images/chainlink-logo.svg',
    'https://chain.link/'
)
-- Upsert keyed on the tokens table's UNIQUE (chain_id, address); re-running
-- refreshes metadata instead of failing.
ON CONFLICT (chain_id, address) DO UPDATE SET
    name = EXCLUDED.name,
    symbol = EXCLUDED.symbol,
    decimals = EXCLUDED.decimals,
    verified = EXCLUDED.verified,
    description = EXCLUDED.description,
    logo_url = EXCLUDED.logo_url,
    website_url = EXCLUDED.website_url,
    updated_at = NOW();

View File

@@ -0,0 +1,21 @@
-- Rollback migration for Track 2-4 Schema
-- Reverses the Track 2-4 migration: triggers first, then the shared function,
-- then tables in reverse dependency order.
DROP TRIGGER IF EXISTS update_operator_roles_updated_at ON operator_roles;
DROP TRIGGER IF EXISTS update_analytics_flows_updated_at ON analytics_flows;
DROP TRIGGER IF EXISTS update_token_balances_updated_at ON token_balances;
DROP TRIGGER IF EXISTS update_addresses_updated_at ON addresses;
DROP FUNCTION IF EXISTS update_updated_at_column();
DROP TABLE IF EXISTS wallet_nonces;
DROP TABLE IF EXISTS operator_roles;
DROP TABLE IF EXISTS operator_ip_whitelist;
DROP TABLE IF EXISTS operator_events;
DROP MATERIALIZED VIEW IF EXISTS token_distribution;
DROP TABLE IF EXISTS analytics_bridge_history;
DROP TABLE IF EXISTS analytics_flows;
DROP TABLE IF EXISTS internal_transactions;
DROP TABLE IF EXISTS token_balances;
-- WARNING(review): a table named token_transfers also exists in the base
-- explorer schema (partitioned). If the Track 2-4 CREATE was silently skipped
-- by IF NOT EXISTS, this DROP would remove the base table instead — verify
-- which schema owns this name before running the rollback.
DROP TABLE IF EXISTS token_transfers;
DROP TABLE IF EXISTS addresses;

View File

@@ -0,0 +1,234 @@
-- Migration: Track 2-4 Schema
-- Description: Creates tables for indexed explorer (Track 2), analytics (Track 3), and operator tools (Track 4)
-- Track 2: Indexed Address Data — per-address aggregate activity counters.
CREATE TABLE IF NOT EXISTS addresses (
    id SERIAL PRIMARY KEY,
    -- NOTE(review): address is globally UNIQUE even though chain_id is stored;
    -- the same address appearing on two chains would conflict — confirm whether
    -- the unique constraint should be (address, chain_id).
    address VARCHAR(42) NOT NULL UNIQUE,
    chain_id INTEGER NOT NULL,
    first_seen_block BIGINT,
    first_seen_timestamp TIMESTAMP WITH TIME ZONE,
    last_seen_block BIGINT,
    last_seen_timestamp TIMESTAMP WITH TIME ZONE,
    tx_count_sent INTEGER DEFAULT 0,
    tx_count_received INTEGER DEFAULT 0,
    total_sent_wei NUMERIC(78, 0) DEFAULT 0,
    total_received_wei NUMERIC(78, 0) DEFAULT 0,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_addresses_address ON addresses(address);
CREATE INDEX IF NOT EXISTS idx_addresses_chain_id ON addresses(chain_id);
CREATE INDEX IF NOT EXISTS idx_addresses_first_seen ON addresses(first_seen_timestamp);
CREATE INDEX IF NOT EXISTS idx_addresses_last_seen ON addresses(last_seen_timestamp);
-- Track 2: Token Transfers (ERC-20)
-- WARNING(review): a partitioned table named token_transfers already exists in
-- the base explorer schema with a different column set. IF NOT EXISTS will
-- silently skip this CREATE, and the indexes below on token_contract will then
-- fail (the base table has token_address, not token_contract). These two
-- migrations cannot share one database as written — rename one table or
-- confirm they target separate databases.
CREATE TABLE IF NOT EXISTS token_transfers (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    log_index INTEGER NOT NULL,
    block_number BIGINT NOT NULL,
    block_hash VARCHAR(66) NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    token_contract VARCHAR(42) NOT NULL,
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    value NUMERIC(78, 0) NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    -- One row per emitted Transfer log.
    UNIQUE(chain_id, transaction_hash, log_index)
);
CREATE INDEX idx_token_transfers_token ON token_transfers(token_contract);
CREATE INDEX idx_token_transfers_from ON token_transfers(from_address);
CREATE INDEX idx_token_transfers_to ON token_transfers(to_address);
CREATE INDEX idx_token_transfers_block ON token_transfers(block_number);
CREATE INDEX idx_token_transfers_timestamp ON token_transfers(timestamp);
CREATE INDEX idx_token_transfers_tx_hash ON token_transfers(transaction_hash);
-- Track 2: Token Balances (Snapshots) — latest known balance per (holder, token, chain).
CREATE TABLE IF NOT EXISTS token_balances (
    id SERIAL PRIMARY KEY,
    address VARCHAR(42) NOT NULL,
    token_contract VARCHAR(42) NOT NULL,
    chain_id INTEGER NOT NULL,
    balance NUMERIC(78, 0) NOT NULL DEFAULT 0,
    -- Human-readable balance (balance scaled by token decimals).
    balance_formatted NUMERIC(78, 18),
    last_updated_block BIGINT,
    last_updated_timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(address, token_contract, chain_id)
);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_token_balances_address ON token_balances(address);
CREATE INDEX IF NOT EXISTS idx_token_balances_token ON token_balances(token_contract);
CREATE INDEX IF NOT EXISTS idx_token_balances_chain ON token_balances(chain_id);
-- Track 2: Internal Transactions — value-bearing calls extracted from traces.
CREATE TABLE IF NOT EXISTS internal_transactions (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    block_hash VARCHAR(66) NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    -- Position of the call within the trace tree.
    trace_address INTEGER[],
    from_address VARCHAR(42) NOT NULL,
    -- NULL to_address: contract-creation calls.
    to_address VARCHAR(42),
    value NUMERIC(78, 0) NOT NULL DEFAULT 0,
    gas_limit NUMERIC(78, 0),
    gas_used NUMERIC(78, 0),
    call_type VARCHAR(50),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_internal_txs_tx_hash ON internal_transactions(transaction_hash);
CREATE INDEX IF NOT EXISTS idx_internal_txs_from ON internal_transactions(from_address);
CREATE INDEX IF NOT EXISTS idx_internal_txs_to ON internal_transactions(to_address);
CREATE INDEX IF NOT EXISTS idx_internal_txs_block ON internal_transactions(block_number);
CREATE INDEX IF NOT EXISTS idx_internal_txs_timestamp ON internal_transactions(timestamp);
-- Track 3: Analytics Flows (Address → Address) — aggregated transfer edges
-- between address pairs, per token (token_contract NULL presumably = native coin).
CREATE TABLE IF NOT EXISTS analytics_flows (
    id SERIAL PRIMARY KEY,
    chain_id INTEGER NOT NULL,
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    token_contract VARCHAR(42),
    total_amount NUMERIC(78, 0) NOT NULL DEFAULT 0,
    transfer_count INTEGER NOT NULL DEFAULT 0,
    first_seen TIMESTAMP WITH TIME ZONE NOT NULL,
    last_seen TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    -- NOTE(review): token_contract NULL is treated as distinct by UNIQUE, so
    -- duplicate native-coin rows per pair are possible — consider a partial
    -- unique index WHERE token_contract IS NULL; confirm intended semantics.
    UNIQUE(chain_id, from_address, to_address, token_contract)
);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_analytics_flows_from ON analytics_flows(from_address);
CREATE INDEX IF NOT EXISTS idx_analytics_flows_to ON analytics_flows(to_address);
CREATE INDEX IF NOT EXISTS idx_analytics_flows_token ON analytics_flows(token_contract);
CREATE INDEX IF NOT EXISTS idx_analytics_flows_last_seen ON analytics_flows(last_seen);
-- Track 3: Bridge Analytics History — one row per observed cross-chain transfer.
CREATE TABLE IF NOT EXISTS analytics_bridge_history (
    id SERIAL PRIMARY KEY,
    chain_from INTEGER NOT NULL,
    chain_to INTEGER NOT NULL,
    token_contract VARCHAR(42),
    transfer_hash VARCHAR(66) NOT NULL,
    from_address VARCHAR(42) NOT NULL,
    to_address VARCHAR(42) NOT NULL,
    amount NUMERIC(78, 0) NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    status VARCHAR(50) NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_bridge_history_chains ON analytics_bridge_history(chain_from, chain_to);
CREATE INDEX IF NOT EXISTS idx_bridge_history_token ON analytics_bridge_history(token_contract);
CREATE INDEX IF NOT EXISTS idx_bridge_history_timestamp ON analytics_bridge_history(timestamp);
CREATE INDEX IF NOT EXISTS idx_bridge_history_from ON analytics_bridge_history(from_address);
-- Track 3: Token Distribution (Materialized View) — per-token holder statistics
-- computed from token_balances; refreshed out of band (REFRESH MATERIALIZED VIEW).
CREATE MATERIALIZED VIEW IF NOT EXISTS token_distribution AS
SELECT
    token_contract,
    chain_id,
    COUNT(DISTINCT address) as holder_count,
    SUM(balance) as total_balance,
    AVG(balance) as avg_balance,
    PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY balance) as median_balance,
    MAX(balance) as max_balance,
    MIN(balance) as min_balance,
    COUNT(*) FILTER (WHERE balance > 0) as active_holders,
    NOW() as last_updated
FROM token_balances
GROUP BY token_contract, chain_id;
-- Unique index is required for REFRESH MATERIALIZED VIEW CONCURRENTLY.
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE above.
CREATE UNIQUE INDEX IF NOT EXISTS idx_token_distribution_unique ON token_distribution(token_contract, chain_id);
CREATE INDEX IF NOT EXISTS idx_token_distribution_holders ON token_distribution(holder_count);
-- Track 4: Operator Events (Audit Log) — append-only record of operator actions.
CREATE TABLE IF NOT EXISTS operator_events (
    id SERIAL PRIMARY KEY,
    event_type VARCHAR(100) NOT NULL,
    chain_id INTEGER,
    operator_address VARCHAR(42) NOT NULL,
    target_resource VARCHAR(200),
    action VARCHAR(100) NOT NULL,
    details JSONB,
    ip_address INET,
    user_agent TEXT,
    timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE statements.
CREATE INDEX IF NOT EXISTS idx_operator_events_type ON operator_events(event_type);
CREATE INDEX IF NOT EXISTS idx_operator_events_operator ON operator_events(operator_address);
CREATE INDEX IF NOT EXISTS idx_operator_events_timestamp ON operator_events(timestamp);
CREATE INDEX IF NOT EXISTS idx_operator_events_chain ON operator_events(chain_id);
-- Track 4: Operator IP Whitelist — allowed source IPs per operator address.
CREATE TABLE IF NOT EXISTS operator_ip_whitelist (
    id SERIAL PRIMARY KEY,
    operator_address VARCHAR(42) NOT NULL,
    ip_address INET NOT NULL,
    description TEXT,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(operator_address, ip_address)
);
CREATE INDEX IF NOT EXISTS idx_operator_whitelist_operator ON operator_ip_whitelist(operator_address);
CREATE INDEX IF NOT EXISTS idx_operator_whitelist_ip ON operator_ip_whitelist(ip_address);
-- Track 4: Operator Roles — role assignments and approval workflow per operator.
CREATE TABLE IF NOT EXISTS operator_roles (
    id SERIAL PRIMARY KEY,
    address VARCHAR(42) NOT NULL UNIQUE,
    track_level INTEGER NOT NULL DEFAULT 4,
    roles TEXT[],
    approved BOOLEAN DEFAULT FALSE,
    approved_by VARCHAR(42),
    approved_at TIMESTAMP WITH TIME ZONE,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_operator_roles_address ON operator_roles(address);
CREATE INDEX IF NOT EXISTS idx_operator_roles_approved ON operator_roles(approved);
-- Wallet Authentication: Nonce storage — one short-lived login nonce per address.
CREATE TABLE IF NOT EXISTS wallet_nonces (
    id SERIAL PRIMARY KEY,
    address VARCHAR(42) NOT NULL UNIQUE,
    nonce VARCHAR(64) NOT NULL,
    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_wallet_nonces_address ON wallet_nonces(address);
CREATE INDEX IF NOT EXISTS idx_wallet_nonces_expires ON wallet_nonces(expires_at);
-- Update triggers for updated_at
-- Shared trigger function: bump updated_at on every row update.
CREATE OR REPLACE FUNCTION update_updated_at_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ language 'plpgsql';
-- CREATE TRIGGER has no IF NOT EXISTS, so drop-then-create keeps the migration
-- idempotent — the same pattern the timestamp_iso migration uses.
DROP TRIGGER IF EXISTS update_addresses_updated_at ON addresses;
CREATE TRIGGER update_addresses_updated_at BEFORE UPDATE ON addresses
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
DROP TRIGGER IF EXISTS update_token_balances_updated_at ON token_balances;
CREATE TRIGGER update_token_balances_updated_at BEFORE UPDATE ON token_balances
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
DROP TRIGGER IF EXISTS update_analytics_flows_updated_at ON analytics_flows;
CREATE TRIGGER update_analytics_flows_updated_at BEFORE UPDATE ON analytics_flows
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
DROP TRIGGER IF EXISTS update_operator_roles_updated_at ON operator_roles;
CREATE TRIGGER update_operator_roles_updated_at BEFORE UPDATE ON operator_roles
    FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();

View File

@@ -0,0 +1,11 @@
-- Migration: Token Aggregation Schema (Rollback)
-- Description: Drops tables created for token aggregation
-- Drop tables in reverse order (respecting dependencies)
-- CASCADE also removes per-chain partitions and dependent triggers/indexes.
DROP TABLE IF EXISTS swap_events CASCADE;
DROP TABLE IF EXISTS token_signals CASCADE;
DROP TABLE IF EXISTS external_api_cache CASCADE;
DROP TABLE IF EXISTS token_ohlcv CASCADE;
DROP TABLE IF EXISTS pool_reserves_history CASCADE;
DROP TABLE IF EXISTS liquidity_pools CASCADE;
DROP TABLE IF EXISTS token_market_data CASCADE;

View File

@@ -0,0 +1,228 @@
-- Migration: Token Aggregation Schema
-- Description: Creates tables for token market data, liquidity pools, OHLCV, and external API cache
-- Supports ChainID 138 and 651940
-- Token Market Data - Aggregated market metrics per token
CREATE TABLE IF NOT EXISTS token_market_data (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    token_address VARCHAR(42) NOT NULL,
    price_usd NUMERIC(30, 8),
    price_change_24h NUMERIC(10, 4),
    volume_24h NUMERIC(30, 8) DEFAULT 0,
    volume_7d NUMERIC(30, 8) DEFAULT 0,
    volume_30d NUMERIC(30, 8) DEFAULT 0,
    market_cap_usd NUMERIC(30, 8),
    liquidity_usd NUMERIC(30, 8) DEFAULT 0,
    holders_count INTEGER DEFAULT 0,
    transfers_24h INTEGER DEFAULT 0,
    last_updated TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    -- PostgreSQL requires the partition key in every PK/UNIQUE constraint on a
    -- partitioned table, so the PK is composite rather than id alone.
    PRIMARY KEY (id, chain_id),
    UNIQUE(chain_id, token_address)
) PARTITION BY LIST (chain_id);
-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS token_market_data_chain_138 PARTITION OF token_market_data FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS token_market_data_chain_651940 PARTITION OF token_market_data FOR VALUES IN (651940);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE statements.
CREATE INDEX IF NOT EXISTS idx_token_market_data_chain_token ON token_market_data(chain_id, token_address);
CREATE INDEX IF NOT EXISTS idx_token_market_data_price ON token_market_data(price_usd) WHERE price_usd IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_token_market_data_volume ON token_market_data(volume_24h) WHERE volume_24h > 0;
CREATE INDEX IF NOT EXISTS idx_token_market_data_last_updated ON token_market_data(last_updated);
-- Liquidity Pools - DEX pool information
CREATE TABLE IF NOT EXISTS liquidity_pools (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    pool_address VARCHAR(42) NOT NULL,
    token0_address VARCHAR(42) NOT NULL,
    token1_address VARCHAR(42) NOT NULL,
    dex_type VARCHAR(20) NOT NULL CHECK (dex_type IN ('uniswap_v2', 'uniswap_v3', 'dodo', 'custom')),
    factory_address VARCHAR(42),
    router_address VARCHAR(42),
    reserve0 NUMERIC(78, 0) DEFAULT 0,
    reserve1 NUMERIC(78, 0) DEFAULT 0,
    reserve0_usd NUMERIC(30, 8) DEFAULT 0,
    reserve1_usd NUMERIC(30, 8) DEFAULT 0,
    total_liquidity_usd NUMERIC(30, 8) DEFAULT 0,
    volume_24h NUMERIC(30, 8) DEFAULT 0,
    fee_tier INTEGER, -- For UniswapV3 (500, 3000, 10000)
    created_at_block BIGINT,
    created_at_timestamp TIMESTAMP WITH TIME ZONE,
    last_updated TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    -- Composite PK: partitioned tables must include the partition key in the PK.
    PRIMARY KEY (id, chain_id),
    UNIQUE(chain_id, pool_address)
) PARTITION BY LIST (chain_id);
-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS liquidity_pools_chain_138 PARTITION OF liquidity_pools FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS liquidity_pools_chain_651940 PARTITION OF liquidity_pools FOR VALUES IN (651940);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE statements.
CREATE INDEX IF NOT EXISTS idx_liquidity_pools_chain_pool ON liquidity_pools(chain_id, pool_address);
CREATE INDEX IF NOT EXISTS idx_liquidity_pools_token0 ON liquidity_pools(chain_id, token0_address);
CREATE INDEX IF NOT EXISTS idx_liquidity_pools_token1 ON liquidity_pools(chain_id, token1_address);
CREATE INDEX IF NOT EXISTS idx_liquidity_pools_dex_type ON liquidity_pools(chain_id, dex_type);
CREATE INDEX IF NOT EXISTS idx_liquidity_pools_tvl ON liquidity_pools(total_liquidity_usd) WHERE total_liquidity_usd > 0;
-- Pool Reserves History - Time-series snapshots of pool reserves
CREATE TABLE IF NOT EXISTS pool_reserves_history (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    pool_address VARCHAR(42) NOT NULL,
    reserve0 NUMERIC(78, 0) NOT NULL,
    reserve1 NUMERIC(78, 0) NOT NULL,
    reserve0_usd NUMERIC(30, 8),
    reserve1_usd NUMERIC(30, 8),
    total_liquidity_usd NUMERIC(30, 8),
    block_number BIGINT NOT NULL,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    -- Composite PK: partitioned tables must include the partition key in the PK.
    PRIMARY KEY (id, chain_id)
) PARTITION BY LIST (chain_id);
-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS pool_reserves_history_chain_138 PARTITION OF pool_reserves_history FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS pool_reserves_history_chain_651940 PARTITION OF pool_reserves_history FOR VALUES IN (651940);
-- Convert to hypertable for TimescaleDB time-series optimization
-- FIXME(review): TimescaleDB cannot convert a declaratively partitioned table
-- into a hypertable — this call is expected to fail while PARTITION BY LIST is
-- present. Choose ONE partitioning strategy (hypertable with a chain_id space
-- dimension, or plain LIST partitioning) — confirm with the deployment target.
SELECT create_hypertable('pool_reserves_history', 'timestamp',
    chunk_time_interval => INTERVAL '1 day',
    if_not_exists => TRUE
);
CREATE INDEX IF NOT EXISTS idx_pool_reserves_history_pool_time ON pool_reserves_history(chain_id, pool_address, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_pool_reserves_history_timestamp ON pool_reserves_history(timestamp DESC);
-- Token OHLCV - Open, High, Low, Close, Volume data by interval
CREATE TABLE IF NOT EXISTS token_ohlcv (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    token_address VARCHAR(42) NOT NULL,
    pool_address VARCHAR(42), -- Optional: specific pool, NULL = aggregated across all pools
    interval_type VARCHAR(10) NOT NULL CHECK (interval_type IN ('5m', '15m', '1h', '4h', '24h')),
    open_price NUMERIC(30, 8) NOT NULL,
    high_price NUMERIC(30, 8) NOT NULL,
    low_price NUMERIC(30, 8) NOT NULL,
    close_price NUMERIC(30, 8) NOT NULL,
    volume NUMERIC(30, 8) DEFAULT 0,
    volume_usd NUMERIC(30, 8) DEFAULT 0,
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    -- Composite PK: partitioned tables must include the partition key in the PK.
    PRIMARY KEY (id, chain_id),
    -- NOTE(review): NULLs are distinct in UNIQUE, so duplicate aggregated
    -- (pool_address IS NULL) candles are possible — consider a partial unique
    -- index for the NULL case; confirm intended semantics.
    UNIQUE(chain_id, token_address, pool_address, interval_type, timestamp)
) PARTITION BY LIST (chain_id);
-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS token_ohlcv_chain_138 PARTITION OF token_ohlcv FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS token_ohlcv_chain_651940 PARTITION OF token_ohlcv FOR VALUES IN (651940);
-- Convert to hypertable for TimescaleDB time-series optimization
-- FIXME(review): create_hypertable fails on declaratively partitioned tables —
-- see the note on pool_reserves_history; pick one partitioning strategy.
SELECT create_hypertable('token_ohlcv', 'timestamp',
    chunk_time_interval => INTERVAL '7 days',
    if_not_exists => TRUE
);
CREATE INDEX IF NOT EXISTS idx_token_ohlcv_token_time ON token_ohlcv(chain_id, token_address, interval_type, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_token_ohlcv_pool_time ON token_ohlcv(chain_id, pool_address, interval_type, timestamp DESC) WHERE pool_address IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_token_ohlcv_timestamp ON token_ohlcv(timestamp DESC);
-- External API Cache - Cached responses from external APIs (CoinGecko, CMC,
-- DexScreener) with a TTL via expires_at.
CREATE TABLE IF NOT EXISTS external_api_cache (
    id BIGSERIAL PRIMARY KEY,
    api_provider VARCHAR(50) NOT NULL CHECK (api_provider IN ('coingecko', 'coinmarketcap', 'dexscreener')),
    cache_key VARCHAR(255) NOT NULL,
    chain_id INTEGER,
    token_address VARCHAR(42),
    response_data JSONB NOT NULL,
    expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    UNIQUE(api_provider, cache_key)
);
-- IF NOT EXISTS keeps the migration idempotent, matching the CREATE TABLE above.
CREATE INDEX IF NOT EXISTS idx_external_api_cache_provider_key ON external_api_cache(api_provider, cache_key);
CREATE INDEX IF NOT EXISTS idx_external_api_cache_chain_token ON external_api_cache(chain_id, token_address) WHERE chain_id IS NOT NULL AND token_address IS NOT NULL;
-- Supports efficient sweeping of expired cache entries.
CREATE INDEX IF NOT EXISTS idx_external_api_cache_expires ON external_api_cache(expires_at);
-- Token Signals - Trending and growth metrics
CREATE TABLE IF NOT EXISTS token_signals (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    token_address VARCHAR(42) NOT NULL,
    tx_count_growth_24h NUMERIC(10, 4) DEFAULT 0, -- Percentage change
    unique_wallets_24h INTEGER DEFAULT 0,
    unique_wallets_growth_24h NUMERIC(10, 4) DEFAULT 0,
    swap_count_24h INTEGER DEFAULT 0,
    swap_count_growth_24h NUMERIC(10, 4) DEFAULT 0,
    new_lp_creations_24h INTEGER DEFAULT 0,
    attention_score NUMERIC(10, 4) DEFAULT 0, -- Composite score 0-100
    trending_rank INTEGER, -- Rank among trending tokens
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    -- Composite PK: partitioned tables must include the partition key in the PK.
    PRIMARY KEY (id, chain_id),
    UNIQUE(chain_id, token_address, timestamp)
) PARTITION BY LIST (chain_id);
-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS token_signals_chain_138 PARTITION OF token_signals FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS token_signals_chain_651940 PARTITION OF token_signals FOR VALUES IN (651940);
-- Convert to hypertable for TimescaleDB time-series optimization
-- FIXME(review): create_hypertable fails on declaratively partitioned tables —
-- see the note on pool_reserves_history; pick one partitioning strategy.
SELECT create_hypertable('token_signals', 'timestamp',
    chunk_time_interval => INTERVAL '1 day',
    if_not_exists => TRUE
);
CREATE INDEX IF NOT EXISTS idx_token_signals_token_time ON token_signals(chain_id, token_address, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_token_signals_attention ON token_signals(chain_id, attention_score DESC, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_token_signals_trending ON token_signals(chain_id, trending_rank, timestamp DESC) WHERE trending_rank IS NOT NULL;
-- Swap Events - Track individual swap events for volume calculation
CREATE TABLE IF NOT EXISTS swap_events (
    id BIGSERIAL,
    chain_id INTEGER NOT NULL,
    pool_address VARCHAR(42) NOT NULL,
    transaction_hash VARCHAR(66) NOT NULL,
    block_number BIGINT NOT NULL,
    log_index INTEGER NOT NULL,
    token0_address VARCHAR(42) NOT NULL,
    token1_address VARCHAR(42) NOT NULL,
    amount0_in NUMERIC(78, 0) DEFAULT 0,
    amount1_in NUMERIC(78, 0) DEFAULT 0,
    amount0_out NUMERIC(78, 0) DEFAULT 0,
    amount1_out NUMERIC(78, 0) DEFAULT 0,
    amount_usd NUMERIC(30, 8), -- Calculated USD value
    sender VARCHAR(42),
    to_address VARCHAR(42),
    timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
    -- Composite PK: partitioned tables must include the partition key in the PK.
    PRIMARY KEY (id, chain_id),
    UNIQUE(chain_id, transaction_hash, log_index)
) PARTITION BY LIST (chain_id);
-- Create partitions for supported chains
CREATE TABLE IF NOT EXISTS swap_events_chain_138 PARTITION OF swap_events FOR VALUES IN (138);
CREATE TABLE IF NOT EXISTS swap_events_chain_651940 PARTITION OF swap_events FOR VALUES IN (651940);
-- Convert to hypertable for TimescaleDB time-series optimization
-- FIXME(review): create_hypertable fails on declaratively partitioned tables —
-- see the note on pool_reserves_history; pick one partitioning strategy.
SELECT create_hypertable('swap_events', 'timestamp',
    chunk_time_interval => INTERVAL '1 day',
    if_not_exists => TRUE
);
CREATE INDEX IF NOT EXISTS idx_swap_events_pool_time ON swap_events(chain_id, pool_address, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_swap_events_token0 ON swap_events(chain_id, token0_address, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_swap_events_token1 ON swap_events(chain_id, token1_address, timestamp DESC);
CREATE INDEX IF NOT EXISTS idx_swap_events_tx_hash ON swap_events(chain_id, transaction_hash);
CREATE INDEX IF NOT EXISTS idx_swap_events_block ON swap_events(chain_id, block_number);
-- Update triggers for last_updated
-- NOTE: token_market_data and liquidity_pools have a last_updated column, not
-- updated_at. The generic update_updated_at_column() sets NEW.updated_at and
-- would therefore fail at runtime on every UPDATE ("record new has no field
-- updated_at"), so a dedicated function that bumps last_updated is used instead.
CREATE OR REPLACE FUNCTION update_last_updated_column()
RETURNS TRIGGER AS $$
BEGIN
    NEW.last_updated = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Drop-then-create keeps trigger creation idempotent (CREATE TRIGGER has no IF NOT EXISTS).
DROP TRIGGER IF EXISTS update_token_market_data_updated_at ON token_market_data;
CREATE TRIGGER update_token_market_data_updated_at BEFORE UPDATE ON token_market_data
    FOR EACH ROW EXECUTE FUNCTION update_last_updated_column();
DROP TRIGGER IF EXISTS update_liquidity_pools_updated_at ON liquidity_pools;
CREATE TRIGGER update_liquidity_pools_updated_at BEFORE UPDATE ON liquidity_pools
    FOR EACH ROW EXECUTE FUNCTION update_last_updated_column();
-- Comments for documentation
COMMENT ON TABLE token_market_data IS 'Aggregated market data per token including price, volume, market cap, and liquidity';
COMMENT ON TABLE liquidity_pools IS 'DEX liquidity pool information with reserves and TVL';
COMMENT ON TABLE pool_reserves_history IS 'Time-series history of pool reserve snapshots';
COMMENT ON TABLE token_ohlcv IS 'OHLCV (Open, High, Low, Close, Volume) data for token price charts';
COMMENT ON TABLE external_api_cache IS 'Cached responses from external APIs (CoinGecko, CMC, DexScreener)';
COMMENT ON TABLE token_signals IS 'Trending signals and growth metrics for tokens';
COMMENT ON TABLE swap_events IS 'Individual swap events from DEX pools for volume calculation';

View File

@@ -0,0 +1,9 @@
-- Migration: Admin Configuration Schema (Rollback)
-- Description: Drops tables created for admin configuration
-- Dependent tables (audit log, sessions) are dropped before admin_users;
-- CASCADE removes any remaining dependent objects such as foreign keys.
DROP TABLE IF EXISTS admin_audit_log CASCADE;
DROP TABLE IF EXISTS admin_sessions CASCADE;
DROP TABLE IF EXISTS admin_users CASCADE;
DROP TABLE IF EXISTS dex_factory_config CASCADE;
DROP TABLE IF EXISTS api_endpoints CASCADE;
DROP TABLE IF EXISTS api_keys CASCADE;

View File

@@ -0,0 +1,133 @@
-- Migration: Admin Configuration Schema
-- Description: Creates tables for managing API keys, endpoints, and service configuration
-- For Token Aggregation Service Control Panel
-- API Keys Management
-- One row per external-provider API key. Only api_key_encrypted is stored;
-- encryption/decryption is presumably handled at the application layer --
-- confirm before reusing this table from other services.
CREATE TABLE IF NOT EXISTS api_keys (
id BIGSERIAL PRIMARY KEY,
provider VARCHAR(50) NOT NULL CHECK (provider IN ('coingecko', 'coinmarketcap', 'dexscreener', 'custom')),
key_name VARCHAR(255) NOT NULL,
api_key_encrypted TEXT NOT NULL,
is_active BOOLEAN DEFAULT true,
rate_limit_per_minute INTEGER,
rate_limit_per_day INTEGER,
last_used_at TIMESTAMP WITH TIME ZONE,
expires_at TIMESTAMP WITH TIME ZONE,
created_by VARCHAR(255),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
UNIQUE(provider, key_name)
);
CREATE INDEX idx_api_keys_provider ON api_keys(provider);
-- Partial index: lookups only need to consider active keys
CREATE INDEX idx_api_keys_active ON api_keys(is_active) WHERE is_active = true;
-- API Endpoints Configuration
-- Per-chain RPC/explorer/indexer endpoints. Note that is_primary is not
-- constrained to a single row per (chain_id, endpoint_type) by the schema.
CREATE TABLE IF NOT EXISTS api_endpoints (
id BIGSERIAL PRIMARY KEY,
chain_id INTEGER NOT NULL,
endpoint_type VARCHAR(50) NOT NULL CHECK (endpoint_type IN ('rpc', 'explorer', 'indexer', 'custom')),
endpoint_name VARCHAR(255) NOT NULL,
endpoint_url TEXT NOT NULL,
is_primary BOOLEAN DEFAULT false,
is_active BOOLEAN DEFAULT true,
requires_auth BOOLEAN DEFAULT false,
auth_type VARCHAR(50),
auth_config JSONB,
rate_limit_per_minute INTEGER,
timeout_ms INTEGER DEFAULT 10000,
health_check_enabled BOOLEAN DEFAULT true,
last_health_check TIMESTAMP WITH TIME ZONE,
health_check_status VARCHAR(20),
created_by VARCHAR(255),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
UNIQUE(chain_id, endpoint_type, endpoint_name)
);
CREATE INDEX idx_api_endpoints_chain ON api_endpoints(chain_id);
CREATE INDEX idx_api_endpoints_type ON api_endpoints(endpoint_type);
CREATE INDEX idx_api_endpoints_active ON api_endpoints(is_active) WHERE is_active = true;
-- DEX Factory Configuration
-- Factory/router addresses per chain and DEX type; start_block marks where
-- event indexing for this factory begins (0 = from genesis).
CREATE TABLE IF NOT EXISTS dex_factory_config (
id BIGSERIAL PRIMARY KEY,
chain_id INTEGER NOT NULL,
dex_type VARCHAR(20) NOT NULL CHECK (dex_type IN ('uniswap_v2', 'uniswap_v3', 'dodo', 'custom')),
factory_address VARCHAR(42) NOT NULL,
router_address VARCHAR(42),
pool_manager_address VARCHAR(42),
start_block BIGINT DEFAULT 0,
is_active BOOLEAN DEFAULT true,
description TEXT,
created_by VARCHAR(255),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
UNIQUE(chain_id, dex_type, factory_address)
);
CREATE INDEX idx_dex_factory_chain ON dex_factory_config(chain_id);
CREATE INDEX idx_dex_factory_type ON dex_factory_config(dex_type);
-- Admin Users
-- Only password_hash is stored (never the plaintext password).
CREATE TABLE IF NOT EXISTS admin_users (
id BIGSERIAL PRIMARY KEY,
username VARCHAR(255) NOT NULL UNIQUE,
email VARCHAR(255),
password_hash TEXT NOT NULL,
role VARCHAR(50) DEFAULT 'admin' CHECK (role IN ('super_admin', 'admin', 'operator', 'viewer')),
is_active BOOLEAN DEFAULT true,
last_login TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX idx_admin_users_username ON admin_users(username);
CREATE INDEX idx_admin_users_active ON admin_users(is_active) WHERE is_active = true;
-- Admin Sessions
-- Sessions cascade-delete with their user.
-- NOTE(review): session_token is stored and indexed in plaintext; consider
-- storing a hash of the token instead -- confirm with the auth layer.
CREATE TABLE IF NOT EXISTS admin_sessions (
id BIGSERIAL PRIMARY KEY,
user_id BIGINT NOT NULL REFERENCES admin_users(id) ON DELETE CASCADE,
session_token VARCHAR(255) NOT NULL UNIQUE,
expires_at TIMESTAMP WITH TIME ZONE NOT NULL,
ip_address INET,
user_agent TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX idx_admin_sessions_token ON admin_sessions(session_token);
CREATE INDEX idx_admin_sessions_user ON admin_sessions(user_id);
-- Audit Log
-- Append-only record of admin actions; old_values/new_values capture the
-- before/after state of the touched resource as JSON. user_id is nullable
-- and NOT cascaded, so log rows survive user deletion.
CREATE TABLE IF NOT EXISTS admin_audit_log (
id BIGSERIAL PRIMARY KEY,
user_id BIGINT REFERENCES admin_users(id),
action VARCHAR(100) NOT NULL,
resource_type VARCHAR(50) NOT NULL,
resource_id BIGINT,
old_values JSONB,
new_values JSONB,
ip_address INET,
user_agent TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX idx_audit_log_user ON admin_audit_log(user_id);
CREATE INDEX idx_audit_log_resource ON admin_audit_log(resource_type, resource_id);
CREATE INDEX idx_audit_log_created ON admin_audit_log(created_at DESC);
-- Update triggers (if update_updated_at_column function exists)
-- Guarded in a DO block so this migration also works on databases where the
-- shared trigger function has not been installed yet.
DO $$
BEGIN
IF EXISTS (SELECT 1 FROM pg_proc WHERE proname = 'update_updated_at_column') THEN
CREATE TRIGGER update_api_keys_updated_at BEFORE UPDATE ON api_keys
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_api_endpoints_updated_at BEFORE UPDATE ON api_endpoints
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_dex_factory_config_updated_at BEFORE UPDATE ON dex_factory_config
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
CREATE TRIGGER update_admin_users_updated_at BEFORE UPDATE ON admin_users
FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
END IF;
END $$;

View File

@@ -0,0 +1,165 @@
package migrations
import (
"database/sql"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
_ "github.com/jackc/pgx/v5/stdlib"
)
// Migration represents a database migration: a version identifier derived
// from the SQL filename plus the SQL to apply and to roll back.
type Migration struct {
	Version string // Identifier derived from the filename, e.g. "001_init" for 001_init.up.sql
	Up      string // SQL executed when applying the migration
	Down    string // SQL executed when rolling the migration back
}
// Migrator handles database migrations: it discovers *.up.sql/*.down.sql
// files, tracks applied versions in a schema_migrations table, and applies
// pending migrations transactionally.
type Migrator struct {
	db *sql.DB // Target database handle; each migration runs in its own transaction
}
// NewMigrator creates a new migrator that applies migrations against db.
func NewMigrator(db *sql.DB) *Migrator {
	migrator := &Migrator{db: db}
	return migrator
}
// RunMigrations runs all pending migrations found in migrationsDir,
// in ascending version order, skipping any version already recorded
// in the schema_migrations table.
func (m *Migrator) RunMigrations(migrationsDir string) error {
	// Make sure the bookkeeping table exists before consulting it.
	if err := m.createMigrationsTable(); err != nil {
		return fmt.Errorf("failed to create migrations table: %w", err)
	}

	all, err := m.loadMigrations(migrationsDir)
	if err != nil {
		return fmt.Errorf("failed to load migrations: %w", err)
	}

	done, err := m.getAppliedMigrations()
	if err != nil {
		return fmt.Errorf("failed to get applied migrations: %w", err)
	}

	// Apply only the migrations that have not been recorded yet.
	for _, mig := range all {
		if done[mig.Version] {
			continue
		}
		if err := m.runMigration(mig); err != nil {
			return fmt.Errorf("failed to run migration %s: %w", mig.Version, err)
		}
	}
	return nil
}
// createMigrationsTable ensures the schema_migrations bookkeeping table
// exists; it records which migration versions have been applied.
func (m *Migrator) createMigrationsTable() error {
	const query = `
	CREATE TABLE IF NOT EXISTS schema_migrations (
		version VARCHAR(255) PRIMARY KEY,
		applied_at TIMESTAMP DEFAULT NOW()
	)
	`
	if _, err := m.db.Exec(query); err != nil {
		return err
	}
	return nil
}
// loadMigrations reads every *.up.sql / *.down.sql file in dir and pairs
// them into migrations, sorted by version in ascending order.
//
// The version is the filename with its .up.sql/.down.sql suffix removed,
// so "001_init.up.sql" and "001_init.down.sql" form one migration named
// "001_init". A version that only has a .down.sql file is reported as an
// error instead of silently producing an empty Up script that would be
// executed as a no-op and still recorded as applied.
//
// Note: the loop variable is deliberately not named m, which would shadow
// the method receiver.
func (m *Migrator) loadMigrations(dir string) ([]Migration, error) {
	files, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}

	byVersion := make(map[string]*Migration)
	hasUp := make(map[string]bool) // versions for which an .up.sql file was seen
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		filename := file.Name()
		isUp := strings.HasSuffix(filename, ".up.sql")
		isDown := strings.HasSuffix(filename, ".down.sql")
		if !isUp && !isDown {
			continue
		}

		version := strings.TrimSuffix(strings.TrimSuffix(filename, ".up.sql"), ".down.sql")
		mig := byVersion[version]
		if mig == nil {
			mig = &Migration{Version: version}
			byVersion[version] = mig
		}

		content, err := os.ReadFile(filepath.Join(dir, filename))
		if err != nil {
			return nil, err
		}
		if isUp {
			mig.Up = string(content)
			hasUp[version] = true
		} else {
			mig.Down = string(content)
		}
	}

	// Reject half-pairs and sort so migrations run in deterministic order.
	result := make([]Migration, 0, len(byVersion))
	for _, mig := range byVersion {
		if !hasUp[mig.Version] {
			return nil, fmt.Errorf("migration %s has a .down.sql file but no .up.sql file", mig.Version)
		}
		result = append(result, *mig)
	}
	sort.Slice(result, func(i, j int) bool {
		return result[i].Version < result[j].Version
	})
	return result, nil
}
// getAppliedMigrations returns the set of migration versions already
// recorded in the schema_migrations table.
func (m *Migrator) getAppliedMigrations() (map[string]bool, error) {
	rows, err := m.db.Query("SELECT version FROM schema_migrations")
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	seen := make(map[string]bool)
	for rows.Next() {
		var v string
		if err := rows.Scan(&v); err != nil {
			return nil, err
		}
		seen[v] = true
	}
	// rows.Err surfaces any iteration error that ended the loop early.
	return seen, rows.Err()
}
// runMigration applies a single migration and records its version, both
// inside one transaction so a failure leaves the database untouched.
func (m *Migrator) runMigration(mig Migration) error {
	tx, err := m.db.Begin()
	if err != nil {
		return err
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	// Execute the migration's SQL.
	if _, err := tx.Exec(mig.Up); err != nil {
		return fmt.Errorf("failed to execute migration: %w", err)
	}

	// Record that this version has been applied.
	if _, err := tx.Exec(
		"INSERT INTO schema_migrations (version) VALUES ($1)",
		mig.Version,
	); err != nil {
		return fmt.Errorf("failed to record migration: %w", err)
	}
	return tx.Commit()
}

View File

@@ -0,0 +1,87 @@
-- TimescaleDB schema for mempool transactions
-- This extends the main database with time-series capabilities
-- Mempool transactions hypertable
-- One row per observed pending transaction. `time` is the hypertable
-- partitioning column and is included in the primary key, as TimescaleDB
-- requires for unique constraints on hypertables.
CREATE TABLE IF NOT EXISTS mempool_transactions (
time TIMESTAMPTZ NOT NULL,
chain_id INTEGER NOT NULL,
hash VARCHAR(66) NOT NULL,
from_address VARCHAR(42) NOT NULL,
to_address VARCHAR(42),
-- NUMERIC(78, 0) is wide enough to hold a full uint256 value without loss
value NUMERIC(78, 0),
gas_price BIGINT,
max_fee_per_gas BIGINT,
max_priority_fee_per_gas BIGINT,
gas_limit BIGINT,
nonce BIGINT,
input_data_length INTEGER,
first_seen TIMESTAMPTZ NOT NULL,
status VARCHAR(20) DEFAULT 'pending',
confirmed_block_number BIGINT,
confirmed_at TIMESTAMPTZ,
PRIMARY KEY (time, chain_id, hash)
);
-- Convert to hypertable
SELECT create_hypertable('mempool_transactions', 'time', if_not_exists => TRUE);
-- Indexes
CREATE INDEX IF NOT EXISTS idx_mempool_chain_hash ON mempool_transactions(chain_id, hash);
CREATE INDEX IF NOT EXISTS idx_mempool_chain_from ON mempool_transactions(chain_id, from_address);
CREATE INDEX IF NOT EXISTS idx_mempool_chain_status ON mempool_transactions(chain_id, status, time);
-- Network metrics hypertable
-- Periodic per-chain snapshots: throughput (tps/gps), gas price, and backlog.
CREATE TABLE IF NOT EXISTS network_metrics (
time TIMESTAMPTZ NOT NULL,
chain_id INTEGER NOT NULL,
block_number BIGINT,
tps DOUBLE PRECISION,
gps DOUBLE PRECISION,
avg_gas_price BIGINT,
pending_transactions INTEGER,
block_time_seconds DOUBLE PRECISION,
PRIMARY KEY (time, chain_id)
);
SELECT create_hypertable('network_metrics', 'time', if_not_exists => TRUE);
CREATE INDEX IF NOT EXISTS idx_network_metrics_chain_time ON network_metrics(chain_id, time DESC);
-- Gas price history hypertable
-- Per-snapshot gas-price distribution: min/max/avg plus percentiles.
CREATE TABLE IF NOT EXISTS gas_price_history (
time TIMESTAMPTZ NOT NULL,
chain_id INTEGER NOT NULL,
block_number BIGINT,
min_gas_price BIGINT,
max_gas_price BIGINT,
avg_gas_price BIGINT,
p25_gas_price BIGINT,
p50_gas_price BIGINT,
p75_gas_price BIGINT,
p95_gas_price BIGINT,
p99_gas_price BIGINT,
PRIMARY KEY (time, chain_id)
);
SELECT create_hypertable('gas_price_history', 'time', if_not_exists => TRUE);
-- Continuous aggregate for 1-minute network metrics
CREATE MATERIALIZED VIEW IF NOT EXISTS network_metrics_1m
WITH (timescaledb.continuous) AS
SELECT
time_bucket('1 minute', time) AS bucket,
chain_id,
AVG(tps) AS avg_tps,
AVG(gps) AS avg_gps,
AVG(avg_gas_price) AS avg_gas_price,
AVG(pending_transactions) AS avg_pending_tx
FROM network_metrics
GROUP BY bucket, chain_id;
-- Add refresh policy for continuous aggregate
-- Re-materializes the trailing hour every minute, lagging real time by one
-- minute so still-open buckets are not materialized early.
SELECT add_continuous_aggregate_policy('network_metrics_1m',
start_offset => INTERVAL '1 hour',
end_offset => INTERVAL '1 minute',
schedule_interval => INTERVAL '1 minute',
if_not_exists => TRUE);

View File

@@ -0,0 +1,120 @@
package featureflags
// FeatureFlag represents a feature flag with track requirement
type FeatureFlag struct {
Name string
RequiredTrack int
Description string
}
// FeatureFlags maps feature names to their definitions.
//
// The RequiredTrack values used below group features into three tiers:
//   track 2 - explorer detail features (addresses, tokens, tx history, search)
//   track 3 - analytics features (dashboards, flow/bridge/risk analysis)
//   track 4 - operator features (control panel, validator status, protocol, bridge control)
var FeatureFlags = map[string]FeatureFlag{
	"address_full_detail": {
		Name:          "address_full_detail",
		RequiredTrack: 2,
		Description:   "Full address detail pages with transaction history",
	},
	"token_balances": {
		Name:          "token_balances",
		RequiredTrack: 2,
		Description:   "View token balances for addresses",
	},
	"tx_history": {
		Name:          "tx_history",
		RequiredTrack: 2,
		Description:   "Transaction history pagination",
	},
	"internal_txs": {
		Name:          "internal_txs",
		RequiredTrack: 2,
		Description:   "Internal transaction tracking",
	},
	"enhanced_search": {
		Name:          "enhanced_search",
		RequiredTrack: 2,
		Description:   "Enhanced search with token support",
	},
	"analytics_dashboard": {
		Name:          "analytics_dashboard",
		RequiredTrack: 3,
		Description:   "Analytics dashboard access",
	},
	"flow_tracking": {
		Name:          "flow_tracking",
		RequiredTrack: 3,
		Description:   "Address-to-address flow tracking",
	},
	"bridge_analytics": {
		Name:          "bridge_analytics",
		RequiredTrack: 3,
		Description:   "Bridge analytics and flow history",
	},
	"token_distribution": {
		Name:          "token_distribution",
		RequiredTrack: 3,
		Description:   "Token concentration and distribution analysis",
	},
	"address_risk": {
		Name:          "address_risk",
		RequiredTrack: 3,
		Description:   "Address risk analysis",
	},
	"operator_panel": {
		Name:          "operator_panel",
		RequiredTrack: 4,
		Description:   "Operator control panel access",
	},
	"validator_status": {
		Name:          "validator_status",
		RequiredTrack: 4,
		Description:   "Validator/sequencer status views",
	},
	"protocol_config": {
		Name:          "protocol_config",
		RequiredTrack: 4,
		Description:   "Protocol configuration visibility",
	},
	"bridge_control": {
		Name:          "bridge_control",
		RequiredTrack: 4,
		Description:   "Bridge control operations",
	},
}
// HasAccess checks if a user's track level has access to a required track.
// Access is granted when the user's track is at least the required track.
func HasAccess(userTrack int, requiredTrack int) bool {
	if userTrack < requiredTrack {
		return false
	}
	return true
}
// IsFeatureEnabled checks if a feature is enabled for a user's track level.
// Unknown feature names are treated as disabled.
func IsFeatureEnabled(featureName string, userTrack int) bool {
	if flag, ok := FeatureFlags[featureName]; ok {
		return HasAccess(userTrack, flag.RequiredTrack)
	}
	return false
}
// GetEnabledFeatures returns a map of all features and their enabled status
// for the given track level.
func GetEnabledFeatures(userTrack int) map[string]bool {
	enabled := make(map[string]bool, len(FeatureFlags))
	for name, flag := range FeatureFlags {
		enabled[name] = HasAccess(userTrack, flag.RequiredTrack)
	}
	return enabled
}
// GetRequiredTrack returns the required track level for a feature, and
// whether the feature is known at all.
func GetRequiredTrack(featureName string) (int, bool) {
	if flag, ok := FeatureFlags[featureName]; ok {
		return flag.RequiredTrack, true
	}
	return 0, false
}
// GetAllFeatures returns all feature flags.
//
// The result is a copy of the registry so callers cannot mutate the
// package-level FeatureFlags map through the returned value (the previous
// implementation handed out the internal map directly).
func GetAllFeatures() map[string]FeatureFlag {
	flags := make(map[string]FeatureFlag, len(FeatureFlags))
	for name, flag := range FeatureFlags {
		flags[name] = flag
	}
	return flags
}

56
backend/go.mod Normal file
View File

@@ -0,0 +1,56 @@
module github.com/explorer/backend
go 1.23.0
toolchain go1.24.11
require (
github.com/elastic/go-elasticsearch/v8 v8.11.0
github.com/ethereum/go-ethereum v1.13.5
github.com/golang-jwt/jwt/v4 v4.5.2
github.com/gorilla/websocket v1.5.1
github.com/jackc/pgx/v5 v5.5.1
github.com/redis/go-redis/v9 v9.17.2
github.com/stretchr/testify v1.11.1
golang.org/x/crypto v0.36.0
)
require (
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/bits-and-blooms/bitset v1.7.0 // indirect
github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.12.1 // indirect
github.com/crate-crypto/go-kzg-4844 v0.7.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/elastic/elastic-transport-go/v8 v8.3.0 // indirect
github.com/ethereum/c-kzg-4844 v0.4.0 // indirect
github.com/go-ole/go-ole v1.2.5 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mmcloughlin/addchain v0.4.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/text v0.23.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
rsc.io/tmplfunc v0.0.3 // indirect
)

214
backend/go.sum Normal file
View File

@@ -0,0 +1,214 @@
github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ=
github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40=
github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y=
github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 h1:aPEJyR4rPBvDmeyi+l/FS/VtA00IWvjeFvjen1m1l1A=
github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593/go.mod h1:6hk1eMY/u5t+Cf18q5lFMUA1Rc+Sm5I6Ra1QuPyxXCo=
github.com/cockroachdb/redact v1.0.8 h1:8QG/764wK+vmEYoOlfobpe12EQcS81ukx/a4hdVMxNw=
github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg=
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 h1:IKgmqgMQlVJIZj19CdocBeSfSaiCbEBZGKODaixqtHM=
github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo=
github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ=
github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ=
github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI=
github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M=
github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.7.0 h1:C0vgZRk4q4EZ/JgPfzuSoxdCq3C3mOZMBShovmncxvA=
github.com/crate-crypto/go-kzg-4844 v0.7.0/go.mod h1:1kMhvPgI0Ky3yIa+9lFySEBUBXkYxeOi8ZF1sYioxhc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI=
github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/elastic/elastic-transport-go/v8 v8.3.0 h1:DJGxovyQLXGr62e9nDMPSxRyWION0Bh6d9eCFBriiHo=
github.com/elastic/elastic-transport-go/v8 v8.3.0/go.mod h1:87Tcz8IVNe6rVSLdBux1o/PEItLtyabHU3naC7IoqKI=
github.com/elastic/go-elasticsearch/v8 v8.11.0 h1:gUazf443rdYAEAD7JHX5lSXRgTkG4N4IcsV8dcWQPxM=
github.com/elastic/go-elasticsearch/v8 v8.11.0/go.mod h1:GU1BJHO7WeamP7UhuElYwzzHtvf9SDmeVpSSy9+o6Qg=
github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY=
github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/go-ethereum v1.13.5 h1:U6TCRciCqZRe4FPXmy1sMGxTfuk8P7u2UoinF3VbaFk=
github.com/ethereum/go-ethereum v1.13.5/go.mod h1:yMTu38GSuyxaYzQMViqNmQ1s3cE84abZexQmTgenWk0=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA=
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.5.1 h1:5I9etrGkLrN+2XPCsi6XLlV5DITbSL/xBZdmAxFcXPI=
github.com/jackc/pgx/v5 v5.5.1/go.mod h1:Ig06C2Vu0t5qXC60W8sqIthScaEnFvojjj9dSljmHRA=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg=
github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y=
github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/redis/go-redis/v9 v9.17.2 h1:P2EGsA4qVIM3Pp+aPocCJ7DguDHhqrXNhVcEp4ViluI=
github.com/redis/go-redis/v9 v9.17.2/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1:Bn1aCHHRnjv4Bl16T8rcaFjYSrGrIZvpiGO6P3Q4GpU=
github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=

View File

@@ -0,0 +1,118 @@
package backfill
import (
"context"
"fmt"
"log"
"math/big"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/explorer/backend/indexer/processor"
"github.com/jackc/pgx/v5/pgxpool"
)
// BackfillWorker handles historical block indexing: it walks a fixed block
// range in batches, delegating per-block work to a processor.BlockProcessor
// and persisting a resume checkpoint after each batch.
type BackfillWorker struct {
	db         *pgxpool.Pool              // Postgres pool; also used for the backfill_checkpoints table
	client     *ethclient.Client          // RPC client used to fetch blocks by number
	processor  *processor.BlockProcessor  // per-block indexing logic (shares db and client)
	chainID    int                        // chain this worker indexes; keys the checkpoint row
	batchSize  int                        // number of blocks processed between checkpoints
	startBlock int64                      // inclusive range start; zero until SetRange is called
	endBlock   int64                      // inclusive range end; zero until SetRange is called
}
// NewBackfillWorker constructs a worker for the given chain, wiring up a
// block processor that shares the same database pool and RPC client. The
// block range must be set separately via SetRange before calling Run.
func NewBackfillWorker(db *pgxpool.Pool, client *ethclient.Client, chainID int, batchSize int) *BackfillWorker {
	return &BackfillWorker{
		db:        db,
		client:    client,
		processor: processor.NewBlockProcessor(db, client, chainID),
		chainID:   chainID,
		batchSize: batchSize,
	}
}
// SetRange configures the inclusive block range [startBlock, endBlock]
// that the next call to Run will backfill.
func (bw *BackfillWorker) SetRange(startBlock, endBlock int64) {
	bw.startBlock, bw.endBlock = startBlock, endBlock
}
// Run executes the backfill over [startBlock, endBlock] in batches of
// batchSize blocks, saving a checkpoint after each completed batch so an
// interrupted run can resume without repeating finished work.
//
// It returns ctx.Err() if the context is cancelled, or a wrapped error if a
// batch fails; a failure to persist the checkpoint itself is only logged,
// because the batch's data has already been written.
func (bw *BackfillWorker) Run(ctx context.Context) error {
	currentBlock := bw.startBlock

	// The checkpoint records the LAST block already processed (see
	// saveCheckpoint), so resume at checkpoint+1 — resuming at the
	// checkpoint itself would re-process that block on every restart.
	// The checkpoint > 0 guard keeps a missing checkpoint row (which
	// reads as 0) from skipping block 0 when startBlock is 0.
	if checkpoint := bw.getCheckpoint(ctx); checkpoint > 0 && checkpoint >= currentBlock {
		currentBlock = checkpoint + 1
		log.Printf("Resuming from checkpoint: block %d", currentBlock)
	}

	for currentBlock <= bw.endBlock {
		// Honor cancellation before starting another batch of work.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}

		// Clamp the batch to the configured end of the range.
		endBatch := currentBlock + int64(bw.batchSize) - 1
		if endBatch > bw.endBlock {
			endBatch = bw.endBlock
		}

		if err := bw.processBatch(ctx, currentBlock, endBatch); err != nil {
			return fmt.Errorf("failed to process batch %d-%d: %w", currentBlock, endBatch, err)
		}

		// Best-effort: the batch is committed, so losing the checkpoint
		// only risks redundant re-processing after a restart.
		if err := bw.saveCheckpoint(ctx, endBatch); err != nil {
			log.Printf("Warning: failed to save checkpoint: %v", err)
		}

		log.Printf("Processed blocks %d-%d", currentBlock, endBatch)
		currentBlock = endBatch + 1
	}

	return nil
}
// processBatch fetches and indexes every block in the inclusive range
// [start, end], stopping at the first fetch or processing failure.
func (bw *BackfillWorker) processBatch(ctx context.Context, start, end int64) error {
	for num := start; num <= end; num++ {
		blk, err := bw.client.BlockByNumber(ctx, big.NewInt(num))
		if err != nil {
			return fmt.Errorf("failed to fetch block %d: %w", num, err)
		}
		if err = bw.processor.ProcessBlock(ctx, blk); err != nil {
			return fmt.Errorf("failed to process block %d: %w", num, err)
		}
	}
	return nil
}
// getCheckpoint returns the last processed block recorded for this chain in
// backfill_checkpoints. It returns 0 when no checkpoint row exists — or on
// any query error, which callers treat the same as "no checkpoint".
func (bw *BackfillWorker) getCheckpoint(ctx context.Context) int64 {
	const query = `SELECT last_block FROM backfill_checkpoints WHERE chain_id = $1`
	var lastBlock int64
	if err := bw.db.QueryRow(ctx, query, bw.chainID).Scan(&lastBlock); err != nil {
		return 0
	}
	return lastBlock
}
// saveCheckpoint upserts blockNum as the last processed block for this
// chain, so a later run can resume from it.
func (bw *BackfillWorker) saveCheckpoint(ctx context.Context, blockNum int64) error {
	const upsert = `
INSERT INTO backfill_checkpoints (chain_id, last_block, updated_at)
VALUES ($1, $2, NOW())
ON CONFLICT (chain_id) DO UPDATE SET last_block = $2, updated_at = NOW()
`
	_, err := bw.db.Exec(ctx, upsert, bw.chainID, blockNum)
	return err
}

Some files were not shown because too many files have changed in this diff Show More