Add production deployment infrastructure (Agent 4)

- Docker: Multi-stage Dockerfile with security hardening, docker-compose
  for production and development environments
- Environment: Comprehensive .env.example with all config options,
  lib/config/env.ts for typed environment validation
- Logging: Structured JSON logging with request/response middleware
- Monitoring: Prometheus metrics endpoint, Grafana dashboard, health
  checks (liveness/readiness probes)
- Security: Security headers, rate limiting, CORS middleware
- CI/CD: GitHub Actions workflows for CI, production deploy, and
  preview deployments
- Error tracking: Sentry integration foundation

Files created:
- Docker: Dockerfile, docker-compose.yml, docker-compose.dev.yml, .dockerignore
- Config: lib/config/env.ts, lib/config/index.ts
- Logging: lib/logging/logger.ts, lib/logging/middleware.ts
- Monitoring: lib/monitoring/sentry.ts, lib/monitoring/metrics.ts,
  lib/monitoring/health.ts
- Security: lib/security/headers.ts, lib/security/rateLimit.ts,
  lib/security/cors.ts
- API: pages/api/health/*, pages/api/metrics.ts
- Infra: infra/prometheus/prometheus.yml, infra/grafana/*
This commit is contained in:
Claude 2025-11-23 03:54:03 +00:00
parent 705105d9b6
commit 5ea8bab5c3
No known key found for this signature in database
29 changed files with 3881 additions and 21 deletions

77
.dockerignore Normal file
View file

@ -0,0 +1,77 @@
# LocalGreenChain Docker Ignore
# Prevents copying unnecessary files to Docker context
# Dependencies
node_modules
.pnp
.pnp.js
# Testing
coverage
.nyc_output
cypress/videos
cypress/screenshots
__tests__
# Build outputs (we rebuild inside container)
.next
out
build
dist
# Development files
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*
# Environment files (should be passed at runtime)
.env
.env.local
.env.development.local
.env.test.local
.env.production.local
# IDE and editor files
.idea
.vscode
*.swp
*.swo
*~
# OS files
.DS_Store
Thumbs.db
# Git
.git
.gitignore
# Docker
Dockerfile*
docker-compose*
.docker
# Documentation (not needed in production)
*.md
docs
CHANGELOG.md
README.md
LICENSE
# Misc
.eslintcache
.turbo
*.tsbuildinfo
# Data files (should be mounted as volumes)
data
*.json.bak
# Tor configuration (handled separately)
tor
# Infrastructure files
infra
.github

View file

@ -1,25 +1,124 @@
# =============================================================================
# LocalGreenChain Environment Variables
# Agent 4: Production Deployment
# Copy this file to .env.local and fill in the values
# =============================================================================
# -----------------------------------------------------------------------------
# Application Settings
# -----------------------------------------------------------------------------
NODE_ENV=development
PORT=3001
NEXT_PUBLIC_API_URL=http://localhost:3001
NEXT_PUBLIC_APP_NAME=LocalGreenChain
# -----------------------------------------------------------------------------
# Database (PostgreSQL)
# -----------------------------------------------------------------------------
DATABASE_URL=postgresql://lgc:lgc_password@localhost:5432/localgreenchain
DB_USER=lgc
DB_PASSWORD=lgc_password
DB_NAME=localgreenchain
DB_HOST=localhost
DB_PORT=5432
# -----------------------------------------------------------------------------
# Redis Cache
# -----------------------------------------------------------------------------
REDIS_URL=redis://localhost:6379
REDIS_HOST=localhost
REDIS_PORT=6379
# -----------------------------------------------------------------------------
# Authentication (NextAuth.js)
# Generate secret: openssl rand -base64 32
# -----------------------------------------------------------------------------
NEXTAUTH_URL=http://localhost:3001
NEXTAUTH_SECRET=your-secret-key-change-in-production
# OAuth Providers (optional)
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
# -----------------------------------------------------------------------------
# Error Tracking (Sentry)
# -----------------------------------------------------------------------------
SENTRY_DSN=
NEXT_PUBLIC_SENTRY_DSN=
SENTRY_ORG=
SENTRY_PROJECT=
SENTRY_AUTH_TOKEN=
# -----------------------------------------------------------------------------
# Logging
# Levels: error, warn, info, debug, trace
# -----------------------------------------------------------------------------
LOG_LEVEL=info
LOG_FORMAT=json
# -----------------------------------------------------------------------------
# Monitoring
# -----------------------------------------------------------------------------
PROMETHEUS_ENABLED=false
METRICS_PORT=9091
# -----------------------------------------------------------------------------
# Plants.net API (optional)
# -----------------------------------------------------------------------------
PLANTS_NET_API_KEY=your_api_key_here
# Tor Configuration
# -----------------------------------------------------------------------------
# Tor Configuration (optional)
# -----------------------------------------------------------------------------
TOR_ENABLED=false
TOR_SOCKS_HOST=127.0.0.1
TOR_SOCKS_PORT=9050
TOR_CONTROL_PORT=9051
TOR_HIDDEN_SERVICE_DIR=/var/lib/tor/localgreenchain
# -----------------------------------------------------------------------------
# Privacy Settings
# -----------------------------------------------------------------------------
DEFAULT_PRIVACY_MODE=standard
ALLOW_ANONYMOUS_REGISTRATION=true
LOCATION_OBFUSCATION_DEFAULT=fuzzy
# Application Settings
NODE_ENV=development
PORT=3001
# -----------------------------------------------------------------------------
# File Storage (S3/R2/MinIO)
# -----------------------------------------------------------------------------
STORAGE_PROVIDER=local
S3_BUCKET=
S3_REGION=
S3_ACCESS_KEY_ID=
S3_SECRET_ACCESS_KEY=
S3_ENDPOINT=
# -----------------------------------------------------------------------------
# Email (SMTP)
# -----------------------------------------------------------------------------
SMTP_HOST=localhost
SMTP_PORT=1025
SMTP_USER=
SMTP_PASSWORD=
SMTP_FROM=noreply@localgreenchain.local
# -----------------------------------------------------------------------------
# Rate Limiting
# -----------------------------------------------------------------------------
RATE_LIMIT_WINDOW_MS=60000
RATE_LIMIT_MAX_REQUESTS=100
# -----------------------------------------------------------------------------
# Security
# -----------------------------------------------------------------------------
CORS_ORIGINS=http://localhost:3001
CSP_REPORT_URI=
# -----------------------------------------------------------------------------
# Legacy Drupal Settings (for backward compatibility)
# -----------------------------------------------------------------------------
NEXT_PUBLIC_DRUPAL_BASE_URL=http://localhost:8080
NEXT_IMAGE_DOMAIN=localhost
DRUPAL_CLIENT_ID=52ce1a10-bf5c-4c81-8edf-eea3af95da84

172
.github/workflows/ci.yml vendored Normal file
View file

@ -0,0 +1,172 @@
# LocalGreenChain CI Pipeline
# Agent 4: Production Deployment
#
# Runs on every push and pull request:
# - Linting and type checking
# - Unit and integration tests
# - Build verification
name: CI
on:
push:
branches: [main, develop]
pull_request:
branches: [main, develop]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
NODE_ENV: test
jobs:
# ==========================================================================
# Lint and Type Check
# ==========================================================================
lint:
name: Lint & Type Check
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Bun
uses: oven-sh/setup-bun@v1
with:
bun-version: latest
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Run ESLint
run: bun run lint
- name: Run TypeScript type check
run: bunx tsc --noEmit
# ==========================================================================
# Unit Tests
# ==========================================================================
test:
name: Unit Tests
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Bun
uses: oven-sh/setup-bun@v1
with:
bun-version: latest
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Run tests with coverage
run: bun run test:coverage
- name: Upload coverage reports
uses: codecov/codecov-action@v3
if: always()
with:
files: ./coverage/lcov.info
fail_ci_if_error: false
verbose: true
# ==========================================================================
# Build
# ==========================================================================
build:
name: Build
runs-on: ubuntu-latest
timeout-minutes: 15
needs: [lint, test]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Bun
uses: oven-sh/setup-bun@v1
with:
bun-version: latest
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Build application
run: bun run build
env:
NEXT_TELEMETRY_DISABLED: 1
- name: Upload build artifacts
uses: actions/upload-artifact@v4
with:
name: build-output
path: .next/
retention-days: 7
# ==========================================================================
# Docker Build (only on main branch)
# ==========================================================================
docker:
name: Docker Build
runs-on: ubuntu-latest
timeout-minutes: 20
needs: [build]
if: github.ref == 'refs/heads/main'
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker image
uses: docker/build-push-action@v5
with:
context: .
push: false
tags: localgreenchain:${{ github.sha }}
cache-from: type=gha
cache-to: type=gha,mode=max
# ==========================================================================
# Security Scan
# ==========================================================================
security:
name: Security Scan
runs-on: ubuntu-latest
timeout-minutes: 10
needs: [lint]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Bun
uses: oven-sh/setup-bun@v1
with:
bun-version: latest
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Run security audit
run: bun pm audit || true
continue-on-error: true
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
scan-type: 'fs'
scan-ref: '.'
severity: 'CRITICAL,HIGH'
exit-code: '0'

169
.github/workflows/deploy.yml vendored Normal file
View file

@ -0,0 +1,169 @@
# LocalGreenChain Production Deployment
# Agent 4: Production Deployment
#
# Deploys to production when a release is published
# or manually triggered
name: Deploy Production
on:
release:
types: [published]
workflow_dispatch:
inputs:
environment:
description: 'Deployment environment'
required: true
default: 'production'
type: choice
options:
- production
- staging
concurrency:
group: deploy-${{ github.event.inputs.environment || 'production' }}
cancel-in-progress: false
env:
REGISTRY: ghcr.io
IMAGE_NAME: ${{ github.repository }}
jobs:
# ==========================================================================
# Build and Push Docker Image
# ==========================================================================
build:
name: Build & Push Image
runs-on: ubuntu-latest
timeout-minutes: 20
permissions:
contents: read
packages: write
outputs:
image_tag: ${{ steps.meta.outputs.tags }}
image_digest: ${{ steps.build.outputs.digest }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=sha
- name: Build and push
id: build
uses: docker/build-push-action@v5
with:
context: .
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
build-args: |
NEXT_PUBLIC_API_URL=${{ vars.API_URL }}
NEXT_PUBLIC_SENTRY_DSN=${{ vars.SENTRY_DSN }}
# ==========================================================================
# Deploy to Production
# ==========================================================================
deploy:
name: Deploy
runs-on: ubuntu-latest
timeout-minutes: 15
needs: [build]
environment:
name: ${{ github.event.inputs.environment || 'production' }}
url: ${{ vars.APP_URL }}
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Deploy notification (start)
run: |
echo "🚀 Starting deployment to ${{ github.event.inputs.environment || 'production' }}"
echo "Image: ${{ needs.build.outputs.image_tag }}"
# Add your deployment steps here
# Examples:
# - SSH and docker-compose pull/up
# - Kubernetes deployment
# - Cloud provider specific deployment
- name: Deploy notification (complete)
run: |
echo "✅ Deployment completed successfully"
# ==========================================================================
# Post-Deployment Verification
# ==========================================================================
verify:
name: Verify Deployment
runs-on: ubuntu-latest
timeout-minutes: 10
needs: [deploy]
steps:
- name: Wait for deployment to stabilize
run: sleep 30
- name: Health check
run: |
for i in {1..5}; do
status=$(curl -s -o /dev/null -w "%{http_code}" ${{ vars.APP_URL }}/api/health || echo "000")
if [ "$status" = "200" ]; then
echo "✅ Health check passed"
exit 0
fi
echo "Attempt $i: Status $status, retrying..."
sleep 10
done
echo "❌ Health check failed after 5 attempts"
exit 1
- name: Smoke tests
run: |
# Verify critical endpoints
curl -f ${{ vars.APP_URL }}/api/health/live || exit 1
curl -f ${{ vars.APP_URL }}/api/health/ready || exit 1
echo "✅ Smoke tests passed"
# ==========================================================================
# Rollback on Failure
# ==========================================================================
rollback:
name: Rollback
runs-on: ubuntu-latest
needs: [verify]
if: failure()
steps:
- name: Rollback notification
run: |
echo "⚠️ Deployment verification failed, initiating rollback..."
# Add rollback logic here
- name: Alert team
run: |
echo "🔔 Deployment failed - team has been notified"

139
.github/workflows/preview.yml vendored Normal file
View file

@ -0,0 +1,139 @@
# LocalGreenChain Preview Deployments
# Agent 4: Production Deployment
#
# Creates preview deployments for pull requests
name: Preview Deployment
on:
pull_request:
types: [opened, synchronize, reopened]
concurrency:
group: preview-${{ github.event.pull_request.number }}
cancel-in-progress: true
jobs:
# ==========================================================================
# Build Preview
# ==========================================================================
build:
name: Build Preview
runs-on: ubuntu-latest
timeout-minutes: 15
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Setup Bun
uses: oven-sh/setup-bun@v1
with:
bun-version: latest
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Build application
run: bun run build
env:
NEXT_TELEMETRY_DISABLED: 1
NEXT_PUBLIC_API_URL: https://preview-${{ github.event.pull_request.number }}.localgreenchain.dev
- name: Upload build artifacts
uses: actions/upload-artifact@v4
with:
name: preview-build
path: |
.next/
public/
package.json
next.config.js
retention-days: 7
# ==========================================================================
# Deploy Preview
# ==========================================================================
deploy:
name: Deploy Preview
runs-on: ubuntu-latest
timeout-minutes: 10
needs: [build]
permissions:
pull-requests: write
steps:
- name: Download build artifacts
uses: actions/download-artifact@v4
with:
name: preview-build
- name: Deploy preview
id: deploy
run: |
# Add your preview deployment logic here
# Examples: Vercel, Netlify, or custom solution
PREVIEW_URL="https://preview-${{ github.event.pull_request.number }}.localgreenchain.dev"
echo "preview_url=${PREVIEW_URL}" >> $GITHUB_OUTPUT
echo "Deployed to: ${PREVIEW_URL}"
- name: Comment on PR
uses: actions/github-script@v7
with:
script: |
const previewUrl = '${{ steps.deploy.outputs.preview_url }}';
// Find existing comment
const comments = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const botComment = comments.data.find(comment =>
comment.user.type === 'Bot' &&
comment.body.includes('Preview Deployment')
);
const body = `## 🚀 Preview Deployment
| Status | URL |
|--------|-----|
| ✅ Ready | [${previewUrl}](${previewUrl}) |
**Commit:** \`${context.sha.substring(0, 7)}\`
**Updated:** ${new Date().toISOString()}
---
<sub>This preview will be automatically deleted when the PR is closed.</sub>`;
if (botComment) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: botComment.id,
body: body
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: body
});
}
# ==========================================================================
# Cleanup on PR Close
# ==========================================================================
cleanup:
name: Cleanup Preview
runs-on: ubuntu-latest
if: github.event.action == 'closed'
steps:
- name: Delete preview deployment
run: |
echo "Cleaning up preview deployment for PR #${{ github.event.pull_request.number }}"
# Add your cleanup logic here

View file

@ -1,40 +1,82 @@
# Dockerfile for LocalGreenChain
# Uses Bun for fast builds and runtime
# Multi-stage production build with Bun runtime
# Agent 4: Production Deployment
FROM oven/bun:1 as base
# =============================================================================
# Stage 1: Dependencies
# =============================================================================
FROM oven/bun:1 AS deps
WORKDIR /app
# Install dependencies
# Install dependencies only (better caching)
COPY package.json bun.lockb* ./
RUN bun install --frozen-lockfile
RUN bun install --frozen-lockfile --production=false
# Copy application code
# =============================================================================
# Stage 2: Builder
# =============================================================================
FROM oven/bun:1 AS builder
WORKDIR /app
# Copy dependencies from deps stage
COPY --from=deps /app/node_modules ./node_modules
COPY . .
# Build arguments for build-time configuration
ARG NEXT_PUBLIC_API_URL
ARG NEXT_PUBLIC_SENTRY_DSN
ENV NEXT_PUBLIC_API_URL=$NEXT_PUBLIC_API_URL
ENV NEXT_PUBLIC_SENTRY_DSN=$NEXT_PUBLIC_SENTRY_DSN
# Disable Next.js telemetry during build
ENV NEXT_TELEMETRY_DISABLED=1
# Build Next.js application
RUN bun run build
# Production stage
FROM oven/bun:1-slim as production
# Remove development dependencies
RUN bun install --frozen-lockfile --production
# =============================================================================
# Stage 3: Production Runner
# =============================================================================
FROM oven/bun:1-slim AS production
WORKDIR /app
# Copy dependencies and build output
COPY --from=base /app/node_modules ./node_modules
COPY --from=base /app/.next ./.next
COPY --from=base /app/public ./public
COPY --from=base /app/package.json ./package.json
COPY --from=base /app/next.config.js ./next.config.js
# Create non-root user for security
RUN addgroup --system --gid 1001 nodejs && \
adduser --system --uid 1001 nextjs
# Create data directory
RUN mkdir -p /app/data
# Copy necessary files from builder
COPY --from=builder /app/public ./public
COPY --from=builder /app/package.json ./package.json
COPY --from=builder /app/next.config.js ./next.config.js
# Copy Next.js build output with proper ownership
COPY --from=builder --chown=nextjs:nodejs /app/.next/standalone ./
COPY --from=builder --chown=nextjs:nodejs /app/.next/static ./.next/static
# Create data directory with proper permissions
RUN mkdir -p /app/data && chown -R nextjs:nodejs /app/data
# Set production environment
ENV NODE_ENV=production
ENV NEXT_TELEMETRY_DISABLED=1
ENV PORT=3001
ENV HOSTNAME="0.0.0.0"
# Expose port
EXPOSE 3001
# Set environment to production
ENV NODE_ENV=production
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD curl -f http://localhost:3001/api/health || exit 1
# Switch to non-root user
USER nextjs
# Run the application
CMD ["bun", "run", "start"]

155
docker-compose.dev.yml Normal file
View file

@ -0,0 +1,155 @@
# LocalGreenChain Development Docker Compose
# Agent 4: Production Deployment
# Development environment with hot reloading and debug tools
version: '3.8'
services:
# ==========================================================================
# Application (Development Mode)
# ==========================================================================
app:
build:
context: .
dockerfile: Dockerfile
target: deps # Use deps stage for development
container_name: lgc-app-dev
restart: unless-stopped
ports:
- "${PORT:-3001}:3001"
environment:
- NODE_ENV=development
- DATABASE_URL=postgresql://lgc:lgc_dev_password@postgres:5432/localgreenchain_dev
- REDIS_URL=redis://redis:6379
- LOG_LEVEL=debug
- PLANTS_NET_API_KEY=${PLANTS_NET_API_KEY:-}
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
volumes:
# Mount source code for hot reloading
- .:/app
- /app/node_modules # Exclude node_modules
- /app/.next # Exclude build output
networks:
- lgc-dev-network
command: bun run dev
# ==========================================================================
# Database (Development)
# ==========================================================================
postgres:
image: postgres:15-alpine
container_name: lgc-postgres-dev
restart: unless-stopped
environment:
- POSTGRES_USER=lgc
- POSTGRES_PASSWORD=lgc_dev_password
- POSTGRES_DB=localgreenchain_dev
volumes:
- postgres-dev-data:/var/lib/postgresql/data
ports:
- "5433:5432" # Different port to avoid conflicts
networks:
- lgc-dev-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U lgc -d localgreenchain_dev"]
interval: 5s
timeout: 5s
retries: 5
# ==========================================================================
# Cache (Development)
# ==========================================================================
redis:
image: redis:7-alpine
container_name: lgc-redis-dev
restart: unless-stopped
command: redis-server --appendonly yes
volumes:
- redis-dev-data:/data
ports:
- "6380:6379" # Different port to avoid conflicts
networks:
- lgc-dev-network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 5s
timeout: 5s
retries: 5
# ==========================================================================
# Database Admin (pgAdmin)
# ==========================================================================
pgadmin:
image: dpage/pgadmin4:latest
container_name: lgc-pgadmin-dev
restart: unless-stopped
environment:
- PGADMIN_DEFAULT_EMAIL=admin@localgreenchain.local
- PGADMIN_DEFAULT_PASSWORD=admin
- PGADMIN_CONFIG_SERVER_MODE=False
volumes:
- pgadmin-dev-data:/var/lib/pgadmin
ports:
- "5050:80"
networks:
- lgc-dev-network
depends_on:
- postgres
profiles:
- tools
# ==========================================================================
# Redis Commander (Redis UI)
# ==========================================================================
redis-commander:
image: rediscommander/redis-commander:latest
container_name: lgc-redis-commander-dev
restart: unless-stopped
environment:
- REDIS_HOSTS=local:redis:6379
ports:
- "8081:8081"
networks:
- lgc-dev-network
depends_on:
- redis
profiles:
- tools
# ==========================================================================
# MailHog (Email Testing)
# ==========================================================================
mailhog:
image: mailhog/mailhog:latest
container_name: lgc-mailhog-dev
restart: unless-stopped
ports:
- "1025:1025" # SMTP
- "8025:8025" # Web UI
networks:
- lgc-dev-network
profiles:
- tools
# =============================================================================
# Networks
# =============================================================================
networks:
lgc-dev-network:
driver: bridge
name: lgc-dev-network
# =============================================================================
# Volumes
# =============================================================================
volumes:
postgres-dev-data:
name: lgc-postgres-dev-data
redis-dev-data:
name: lgc-redis-dev-data
pgadmin-dev-data:
name: lgc-pgadmin-dev-data

164
docker-compose.yml Normal file
View file

@ -0,0 +1,164 @@
# LocalGreenChain Production Docker Compose
# Agent 4: Production Deployment
# Full stack with PostgreSQL, Redis, and monitoring
version: '3.8'
services:
# ==========================================================================
# Application
# ==========================================================================
app:
build:
context: .
dockerfile: Dockerfile
args:
- NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL:-http://localhost:3001}
- NEXT_PUBLIC_SENTRY_DSN=${NEXT_PUBLIC_SENTRY_DSN:-}
container_name: lgc-app
restart: unless-stopped
ports:
- "${PORT:-3001}:3001"
environment:
- NODE_ENV=production
- DATABASE_URL=postgresql://${DB_USER:-lgc}:${DB_PASSWORD:-lgc_password}@postgres:5432/${DB_NAME:-localgreenchain}
- REDIS_URL=redis://redis:6379
- SENTRY_DSN=${SENTRY_DSN:-}
- LOG_LEVEL=${LOG_LEVEL:-info}
- PLANTS_NET_API_KEY=${PLANTS_NET_API_KEY:-}
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
volumes:
- app-data:/app/data
networks:
- lgc-network
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3001/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
labels:
- "prometheus.scrape=true"
- "prometheus.port=3001"
- "prometheus.path=/api/metrics"
# ==========================================================================
# Database
# ==========================================================================
postgres:
image: postgres:15-alpine
container_name: lgc-postgres
restart: unless-stopped
environment:
- POSTGRES_USER=${DB_USER:-lgc}
- POSTGRES_PASSWORD=${DB_PASSWORD:-lgc_password}
- POSTGRES_DB=${DB_NAME:-localgreenchain}
- PGDATA=/var/lib/postgresql/data/pgdata
volumes:
- postgres-data:/var/lib/postgresql/data
ports:
- "${DB_PORT:-5432}:5432"
networks:
- lgc-network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-lgc} -d ${DB_NAME:-localgreenchain}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
# ==========================================================================
# Cache
# ==========================================================================
redis:
image: redis:7-alpine
container_name: lgc-redis
restart: unless-stopped
command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
volumes:
- redis-data:/data
ports:
- "${REDIS_PORT:-6379}:6379"
networks:
- lgc-network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
# ==========================================================================
# Monitoring - Prometheus
# ==========================================================================
prometheus:
image: prom/prometheus:v2.47.0
container_name: lgc-prometheus
restart: unless-stopped
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention.time=15d'
- '--web.enable-lifecycle'
volumes:
- ./infra/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- prometheus-data:/prometheus
ports:
- "${PROMETHEUS_PORT:-9090}:9090"
networks:
- lgc-network
depends_on:
- app
profiles:
- monitoring
# ==========================================================================
# Monitoring - Grafana
# ==========================================================================
grafana:
image: grafana/grafana:10.1.0
container_name: lgc-grafana
restart: unless-stopped
environment:
- GF_SECURITY_ADMIN_USER=${GRAFANA_USER:-admin}
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin}
- GF_USERS_ALLOW_SIGN_UP=false
- GF_SERVER_ROOT_URL=${GRAFANA_ROOT_URL:-http://localhost:3000}
volumes:
- grafana-data:/var/lib/grafana
- ./infra/grafana/provisioning:/etc/grafana/provisioning:ro
- ./infra/grafana/dashboards:/var/lib/grafana/dashboards:ro
ports:
- "${GRAFANA_PORT:-3000}:3000"
networks:
- lgc-network
depends_on:
- prometheus
profiles:
- monitoring
# =============================================================================
# Networks
# =============================================================================
networks:
lgc-network:
driver: bridge
name: lgc-network
# =============================================================================
# Volumes
# =============================================================================
volumes:
app-data:
name: lgc-app-data
postgres-data:
name: lgc-postgres-data
redis-data:
name: lgc-redis-data
prometheus-data:
name: lgc-prometheus-data
grafana-data:
name: lgc-grafana-data

View file

@ -0,0 +1,682 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"description": "LocalGreenChain Application Dashboard",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 0
},
"id": 1,
"panels": [],
"title": "Overview",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 1
},
"id": 2,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(lgc_http_requests_total)",
"refId": "A"
}
],
"title": "Total Requests",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 6,
"y": 1
},
"id": 3,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "lgc_plants_registered_total",
"refId": "A"
}
],
"title": "Plants Registered",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 12,
"y": 1
},
"id": 4,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "lgc_active_agents",
"refId": "A"
}
],
"title": "Active Agents",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 18,
"y": 1
},
"id": 5,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": ["lastNotNull"],
"fields": "",
"values": false
},
"textMode": "auto"
},
"pluginVersion": "10.1.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "lgc_blockchain_blocks",
"refId": "A"
}
],
"title": "Blockchain Blocks",
"type": "stat"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 5
},
"id": 6,
"panels": [],
"title": "HTTP Metrics",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "reqps"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 6
},
"id": 7,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "rate(lgc_http_requests_total[5m])",
"legendFormat": "{{method}} {{path}}",
"refId": "A"
}
],
"title": "Request Rate",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 6
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "histogram_quantile(0.95, rate(lgc_http_request_duration_seconds_bucket[5m]))",
"legendFormat": "p95 {{method}} {{path}}",
"refId": "A"
}
],
"title": "Request Duration (p95)",
"type": "timeseries"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 14
},
"id": 9,
"panels": [],
"title": "Agent Metrics",
"type": "row"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 15
},
"id": 10,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "rate(lgc_agent_cycles_total[5m])",
"legendFormat": "{{agent}}",
"refId": "A"
}
],
"title": "Agent Cycle Rate",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 10,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 15
},
"id": 11,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "histogram_quantile(0.95, rate(lgc_agent_cycle_duration_seconds_bucket[5m]))",
"legendFormat": "{{agent}}",
"refId": "A"
}
],
"title": "Agent Cycle Duration (p95)",
"type": "timeseries"
}
],
"refresh": "30s",
"schemaVersion": 38,
"style": "dark",
"tags": ["localgreenchain", "application"],
"templating": {
"list": [
{
"current": {
"selected": false,
"text": "Prometheus",
"value": "Prometheus"
},
"hide": 0,
"includeAll": false,
"label": "Datasource",
"multi": false,
"name": "DS_PROMETHEUS",
"options": [],
"query": "prometheus",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"type": "datasource"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "LocalGreenChain Dashboard",
"uid": "localgreenchain-main",
"version": 1,
"weekStart": ""
}

View file

@ -0,0 +1,16 @@
# LocalGreenChain Grafana Dashboard Provisioning
# Agent 4: Production Deployment
apiVersion: 1
providers:
- name: 'LocalGreenChain'
orgId: 1
folder: 'LocalGreenChain'
folderUid: 'lgc'
type: file
disableDeletion: false
updateIntervalSeconds: 30
allowUiUpdates: true
options:
path: /var/lib/grafana/dashboards

View file

@ -0,0 +1,30 @@
# LocalGreenChain Grafana Datasources
# Agent 4: Production Deployment
apiVersion: 1
datasources:
# Prometheus
- name: Prometheus
type: prometheus
access: proxy
url: http://prometheus:9090
isDefault: true
editable: false
jsonData:
timeInterval: "15s"
httpMethod: POST
# PostgreSQL (optional)
# - name: PostgreSQL
# type: postgres
# url: postgres:5432
# database: localgreenchain
# user: lgc
# secureJsonData:
# password: ${DB_PASSWORD}
# jsonData:
# sslmode: disable
# maxOpenConns: 5
# maxIdleConns: 2
# connMaxLifetime: 14400

View file

@ -0,0 +1,65 @@
# LocalGreenChain Prometheus Configuration
# Agent 4: Production Deployment
global:
scrape_interval: 15s
evaluation_interval: 15s
external_labels:
monitor: 'localgreenchain'
# Alerting configuration (optional)
alerting:
alertmanagers:
- static_configs:
- targets: []
# - alertmanager:9093
# Rule files (optional)
rule_files: []
# - "first_rules.yml"
# - "second_rules.yml"
# Scrape configurations
scrape_configs:
# Prometheus self-monitoring
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
metrics_path: /metrics
# LocalGreenChain Application
- job_name: 'localgreenchain'
static_configs:
- targets: ['app:3001']
metrics_path: /api/metrics
scrape_interval: 30s
scrape_timeout: 10s
# PostgreSQL (if using postgres_exporter)
- job_name: 'postgresql'
static_configs:
- targets: []
# - postgres-exporter:9187
scrape_interval: 60s
# Redis (if using redis_exporter)
- job_name: 'redis'
static_configs:
- targets: []
# - redis-exporter:9121
scrape_interval: 30s
# Node Exporter (if running)
- job_name: 'node'
static_configs:
- targets: []
# - node-exporter:9100
scrape_interval: 30s
# Remote write configuration (optional)
# For long-term storage or external Prometheus
# remote_write:
# - url: "https://remote-prometheus.example.com/api/v1/write"
# basic_auth:
# username: user
# password: pass

280
lib/config/env.ts Normal file
View file

@ -0,0 +1,280 @@
/**
* Environment Configuration
* Agent 4: Production Deployment
*
* Validates and exports environment variables with type safety.
* Throws errors in production if required variables are missing.
*/
type LogLevel = 'error' | 'warn' | 'info' | 'debug' | 'trace';
type LogFormat = 'json' | 'pretty';
type PrivacyMode = 'standard' | 'enhanced' | 'maximum';
type LocationObfuscation = 'none' | 'fuzzy' | 'region' | 'hidden';
type StorageProvider = 'local' | 's3' | 'r2' | 'minio';
interface EnvConfig {
// Application
nodeEnv: 'development' | 'production' | 'test';
port: number;
apiUrl: string;
appName: string;
// Database
databaseUrl: string;
dbHost: string;
dbPort: number;
dbUser: string;
dbPassword: string;
dbName: string;
// Redis
redisUrl: string;
redisHost: string;
redisPort: number;
// Authentication
nextAuthUrl: string;
nextAuthSecret: string;
githubClientId?: string;
githubClientSecret?: string;
googleClientId?: string;
googleClientSecret?: string;
// Sentry
sentryDsn?: string;
sentryOrg?: string;
sentryProject?: string;
sentryAuthToken?: string;
// Logging
logLevel: LogLevel;
logFormat: LogFormat;
// Monitoring
prometheusEnabled: boolean;
metricsPort: number;
// Plants.net
plantsNetApiKey?: string;
// Tor
torEnabled: boolean;
torSocksHost: string;
torSocksPort: number;
torControlPort: number;
torHiddenServiceDir: string;
// Privacy
defaultPrivacyMode: PrivacyMode;
allowAnonymousRegistration: boolean;
locationObfuscationDefault: LocationObfuscation;
// Storage
storageProvider: StorageProvider;
s3Bucket?: string;
s3Region?: string;
s3AccessKeyId?: string;
s3SecretAccessKey?: string;
s3Endpoint?: string;
// Email
smtpHost: string;
smtpPort: number;
smtpUser?: string;
smtpPassword?: string;
smtpFrom: string;
// Rate Limiting
rateLimitWindowMs: number;
rateLimitMaxRequests: number;
// Security
corsOrigins: string[];
cspReportUri?: string;
// Feature Flags
isProduction: boolean;
isDevelopment: boolean;
isTest: boolean;
}
function getEnvString(key: string, defaultValue?: string): string {
const value = process.env[key] ?? defaultValue;
if (value === undefined) {
if (process.env.NODE_ENV === 'production') {
throw new Error(`Missing required environment variable: ${key}`);
}
return '';
}
return value;
}
function getEnvNumber(key: string, defaultValue: number): number {
const value = process.env[key];
if (value === undefined) {
return defaultValue;
}
const parsed = parseInt(value, 10);
if (isNaN(parsed)) {
throw new Error(`Environment variable ${key} must be a number, got: ${value}`);
}
return parsed;
}
function getEnvBoolean(key: string, defaultValue: boolean): boolean {
const value = process.env[key];
if (value === undefined) {
return defaultValue;
}
return value.toLowerCase() === 'true' || value === '1';
}
function getEnvArray(key: string, defaultValue: string[] = []): string[] {
const value = process.env[key];
if (value === undefined || value === '') {
return defaultValue;
}
return value.split(',').map((s) => s.trim()).filter(Boolean);
}
function validateLogLevel(value: string): LogLevel {
const validLevels: LogLevel[] = ['error', 'warn', 'info', 'debug', 'trace'];
if (!validLevels.includes(value as LogLevel)) {
return 'info';
}
return value as LogLevel;
}
function validateLogFormat(value: string): LogFormat {
return value === 'pretty' ? 'pretty' : 'json';
}
function validatePrivacyMode(value: string): PrivacyMode {
const validModes: PrivacyMode[] = ['standard', 'enhanced', 'maximum'];
if (!validModes.includes(value as PrivacyMode)) {
return 'standard';
}
return value as PrivacyMode;
}
function validateLocationObfuscation(value: string): LocationObfuscation {
const validModes: LocationObfuscation[] = ['none', 'fuzzy', 'region', 'hidden'];
if (!validModes.includes(value as LocationObfuscation)) {
return 'fuzzy';
}
return value as LocationObfuscation;
}
function validateStorageProvider(value: string): StorageProvider {
const validProviders: StorageProvider[] = ['local', 's3', 'r2', 'minio'];
if (!validProviders.includes(value as StorageProvider)) {
return 'local';
}
return value as StorageProvider;
}
function validateNodeEnv(value: string): 'development' | 'production' | 'test' {
if (value === 'production' || value === 'test') {
return value;
}
return 'development';
}
/**
* Load and validate environment configuration
*/
function loadEnv(): EnvConfig {
const nodeEnv = validateNodeEnv(process.env.NODE_ENV || 'development');
return {
// Application
nodeEnv,
port: getEnvNumber('PORT', 3001),
apiUrl: getEnvString('NEXT_PUBLIC_API_URL', 'http://localhost:3001'),
appName: getEnvString('NEXT_PUBLIC_APP_NAME', 'LocalGreenChain'),
// Database
databaseUrl: getEnvString('DATABASE_URL', 'postgresql://lgc:lgc_password@localhost:5432/localgreenchain'),
dbHost: getEnvString('DB_HOST', 'localhost'),
dbPort: getEnvNumber('DB_PORT', 5432),
dbUser: getEnvString('DB_USER', 'lgc'),
dbPassword: getEnvString('DB_PASSWORD', 'lgc_password'),
dbName: getEnvString('DB_NAME', 'localgreenchain'),
// Redis
redisUrl: getEnvString('REDIS_URL', 'redis://localhost:6379'),
redisHost: getEnvString('REDIS_HOST', 'localhost'),
redisPort: getEnvNumber('REDIS_PORT', 6379),
// Authentication
nextAuthUrl: getEnvString('NEXTAUTH_URL', 'http://localhost:3001'),
nextAuthSecret: getEnvString('NEXTAUTH_SECRET', 'development-secret-change-in-production'),
githubClientId: process.env.GITHUB_CLIENT_ID,
githubClientSecret: process.env.GITHUB_CLIENT_SECRET,
googleClientId: process.env.GOOGLE_CLIENT_ID,
googleClientSecret: process.env.GOOGLE_CLIENT_SECRET,
// Sentry
sentryDsn: process.env.SENTRY_DSN || process.env.NEXT_PUBLIC_SENTRY_DSN,
sentryOrg: process.env.SENTRY_ORG,
sentryProject: process.env.SENTRY_PROJECT,
sentryAuthToken: process.env.SENTRY_AUTH_TOKEN,
// Logging
logLevel: validateLogLevel(getEnvString('LOG_LEVEL', 'info')),
logFormat: validateLogFormat(getEnvString('LOG_FORMAT', 'json')),
// Monitoring
prometheusEnabled: getEnvBoolean('PROMETHEUS_ENABLED', false),
metricsPort: getEnvNumber('METRICS_PORT', 9091),
// Plants.net
plantsNetApiKey: process.env.PLANTS_NET_API_KEY,
// Tor
torEnabled: getEnvBoolean('TOR_ENABLED', false),
torSocksHost: getEnvString('TOR_SOCKS_HOST', '127.0.0.1'),
torSocksPort: getEnvNumber('TOR_SOCKS_PORT', 9050),
torControlPort: getEnvNumber('TOR_CONTROL_PORT', 9051),
torHiddenServiceDir: getEnvString('TOR_HIDDEN_SERVICE_DIR', '/var/lib/tor/localgreenchain'),
// Privacy
defaultPrivacyMode: validatePrivacyMode(getEnvString('DEFAULT_PRIVACY_MODE', 'standard')),
allowAnonymousRegistration: getEnvBoolean('ALLOW_ANONYMOUS_REGISTRATION', true),
locationObfuscationDefault: validateLocationObfuscation(getEnvString('LOCATION_OBFUSCATION_DEFAULT', 'fuzzy')),
// Storage
storageProvider: validateStorageProvider(getEnvString('STORAGE_PROVIDER', 'local')),
s3Bucket: process.env.S3_BUCKET,
s3Region: process.env.S3_REGION,
s3AccessKeyId: process.env.S3_ACCESS_KEY_ID,
s3SecretAccessKey: process.env.S3_SECRET_ACCESS_KEY,
s3Endpoint: process.env.S3_ENDPOINT,
// Email
smtpHost: getEnvString('SMTP_HOST', 'localhost'),
smtpPort: getEnvNumber('SMTP_PORT', 1025),
smtpUser: process.env.SMTP_USER,
smtpPassword: process.env.SMTP_PASSWORD,
smtpFrom: getEnvString('SMTP_FROM', 'noreply@localgreenchain.local'),
// Rate Limiting
rateLimitWindowMs: getEnvNumber('RATE_LIMIT_WINDOW_MS', 60000),
rateLimitMaxRequests: getEnvNumber('RATE_LIMIT_MAX_REQUESTS', 100),
// Security
corsOrigins: getEnvArray('CORS_ORIGINS', ['http://localhost:3001']),
cspReportUri: process.env.CSP_REPORT_URI,
// Feature Flags
isProduction: nodeEnv === 'production',
isDevelopment: nodeEnv === 'development',
isTest: nodeEnv === 'test',
};
}
// Export singleton config
export const env = loadEnv();
// Re-export types
export type { EnvConfig, LogLevel, LogFormat, PrivacyMode, LocationObfuscation, StorageProvider };

16
lib/config/index.ts Normal file
View file

@ -0,0 +1,16 @@
/**
* Configuration Module
* Agent 4: Production Deployment
*
* Central export for all configuration utilities.
*/
export { env } from './env';
export type {
EnvConfig,
LogLevel,
LogFormat,
PrivacyMode,
LocationObfuscation,
StorageProvider,
} from './env';

12
lib/logging/index.ts Normal file
View file

@ -0,0 +1,12 @@
/**
* Logging Module
* Agent 4: Production Deployment
*
* Central export for logging utilities.
*/
export { logger, createLogger } from './logger';
export type { Logger, LogContext, LogEntry } from './logger';
export { withLogging, getRequestLogger } from './middleware';
export type { RequestLogContext, ResponseLogContext } from './middleware';

188
lib/logging/logger.ts Normal file
View file

@ -0,0 +1,188 @@
/**
* Structured Logging System
* Agent 4: Production Deployment
*
* Provides structured JSON logging with support for different log levels,
* context enrichment, and production-ready formatting.
*/
import { env, LogLevel } from '../config';
interface LogContext {
[key: string]: unknown;
}
interface LogEntry {
timestamp: string;
level: LogLevel;
message: string;
service: string;
environment: string;
context?: LogContext;
error?: {
name: string;
message: string;
stack?: string;
};
}
type LogMethod = (message: string, context?: LogContext) => void;
interface Logger {
error: (message: string, errorOrContext?: Error | LogContext, context?: LogContext) => void;
warn: LogMethod;
info: LogMethod;
debug: LogMethod;
trace: LogMethod;
child: (context: LogContext) => Logger;
}
const LOG_LEVEL_PRIORITY: Record<LogLevel, number> = {
error: 0,
warn: 1,
info: 2,
debug: 3,
trace: 4,
};
const LOG_LEVEL_COLORS: Record<LogLevel, string> = {
error: '\x1b[31m', // Red
warn: '\x1b[33m', // Yellow
info: '\x1b[36m', // Cyan
debug: '\x1b[35m', // Magenta
trace: '\x1b[90m', // Gray
};
const RESET_COLOR = '\x1b[0m';
class LoggerImpl implements Logger {
private baseContext: LogContext;
private minLevel: number;
private format: 'json' | 'pretty';
private serviceName: string;
constructor(context: LogContext = {}) {
this.baseContext = context;
this.minLevel = LOG_LEVEL_PRIORITY[env.logLevel];
this.format = env.logFormat;
this.serviceName = env.appName;
}
private shouldLog(level: LogLevel): boolean {
return LOG_LEVEL_PRIORITY[level] <= this.minLevel;
}
private formatEntry(entry: LogEntry): string {
if (this.format === 'pretty') {
return this.formatPretty(entry);
}
return JSON.stringify(entry);
}
private formatPretty(entry: LogEntry): string {
const color = LOG_LEVEL_COLORS[entry.level];
const timestamp = new Date(entry.timestamp).toLocaleTimeString();
const level = entry.level.toUpperCase().padEnd(5);
let output = `${color}[${timestamp}] ${level}${RESET_COLOR} ${entry.message}`;
if (entry.context && Object.keys(entry.context).length > 0) {
output += ` ${JSON.stringify(entry.context)}`;
}
if (entry.error) {
output += `\n ${color}Error: ${entry.error.message}${RESET_COLOR}`;
if (entry.error.stack) {
output += `\n${entry.error.stack.split('\n').slice(1).join('\n')}`;
}
}
return output;
}
private log(level: LogLevel, message: string, context?: LogContext, error?: Error): void {
if (!this.shouldLog(level)) {
return;
}
const entry: LogEntry = {
timestamp: new Date().toISOString(),
level,
message,
service: this.serviceName,
environment: env.nodeEnv,
};
// Merge base context with provided context
const mergedContext = { ...this.baseContext, ...context };
if (Object.keys(mergedContext).length > 0) {
entry.context = mergedContext;
}
// Add error details if present
if (error) {
entry.error = {
name: error.name,
message: error.message,
stack: error.stack,
};
}
const output = this.formatEntry(entry);
// Use appropriate console method
switch (level) {
case 'error':
console.error(output);
break;
case 'warn':
console.warn(output);
break;
case 'debug':
case 'trace':
console.debug(output);
break;
default:
console.log(output);
}
}
error(message: string, errorOrContext?: Error | LogContext, context?: LogContext): void {
if (errorOrContext instanceof Error) {
this.log('error', message, context, errorOrContext);
} else {
this.log('error', message, errorOrContext);
}
}
warn(message: string, context?: LogContext): void {
this.log('warn', message, context);
}
info(message: string, context?: LogContext): void {
this.log('info', message, context);
}
debug(message: string, context?: LogContext): void {
this.log('debug', message, context);
}
trace(message: string, context?: LogContext): void {
this.log('trace', message, context);
}
child(context: LogContext): Logger {
return new LoggerImpl({ ...this.baseContext, ...context });
}
}
// Create and export default logger instance
export const logger = new LoggerImpl();
// Export for creating child loggers with context
export function createLogger(context: LogContext): Logger {
return new LoggerImpl(context);
}
// Export types
export type { Logger, LogContext, LogEntry };

158
lib/logging/middleware.ts Normal file
View file

@ -0,0 +1,158 @@
/**
* Logging Middleware for API Routes
* Agent 4: Production Deployment
*
* Provides request/response logging middleware for Next.js API routes.
*/
import type { NextApiRequest, NextApiResponse, NextApiHandler } from 'next';
import { createLogger, Logger } from './logger';
interface RequestLogContext {
requestId: string;
method: string;
path: string;
query?: Record<string, string | string[]>;
userAgent?: string;
ip?: string;
}
interface ResponseLogContext extends RequestLogContext {
statusCode: number;
duration: number;
}
/**
* Generate a unique request ID
*/
function generateRequestId(): string {
return `req_${Date.now().toString(36)}_${Math.random().toString(36).substring(2, 9)}`;
}
/**
* Get client IP from request headers
*/
function getClientIp(req: NextApiRequest): string {
const forwarded = req.headers['x-forwarded-for'];
if (typeof forwarded === 'string') {
return forwarded.split(',')[0].trim();
}
if (Array.isArray(forwarded)) {
return forwarded[0];
}
return req.socket?.remoteAddress || 'unknown';
}
/**
* Sanitize headers for logging (remove sensitive data)
*/
function sanitizeHeaders(headers: Record<string, string | string[] | undefined>): Record<string, string> {
const sensitiveHeaders = ['authorization', 'cookie', 'x-api-key', 'x-auth-token'];
const sanitized: Record<string, string> = {};
for (const [key, value] of Object.entries(headers)) {
if (sensitiveHeaders.includes(key.toLowerCase())) {
sanitized[key] = '[REDACTED]';
} else if (typeof value === 'string') {
sanitized[key] = value;
} else if (Array.isArray(value)) {
sanitized[key] = value.join(', ');
}
}
return sanitized;
}
/**
* Request logging middleware
*/
export function withLogging(handler: NextApiHandler): NextApiHandler {
return async (req: NextApiRequest, res: NextApiResponse) => {
const startTime = Date.now();
const requestId = generateRequestId();
const logger = createLogger({ requestId });
// Extract request information
const requestContext: RequestLogContext = {
requestId,
method: req.method || 'UNKNOWN',
path: req.url || '/',
query: req.query as Record<string, string | string[]>,
userAgent: req.headers['user-agent'],
ip: getClientIp(req),
};
// Log incoming request
logger.info('Incoming request', {
...requestContext,
headers: sanitizeHeaders(req.headers as Record<string, string | string[] | undefined>),
});
// Add request ID to response headers
res.setHeader('X-Request-Id', requestId);
// Capture the original end method
const originalEnd = res.end;
let responseLogged = false;
// Override end to log response
res.end = function (this: NextApiResponse, ...args: Parameters<typeof originalEnd>) {
if (!responseLogged) {
responseLogged = true;
const duration = Date.now() - startTime;
const responseContext: ResponseLogContext = {
...requestContext,
statusCode: res.statusCode,
duration,
};
// Log based on status code
if (res.statusCode >= 500) {
logger.error('Request completed with server error', responseContext);
} else if (res.statusCode >= 400) {
logger.warn('Request completed with client error', responseContext);
} else {
logger.info('Request completed', responseContext);
}
}
return originalEnd.apply(this, args);
} as typeof originalEnd;
try {
// Execute the handler
await handler(req, res);
} catch (error) {
const duration = Date.now() - startTime;
// Log error
logger.error(
'Request failed with exception',
error instanceof Error ? error : new Error(String(error)),
{
...requestContext,
duration,
}
);
// Re-throw to let Next.js handle the error
throw error;
}
};
}
/**
* Create a logger with request context for use within API handlers
*/
export function getRequestLogger(req: NextApiRequest): Logger {
const requestId = (req.headers['x-request-id'] as string) || generateRequestId();
return createLogger({
requestId,
method: req.method,
path: req.url,
});
}
// Export types
export type { RequestLogContext, ResponseLogContext };

182
lib/monitoring/health.ts Normal file
View file

@ -0,0 +1,182 @@
/**
* Health Check Utilities
* Agent 4: Production Deployment
*
* Provides health check functionality for the application.
*/
import { env } from '../config';
interface HealthCheckResult {
status: 'healthy' | 'unhealthy' | 'degraded';
message?: string;
latencyMs?: number;
}
interface ComponentHealth {
name: string;
status: 'healthy' | 'unhealthy' | 'degraded';
message?: string;
latencyMs?: number;
}
interface HealthStatus {
status: 'healthy' | 'unhealthy' | 'degraded';
version: string;
timestamp: string;
uptime: number;
environment: string;
checks: ComponentHealth[];
}
type HealthChecker = () => Promise<HealthCheckResult>;
/**
* Health check registry
*/
class HealthCheckRegistry {
private checks: Map<string, HealthChecker> = new Map();
private startTime: number = Date.now();
/**
* Register a health check
*/
register(name: string, checker: HealthChecker): void {
this.checks.set(name, checker);
}
/**
* Unregister a health check
*/
unregister(name: string): void {
this.checks.delete(name);
}
/**
* Run all health checks
*/
async runAll(): Promise<HealthStatus> {
const results: ComponentHealth[] = [];
let overallStatus: 'healthy' | 'unhealthy' | 'degraded' = 'healthy';
for (const [name, checker] of this.checks) {
const start = Date.now();
try {
const result = await Promise.race([
checker(),
new Promise<HealthCheckResult>((_, reject) =>
setTimeout(() => reject(new Error('Timeout')), 5000)
),
]);
results.push({
name,
status: result.status,
message: result.message,
latencyMs: result.latencyMs ?? (Date.now() - start),
});
if (result.status === 'unhealthy') {
overallStatus = 'unhealthy';
} else if (result.status === 'degraded' && overallStatus !== 'unhealthy') {
overallStatus = 'degraded';
}
} catch (error) {
results.push({
name,
status: 'unhealthy',
message: error instanceof Error ? error.message : 'Unknown error',
latencyMs: Date.now() - start,
});
overallStatus = 'unhealthy';
}
}
return {
status: overallStatus,
version: process.env.npm_package_version || '1.0.0',
timestamp: new Date().toISOString(),
uptime: Math.floor((Date.now() - this.startTime) / 1000),
environment: env.nodeEnv,
checks: results,
};
}
/**
* Run liveness check (is the process alive?)
*/
async checkLiveness(): Promise<{ status: 'ok' | 'error' }> {
return { status: 'ok' };
}
/**
* Run readiness check (is the application ready to serve traffic?)
*/
async checkReadiness(): Promise<HealthStatus> {
return this.runAll();
}
/**
* Get uptime in seconds
*/
getUptime(): number {
return Math.floor((Date.now() - this.startTime) / 1000);
}
}
// Create singleton instance
export const healthChecks = new HealthCheckRegistry();
// Register default checks
healthChecks.register('memory', async () => {
const used = process.memoryUsage();
const heapUsedMB = Math.round(used.heapUsed / 1024 / 1024);
const heapTotalMB = Math.round(used.heapTotal / 1024 / 1024);
const heapUsagePercent = (used.heapUsed / used.heapTotal) * 100;
if (heapUsagePercent > 90) {
return {
status: 'unhealthy',
message: `High memory usage: ${heapUsedMB}MB / ${heapTotalMB}MB (${heapUsagePercent.toFixed(1)}%)`,
};
} else if (heapUsagePercent > 75) {
return {
status: 'degraded',
message: `Elevated memory usage: ${heapUsedMB}MB / ${heapTotalMB}MB (${heapUsagePercent.toFixed(1)}%)`,
};
}
return {
status: 'healthy',
message: `Memory usage: ${heapUsedMB}MB / ${heapTotalMB}MB (${heapUsagePercent.toFixed(1)}%)`,
};
});
healthChecks.register('eventloop', async () => {
const start = Date.now();
await new Promise((resolve) => setImmediate(resolve));
const lag = Date.now() - start;
if (lag > 100) {
return {
status: 'unhealthy',
message: `Event loop lag: ${lag}ms`,
latencyMs: lag,
};
} else if (lag > 50) {
return {
status: 'degraded',
message: `Event loop lag: ${lag}ms`,
latencyMs: lag,
};
}
return {
status: 'healthy',
message: `Event loop lag: ${lag}ms`,
latencyMs: lag,
};
});
// Export types
export type { HealthCheckResult, ComponentHealth, HealthStatus, HealthChecker };

15
lib/monitoring/index.ts Normal file
View file

@ -0,0 +1,15 @@
/**
* Monitoring Module
* Agent 4: Production Deployment
*
* Central export for monitoring utilities.
*/
export { sentry, captureApiError } from './sentry';
export type { SentryContext, BreadcrumbData } from './sentry';
export { metrics, httpMetrics, appMetrics } from './metrics';
export type { MetricValue, Histogram, HistogramBucket } from './metrics';
export { healthChecks } from './health';
export type { HealthCheckResult, ComponentHealth, HealthStatus, HealthChecker } from './health';

272
lib/monitoring/metrics.ts Normal file
View file

@ -0,0 +1,272 @@
/**
* Application Metrics
* Agent 4: Production Deployment
*
* Provides application metrics for Prometheus monitoring.
* Tracks request counts, response times, and application health.
*/
import { env } from '../config';
interface MetricValue {
value: number;
labels: Record<string, string>;
timestamp: number;
}
interface HistogramBucket {
le: number;
count: number;
}
interface Histogram {
buckets: HistogramBucket[];
sum: number;
count: number;
labels: Record<string, string>;
}
/**
* Simple in-memory metrics store
* In production, replace with prom-client for full Prometheus compatibility
*/
class MetricsRegistry {
private counters: Map<string, MetricValue[]> = new Map();
private gauges: Map<string, MetricValue> = new Map();
private histograms: Map<string, Histogram[]> = new Map();
private readonly defaultBuckets = [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10];
/**
* Increment a counter metric
*/
incrementCounter(name: string, labels: Record<string, string> = {}, value = 1): void {
const key = this.getKey(name, labels);
const existing = this.counters.get(key) || [];
// Find existing entry with same labels or create new
const labelKey = JSON.stringify(labels);
const existingEntry = existing.find(e => JSON.stringify(e.labels) === labelKey);
if (existingEntry) {
existingEntry.value += value;
existingEntry.timestamp = Date.now();
} else {
existing.push({ value, labels, timestamp: Date.now() });
this.counters.set(key, existing);
}
}
/**
* Set a gauge metric value
*/
setGauge(name: string, value: number, labels: Record<string, string> = {}): void {
const key = this.getKey(name, labels);
this.gauges.set(key, { value, labels, timestamp: Date.now() });
}
/**
* Increment a gauge metric
*/
incrementGauge(name: string, labels: Record<string, string> = {}, value = 1): void {
const key = this.getKey(name, labels);
const existing = this.gauges.get(key);
const newValue = (existing?.value || 0) + value;
this.gauges.set(key, { value: newValue, labels, timestamp: Date.now() });
}
/**
* Decrement a gauge metric
*/
decrementGauge(name: string, labels: Record<string, string> = {}, value = 1): void {
this.incrementGauge(name, labels, -value);
}
/**
* Observe a value in a histogram
*/
observeHistogram(name: string, value: number, labels: Record<string, string> = {}): void {
const key = this.getKey(name, labels);
let histograms = this.histograms.get(key);
if (!histograms) {
histograms = [];
this.histograms.set(key, histograms);
}
const labelKey = JSON.stringify(labels);
let histogram = histograms.find(h => JSON.stringify(h.labels) === labelKey);
if (!histogram) {
histogram = {
buckets: this.defaultBuckets.map(le => ({ le, count: 0 })),
sum: 0,
count: 0,
labels,
};
histograms.push(histogram);
}
// Update histogram
histogram.sum += value;
histogram.count += 1;
for (const bucket of histogram.buckets) {
if (value <= bucket.le) {
bucket.count += 1;
}
}
}
/**
* Get all metrics in Prometheus format
*/
toPrometheusFormat(): string {
const lines: string[] = [];
const prefix = 'lgc';
// Counters
for (const [name, values] of this.counters.entries()) {
const metricName = `${prefix}_${name.replace(/[^a-zA-Z0-9_]/g, '_')}_total`;
lines.push(`# HELP ${metricName} Counter metric`);
lines.push(`# TYPE ${metricName} counter`);
for (const v of values) {
const labelStr = this.formatLabels(v.labels);
lines.push(`${metricName}${labelStr} ${v.value}`);
}
}
// Gauges
const gaugeGroups = new Map<string, MetricValue[]>();
for (const [key, value] of this.gauges.entries()) {
const name = key.split('|')[0];
if (!gaugeGroups.has(name)) {
gaugeGroups.set(name, []);
}
gaugeGroups.get(name)!.push(value);
}
for (const [name, values] of gaugeGroups.entries()) {
const metricName = `${prefix}_${name.replace(/[^a-zA-Z0-9_]/g, '_')}`;
lines.push(`# HELP ${metricName} Gauge metric`);
lines.push(`# TYPE ${metricName} gauge`);
for (const v of values) {
const labelStr = this.formatLabels(v.labels);
lines.push(`${metricName}${labelStr} ${v.value}`);
}
}
// Histograms
for (const [name, histograms] of this.histograms.entries()) {
const metricName = `${prefix}_${name.replace(/[^a-zA-Z0-9_]/g, '_')}`;
lines.push(`# HELP ${metricName} Histogram metric`);
lines.push(`# TYPE ${metricName} histogram`);
for (const h of histograms) {
const baseLabels = this.formatLabels(h.labels);
for (const bucket of h.buckets) {
const bucketLabel = h.labels ? `,le="${bucket.le}"` : `le="${bucket.le}"`;
const labelStr = baseLabels ? baseLabels.slice(0, -1) + bucketLabel + '}' : `{${bucketLabel.slice(1)}}`;
lines.push(`${metricName}_bucket${labelStr} ${bucket.count}`);
}
const infLabel = h.labels ? baseLabels.slice(0, -1) + `,le="+Inf"}` : `{le="+Inf"}`;
lines.push(`${metricName}_bucket${infLabel} ${h.count}`);
lines.push(`${metricName}_sum${baseLabels} ${h.sum}`);
lines.push(`${metricName}_count${baseLabels} ${h.count}`);
}
}
return lines.join('\n');
}
/**
* Get metrics as JSON
*/
toJSON(): Record<string, unknown> {
return {
counters: Object.fromEntries(this.counters),
gauges: Object.fromEntries(this.gauges),
histograms: Object.fromEntries(this.histograms),
timestamp: new Date().toISOString(),
};
}
/**
* Reset all metrics
*/
reset(): void {
this.counters.clear();
this.gauges.clear();
this.histograms.clear();
}
private getKey(name: string, labels: Record<string, string>): string {
return name;
}
private formatLabels(labels: Record<string, string>): string {
const entries = Object.entries(labels);
if (entries.length === 0) return '';
return '{' + entries.map(([k, v]) => `${k}="${v}"`).join(',') + '}';
}
}
// Create singleton instance
export const metrics = new MetricsRegistry();
// Pre-defined metric helpers
export const httpMetrics = {
requestTotal(method: string, path: string, statusCode: number): void {
metrics.incrementCounter('http_requests', {
method,
path,
status: String(statusCode),
});
},
requestDuration(method: string, path: string, durationMs: number): void {
metrics.observeHistogram('http_request_duration_seconds', durationMs / 1000, {
method,
path,
});
},
activeConnections(delta: number): void {
metrics.incrementGauge('http_active_connections', {}, delta);
},
};
export const appMetrics = {
plantsRegistered(count = 1): void {
metrics.incrementCounter('plants_registered', {}, count);
},
transportEvents(eventType: string, count = 1): void {
metrics.incrementCounter('transport_events', { type: eventType }, count);
},
agentCycleCompleted(agentName: string, durationMs: number): void {
metrics.observeHistogram('agent_cycle_duration_seconds', durationMs / 1000, {
agent: agentName,
});
metrics.incrementCounter('agent_cycles', { agent: agentName });
},
activeAgents(count: number): void {
metrics.setGauge('active_agents', count);
},
blockchainBlocks(count: number): void {
metrics.setGauge('blockchain_blocks', count);
},
databaseConnections(count: number): void {
metrics.setGauge('database_connections', count);
},
};
// Export types
export type { MetricValue, Histogram, HistogramBucket };

225
lib/monitoring/sentry.ts Normal file
View file

@ -0,0 +1,225 @@
/**
* Sentry Error Tracking Integration
* Agent 4: Production Deployment
*
* Provides error tracking and reporting with Sentry.
* Note: Requires @sentry/nextjs package when implementing full integration.
*/
import { env } from '../config';
import { logger } from '../logging';
interface SentryContext {
user?: {
id?: string;
email?: string;
username?: string;
};
tags?: Record<string, string>;
extra?: Record<string, unknown>;
}
interface BreadcrumbData {
category: string;
message: string;
level?: 'debug' | 'info' | 'warning' | 'error';
data?: Record<string, unknown>;
}
/**
* Sentry error handler (stub implementation)
* Replace with actual @sentry/nextjs integration when package is installed
*/
class SentryHandler {
private isEnabled: boolean;
private dsn: string | undefined;
private breadcrumbs: BreadcrumbData[] = [];
private maxBreadcrumbs = 100;
constructor() {
this.dsn = env.sentryDsn;
this.isEnabled = !!this.dsn && env.isProduction;
if (this.isEnabled) {
logger.info('Sentry error tracking enabled', {
environment: env.nodeEnv,
});
} else if (env.isProduction && !this.dsn) {
logger.warn('Sentry DSN not configured - error tracking disabled');
}
}
/**
* Capture an exception and send to Sentry
*/
captureException(error: Error, context?: SentryContext): string {
const eventId = this.generateEventId();
// Log the error
logger.error('Error captured', error, {
eventId,
...context?.tags,
...context?.extra,
});
if (this.isEnabled) {
// In a real implementation, this would send to Sentry
// For now, we log the error details
this.logToSentry('exception', {
eventId,
error: {
name: error.name,
message: error.message,
stack: error.stack,
},
context,
breadcrumbs: this.breadcrumbs.slice(-20),
});
}
return eventId;
}
/**
* Capture a message and send to Sentry
*/
captureMessage(
message: string,
level: 'debug' | 'info' | 'warning' | 'error' = 'info',
context?: SentryContext
): string {
const eventId = this.generateEventId();
// Log the message
const logFn = level === 'error' ? logger.error : level === 'warning' ? logger.warn : logger.info;
logFn.call(logger, message, { eventId, ...context?.tags });
if (this.isEnabled) {
this.logToSentry('message', {
eventId,
message,
level,
context,
});
}
return eventId;
}
/**
* Set user context for error tracking
*/
setUser(user: SentryContext['user'] | null): void {
if (this.isEnabled && user) {
logger.debug('Sentry user context set', { userId: user.id });
}
}
/**
* Set extra context for error tracking
*/
setContext(name: string, context: Record<string, unknown>): void {
if (this.isEnabled) {
logger.debug('Sentry context set', { name, ...context });
}
}
/**
* Add a breadcrumb for debugging
*/
addBreadcrumb(breadcrumb: BreadcrumbData): void {
this.breadcrumbs.push({
...breadcrumb,
level: breadcrumb.level || 'info',
});
// Keep only the last N breadcrumbs
if (this.breadcrumbs.length > this.maxBreadcrumbs) {
this.breadcrumbs = this.breadcrumbs.slice(-this.maxBreadcrumbs);
}
}
/**
* Create a scope for isolated error tracking
*/
withScope(callback: (scope: SentryScope) => void): void {
const scope = new SentryScope();
callback(scope);
}
/**
* Flush pending events (for serverless environments)
*/
async flush(timeout = 2000): Promise<boolean> {
// In a real implementation, this would flush pending events
return true;
}
/**
* Check if Sentry is enabled
*/
isActive(): boolean {
return this.isEnabled;
}
private generateEventId(): string {
return `evt_${Date.now().toString(36)}_${Math.random().toString(36).substring(2, 11)}`;
}
private logToSentry(type: string, data: Record<string, unknown>): void {
// Placeholder for actual Sentry API call
// In production with @sentry/nextjs, this would use the Sentry SDK
logger.debug(`[Sentry ${type}]`, data);
}
}
/**
* Scope class for isolated error context
*/
class SentryScope {
private tags: Record<string, string> = {};
private extra: Record<string, unknown> = {};
private user: SentryContext['user'] | null = null;
private level: string = 'error';
setTag(key: string, value: string): void {
this.tags[key] = value;
}
setExtra(key: string, value: unknown): void {
this.extra[key] = value;
}
setUser(user: SentryContext['user'] | null): void {
this.user = user;
}
setLevel(level: string): void {
this.level = level;
}
getContext(): SentryContext {
return {
user: this.user || undefined,
tags: this.tags,
extra: this.extra,
};
}
}
// Export singleton instance
export const sentry = new SentryHandler();
// Export for API error handling
export function captureApiError(error: Error, req?: { url?: string; method?: string }): string {
return sentry.captureException(error, {
tags: {
api: 'true',
path: req?.url || 'unknown',
method: req?.method || 'unknown',
},
});
}
// Export types
export type { SentryContext, BreadcrumbData };

131
lib/security/cors.ts Normal file
View file

@ -0,0 +1,131 @@
/**
* CORS Middleware
* Agent 4: Production Deployment
*
* Configures Cross-Origin Resource Sharing for API routes.
*/
import type { NextApiRequest, NextApiResponse, NextApiHandler } from 'next';
import { env } from '../config';
interface CorsConfig {
origins: string[];
methods: string[];
allowedHeaders: string[];
exposedHeaders: string[];
credentials: boolean;
maxAge: number;
}
const DEFAULT_CONFIG: CorsConfig = {
origins: env.corsOrigins,
methods: ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'OPTIONS'],
allowedHeaders: [
'Content-Type',
'Authorization',
'X-Requested-With',
'X-Request-Id',
'X-API-Key',
],
exposedHeaders: [
'X-Request-Id',
'X-RateLimit-Limit',
'X-RateLimit-Remaining',
'X-RateLimit-Reset',
],
credentials: true,
maxAge: 86400, // 24 hours
};
/**
* Check if origin is allowed
*/
function isOriginAllowed(origin: string | undefined, allowedOrigins: string[]): boolean {
if (!origin) return false;
return allowedOrigins.some((allowed) => {
// Exact match
if (allowed === origin) return true;
// Wildcard subdomain match (e.g., *.example.com)
if (allowed.startsWith('*.')) {
const domain = allowed.slice(2);
return origin.endsWith(domain) || origin === `https://${domain}` || origin === `http://${domain}`;
}
// All origins allowed
if (allowed === '*') return true;
return false;
});
}
/**
* Apply CORS headers to response
*/
export function applyCorsHeaders(
req: NextApiRequest,
res: NextApiResponse,
config: CorsConfig = DEFAULT_CONFIG
): void {
const origin = req.headers.origin;
// Set allowed origin
if (isOriginAllowed(origin, config.origins)) {
res.setHeader('Access-Control-Allow-Origin', origin!);
} else if (config.origins.includes('*')) {
res.setHeader('Access-Control-Allow-Origin', '*');
}
// Set other CORS headers
if (config.credentials) {
res.setHeader('Access-Control-Allow-Credentials', 'true');
}
res.setHeader('Access-Control-Allow-Methods', config.methods.join(', '));
res.setHeader('Access-Control-Allow-Headers', config.allowedHeaders.join(', '));
res.setHeader('Access-Control-Expose-Headers', config.exposedHeaders.join(', '));
res.setHeader('Access-Control-Max-Age', config.maxAge.toString());
}
/**
* CORS middleware for API routes
*/
export function withCors(
handler: NextApiHandler,
config?: Partial<CorsConfig>
): NextApiHandler {
const mergedConfig: CorsConfig = { ...DEFAULT_CONFIG, ...config };
return async (req: NextApiRequest, res: NextApiResponse) => {
applyCorsHeaders(req, res, mergedConfig);
// Handle preflight requests
if (req.method === 'OPTIONS') {
return res.status(204).end();
}
return handler(req, res);
};
}
/**
* Strict CORS for internal APIs only
*/
export const strictCors = (handler: NextApiHandler) =>
withCors(handler, {
origins: ['http://localhost:3001'],
credentials: true,
});
/**
* Open CORS for public APIs
*/
export const openCors = (handler: NextApiHandler) =>
withCors(handler, {
origins: ['*'],
credentials: false,
});
// Export types
export type { CorsConfig };

135
lib/security/headers.ts Normal file
View file

@ -0,0 +1,135 @@
/**
* Security Headers Middleware
* Agent 4: Production Deployment
*
* Adds security headers to all responses to protect against common attacks.
*/
import type { NextApiRequest, NextApiResponse, NextApiHandler } from 'next';
import { env } from '../config';
interface SecurityHeadersConfig {
contentSecurityPolicy?: string;
reportUri?: string;
enableHSTS?: boolean;
hstsMaxAge?: number;
frameOptions?: 'DENY' | 'SAMEORIGIN';
contentTypeOptions?: boolean;
xssProtection?: boolean;
referrerPolicy?: string;
permissionsPolicy?: string;
}
const DEFAULT_CONFIG: SecurityHeadersConfig = {
enableHSTS: true,
hstsMaxAge: 31536000, // 1 year
frameOptions: 'DENY',
contentTypeOptions: true,
xssProtection: true,
referrerPolicy: 'strict-origin-when-cross-origin',
};
/**
* Generate Content Security Policy header
*/
function generateCSP(reportUri?: string): string {
const directives = [
"default-src 'self'",
"script-src 'self' 'unsafe-inline' 'unsafe-eval'", // Needed for Next.js
"style-src 'self' 'unsafe-inline'",
"img-src 'self' data: blob: https:",
"font-src 'self' data:",
"connect-src 'self' https: wss:",
"media-src 'self'",
"object-src 'none'",
"frame-ancestors 'none'",
"base-uri 'self'",
"form-action 'self'",
"upgrade-insecure-requests",
];
if (reportUri) {
directives.push(`report-uri ${reportUri}`);
}
return directives.join('; ');
}
/**
* Generate Permissions Policy header
*/
function generatePermissionsPolicy(): string {
const policies = [
'accelerometer=()',
'camera=()',
'geolocation=(self)',
'gyroscope=()',
'magnetometer=()',
'microphone=()',
'payment=()',
'usb=()',
];
return policies.join(', ');
}
/**
* Apply security headers to response
*/
export function applySecurityHeaders(
res: NextApiResponse,
config: SecurityHeadersConfig = DEFAULT_CONFIG
): void {
// Content Security Policy
const csp = config.contentSecurityPolicy || generateCSP(config.reportUri || env.cspReportUri);
res.setHeader('Content-Security-Policy', csp);
// Strict Transport Security (HTTPS)
if (config.enableHSTS && env.isProduction) {
res.setHeader(
'Strict-Transport-Security',
`max-age=${config.hstsMaxAge}; includeSubDomains; preload`
);
}
// Prevent clickjacking
res.setHeader('X-Frame-Options', config.frameOptions || 'DENY');
// Prevent MIME type sniffing
if (config.contentTypeOptions) {
res.setHeader('X-Content-Type-Options', 'nosniff');
}
// XSS Protection (legacy, but still useful)
if (config.xssProtection) {
res.setHeader('X-XSS-Protection', '1; mode=block');
}
// Referrer Policy
res.setHeader('Referrer-Policy', config.referrerPolicy || 'strict-origin-when-cross-origin');
// Permissions Policy
res.setHeader(
'Permissions-Policy',
config.permissionsPolicy || generatePermissionsPolicy()
);
// Remove unnecessary headers
res.removeHeader('X-Powered-By');
}
/**
* Security headers middleware for API routes
*/
export function withSecurityHeaders(
handler: NextApiHandler,
config?: SecurityHeadersConfig
): NextApiHandler {
return async (req: NextApiRequest, res: NextApiResponse) => {
applySecurityHeaders(res, config);
return handler(req, res);
};
}
// Export types
export type { SecurityHeadersConfig };

71
lib/security/index.ts Normal file
View file

@ -0,0 +1,71 @@
/**
* Security Module
* Agent 4: Production Deployment
*
* Central export for security utilities.
*/
export { withSecurityHeaders, applySecurityHeaders } from './headers';
export type { SecurityHeadersConfig } from './headers';
export {
withRateLimit,
createRateLimiter,
authRateLimiter,
apiRateLimiter,
} from './rateLimit';
export type { RateLimitConfig, RateLimitEntry } from './rateLimit';
export { withCors, applyCorsHeaders, strictCors, openCors } from './cors';
export type { CorsConfig } from './cors';
/**
* Compose multiple security middlewares
*/
import type { NextApiHandler } from 'next';
import { withSecurityHeaders } from './headers';
import { withRateLimit } from './rateLimit';
import { withCors } from './cors';
import { withLogging } from '../logging';
/**
* Apply all security middlewares to an API handler
* Order: CORS -> Security Headers -> Rate Limit -> Logging -> Handler
*/
export function withSecurity(handler: NextApiHandler): NextApiHandler {
return withCors(
withSecurityHeaders(
withRateLimit(
withLogging(handler)
)
)
);
}
/**
* Apply security middlewares for public APIs
* Less restrictive for external access
*/
export function withPublicSecurity(handler: NextApiHandler): NextApiHandler {
return withCors(
withSecurityHeaders(
withLogging(handler)
),
{ origins: ['*'], credentials: false }
);
}
/**
* Apply security middlewares for authenticated APIs
* Stricter rate limiting
*/
export function withAuthSecurity(handler: NextApiHandler): NextApiHandler {
return withCors(
withSecurityHeaders(
withRateLimit(
withLogging(handler),
{ maxRequests: 30, windowMs: 60000 }
)
)
);
}

151
lib/security/rateLimit.ts Normal file
View file

@ -0,0 +1,151 @@
/**
* Rate Limiting Middleware
* Agent 4: Production Deployment
*
* Provides rate limiting for API routes to prevent abuse.
*/
import type { NextApiRequest, NextApiResponse, NextApiHandler } from 'next';
import { env } from '../config';
import { logger } from '../logging';
interface RateLimitConfig {
windowMs: number; // Time window in milliseconds
maxRequests: number; // Max requests per window
keyGenerator?: (req: NextApiRequest) => string;
skipSuccessfulRequests?: boolean;
skipFailedRequests?: boolean;
message?: string;
}
interface RateLimitEntry {
count: number;
resetTime: number;
}
// In-memory store for rate limiting
// In production, use Redis for distributed rate limiting
const store = new Map<string, RateLimitEntry>();
// Cleanup old entries periodically
setInterval(() => {
const now = Date.now();
for (const [key, entry] of store.entries()) {
if (entry.resetTime < now) {
store.delete(key);
}
}
}, 60000); // Cleanup every minute
const DEFAULT_CONFIG: RateLimitConfig = {
windowMs: env.rateLimitWindowMs,
maxRequests: env.rateLimitMaxRequests,
message: 'Too many requests, please try again later.',
};
/**
* Default key generator - uses IP address
*/
function defaultKeyGenerator(req: NextApiRequest): string {
const forwarded = req.headers['x-forwarded-for'];
if (typeof forwarded === 'string') {
return forwarded.split(',')[0].trim();
}
if (Array.isArray(forwarded)) {
return forwarded[0];
}
return req.socket?.remoteAddress || 'unknown';
}
/**
* Check if request is rate limited
*/
function checkRateLimit(
key: string,
config: RateLimitConfig
): { limited: boolean; remaining: number; resetTime: number } {
const now = Date.now();
let entry = store.get(key);
// Create new entry if doesn't exist or expired
if (!entry || entry.resetTime < now) {
entry = {
count: 0,
resetTime: now + config.windowMs,
};
store.set(key, entry);
}
// Increment count
entry.count += 1;
const remaining = Math.max(0, config.maxRequests - entry.count);
const limited = entry.count > config.maxRequests;
return { limited, remaining, resetTime: entry.resetTime };
}
/**
* Rate limiting middleware
*/
export function withRateLimit(
handler: NextApiHandler,
config: Partial<RateLimitConfig> = {}
): NextApiHandler {
const mergedConfig: RateLimitConfig = { ...DEFAULT_CONFIG, ...config };
const keyGenerator = mergedConfig.keyGenerator || defaultKeyGenerator;
return async (req: NextApiRequest, res: NextApiResponse) => {
const key = keyGenerator(req);
const { limited, remaining, resetTime } = checkRateLimit(key, mergedConfig);
// Set rate limit headers
res.setHeader('X-RateLimit-Limit', mergedConfig.maxRequests);
res.setHeader('X-RateLimit-Remaining', remaining);
res.setHeader('X-RateLimit-Reset', Math.ceil(resetTime / 1000));
if (limited) {
logger.warn('Rate limit exceeded', {
ip: key,
path: req.url,
method: req.method,
});
res.setHeader('Retry-After', Math.ceil((resetTime - Date.now()) / 1000));
return res.status(429).json({
error: 'Too Many Requests',
message: mergedConfig.message,
retryAfter: Math.ceil((resetTime - Date.now()) / 1000),
});
}
return handler(req, res);
};
}
/**
* Create a rate limiter with custom settings
*/
export function createRateLimiter(config: Partial<RateLimitConfig>) {
return (handler: NextApiHandler) => withRateLimit(handler, config);
}
/**
* Stricter rate limiter for authentication endpoints
*/
export const authRateLimiter = createRateLimiter({
windowMs: 15 * 60 * 1000, // 15 minutes
maxRequests: 5, // 5 attempts per window
message: 'Too many authentication attempts. Please try again in 15 minutes.',
});
/**
* API rate limiter for general endpoints
*/
export const apiRateLimiter = createRateLimiter({
windowMs: 60 * 1000, // 1 minute
maxRequests: 60, // 60 requests per minute
});
// Export types
export type { RateLimitConfig, RateLimitEntry };

52
pages/api/health/index.ts Normal file
View file

@ -0,0 +1,52 @@
/**
* Health Check Endpoint
* Agent 4: Production Deployment
*
* GET /api/health
* Returns overall application health status with component checks.
*/
import type { NextApiRequest, NextApiResponse } from 'next';
import { healthChecks, HealthStatus } from '../../../lib/monitoring';
import { withLogging } from '../../../lib/logging';
interface HealthResponse extends HealthStatus {}
interface ErrorResponse {
error: string;
message: string;
}
async function handler(
req: NextApiRequest,
res: NextApiResponse<HealthResponse | ErrorResponse>
) {
if (req.method !== 'GET') {
res.setHeader('Allow', ['GET']);
return res.status(405).json({
error: 'Method Not Allowed',
message: `Method ${req.method} is not allowed`,
});
}
try {
const health = await healthChecks.runAll();
// Set appropriate status code based on health
const statusCode = health.status === 'healthy' ? 200 : health.status === 'degraded' ? 200 : 503;
// Add cache headers - don't cache health checks
res.setHeader('Cache-Control', 'no-cache, no-store, must-revalidate');
res.setHeader('Pragma', 'no-cache');
res.setHeader('Expires', '0');
return res.status(statusCode).json(health);
} catch (error) {
return res.status(503).json({
error: 'Health Check Failed',
message: error instanceof Error ? error.message : 'Unknown error',
});
}
}
export default withLogging(handler);

60
pages/api/health/live.ts Normal file
View file

@ -0,0 +1,60 @@
/**
* Liveness Probe Endpoint
* Agent 4: Production Deployment
*
* GET /api/health/live
* Returns liveness status for Kubernetes/container orchestration.
* Used to determine if the application process is running.
*/
import type { NextApiRequest, NextApiResponse } from 'next';
import { healthChecks } from '../../../lib/monitoring';
interface LiveResponse {
alive: boolean;
uptime: number;
timestamp: string;
}
interface ErrorResponse {
alive: boolean;
error: string;
timestamp: string;
}
export default async function handler(
req: NextApiRequest,
res: NextApiResponse<LiveResponse | ErrorResponse>
) {
if (req.method !== 'GET') {
res.setHeader('Allow', ['GET']);
return res.status(405).json({
alive: false,
error: `Method ${req.method} is not allowed`,
timestamp: new Date().toISOString(),
});
}
try {
const liveness = await healthChecks.checkLiveness();
const uptime = healthChecks.getUptime();
// Add cache headers - don't cache liveness checks
res.setHeader('Cache-Control', 'no-cache, no-store, must-revalidate');
res.setHeader('Pragma', 'no-cache');
res.setHeader('Expires', '0');
return res.status(200).json({
alive: liveness.status === 'ok',
uptime,
timestamp: new Date().toISOString(),
});
} catch (error) {
// If we can respond at all, we're technically alive
return res.status(200).json({
alive: true,
uptime: healthChecks.getUptime(),
timestamp: new Date().toISOString(),
});
}
}

65
pages/api/health/ready.ts Normal file
View file

@ -0,0 +1,65 @@
/**
* Readiness Probe Endpoint
* Agent 4: Production Deployment
*
* GET /api/health/ready
* Returns readiness status for Kubernetes/container orchestration.
* Used to determine if the application is ready to receive traffic.
*/
import type { NextApiRequest, NextApiResponse } from 'next';
import { healthChecks } from '../../../lib/monitoring';
interface ReadyResponse {
ready: boolean;
checks: Array<{
name: string;
status: string;
}>;
timestamp: string;
}
interface ErrorResponse {
ready: boolean;
error: string;
timestamp: string;
}
export default async function handler(
req: NextApiRequest,
res: NextApiResponse<ReadyResponse | ErrorResponse>
) {
if (req.method !== 'GET') {
res.setHeader('Allow', ['GET']);
return res.status(405).json({
ready: false,
error: `Method ${req.method} is not allowed`,
timestamp: new Date().toISOString(),
});
}
try {
const health = await healthChecks.checkReadiness();
const isReady = health.status === 'healthy' || health.status === 'degraded';
// Add cache headers - don't cache readiness checks
res.setHeader('Cache-Control', 'no-cache, no-store, must-revalidate');
res.setHeader('Pragma', 'no-cache');
res.setHeader('Expires', '0');
return res.status(isReady ? 200 : 503).json({
ready: isReady,
checks: health.checks.map((c) => ({
name: c.name,
status: c.status,
})),
timestamp: health.timestamp,
});
} catch (error) {
return res.status(503).json({
ready: false,
error: error instanceof Error ? error.message : 'Unknown error',
timestamp: new Date().toISOString(),
});
}
}

37
pages/api/metrics.ts Normal file
View file

@ -0,0 +1,37 @@
/**
* Prometheus Metrics Endpoint
* Agent 4: Production Deployment
*
* GET /api/metrics
* Returns application metrics in Prometheus format.
*/
import type { NextApiRequest, NextApiResponse } from 'next';
import { metrics } from '../../lib/monitoring';
import { env } from '../../lib/config';
export default async function handler(
req: NextApiRequest,
res: NextApiResponse
) {
if (req.method !== 'GET') {
res.setHeader('Allow', ['GET']);
return res.status(405).end('Method Not Allowed');
}
// Only expose metrics if enabled
if (!env.prometheusEnabled && env.isProduction) {
return res.status(403).end('Metrics endpoint disabled');
}
try {
const metricsOutput = metrics.toPrometheusFormat();
res.setHeader('Content-Type', 'text/plain; version=0.0.4; charset=utf-8');
res.setHeader('Cache-Control', 'no-cache, no-store, must-revalidate');
return res.status(200).end(metricsOutput);
} catch (error) {
return res.status(500).end('Failed to generate metrics');
}
}