A complete guide to deploying LatticeDB to AWS, Azure, GCP, and HashiCorp stack environments.

LatticeDB provides production-ready deployment configurations for the major cloud providers and the HashiCorp stack. Each deployment option is designed for high availability, security, and scalability.
| Platform | Service | Features | Best For |
|---|---|---|---|
| AWS | ECS Fargate + ALB | Auto-scaling, EFS storage, CloudWatch | Enterprise workloads |
| Azure | Container Apps | Serverless, auto-scaling, Azure Files | Modern cloud-native apps |
| GCP | Cloud Run + Cloud SQL | Serverless, global load balancer | Global applications |
| HashiCorp | Nomad + Consul + Vault | Service mesh, secrets management | On-premises/hybrid |
```mermaid
graph TB
    subgraph "Load Balancer Layer"
        LB[Load Balancer]
    end
    subgraph "Application Layer"
        A1[LatticeDB Instance 1]
        A2[LatticeDB Instance 2]
        A3[LatticeDB Instance N]
    end
    subgraph "Storage Layer"
        DB[(Database)]
        FS[File Storage]
        CACHE[Cache]
    end
    subgraph "Observability"
        LOGS[Logs]
        METRICS[Metrics]
        TRACES[Traces]
    end
    LB --> A1
    LB --> A2
    LB --> A3
    A1 --> DB
    A2 --> DB
    A3 --> DB
    A1 --> FS
    A2 --> FS
    A3 --> FS
    A1 --> LOGS
    A2 --> LOGS
    A3 --> LOGS
```
```bash
git clone https://github.com/your-org/LatticeDB-DBMS.git
cd LatticeDB-DBMS

# Build the application first
mkdir build && cd build
cmake .. -DCMAKE_BUILD_TYPE=Release
make -j$(nproc)
cd ..

# AWS
cd aws && ./deploy.sh

# Azure
cd azure && ./deploy.sh

# GCP
export GCP_PROJECT=your-project-id
cd gcp && ./deploy.sh

# HashiCorp Stack
cd hashicorp && ./deploy.sh
```
Each deployment script will output connection information and management commands.
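As a quick sanity check once a script finishes, you can probe the reported endpoint. This is a minimal sketch, assuming the URL printed by the deploy script (the value shown is hypothetical) and the `/health` endpoint described later in this guide:

```bash
# SERVICE_URL is whatever endpoint your deploy script printed (hypothetical value shown).
SERVICE_URL="https://latticedb.example.com"

# Fail fast if the health endpoint is not responding yet.
curl -fsS "$SERVICE_URL/health" && echo "LatticeDB is up"
```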
```mermaid
graph TB
    subgraph "AWS Cloud"
        subgraph "VPC"
            subgraph "Public Subnets"
                ALB[Application Load Balancer]
                NAT[NAT Gateway]
            end
            subgraph "Private Subnets"
                ECS1[ECS Task 1]
                ECS2[ECS Task 2]
            end
        end
        ECR[Elastic Container Registry]
        EFS[Elastic File System]
        CW[CloudWatch]
    end
    Internet --> ALB
    ALB --> ECS1
    ALB --> ECS2
    ECS1 --> EFS
    ECS2 --> EFS
    ECS1 --> CW
    ECS2 --> CW
```
```bash
cd aws

# Copy and customize configuration
cp terraform.tfvars.example terraform.tfvars
# Edit terraform.tfvars with your settings

# Deploy
./deploy.sh
```
Example `terraform.tfvars`:

```hcl
# AWS Configuration
aws_region   = "us-west-2"
project_name = "latticedb"
environment  = "production"

# Container Configuration
container_cpu    = 1024 # 1 vCPU
container_memory = 2048 # 2 GB
desired_count    = 2
min_capacity     = 1
max_capacity     = 10

# Storage
enable_efs = true

# Domain (optional)
domain_name     = "latticedb.example.com"
certificate_arn = "arn:aws:acm:us-west-2:123456789012:certificate/..."

# Network Security
allowed_cidr_blocks = ["10.0.0.0/8", "172.16.0.0/12"]

# Monitoring
enable_monitoring  = true
notification_email = "admin@example.com"

# Backup
enable_backup         = true
backup_retention_days = 30
```
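If you prefer to drive Terraform yourself instead of going through `deploy.sh`, the standard workflow applies. This is a sketch; `deploy.sh` wraps roughly these steps, though its exact behavior may differ:

```bash
cd aws
terraform init             # download providers and configure the state backend
terraform plan -out=tfplan # review the planned changes before applying
terraform apply tfplan     # apply exactly the reviewed plan
```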
```bash
# View service status
aws ecs describe-services --cluster latticedb-production --services latticedb-production

# Scale service
aws ecs update-service --cluster latticedb-production --service latticedb-production --desired-count 5

# View logs
aws logs tail /ecs/latticedb-production --follow

# Update deployment
./deploy.sh update
```
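After scaling or updating, you may want to block until ECS reports the deployment as settled. A small sketch using the standard AWS CLI waiter:

```bash
# Wait until running tasks match the desired count and the deployment has settled.
aws ecs wait services-stable \
  --cluster latticedb-production \
  --services latticedb-production
echo "Service is stable"
```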
```mermaid
graph TB
    subgraph "Azure Cloud"
        subgraph "Resource Group"
            subgraph "Virtual Network"
                CA[Container Apps Environment]
                ACR[Container Registry]
            end
            SA[Storage Account]
            KV[Key Vault]
            AI[Application Insights]
        end
    end
    Internet --> CA
    CA --> SA
    CA --> KV
    CA --> AI
```
```bash
cd azure

# Authenticate with Azure
az login

# Copy and customize configuration
cp terraform.tfvars.example terraform.tfvars
# Edit terraform.tfvars with your settings

# Deploy
./deploy.sh
```
Example `terraform.tfvars`:

```hcl
# Azure Configuration
azure_location = "East US"
project_name   = "latticedb"
environment    = "production"

# Container Configuration
container_cpu    = 1.0
container_memory = "2Gi"
min_replicas     = 1
max_replicas     = 10

# Storage
enable_persistent_storage = true
storage_replication_type  = "LRS"

# Monitoring
enable_monitoring  = true
notification_email = "admin@example.com"
```
```bash
# View app status
az containerapp show --name latticedb-production --resource-group latticedb-production-rg

# Scale app
az containerapp update --name latticedb-production --resource-group latticedb-production-rg --min-replicas 2 --max-replicas 15

# View logs
az containerapp logs show --name latticedb-production --resource-group latticedb-production-rg --follow

# Update deployment
./deploy.sh update
```
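Container Apps keeps a history of revisions, which is handy when verifying that an update actually rolled out. A sketch using the standard `az` CLI:

```bash
# List revisions with their traffic weights and health, in table form.
az containerapp revision list \
  --name latticedb-production \
  --resource-group latticedb-production-rg \
  -o table
```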
```mermaid
graph TB
    subgraph "Google Cloud"
        subgraph "Project"
            CR[Cloud Run]
            AR[Artifact Registry]
            LB[Load Balancer]
        end
        subgraph "Optional"
            SQL[Cloud SQL]
            GCS[Cloud Storage]
        end
    end
    Internet --> LB
    LB --> CR
    CR --> SQL
    CR --> GCS
```
```bash
cd gcp

# Set up GCP authentication
gcloud auth login
gcloud config set project YOUR_PROJECT_ID

# Copy and customize configuration
cp terraform.tfvars.example terraform.tfvars
# Edit terraform.tfvars with your settings

# Deploy
export GCP_PROJECT=your-project-id
./deploy.sh
```
Example `terraform.tfvars`:

```hcl
# GCP Configuration
project_id   = "your-gcp-project-id"
gcp_region   = "us-central1"
project_name = "latticedb"
environment  = "production"

# Container Configuration
cpu_limit     = "2000m"
memory_limit  = "2Gi"
min_instances = 0
max_instances = 10

# Database (optional)
enable_cloud_sql = false

# Domain (optional)
custom_domain = "latticedb.example.com"
enable_cdn    = true
```
```bash
# View service status
gcloud run services describe latticedb-production --region=us-central1

# Update service
gcloud run services update latticedb-production --image=gcr.io/project/latticedb:v2.0.0 --region=us-central1

# View logs
gcloud logging read 'resource.type=cloud_run_revision resource.labels.service_name=latticedb-production' --limit=50

# Scale service
gcloud run services update latticedb-production --region=us-central1 --min-instances=2 --max-instances=20
```
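Cloud Run also supports gradual rollouts by splitting traffic between revisions. A sketch using the standard `gcloud` command; the revision names are hypothetical and should match what `gcloud run revisions list` reports:

```bash
# Send 10% of traffic to a new revision, keeping 90% on the previous one.
gcloud run services update-traffic latticedb-production \
  --region=us-central1 \
  --to-revisions=latticedb-production-00042-new=10,latticedb-production-00041-old=90
```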
```mermaid
graph TB
    subgraph "HashiCorp Stack"
        subgraph "Consul"
            SD[Service Discovery]
            KV[Key-Value Store]
            SM[Service Mesh]
        end
        subgraph "Vault"
            SEC[Secrets Management]
            PKI[Certificate Authority]
            DB[Dynamic Secrets]
        end
        subgraph "Nomad"
            SCHED[Job Scheduler]
            ORCH[Container Orchestration]
        end
    end
    subgraph "Application"
        APP[LatticeDB Instances]
        PROXY[Envoy Proxy]
    end
    SD --> APP
    SEC --> APP
    SCHED --> APP
    SM --> PROXY
```
```bash
cd hashicorp

# Ensure HashiCorp stack is running
export CONSUL_HTTP_ADDR=http://127.0.0.1:8500
export VAULT_ADDR=http://127.0.0.1:8200
export NOMAD_ADDR=http://127.0.0.1:4646

# Authenticate with Vault
vault login

# Copy and customize configuration
cp terraform.tfvars.example terraform.tfvars
# Edit terraform.tfvars with your settings

# Deploy
./deploy.sh
```
Example `terraform.tfvars`:

```hcl
# HashiCorp Stack
consul_address    = "http://127.0.0.1:8500"
vault_address     = "http://127.0.0.1:8200"
nomad_address     = "http://127.0.0.1:4646"
consul_datacenter = "dc1"

# Service Mesh
enable_consul_connect = true
enable_consul_acl     = true

# Vault Features
enable_vault_database_secrets = true
enable_vault_pki              = true

# Application
instance_count = 2
cpu_limit      = 1000 # MHz
memory_limit   = 2048 # MB
```
```bash
# View service status
consul catalog service latticedb-production

# View job status
nomad job status latticedb-production

# Scale job
nomad job scale latticedb-production 5

# View logs
nomad logs -f latticedb-production

# Vault operations
vault kv get latticedb/production/config
```
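With `enable_vault_database_secrets = true`, instances can request short-lived database credentials instead of static ones. A minimal sketch, assuming a database secrets engine mounted at `database/` with a `latticedb` role (both names are assumptions; match them to your Vault configuration):

```bash
# Request a short-lived credential pair; Vault revokes it automatically at lease expiry.
vault read database/creds/latticedb

# Inspect the lease TTL (lease_id comes from the previous command's output).
vault lease lookup <lease_id>
```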
The `.travis.yml` file provides a multi-stage Travis CI pipeline, and the `Jenkinsfile` provides an equivalent pipeline for Jenkins. For GitHub Actions, create `.github/workflows/deploy.yml`:
```yaml
name: Deploy LatticeDB

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]

jobs:
  deploy:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target: [aws, azure, gcp, hashicorp]
    steps:
      - uses: actions/checkout@v3
      - name: Deploy to ${{ matrix.target }}
        run: |
          cd ${{ matrix.target }}
          ./deploy.sh
        env:
          # Add your secrets here
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
          GOOGLE_PROJECT_ID: ${{ secrets.GOOGLE_PROJECT_ID }}
```
All LatticeDB deployments include integrated Prometheus and Grafana monitoring:
```mermaid
graph TB
    subgraph "Application Layer"
        APP1[LatticeDB Instance 1]
        APP2[LatticeDB Instance 2]
        APP3[LatticeDB Instance N]
    end
    subgraph "Monitoring Stack"
        PROM[Prometheus]
        GRAF[Grafana]
        AM[AlertManager]
    end
    subgraph "Infrastructure Monitoring"
        NE[Node Exporter]
        CAD[cAdvisor]
        BE[Blackbox Exporter]
    end
    subgraph "Notification Channels"
        EMAIL[Email]
        SLACK[Slack]
        PD[PagerDuty]
    end
    APP1 --> PROM
    APP2 --> PROM
    APP3 --> PROM
    NE --> PROM
    CAD --> PROM
    BE --> PROM
    PROM --> GRAF
    PROM --> AM
    AM --> EMAIL
    AM --> SLACK
    AM --> PD
```
```bash
# Access Prometheus (AWS)
kubectl port-forward svc/prometheus 9090:9090

# Access Grafana (AWS)
kubectl port-forward svc/grafana 3000:3000

# Access Prometheus (Azure)
az containerapp exec --name prometheus --command "curl localhost:9090"

# Access Grafana (Azure)
az containerapp browse --name grafana

# Access Prometheus (GCP)
gcloud run services proxy prometheus --port=9090

# Access Grafana (GCP)
gcloud run services proxy grafana --port=3000

# Access Prometheus (HashiCorp)
consul connect proxy -service prometheus -upstream prometheus:9090

# Access Grafana (HashiCorp)
consul connect proxy -service grafana -upstream grafana:3000
```
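Once Prometheus is reachable locally (for example via one of the proxies above), a quick way to confirm that targets are being scraped is to query the built-in `up` metric over the HTTP API. A minimal sketch (requires `jq`):

```bash
# "1" means the target is up; "0" means its scrape is failing.
curl -s 'http://localhost:9090/api/v1/query?query=up' \
  | jq '.data.result[] | {instance: .metric.instance, up: .value[1]}'
```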
Each environment includes monitoring-specific configurations:

Development Environment:

```hcl
# environments/dev/monitoring.tfvars
prometheus_retention   = "7d"
grafana_admin_password = "admin"
alert_channels         = ["email"]
enable_pagerduty       = false
```

Staging Environment:

```hcl
# environments/staging/monitoring.tfvars
prometheus_retention   = "14d"
grafana_admin_password = "secure-staging-password"
alert_channels         = ["email", "slack"]
enable_pagerduty       = false
```

Production Environment:

```hcl
# environments/prod/monitoring.tfvars
prometheus_retention   = "30d"
grafana_admin_password = "ultra-secure-password"
alert_channels         = ["email", "slack", "pagerduty"]
enable_pagerduty       = true
pagerduty_service_key  = "your-pagerduty-key"
```
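To apply one of these profiles, pass the matching file to Terraform with the standard `-var-file` flag (a sketch; `deploy.sh` may already do this for you):

```bash
# Apply the production monitoring profile on top of the base configuration.
terraform apply -var-file=environments/prod/monitoring.tfvars
```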
All deployments include comprehensive health checks:

- `/health` endpoint
- `/ready` endpoint
- `/metrics` endpoint (Prometheus format)

To tail or query application logs on each platform:

```bash
# AWS
aws logs tail /ecs/latticedb-production --follow --format short

# Azure
az monitor log-analytics query -w workspace-id --analytics-query "ContainerAppConsoleLogs_CL | where TimeGenerated > ago(1h)"

# GCP
gcloud logging read 'resource.type=cloud_run_revision' --limit=100

# HashiCorp
nomad logs -f latticedb-production
consul monitor -log-level=INFO
```
Issue: Terraform state lock

```bash
# Solution: force unlock (use carefully)
terraform force-unlock LOCK_ID
```

Issue: Container startup failures

```bash
# Check container logs
docker logs container_name

# Check resource constraints:
# increase CPU/memory limits in terraform.tfvars
```

Issue: Health check failures

```bash
# Test the health endpoint directly
curl -f http://service-url/health

# Check application logs for errors
# Verify database connectivity
```
Other recurring issues worth investigating with the same tools: high response times, memory leaks, SSL/TLS certificate problems, and network connectivity.
Each platform provides monitoring dashboards. For smaller or non-production environments, each platform also supports a cost-optimized configuration:
AWS:

```hcl
# Cost-optimized configuration
container_cpu    = 512  # reduce CPU
container_memory = 1024 # reduce memory
desired_count    = 1    # start with fewer instances
```

Azure:

```hcl
# Cost-optimized configuration
min_replicas             = 0     # scale to zero when not in use
storage_replication_type = "LRS" # local redundancy
```

GCP:

```hcl
# Cost-optimized configuration
min_instances    = 0     # scale to zero
enable_cloud_sql = false # use SQLite for development
```

HashiCorp:

```hcl
# Cost-optimized configuration
cpu_limit    = 500  # lower CPU limit (MHz)
memory_limit = 1024 # lower memory limit (MB)
```
Set up billing alerts and cost monitoring:
```bash
# AWS
aws budgets create-budget --account-id 123456789012 --budget file://budget.json

# Azure
az consumption budget create --budget-name LatticeDBBudget --amount 100

# GCP
gcloud alpha billing budgets create --billing-account=BILLING_ACCOUNT --display-name="LatticeDB Budget"
```
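The AWS command above expects a budget definition file. A minimal sketch of a `budget.json` for a monthly cost budget (the $100 USD limit is an example value):

```bash
# Write a minimal monthly cost budget; adjust Amount to your needs.
cat > budget.json <<'EOF'
{
  "BudgetName": "LatticeDBBudget",
  "BudgetLimit": { "Amount": "100", "Unit": "USD" },
  "TimeUnit": "MONTHLY",
  "BudgetType": "COST"
}
EOF
```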
Q: Can I deploy to multiple clouds simultaneously? A: Yes, each deployment is independent. You can run deployments in parallel or sequentially.
Q: How do I migrate between cloud providers? A: Use the database backup/restore functionality to migrate data between providers.
Q: What's the recommended deployment for production? A: For production workloads, we recommend the production-environment settings shown above: multiple replicas, monitoring, and backups enabled.
Q: Can I use existing VPC? A: Yes, modify the Terraform configuration to reference existing VPC resources.
Q: How do I enable HTTPS? A: Provide `domain_name`, `certificate_arn`, and `route53_zone_id` in terraform.tfvars.
Q: Can I use RDS instead of EFS? A: Yes, modify the configuration to include RDS and update the connection string.
Q: How do I use a custom domain? A: Set `domain_name` and `certificate_thumbprint` in terraform.tfvars.
Q: Can I use Azure SQL? A: The configuration supports Azure Files by default. For Azure SQL, modify the database connection string.
Q: How do I enable Cloud SQL? A: Set `enable_cloud_sql = true` in terraform.tfvars and configure the database parameters.
Q: Can I use multiple regions? A: Yes, deploy to multiple regions with different Terraform state files.
Q: Do I need all three tools (Consul, Vault, Nomad)? A: Consul and Nomad are required. Vault is optional but recommended for production.
Q: Can I use external Consul/Vault/Nomad? A: Yes, configure the addresses in terraform.tfvars to point to your existing clusters.
Q: How do I enable mTLS? A: Enable Consul Connect and Vault PKI in terraform.tfvars for automatic mTLS.
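With Connect enabled, service-to-service authorization in the mesh is governed by Consul intentions. A sketch of allowing an upstream service to reach LatticeDB (the `web` service name is an assumption; match names to your Consul catalog):

```bash
# Allow mesh connections from "web" to "latticedb-production".
consul intention create -allow web latticedb-production
```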
For additional support and questions, please refer to the GitHub Issues or contact the development team.