# Check Python version
python3 --version # Should be 3.8+
# Check pip
pip3 --version
# Check Node.js (for UI)
node --version # Should be 20+
# Check npm
npm --version
You need at least one of these AI CLI tools installed: Claude Code, Codex, Gemini, or GitHub Copilot CLI.
# 1. Clone repository
git clone <repository-url>
cd AI-Agents-Orchestrator
# 2. Install Python dependencies
pip3 install -r requirements.txt
# 3. Make CLI executable
chmod +x ai-orchestrator
# 4. Verify installation
./ai-orchestrator --help
# 5. Check available agents
./ai-orchestrator agents
# 6. Start interactive shell
./ai-orchestrator shell
git clone <repository-url>
cd AI-Agents-Orchestrator
# Create virtual environment
python3 -m venv venv
# Activate it
# On Linux/macOS:
source venv/bin/activate
# On Windows:
venv\Scripts\activate
# Your prompt should now show (venv)
# Install production dependencies
pip install -r requirements.txt
# Or install in development mode
pip install -e ".[dev]"
Dependencies Installed:
click - CLI framework
pyyaml - Configuration parsing
rich - Terminal formatting
pydantic - Data validation
tenacity - Retry logic
prometheus-client - Metrics
structlog - Structured logging
python-dotenv - Environment variables
# Linux/macOS
chmod +x ai-orchestrator
# Verify
./ai-orchestrator --help
Windows Users:
# Run with Python directly
python ai-orchestrator --help
# Copy example environment file
cp .env.example .env
# Edit with your settings
nano .env # or vim, code, etc.
Example .env file:
# Logging
LOG_LEVEL=INFO
LOG_FILE=ai-orchestrator.log
# Metrics
ENABLE_METRICS=true
METRICS_PORT=9090
# Workspace
WORKSPACE_DIR=./workspace
OUTPUT_DIR=./output
SESSIONS_DIR=./sessions
# Agent Configuration
AGENTS_CONFIG=config/agents.yaml
# Rate Limiting
RATE_LIMIT_ENABLED=true
RATE_LIMIT_PER_MINUTE=10
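Since python-dotenv is one of the installed dependencies, these values are typically loaded into the process environment at startup. A minimal sketch of that pattern (illustrative only, not the orchestrator's actual startup code):
# Illustrative sketch: how python-dotenv loads .env values into the environment.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current directory, if present

log_level = os.getenv("LOG_LEVEL", "INFO")
metrics_enabled = os.getenv("ENABLE_METRICS", "false").lower() == "true"
metrics_port = int(os.getenv("METRICS_PORT", "9090"))
print(f"log_level={log_level} metrics={metrics_enabled} port={metrics_port}")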
# Check configuration
./ai-orchestrator validate
# List available agents
./ai-orchestrator agents
# List workflows
./ai-orchestrator workflows
# Show system info
./ai-orchestrator info
Installation:
Follow official Claude Code installation from Anthropic:
# Visit: https://docs.anthropic.com/claude-code
# Follow installation instructions for your OS
Authentication:
# Login to Claude
claude auth login
# Follow the prompts to authenticate
Verification:
# Check version
claude --version
# Test command
claude --message "Hello, Claude!"
Configuration in config/agents.yaml:
agents:
  claude:
    enabled: true
    command: "claude"
    role: "refinement"
    timeout: 300
Installation:
# Install via pip (if available)
pip install openai-codex
# Or follow OpenAI's official instructions
Authentication:
# Set API key
export OPENAI_API_KEY="your-api-key-here"
# Or add to .env file
echo "OPENAI_API_KEY=your-api-key" >> .env
# Or use auth command if available
codex auth login
Verification:
# Check version
codex --version
# Test command
echo "Write a hello world function" | codex
Configuration:
agents:
  codex:
    enabled: true
    command: "codex"
    role: "implementation"
    timeout: 300
Installation:
# Install Gemini CLI
pip install google-generativeai
# Or follow Google's installation instructions
Authentication:
# Login with Google account
gemini auth login
# Or set API key
export GOOGLE_API_KEY="your-api-key"
echo "GOOGLE_API_KEY=your-key" >> .env
Verification:
# Check version
gemini --version
# Test command
gemini --prompt "Hello, Gemini!"
Configuration:
agents:
  gemini:
    enabled: true
    command: "gemini"
    role: "review"
    timeout: 180
Installation:
# Install via npm
npm install -g @githubnext/github-copilot-cli
# Or follow GitHub's official instructions
Authentication:
# Login to GitHub
gh auth login
# Authenticate Copilot
copilot auth login
Verification:
# Check version
copilot --version
# Test command
copilot "write a function"
Configuration:
agents:
  copilot:
    enabled: true        # Set to true to enable
    command: "copilot"
    role: "suggestions"
    timeout: 120
Save this as check-tools.sh:
#!/bin/bash
echo "=== AI CLI Tools Check ==="
echo ""

# Claude
if command -v claude &> /dev/null; then
    echo "✓ Claude Code CLI: INSTALLED"
    claude --version 2>&1 | head -1
else
    echo "✗ Claude Code CLI: NOT FOUND"
fi
echo ""

# Codex
if command -v codex &> /dev/null; then
    echo "✓ Codex CLI: INSTALLED"
    codex --version 2>&1 | head -1
else
    echo "✗ Codex CLI: NOT FOUND"
fi
echo ""

# Gemini
if command -v gemini &> /dev/null; then
    echo "✓ Gemini CLI: INSTALLED"
    gemini --version 2>&1 | head -1
else
    echo "✗ Gemini CLI: NOT FOUND"
fi
echo ""

# Copilot
if command -v copilot &> /dev/null; then
    echo "✓ Copilot CLI: INSTALLED"
    copilot --version 2>&1 | head -1
else
    echo "✗ Copilot CLI: NOT FOUND"
fi
echo ""

echo "=== AI Orchestrator ==="
if [ -x "./ai-orchestrator" ]; then
    echo "✓ AI Orchestrator: READY"
    ./ai-orchestrator agents
else
    echo "✗ AI Orchestrator: Not executable"
    echo "  Run: chmod +x ai-orchestrator"
fi
Run it:
chmod +x check-tools.sh
./check-tools.sh
Edit config/agents.yaml to configure agents and workflows:
# Agent Configuration
agents:
  codex:
    enabled: true           # Enable/disable agent
    command: "codex"        # CLI command name
    role: "implementation"  # Agent role
    timeout: 300            # Timeout in seconds

  gemini:
    enabled: true
    command: "gemini"
    role: "review"
    timeout: 180

  claude:
    enabled: true
    command: "claude"
    role: "refinement"
    timeout: 300

# Workflow Configuration
workflows:
  default:
    - agent: "codex"
      task: "implement"
    - agent: "gemini"
      task: "review"
    - agent: "claude"
      task: "refine"

  quick:
    - agent: "codex"
      task: "implement"

# Global Settings
settings:
  max_iterations: 3
  min_suggestions_threshold: 3
  output_dir: "./output"
  workspace_dir: "./workspace"
  log_level: "INFO"
  colored_output: true
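Because pyyaml and pydantic are both listed dependencies, a configuration with this shape can be parsed and validated roughly as follows. This is a minimal sketch; the AgentConfig model name is hypothetical and not taken from the project:
# Minimal sketch: parse and validate the agents section of config/agents.yaml.
# The AgentConfig model is hypothetical, not part of the project.
import yaml
from pydantic import BaseModel

class AgentConfig(BaseModel):
    enabled: bool
    command: str
    role: str
    timeout: int = 300

with open("config/agents.yaml") as f:
    raw = yaml.safe_load(f)

agents = {name: AgentConfig(**spec) for name, spec in raw["agents"].items()}
for name, agent in agents.items():
    print(f"{name}: role={agent.role}, timeout={agent.timeout}s, enabled={agent.enabled}")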
Custom Workflow Example:
workflows:
  security_focused:
    max_iterations: 5
    min_suggestions_threshold: 10
    steps:
      - agent: "codex"
        task: "implement"
        description: "Initial implementation"
      - agent: "gemini"
        task: "security_review"
        description: "Security vulnerability scan"
      - agent: "gemini"
        task: "performance_review"
        description: "Performance analysis"
      - agent: "claude"
        task: "refine"
        description: "Implement all fixes"
      - agent: "gemini"
        task: "final_review"
        description: "Verify all issues resolved"
All settings can be overridden via environment variables:
# Agent configuration
export AI_ORCHESTRATOR_CONFIG="./config/custom-agents.yaml"
# Logging
export LOG_LEVEL="DEBUG"
export LOG_FILE="./logs/orchestrator.log"
# Workspace
export WORKSPACE_DIR="./my-workspace"
export OUTPUT_DIR="./my-output"
# Metrics
export ENABLE_METRICS="true"
export METRICS_PORT="9090"
# Rate limiting
export RATE_LIMIT_PER_MINUTE="20"
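A common pattern for honoring such overrides is to let environment variables take precedence over the values in the YAML settings block. A hedged sketch of that precedence (the merge logic here is an assumption, not the project's actual loader):
# Sketch: environment variables take precedence over the YAML settings block.
# The merge logic is an assumption; only a few settings are shown.
import os
import yaml

config_path = os.environ.get("AI_ORCHESTRATOR_CONFIG", "config/agents.yaml")
with open(config_path) as f:
    settings = yaml.safe_load(f).get("settings", {})

log_level = os.environ.get("LOG_LEVEL", settings.get("log_level", "INFO"))
output_dir = os.environ.get("OUTPUT_DIR", settings.get("output_dir", "./output"))
workspace_dir = os.environ.get("WORKSPACE_DIR", settings.get("workspace_dir", "./workspace"))
print(f"log_level={log_level} output={output_dir} workspace={workspace_dir}")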
# Check Node.js version (need 20+)
node --version
# Check npm
npm --version
# Navigate to UI directory
cd ui
# Install Python dependencies for backend
pip install -r requirements.txt
# Navigate to frontend
cd frontend
# Install Node dependencies
npm install
# From ui/ directory
cd ui
# Start Flask backend
python app.py
# Backend runs on http://localhost:5000
Backend provides:
# From ui/frontend/ directory
cd ui/frontend
# Start development server
npm run dev
# Frontend runs on http://localhost:3000
Frontend features:
Open browser to: http://localhost:3000
# Build for production
cd ui/frontend
npm run build
# Dist files in: ui/frontend/dist/
# Serve with backend
cd ui
python app.py --production
Use the provided startup script:
# Make executable
chmod +x start-ui.sh
# Start both backend and frontend
./start-ui.sh
What it does:
# Build image
docker build -t ai-orchestrator:latest .
# Run container
docker run -it --rm \
-v $(pwd)/config:/app/config \
-v $(pwd)/workspace:/app/workspace \
-v $(pwd)/output:/app/output \
ai-orchestrator:latest shell
# Start orchestrator
docker-compose up -d
# Start with monitoring stack
docker-compose --profile monitoring up -d
# View logs
docker-compose logs -f
# Stop services
docker-compose down
Services included:
ai-orchestrator - Main application
prometheus - Metrics collection (optional)
grafana - Metrics visualization (optional)
Create docker-compose.override.yml:
version: '3.8'
services:
  ai-orchestrator:
    environment:
      - LOG_LEVEL=DEBUG
      - ENABLE_METRICS=true
    volumes:
      - ./my-custom-config:/app/config
# Copy service file
sudo cp deployment/systemd/ai-orchestrator.service /etc/systemd/system/
# Edit service file if needed
sudo vim /etc/systemd/system/ai-orchestrator.service
# Create configuration directory
sudo mkdir -p /etc/ai-orchestrator
sudo cp .env /etc/ai-orchestrator/environment
# Reload systemd
sudo systemctl daemon-reload
# Enable service
sudo systemctl enable ai-orchestrator
# Start service
sudo systemctl start ai-orchestrator
# Check status
sudo systemctl status ai-orchestrator
# View logs
sudo journalctl -u ai-orchestrator -f
# Create namespace
kubectl create namespace ai-orchestrator
# Apply configuration
kubectl apply -f deployment/kubernetes/
# Check deployment
kubectl get pods -n ai-orchestrator
# View logs
kubectl logs -f deployment/ai-orchestrator -n ai-orchestrator
# Port forward for access
kubectl port-forward svc/ai-orchestrator 8080:8080 -n ai-orchestrator
Kubernetes resources created:
Development:
# config/dev.yaml
settings:
  log_level: "DEBUG"
  enable_metrics: false
  max_iterations: 2
Staging:
# config/staging.yaml
settings:
  log_level: "INFO"
  enable_metrics: true
  max_iterations: 3
Production:
# config/production.yaml
settings:
  log_level: "WARNING"
  enable_metrics: true
  max_iterations: 5
  rate_limiting: true
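One straightforward way to switch between these files is to choose the config path from an environment variable. A sketch under that assumption (the APP_ENV variable name is hypothetical, not defined by the project):
# Sketch: pick config/dev.yaml, config/staging.yaml, or config/production.yaml
# based on a hypothetical APP_ENV variable.
import os
import yaml

env = os.environ.get("APP_ENV", "dev")  # "dev", "staging", or "production"
with open(f"config/{env}.yaml") as f:
    settings = yaml.safe_load(f)["settings"]

print(f"{env}: log_level={settings['log_level']}, max_iterations={settings['max_iterations']}")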
Solution:
# Make it executable
chmod +x ai-orchestrator
# Or run with Python
python3 ./ai-orchestrator --help
# Add to PATH (optional)
export PATH=$PATH:$(pwd)
Solution:
# Check if CLI is installed
which claude
which codex
which gemini
# If not found, install it
# See AI CLI Tools Setup section
# Check authentication
claude auth status
codex auth status
# Re-authenticate if needed
claude auth login
Solution:
# Install dependencies
pip install -r requirements.txt
# Or install individually
pip install click pyyaml rich pydantic
# Verify installation
python3 -c "import click; print(click.__version__)"
Solution:
# Fix permissions
chmod +x ai-orchestrator
# Or use Python directly
python3 ai-orchestrator --help
Solution:
# Check Node.js version
node --version # Must be 20+
# Update if needed
nvm install 20
nvm use 20
# Clear node_modules and reinstall
cd ui/frontend
rm -rf node_modules package-lock.json
npm install
# Check backend is running
curl http://localhost:5000/health
Solution:
# Find process using port
lsof -i :5000 # Backend
lsof -i :3000 # Frontend
# Kill process
kill -9 <PID>
# Or use different ports
export BACKEND_PORT=5001
export FRONTEND_PORT=3001
Solution:
# Check YAML syntax
python3 -c "import yaml; yaml.safe_load(open('config/agents.yaml'))"
# Validate against schema
./ai-orchestrator validate --verbose
# Check for common issues
yamllint config/agents.yaml
Enable debug logging for troubleshooting:
# Via environment variable
export LOG_LEVEL=DEBUG
./ai-orchestrator shell
# Via command line flag
./ai-orchestrator run "task" --verbose --debug
# Check logs
tail -f ai-orchestrator.log
# Check system health
./ai-orchestrator info
# Check agent availability
./ai-orchestrator agents
# Validate configuration
./ai-orchestrator validate
# Test with dry run
./ai-orchestrator run "test task" --dry-run
# Show help
./ai-orchestrator --help
# Command-specific help
./ai-orchestrator run --help
./ai-orchestrator shell --help
# Show version
./ai-orchestrator version
# ✓ Python version
python3 --version # 3.8+
# ✓ Dependencies installed
pip list | grep -E "click|pyyaml|rich|pydantic"
# ✓ CLI executable
./ai-orchestrator --help
# ✓ Configuration valid
./ai-orchestrator validate
# ✓ At least one agent available
./ai-orchestrator agents | grep "Available"
# ✓ Workflows loaded
./ai-orchestrator workflows
# ✓ Can execute task
./ai-orchestrator run "Create a hello function" --dry-run
# ✓ Interactive shell works
echo "/exit" | ./ai-orchestrator shell
# ✓ Metrics enabled (optional)
curl http://localhost:9090/metrics
# ✓ Health check passes
curl http://localhost:9090/health
You should see:
./ai-orchestrator shell
./ai-orchestrator run "Create a Python function to add two numbers"
./ai-orchestrator shell
> /save my-first-session
./start-ui.sh
Explore the docs/ directory for detailed documentation.
Congratulations! You’re ready to use AI Coding Tools Orchestrator! 🎉
Start with: ./ai-orchestrator shell