# LLM Configuration

# Choose one of the following LLM providers:

# For OpenAI:
LLM_MODEL=gpt-4
LLM_BASE_URL=https://api.openai.com/v1
LLM_API_KEY=your_openai_api_key_here
LLM_PROVIDER=openai

# For Anthropic:
# LLM_MODEL=claude-3-opus-20240229
# LLM_BASE_URL=https://api.anthropic.com
# LLM_API_KEY=your_anthropic_api_key_here
# LLM_PROVIDER=anthropic

# For Ollama (local):
# LLM_MODEL=llama2
# LLM_BASE_URL=http://localhost:11434
# Ollama doesn't require a real API key; any placeholder value works
# LLM_API_KEY=ollama
# LLM_PROVIDER=ollama

# MCP Server Configuration
# Hadolint MCP Server (installed via pip in Docker)
# Checkov MCP Server (installed via pip in Docker)
# Semgrep MCP Server (native, no configuration needed)
# Trivy MCP Server (native, no configuration needed)

# Optional: Semgrep App URL and Token for Semgrep App functionality
# NOTE(review): keys were previously misspelled SEMGRAPH_* — confirm the
# application's config loader expects the SEMGREP_* names used here.
SEMGREP_APP_URL=
SEMGREP_API_TOKEN=

# Timeout Configuration (in seconds)
TOTAL_FLOW_TIMEOUT=600
PER_CREW_TIMEOUT=300

# Other Configuration
LOG_LEVEL=INFO