Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
59 changes: 59 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# LLM Integration Configuration
# Copy this file to .env and fill in your API keys.
# Never commit the real .env (containing actual keys) to version control.

# ============================================================================
# LLM Provider Selection
# ============================================================================
# Choose which provider handles LLM requests: 'openai' or 'claude'.
# The API key for the selected provider (configured below) must be set.
LLM_PROVIDER=openai

# ============================================================================
# OpenAI Configuration
# ============================================================================
# Get your API key from: https://platform.openai.com/api-keys
OPENAI_API_KEY=your-openai-api-key-here

# Model options:
# - gpt-4 (most capable, expensive)
# - gpt-3.5-turbo (fast, cheap)
OPENAI_MODEL=gpt-4

# ============================================================================
# Anthropic Claude Configuration
# ============================================================================
# Get your API key from: https://console.anthropic.com/
ANTHROPIC_API_KEY=your-anthropic-api-key-here

# Model options:
# - claude-3-5-sonnet-20241022 (best performance)
# - claude-3-haiku-20240307 (fast, cheap)
CLAUDE_MODEL=claude-3-5-sonnet-20241022

# ============================================================================
# Hybrid Configuration
# ============================================================================
# Confidence threshold for LLM fallback (0.0 - 1.0)
# If Rasa confidence < this value, use LLM
CONFIDENCE_THRESHOLD=0.7

# Ambiguity threshold for intent clarification
# If top 2 intents differ by less than this, clarify with LLM
AMBIGUITY_THRESHOLD=0.2

# Number of previous messages to include as context
CONTEXT_LENGTH=5

# ============================================================================
# Advanced Settings
# ============================================================================
# LLM temperature (0.0 = deterministic, 1.0 = creative)
LLM_TEMPERATURE=0.7

# Max tokens for LLM response
LLM_MAX_TOKENS=500

# Enable response caching (reduces API costs by reusing answers to repeated requests)
ENABLE_LLM_CACHE=true

# Log LLM requests for debugging
LOG_LLM_REQUESTS=true
Loading