From 4041018397901f4fbe0b29f1a26f3afbb15f4dea Mon Sep 17 00:00:00 2001
From: CyberSecurityUP
Date: Sun, 22 Feb 2026 18:04:43 -0300
Subject: [PATCH] Fix: OpenRouter/Together/Fireworks detection + deprecated gpt-4-turbo-preview model

Issues fixed:

- OpenRouter API key not recognized: _set_no_provider_error() now checks all
  7 provider keys (was only checking Anthropic/OpenAI/Google), so users with
  only OPENROUTER_API_KEY set no longer get "No API keys configured" error
- Error message now lists all 8 providers (added OpenRouter, Together,
  Fireworks) instead of only 5 (Anthropic, OpenAI, Google, Ollama, LM Studio)
- gpt-4-turbo-preview (deprecated by OpenAI, 404 error) replaced with gpt-4o
  as default OpenAI model in LLMClient init and generate() fallback
- Settings API model list updated: removed gpt-4-turbo-preview and
  o1-preview/mini, added gpt-4.1, gpt-4.1-mini, o3-mini
- .env.example comment updated to reference gpt-4o instead of gpt-4-turbo

Co-Authored-By: Claude Opus 4.6
---
 .env.example                     |  2 +-
 backend/api/v1/agent.py          | 23 ++++++++++++++++-------
 backend/api/v1/settings.py       |  6 +++---
 backend/core/autonomous_agent.py |  8 +++++---
 4 files changed, 25 insertions(+), 14 deletions(-)

diff --git a/.env.example b/.env.example
index 288dc59..19ea484 100755
--- a/.env.example
+++ b/.env.example
@@ -41,7 +41,7 @@ FIREWORKS_API_KEY=
 # Max output tokens (up to 64000 for Claude). Comment out for profile defaults.
 #MAX_OUTPUT_TOKENS=64000
 
-# Select specific model name (e.g., claude-sonnet-4-20250514, gpt-4-turbo, llama3.2, qwen2.5)
+# Select specific model name (e.g., claude-sonnet-4-20250514, gpt-4o, llama3.2, qwen2.5)
 # Leave empty for provider default
 #DEFAULT_LLM_MODEL=
 
diff --git a/backend/api/v1/agent.py b/backend/api/v1/agent.py
index 60f47e8..6b943de 100755
--- a/backend/api/v1/agent.py
+++ b/backend/api/v1/agent.py
@@ -1856,17 +1856,26 @@ async def send_realtime_message(session_id: str, request: RealtimeMessageRequest
         if not llm_status.get("lmstudio_available"):
             error_details.append("LM Studio not running")
         if not llm_status.get("has_google_key"):
-            error_details.append("No GOOGLE_API_KEY set")
+            error_details.append("No GEMINI_API_KEY set")
+        if not llm_status.get("has_openrouter_key"):
+            error_details.append("No OPENROUTER_API_KEY set")
+        if not llm_status.get("has_together_key"):
+            error_details.append("No TOGETHER_API_KEY set")
+        if not llm_status.get("has_fireworks_key"):
+            error_details.append("No FIREWORKS_API_KEY set")
 
         error_msg = f"""⚠️ **No LLM Provider Available**
 
-Configure at least one of the following:
+Configure at least one of the following in your `.env` file:
 
-1. **Claude (Anthropic)**: Set `ANTHROPIC_API_KEY` environment variable
-2. **OpenAI/ChatGPT**: Set `OPENAI_API_KEY` environment variable
-3. **Google Gemini**: Set `GOOGLE_API_KEY` environment variable
-4. **Ollama (Local)**: Run `ollama serve` and ensure a model is pulled
-5. **LM Studio (Local)**: Start LM Studio server on port 1234
+1. **Claude (Anthropic)**: Set `ANTHROPIC_API_KEY`
+2. **OpenAI/ChatGPT**: Set `OPENAI_API_KEY`
+3. **OpenRouter (multi-model)**: Set `OPENROUTER_API_KEY`
+4. **Google Gemini**: Set `GEMINI_API_KEY`
+5. **Together AI**: Set `TOGETHER_API_KEY`
+6. **Fireworks AI**: Set `FIREWORKS_API_KEY`
+7. **Ollama (Local)**: Run `ollama serve` and ensure a model is pulled
+8. **LM Studio (Local)**: Start LM Studio server on port 1234
 
 **Current status:**
 {chr(10).join(f"- {d}" for d in error_details) if error_details else "- Unknown configuration issue"}
diff --git a/backend/api/v1/settings.py b/backend/api/v1/settings.py
index c8a2f2f..d21d876 100755
--- a/backend/api/v1/settings.py
+++ b/backend/api/v1/settings.py
@@ -538,11 +538,11 @@ CLOUD_MODELS = {
         {"model_id": "claude-haiku-4-20250514", "display_name": "Claude Haiku 4", "context_length": 200000},
     ],
     "openai": [
-        {"model_id": "gpt-4-turbo-preview", "display_name": "GPT-4 Turbo", "context_length": 128000},
         {"model_id": "gpt-4o", "display_name": "GPT-4o", "context_length": 128000},
         {"model_id": "gpt-4o-mini", "display_name": "GPT-4o Mini", "context_length": 128000},
-        {"model_id": "o1-preview", "display_name": "O1 Preview", "context_length": 128000},
-        {"model_id": "o1-mini", "display_name": "O1 Mini", "context_length": 128000},
+        {"model_id": "gpt-4.1", "display_name": "GPT-4.1", "context_length": 1047576},
+        {"model_id": "gpt-4.1-mini", "display_name": "GPT-4.1 Mini", "context_length": 1047576},
+        {"model_id": "o3-mini", "display_name": "O3 Mini", "context_length": 200000},
     ],
     "gemini": [
         {"model_id": "gemini-pro", "display_name": "Gemini Pro", "context_length": 30720},
diff --git a/backend/core/autonomous_agent.py b/backend/core/autonomous_agent.py
index 8ca3c17..590ff7f 100755
--- a/backend/core/autonomous_agent.py
+++ b/backend/core/autonomous_agent.py
@@ -421,7 +421,7 @@ class LLMClient:
         try:
             self.client = openai.OpenAI(api_key=self.openai_key)
             self.provider = "openai"
-            self.model_name = self.configured_model or "gpt-4-turbo-preview"
+            self.model_name = self.configured_model or "gpt-4o"
             print(f"[LLM] OpenAI API initialized (model: {self.model_name})")
             return
         except Exception as e:
@@ -514,8 +514,10 @@ class LLMClient:
         errors = []
         if not ANTHROPIC_AVAILABLE and not OPENAI_AVAILABLE:
             errors.append("LLM libraries not installed (run: pip install anthropic openai)")
-        if not self.anthropic_key and not self.openai_key and not self.google_key:
-            errors.append("No API keys configured")
+        all_keys = [self.anthropic_key, self.openai_key, self.google_key,
+                    self.openrouter_key, self.together_key, self.fireworks_key, self.codex_key]
+        if not any(all_keys):
+            errors.append("No API keys configured (set ANTHROPIC_API_KEY, OPENAI_API_KEY, OPENROUTER_API_KEY, GEMINI_API_KEY, TOGETHER_API_KEY, or FIREWORKS_API_KEY)")
        if not self._check_ollama():
             errors.append("Ollama not running locally")
         if not self._check_lmstudio():
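
A minimal standalone sketch of the key-detection behavior this patch intends (a simplification, not the exact body of _set_no_provider_error(); the environment variable names are the six listed in the updated error message):

    import os

    # Environment variables for the cloud providers the patch now recognizes.
    PROVIDER_ENV_VARS = [
        "ANTHROPIC_API_KEY",
        "OPENAI_API_KEY",
        "OPENROUTER_API_KEY",
        "GEMINI_API_KEY",
        "TOGETHER_API_KEY",
        "FIREWORKS_API_KEY",
    ]

    def has_any_provider_key() -> bool:
        # Mirrors the new any(all_keys) guard: a single configured key is enough,
        # so an OpenRouter-only setup no longer reports "No API keys configured".
        return any(os.environ.get(name) for name in PROVIDER_ENV_VARS)

    if __name__ == "__main__":
        os.environ.setdefault("OPENROUTER_API_KEY", "example-key")  # hypothetical value
        print(has_any_provider_key())  # expected: True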