diff --git a/backend/app/api/settings.py b/backend/app/api/settings.py
index ec50221..687d032 100644
--- a/backend/app/api/settings.py
+++ b/backend/app/api/settings.py
@@ -55,32 +55,156 @@ async def update_setting(
 
 @router.post("/test-llm")
-async def test_llm_connection(current_user: User = Depends(get_current_admin)):
-    """Test that the configured LLM provider responds correctly."""
-    from app.services.llm_service import get_llm_provider
+async def test_llm_connection(
+    db: AsyncSession = Depends(get_db),
+    current_user: User = Depends(get_current_admin),
+):
+    """Ping the configured LLM provider with a minimal request."""
+    import asyncio
+    prov_row = await db.get(AppSetting, "llm_provider")
+    model_row = await db.get(AppSetting, "llm_model")
+    provider_name = prov_row.value if prov_row else settings.LLM_PROVIDER
+    model_name = model_row.value if model_row else None
     try:
-        provider = get_llm_provider()
-        brief = provider.generate_brief(
-            doc_text="This is a test bill for connection verification purposes.",
-            bill_metadata={
-                "title": "Test Connection Bill",
-                "sponsor_name": "Test Sponsor",
-                "party": "Test",
-                "state": "DC",
-                "chamber": "House",
-                "introduced_date": "2025-01-01",
-                "latest_action_text": "Test action",
-                "latest_action_date": "2025-01-01",
-            },
+        return await asyncio.to_thread(_ping_provider, provider_name, model_name)
+    except Exception as exc:
+        return {"status": "error", "detail": str(exc)}
+
+
+_PING = "Reply with exactly three words: Connection test successful."
+
+
+def _ping_provider(provider_name: str, model_name: str | None) -> dict:
+    if provider_name == "openai":
+        from openai import OpenAI
+        model = model_name or settings.OPENAI_MODEL
+        client = OpenAI(api_key=settings.OPENAI_API_KEY)
+        resp = client.chat.completions.create(
+            model=model,
+            messages=[{"role": "user", "content": _PING}],
+            max_tokens=20,
         )
-        return {
-            "status": "ok",
-            "provider": brief.llm_provider,
-            "model": brief.llm_model,
-            "summary_preview": brief.summary[:100] + "..."
-            if len(brief.summary) > 100 else brief.summary,
-        }
-    except Exception as e:
-        return {"status": "error", "detail": str(e)}
+        reply = resp.choices[0].message.content.strip()
+        return {"status": "ok", "provider": "openai", "model": model, "reply": reply}
+
+    if provider_name == "anthropic":
+        import anthropic
+        model = model_name or settings.ANTHROPIC_MODEL
+        client = anthropic.Anthropic(api_key=settings.ANTHROPIC_API_KEY)
+        resp = client.messages.create(
+            model=model,
+            max_tokens=20,
+            messages=[{"role": "user", "content": _PING}],
+        )
+        reply = resp.content[0].text.strip()
+        return {"status": "ok", "provider": "anthropic", "model": model, "reply": reply}
+
+    if provider_name == "gemini":
+        import google.generativeai as genai
+        model = model_name or settings.GEMINI_MODEL
+        genai.configure(api_key=settings.GEMINI_API_KEY)
+        resp = genai.GenerativeModel(model_name=model).generate_content(_PING)
+        reply = resp.text.strip()
+        return {"status": "ok", "provider": "gemini", "model": model, "reply": reply}
+
+    if provider_name == "ollama":
+        import requests as req
+        model = model_name or settings.OLLAMA_MODEL
+        resp = req.post(
+            f"{settings.OLLAMA_BASE_URL}/api/generate",
+            json={"model": model, "prompt": _PING, "stream": False},
+            timeout=30,
+        )
+        resp.raise_for_status()
+        reply = resp.json().get("response", "").strip()
+        return {"status": "ok", "provider": "ollama", "model": model, "reply": reply}
+
+    raise ValueError(f"Unknown provider: {provider_name}")
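
Once this lands, the endpoint is easy to smoke-test by hand. A minimal sketch from the Python side; the /api/settings mount prefix and the admin token are assumptions about the deployment, not something this patch pins down:

    import requests

    resp = requests.post(
        "http://localhost:8000/api/settings/test-llm",  # hypothetical mount path
        headers={"Authorization": "Bearer <admin-token>"},  # placeholder token
        timeout=60,
    )
    print(resp.json())
    # ok:    {"status": "ok", "provider": "...", "model": "...", "reply": "Connection test successful."}
    # error: {"status": "error", "detail": "..."}
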
+
+
+@router.get("/llm-models")
+async def list_llm_models(
+    provider: str,
+    current_user: User = Depends(get_current_admin),
+):
+    """Fetch available models directly from the provider's API."""
+    import asyncio
+    handlers = {
+        "openai": _list_openai_models,
+        "anthropic": _list_anthropic_models,
+        "gemini": _list_gemini_models,
+        "ollama": _list_ollama_models,
+    }
+    fn = handlers.get(provider)
+    if not fn:
+        return {"models": [], "error": f"Unknown provider: {provider}"}
+    try:
+        return await asyncio.to_thread(fn)
+    except Exception as exc:
+        return {"models": [], "error": str(exc)}
+
+
+def _list_openai_models() -> dict:
+    from openai import OpenAI
+    if not settings.OPENAI_API_KEY:
+        return {"models": [], "error": "OPENAI_API_KEY not configured"}
+    client = OpenAI(api_key=settings.OPENAI_API_KEY)
+    all_models = client.models.list().data
+    CHAT_PREFIXES = ("gpt-", "o1", "o3", "o4", "chatgpt-")
+    EXCLUDE = ("realtime", "audio", "tts", "whisper", "embedding", "dall-e", "instruct")
+    filtered = sorted(
+        [m.id for m in all_models
+         if any(m.id.startswith(p) for p in CHAT_PREFIXES)
+         and not any(x in m.id for x in EXCLUDE)],
+        reverse=True,
+    )
+    return {"models": [{"id": m, "name": m} for m in filtered]}
+
+
+def _list_anthropic_models() -> dict:
+    import requests as req
+    if not settings.ANTHROPIC_API_KEY:
+        return {"models": [], "error": "ANTHROPIC_API_KEY not configured"}
+    resp = req.get(
+        "https://api.anthropic.com/v1/models",
+        headers={
+            "x-api-key": settings.ANTHROPIC_API_KEY,
+            "anthropic-version": "2023-06-01",
+        },
+        timeout=10,
+    )
+    resp.raise_for_status()
+    data = resp.json()
+    return {
+        "models": [
+            {"id": m["id"], "name": m.get("display_name", m["id"])}
+            for m in data.get("data", [])
+        ]
+    }
+
+
+def _list_gemini_models() -> dict:
+    import google.generativeai as genai
+    if not settings.GEMINI_API_KEY:
+        return {"models": [], "error": "GEMINI_API_KEY not configured"}
+    genai.configure(api_key=settings.GEMINI_API_KEY)
+    models = [
+        {"id": m.name.replace("models/", ""), "name": m.display_name}
+        for m in genai.list_models()
+        if "generateContent" in m.supported_generation_methods
+    ]
+    return {"models": sorted(models, key=lambda x: x["id"])}
+
+
+def _list_ollama_models() -> dict:
+    import requests as req
+    try:
+        resp = req.get(f"{settings.OLLAMA_BASE_URL}/api/tags", timeout=5)
+        resp.raise_for_status()
+        tags = resp.json().get("models", [])
+        return {"models": [{"id": m["name"], "name": m["name"]} for m in tags]}
+    except Exception as exc:
+        return {"models": [], "error": f"Ollama unreachable: {exc}"}
 
 
 def _current_model(provider: str) -> str:
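
One note on _list_openai_models: OpenAI's /v1/models returns every family (embeddings, audio, images), so the chat filter does real work. A standalone sanity check of that prefix/exclude logic; the sample IDs below are illustrative, not a claim about what the API currently returns:

    CHAT_PREFIXES = ("gpt-", "o1", "o3", "o4", "chatgpt-")
    EXCLUDE = ("realtime", "audio", "tts", "whisper", "embedding", "dall-e", "instruct")

    sample = ["gpt-4o", "gpt-4o-audio-preview", "o3-mini", "text-embedding-3-small", "whisper-1"]
    kept = [m for m in sample
            if any(m.startswith(p) for p in CHAT_PREFIXES)
            and not any(x in m for x in EXCLUDE)]
    assert kept == ["gpt-4o", "o3-mini"]  # audio/embedding/whisper variants filtered out
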
diff --git a/backend/app/config.py b/backend/app/config.py
index 0b19eff..4894732 100644
--- a/backend/app/config.py
+++ b/backend/app/config.py
@@ -34,7 +34,7 @@ class Settings(BaseSettings):
     ANTHROPIC_MODEL: str = "claude-opus-4-6"
 
     GEMINI_API_KEY: str = ""
-    GEMINI_MODEL: str = "gemini-1.5-pro"
+    GEMINI_MODEL: str = "gemini-2.0-flash"
 
     OLLAMA_BASE_URL: str = "http://host.docker.internal:11434"
     OLLAMA_MODEL: str = "llama3.1"
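
The default bump only matters where no explicit value is set: pydantic's BaseSettings resolves the environment (and .env) before falling back to the class default, so deployments that pin GEMINI_MODEL keep their pin. A quick sketch of that precedence, assuming standard BaseSettings behavior:

    import os
    os.environ["GEMINI_MODEL"] = "gemini-1.5-pro"  # set before Settings is instantiated

    from app.config import Settings
    assert Settings().GEMINI_MODEL == "gemini-1.5-pro"  # env wins over the new class default
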
+ """ + if provider is None: + provider = settings.LLM_PROVIDER + provider = provider.lower() if provider == "openai": - return OpenAIProvider() + return OpenAIProvider(model=model) elif provider == "anthropic": - return AnthropicProvider() + return AnthropicProvider(model=model) elif provider == "gemini": - return GeminiProvider() + return GeminiProvider(model=model) elif provider == "ollama": - return OllamaProvider() + return OllamaProvider(model=model) raise ValueError(f"Unknown LLM_PROVIDER: '{provider}'. Must be one of: openai, anthropic, gemini, ollama") diff --git a/backend/app/workers/llm_processor.py b/backend/app/workers/llm_processor.py index fb2dd3f..47cc1d8 100644 --- a/backend/app/workers/llm_processor.py +++ b/backend/app/workers/llm_processor.py @@ -60,7 +60,13 @@ def process_document_with_llm(self, document_id: int): .first() ) - provider = get_llm_provider() + from app.models.setting import AppSetting + prov_row = db.get(AppSetting, "llm_provider") + model_row = db.get(AppSetting, "llm_model") + provider = get_llm_provider( + prov_row.value if prov_row else None, + model_row.value if model_row else None, + ) if previous_full_brief and previous_full_brief.document_id: # New version of a bill we've already analyzed — generate amendment brief @@ -97,6 +103,9 @@ def process_document_with_llm(self, document_id: int): logger.info(f"{brief_type.capitalize()} brief {db_brief.id} created for bill {doc.bill_id} using {brief.llm_provider}/{brief.llm_model}") + # Emit notification events for users who follow this bill + _emit_notification_events(db, bill, doc.bill_id, brief_type, brief.summary) + # Trigger news fetch now that we have topic tags from app.workers.news_fetcher import fetch_news_for_bill fetch_news_for_bill.delay(doc.bill_id) @@ -111,6 +120,35 @@ def process_document_with_llm(self, document_id: int): db.close() +def _emit_notification_events(db, bill, bill_id: str, brief_type: str, summary: str | None) -> None: + """Create a NotificationEvent row for every user following this bill.""" + from app.models.follow import Follow + from app.models.notification import NotificationEvent + from app.config import settings + + followers = db.query(Follow).filter_by(follow_type="bill", follow_value=bill_id).all() + if not followers: + return + + base_url = (settings.PUBLIC_URL or settings.LOCAL_URL).rstrip("/") + payload = { + "bill_title": bill.short_title or bill.title or "", + "bill_label": f"{bill.bill_type.upper()} {bill.bill_number}", + "brief_summary": (summary or "")[:300], + "bill_url": f"{base_url}/bills/{bill_id}", + } + event_type = "new_amendment" if brief_type == "amendment" else "new_document" + + for follow in followers: + db.add(NotificationEvent( + user_id=follow.user_id, + bill_id=bill_id, + event_type=event_type, + payload=payload, + )) + db.commit() + + @celery_app.task(bind=True, name="app.workers.llm_processor.backfill_brief_citations") def backfill_brief_citations(self): """ diff --git a/frontend/app/settings/page.tsx b/frontend/app/settings/page.tsx index 6186d70..b3e7474 100644 --- a/frontend/app/settings/page.tsx +++ b/frontend/app/settings/page.tsx @@ -1,6 +1,6 @@ "use client"; -import { useState } from "react"; +import { useState, useEffect } from "react"; import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query"; import { Settings, @@ -16,17 +16,21 @@ import { FileText, Brain, BarChart3, + Bell, + Copy, + Rss, } from "lucide-react"; -import { settingsAPI, adminAPI, type AdminUser } from "@/lib/api"; +import { settingsAPI, adminAPI, 
diff --git a/backend/app/workers/llm_processor.py b/backend/app/workers/llm_processor.py
index fb2dd3f..47cc1d8 100644
--- a/backend/app/workers/llm_processor.py
+++ b/backend/app/workers/llm_processor.py
@@ -60,7 +60,13 @@ def process_document_with_llm(self, document_id: int):
             .first()
         )
 
-        provider = get_llm_provider()
+        from app.models.setting import AppSetting
+        prov_row = db.get(AppSetting, "llm_provider")
+        model_row = db.get(AppSetting, "llm_model")
+        provider = get_llm_provider(
+            prov_row.value if prov_row else None,
+            model_row.value if model_row else None,
+        )
 
         if previous_full_brief and previous_full_brief.document_id:
             # New version of a bill we've already analyzed — generate amendment brief
@@ -97,6 +103,9 @@ def process_document_with_llm(self, document_id: int):
 
         logger.info(f"{brief_type.capitalize()} brief {db_brief.id} created for bill {doc.bill_id} using {brief.llm_provider}/{brief.llm_model}")
 
+        # Emit notification events for users who follow this bill
+        _emit_notification_events(db, bill, doc.bill_id, brief_type, brief.summary)
+
         # Trigger news fetch now that we have topic tags
         from app.workers.news_fetcher import fetch_news_for_bill
         fetch_news_for_bill.delay(doc.bill_id)
@@ -111,6 +120,35 @@ def process_document_with_llm(self, document_id: int):
         db.close()
 
 
+def _emit_notification_events(db, bill, bill_id: str, brief_type: str, summary: str | None) -> None:
+    """Create a NotificationEvent row for every user following this bill."""
+    from app.models.follow import Follow
+    from app.models.notification import NotificationEvent
+    from app.config import settings
+
+    followers = db.query(Follow).filter_by(follow_type="bill", follow_value=bill_id).all()
+    if not followers:
+        return
+
+    base_url = (settings.PUBLIC_URL or settings.LOCAL_URL).rstrip("/")
+    payload = {
+        "bill_title": bill.short_title or bill.title or "",
+        "bill_label": f"{bill.bill_type.upper()} {bill.bill_number}",
+        "brief_summary": (summary or "")[:300],
+        "bill_url": f"{base_url}/bills/{bill_id}",
+    }
+    event_type = "new_amendment" if brief_type == "amendment" else "new_document"
+
+    for follow in followers:
+        db.add(NotificationEvent(
+            user_id=follow.user_id,
+            bill_id=bill_id,
+            event_type=event_type,
+            payload=payload,
+        ))
+    db.commit()
+
+
 @celery_app.task(bind=True, name="app.workers.llm_processor.backfill_brief_citations")
 def backfill_brief_citations(self):
     """
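
Fan-out is one NotificationEvent row per follower, each carrying a self-contained payload, so downstream channels (RSS, ntfy) can render and retry per user without re-querying the bill. Roughly what a consumer sees per row, with illustrative field values:

    event = {
        "event_type": "new_document",  # "new_amendment" when an analyzed bill gets a new version
        "bill_id": "hr1234-119",       # hypothetical ID format
        "payload": {
            "bill_title": "Example Act",
            "bill_label": "HR 1234",
            "brief_summary": "First 300 characters of the brief...",
            "bill_url": "https://example.com/bills/hr1234-119",
        },
    }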

diff --git a/frontend/app/settings/page.tsx b/frontend/app/settings/page.tsx
index 6186d70..b3e7474 100644
--- a/frontend/app/settings/page.tsx
+++ b/frontend/app/settings/page.tsx
@@ -1,6 +1,6 @@
 "use client";
 
-import { useState } from "react";
+import { useState, useEffect } from "react";
 import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query";
 import {
   Settings,
@@ -16,17 +16,21 @@ import {
   FileText,
   Brain,
   BarChart3,
+  Bell,
+  Copy,
+  Rss,
 } from "lucide-react";
-import { settingsAPI, adminAPI, type AdminUser } from "@/lib/api";
+import { settingsAPI, adminAPI, notificationsAPI, type AdminUser, type LLMModel } from "@/lib/api";
 import { useAuthStore } from "@/stores/authStore";
 
 const LLM_PROVIDERS = [
-  { value: "openai", label: "OpenAI (GPT-4o)", hint: "Requires OPENAI_API_KEY in .env" },
+  { value: "openai", label: "OpenAI", hint: "Requires OPENAI_API_KEY in .env" },
   { value: "anthropic", label: "Anthropic (Claude)", hint: "Requires ANTHROPIC_API_KEY in .env" },
   { value: "gemini", label: "Google Gemini", hint: "Requires GEMINI_API_KEY in .env" },
   { value: "ollama", label: "Ollama (Local)", hint: "Requires Ollama running on host" },
 ];
 
+
 export default function SettingsPage() {
   const qc = useQueryClient();
   const currentUser = useAuthStore((s) => s.user);
@@ -64,11 +68,60 @@ export default function SettingsPage() {
     onSuccess: () => qc.invalidateQueries({ queryKey: ["admin-users"] }),
   });
 
+  const { data: notifSettings, refetch: refetchNotif } = useQuery({
+    queryKey: ["notification-settings"],
+    queryFn: () => notificationsAPI.getSettings(),
+  });
+
+  const updateNotif = useMutation({
+    mutationFn: (data: Parameters<typeof notificationsAPI.updateSettings>[0]) =>
+      notificationsAPI.updateSettings(data),
+    onSuccess: () => refetchNotif(),
+  });
+
+  const resetRss = useMutation({
+    mutationFn: () => notificationsAPI.resetRssToken(),
+    onSuccess: () => refetchNotif(),
+  });
+
+  const [ntfyUrl, setNtfyUrl] = useState("");
+  const [ntfyToken, setNtfyToken] = useState("");
+  const [notifSaved, setNotifSaved] = useState(false);
+  const [copied, setCopied] = useState(false);
+
+  // Live model list from provider API
+  const { data: modelsData, isFetching: modelsFetching, refetch: refetchModels } = useQuery({
+    queryKey: ["llm-models", settings?.llm_provider],
+    queryFn: () => settingsAPI.listModels(settings!.llm_provider),
+    enabled: !!currentUser?.is_admin && !!settings?.llm_provider,
+    staleTime: 5 * 60 * 1000,
+    retry: false,
+  });
+  const liveModels: LLMModel[] = modelsData?.models ?? [];
+  const modelsError: string | undefined = modelsData?.error;
+
+  // Model picker state
+  const [showCustomModel, setShowCustomModel] = useState(false);
+  const [customModel, setCustomModel] = useState("");
+  useEffect(() => {
+    if (!settings || modelsFetching) return;
+    const inList = liveModels.some((m) => m.id === settings.llm_model);
+    if (!inList && settings.llm_model) {
+      setShowCustomModel(true);
+      setCustomModel(settings.llm_model);
+    } else {
+      setShowCustomModel(false);
+      setCustomModel("");
+    }
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [settings?.llm_provider, settings?.llm_model, modelsFetching]);
+
   const [testResult, setTestResult] = useState<{
     status: string;
     detail?: string;
-    summary_preview?: string;
+    reply?: string;
     provider?: string;
+    model?: string;
   } | null>(null);
   const [testing, setTesting] = useState(false);
   const [taskIds, setTaskIds] = useState<Record<string, string>>({});
@@ -236,9 +289,6 @@ export default function SettingsPage() {
 
           <h2>LLM Provider</h2>
 
-          <p>
-            Current: {settings?.llm_provider} / {settings?.llm_model}
-          </p>
 
           {LLM_PROVIDERS.map(({ value, label, hint }) => (