// =============================================================================
// NOTE(review): This file appears CORRUPTED — JSX elements and generic type
// arguments look stripped out (e.g. `useState | null>(null)` is missing its
// type argument, `as unknown as Record` has no type parameters, and render
// markup survives only as bare text fragments). It cannot compile in this
// state. Restore the intact version from version control; the comments below
// only annotate the logic that is still legible and make no attempt to
// reconstruct the missing markup.
// =============================================================================
// Admin settings page: user management, LLM provider/model selection,
// data-source status, external API health checks, and manual pipeline
// controls. `relativeTime` formats a UTC ISO timestamp as "Xh Ym ago"
// (appends "Z" when the timestamp lacks an explicit zone suffix).
"use client"; import React, { useState, useEffect } from "react"; import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query"; import { Settings, Cpu, RefreshCw, CheckCircle, XCircle, Play, Users, Trash2, ShieldCheck, ShieldOff, BarChart3, Bell, Shield, Zap, ChevronDown, ChevronRight, Wrench, } from "lucide-react"; import Link from "next/link"; import { settingsAPI, adminAPI, notificationsAPI, type AdminUser, type LLMModel, type ApiHealthResult } from "@/lib/api"; import { useAuthStore } from "@/stores/authStore"; function relativeTime(isoStr: string): string { const diff = Date.now() - new Date(isoStr.endsWith("Z") ? isoStr : isoStr + "Z").getTime(); const hours = Math.floor(diff / 3_600_000); const mins = Math.floor((diff % 3_600_000) / 60_000); return hours > 0 ? `${hours}h ${mins}m ago` : `${mins}m ago`; } const LLM_PROVIDERS = [ { value: "openai", label: "OpenAI", hint: "Requires OPENAI_API_KEY in .env", rateNote: "Free: 3 RPM · Paid tier 1: 500 RPM", modelNote: "Recommended: gpt-4o-mini — excellent JSON quality at ~10× lower cost than gpt-4o", }, { value: "anthropic", label: "Anthropic (Claude)", hint: "Requires ANTHROPIC_API_KEY in .env", rateNote: "Tier 1: 50 RPM · Tier 2: 1,000 RPM", modelNote: "Recommended: claude-sonnet-4-6 — matches Opus quality at ~5× lower cost", }, { value: "gemini", label: "Google Gemini", hint: "Requires GEMINI_API_KEY in .env", rateNote: "Free: 15 RPM · Paid: 2,000 RPM", modelNote: "Recommended: gemini-2.0-flash — best value, generous free tier", }, { value: "ollama", label: "Ollama (Local)", hint: "Requires Ollama running on host", rateNote: "No API rate limits", modelNote: "Recommended: llama3.1 or mistral for reliable structured JSON output", }, ]; export default function SettingsPage() { const qc = useQueryClient(); const currentUser = useAuthStore((s) => s.user); const { data: settings, isLoading: settingsLoading } = useQuery({ queryKey: ["settings"], queryFn: () => settingsAPI.get(), }); const { data: stats 
// Admin-only stats query (30s auto-refetch) plus on-demand API health test
// state; `testApiHealth` stores the result for the health panel further down.
} = useQuery({ queryKey: ["admin-stats"], queryFn: () => adminAPI.getStats(), enabled: !!currentUser?.is_admin, refetchInterval: 30_000, }); const [healthTesting, setHealthTesting] = useState(false); const [healthData, setHealthData] = useState | null>(null); const testApiHealth = async () => { setHealthTesting(true); try { const result = await adminAPI.getApiHealth(); setHealthData(result as unknown as Record); } finally { setHealthTesting(false); } }; const { data: users, isLoading: usersLoading } = useQuery({ queryKey: ["admin-users"], queryFn: () => adminAPI.listUsers(), enabled: !!currentUser?.is_admin, }); const updateSetting = useMutation({ mutationFn: ({ key, value }: { key: string; value: string }) => settingsAPI.update(key, value), onSuccess: () => qc.invalidateQueries({ queryKey: ["settings"] }), }); const deleteUser = useMutation({ mutationFn: (id: number) => adminAPI.deleteUser(id), onSuccess: () => qc.invalidateQueries({ queryKey: ["admin-users"] }), }); const toggleAdmin = useMutation({ mutationFn: (id: number) => adminAPI.toggleAdmin(id), onSuccess: () => qc.invalidateQueries({ queryKey: ["admin-users"] }), }); // Live model list from provider API const { data: modelsData, isFetching: modelsFetching, refetch: refetchModels } = useQuery({ queryKey: ["llm-models", settings?.llm_provider], queryFn: () => settingsAPI.listModels(settings!.llm_provider), enabled: !!currentUser?.is_admin && !!settings?.llm_provider, staleTime: 5 * 60 * 1000, retry: false, }); const liveModels: LLMModel[] = modelsData?.models ?? 
// `liveModels` falls back to [] just below; `modelsError` carries a provider
// fetch-failure message. The effect switches to "custom model" input mode
// whenever the persisted model is absent from the fetched list.
[]; const modelsError: string | undefined = modelsData?.error; // Model picker state const [showCustomModel, setShowCustomModel] = useState(false); const [customModel, setCustomModel] = useState(""); useEffect(() => { if (!settings || modelsFetching) return; const inList = liveModels.some((m) => m.id === settings.llm_model); if (!inList && settings.llm_model) { setShowCustomModel(true); setCustomModel(settings.llm_model); } else { setShowCustomModel(false); setCustomModel(""); } // eslint-disable-next-line react-hooks/exhaustive-deps }, [settings?.llm_provider, settings?.llm_model, modelsFetching]); const [testResult, setTestResult] = useState<{ status: string; detail?: string; reply?: string; provider?: string; model?: string; } | null>(null); const [testing, setTesting] = useState(false); const [modeTestResults, setModeTestResults] = useState>({}); const [modeTestRunning, setModeTestRunning] = useState>({}); const runModeTest = async (key: string, mode: string, event_type: string) => { setModeTestRunning((p) => ({ ...p, [key]: true })); try { const result = await notificationsAPI.testFollowMode(mode, event_type); setModeTestResults((p) => ({ ...p, [key]: result })); } catch (e: unknown) { setModeTestResults((p) => ({ ...p, [key]: { status: "error", detail: e instanceof Error ? 
// Below: per-task trigger state, NewsAPI quota query, LLM batch-status query
// (refetches every 30s only while a batch is "processing"), Google News cache
// clear, and the LLM connectivity test.
e.message : String(e) }, })); } finally { setModeTestRunning((p) => ({ ...p, [key]: false })); } }; const [taskIds, setTaskIds] = useState>({}); const [taskStatuses, setTaskStatuses] = useState>({}); const [confirmDelete, setConfirmDelete] = useState(null); const [showMaintenance, setShowMaintenance] = useState(false); const { data: newsApiQuota, refetch: refetchQuota } = useQuery({ queryKey: ["newsapi-quota"], queryFn: () => adminAPI.getNewsApiQuota(), enabled: !!currentUser?.is_admin && !!settings?.newsapi_enabled, staleTime: 60_000, }); const { data: batchStatus } = useQuery({ queryKey: ["llm-batch-status"], queryFn: () => adminAPI.getLlmBatchStatus(), enabled: !!currentUser?.is_admin, refetchInterval: (query) => query.state.data?.status === "processing" ? 30_000 : false, }); const [clearingCache, setClearingCache] = useState(false); const [cacheClearResult, setCacheClearResult] = useState(null); const clearGnewsCache = async () => { setClearingCache(true); setCacheClearResult(null); try { const result = await adminAPI.clearGnewsCache(); setCacheClearResult(`Cleared ${result.cleared} cached entries`); } catch (e: unknown) { setCacheClearResult(e instanceof Error ? e.message : "Failed"); } finally { setClearingCache(false); } }; const testLLM = async () => { setTesting(true); setTestResult(null); try { const result = await settingsAPI.testLLM(); setTestResult(result); } catch (e: unknown) { setTestResult({ status: "error", detail: e instanceof Error ? e.message : String(e) }); } finally { setTesting(false); } }; const pollTaskStatus = async (name: string, taskId: string) => { for (let i = 0; i < 60; i++) { await new Promise((r) => setTimeout(r, 5000)); try { const data = await adminAPI.getTaskStatus(taskId); if (["SUCCESS", "FAILURE", "REVOKED"].includes(data.status)) { setTaskStatuses((prev) => ({ ...prev, [name]: data.status === "SUCCESS" ? 
// pollTaskStatus: up to 60 polls at 5s intervals (~5 min) before marking the
// task "error"; polling errors are intentionally swallowed. `trigger` starts
// a background task and begins polling it (fire-and-forget, not awaited).
"done" : "error" })); qc.invalidateQueries({ queryKey: ["admin-stats"] }); return; } } catch { /* ignore polling errors */ } } setTaskStatuses((prev) => ({ ...prev, [name]: "error" })); }; const trigger = async (name: string, fn: () => Promise<{ task_id: string }>) => { const result = await fn(); setTaskIds((prev) => ({ ...prev, [name]: result.task_id })); setTaskStatuses((prev) => ({ ...prev, [name]: "running" })); pollTaskStatus(name, result.task_id); }; if (settingsLoading) return
// NOTE(review): from here on, mostly text fragments of the stripped JSX
// remain — loading guard, admin-access guard, page header, and the
// follow-mode notification test cards. Do not edit; restore from VCS.
Loading...
; if (!currentUser?.is_admin) { return
Admin access required.
// pct: share of tracked bills that have generated briefs (guards against
// divide-by-zero when total_bills is 0 or stats is not loaded yet).
; } const pct = stats && stats.total_bills > 0 ? Math.round((stats.briefs_generated / stats.total_bills) * 100) : 0; return (

Admin

Manage users, LLM provider, and system settings

{/* Notifications link */}
Notification Settings
Configure ntfy push alerts and RSS feed per user
{/* Follow Mode Notification Testing */}

Follow Mode Notifications

Requires at least one bill followed and ntfy configured. Tests use your first followed bill.

{([ { key: "veto-suppress", mode: "pocket_veto", event_type: "new_document", icon: Shield, label: "Pocket Veto — suppress brief", description: "Sends a new_document event. Dispatcher should silently drop it — no ntfy notification.", expectColor: "text-amber-600 dark:text-amber-400", }, { key: "veto-deliver", mode: "pocket_veto", event_type: "bill_updated", icon: Shield, label: "Pocket Veto — deliver milestone", description: "Sends a bill_updated (milestone) event. Dispatcher should allow it and send ntfy.", expectColor: "text-amber-600 dark:text-amber-400", }, { key: "boost-deliver", mode: "pocket_boost", event_type: "bill_updated", icon: Zap, label: "Pocket Boost — deliver with actions", description: "Sends a bill_updated event. ntfy notification should include 'View Bill' and 'Find Your Rep' action buttons.", expectColor: "text-green-600 dark:text-green-400", }, ] as Array<{ key: string; mode: string; event_type: string; icon: React.ElementType; label: string; description: string; expectColor: string; }>).map(({ key, mode, event_type, icon: Icon, label, description }) => { const result = modeTestResults[key]; const running = modeTestRunning[key]; return (
{label}

{description}

{result && (
{result.status === "ok" ? : } {result.detail}
)}
); })}
// Stripped section: "Analysis Status" — progress bar plus the bill-pipeline
// breakdown table (totals, docs fetched, pending LLM, uncited/unlabeled
// briefs, with suggested actions for the non-zero backlog rows).
{/* Analysis Status */}

Bill Pipeline refreshes every 30s

{stats ? ( <> {/* Progress bar */}
{stats.briefs_generated.toLocaleString()} analyzed ({stats.full_briefs} full · {stats.amendment_briefs} amendments) {pct}% of {stats.total_bills.toLocaleString()} bills
{/* Pipeline breakdown table */}
{[ { label: "Total bills tracked", value: stats.total_bills, color: "text-foreground", icon: "📋" }, { label: "Text published on Congress.gov", value: stats.docs_fetched, color: "text-blue-600 dark:text-blue-400", icon: "📄" }, { label: "No text published yet", value: stats.no_text_bills, color: "text-muted-foreground", icon: "⏳", note: "Normal — bill text appears after committee markup" }, { label: "AI briefs generated", value: stats.briefs_generated, color: "text-green-600 dark:text-green-400", icon: "✅" }, { label: "Pending LLM analysis", value: stats.pending_llm, color: stats.pending_llm > 0 ? "text-amber-600 dark:text-amber-400" : "text-muted-foreground", icon: "🔄", action: stats.pending_llm > 0 ? "Resume Analysis" : undefined }, { label: "Briefs missing citations", value: stats.uncited_briefs, color: stats.uncited_briefs > 0 ? "text-amber-600 dark:text-amber-400" : "text-muted-foreground", icon: "⚠️", action: stats.uncited_briefs > 0 ? "Backfill Citations" : undefined }, { label: "Briefs with unlabeled points", value: stats.unlabeled_briefs, color: stats.unlabeled_briefs > 0 ? "text-amber-600 dark:text-amber-400" : "text-muted-foreground", icon: "🏷️", action: stats.unlabeled_briefs > 0 ? "Backfill Labels" : undefined }, ].map(({ label, value, color, icon, note, action }) => (
{icon}
{label} {note &&

{note}

}
{value.toLocaleString()} {action && ( → run {action} )}
))}
) : (

Loading stats...

)}
// Stripped section: user management — lists users with follow counts and join
// dates; delete (with confirm step via `confirmDelete`) and admin-toggle
// controls are hidden for the current user's own row.
{/* User Management */}

Users

{usersLoading ? (

Loading users...

) : (
{(users ?? []).map((u: AdminUser) => (
{u.email} {u.is_admin && ( admin )} {u.id === currentUser.id && ( (you) )}
{u.follow_count} follow{u.follow_count !== 1 ? "s" : ""} ·{" "} joined {new Date(u.created_at).toLocaleDateString()}
{u.id !== currentUser.id && (
{confirmDelete === u.id ? (
) : ( )}
)}
))}
)}
// Stripped section: LLM provider picker (flags providers whose API key is not
// configured), live model list with manual "custom model" fallback, active
// provider/model display, and the connectivity test result.
{/* LLM Provider */}

LLM Provider

{LLM_PROVIDERS.map(({ value, label, hint, rateNote, modelNote }) => { const hasKey = settings?.api_keys_configured?.[value] ?? true; return ( ); })}
{/* Model picker — live from provider API */}
{modelsFetching && Loading models…} {modelsError && !modelsFetching && ( {modelsError} )} {!modelsFetching && liveModels.length > 0 && ( )}
{liveModels.length > 0 ? ( ) : ( !modelsFetching && (

{modelsError ? "Could not fetch models — enter a model name manually below." : "No models found."}

) )} {(showCustomModel || (liveModels.length === 0 && !modelsFetching)) && (
setCustomModel(e.target.value)} className="flex-1 px-3 py-1.5 text-sm bg-background border border-border rounded-md focus:outline-none focus:ring-1 focus:ring-primary" />
)}

Active: {settings?.llm_provider} / {settings?.llm_model}

{testResult && (
{testResult.status === "ok" ? ( <> {testResult.model} — {testResult.reply} ) : ( <> {testResult.detail} )}
)}
// Stripped section: data sources — Congress.gov poll interval, NewsAPI quota
// (shown only when configured), and Google Trends enable state.
{/* Data Sources */}

Data Sources

Congress.gov Poll Interval
How often to check for new bills
NewsAPI.org
100 requests/day free tier
{newsApiQuota && ( {newsApiQuota.remaining}/{newsApiQuota.limit} remaining today )} {settings?.newsapi_enabled ? "Configured" : "Not configured"}
Google Trends
Zeitgeist scoring via pytrends
{settings?.pytrends_enabled ? "Enabled" : "Disabled"}
// Stripped section: external API health — renders one row per known source
// from `healthData` (populated by testApiHealth above), showing detail,
// latency, and an ok/error/skipped status icon.
{/* API Health */}

External API Health

{healthData ? (
{[ { key: "congress_gov", label: "Congress.gov API" }, { key: "govinfo", label: "GovInfo API" }, { key: "newsapi", label: "NewsAPI.org" }, { key: "google_news", label: "Google News RSS" }, { key: "rep_lookup", label: "Rep Lookup (Nominatim + TIGERweb)" }, ].map(({ key, label }) => { const r = healthData[key]; if (!r) return null; return (
{label}
{r.detail}
{r.latency_ms !== undefined && ( {r.latency_ms}ms )} {r.status === "ok" && } {r.status === "error" && } {r.status === "skipped" && }
); })}
) : (

Click Run Tests to check connectivity to each external data source.

)}
// Stripped section: manual controls — `renderRow` renders one control with
// its task status badge; `ClearCacheRow` is the inline (non-task) cache-clear
// action.
{/* Manual Controls */}

Manual Controls

{(() => { type ControlItem = { key: string; name: string; description: string; fn: () => Promise<{ task_id: string }>; status: "ok" | "needed" | "on-demand"; count?: number; countLabel?: string; }; const renderRow = ({ key, name, description, fn, status, count, countLabel }: ControlItem) => (
{name} {taskStatuses[key] === "running" ? ( running {taskIds[key] && ( {taskIds[key].slice(0, 8)}… )} ) : taskStatuses[key] === "done" ? ( ✓ Complete ) : taskStatuses[key] === "error" ? ( ✗ Failed ) : status === "ok" ? ( ✓ Up to date ) : status === "needed" && count !== undefined && count > 0 ? ( ⚠ {count.toLocaleString()} {countLabel} ) : null}

{description}

// NOTE(review): on the next line the inline "//" comment swallows the rest of
// the line — `const ClearCacheRow = (` is dead text in this corrupted copy.
); // Clear RSS cache — inline action (returns count, not task_id) const ClearCacheRow = (
Clear Google News Cache {cacheClearResult && ( ✓ {cacheClearResult} )}

Flush the 2-hour Google News RSS cache so fresh articles are fetched on the next trend scoring or news run.

// Stripped section: control definitions — `recurring` lists on-demand tasks
// (poll, member sync, trends, actions, resume analysis, weekly digest; the
// batch-API entry is appended only for openai/anthropic providers) and
// `maintenance` lists one-time backfills; the tail renders both lists plus
// the batch-in-progress banner and the collapsible maintenance subsection.
// NOTE(review): a string literal is split across the next two physical lines
// in this corrupted copy — left exactly as found.
); } const recurring: ControlItem[] = [ { key: "poll", name: "Trigger Poll", description: "Check Congress.gov for newly introduced or updated bills. Runs automatically on a schedule — use this to force an immediate sync.", fn: adminAPI.triggerPoll, status: "on-demand", }, { key: "members", name: "Sync Members", description: "Refresh all member profiles from Congress.gov including biography, current term, leadership roles, and contact information.", fn: adminAPI.triggerMemberSync, status: "on-demand", }, { key: "trends", name: "Calculate Trends", description: "Score bill and member newsworthiness by counting recent news headlines and Google search interest. Updates the trend charts.", fn: adminAPI.triggerTrendScores, status: "on-demand", }, { key: "actions", name: "Fetch Bill Actions", description: "Download the full legislative history (votes, referrals, amendments) for recently active bills and populate the timeline view.", fn: adminAPI.triggerFetchActions, status: "on-demand", }, { key: "resume", name: "Resume Analysis", description: "Restart AI brief generation for bills where processing stalled or failed (e.g. after an LLM quota outage). Also re-queues document fetching for bills that have no text yet.", fn: adminAPI.resumeAnalysis, status: stats ? (stats.pending_llm > 0 ? "needed" : "on-demand") : "on-demand", count: stats?.pending_llm, countLabel: "bills pending analysis", }, { key: "weekly-digest", name: "Send Weekly Digest", description: "Immediately dispatch the weekly bill activity summary to all users who have ntfy or RSS enabled and at least one bill followed. Runs automatically every Monday at 8:30 AM UTC.", fn: adminAPI.triggerWeeklyDigest, status: "on-demand", }, ]; if (settings?.llm_provider === "openai" || settings?.llm_provider === "anthropic") { recurring.push({ key: "llm-batch", name: "Submit LLM Batch (50% off)", description: "Send all unbriefed documents to the Batch API for overnight processing at half the token cost. 
Returns within seconds — results are imported automatically every 30 minutes via the background poller.", fn: adminAPI.submitLlmBatch, status: "on-demand", }); } const maintenance: ControlItem[] = [ { key: "backfill-actions", name: "Backfill All Action Histories", description: "One-time catch-up: fetch action histories for all bills that were imported before this feature existed.", fn: adminAPI.backfillAllActions, status: stats ? (stats.bills_missing_actions > 0 ? "needed" : "ok") : "on-demand", count: stats?.bills_missing_actions, countLabel: "bills missing action history", }, { key: "sponsors", name: "Backfill Sponsors", description: "Link bill sponsors that weren't captured during the initial import. Safe to re-run — skips bills that already have a sponsor.", fn: adminAPI.backfillSponsors, status: stats ? (stats.bills_missing_sponsor > 0 ? "needed" : "ok") : "on-demand", count: stats?.bills_missing_sponsor, countLabel: "bills missing sponsor", }, { key: "metadata", name: "Backfill Dates & Links", description: "Fill in missing introduced dates, chamber assignments, and congress.gov links by re-fetching bill detail from Congress.gov.", fn: adminAPI.backfillMetadata, status: stats ? (stats.bills_missing_metadata > 0 ? "needed" : "ok") : "on-demand", count: stats?.bills_missing_metadata, countLabel: "bills missing metadata", }, { key: "citations", name: "Backfill Citations", description: "Regenerate AI briefs created before inline source citations were added. Deletes the old brief and re-runs LLM analysis using already-stored bill text.", fn: adminAPI.backfillCitations, status: stats ? (stats.uncited_briefs > 0 ? "needed" : "ok") : "on-demand", count: stats?.uncited_briefs, countLabel: "briefs need regeneration", }, { key: "labels", name: "Backfill Fact/Inference Labels", description: "Classify existing cited brief points as fact or inference. One compact LLM call per brief — no re-generation of summaries or citations.", fn: adminAPI.backfillLabels, status: stats ? 
(stats.unlabeled_briefs > 0 ? "needed" : "ok") : "on-demand", count: stats?.unlabeled_briefs, countLabel: "briefs with unlabeled points", }, ]; const maintenanceNeeded = maintenance.some((m) => m.status === "needed"); return ( <>
{recurring.map(renderRow)} {batchStatus?.status === "processing" && (
Batch in progress · {batchStatus.doc_count} documents · submitted {relativeTime(batchStatus.submitted_at!)}
)} {ClearCacheRow}
{/* Maintenance subsection */}
{showMaintenance && (
{maintenance.map(renderRow)}
)}
); })()}
); }