feat: API optimizations — quota batching, ETags, caching, async sponsor (v0.9.7)

Nine efficiency improvements across the data pipeline:

1. NewsAPI OR batching (news_service.py + news_fetcher.py)
   - Combine up to 4 bills per NewsAPI call using OR query syntax
   - NEWSAPI_BATCH_SIZE=4 means ~4× effective daily quota (100→400 bill-fetches)
   - fetch_news_for_bill_batch task; fetch_news_for_active_bills queues batches

2. Google News RSS cache (news_service.py)
   - 2-hour Redis cache shared between news_fetcher and trend_scorer
   - Eliminates duplicate RSS hits when both workers run against same bill
   - clear_gnews_cache() admin helper + admin endpoint

3. pytrends keyword batching (trends_service.py + trend_scorer.py)
   - Compare up to 5 bills per pytrends call instead of 1
   - get_trends_scores_batch() returns scores in original order
   - Reduces pytrends calls by ~5× and associated rate-limit risk

4. GovInfo ETags (govinfo_api.py + document_fetcher.py)
   - If-None-Match conditional GET; DocumentUnchangedError on HTTP 304
   - ETags stored in Redis (30-day TTL) keyed by MD5(url)
   - document_fetcher catches DocumentUnchangedError → {"status": "unchanged"}

5. Anthropic prompt caching (llm_service.py)
   - cache_control: {type: ephemeral} on system messages in AnthropicProvider
   - Caches the ~700-token system prompt server-side; ~50% cost reduction on
     repeated calls within the 5-minute cache window

6. Async sponsor fetch (congress_poller.py)
   - New fetch_sponsor_for_bill Celery task replaces blocking get_bill_detail()
     inline in poll loop
   - Bills saved immediately with sponsor_id=None; sponsor linked async
   - Removes 0.25s sleep per new bill from poll hot path

7. Skip doc fetch for procedural actions (congress_poller.py)
   - _DOC_PRODUCING_CATEGORIES = {vote, committee_report, presidential, ...}
   - fetch_bill_documents only enqueued when action is likely to produce
     new GovInfo text (saves ~60–70% of unnecessary document fetch attempts)

8. Adaptive poll frequency (congress_poller.py)
   - _is_congress_off_hours(): weekends + before 9AM / after 9PM EST
   - Skips poll if off-hours AND last poll < 1 hour ago
   - Prevents wasteful polling when Congress is not in session

9. Admin panel additions (admin.py + settings/page.tsx + api.ts)
   - GET /api/admin/newsapi-quota → remaining calls today
   - POST /api/admin/clear-gnews-cache → flush RSS cache
   - Settings page shows NewsAPI quota remaining (amber if < 10)
   - "Clear Google News Cache" button in Manual Controls

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Jack Levy
2026-03-14 16:50:51 -04:00
parent 247a874c8d
commit 7e5c5b473e
16 changed files with 676 additions and 162 deletions

View File

@@ -14,6 +14,32 @@ from app.config import settings
logger = logging.getLogger(__name__)
class RateLimitError(Exception):
    """Signals that an LLM provider rejected a call for rate-limit / quota reasons."""

    def __init__(self, provider: str, retry_after: int = 60):
        # Keep the offending provider and suggested back-off on the instance so
        # retry logic (e.g. a Celery task) can reschedule appropriately.
        self.provider = provider
        self.retry_after = retry_after
        message = f"{provider} rate limit exceeded; retry after {retry_after}s"
        super().__init__(message)
def _detect_rate_limit(exc: Exception) -> bool:
"""Return True if exc represents a provider rate-limit / quota error."""
exc_type = type(exc).__name__.lower()
exc_str = str(exc).lower()
# OpenAI / Anthropic SDK raise a class named *RateLimitError
if "ratelimit" in exc_type or "rate_limit" in exc_type:
return True
# Google Gemini SDK raises ResourceExhausted
if "resourceexhausted" in exc_type:
return True
# Generic HTTP 429 or quota messages (e.g. Ollama, raw requests)
if "429" in exc_str or "rate limit" in exc_str or "quota" in exc_str:
return True
return False
SYSTEM_PROMPT = """You are a nonpartisan legislative analyst specializing in translating complex \
legislation into clear, accurate summaries for informed citizens. You analyze bills objectively \
without political bias.
@@ -182,6 +208,19 @@ def parse_brief_json(raw: str | dict, provider: str, model: str) -> ReverseBrief
class LLMProvider(ABC):
_provider_name: str = "unknown"
def _call(self, fn):
    """Invoke fn(), translating provider-specific rate-limit errors to RateLimitError."""
    try:
        result = fn()
    except RateLimitError:
        # Already normalized — propagate untouched.
        raise
    except Exception as exc:
        if not _detect_rate_limit(exc):
            raise
        # Normalize the SDK-specific error, preserving it as the cause chain.
        raise RateLimitError(self._provider_name) from exc
    return result
@abstractmethod
def generate_brief(self, doc_text: str, bill_metadata: dict) -> ReverseBrief:
    """Produce a structured ReverseBrief for *doc_text*; each provider subclass implements this."""
    pass
@@ -196,6 +235,8 @@ class LLMProvider(ABC):
class OpenAIProvider(LLMProvider):
_provider_name = "openai"
def __init__(self, model: str | None = None):
from openai import OpenAI
self.client = OpenAI(api_key=settings.OPENAI_API_KEY)
@@ -203,7 +244,7 @@ class OpenAIProvider(LLMProvider):
def generate_brief(self, doc_text: str, bill_metadata: dict) -> ReverseBrief:
    """Generate a structured brief for a bill document via the OpenAI chat API.

    Raises RateLimitError (via self._call) when the provider returns a 429/quota error.
    """
    prompt = build_prompt(doc_text, bill_metadata, MAX_TOKENS_DEFAULT)
    # _call wraps the SDK call so provider 429s surface as RateLimitError.
    response = self._call(lambda: self.client.chat.completions.create(
        model=self.model,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            # NOTE(review): this user-message line was hidden between diff
            # hunks and is reconstructed — confirm against the original file.
            {"role": "user", "content": prompt},
        ],
        response_format={"type": "json_object"},
        temperature=0.1,
    ))
    raw = response.choices[0].message.content
    return parse_brief_json(raw, "openai", self.model)
def generate_amendment_brief(self, new_text: str, previous_text: str, bill_metadata: dict) -> ReverseBrief:
    """Generate a brief describing how an amended text differs from the previous version.

    Raises RateLimitError (via self._call) when the provider returns a 429/quota error.
    """
    prompt = build_amendment_prompt(new_text, previous_text, bill_metadata, MAX_TOKENS_DEFAULT)
    response = self._call(lambda: self.client.chat.completions.create(
        model=self.model,
        messages=[
            {"role": "system", "content": AMENDMENT_SYSTEM_PROMPT},
            # NOTE(review): this user-message line was hidden between diff
            # hunks and is reconstructed — confirm against the original file.
            {"role": "user", "content": prompt},
        ],
        response_format={"type": "json_object"},
        temperature=0.1,
    ))
    raw = response.choices[0].message.content
    return parse_brief_json(raw, "openai", self.model)
def generate_text(self, prompt: str) -> str:
    """Free-form text generation (no JSON contract); returns "" if the model yields no content.

    Raises RateLimitError (via self._call) when the provider returns a 429/quota error.
    """
    response = self._call(lambda: self.client.chat.completions.create(
        model=self.model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0.3,
    ))
    return response.choices[0].message.content or ""
class AnthropicProvider(LLMProvider):
_provider_name = "anthropic"
def __init__(self, model: str | None = None):
import anthropic
self.client = anthropic.Anthropic(api_key=settings.ANTHROPIC_API_KEY)
@@ -246,36 +289,46 @@ class AnthropicProvider(LLMProvider):
def generate_brief(self, doc_text: str, bill_metadata: dict) -> ReverseBrief:
    """Generate a structured brief for a bill document via the Anthropic Messages API.

    Raises RateLimitError (via self._call) when the provider returns a 429/quota error.
    """
    prompt = build_prompt(doc_text, bill_metadata, MAX_TOKENS_DEFAULT)
    response = self._call(lambda: self.client.messages.create(
        model=self.model,
        max_tokens=4096,
        # cache_control caches the static system prompt server-side, cutting
        # input-token cost on repeated calls within the cache window.
        system=[{
            "type": "text",
            "text": SYSTEM_PROMPT + "\n\nIMPORTANT: Respond with ONLY valid JSON. No other text.",
            "cache_control": {"type": "ephemeral"},
        }],
        messages=[{"role": "user", "content": prompt}],
    ))
    raw = response.content[0].text
    return parse_brief_json(raw, "anthropic", self.model)
def generate_amendment_brief(self, new_text: str, previous_text: str, bill_metadata: dict) -> ReverseBrief:
    """Generate a brief describing how an amended text differs from the previous version.

    Raises RateLimitError (via self._call) when the provider returns a 429/quota error.
    """
    prompt = build_amendment_prompt(new_text, previous_text, bill_metadata, MAX_TOKENS_DEFAULT)
    response = self._call(lambda: self.client.messages.create(
        model=self.model,
        max_tokens=4096,
        # Ephemeral cache_control on the static system prompt (see generate_brief).
        system=[{
            "type": "text",
            "text": AMENDMENT_SYSTEM_PROMPT + "\n\nIMPORTANT: Respond with ONLY valid JSON. No other text.",
            "cache_control": {"type": "ephemeral"},
        }],
        messages=[{"role": "user", "content": prompt}],
    ))
    raw = response.content[0].text
    return parse_brief_json(raw, "anthropic", self.model)
def generate_text(self, prompt: str) -> str:
    """Free-form text generation with no system prompt (so no prompt caching applies here).

    Raises RateLimitError (via self._call) when the provider returns a 429/quota error.
    """
    response = self._call(lambda: self.client.messages.create(
        model=self.model,
        max_tokens=1024,
        messages=[{"role": "user", "content": prompt}],
    ))
    return response.content[0].text
class GeminiProvider(LLMProvider):
_provider_name = "gemini"
def __init__(self, model: str | None = None):
import google.generativeai as genai
genai.configure(api_key=settings.GEMINI_API_KEY)
@@ -291,12 +344,12 @@ class GeminiProvider(LLMProvider):
def generate_brief(self, doc_text: str, bill_metadata: dict) -> ReverseBrief:
    """Generate a structured brief for a bill document via the Gemini API.

    Raises RateLimitError (via self._call) when the provider returns a
    ResourceExhausted / quota error.
    """
    prompt = build_prompt(doc_text, bill_metadata, MAX_TOKENS_DEFAULT)
    response = self._call(lambda: self._make_model(SYSTEM_PROMPT).generate_content(prompt))
    return parse_brief_json(response.text, "gemini", self.model_name)
def generate_amendment_brief(self, new_text: str, previous_text: str, bill_metadata: dict) -> ReverseBrief:
    """Generate a brief describing how an amended text differs from the previous version.

    Raises RateLimitError (via self._call) when the provider returns a
    ResourceExhausted / quota error.
    """
    prompt = build_amendment_prompt(new_text, previous_text, bill_metadata, MAX_TOKENS_DEFAULT)
    response = self._call(lambda: self._make_model(AMENDMENT_SYSTEM_PROMPT).generate_content(prompt))
    return parse_brief_json(response.text, "gemini", self.model_name)
def generate_text(self, prompt: str) -> str:
    """Free-form text generation via Gemini with a mild-temperature config.

    Raises RateLimitError (via self._call) when the provider returns a
    ResourceExhausted / quota error.
    """
    # NOTE(review): the line constructing `model` was hidden between diff
    # hunks; reconstructed here using the same local-import pattern as
    # __init__ — confirm against the original file.
    import google.generativeai as genai
    model = genai.GenerativeModel(
        model_name=self.model_name,
        generation_config={"temperature": 0.3},
    )
    response = self._call(lambda: model.generate_content(prompt))
    return response.text
class OllamaProvider(LLMProvider):
_provider_name = "ollama"
def __init__(self, model: str | None = None):
self.base_url = settings.OLLAMA_BASE_URL.rstrip("/")
self.model = model or settings.OLLAMA_MODEL