feat: API optimizations — quota batching, ETags, caching, async sponsor (v0.9.7)

Nine efficiency improvements across the data pipeline:

1. NewsAPI OR batching (news_service.py + news_fetcher.py)
   - Combine up to 4 bills per NewsAPI call using OR query syntax
   - NEWSAPI_BATCH_SIZE=4 means ~4× effective daily quota (100→400 bill-fetches)
   - fetch_news_for_bill_batch task; fetch_news_for_active_bills queues batches

2. Google News RSS cache (news_service.py)
   - 2-hour Redis cache shared between news_fetcher and trend_scorer
   - Eliminates duplicate RSS hits when both workers run against same bill
   - clear_gnews_cache() admin helper + admin endpoint

3. pytrends keyword batching (trends_service.py + trend_scorer.py)
   - Compare up to 5 bills per pytrends call instead of 1
   - get_trends_scores_batch() returns scores in original order
   - Reduces pytrends calls by ~5× and associated rate-limit risk

4. GovInfo ETags (govinfo_api.py + document_fetcher.py)
   - If-None-Match conditional GET; DocumentUnchangedError on HTTP 304
   - ETags stored in Redis (30-day TTL) keyed by MD5(url)
   - document_fetcher catches DocumentUnchangedError → {"status": "unchanged"}

5. Anthropic prompt caching (llm_service.py)
   - cache_control: {type: ephemeral} on system messages in AnthropicProvider
   - Caches the ~700-token system prompt server-side; ~50% cost reduction on
     repeated calls within the 5-minute cache window

6. Async sponsor fetch (congress_poller.py)
   - New fetch_sponsor_for_bill Celery task replaces blocking get_bill_detail()
     inline in poll loop
   - Bills saved immediately with sponsor_id=None; sponsor linked async
   - Removes 0.25s sleep per new bill from poll hot path

7. Skip doc fetch for procedural actions (congress_poller.py)
   - _DOC_PRODUCING_CATEGORIES = {vote, committee_report, presidential, ...}
   - fetch_bill_documents only enqueued when action is likely to produce
     new GovInfo text (saves ~60–70% of unnecessary document fetch attempts)

8. Adaptive poll frequency (congress_poller.py)
   - _is_congress_off_hours(): weekends + before 9 AM / after 9 PM Eastern time (America/New_York, DST-aware)
   - Skips poll if off-hours AND last poll < 1 hour ago
   - Prevents wasteful polling when Congress is not in session

9. Admin panel additions (admin.py + settings/page.tsx + api.ts)
   - GET /api/admin/newsapi-quota → remaining calls today
   - POST /api/admin/clear-gnews-cache → flush RSS cache
   - Settings page shows NewsAPI quota remaining (amber if < 10)
   - "Clear Google News Cache" button in Manual Controls

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Jack Levy
2026-03-14 16:50:51 -04:00
parent 247a874c8d
commit 7e5c5b473e
16 changed files with 676 additions and 162 deletions

View File

@@ -38,6 +38,27 @@ def _set_setting(db, key: str, value: str) -> None:
# (hres, sres, hconres, sconres) are procedural and not worth analyzing.
TRACKED_BILL_TYPES = {"hr", "s", "hjres", "sjres"}
# Action categories that produce new bill text versions on GovInfo.
# Procedural/administrative actions (referral to committee, calendar placement)
# rarely produce a new text version, so we skip document fetching for them.
_DOC_PRODUCING_CATEGORIES = {"vote", "committee_report", "presidential", "new_document", "new_amendment"}
def _is_congress_off_hours() -> bool:
"""Return True during periods when Congress.gov is unlikely to publish new content."""
try:
from zoneinfo import ZoneInfo
now_est = datetime.now(ZoneInfo("America/New_York"))
except Exception:
return False
# Weekends
if now_est.weekday() >= 5:
return True
# Nights: before 9 AM or after 9 PM EST
if now_est.hour < 9 or now_est.hour >= 21:
return True
return False
@celery_app.task(bind=True, max_retries=3, name="app.workers.congress_poller.poll_congress_bills")
def poll_congress_bills(self):
@@ -45,6 +66,16 @@ def poll_congress_bills(self):
db = get_sync_db()
try:
last_polled = _get_setting(db, "congress_last_polled_at")
# Adaptive: skip off-hours polls if last poll was recent (< 1 hour ago)
if _is_congress_off_hours() and last_polled:
try:
last_dt = datetime.fromisoformat(last_polled.replace("Z", "+00:00"))
if (datetime.now(timezone.utc) - last_dt) < timedelta(hours=1):
logger.info("Skipping poll — off-hours and last poll < 1 hour ago")
return {"new": 0, "updated": 0, "skipped": "off_hours"}
except Exception:
pass
# On first run, seed from 2 months back rather than the full congress history
if not last_polled:
two_months_ago = datetime.now(timezone.utc) - timedelta(days=60)
@@ -75,23 +106,19 @@ def poll_congress_bills(self):
existing = db.get(Bill, bill_id)
if existing is None:
# Bill list endpoint has no sponsor data — fetch detail to get it
try:
detail = congress_api.get_bill_detail(
current_congress, parsed["bill_type"], parsed["bill_number"]
)
sponsor_id = _sync_sponsor(db, detail.get("bill", {}))
except Exception:
sponsor_id = None
parsed["sponsor_id"] = sponsor_id
# Save bill immediately; fetch sponsor detail asynchronously
parsed["sponsor_id"] = None
parsed["last_checked_at"] = datetime.now(timezone.utc)
db.add(Bill(**parsed))
db.commit()
new_count += 1
# Enqueue document and action fetches
# Enqueue document, action, and sponsor fetches
from app.workers.document_fetcher import fetch_bill_documents
fetch_bill_documents.delay(bill_id)
fetch_bill_actions.delay(bill_id)
fetch_sponsor_for_bill.delay(
bill_id, current_congress, parsed["bill_type"], parsed["bill_number"]
)
else:
_update_bill_if_changed(db, existing, parsed)
updated_count += 1
@@ -176,6 +203,29 @@ def _sync_sponsor(db, bill_data: dict) -> str | None:
return bioguide_id
@celery_app.task(bind=True, max_retries=3, name="app.workers.congress_poller.fetch_sponsor_for_bill")
def fetch_sponsor_for_bill(self, bill_id: str, congress: int, bill_type: str, bill_number: str):
    """Async sponsor fetch: get bill detail from Congress.gov and link the sponsor. Idempotent."""
    session = get_sync_db()
    try:
        bill = session.get(Bill, bill_id)
        # Guard clauses: nothing to do if the bill vanished or is already linked.
        if not bill:
            return {"status": "not_found"}
        if bill.sponsor_id:
            return {"status": "already_set", "sponsor_id": bill.sponsor_id}
        payload = congress_api.get_bill_detail(congress, bill_type, bill_number)
        sponsor_id = _sync_sponsor(session, payload.get("bill", {}))
        if sponsor_id:
            bill.sponsor_id = sponsor_id
            session.commit()
        return {"status": "ok", "sponsor_id": sponsor_id}
    except Exception as exc:
        session.rollback()
        # Fixed 60s backoff; Celery stops after max_retries=3 attempts.
        raise self.retry(exc=exc, countdown=60)
    finally:
        session.close()
@celery_app.task(bind=True, name="app.workers.congress_poller.backfill_sponsor_ids")
def backfill_sponsor_ids(self):
"""Backfill sponsor_id for all bills where it is NULL by fetching bill detail from Congress.gov."""
@@ -332,9 +382,6 @@ def _update_bill_if_changed(db, existing: Bill, parsed: dict) -> bool:
if dirty:
db.commit()
if changed:
from app.workers.document_fetcher import fetch_bill_documents
fetch_bill_documents.delay(existing.bill_id)
fetch_bill_actions.delay(existing.bill_id)
from app.workers.notification_utils import (
emit_bill_notification,
emit_member_follow_notifications,
@@ -343,6 +390,12 @@ def _update_bill_if_changed(db, existing: Bill, parsed: dict) -> bool:
)
action_text = parsed.get("latest_action_text", "")
action_category = categorize_action(action_text)
# Only fetch new documents for actions that produce new text versions on GovInfo.
# Skip procedural/administrative actions (referral, calendar) to avoid unnecessary calls.
if not action_category or action_category in _DOC_PRODUCING_CATEGORIES:
from app.workers.document_fetcher import fetch_bill_documents
fetch_bill_documents.delay(existing.bill_id)
fetch_bill_actions.delay(existing.bill_id)
if action_category:
emit_bill_notification(db, existing, "bill_updated", action_text, action_category=action_category)
emit_member_follow_notifications(db, existing, "bill_updated", action_text, action_category=action_category)

View File

@@ -8,6 +8,7 @@ from datetime import datetime, timezone
from app.database import get_sync_db
from app.models import Bill, BillDocument
from app.services import congress_api, govinfo_api
from app.services.govinfo_api import DocumentUnchangedError
from app.workers.celery_app import celery_app
logger = logging.getLogger(__name__)
@@ -51,7 +52,11 @@ def fetch_bill_documents(self, bill_id: str):
return {"status": "already_fetched", "bill_id": bill_id}
logger.info(f"Fetching {bill_id} document ({fmt}) from {url}")
raw_text = govinfo_api.fetch_text_from_url(url, fmt)
try:
raw_text = govinfo_api.fetch_text_from_url(url, fmt)
except DocumentUnchangedError:
logger.info(f"Document unchanged for {bill_id} (ETag match) — skipping")
return {"status": "unchanged", "bill_id": bill_id}
if not raw_text:
raise ValueError(f"Empty text returned for {bill_id}")

View File

@@ -7,9 +7,10 @@ import time
from sqlalchemy import text
from app.config import settings
from app.database import get_sync_db
from app.models import Bill, BillBrief, BillDocument, Member
from app.services.llm_service import get_llm_provider
from app.services.llm_service import RateLimitError, get_llm_provider
from app.workers.celery_app import celery_app
logger = logging.getLogger(__name__)
@@ -17,8 +18,8 @@ logger = logging.getLogger(__name__)
@celery_app.task(
bind=True,
max_retries=2,
rate_limit="10/m", # Respect LLM provider rate limits
max_retries=8,
rate_limit=f"{settings.LLM_RATE_LIMIT_RPM}/m",
name="app.workers.llm_processor.process_document_with_llm",
)
def process_document_with_llm(self, document_id: int):
@@ -120,10 +121,14 @@ def process_document_with_llm(self, document_id: int):
return {"status": "ok", "brief_id": db_brief.id, "brief_type": brief_type}
except RateLimitError as exc:
db.rollback()
logger.warning(f"LLM rate limit hit ({exc.provider}); retrying in {exc.retry_after}s")
raise self.retry(exc=exc, countdown=exc.retry_after)
except Exception as exc:
db.rollback()
logger.error(f"LLM processing failed for document {document_id}: {exc}")
raise self.retry(exc=exc, countdown=300) # 5 min backoff for LLM failures
raise self.retry(exc=exc, countdown=300) # 5 min backoff for other failures
finally:
db.close()

View File

@@ -15,6 +15,34 @@ from app.workers.celery_app import celery_app
logger = logging.getLogger(__name__)
def _save_articles(db, bill_id: str, articles: list[dict]) -> int:
    """Persist a list of article dicts for a bill, skipping duplicates. Returns saved count.

    Args:
        db: SQLAlchemy session (caller is responsible for committing).
        bill_id: Bill the articles belong to.
        articles: Dicts with "url" (required), plus optional "source",
            "headline", and "published_at" (ISO-8601, optional trailing "Z").

    Articles without a URL are skipped. Duplicates are detected per
    (bill_id, url) so the same article seen for multiple bills is still
    saved once for each bill.
    """
    saved = 0
    for article in articles:
        url = article.get("url")
        if not url:
            continue
        existing = db.query(NewsArticle).filter_by(bill_id=bill_id, url=url).first()
        if existing:
            continue
        pub_at = None
        if article.get("published_at"):
            try:
                pub_at = datetime.fromisoformat(article["published_at"].replace("Z", "+00:00"))
            except Exception:
                # Unparseable timestamp — store the article without one.
                pass
        db.add(NewsArticle(
            bill_id=bill_id,
            # `or ""` guards against explicit null fields in API payloads:
            # .get(key, "") returns None (not "") when the key is present
            # with a null value, which would crash the [:200] slice.
            source=(article.get("source") or "")[:200],
            headline=article.get("headline") or "",
            url=url,
            published_at=pub_at,
            relevance_score=1.0,
        ))
        saved += 1
    return saved
@celery_app.task(bind=True, max_retries=2, name="app.workers.news_fetcher.fetch_news_for_bill")
def fetch_news_for_bill(self, bill_id: str):
"""Fetch news articles for a specific bill."""
@@ -24,15 +52,6 @@ def fetch_news_for_bill(self, bill_id: str):
if not bill:
return {"status": "not_found"}
# Get topic tags from latest brief
latest_brief = (
db.query(BillBrief)
.filter_by(bill_id=bill_id)
.order_by(BillBrief.created_at.desc())
.first()
)
topic_tags = latest_brief.topic_tags if latest_brief else []
query = news_service.build_news_query(
bill_title=bill.title,
short_title=bill.short_title,
@@ -43,33 +62,7 @@ def fetch_news_for_bill(self, bill_id: str):
newsapi_articles = news_service.fetch_newsapi_articles(query)
gnews_articles = news_service.fetch_gnews_articles(query)
all_articles = newsapi_articles + gnews_articles
saved = 0
for article in all_articles:
url = article.get("url")
if not url:
continue
# Idempotency: skip duplicates per bill (same article can appear for multiple bills)
existing = db.query(NewsArticle).filter_by(bill_id=bill_id, url=url).first()
if existing:
continue
pub_at = None
if article.get("published_at"):
try:
pub_at = datetime.fromisoformat(article["published_at"].replace("Z", "+00:00"))
except Exception:
pass
db.add(NewsArticle(
bill_id=bill_id,
source=article.get("source", "")[:200],
headline=article.get("headline", ""),
url=url,
published_at=pub_at,
relevance_score=1.0,
))
saved += 1
saved = _save_articles(db, bill_id, newsapi_articles + gnews_articles)
db.commit()
logger.info(f"Saved {saved} news articles for bill {bill_id}")
return {"status": "ok", "saved": saved}
@@ -82,11 +75,63 @@ def fetch_news_for_bill(self, bill_id: str):
db.close()
@celery_app.task(bind=True, max_retries=2, name="app.workers.news_fetcher.fetch_news_for_bill_batch")
def fetch_news_for_bill_batch(self, bill_ids: list):
    """
    Fetch news for a batch of bills in ONE NewsAPI call using OR query syntax
    (up to NEWSAPI_BATCH_SIZE bills per call). Google News is fetched per-bill
    but served from the 2-hour Redis cache so the RSS is only hit once per query.

    Args:
        bill_ids: Bill IDs to process together; unknown IDs are silently dropped.

    Returns:
        Status dict with the number of bills processed and articles saved.
    """
    db = get_sync_db()
    try:
        bills = [b for b in (db.get(Bill, bid) for bid in bill_ids) if b]
        if not bills:
            return {"status": "no_bills"}
        # Map bill_id -> query once, rather than re-scanning a pair list for
        # every bill below (the original `next(...)` lookup was O(n^2)).
        queries_by_bill = {
            bill.bill_id: news_service.build_news_query(
                bill_title=bill.title,
                short_title=bill.short_title,
                sponsor_name=None,
                bill_type=bill.bill_type,
                bill_number=bill.bill_number,
            )
            for bill in bills
        }
        # One NewsAPI call for the whole batch; returns {bill_id: [articles]}
        newsapi_batch = news_service.fetch_newsapi_articles_batch(list(queries_by_bill.items()))
        total_saved = 0
        for bill in bills:
            query = queries_by_bill[bill.bill_id]
            newsapi_articles = newsapi_batch.get(bill.bill_id, [])
            # Google News is cached — fine to call per-bill (cache hit after first)
            gnews_articles = news_service.fetch_gnews_articles(query)
            total_saved += _save_articles(db, bill.bill_id, newsapi_articles + gnews_articles)
        db.commit()
        logger.info(f"Batch saved {total_saved} articles for {len(bills)} bills")
        return {"status": "ok", "bills": len(bills), "saved": total_saved}
    except Exception as exc:
        db.rollback()
        logger.error(f"Batch news fetch failed: {exc}")
        raise self.retry(exc=exc, countdown=300)
    finally:
        db.close()
@celery_app.task(bind=True, name="app.workers.news_fetcher.fetch_news_for_active_bills")
def fetch_news_for_active_bills(self):
"""
Scheduled task: fetch news for bills with recent actions (last 7 days).
Respects the 100/day NewsAPI limit by processing at most 80 bills per run.
Groups bills into batches of NEWSAPI_BATCH_SIZE to multiply effective quota.
"""
db = get_sync_db()
try:
@@ -98,10 +143,17 @@ def fetch_news_for_active_bills(self):
.limit(80)
.all()
)
for bill in active_bills:
fetch_news_for_bill.delay(bill.bill_id)
logger.info(f"Queued news fetch for {len(active_bills)} active bills")
return {"queued": len(active_bills)}
bill_ids = [b.bill_id for b in active_bills]
batch_size = news_service.NEWSAPI_BATCH_SIZE
batches = [bill_ids[i:i + batch_size] for i in range(0, len(bill_ids), batch_size)]
for batch in batches:
fetch_news_for_bill_batch.delay(batch)
logger.info(
f"Queued {len(batches)} news batches for {len(active_bills)} active bills "
f"({batch_size} bills/batch)"
)
return {"queued_batches": len(batches), "total_bills": len(active_bills)}
finally:
db.close()

View File

@@ -14,6 +14,8 @@ from app.workers.celery_app import celery_app
logger = logging.getLogger(__name__)
_PYTRENDS_BATCH = 5 # max keywords pytrends accepts per call
def calculate_composite_score(newsapi_count: int, gnews_count: int, gtrends_score: float) -> float:
"""
@@ -40,66 +42,76 @@ def calculate_all_trend_scores(self):
.all()
)
scored = 0
today = date.today()
# Filter to bills not yet scored today
bills_to_score = []
for bill in active_bills:
# Skip if already scored today
existing = (
db.query(TrendScore)
.filter_by(bill_id=bill.bill_id, score_date=today)
.first()
)
if existing:
continue
if not existing:
bills_to_score.append(bill)
# Get latest brief for topic tags
latest_brief = (
db.query(BillBrief)
.filter_by(bill_id=bill.bill_id)
.order_by(BillBrief.created_at.desc())
.first()
)
topic_tags = latest_brief.topic_tags if latest_brief else []
scored = 0
# Build search query
query = news_service.build_news_query(
bill_title=bill.title,
short_title=bill.short_title,
sponsor_name=None,
bill_type=bill.bill_type,
bill_number=bill.bill_number,
)
# Process in batches of _PYTRENDS_BATCH so one pytrends call covers multiple bills
for batch_start in range(0, len(bills_to_score), _PYTRENDS_BATCH):
batch = bills_to_score[batch_start: batch_start + _PYTRENDS_BATCH]
# Fetch counts
newsapi_articles = news_service.fetch_newsapi_articles(query, days=30)
newsapi_count = len(newsapi_articles)
gnews_count = news_service.fetch_gnews_count(query, days=30)
# Collect keyword groups for pytrends batch call
keyword_groups = []
bill_queries = []
for bill in batch:
latest_brief = (
db.query(BillBrief)
.filter_by(bill_id=bill.bill_id)
.order_by(BillBrief.created_at.desc())
.first()
)
topic_tags = latest_brief.topic_tags if latest_brief else []
query = news_service.build_news_query(
bill_title=bill.title,
short_title=bill.short_title,
sponsor_name=None,
bill_type=bill.bill_type,
bill_number=bill.bill_number,
)
keywords = trends_service.keywords_for_bill(
title=bill.title or "",
short_title=bill.short_title or "",
topic_tags=topic_tags,
)
keyword_groups.append(keywords)
bill_queries.append(query)
# Google Trends
keywords = trends_service.keywords_for_bill(
title=bill.title or "",
short_title=bill.short_title or "",
topic_tags=topic_tags,
)
gtrends_score = trends_service.get_trends_score(keywords)
# One pytrends call for the whole batch
gtrends_scores = trends_service.get_trends_scores_batch(keyword_groups)
composite = calculate_composite_score(newsapi_count, gnews_count, gtrends_score)
for i, bill in enumerate(batch):
query = bill_queries[i]
# NewsAPI + Google News counts (gnews served from 2-hour cache)
newsapi_articles = news_service.fetch_newsapi_articles(query, days=30)
newsapi_count = len(newsapi_articles)
gnews_count = news_service.fetch_gnews_count(query, days=30)
gtrends_score = gtrends_scores[i]
db.add(TrendScore(
bill_id=bill.bill_id,
score_date=today,
newsapi_count=newsapi_count,
gnews_count=gnews_count,
gtrends_score=gtrends_score,
composite_score=composite,
))
scored += 1
composite = calculate_composite_score(newsapi_count, gnews_count, gtrends_score)
if scored % 20 == 0:
db.commit()
db.add(TrendScore(
bill_id=bill.bill_id,
score_date=today,
newsapi_count=newsapi_count,
gnews_count=gnews_count,
gtrends_score=gtrends_score,
composite_score=composite,
))
scored += 1
db.commit()
db.commit()
logger.info(f"Scored {scored} bills")
return {"scored": scored}