feat: API optimizations — quota batching, ETags, caching, async sponsor (v0.9.7)
Nine efficiency improvements across the data pipeline:
1. NewsAPI OR batching (news_service.py + news_fetcher.py)
- Combine up to 4 bills per NewsAPI call using OR query syntax
- NEWSAPI_BATCH_SIZE=4 means ~4× effective daily quota (100→400 bill-fetches)
- fetch_news_for_bill_batch task; fetch_news_for_active_bills queues batches
2. Google News RSS cache (news_service.py)
- 2-hour Redis cache shared between news_fetcher and trend_scorer
- Eliminates duplicate RSS hits when both workers run against same bill
- clear_gnews_cache() admin helper + admin endpoint
3. pytrends keyword batching (trends_service.py + trend_scorer.py)
- Compare up to 5 bills per pytrends call instead of 1
- get_trends_scores_batch() returns scores in original order
- Reduces pytrends calls by ~5× and associated rate-limit risk
4. GovInfo ETags (govinfo_api.py + document_fetcher.py)
- If-None-Match conditional GET; DocumentUnchangedError on HTTP 304
- ETags stored in Redis (30-day TTL) keyed by MD5(url)
- document_fetcher catches DocumentUnchangedError → {"status": "unchanged"}
5. Anthropic prompt caching (llm_service.py)
- cache_control: {type: ephemeral} on system messages in AnthropicProvider
- Caches the ~700-token system prompt server-side; ~50% cost reduction on
repeated calls within the 5-minute cache window
6. Async sponsor fetch (congress_poller.py)
- New fetch_sponsor_for_bill Celery task replaces blocking get_bill_detail()
inline in poll loop
- Bills saved immediately with sponsor_id=None; sponsor linked async
- Removes 0.25s sleep per new bill from poll hot path
7. Skip doc fetch for procedural actions (congress_poller.py)
- _DOC_PRODUCING_CATEGORIES = {vote, committee_report, presidential, ...}
- fetch_bill_documents only enqueued when action is likely to produce
new GovInfo text (saves ~60–70% of unnecessary document fetch attempts)
8. Adaptive poll frequency (congress_poller.py)
- _is_congress_off_hours(): weekends + before 9AM / after 9PM EST
- Skips poll if off-hours AND last poll < 1 hour ago
- Prevents wasteful polling when Congress is not in session
9. Admin panel additions (admin.py + settings/page.tsx + api.ts)
- GET /api/admin/newsapi-quota → remaining calls today
- POST /api/admin/clear-gnews-cache → flush RSS cache
- Settings page shows NewsAPI quota remaining (amber if < 10)
- "Clear Google News Cache" button in Manual Controls
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -15,6 +15,34 @@ from app.workers.celery_app import celery_app
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _save_articles(db, bill_id: str, articles: list[dict]) -> int:
    """Persist article dicts for *bill_id*, skipping blanks and duplicates.

    Each article dict may carry ``url``, ``source``, ``headline`` and an
    ISO-8601 ``published_at`` string. Rows are added to *db* but NOT
    committed — the caller owns the transaction. Returns the number of
    rows added.
    """
    count = 0
    for item in articles:
        link = item.get("url")
        if not link:
            # No URL means no dedup key — skip rather than store junk.
            continue
        duplicate = (
            db.query(NewsArticle)
            .filter_by(bill_id=bill_id, url=link)
            .first()
        )
        if duplicate:
            continue
        published = None
        raw_ts = item.get("published_at")
        if raw_ts:
            try:
                # Normalize a trailing "Z" so fromisoformat accepts it.
                published = datetime.fromisoformat(raw_ts.replace("Z", "+00:00"))
            except Exception:
                published = None  # unparseable timestamp — store NULL
        db.add(NewsArticle(
            bill_id=bill_id,
            source=item.get("source", "")[:200],
            headline=item.get("headline", ""),
            url=link,
            published_at=published,
            relevance_score=1.0,
        ))
        count += 1
    return count
|
||||
|
||||
|
||||
@celery_app.task(bind=True, max_retries=2, name="app.workers.news_fetcher.fetch_news_for_bill")
|
||||
def fetch_news_for_bill(self, bill_id: str):
|
||||
"""Fetch news articles for a specific bill."""
|
||||
@@ -24,15 +52,6 @@ def fetch_news_for_bill(self, bill_id: str):
|
||||
if not bill:
|
||||
return {"status": "not_found"}
|
||||
|
||||
# Get topic tags from latest brief
|
||||
latest_brief = (
|
||||
db.query(BillBrief)
|
||||
.filter_by(bill_id=bill_id)
|
||||
.order_by(BillBrief.created_at.desc())
|
||||
.first()
|
||||
)
|
||||
topic_tags = latest_brief.topic_tags if latest_brief else []
|
||||
|
||||
query = news_service.build_news_query(
|
||||
bill_title=bill.title,
|
||||
short_title=bill.short_title,
|
||||
@@ -43,33 +62,7 @@ def fetch_news_for_bill(self, bill_id: str):
|
||||
|
||||
newsapi_articles = news_service.fetch_newsapi_articles(query)
|
||||
gnews_articles = news_service.fetch_gnews_articles(query)
|
||||
all_articles = newsapi_articles + gnews_articles
|
||||
|
||||
saved = 0
|
||||
for article in all_articles:
|
||||
url = article.get("url")
|
||||
if not url:
|
||||
continue
|
||||
# Idempotency: skip duplicates per bill (same article can appear for multiple bills)
|
||||
existing = db.query(NewsArticle).filter_by(bill_id=bill_id, url=url).first()
|
||||
if existing:
|
||||
continue
|
||||
pub_at = None
|
||||
if article.get("published_at"):
|
||||
try:
|
||||
pub_at = datetime.fromisoformat(article["published_at"].replace("Z", "+00:00"))
|
||||
except Exception:
|
||||
pass
|
||||
db.add(NewsArticle(
|
||||
bill_id=bill_id,
|
||||
source=article.get("source", "")[:200],
|
||||
headline=article.get("headline", ""),
|
||||
url=url,
|
||||
published_at=pub_at,
|
||||
relevance_score=1.0,
|
||||
))
|
||||
saved += 1
|
||||
|
||||
saved = _save_articles(db, bill_id, newsapi_articles + gnews_articles)
|
||||
db.commit()
|
||||
logger.info(f"Saved {saved} news articles for bill {bill_id}")
|
||||
return {"status": "ok", "saved": saved}
|
||||
@@ -82,11 +75,63 @@ def fetch_news_for_bill(self, bill_id: str):
|
||||
db.close()
|
||||
|
||||
|
||||
@celery_app.task(bind=True, max_retries=2, name="app.workers.news_fetcher.fetch_news_for_bill_batch")
def fetch_news_for_bill_batch(self, bill_ids: list):
    """
    Fetch news for a batch of bills in ONE NewsAPI call using OR query syntax
    (up to NEWSAPI_BATCH_SIZE bills per call). Google News is fetched per-bill
    but served from the 2-hour Redis cache so the RSS is only hit once per query.
    """
    db = get_sync_db()
    try:
        # Resolve IDs to Bill rows, dropping any that no longer exist.
        found = [db.get(Bill, bid) for bid in bill_ids]
        found = [bill for bill in found if bill]
        if not found:
            return {"status": "no_bills"}

        # Map each bill id to its search query; insertion order matches
        # the bill order so the batched NewsAPI call sees the same sequence.
        queries_by_bill = {}
        for bill in found:
            queries_by_bill[bill.bill_id] = news_service.build_news_query(
                bill_title=bill.title,
                short_title=bill.short_title,
                sponsor_name=None,
                bill_type=bill.bill_type,
                bill_number=bill.bill_number,
            )

        # Single NewsAPI request covering every bill in the batch.
        newsapi_batch = news_service.fetch_newsapi_articles_batch(
            list(queries_by_bill.items())
        )

        total_saved = 0
        for bill in found:
            query = queries_by_bill[bill.bill_id]
            from_newsapi = newsapi_batch.get(bill.bill_id, [])
            # Google News is cached — fine to call per-bill (cache hit after first)
            from_gnews = news_service.fetch_gnews_articles(query)
            total_saved += _save_articles(db, bill.bill_id, from_newsapi + from_gnews)

        db.commit()
        logger.info(f"Batch saved {total_saved} articles for {len(found)} bills")
        return {"status": "ok", "bills": len(found), "saved": total_saved}

    except Exception as exc:
        db.rollback()
        logger.error(f"Batch news fetch failed: {exc}")
        raise self.retry(exc=exc, countdown=300)
    finally:
        db.close()
|
||||
|
||||
|
||||
@celery_app.task(bind=True, name="app.workers.news_fetcher.fetch_news_for_active_bills")
|
||||
def fetch_news_for_active_bills(self):
|
||||
"""
|
||||
Scheduled task: fetch news for bills with recent actions (last 7 days).
|
||||
Respects the 100/day NewsAPI limit by processing at most 80 bills per run.
|
||||
Groups bills into batches of NEWSAPI_BATCH_SIZE to multiply effective quota.
|
||||
"""
|
||||
db = get_sync_db()
|
||||
try:
|
||||
@@ -98,10 +143,17 @@ def fetch_news_for_active_bills(self):
|
||||
.limit(80)
|
||||
.all()
|
||||
)
|
||||
for bill in active_bills:
|
||||
fetch_news_for_bill.delay(bill.bill_id)
|
||||
|
||||
logger.info(f"Queued news fetch for {len(active_bills)} active bills")
|
||||
return {"queued": len(active_bills)}
|
||||
bill_ids = [b.bill_id for b in active_bills]
|
||||
batch_size = news_service.NEWSAPI_BATCH_SIZE
|
||||
batches = [bill_ids[i:i + batch_size] for i in range(0, len(bill_ids), batch_size)]
|
||||
for batch in batches:
|
||||
fetch_news_for_bill_batch.delay(batch)
|
||||
|
||||
logger.info(
|
||||
f"Queued {len(batches)} news batches for {len(active_bills)} active bills "
|
||||
f"({batch_size} bills/batch)"
|
||||
)
|
||||
return {"queued_batches": len(batches), "total_bills": len(active_bills)}
|
||||
finally:
|
||||
db.close()
|
||||
|
||||
Reference in New Issue
Block a user