feat(email_gen): draft constituent letter generator + bill text indicators
- Add DraftLetterPanel: collapsible UI below BriefPanel for bills with a
brief; lets users select up to 3 cited points, pick stance/tone, and
generate a plain-text letter via the configured LLM provider
- Stance pre-fills from follow mode (pocket_boost → YES, pocket_veto → NO)
and clears when the user unfollows; recipient derived from bill chamber
- Add POST /api/bills/{bill_id}/draft-letter endpoint with proper LLM
provider/model resolution from AppSetting (respects Settings page choice)
- Add generate_text() to LLMProvider ABC and all four providers
- Expose has_document on BillSchema (list endpoint) via a single batch
query; BillCard shows Brief / Pending / No text indicator per bill
Authored-By: Jack Levy
This commit is contained in:
@@ -1,6 +1,7 @@
|
||||
from typing import Optional
|
||||
from typing import Literal, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query
|
||||
from pydantic import BaseModel
|
||||
from sqlalchemy import desc, func, or_, select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
from sqlalchemy.orm import selectinload
|
||||
@@ -16,6 +17,30 @@ from app.schemas.schemas import (
|
||||
TrendScoreSchema,
|
||||
)
|
||||
|
||||
# Display labels for congress.gov bill-type codes (lowercase keys), used to
# build a human-readable bill label such as "H.R. 1234" for the letter prompt.
# Unknown types fall back to an uppercased code at the call site.
_BILL_TYPE_LABELS: dict[str, str] = {
    "hr": "H.R.",
    "s": "S.",
    "hjres": "H.J.Res.",
    "sjres": "S.J.Res.",
    "hconres": "H.Con.Res.",
    "sconres": "S.Con.Res.",
    "hres": "H.Res.",
    "sres": "S.Res.",
}
|
||||
|
||||
|
||||
class DraftLetterRequest(BaseModel):
    """Request body for POST /bills/{bill_id}/draft-letter.

    Carries the constituent's stance, target chamber, tone, and the brief
    points the generated letter is allowed to reference.
    """

    stance: Literal["yes", "no"]  # how the constituent wants the member to vote
    recipient: Literal["house", "senate"]  # chamber the letter addresses
    tone: Literal["short", "polite", "firm"]  # maps to a tone instruction in the prompt
    selected_points: list[str]  # cited brief points; endpoint rejects an empty list
    include_citations: bool = True  # allow "as noted in Section 3"-style references
    zip_code: str | None = None  # not stored, not logged — only interpolated into the LLM prompt
|
||||
|
||||
|
||||
class DraftLetterResponse(BaseModel):
    """Response body: the generated plain-text letter draft."""

    draft: str  # raw text returned by the LLM provider, unmodified
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@@ -67,7 +92,14 @@ async def list_bills(
|
||||
result = await db.execute(query)
|
||||
bills = result.scalars().unique().all()
|
||||
|
||||
# Attach latest brief and trend to each bill
|
||||
# Single batch query: which of these bills have at least one document?
|
||||
bill_ids = [b.bill_id for b in bills]
|
||||
doc_result = await db.execute(
|
||||
select(BillDocument.bill_id).where(BillDocument.bill_id.in_(bill_ids)).distinct()
|
||||
)
|
||||
bills_with_docs = {row[0] for row in doc_result}
|
||||
|
||||
# Attach latest brief, trend, and has_document to each bill
|
||||
items = []
|
||||
for bill in bills:
|
||||
bill_dict = BillSchema.model_validate(bill)
|
||||
@@ -75,6 +107,7 @@ async def list_bills(
|
||||
bill_dict.latest_brief = bill.briefs[0]
|
||||
if bill.trend_scores:
|
||||
bill_dict.latest_trend = bill.trend_scores[0]
|
||||
bill_dict.has_document = bill.bill_id in bills_with_docs
|
||||
items.append(bill_dict)
|
||||
|
||||
return PaginatedResponse(
|
||||
@@ -159,3 +192,50 @@ async def get_bill_trend(bill_id: str, days: int = Query(30, ge=7, le=365), db:
|
||||
.order_by(TrendScore.score_date)
|
||||
)
|
||||
return result.scalars().all()
|
||||
|
||||
|
||||
@router.post("/{bill_id}/draft-letter", response_model=DraftLetterResponse)
async def generate_letter(bill_id: str, body: DraftLetterRequest, db: AsyncSession = Depends(get_db)):
    """Generate a plain-text constituent letter for a bill via the configured LLM.

    Resolves provider/model overrides from AppSetting (the Settings-page
    choice), builds a human-readable bill label, and delegates prompt
    construction to the llm_service.

    Raises:
        HTTPException 404: unknown bill_id.
        HTTPException 422: no points selected, or more than 3 (the UI caps
            selection at 3 cited points; enforce the same contract here).
        HTTPException 502: the LLM provider call failed.
    """
    import asyncio

    from app.models.setting import AppSetting
    from app.services.llm_service import generate_draft_letter

    bill = await db.get(Bill, bill_id)
    if not bill:
        raise HTTPException(status_code=404, detail="Bill not found")

    if not body.selected_points:
        raise HTTPException(status_code=422, detail="At least one point must be selected")
    # Server-side mirror of the UI's 3-point cap so hand-crafted requests
    # can't inflate the prompt arbitrarily.
    if len(body.selected_points) > 3:
        raise HTTPException(status_code=422, detail="At most 3 points may be selected")

    # Prefer the Settings-page provider/model when present; None lets the
    # provider factory fall back to its own defaults.
    prov_row = await db.get(AppSetting, "llm_provider")
    model_row = await db.get(AppSetting, "llm_model")
    llm_provider_override = prov_row.value if prov_row else None
    llm_model_override = model_row.value if model_row else None

    type_label = _BILL_TYPE_LABELS.get((bill.bill_type or "").lower(), (bill.bill_type or "").upper())
    bill_label = f"{type_label} {bill.bill_number}"

    try:
        # The provider clients make blocking synchronous calls; run the whole
        # generation in a worker thread so the event loop isn't stalled.
        draft = await asyncio.to_thread(
            generate_draft_letter,
            bill_label=bill_label,
            bill_title=bill.short_title or bill.title or bill_label,
            stance=body.stance,
            recipient=body.recipient,
            tone=body.tone,
            selected_points=body.selected_points,
            include_citations=body.include_citations,
            zip_code=body.zip_code,
            llm_provider=llm_provider_override,
            llm_model=llm_model_override,
        )
    except Exception as exc:
        # Best-effort mapping of raw provider errors to friendlier messages.
        # NOTE(review): string-sniffing is brittle across providers — confirm
        # against each SDK's exception types if this misclassifies errors.
        msg = str(exc)
        if "insufficient_quota" in msg or "quota" in msg.lower():
            detail = "LLM quota exceeded. Check your API key billing."
        elif "rate_limit" in msg.lower() or "429" in msg:
            detail = "LLM rate limit hit. Wait a moment and try again."
        elif "auth" in msg.lower() or "401" in msg or "403" in msg:
            detail = "LLM authentication failed. Check your API key."
        else:
            detail = f"LLM error: {msg[:200]}"
        raise HTTPException(status_code=502, detail=detail) from exc
    return {"draft": draft}
|
||||
|
||||
@@ -200,6 +200,7 @@ class BillSchema(BaseModel):
|
||||
latest_brief: Optional[BriefSchema] = None
|
||||
latest_trend: Optional[TrendScoreSchema] = None
|
||||
updated_at: Optional[datetime] = None
|
||||
has_document: bool = False
|
||||
|
||||
model_config = {"from_attributes": True}
|
||||
|
||||
|
||||
@@ -183,6 +183,10 @@ class LLMProvider(ABC):
|
||||
def generate_amendment_brief(self, new_text: str, previous_text: str, bill_metadata: dict) -> ReverseBrief:
|
||||
pass
|
||||
|
||||
    @abstractmethod
    def generate_text(self, prompt: str) -> str:
        """Return the model's raw plain-text completion for *prompt*.

        Unlike the brief-generation methods, no JSON parsing is applied to
        the provider's output.
        """
        pass
|
||||
|
||||
|
||||
class OpenAIProvider(LLMProvider):
|
||||
def __init__(self, model: str | None = None):
|
||||
@@ -218,6 +222,14 @@ class OpenAIProvider(LLMProvider):
|
||||
raw = response.choices[0].message.content
|
||||
return parse_brief_json(raw, "openai", self.model)
|
||||
|
||||
def generate_text(self, prompt: str) -> str:
|
||||
response = self.client.chat.completions.create(
|
||||
model=self.model,
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
temperature=0.3,
|
||||
)
|
||||
return response.choices[0].message.content or ""
|
||||
|
||||
|
||||
class AnthropicProvider(LLMProvider):
|
||||
def __init__(self, model: str | None = None):
|
||||
@@ -247,6 +259,14 @@ class AnthropicProvider(LLMProvider):
|
||||
raw = response.content[0].text
|
||||
return parse_brief_json(raw, "anthropic", self.model)
|
||||
|
||||
def generate_text(self, prompt: str) -> str:
|
||||
response = self.client.messages.create(
|
||||
model=self.model,
|
||||
max_tokens=1024,
|
||||
messages=[{"role": "user", "content": prompt}],
|
||||
)
|
||||
return response.content[0].text
|
||||
|
||||
|
||||
class GeminiProvider(LLMProvider):
|
||||
def __init__(self, model: str | None = None):
|
||||
@@ -272,6 +292,14 @@ class GeminiProvider(LLMProvider):
|
||||
response = self._make_model(AMENDMENT_SYSTEM_PROMPT).generate_content(prompt)
|
||||
return parse_brief_json(response.text, "gemini", self.model_name)
|
||||
|
||||
    def generate_text(self, prompt: str) -> str:
        """Return the model's plain-text completion for *prompt*.

        Builds a fresh GenerativeModel with no system prompt (unlike the
        brief methods, which use _make_model with a system instruction) and
        a low temperature for consistent letter output.
        """
        model = self._genai.GenerativeModel(
            model_name=self.model_name,
            generation_config={"temperature": 0.3},
        )
        response = model.generate_content(prompt)
        # NOTE(review): response.text raises when generation is blocked or
        # returns no candidates — confirm the caller's except handler covers
        # that case acceptably.
        return response.text
|
||||
|
||||
|
||||
class OllamaProvider(LLMProvider):
|
||||
def __init__(self, model: str | None = None):
|
||||
@@ -326,6 +354,16 @@ class OllamaProvider(LLMProvider):
|
||||
)
|
||||
return parse_brief_json(raw2, "ollama", self.model)
|
||||
|
||||
def generate_text(self, prompt: str) -> str:
|
||||
import requests as req
|
||||
response = req.post(
|
||||
f"{self.base_url}/api/generate",
|
||||
json={"model": self.model, "prompt": prompt, "stream": False},
|
||||
timeout=120,
|
||||
)
|
||||
response.raise_for_status()
|
||||
return response.json().get("response", "")
|
||||
|
||||
|
||||
def get_llm_provider(provider: str | None = None, model: str | None = None) -> LLMProvider:
|
||||
"""Factory — returns the configured LLM provider.
|
||||
@@ -344,3 +382,72 @@ def get_llm_provider(provider: str | None = None, model: str | None = None) -> L
|
||||
elif provider == "ollama":
|
||||
return OllamaProvider(model=model)
|
||||
raise ValueError(f"Unknown LLM_PROVIDER: '{provider}'. Must be one of: openai, anthropic, gemini, ollama")
|
||||
|
||||
|
||||
# Display labels for congress.gov bill-type codes (lowercase keys).
# NOTE(review): this table is duplicated verbatim in the bills router, and
# nothing in the visible parts of this module references it (the router
# builds bill_label itself before calling generate_draft_letter) — confirm
# usage and consider consolidating into one shared module.
_BILL_TYPE_LABELS: dict[str, str] = {
    "hr": "H.R.",
    "s": "S.",
    "hjres": "H.J.Res.",
    "sjres": "S.J.Res.",
    "hconres": "H.Con.Res.",
    "sconres": "S.Con.Res.",
    "hres": "H.Res.",
    "sres": "S.Res.",
}
|
||||
|
||||
# Per-tone instruction sentences interpolated into the letter prompt.
# Keys match DraftLetterRequest.tone; unknown tones fall back to "polite"
# in generate_draft_letter.
_TONE_INSTRUCTIONS: dict[str, str] = {
    "short": "Keep the letter brief — 6 to 8 sentences total.",
    "polite": "Use a respectful, formal, and courteous tone throughout the letter.",
    "firm": "Use a direct, firm tone that makes clear the constituent's strong conviction.",
}
|
||||
|
||||
|
||||
def generate_draft_letter(
    bill_label: str,
    bill_title: str,
    stance: str,
    recipient: str,
    tone: str,
    selected_points: list[str],
    include_citations: bool,
    zip_code: str | None,
    llm_provider: str | None = None,
    llm_model: str | None = None,
) -> str:
    """Generate a plain-text constituent letter draft using the configured LLM provider.

    Args:
        bill_label: Human-readable bill label, e.g. "H.R. 1234".
        bill_title: Bill title shown alongside the label in the prompt.
        stance: "yes" asks for a YES vote; any other value is treated as NO.
        recipient: "house" picks House wording; any other value means Senate.
        tone: A _TONE_INSTRUCTIONS key; unknown values fall back to "polite".
        selected_points: Brief points the letter may reference (caller
            validates that this is non-empty).
        include_citations: Whether the letter may mention citation labels.
        zip_code: Optional ZIP interpolated into the prompt only; never
            stored or logged here.
        llm_provider: Optional provider override for the factory.
        llm_model: Optional model override for the factory.

    Returns:
        The provider's raw plain-text letter draft.
    """
    vote_word = "YES" if stance == "yes" else "NO"
    chamber_word = "House" if recipient == "house" else "Senate"
    tone_instruction = _TONE_INSTRUCTIONS.get(tone, _TONE_INSTRUCTIONS["polite"])

    # Keep the global length rule consistent with the tone instruction: the
    # "short" tone asks for 6-8 sentences, so don't also tell the model 6-12
    # (the previous unconditional "6 to 12" rule contradicted it).
    length_rule = "6 to 8 sentences total." if tone == "short" else "6 to 12 sentences total."

    points_block = "\n".join(f"- {p}" for p in selected_points)

    citation_instruction = (
        "You may reference the citation label for each point (e.g. 'as noted in Section 3') if it adds clarity."
        if include_citations
        else "Do not include any citation references."
    )

    location_line = f"The constituent is writing from ZIP code {zip_code}." if zip_code else ""

    prompt = f"""Write a short constituent letter to a {chamber_word} member of Congress.

RULES:
- {tone_instruction}
- {length_rule}
- First sentence must be a clear, direct ask: "Please vote {vote_word} on {bill_label}."
- The body must reference ONLY the points listed below — do not invent any other claims or facts.
- {citation_instruction}
- Close with a brief sign-off and the placeholder "[Your Name]".
- Plain text only. No markdown, no bullet points, no headers, no partisan framing.
- Do not mention any political party.

BILL: {bill_label} — {bill_title}
STANCE: Vote {vote_word}
{location_line}

SELECTED POINTS TO REFERENCE:
{points_block}

Write the letter now:"""

    return get_llm_provider(provider=llm_provider, model=llm_model).generate_text(prompt)
|
||||
|
||||
Reference in New Issue
Block a user