from functools import lru_cache

from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    """Application configuration.

    Values are read from the environment, falling back to a local ``.env``
    file (``extra="ignore"`` lets unrelated env vars coexist without
    validation errors). All defaults below are development-safe placeholders;
    production deployments override them via the environment.
    """

    model_config = SettingsConfigDict(env_file=".env", extra="ignore")

    # URLs
    LOCAL_URL: str = "http://localhost"
    PUBLIC_URL: str = ""

    # Auth / JWT
    JWT_SECRET_KEY: str = "change-me-in-production"
    JWT_EXPIRE_MINUTES: int = 60 * 24 * 7  # 7 days

    # Database
    DATABASE_URL: str = "postgresql+asyncpg://congress:congress@postgres:5432/pocketveto"
    SYNC_DATABASE_URL: str = "postgresql://congress:congress@postgres:5432/pocketveto"

    # Redis
    REDIS_URL: str = "redis://redis:6379/0"

    # api.data.gov (shared key for Congress.gov and GovInfo)
    DATA_GOV_API_KEY: str = ""
    CONGRESS_POLL_INTERVAL_MINUTES: int = 30

    # LLM
    LLM_PROVIDER: str = "openai"  # openai | anthropic | gemini | ollama
    OPENAI_API_KEY: str = ""
    OPENAI_MODEL: str = "gpt-4o-mini"  # gpt-4o-mini: excellent JSON quality at ~10x lower cost than gpt-4o
    ANTHROPIC_API_KEY: str = ""
    ANTHROPIC_MODEL: str = "claude-sonnet-4-6"  # Sonnet matches Opus for structured tasks at ~5x lower cost
    GEMINI_API_KEY: str = ""
    GEMINI_MODEL: str = "gemini-2.0-flash"
    OLLAMA_BASE_URL: str = "http://host.docker.internal:11434"
    OLLAMA_MODEL: str = "llama3.1"

    # Max LLM requests per minute — Celery enforces this globally across all workers.
    # Safe defaults: free Gemini=15 RPM, Anthropic paid=50 RPM, OpenAI paid=500 RPM.
    # Raise this in .env once you confirm your API tier.
    LLM_RATE_LIMIT_RPM: int = 10

    # Google Civic Information API (zip → representative lookup)
    # Free key: https://console.cloud.google.com/apis/library/civicinfo.googleapis.com
    CIVIC_API_KEY: str = ""

    # News
    NEWSAPI_KEY: str = ""

    # pytrends
    PYTRENDS_ENABLED: bool = True

    # SMTP (Email notifications)
    SMTP_HOST: str = ""
    SMTP_PORT: int = 587
    SMTP_USER: str = ""
    SMTP_PASSWORD: str = ""
    SMTP_FROM: str = ""  # Defaults to SMTP_USER if blank
    SMTP_STARTTLS: bool = True


@lru_cache
def get_settings() -> Settings:
    """Return the process-wide Settings instance.

    ``lru_cache`` makes this a cheap singleton: the environment/.env is
    parsed once, and every caller shares the same object.
    """
    return Settings()


# Module-level convenience handle; import this for normal use.
settings = get_settings()