chore: add README, LICENSE (GPL 3.0), and update .env.example

- README.md: feature overview, tech stack, quick-start guide
- LICENSE: GNU General Public License v3.0
- .env.example: add ENCRYPTION_SECRET_KEY, LLM_RATE_LIMIT_RPM, correct model defaults

Authored by: Jack Levy
This commit is contained in:
Jack Levy
2026-03-15 01:34:44 -04:00
parent a96bd024e9
commit d378f35cc5
3 changed files with 743 additions and 3 deletions

View File

@@ -10,6 +10,11 @@ PUBLIC_URL=
# Generate: python -c "import secrets; print(secrets.token_hex(32))"
JWT_SECRET_KEY=
# Fernet key for encrypting sensitive user prefs (ntfy passwords, etc.)
# Generate: python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"
# Set once and never change after data has been written.
ENCRYPTION_SECRET_KEY=
# ─── PostgreSQL ───────────────────────────────────────────────────────────────
POSTGRES_USER=congress
POSTGRES_PASSWORD=congress
@@ -35,18 +40,23 @@ CONGRESS_POLL_INTERVAL_MINUTES=30
LLM_PROVIDER=openai
OPENAI_API_KEY=
OPENAI_MODEL=gpt-4o
OPENAI_MODEL=gpt-4o-mini
ANTHROPIC_API_KEY=
ANTHROPIC_MODEL=claude-opus-4-6
ANTHROPIC_MODEL=claude-sonnet-4-6
GEMINI_API_KEY=
GEMINI_MODEL=gemini-1.5-pro
GEMINI_MODEL=gemini-2.0-flash
# For Ollama: use host.docker.internal to reach a locally running Ollama server
OLLAMA_BASE_URL=http://host.docker.internal:11434
OLLAMA_MODEL=llama3.1
# Max LLM requests per minute (Celery enforces this globally across all workers).
# Defaults: Gemini free=15, Anthropic paid=50, OpenAI paid=500.
# Lower this if you hit rate-limit errors on a restricted tier.
LLM_RATE_LIMIT_RPM=50
# ─── Google Civic Information API ─────────────────────────────────────────────
# Used for ZIP code → representative lookup in the Draft Letter panel.
# Free tier: 25,000 req/day. Enable the API at: