# .env — environment configuration for book-ingestor
# mem0 server (your LAN address)
MEM0_BASE_URL=http://192.168.0.200:8420
MEM0_AGENT_ID=knowledge_base
# book-ingestor posts to /knowledge — change if your server uses a different path
# MEM0_KNOWLEDGE_PATH=/knowledge
# Groq
GROQ_API_KEY=your_groq_key_here
GROQ_MODEL=meta-llama/llama-4-scout-17b-16e-instruct
# Folders (defaults work out of the box)
BOOKS_INBOX=./books/inbox
BOOKS_PROCESSING=./books/processing
BOOKS_DONE=./books/done
BOOKS_MANIFESTS=./books/manifests
# Chunking
CHUNK_SIZE_TOKENS=350
# Max sections before falling back to flat processing
# Prevents token burn on crappy OCR'd PDFs with hundreds of fake chapters
MAX_SECTIONS=64
# Throttling — delay in seconds between chunk POSTs (0 = no delay)
# Increase if Ollama/nomic embedder is pegging your GPU
# 0.5 = 2 chunks/sec, 1.0 = 1 chunk/sec
INGEST_DELAY=0.5
# Logging: DEBUG | INFO | WARNING
LOG_LEVEL=INFO