# .env.example
# mem0 server (your LAN address)
MEM0_BASE_URL=http://192.168.0.200:8420
MEM0_AGENT_ID=knowledge_base
# book-ingestor posts to /knowledge — change if your server uses a different path
# MEM0_KNOWLEDGE_PATH=/knowledge
# Groq
GROQ_API_KEY=your_groq_key_here
GROQ_MODEL=meta-llama/llama-4-scout-17b-16e-instruct
# Folders (defaults work out of the box)
BOOKS_INBOX=./books/inbox
BOOKS_PROCESSING=./books/processing
BOOKS_DONE=./books/done
BOOKS_MANIFESTS=./books/manifests
# Chunking
CHUNK_SIZE_TOKENS=350
# Max sections before falling back to flat processing
# Prevents token burn on poorly OCR'd PDFs with hundreds of fake chapters
MAX_SECTIONS=64
# Throttling — delay in seconds between chunk POSTs (0 = no delay)
# Increase if Ollama/nomic embedder is pegging your GPU
# 0.5 = 2 chunks/sec, 1.0 = 1 chunk/sec
INGEST_DELAY=0.5
# Logging: DEBUG | INFO | WARNING
LOG_LEVEL=INFO