# .env — configuration for the book-ingestion pipeline (dotenv format).
# One KEY=value pair per line; comments on their own lines only, because
# some dotenv parsers fold a trailing "# ..." into the unquoted value.

# mem0 server (your LAN address)
MEM0_BASE_URL=http://192.168.0.200:8420
MEM0_AGENT_ID=knowledge_base

# Groq
GROQ_API_KEY=your_groq_key_here
GROQ_MODEL=meta-llama/llama-4-scout-17b-16e-instruct

# Folders (defaults work out of the box)
BOOKS_INBOX=./books/inbox
BOOKS_PROCESSING=./books/processing
BOOKS_DONE=./books/done
BOOKS_MANIFESTS=./books/manifests

# Chunking
CHUNK_SIZE_TOKENS=350

# Ingest throttle — seconds to sleep between chunks:
#   0.5  default: 2 chunks/sec, gentle on the GPU
#   1.0  1 chunk/sec — if GPU still runs hot
#   0.2  faster — if GPU handles it fine
#   0.0  no throttle — full speed, GPU on its own
INGEST_DELAY=0.5

# Logging: DEBUG | INFO | WARNING
LOG_LEVEL=INFO