[GENERAL]
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")

[MODELS.OPENAI]
API_KEY = ""

[MODELS.GROQ]
API_KEY = ""

[MODELS.ANTHROPIC]
API_KEY = ""

[MODELS.GEMINI]
API_KEY = ""

[MODELS.CUSTOM_OPENAI]
API_KEY = ""
API_URL = ""
MODEL_NAME = ""

[MODELS.OLLAMA]
API_URL = "" # Ollama API URL - http://host.docker.internal:11434

[MODELS.DEEPSEEK]
API_KEY = ""

[MODELS.AIMLAPI]
API_KEY = "" # Required to use AI/ML API chat and embedding models

[MODELS.LM_STUDIO]
API_URL = "" # LM Studio API URL - http://host.docker.internal:1234

[API_ENDPOINTS]
SEARXNG = "" # SearxNG API URL - http://localhost:32768