[GENERAL]
PORT = 3001 # Port to run the server on
SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
KEEP_ALIVE = "5m" # How long to keep Ollama models loaded in memory (use "-1m" instead of -1 to keep them loaded indefinitely)

[API_KEYS]
OPENAI = "" # OpenAI API key - sk-1234567890abcdef1234567890abcdef
GROQ = "" # Groq API key - gsk_1234567890abcdef1234567890abcdef
ANTHROPIC = "" # Anthropic API key - sk-ant-1234567890abcdef1234567890abcdef
GEMINI = "" # Gemini API key - sk-1234567890abcdef1234567890abcdef

[API_ENDPOINTS]
SEARXNG = "http://localhost:32768" # SearxNG API URL
OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434

[OLLAMA_CHAT_OPTIONS]
# Maps to the parameters documented here: https://v03.api.js.langchain.com/interfaces/_langchain_ollama.ChatOllamaInput.html
numCtx = 2048 # the default context window; some models require more

[OLLAMA_EMBEDDINGS_PARAMS]
# Maps to the parameters documented here: https://v03.api.js.langchain.com/interfaces/_langchain_ollama.OllamaEmbeddingsParams.html
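# Illustrative example only (not part of the stock config): fields from the
# OllamaEmbeddingsParams interface linked above can be set in this section,
# assuming the app passes them through to the embeddings client, e.g.:
# keepAlive = "5m" # hypothetical example; keepAlive is a documented OllamaEmbeddingsParams field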