commit e7c12945df
Author: Patrick Wiltrout
Date: 2024-11-20 01:39:50 +07:00 (committed by GitHub)
4 changed files with 24 additions and 7 deletions

@@ -10,3 +10,7 @@ ANTHROPIC = "" # Anthropic API key - sk-ant-1234567890abcdef1234567890abcdef
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
 OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434
+
+[OLLAMA_PARAMS]
+TEMPERATURE = 0.7 # ollama default temp is 0.8
+NUM_CTX = 2_048 # ollama num_ctx default is 2048
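
Note that the new [OLLAMA_PARAMS] table uses a TOML digit separator (2_048), which any TOML-compliant parser reads as the integer 2048. A minimal sketch of that behavior, assuming a TOML 1.0-compatible library such as @iarna/toml (an assumption; this diff does not show which parser the project uses):

import toml from '@iarna/toml';

// TOML permits underscores between digits, so 2_048 parses to the number 2048.
const parsed = toml.parse('NUM_CTX = 2_048');
console.log(parsed.NUM_CTX); // 2048

The next two hunks thread these values through the server's typed config module.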

@@ -18,6 +18,10 @@ interface Config {
     SEARXNG: string;
     OLLAMA: string;
   };
+  OLLAMA_PARAMS: {
+    TEMPERATURE: number;
+    NUM_CTX: number;
+  };
 }
 
 type RecursivePartial<T> = {
@@ -45,6 +49,10 @@ export const getSearxngApiEndpoint = () =>
 export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
 
+export const getModelTemperature = () =>
+  loadConfig().OLLAMA_PARAMS.TEMPERATURE;
+export const getModelNumCtx = () => loadConfig().OLLAMA_PARAMS.NUM_CTX;
+
 export const updateConfig = (config: RecursivePartial<Config>) => {
   const currentConfig = loadConfig();
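
One caveat the diff leaves implicit: a config.toml written before this change has no [OLLAMA_PARAMS] table, so these getters would throw when reading a property of undefined. A hedged sketch of a defensive variant with Ollama's own defaults as fallbacks; the optional typing and the declare stub are illustrative assumptions, not code from this commit:

interface PartialOllamaConfig {
  OLLAMA_PARAMS?: { TEMPERATURE: number; NUM_CTX: number };
}

declare function loadConfig(): PartialOllamaConfig; // stand-in for the real loader

// Fall back to Ollama's documented defaults when the table is absent.
export const getModelTemperature = (): number =>
  loadConfig().OLLAMA_PARAMS?.TEMPERATURE ?? 0.8;
export const getModelNumCtx = (): number =>
  loadConfig().OLLAMA_PARAMS?.NUM_CTX ?? 2048;

The provider loader below consumes the two getters when constructing chat models.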

@@ -1,11 +1,12 @@
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
-import { getOllamaApiEndpoint } from '../../config';
+import { getModelNumCtx, getModelTemperature, getOllamaApiEndpoint } from '../../config';
 import logger from '../../utils/logger';
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 export const loadOllamaChatModels = async () => {
   const ollamaEndpoint = getOllamaApiEndpoint();
 
   if (!ollamaEndpoint) return {};
 
   try {
@@ -18,18 +19,22 @@ export const loadOllamaChatModels = async () => {
     const { models: ollamaModels } = (await response.json()) as any;
 
     const chatModels = ollamaModels.reduce((acc, model) => {
+      const modelTemperature = getModelTemperature();
+      const modelNumCtx = getModelNumCtx();
+
       acc[model.model] = {
         displayName: model.name,
         model: new ChatOllama({
           baseUrl: ollamaEndpoint,
           model: model.model,
-          temperature: 0.7,
+          temperature: modelTemperature,
+          numCtx: modelNumCtx,
         }),
       };
 
       return acc;
     }, {});
 
     return chatModels;
   } catch (err) {
     logger.error(`Error loading Ollama models: ${err}`);
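
The getters are called once per model inside the reduce, so every discovered model shares the same settings; hoisting the two calls above the loop would read the config once instead of once per model, though the result is identical. For context, a minimal usage sketch of ChatOllama with these options (the endpoint and model name are placeholders, not values from this commit):

import { ChatOllama } from '@langchain/community/chat_models/ollama';

const chat = new ChatOllama({
  baseUrl: 'http://localhost:11434', // placeholder; Ollama's default port
  model: 'llama3',                   // placeholder model name
  temperature: 0.7, // TEMPERATURE from [OLLAMA_PARAMS]
  numCtx: 2048,     // NUM_CTX; forwarded to Ollama as num_ctx
});

const res = await chat.invoke('Why is the sky blue?');
console.log(res.content);

The final two hunks touch the settings UI.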

@@ -281,7 +281,7 @@ const SettingsDialog = ({
           ? chatModelProvider.map((model) => ({
               value: model.name,
               label: model.displayName,
-            }))
+            })).sort((a, b) => a.label.localeCompare(b.label))
           : [
               {
                 value: '',
@@ -392,7 +392,7 @@ const SettingsDialog = ({
           ? embeddingModelProvider.map((model) => ({
               label: model.displayName,
               value: model.name,
-            }))
+            })).sort((a, b) => a.label.localeCompare(b.label))
           : [
               {
                 label: 'No embedding models available',
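
These two hunks sort the chat-model and embedding-model dropdowns by display name. Because .sort() is chained onto the fresh array returned by .map(), its in-place mutation touches only that copy, and localeCompare orders mixed-case names more sensibly than a plain < comparison. A small self-contained illustration (the model names are made up):

const models = [
  { value: 'mistral', label: 'Mistral 7B' },
  { value: 'llama3', label: 'Llama 3' },
  { value: 'gemma', label: 'Gemma 2B' },
];

const sorted = models
  .map((m) => ({ value: m.value, label: m.label }))
  .sort((a, b) => a.label.localeCompare(b.label));

console.log(sorted.map((m) => m.label)); // [ 'Gemma 2B', 'Llama 3', 'Mistral 7B' ]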