Merge 62411d3ea6 into 874505cd0e
commit e7c12945df
4 changed files with 24 additions and 7 deletions
@@ -10,3 +10,7 @@ ANTHROPIC = "" # Anthropic API key - sk-ant-1234567890abcdef1234567890abcdef
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
 OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434
+
+[OLLAMA_PARAMS]
+TEMPERATURE = 0.7 # ollama default temp is 0.8
+NUM_CTX = 2_048 # ollama num_ctx default is 2048
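The new [OLLAMA_PARAMS] table records both knobs next to their upstream defaults: Ollama itself ships with temperature 0.8 and a 2048-token context window (num_ctx), so the 0.7 here is slightly more deterministic than stock, and 2_048 uses TOML's underscore digit separator for readability. A minimal sketch of what a TOML parser yields for this table (assuming a Node loader built on @iarna/toml; the file name is illustrative):

import fs from 'fs';
import toml from '@iarna/toml';

// Illustrative path; the repo's actual config file name may differ.
const raw = fs.readFileSync('sample.config.toml', 'utf-8');
const parsed = toml.parse(raw) as any;

// TOML's 2_048 is only a digit separator: it parses to the plain number 2048.
console.log(parsed.OLLAMA_PARAMS.TEMPERATURE); // 0.7
console.log(parsed.OLLAMA_PARAMS.NUM_CTX); // 2048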
@@ -18,6 +18,10 @@ interface Config {
     SEARXNG: string;
     OLLAMA: string;
   };
+  OLLAMA_PARAMS: {
+    TEMPERATURE: number;
+    NUM_CTX: number;
+  }
 }
 
 type RecursivePartial<T> = {
@@ -45,6 +49,10 @@ export const getSearxngApiEndpoint = () =>
 
 export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;
 
+export const getModelTemperature = () => loadConfig().OLLAMA_PARAMS.TEMPERATURE;
+
+export const getModelNumCtx = () => loadConfig().OLLAMA_PARAMS.NUM_CTX;
+
 export const updateConfig = (config: RecursivePartial<Config>) => {
   const currentConfig = loadConfig();
 
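The two new getters follow the one-liner pattern of the existing endpoint accessors. As written they assume the parsed config actually contains OLLAMA_PARAMS; a config file generated before this change would make them return undefined. A hedged sketch of a more defensive variant inside the same module, falling back to the defaults documented in the sample config (not the PR's code):

// Sketch only — the PR reads the values directly. These variants add optional
// chaining and fallbacks so configs written before [OLLAMA_PARAMS] existed
// don't yield undefined.
export const getModelTemperature = () =>
  loadConfig().OLLAMA_PARAMS?.TEMPERATURE ?? 0.7;

export const getModelNumCtx = () => loadConfig().OLLAMA_PARAMS?.NUM_CTX ?? 2048;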
@@ -1,11 +1,12 @@
 import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
-import { getOllamaApiEndpoint } from '../../config';
+import { getModelNumCtx, getModelTemperature, getOllamaApiEndpoint } from '../../config';
 import logger from '../../utils/logger';
 import { ChatOllama } from '@langchain/community/chat_models/ollama';
 
 export const loadOllamaChatModels = async () => {
   const ollamaEndpoint = getOllamaApiEndpoint();
 
+
   if (!ollamaEndpoint) return {};
 
   try {
@@ -18,18 +19,22 @@ export const loadOllamaChatModels = async () => {
     const { models: ollamaModels } = (await response.json()) as any;
 
     const chatModels = ollamaModels.reduce((acc, model) => {
+      const modelTemperature = getModelTemperature();
+      const modelNumCtx = getModelNumCtx();
+
       acc[model.model] = {
         displayName: model.name,
         model: new ChatOllama({
           baseUrl: ollamaEndpoint,
           model: model.model,
-          temperature: 0.7,
+          temperature: modelTemperature,
+          numCtx: modelNumCtx,
         }),
       };
 
       return acc;
     }, {});
 
     return chatModels;
   } catch (err) {
     logger.error(`Error loading Ollama models: ${err}`);
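With that, every ChatOllama instance is constructed from the configured temperature and context length instead of the hard-coded 0.7. One observation: the getters are invoked once per model inside the reduce callback, so if loadConfig re-parses the config file on each call, the file is read once per model. A sketch of an equivalent hoisted version that reads the config once (assuming the surrounding loadOllamaChatModels scope):

// Sketch: same result, but the config is read a single time rather than
// once per model inside the reduce callback.
const modelTemperature = getModelTemperature();
const modelNumCtx = getModelNumCtx();

const chatModels = ollamaModels.reduce((acc: Record<string, any>, model: any) => {
  acc[model.model] = {
    displayName: model.name,
    model: new ChatOllama({
      baseUrl: ollamaEndpoint,
      model: model.model,
      temperature: modelTemperature,
      numCtx: modelNumCtx,
    }),
  };
  return acc;
}, {});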
@@ -281,7 +281,7 @@ const SettingsDialog = ({
               ? chatModelProvider.map((model) => ({
                   value: model.name,
                   label: model.displayName,
-                }))
+                })).sort((a, b) => a.label.localeCompare(b.label))
               : [
                   {
                     value: '',
@@ -392,7 +392,7 @@ const SettingsDialog = ({
               ? embeddingModelProvider.map((model) => ({
                   label: model.displayName,
                   value: model.name,
-                }))
+                })).sort((a, b) => a.label.localeCompare(b.label))
               : [
                   {
                     label: 'No embedding models available',
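Both settings dropdowns previously listed models in whatever order the provider returned them; chaining .sort((a, b) => a.label.localeCompare(b.label)) onto the freshly mapped array sorts the options alphabetically by display name, and mutating that array is safe here because map returns a new copy. A self-contained illustration (model names are made up):

const options = [
  { value: 'mistral', label: 'Mistral' },
  { value: 'llama3', label: 'Llama 3' },
  { value: 'codellama', label: 'CodeLlama' },
];

// localeCompare orders by display label, handling case and accents sensibly.
const sorted = options.sort((a, b) => a.label.localeCompare(b.label));
console.log(sorted.map((o) => o.label)); // [ 'CodeLlama', 'Llama 3', 'Mistral' ]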