Merge 6edac6938c into 7ec201d011
Commit 0d7d5940c1
5 changed files with 191 additions and 18 deletions
@@ -17,8 +17,9 @@ interface Config {
     GEMINI: string;
   };
   API_ENDPOINTS: {
-    SEARXNG: string;
     OLLAMA: string;
+    LMSTUDIO: string;
+    SEARXNG: string;
   };
 }

@@ -51,6 +52,8 @@ export const getSearxngApiEndpoint = () =>

 export const getOllamaApiEndpoint = () => loadConfig().API_ENDPOINTS.OLLAMA;

+export const getLMStudioApiEndpoint = () => loadConfig().API_ENDPOINTS.LMSTUDIO;
+
 export const updateConfig = (config: RecursivePartial<Config>) => {
   const currentConfig = loadConfig();

@@ -72,6 +75,27 @@ export const updateConfig = (config: RecursivePartial<Config>) => {
     }
   }

+  /*
+  export const updateConfig = (config: RecursivePartial<Config>) => {
+    const currentConfig = loadConfig();
+
+    // Merge existing config with new values
+    const mergedConfig: RecursivePartial<Config> = {
+      GENERAL: {
+        ...currentConfig.GENERAL,
+        ...config.GENERAL,
+      },
+      API_KEYS: {
+        ...currentConfig.API_KEYS,
+        ...config.API_KEYS,
+      },
+      API_ENDPOINTS: {
+        ...currentConfig.API_ENDPOINTS,
+        ...config.API_ENDPOINTS,
+      },
+    };
+  */
+
   fs.writeFileSync(
     path.join(__dirname, `../${configFileName}`),
     toml.stringify(config),

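Note (not part of the diff): a minimal sketch of how the new LMSTUDIO entry and getter could be exercised. The import path and the localhost URL are assumptions (LM Studio's local server usually defaults to port 1234), and it assumes the key-by-key merge loop above this hunk is unchanged.

```ts
// Hypothetical usage sketch; the import path and URL are assumptions.
import { getLMStudioApiEndpoint, updateConfig } from './config';

// Assuming the existing merge loop in updateConfig still fills in missing keys,
// a partial object like this only sets the LMSTUDIO endpoint.
updateConfig({
  API_ENDPOINTS: {
    LMSTUDIO: 'http://localhost:1234/v1', // assumed LM Studio default
  },
});

console.log(getLMStudioApiEndpoint()); // -> 'http://localhost:1234/v1'
```
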
@@ -4,6 +4,7 @@ import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
 import { loadAnthropicChatModels } from './anthropic';
 import { loadTransformersEmbeddingsModels } from './transformers';
 import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
+import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';

 const chatModelProviders = {
   openai: loadOpenAIChatModels,

@@ -11,6 +12,7 @@ const chatModelProviders = {
   ollama: loadOllamaChatModels,
   anthropic: loadAnthropicChatModels,
   gemini: loadGeminiChatModels,
+  lm_studio: loadLMStudioChatModels,
 };

 const embeddingModelProviders = {

@@ -18,6 +20,7 @@ const embeddingModelProviders = {
   local: loadTransformersEmbeddingsModels,
   ollama: loadOllamaEmbeddingsModels,
   gemini: loadGeminiEmbeddingsModels,
+  lm_studio: loadLMStudioEmbeddingsModels,
 };

 export const getAvailableChatModelProviders = async () => {

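Note (not part of the diff): getAvailableChatModelProviders appears only as hunk context here, so the sketch below assumes its unchanged behaviour of walking the provider maps and returning a provider-to-models record; the import path is also an assumption.

```ts
// Hypothetical consumer sketch for the new lm_studio registry entries.
import { getAvailableChatModelProviders } from './lib/providers';

(async () => {
  const providers = await getAvailableChatModelProviders();
  const lmStudioModels = providers['lm_studio'] ?? {};
  console.log(Object.keys(lmStudioModels)); // model ids reported by the LM Studio server
})();
```
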
src/lib/providers/lmstudio.ts (new file, 89 lines)
@@ -0,0 +1,89 @@
import { OpenAIEmbeddings } from '@langchain/openai';
import { ChatOpenAI } from '@langchain/openai';
import { getKeepAlive, getLMStudioApiEndpoint } from '../../config';
import logger from '../../utils/logger';
import axios from 'axios';

interface LMStudioModel {
  id: string;
  // add other properties if LM Studio API provides them
}

interface ChatModelConfig {
  displayName: string;
  model: ChatOpenAI;
}

export const loadLMStudioChatModels = async (): Promise<Record<string, ChatModelConfig>> => {
  const lmStudioEndpoint = getLMStudioApiEndpoint();

  if (!lmStudioEndpoint) {
    logger.debug('LM Studio endpoint not configured, skipping');
    return {};
  }

  try {
    const response = await axios.get<{ data: LMStudioModel[] }>(`${lmStudioEndpoint}/models`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const lmStudioModels = response.data.data;

    const chatModels = lmStudioModels.reduce<Record<string, ChatModelConfig>>((acc, model) => {
      acc[model.id] = {
        displayName: model.id,
        model: new ChatOpenAI({
          openAIApiKey: 'lm-studio',
          configuration: {
            baseURL: lmStudioEndpoint,
          },
          modelName: model.id,
          temperature: 0.7,
        }),
      };
      return acc;
    }, {});

    return chatModels;
  } catch (err) {
    logger.error(`Error loading LM Studio models: ${err}`);
    return {};
  }
};

export const loadLMStudioEmbeddingsModels = async () => {
  const lmStudioEndpoint = getLMStudioApiEndpoint();

  if (!lmStudioEndpoint) return {};

  try {
    const response = await axios.get(`${lmStudioEndpoint}/models`, {
      headers: {
        'Content-Type': 'application/json',
      },
    });

    const lmStudioModels = response.data.data;

    const embeddingsModels = lmStudioModels.reduce((acc, model) => {
      acc[model.id] = {
        displayName: model.id,
        model: new OpenAIEmbeddings({
          openAIApiKey: 'lm-studio', // Dummy key required by LangChain
          configuration: {
            baseURL: lmStudioEndpoint,
          },
          modelName: model.id,
        }),
      };
      return acc;
    }, {});

    return embeddingsModels;
  } catch (err) {
    logger.error(`Error loading LM Studio embeddings model: ${err}`);
    return {};
  }
};

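Note (not part of the diff): the loaders treat the LM Studio server as an OpenAI-compatible endpoint via LangChain's ChatOpenAI and OpenAIEmbeddings, so the placeholder API key ('lm-studio') is enough and only baseURL is pointed at the configured endpoint. (getKeepAlive is imported but not used.) A minimal sketch of calling the chat loader directly, assuming LMSTUDIO is configured and an LM Studio server is running:

```ts
// Hypothetical smoke test; the relative import path is an assumption.
import { loadLMStudioChatModels } from './src/lib/providers/lmstudio';

(async () => {
  const models = await loadLMStudioChatModels();
  // Keys and display names are both the model ids returned by GET <endpoint>/models.
  for (const [id, { displayName }] of Object.entries(models)) {
    console.log(id, displayName);
  }
})();
```
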
@@ -6,6 +6,7 @@ import {
 import {
   getGroqApiKey,
   getOllamaApiEndpoint,
+  getLMStudioApiEndpoint,
   getAnthropicApiKey,
   getGeminiApiKey,
   getOpenaiApiKey,

@@ -51,6 +52,7 @@ router.get('/', async (_, res) => {

   config['openaiApiKey'] = getOpenaiApiKey();
   config['ollamaApiUrl'] = getOllamaApiEndpoint();
+  config['lmStudioApiUrl'] = getLMStudioApiEndpoint();
   config['anthropicApiKey'] = getAnthropicApiKey();
   config['groqApiKey'] = getGroqApiKey();
   config['geminiApiKey'] = getGeminiApiKey();

@@ -74,6 +76,7 @@ router.post('/', async (req, res) => {
     },
     API_ENDPOINTS: {
       OLLAMA: config.ollamaApiUrl,
+      LMSTUDIO: config.lmStudioApiUrl,
     },
   };

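Note (not part of the diff): with these route changes the settings API reads and writes the endpoint under the lmStudioApiUrl key. A sketch of the round trip, assuming the router is mounted at /api/config and the backend listens on port 3001 (both assumptions):

```ts
// Hypothetical client-side round trip; the base URL is an assumption.
const base = 'http://localhost:3001/api/config';

(async () => {
  // GET now also returns lmStudioApiUrl alongside ollamaApiUrl and the API keys.
  const current = await fetch(base).then((res) => res.json());

  // POST maps lmStudioApiUrl onto API_ENDPOINTS.LMSTUDIO before saving.
  await fetch(base, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      ...current,
      lmStudioApiUrl: 'http://localhost:1234/v1', // assumed LM Studio default
    }),
  });
})();
```
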
@@ -11,6 +11,8 @@ import {
   StopCircle,
   Layers3,
   Plus,
+  Brain,
+  ChevronDown,
 } from 'lucide-react';
 import Markdown from 'markdown-to-jsx';
 import Copy from './MessageActions/Copy';

@@ -41,26 +43,48 @@ const MessageBox = ({
 }) => {
   const [parsedMessage, setParsedMessage] = useState(message.content);
   const [speechMessage, setSpeechMessage] = useState(message.content);
+  const [thinking, setThinking] = useState<string>('');
+  const [answer, setAnswer] = useState<string>('');
+  const [isThinkingExpanded, setIsThinkingExpanded] = useState(false);

   useEffect(() => {
     const regex = /\[(\d+)\]/g;

-    if (
-      message.role === 'assistant' &&
-      message?.sources &&
-      message.sources.length > 0
-    ) {
-      return setParsedMessage(
-        message.content.replace(
-          regex,
-          (_, number) =>
-            `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
-        ),
-      );
-    } else {
-      setParsedMessage(message.content);
-    }
-    setSpeechMessage(message.content.replace(regex, ''));
+    // First check for thinking content
+    const match = message.content.match(/<think>(.*?)<\/think>(.*)/s);
+    if (match) {
+      const [_, thinkingContent, answerContent] = match;
+      setThinking(thinkingContent.trim());
+      setAnswer(answerContent.trim());
+
+      // Process the answer part for sources if needed
+      if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
+        setParsedMessage(
+          answerContent.trim().replace(
+            regex,
+            (_, number) =>
+              `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+          ),
+        );
+      } else {
+        setParsedMessage(answerContent.trim());
+      }
+      setSpeechMessage(answerContent.trim().replace(regex, ''));
+    } else {
+      // No thinking content - process as before
+      if (message.role === 'assistant' && message?.sources && message.sources.length > 0) {
+        setParsedMessage(
+          message.content.replace(
+            regex,
+            (_, number) =>
+              `<a href="${message.sources?.[number - 1]?.metadata?.url}" target="_blank" className="bg-light-secondary dark:bg-dark-secondary px-1 rounded ml-1 no-underline text-xs text-black/70 dark:text-white/70 relative">${number}</a>`,
+          ),
+        );
+      }
+
+      setSpeechMessage(message.content.replace(regex, ''));
+    }
   }, [message.content, message.sources, message.role]);

   const { speechStatus, start, stop } = useSpeech({ text: speechMessage });

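Note (not part of the diff): the new effect first splits the message on a <think>…</think> block, and only the remainder is parsed for citation links and speech. A small sketch of what the regex extracts (the sample string is made up):

```ts
// Regex taken from the diff; the content string is only an example.
const content = '<think>Weighing the sources first.</think>The answer is 42 [1].';
const match = content.match(/<think>(.*?)<\/think>(.*)/s);

if (match) {
  const [, thinkingContent, answerContent] = match;
  console.log(thinkingContent.trim()); // "Weighing the sources first."
  console.log(answerContent.trim()); // "The answer is 42 [1]."
}
```
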
@@ -81,6 +105,37 @@ const MessageBox = ({
             ref={dividerRef}
             className="flex flex-col space-y-6 w-full lg:w-9/12"
           >
+            {thinking && (
+              <div className="flex flex-col space-y-2 mb-4">
+                <button
+                  onClick={() => setIsThinkingExpanded(!isThinkingExpanded)}
+                  className="flex flex-row items-center space-x-2 group text-black/70 dark:text-white/70 hover:text-black dark:hover:text-white transition duration-200"
+                >
+                  <Brain size={20} />
+                  <h3 className="font-medium text-xl">View Thinking</h3>
+                  <ChevronDown
+                    size={16}
+                    className={cn(
+                      "transition-transform duration-200",
+                      isThinkingExpanded ? "rotate-180" : ""
+                    )}
+                  />
+                </button>
+
+                {isThinkingExpanded && (
+                  <div className="rounded-lg bg-light-secondary/50 dark:bg-dark-secondary/50 p-4">
+                    <Markdown
+                      className={cn(
+                        'prose dark:prose-invert text-sm leading-relaxed',
+                        'max-w-none break-words'
+                      )}
+                    >
+                      {thinking}
+                    </Markdown>
+                  </div>
+                )}
+              </div>
+            )}
             {message.sources && message.sources.length > 0 && (
               <div className="flex flex-col space-y-2">
                 <div className="flex flex-row items-center space-x-2">

@@ -199,4 +254,3 @@ const MessageBox = ({
   );
 };
-
 export default MessageBox;