
LM Studio Integration:
- Added LM Studio provider with OpenAI-compatible API support
- Dynamic model discovery via the /v1/models endpoint
- Support for both chat and embeddings models
- Docker-compatible networking configuration

Thinking Model Panel:
- Added a collapsible UI panel for the model's chain of thought
- Parses responses with <think> tags to separate reasoning from the final answer
- Maintains backward compatibility with regular responses
- Styled consistently with the app theme for light/dark modes
- Preserves all existing message functionality (sources, markdown, etc.)

These improvements enhance the app's compatibility with local LLMs and give better visibility into model reasoning while preserving existing functionality.
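As a rough sketch of the <think>-tag handling described above — the function name and return shape here are illustrative assumptions, not the app's actual API:

// Split a model response into hidden reasoning and the visible answer.
// Responses without <think> tags pass through untouched, which keeps
// backward compatibility with regular (non-thinking) models.
interface ParsedResponse {
  reasoning: string | null; // contents of the <think> block, if present
  answer: string; // the text rendered in the chat bubble
}

const parseThinkingResponse = (raw: string): ParsedResponse => {
  const match = raw.match(/<think>([\s\S]*?)<\/think>/);
  if (!match) return { reasoning: null, answer: raw };
  return {
    reasoning: match[1].trim(),
    answer: raw.replace(match[0], '').trim(),
  };
};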
52 lines · 1.6 KiB · TypeScript
import { loadGroqChatModels } from './groq';
import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
import { loadLMStudioChatModels, loadLMStudioEmbeddingsModels } from './lmstudio';

// Provider keys mapped to loader functions. Each loader returns the
// models that provider currently exposes (possibly none).
const chatModelProviders = {
  openai: loadOpenAIChatModels,
  groq: loadGroqChatModels,
  ollama: loadOllamaChatModels,
  anthropic: loadAnthropicChatModels,
  gemini: loadGeminiChatModels,
  lm_studio: loadLMStudioChatModels,
};

const embeddingModelProviders = {
  openai: loadOpenAIEmbeddingsModels,
  local: loadTransformersEmbeddingsModels,
  ollama: loadOllamaEmbeddingsModels,
  gemini: loadGeminiEmbeddingsModels,
  lm_studio: loadLMStudioEmbeddingsModels,
};

export const getAvailableChatModelProviders = async () => {
  const models = {};

  // Only include providers that return at least one model, so
  // unconfigured or unreachable providers stay hidden from the UI.
  for (const provider in chatModelProviders) {
    const providerModels = await chatModelProviders[provider]();
    if (Object.keys(providerModels).length > 0) {
      models[provider] = providerModels;
    }
  }

  // The custom OpenAI-compatible provider is always exposed, with no
  // preset models.
  models['custom_openai'] = {};

  return models;
};

export const getAvailableEmbeddingModelProviders = async () => {
  const models = {};

  for (const provider in embeddingModelProviders) {
    const providerModels = await embeddingModelProviders[provider]();
    if (Object.keys(providerModels).length > 0) {
      models[provider] = providerModels;
    }
  }

  return models;
};
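For context, the imported loadLMStudioChatModels can be imagined roughly as below: dynamic model discovery against LM Studio's OpenAI-compatible /v1/models endpoint. The base URL, environment variable, and return shape are assumptions for illustration, not the project's actual code; the real loader also has to construct client instances and handle Docker networking (e.g. host.docker.internal instead of localhost).

// Sketch: discover chat models from a running LM Studio server.
// LM_STUDIO_API_URL is an assumed configuration source; inside Docker
// it would typically point at host.docker.internal, not localhost.
const LM_STUDIO_BASE_URL =
  process.env.LM_STUDIO_API_URL ?? 'http://localhost:1234/v1';

export const loadLMStudioChatModels = async () => {
  try {
    // LM Studio mirrors OpenAI's GET /v1/models listing.
    const res = await fetch(`${LM_STUDIO_BASE_URL}/models`);
    if (!res.ok) return {};
    const { data } = (await res.json()) as { data: { id: string }[] };

    // Key each discovered model by its id, mirroring the provider maps above.
    const models: Record<string, { displayName: string }> = {};
    for (const model of data) {
      models[model.id] = { displayName: model.id };
    }
    return models;
  } catch {
    // Server not running or unreachable: report no models, so the
    // provider is omitted by getAvailableChatModelProviders().
    return {};
  }
};

Note that a loader returning an empty object is simply dropped by the length check in getAvailableChatModelProviders, so an unreachable LM Studio server never surfaces in the model picker.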