Merge pull request #2 from ItzCrazyKns/master

merge
overcuriousity authored 2024-07-10 20:10:39 +02:00, committed by GitHub
commit 859cf113dc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 81 additions and 14 deletions

@@ -47,7 +47,7 @@ const generateSuggestions = (
   input: SuggestionGeneratorInput,
   llm: BaseChatModel,
 ) => {
-  (llm as ChatOpenAI).temperature = 0;
+  (llm as unknown as ChatOpenAI).temperature = 0;
   const suggestionGeneratorChain = createSuggestionGeneratorChain(llm);
   return suggestionGeneratorChain.invoke(input);
 };
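
Note: the switch from `(llm as ChatOpenAI)` to `(llm as unknown as ChatOpenAI)` is the standard TypeScript double assertion. A direct assertion is rejected (TS2352) when neither type is assignable to the other, which can happen here when BaseChatModel and ChatOpenAI resolve to different copies of the @langchain packages; widening to `unknown` first always compiles, at the cost of type safety. A minimal sketch with illustrative stand-in classes (not the real LangChain types):

// Stand-ins only: classes with distinct private fields are never mutually
// assignable, even when their public shapes match.
class Base {
  private tag = 'base';
  invoke(input: string): string {
    return input;
  }
}

class Concrete {
  private tag = 'concrete';
  temperature = 1;
  invoke(input: string): string {
    return input;
  }
}

const llm: Base = new Base();

// (llm as Concrete).temperature = 0;         // error TS2352: insufficient overlap
(llm as unknown as Concrete).temperature = 0; // compiles; `unknown` bridges the gap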

@@ -5,6 +5,8 @@ import logger from '../../utils/logger';
 export const loadGroqChatModels = async () => {
   const groqApiKey = getGroqApiKey();
 
+  if (!groqApiKey) return {};
+
   try {
     const chatModels = {
       'LLaMA3 8b': new ChatOpenAI(
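
The new guard clause makes an unconfigured provider indistinguishable from a provider with zero models: both yield an empty object instead of failing inside the try block. A simplified sketch of the loader pattern, assuming a Groq-style OpenAI-compatible endpoint (the option names and model list are illustrative, not the file's full contents):

import { ChatOpenAI } from '@langchain/openai';

// Sketch of the loader contract: Record<displayName, model>, empty when
// the provider is not configured.
export const loadGroqChatModelsSketch = async (): Promise<Record<string, ChatOpenAI>> => {
  const groqApiKey = process.env.GROQ_API_KEY; // stand-in for getGroqApiKey()

  if (!groqApiKey) return {}; // unconfigured: empty map, not an error

  return {
    'LLaMA3 8b': new ChatOpenAI({
      openAIApiKey: groqApiKey,
      modelName: 'llama3-8b-8192',
      temperature: 0.7,
      configuration: { baseURL: 'https://api.groq.com/openai/v1' },
    }),
  };
};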

@@ -1,7 +1,7 @@
 import { loadGroqChatModels } from './groq';
-import { loadOllamaChatModels } from './ollama';
-import { loadOpenAIChatModels, loadOpenAIEmbeddingsModel } from './openai';
-import { loadTransformersEmbeddingsModel } from './transformers';
+import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
+import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
+import { loadTransformersEmbeddingsModels } from './transformers';
 
 const chatModelProviders = {
   openai: loadOpenAIChatModels,
@@ -10,18 +10,23 @@ const chatModelProviders = {
 };
 
 const embeddingModelProviders = {
-  openai: loadOpenAIEmbeddingsModel,
-  local: loadTransformersEmbeddingsModel,
-  ollama: loadOllamaChatModels,
+  openai: loadOpenAIEmbeddingsModels,
+  local: loadTransformersEmbeddingsModels,
+  ollama: loadOllamaEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
   const models = {};
 
   for (const provider in chatModelProviders) {
-    models[provider] = await chatModelProviders[provider]();
+    const providerModels = await chatModelProviders[provider]();
+    if (Object.keys(providerModels).length > 0) {
+      models[provider] = providerModels
+    }
   }
 
+  models['custom_openai'] = {}
+
   return models;
 };
@@ -29,7 +34,10 @@ export const getAvailableEmbeddingModelProviders = async () => {
   const models = {};
 
   for (const provider in embeddingModelProviders) {
-    models[provider] = await embeddingModelProviders[provider]();
+    const providerModels = await embeddingModelProviders[provider]();
+    if (Object.keys(providerModels).length > 0) {
+      models[provider] = providerModels
+    }
  }
 
   return models;
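
This hunk also fixes a wiring bug: the embeddings registry previously mapped ollama to loadOllamaChatModels. With both registries now filtering on Object.keys(...).length, callers only ever see providers that produced at least one model, plus the always-present custom_openai placeholder for user-supplied OpenAI-compatible endpoints. A hypothetical usage sketch (the import path is an assumption):

import { getAvailableChatModelProviders } from './lib/providers';

const providers = await getAvailableChatModelProviders();

// With only OPENAI_API_KEY configured, expect something like:
//   { openai: { 'GPT-3.5 turbo': ChatOpenAI, ... }, custom_openai: {} }
for (const [provider, models] of Object.entries(providers)) {
  console.log(provider, '->', Object.keys(models));
}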

@@ -6,6 +6,8 @@ import { ChatOllama } from '@langchain/community/chat_models/ollama';
 export const loadOllamaChatModels = async () => {
   const ollamaEndpoint = getOllamaApiEndpoint();
 
+  if (!ollamaEndpoint) return {};
+
   try {
     const response = await fetch(`${ollamaEndpoint}/api/tags`, {
       headers: {
@@ -31,9 +33,11 @@ export const loadOllamaChatModels = async () => {
   }
 };
 
-export const loadOpenAIEmbeddingsModel = async () => {
+export const loadOllamaEmbeddingsModels = async () => {
   const ollamaEndpoint = getOllamaApiEndpoint();
 
+  if (!ollamaEndpoint) return {};
+
   try {
     const response = await fetch(`${ollamaEndpoint}/api/tags`, {
       headers: {
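
Besides correcting the copy-pasted loadOpenAIEmbeddingsModel name, both Ollama loaders share the same discovery flow: query the endpoint's /api/tags route (Ollama's list-installed-models API) and build one model instance per entry. A condensed sketch of that flow, reconstructed around the lines visible above:

import { ChatOllama } from '@langchain/community/chat_models/ollama';

interface OllamaTagsResponse {
  models: { name: string }[]; // /api/tags also returns size, digest, etc.
}

export const loadOllamaChatModelsSketch = async (ollamaEndpoint: string) => {
  if (!ollamaEndpoint) return {};

  const response = await fetch(`${ollamaEndpoint}/api/tags`, {
    headers: { 'Content-Type': 'application/json' },
  });
  const { models } = (await response.json()) as OllamaTagsResponse;

  // One ChatOllama per installed model, keyed by its Ollama model name.
  return models.reduce<Record<string, ChatOllama>>((acc, model) => {
    acc[model.name] = new ChatOllama({
      baseUrl: ollamaEndpoint,
      model: model.name,
      temperature: 0.7,
    });
    return acc;
  }, {});
};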

@@ -5,6 +5,8 @@ import logger from '../../utils/logger';
 export const loadOpenAIChatModels = async () => {
   const openAIApiKey = getOpenaiApiKey();
 
+  if (!openAIApiKey) return {};
+
   try {
     const chatModels = {
       'GPT-3.5 turbo': new ChatOpenAI({
@@ -36,9 +38,11 @@ export const loadOpenAIChatModels = async () => {
   }
 };
 
-export const loadOpenAIEmbeddingsModel = async () => {
+export const loadOpenAIEmbeddingsModels = async () => {
   const openAIApiKey = getOpenaiApiKey();
 
+  if (!openAIApiKey) return {};
+
   try {
     const embeddingModels = {
       'Text embedding 3 small': new OpenAIEmbeddings({

@@ -1,7 +1,7 @@
 import logger from '../../utils/logger';
 import { HuggingFaceTransformersEmbeddings } from '../huggingfaceTransformer';
 
-export const loadTransformersEmbeddingsModel = async () => {
+export const loadTransformersEmbeddingsModels = async () => {
   try {
     const embeddingModels = {
       'BGE Small': new HuggingFaceTransformersEmbeddings({
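
The transformers loader is the only one that gains no guard clause, since the local HuggingFace models need no API key or endpoint. A hypothetical usage sketch of the returned embeddings (the model id is an assumption; 'BGE Small' suggests a bge-small checkpoint):

import { HuggingFaceTransformersEmbeddings } from './huggingfaceTransformer';

const embeddings = new HuggingFaceTransformersEmbeddings({
  modelName: 'Xenova/bge-small-en-v1.5', // assumed id for 'BGE Small'
});

// embedQuery comes from LangChain's Embeddings interface.
const vector = await embeddings.embedQuery('What is Perplexica?');
console.log(vector.length); // 384 for bge-small checkpoints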

@@ -45,7 +45,7 @@ export const handleConnection = async (
       chatModelProviders[chatModelProvider][chatModel] &&
       chatModelProvider != 'custom_openai'
     ) {
-      llm = chatModelProviders[chatModelProvider][chatModel] as
+      llm = chatModelProviders[chatModelProvider][chatModel] as unknown as
         | BaseChatModel
         | undefined;
     } else if (chatModelProvider == 'custom_openai') {
@@ -56,7 +56,7 @@ export const handleConnection = async (
         configuration: {
           baseURL: searchParams.get('openAIBaseURL'),
         },
-      });
+      }) as unknown as BaseChatModel;
     }
 
     if (
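
Both casts here mirror the double assertion from the suggestion generator: the registry values and the freshly constructed ChatOpenAI are only accepted as BaseChatModel via unknown. A condensed sketch of the custom_openai branch (only openAIBaseURL is visible in the hunk; the other parameter names are assumptions):

import { ChatOpenAI } from '@langchain/openai';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';

const buildCustomOpenAI = (searchParams: URLSearchParams): BaseChatModel =>
  new ChatOpenAI({
    modelName: searchParams.get('chatModel')!,       // assumed parameter name
    openAIApiKey: searchParams.get('openAIApiKey')!, // assumed parameter name
    temperature: 0.7,
    configuration: {
      baseURL: searchParams.get('openAIBaseURL'),
    },
  }) as unknown as BaseChatModel;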

@@ -83,6 +83,55 @@ const useSocket = (
           'embeddingModelProvider',
           embeddingModelProvider,
         );
+      } else {
+        const providers = await fetch(
+          `${process.env.NEXT_PUBLIC_API_URL}/models`,
+          {
+            headers: {
+              'Content-Type': 'application/json',
+            },
+          },
+        ).then(async (res) => await res.json());
+
+        const chatModelProviders = providers.chatModelProviders;
+        const embeddingModelProviders = providers.embeddingModelProviders;
+
+        if (
+          Object.keys(chatModelProviders).length > 0 &&
+          !chatModelProviders[chatModelProvider]
+        ) {
+          chatModelProvider = Object.keys(chatModelProviders)[0];
+          localStorage.setItem('chatModelProvider', chatModelProvider);
+        }
+
+        if (
+          chatModelProvider &&
+          !chatModelProviders[chatModelProvider][chatModel]
+        ) {
+          chatModel = Object.keys(chatModelProviders[chatModelProvider])[0];
+          localStorage.setItem('chatModel', chatModel);
+        }
+
+        if (
+          Object.keys(embeddingModelProviders).length > 0 &&
+          !embeddingModelProviders[embeddingModelProvider]
+        ) {
+          embeddingModelProvider = Object.keys(embeddingModelProviders)[0];
+          localStorage.setItem(
+            'embeddingModelProvider',
+            embeddingModelProvider,
+          );
+        }
+
+        if (
+          embeddingModelProvider &&
+          !embeddingModelProviders[embeddingModelProvider][embeddingModel]
+        ) {
+          embeddingModel = Object.keys(
+            embeddingModelProviders[embeddingModelProvider],
+          )[0];
+          localStorage.setItem('embeddingModel', embeddingModel);
+        }
       }
 
       const wsURL = new URL(url);
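
The new else branch re-validates the choices cached in localStorage against what the server currently reports, falling back to the first available provider or model when a stored key has disappeared (for example, after an API key was removed server-side). The rule it applies four times, as a pure-function sketch:

// Keep the stored key if it is still offered, else take the first
// available key (or null when nothing is available).
const pickAvailable = (
  stored: string | null,
  available: string[],
): string | null =>
  stored && available.includes(stored) ? stored : available[0] ?? null;

console.log(pickAvailable('groq', ['openai', 'ollama']));   // 'openai'
console.log(pickAvailable('openai', ['openai', 'ollama'])); // 'openai'
console.log(pickAvailable('groq', []));                     // null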