feat(providers): add gemini

ItzCrazyKns committed 2024-11-28 20:47:18 +05:30
parent ecad065577
commit 177746235a
8 changed files with 151 additions and 1 deletion

@@ -14,6 +14,7 @@ interface Config {
    OPENAI: string;
    GROQ: string;
    ANTHROPIC: string;
    GEMINI: string;
  };
  API_ENDPOINTS: {
    SEARXNG: string;
@@ -43,6 +44,8 @@ export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ;
export const getAnthropicApiKey = () => loadConfig().API_KEYS.ANTHROPIC;

export const getGeminiApiKey = () => loadConfig().API_KEYS.GEMINI;

export const getSearxngApiEndpoint = () =>
  process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
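The new getGeminiApiKey accessor follows the same pattern as the other key getters: it returns whatever is set under API_KEYS.GEMINI, with no environment-variable fallback (only SEARXNG gets one). A minimal sketch of how downstream code can guard on it; the maybeInitGemini helper and import path are hypothetical, not part of this commit:

import { getGeminiApiKey } from './config'; // path assumed

// Hypothetical helper (not in this commit): mirror the loaders'
// early-return pattern and skip provider setup when no key is configured.
const maybeInitGemini = (): { apiKey: string } | undefined => {
  const apiKey = getGeminiApiKey();
  if (!apiKey) return undefined;
  return { apiKey };
};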

@@ -0,0 +1,69 @@
import {
  ChatGoogleGenerativeAI,
  GoogleGenerativeAIEmbeddings,
} from '@langchain/google-genai';
import { getGeminiApiKey } from '../../config';
import logger from '../../utils/logger';

export const loadGeminiChatModels = async () => {
  const geminiApiKey = getGeminiApiKey();

  if (!geminiApiKey) return {};

  try {
    const chatModels = {
      'gemini-1.5-flash': {
        displayName: 'Gemini 1.5 Flash',
        model: new ChatGoogleGenerativeAI({
          modelName: 'gemini-1.5-flash',
          temperature: 0.7,
          apiKey: geminiApiKey,
        }),
      },
      'gemini-1.5-flash-8b': {
        displayName: 'Gemini 1.5 Flash 8B',
        model: new ChatGoogleGenerativeAI({
          modelName: 'gemini-1.5-flash-8b',
          temperature: 0.7,
          apiKey: geminiApiKey,
        }),
      },
      'gemini-1.5-pro': {
        displayName: 'Gemini 1.5 Pro',
        model: new ChatGoogleGenerativeAI({
          modelName: 'gemini-1.5-pro',
          temperature: 0.7,
          apiKey: geminiApiKey,
        }),
      },
    };

    return chatModels;
  } catch (err) {
    logger.error(`Error loading Gemini models: ${err}`);
    return {};
  }
};

export const loadGeminiEmbeddingsModels = async () => {
  const geminiApiKey = getGeminiApiKey();

  if (!geminiApiKey) return {};

  try {
    const embeddingModels = {
      'text-embedding-004': {
        displayName: 'Text Embedding',
        model: new GoogleGenerativeAIEmbeddings({
          apiKey: geminiApiKey,
          modelName: 'text-embedding-004',
        }),
      },
    };

    return embeddingModels;
  } catch (err) {
    logger.error(`Error loading Gemini embeddings model: ${err}`);
    return {};
  }
};
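The new provider file returns a map of display names to model instances, or an empty object when the key is missing or construction throws. A rough usage sketch (the import path and demo wrapper are assumptions; ChatGoogleGenerativeAI is a standard LangChain chat model, so invoke() is available):

import { loadGeminiChatModels } from './gemini'; // path assumed

const demo = async () => {
  const models = await loadGeminiChatModels(); // {} when the GEMINI key is unset
  const flash = models['gemini-1.5-flash'];
  if (!flash) {
    console.log('Gemini is not configured');
    return;
  }

  // invoke() accepts a plain string and resolves to an AIMessage.
  const reply = await flash.model.invoke('Reply with one word: ready?');
  console.log(`${flash.displayName}: ${reply.content}`);
};

demo().catch(console.error);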

@@ -3,18 +3,21 @@ import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
import { loadAnthropicChatModels } from './anthropic';
import { loadTransformersEmbeddingsModels } from './transformers';
import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';

const chatModelProviders = {
  openai: loadOpenAIChatModels,
  groq: loadGroqChatModels,
  ollama: loadOllamaChatModels,
  anthropic: loadAnthropicChatModels,
  gemini: loadGeminiChatModels,
};

const embeddingModelProviders = {
  openai: loadOpenAIEmbeddingsModels,
  local: loadTransformersEmbeddingsModels,
  ollama: loadOllamaEmbeddingsModels,
  gemini: loadGeminiEmbeddingsModels,
};

export const getAvailableChatModelProviders = async () => {
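getAvailableChatModelProviders is truncated above; the usual pattern for a registry like this is to await every loader and drop providers that return no models. A sketch of that pattern (collectProviders is a hypothetical name, not the file's actual body):

type Loader = () => Promise<Record<string, unknown>>;

// Sketch only: await each registered loader and keep providers that
// produced at least one model (unconfigured loaders return {}).
const collectProviders = async (loaders: Record<string, Loader>) => {
  const available: Record<string, Record<string, unknown>> = {};
  for (const [provider, load] of Object.entries(loaders)) {
    const models = await load();
    if (Object.keys(models).length > 0) available[provider] = models;
  }
  return available;
};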

@@ -7,6 +7,7 @@ import {
  getGroqApiKey,
  getOllamaApiEndpoint,
  getAnthropicApiKey,
  getGeminiApiKey,
  getOpenaiApiKey,
  updateConfig,
} from '../config';
@@ -52,7 +53,8 @@ router.get('/', async (_, res) => {
    config['ollamaApiUrl'] = getOllamaApiEndpoint();
    config['anthropicApiKey'] = getAnthropicApiKey();
    config['groqApiKey'] = getGroqApiKey();
    config['geminiApiKey'] = getGeminiApiKey();

    res.status(200).json(config);
  } catch (err: any) {
    res.status(500).json({ message: 'An error has occurred.' });
@@ -68,6 +70,7 @@ router.post('/', async (req, res) => {
      OPENAI: config.openaiApiKey,
      GROQ: config.groqApiKey,
      ANTHROPIC: config.anthropicApiKey,
      GEMINI: config.geminiApiKey,
    },
    API_ENDPOINTS: {
      OLLAMA: config.ollamaApiUrl,
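Taken together, the GET handler now reports geminiApiKey and the POST handler persists it under API_KEYS.GEMINI. A hedged client-side sketch of the POST request (the /api/config path and saveConfig wrapper are assumptions; the body field names come straight from the diff, with placeholder values):

// Assumed route prefix; only the body field names are confirmed by the diff.
const saveConfig = () =>
  fetch('/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      openaiApiKey: 'sk-...',
      groqApiKey: 'gsk_...',
      anthropicApiKey: 'sk-ant-...',
      geminiApiKey: 'AIza...', // the field this commit adds
      ollamaApiUrl: 'http://localhost:11434',
    }),
  });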