feat: integrate Google Gemini with dynamic model fetching and UI support
- Added Gemini AI model integration with automatic model list fetching
- Updated configuration system to support Gemini API settings
- Enhanced SettingsDialog with Gemini model selection options
- Updated dependencies in package.json for Gemini support
- Improved provider configuration in index.ts for better model management
- Added Gemini API key field in sample configuration template
- Updated route configuration for Gemini support
parent c650d1c3d9
commit 3dbc358ee3
7 changed files with 157 additions and 12 deletions
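For orientation, a minimal sketch of what the change enables end to end. `getAvailableChatModelProviders` is exported from the provider index touched below; the printed keys are illustrative, not output from this commit:

import { getAvailableChatModelProviders } from './src/lib/providers';

const listGemini = async () => {
  const providers = await getAvailableChatModelProviders();
  // With a GEMINI key configured, a "gemini" entry appears alongside
  // openai, groq, ollama, and anthropic.
  console.log(Object.keys(providers['gemini'] ?? {}));
};

listGemini().catch(console.error);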
package.json
@@ -30,6 +30,7 @@
     "@langchain/anthropic": "^0.2.3",
     "@langchain/community": "^0.2.16",
     "@langchain/openai": "^0.0.25",
+    "@langchain/google-genai": "^0.0.23",
     "@xenova/transformers": "^2.17.1",
     "axios": "^1.6.8",
     "better-sqlite3": "^11.0.0",
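The new `@langchain/google-genai` dependency supplies the LangChain wrappers used in the provider below. A standalone smoke test, with the key and model name as placeholders (the 0.0.x constructor takes `modelName`):

import { ChatGoogleGenerativeAI } from '@langchain/google-genai';

const smokeTest = async () => {
  const model = new ChatGoogleGenerativeAI({
    apiKey: 'YOUR_GEMINI_API_KEY', // placeholder
    modelName: 'gemini-pro',
    temperature: 0.7,
  });
  const reply = await model.invoke('Say hello in one sentence.');
  console.log(reply.content);
};

smokeTest().catch(console.error);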
sample configuration template
@@ -1,13 +1,14 @@
 [GENERAL]
 PORT = 3001 # Port to run the server on
 SIMILARITY_MEASURE = "cosine" # "cosine" or "dot"
 KEEP_ALIVE = "5m" # How long to keep Ollama models loaded into memory. (Instead of using -1 use "-1m")
 
 [API_KEYS]
 OPENAI = "" # OpenAI API key - sk-1234567890abcdef1234567890abcdef
 GROQ = "" # Groq API key - gsk_1234567890abcdef1234567890abcdef
 ANTHROPIC = "" # Anthropic API key - sk-ant-1234567890abcdef1234567890abcdef
+GEMINI = ""
 
 [API_ENDPOINTS]
 SEARXNG = "http://localhost:32768" # SearxNG API URL
 OLLAMA = "" # Ollama API URL - http://host.docker.internal:11434
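How the template is consumed is not shown in this hunk; a hedged sketch of reading the new field (the `@iarna/toml` parser and the `sample.config.toml` filename are assumptions, not confirmed by this diff):

import fs from 'fs';
import toml from '@iarna/toml'; // assumed parser; not confirmed by this diff

const raw = fs.readFileSync('sample.config.toml', 'utf-8'); // filename assumed
const parsed = toml.parse(raw) as unknown as { API_KEYS: { GEMINI: string } };

// An empty string disables the provider (see the guard in gemini.ts below).
console.log('Gemini configured:', parsed.API_KEYS.GEMINI !== '');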
src/config.ts
@@ -14,6 +14,7 @@ interface Config {
     OPENAI: string;
     GROQ: string;
     ANTHROPIC: string;
+    GEMINI: string;
   };
   API_ENDPOINTS: {
     SEARXNG: string;
@@ -43,6 +44,8 @@ export const getGroqApiKey = () => loadConfig().API_KEYS.GROQ;
 
 export const getAnthropicApiKey = () => loadConfig().API_KEYS.ANTHROPIC;
 
+export const getGeminiApiKey = () => loadConfig().API_KEYS.GEMINI;
+
 export const getSearxngApiEndpoint = () =>
   process.env.SEARXNG_API_URL || loadConfig().API_ENDPOINTS.SEARXNG;
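The committed accessor reads only from the config file, unlike the SEARXNG accessor above, which honors an environment override. A hypothetical variant mirroring that pattern, as if added alongside the accessors in src/config.ts (not part of the commit; assumes the file's `loadConfig` is in scope):

// Hypothetical env-override variant; GEMINI_API_KEY is an assumed variable name.
export const getGeminiApiKeyWithEnvOverride = () =>
  process.env.GEMINI_API_KEY || loadConfig().API_KEYS.GEMINI;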
src/lib/providers/gemini.ts (new file, 119 lines)
@@ -0,0 +1,119 @@
+import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
+import { GoogleGenerativeAIEmbeddings } from '@langchain/google-genai';
+import { getGeminiApiKey } from '../../config';
+import logger from '../../utils/logger';
+import axios from 'axios';
+
+interface GeminiModel {
+  name: string;
+  baseModelId: string;
+  version: string;
+  displayName: string;
+  description: string;
+  inputTokenLimit: number;
+  outputTokenLimit: number;
+  supportedGenerationMethods: string[];
+  temperature: number;
+  maxTemperature: number;
+  topP: number;
+  topK: number;
+}
+
+interface GeminiModelsResponse {
+  models: GeminiModel[];
+  nextPageToken?: string;
+}
+
+const fetchGeminiModels = async (apiKey: string): Promise<GeminiModel[]> => {
+  try {
+    const response = await axios.get<GeminiModelsResponse>(
+      `https://generativelanguage.googleapis.com/v1beta/models?key=${apiKey}`,
+    );
+    return response.data.models;
+  } catch (err) {
+    logger.error(`Error fetching Gemini models: ${err}`);
+    return [];
+  }
+};
+
+export const loadGeminiChatModels = async () => {
+  const geminiApiKey = getGeminiApiKey();
+
+  if (!geminiApiKey) return {};
+
+  try {
+    const models = await fetchGeminiModels(geminiApiKey);
+    const chatModels: Record<string, any> = {};
+
+    // If no models are available from the API, fallback to default models
+    if (!models.length) {
+      chatModels['gemini-pro'] = {
+        displayName: 'Gemini Pro',
+        model: new ChatGoogleGenerativeAI({
+          temperature: 0.7,
+          apiKey: geminiApiKey,
+          modelName: 'gemini-pro',
+        }),
+      };
+      return chatModels;
+    }
+
+    for (const model of models) {
+      if (model.supportedGenerationMethods.includes('generateContent')) {
+        chatModels[model.name] = {
+          displayName: model.displayName,
+          model: new ChatGoogleGenerativeAI({
+            temperature: model.temperature || 0.7,
+            apiKey: geminiApiKey,
+            modelName: model.baseModelId,
+          }),
+        };
+      }
+    }
+
+    return chatModels;
+  } catch (err) {
+    logger.error(`Error loading Gemini chat models: ${err}`);
+    return {};
+  }
+};
+
+export const loadGeminiEmbeddingsModels = async () => {
+  const geminiApiKey = getGeminiApiKey();
+
+  if (!geminiApiKey) return {};
+
+  try {
+    const models = await fetchGeminiModels(geminiApiKey);
+    const embeddingsModels: Record<string, any> = {};
+
+    // If no models are available from the API, fallback to default models
+    if (!models.length) {
+      embeddingsModels['embedding-001'] = {
+        displayName: 'Gemini Embedding',
+        model: new GoogleGenerativeAIEmbeddings({
+          apiKey: geminiApiKey,
+          modelName: 'embedding-001',
+        }),
+      };
+      return embeddingsModels;
+    }
+
+    for (const model of models) {
+      if (model.supportedGenerationMethods.includes('embedContent')) {
+        embeddingsModels[model.name] = {
+          displayName: model.displayName,
+          model: new GoogleGenerativeAIEmbeddings({
+            apiKey: geminiApiKey,
+            modelName: model.baseModelId,
+          }),
+        };
+      }
+    }
+
+    return embeddingsModels;
+  } catch (err) {
+    logger.error(`Error loading Gemini embeddings model: ${err}`);
+    return {};
+  }
+};
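A usage sketch for the new module (the relative import path assumes a hypothetical script at the repo root). Note that the ListModels endpoint returns resource names prefixed with `models/`, so the keys produced by the loop are e.g. `models/gemini-pro`, while the fallback entry uses the bare `gemini-pro`:

import { loadGeminiChatModels } from './src/lib/providers/gemini';

const main = async () => {
  const chatModels = await loadGeminiChatModels();
  for (const [name, entry] of Object.entries(chatModels)) {
    console.log(`${name}: ${entry.displayName}`);
  }
};

main().catch(console.error);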
src/lib/providers/index.ts
@@ -3,18 +3,20 @@ import { loadOllamaChatModels, loadOllamaEmbeddingsModels } from './ollama';
 import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai';
 import { loadAnthropicChatModels } from './anthropic';
 import { loadTransformersEmbeddingsModels } from './transformers';
+import { loadGeminiChatModels, loadGeminiEmbeddingsModels } from './gemini';
 
 const chatModelProviders = {
   openai: loadOpenAIChatModels,
   groq: loadGroqChatModels,
   ollama: loadOllamaChatModels,
   anthropic: loadAnthropicChatModels,
+  gemini: loadGeminiChatModels,
 };
 
 const embeddingModelProviders = {
   openai: loadOpenAIEmbeddingsModels,
   local: loadTransformersEmbeddingsModels,
   ollama: loadOllamaEmbeddingsModels,
+  gemini: loadGeminiEmbeddingsModels,
 };
 
 export const getAvailableChatModelProviders = async () => {
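The body of the aggregator is outside this hunk; a minimal sketch of the presumed pattern (an assumption based on the provider maps above, not the committed code):

// Call every registered loader and keep providers that returned models.
type Loader = () => Promise<Record<string, any>>;

const collect = async (providers: Record<string, Loader>) => {
  const models: Record<string, Record<string, any>> = {};
  for (const [name, loader] of Object.entries(providers)) {
    const loaded = await loader();
    if (Object.keys(loaded).length) models[name] = loaded;
  }
  return models;
};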
src/routes/config.ts
@@ -7,6 +7,7 @@ import {
   getGroqApiKey,
   getOllamaApiEndpoint,
   getAnthropicApiKey,
+  getGeminiApiKey,
   getOpenaiApiKey,
   updateConfig,
 } from '../config';
@@ -52,7 +53,7 @@ router.get('/', async (_, res) => {
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
     config['anthropicApiKey'] = getAnthropicApiKey();
     config['groqApiKey'] = getGroqApiKey();
+    config['geminiApiKey'] = getGeminiApiKey();
     res.status(200).json(config);
   } catch (err: any) {
     res.status(500).json({ message: 'An error has occurred.' });
@@ -68,6 +69,7 @@ router.post('/', async (req, res) => {
       OPENAI: config.openaiApiKey,
       GROQ: config.groqApiKey,
       ANTHROPIC: config.anthropicApiKey,
+      GEMINI: config.geminiApiKey,
     },
     API_ENDPOINTS: {
       OLLAMA: config.ollamaApiUrl,
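A hypothetical client round trip against these handlers (the `/api/config` mount path is an assumption; only the router's relative routes appear in this diff):

const saveGeminiKey = async (key: string) => {
  const current = await (await fetch('/api/config')).json();
  await fetch('/api/config', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...current, geminiApiKey: key }),
  });
};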
SettingsDialog.tsx
@@ -63,6 +63,7 @@ interface SettingsType {
   openaiApiKey: string;
   groqApiKey: string;
   anthropicApiKey: string;
+  geminiApiKey: string;
   ollamaApiUrl: string;
 }
 
@@ -476,6 +477,22 @@ const SettingsDialog = ({
                   }
                 />
               </div>
+              <div className="flex flex-col space-y-1">
+                <p className="text-black/70 dark:text-white/70 text-sm">
+                  Gemini API Key
+                </p>
+                <Input
+                  type="text"
+                  placeholder="Gemini API key"
+                  defaultValue={config.geminiApiKey}
+                  onChange={(e) =>
+                    setConfig({
+                      ...config,
+                      geminiApiKey: e.target.value,
+                    })
+                  }
+                />
+              </div>
             </div>
           )}
           {isLoading && (