// Perplexica — src/routes/images.ts
import express from 'express';
// 2024-04-20 09:32:19 +05:30
import handleImageSearch from '../agents/imageSearchAgent';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableChatModelProviders } from '../lib/providers';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
// 2024-04-30 12:18:18 +05:30
import logger from '../utils/logger';
// 2024-04-09 16:21:05 +05:30
const router = express.Router();
router.post('/', async (req, res) => {
try {
let { query, chat_history, chat_model_provider, chat_model } = req.body;
chat_history = chat_history.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
} else if (msg.role === 'assistant') {
return new AIMessage(msg.content);
}
});
2024-04-09 16:21:05 +05:30
const chatModels = await getAvailableChatModelProviders();
2024-05-22 10:45:16 +05:30
const provider = chat_model_provider ?? Object.keys(chatModels)[0];
const chatModel = chat_model ?? Object.keys(chatModels[provider])[0];
let llm: BaseChatModel | undefined;
if (chatModels[provider] && chatModels[provider][chatModel]) {
llm = chatModels[provider][chatModel].model as BaseChatModel | undefined;
}
if (!llm) {
res.status(500).json({ message: 'Invalid LLM model selected' });
return;
}
2024-04-09 16:21:05 +05:30
2024-04-20 09:32:19 +05:30
const images = await handleImageSearch({ query, chat_history }, llm);
2024-04-09 16:21:05 +05:30
res.status(200).json({ images });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
2024-04-30 12:18:18 +05:30
logger.error(`Error in image search: ${err.message}`);
2024-04-09 16:21:05 +05:30
}
});
export default router;