feat(video-search): add video search

ItzCrazyKns 2024-04-30 14:31:32 +05:30
parent bb9a2f538d
commit 6e304e7051
GPG key ID: 8162927C7CCE3065
6 changed files with 336 additions and 7 deletions

src/agents/videoSearchAgent.ts Normal file

@@ -0,0 +1,90 @@
import {
RunnableSequence,
RunnableMap,
RunnableLambda,
} from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts';
import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../lib/searxng';
import type { BaseChatModel } from '@langchain/core/language_models/chat_models';
const VideoSearchChainPrompt = `
You will be given a conversation below and a follow-up question. You need to rephrase the follow-up question so it is a standalone question that can be used by the LLM to search YouTube for videos.
You need to make sure the rephrased question agrees with the conversation and is relevant to the conversation.

Example:
1. Follow up question: How does a car work?
Rephrased: How does a car work?

2. Follow up question: What is the theory of relativity?
Rephrased: What is the theory of relativity?

3. Follow up question: How does an AC work?
Rephrased: How does an AC work?

Conversation:
{chat_history}

Follow up question: {query}
Rephrased question:
`;
type VideoSearchChainInput = {
chat_history: BaseMessage[];
query: string;
};
const strParser = new StringOutputParser();
const createVideoSearchChain = (llm: BaseChatModel) => {
return RunnableSequence.from([
RunnableMap.from({
chat_history: (input: VideoSearchChainInput) => {
return formatChatHistoryAsString(input.chat_history);
},
query: (input: VideoSearchChainInput) => {
return input.query;
},
}),
PromptTemplate.fromTemplate(VideoSearchChainPrompt),
llm,
strParser,
RunnableLambda.from(async (input: string) => {
const res = await searchSearxng(input, {
engines: ['youtube'],
});
// Type the accumulator explicitly so it is not inferred as any[].
const videos: {
  img_src: string;
  url: string;
  title: string;
  iframe_src: string;
}[] = [];
res.results.forEach((result) => {
if (
result.thumbnail &&
result.url &&
result.title &&
result.iframe_src
) {
videos.push({
img_src: result.thumbnail,
url: result.url,
title: result.title,
iframe_src: result.iframe_src,
});
}
});
return videos.slice(0, 10);
}),
]);
};
const handleVideoSearch = (
input: VideoSearchChainInput,
llm: BaseChatModel,
) => {
const VideoSearchChain = createVideoSearchChain(llm);
return VideoSearchChain.invoke(input);
};
export default handleVideoSearch;
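
For context, a minimal sketch of driving the exported handleVideoSearch helper directly. The ChatOpenAI model is an assumption for illustration only; any BaseChatModel works, and in the app the model actually comes from the configured providers (see src/routes/videos.ts below).

import { ChatOpenAI } from '@langchain/openai';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import handleVideoSearch from './agents/videoSearchAgent';

const main = async () => {
  // Hypothetical model choice; any BaseChatModel can be passed in.
  const llm = new ChatOpenAI({ temperature: 0 });

  const videos = await handleVideoSearch(
    {
      chat_history: [
        new HumanMessage('I want to learn how engines work.'),
        new AIMessage('Sure! What kind of engine are you curious about?'),
      ],
      query: 'How does a car engine work?',
    },
    llm,
  );

  // Each entry carries img_src, url, title and iframe_src for embedding.
  console.log(videos);
};

main();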

src/lib/searxng.ts

@@ -13,8 +13,10 @@ interface SearxngSearchResult {
url: string;
img_src?: string;
thumbnail_src?: string;
thumbnail?: string;
content?: string;
author?: string;
iframe_src?: string;
}
export const searchSearxng = async (
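
A hedged sketch of calling searchSearxng directly to see the two optional fields this hunk adds (thumbnail and iframe_src). The (query, opts) call shape mirrors its use in videoSearchAgent.ts above; top-level await assumes an ESM entry point.

import { searchSearxng } from './lib/searxng';

const res = await searchSearxng('how does a car engine work', {
  engines: ['youtube'],
});

res.results.forEach((result) => {
  // thumbnail and iframe_src are the fields added in this commit;
  // both are optional, so guard before relying on them.
  if (result.thumbnail && result.iframe_src) {
    console.log(result.title, result.url);
  }
});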

src/routes/index.ts

@@ -1,10 +1,12 @@
import express from 'express';
import imagesRouter from './images';
import videosRouter from './videos';
import configRouter from './config';
const router = express.Router();
router.use('/images', imagesRouter);
router.use('/videos', videosRouter);
router.use('/config', configRouter);
export default router;
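
How this router is mounted is not part of the diff; below is a hypothetical wiring in the server entry point, with the '/api' prefix and the port as assumptions.

import express from 'express';
import router from './routes';

const app = express();
app.use(express.json()); // needed so req.body is parsed for POST /videos
app.use('/api', router); // mounts /api/videos next to /api/images and /api/config

app.listen(3001); // hypothetical port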

src/routes/videos.ts Normal file

@@ -0,0 +1,47 @@
import express from 'express';
import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import { getAvailableProviders } from '../lib/providers';
import { getChatModel, getChatModelProvider } from '../config';
import { HumanMessage, AIMessage } from '@langchain/core/messages';
import logger from '../utils/logger';
import handleVideoSearch from '../agents/videoSearchAgent';
const router = express.Router();
router.post('/', async (req, res) => {
try {
let { query, chat_history } = req.body;
chat_history = chat_history
  .map((msg: any) => {
    if (msg.role === 'user') {
      return new HumanMessage(msg.content);
    } else if (msg.role === 'assistant') {
      return new AIMessage(msg.content);
    }
  })
  // Messages with any other role would map to undefined; drop them.
  .filter(Boolean);
const models = await getAvailableProviders();
const provider = getChatModelProvider();
const chatModel = getChatModel();
let llm: BaseChatModel | undefined;
if (models[provider] && models[provider][chatModel]) {
llm = models[provider][chatModel] as BaseChatModel | undefined;
}
if (!llm) {
res.status(500).json({ message: 'Invalid LLM model selected' });
return;
}
const videos = await handleVideoSearch({ chat_history, query }, llm);
res.status(200).json({ videos });
} catch (err) {
res.status(500).json({ message: 'An error has occurred.' });
logger.error(`Error in video search: ${err.message}`);
}
});
export default router;
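
Putting it together, a hypothetical client call against the new endpoint; host, port, and the '/api' prefix carry over the assumptions from the mounting sketch above.

const response = await fetch('http://localhost:3001/api/videos', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    query: 'How does a car engine work?',
    chat_history: [
      { role: 'user', content: 'I want to learn about cars.' },
      { role: 'assistant', content: 'What would you like to know?' },
    ],
  }),
});

const { videos } = await response.json();
// videos: up to 10 entries, each with img_src, url, title and iframe_src.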