Fix logging statements and error handling in image search and Searxng API integration
parent 9b5548e9f8
commit 93ff88a329
5 changed files with 192 additions and 99 deletions
@@ -21,25 +21,36 @@ export const searchSearxng = async (
   query: string,
   opts?: SearxngSearchOptions,
 ) => {
+  try {
     const searxngURL = getSearxngApiEndpoint();
+    console.log('Searxng API endpoint:', searxngURL);
 
     const url = new URL(`${searxngURL}/search?format=json`);
     url.searchParams.append('q', query);
+    console.log('Initial URL with query:', url.toString());
 
     if (opts) {
       Object.keys(opts).forEach((key) => {
         if (Array.isArray(opts[key])) {
           url.searchParams.append(key, opts[key].join(','));
-          return;
+        } else {
+          url.searchParams.append(key, opts[key]);
         }
-        url.searchParams.append(key, opts[key]);
+        console.log(`Added search option ${key}:`, opts[key]);
       });
     }
 
+    console.log('Final URL with all parameters:', url.toString());
     const res = await axios.get(url.toString());
+    console.log('API response received');
 
     const results: SearxngSearchResult[] = res.data.results;
     const suggestions: string[] = res.data.suggestions;
+    console.log('Results and suggestions extracted from the response');
 
     return { results, suggestions };
+  } catch (error) {
+    console.error('Error during Searxng search:', error);
+    throw error; // Re-throw the error after logging it
+  }
 };
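Because searchSearxng now logs and re-throws instead of failing silently, a call site still has to decide how to recover. A minimal sketch of a caller, assuming the helper is exported from the module changed above; the import path, the option fields (engines, pageno) and the empty-result fallback are illustrative assumptions, not part of this commit:

    // Hypothetical caller of the searchSearxng helper changed above.
    import { searchSearxng } from './searxng'; // import path is an assumption

    const safeSearch = async (query: string) => {
      try {
        // Option names here are assumed fields of SearxngSearchOptions.
        return await searchSearxng(query, { engines: ['google'], pageno: 1 });
      } catch (error) {
        // The helper has already logged the failure; fall back to empty results.
        return { results: [], suggestions: [] };
      }
    };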
@@ -11,48 +11,58 @@ import {
 const router = express.Router();
 
 router.get('/', async (_, res) => {
+  try {
     const config = {};
 
     const providers = await getAvailableProviders();
 
     for (const provider in providers) {
       delete providers[provider]['embeddings'];
     }
 
     config['providers'] = {};
 
     for (const provider in providers) {
       config['providers'][provider] = Object.keys(providers[provider]);
     }
 
     config['selectedProvider'] = getChatModelProvider();
     config['selectedChatModel'] = getChatModel();
 
     config['openeaiApiKey'] = getOpenaiApiKey();
     config['ollamaApiUrl'] = getOllamaApiEndpoint();
 
     res.status(200).json(config);
+  } catch (error) {
+    console.error('Failed to retrieve configuration:', error);
+    res.status(500).json({ message: 'Internal Server Error' });
+  }
 });
 
 router.post('/', async (req, res) => {
+  try {
     const config = req.body;
 
     const updatedConfig = {
       GENERAL: {
         CHAT_MODEL_PROVIDER: config.selectedProvider,
         CHAT_MODEL: config.selectedChatModel,
       },
       API_KEYS: {
         OPENAI: config.openeaiApiKey,
       },
       API_ENDPOINTS: {
         OLLAMA: config.ollamaApiUrl,
       },
     };
 
     updateConfig(updatedConfig);
 
     res.status(200).json({ message: 'Config updated' });
+  } catch (error) {
+    console.error('Failed to update configuration:', error);
+    res.status(500).json({ message: 'Internal Server Error' });
+  }
 });
 
 export default router;
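With both handlers wrapped in try/catch, the config route now answers 500 with a JSON body instead of letting the exception propagate. A sketch of how a front end might consume that; the mount path and the helper itself are assumptions, while the response fields mirror the JSON built by the GET handler above:

    // Hypothetical client helper for the config route above.
    const loadConfig = async () => {
      const res = await fetch('/api/config'); // mount path is an assumption
      if (!res.ok) {
        // The handler now returns { message: 'Internal Server Error' } on failure.
        const { message } = await res.json();
        throw new Error(`Config request failed: ${message}`);
      }
      return res.json(); // { providers, selectedProvider, selectedChatModel, ... }
    };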
@@ -11,6 +11,8 @@ router.post('/', async (req, res) => {
   try {
     let { query, chat_history } = req.body;
 
+    console.log('Received request with query:', query);
+
     chat_history = chat_history.map((msg: any) => {
       if (msg.role === 'user') {
         return new HumanMessage(msg.content);

@@ -19,10 +21,14 @@ router.post('/', async (req, res) => {
       }
     });
 
+    console.log('Processed chat history:', chat_history);
+
     const models = await getAvailableProviders();
     const provider = getChatModelProvider();
     const chatModel = getChatModel();
 
+    console.log(`Using provider: ${provider} and model: ${chatModel}`);
+
     let llm: BaseChatModel | undefined;
 
     if (models[provider] && models[provider][chatModel]) {

@@ -30,16 +36,22 @@ router.post('/', async (req, res) => {
     }
 
     if (!llm) {
+      console.error('Invalid LLM model selected');
       res.status(500).json({ message: 'Invalid LLM model selected' });
       return;
     }
 
-    const images = await handleImageSearch({ query, chat_history }, llm);
-    res.status(200).json({ images });
+    try {
+      const images = await handleImageSearch({ query, chat_history }, llm);
+      res.status(200).json({ images });
+      console.log('Image search successful:', images);
+    } catch (error) {
+      console.error('Error during image search:', error);
+      res.status(500).json({ message: 'Error during image search' });
+    }
   } catch (err) {
+    console.error('An error occurred in the main request handler:', err);
     res.status(500).json({ message: 'An error has occurred.' });
-    console.log(err.message);
   }
 });
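The image search route now distinguishes three failure modes: an invalid model selection, a failure inside handleImageSearch, and any other error in the request handler, each answered with its own 500 message. A sketch of a client call that surfaces those messages; the endpoint path and the wrapper are assumptions, while the body shape follows the { query, chat_history } fields the handler reads:

    // Hypothetical client call for the image search route above.
    const searchImages = async (
      query: string,
      chatHistory: { role: string; content: string }[],
    ) => {
      const res = await fetch('/api/images', { // endpoint path is an assumption
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ query, chat_history: chatHistory }),
      });
      if (!res.ok) {
        const { message } = await res.json(); // e.g. 'Error during image search'
        throw new Error(message);
      }
      const { images } = await res.json();
      return images;
    };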
@@ -6,33 +6,74 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
 import type { Embeddings } from '@langchain/core/embeddings';
 
 export const handleConnection = async (ws: WebSocket) => {
+  try {
     const models = await getAvailableProviders();
     const provider = getChatModelProvider();
     const chatModel = getChatModel();
 
     let llm: BaseChatModel | undefined;
     let embeddings: Embeddings | undefined;
 
     if (models[provider] && models[provider][chatModel]) {
       llm = models[provider][chatModel] as BaseChatModel | undefined;
       embeddings = models[provider].embeddings as Embeddings | undefined;
     }
 
-  if (!llm || !embeddings) {
-    ws.send(
-      JSON.stringify({
-        type: 'error',
-        data: 'Invalid LLM or embeddings model selected',
-      }),
-    );
-    ws.close();
-  }
-
-  ws.on(
-    'message',
-    async (message) =>
-      await handleMessage(message.toString(), ws, llm, embeddings),
-  );
-
-  ws.on('close', () => console.log('Connection closed'));
+    // Separate checks for llm and embeddings
+    if (!llm && !embeddings) {
+      ws.send(
+        JSON.stringify({
+          type: 'error',
+          data: 'Invalid LLM and embeddings model selected',
+        }),
+      );
+      console.error('Invalid LLM and embeddings model selected');
+    } else if (!llm) {
+      ws.send(
+        JSON.stringify({
+          type: 'error',
+          data: 'Invalid LLM model selected',
+        }),
+      );
+      console.error('Invalid LLM model selected');
+    } else if (!embeddings) {
+      ws.send(
+        JSON.stringify({
+          type: 'error',
+          data: 'Invalid embeddings model selected',
+        }),
+      );
+      console.error('Invalid embeddings model selected');
+    }
+
+    if (!llm || !embeddings) {
+      ws.close();
+      return;
+    }
+
+    ws.on('message', async (message) => {
+      try {
+        await handleMessage(message.toString(), ws, llm, embeddings);
+      } catch (error) {
+        console.error('Error handling message:', error);
+        ws.send(
+          JSON.stringify({
+            type: 'error',
+            data: 'Error processing your message',
+          }),
+        );
+      }
+    });
+
+    ws.on('close', () => console.log('Connection closed'));
+  } catch (error) {
+    console.error('Failed to establish a connection:', error);
+    ws.send(
+      JSON.stringify({
+        type: 'error',
+        data: 'Failed to establish a connection',
+      }),
+    );
+    ws.close();
+  }
 };
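Because the connection handler now reports a specific error (invalid LLM, invalid embeddings, or both) before closing the socket, a client can show a meaningful message instead of a silent disconnect. A browser-side sketch that reads the { type: 'error', data } payload shape sent above; the WebSocket URL is an assumption:

    // Hypothetical browser client for the connection handler above.
    const ws = new WebSocket('ws://localhost:3001'); // URL is an assumption

    ws.addEventListener('message', (event) => {
      const payload = JSON.parse(event.data);
      if (payload.type === 'error') {
        // Matches the messages sent above: invalid LLM, invalid embeddings, or connection failure.
        console.warn('Server error:', payload.data);
      }
    });

    ws.addEventListener('close', () => console.log('Socket closed by server'));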
@@ -66,30 +66,44 @@ export const handleMessage = async (
   llm: BaseChatModel,
   embeddings: Embeddings,
 ) => {
-  try {
-    const parsedMessage = JSON.parse(message) as Message;
-    const id = Math.random().toString(36).substring(7);
+  let parsedMessage: Message;
+  try {
+    parsedMessage = JSON.parse(message) as Message;
+    console.log('Message parsed successfully:', parsedMessage);
+  } catch (error) {
+    console.error('Error parsing message:', message, error);
+    return ws.send(
+      JSON.stringify({ type: 'error', data: 'Invalid message format' }),
+    );
+  }
 
-    if (!parsedMessage.content)
-      return ws.send(
-        JSON.stringify({ type: 'error', data: 'Invalid message format' }),
-      );
+  const id = Math.random().toString(36).substring(7);
+  console.log('Generated message ID:', id);
+
+  if (!parsedMessage.content) {
+    console.log('Message content is empty');
+    return ws.send(
+      JSON.stringify({ type: 'error', data: 'Invalid message format' }),
+    );
+  }
 
   const history: BaseMessage[] = parsedMessage.history.map((msg) => {
     if (msg[0] === 'human') {
       return new HumanMessage({
         content: msg[1],
       });
     } else {
       return new AIMessage({
         content: msg[1],
       });
     }
   });
 
   if (parsedMessage.type === 'message') {
     const handler = searchHandlers[parsedMessage.focusMode];
     if (handler) {
+      console.log('Handling message with focus mode:', parsedMessage.focusMode);
+      try {
         const emitter = handler(
           parsedMessage.content,
           history,

@@ -97,12 +111,17 @@ export const handleMessage = async (
           embeddings,
         );
         handleEmitterEvents(emitter, ws, id);
+      } catch (error) {
+        console.error(
+          'Error handling message with handler:',
+          parsedMessage.focusMode,
+          error,
+        );
+        ws.send(JSON.stringify({ type: 'error', data: 'Handler failure' }));
+      }
     } else {
+      console.log('Invalid focus mode:', parsedMessage.focusMode);
       ws.send(JSON.stringify({ type: 'error', data: 'Invalid focus mode' }));
     }
   }
-  } catch (error) {
-    console.error('Failed to handle message', error);
-    ws.send(JSON.stringify({ type: 'error', data: 'Invalid message format' }));
-  }
 };
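For reference, the fields handleMessage reads from each frame are type, content, focusMode and history (pairs of role and text). A sketch of a frame a client might send over the same WebSocket as in the previous example; the focus-mode value and the example texts are assumptions:

    // Hypothetical outgoing frame for the message handler above.
    const frame = {
      type: 'message',
      content: 'What is the capital of France?',
      focusMode: 'webSearch', // value is an assumption
      history: [
        ['human', 'Hi there'],
        ['ai', 'Hello! How can I help you?'],
      ],
    };
    ws.send(JSON.stringify(frame));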