Fix logging statements and error handling in image search and Searxng API integration

This commit is contained in:
SwiftyOS 2024-04-29 11:01:47 +02:00
parent 9b5548e9f8
commit 93ff88a329
5 changed files with 192 additions and 99 deletions

View file

@@ -21,25 +21,36 @@ export const searchSearxng = async (
query: string,
opts?: SearxngSearchOptions,
) => {
const searxngURL = getSearxngApiEndpoint();
try {
const searxngURL = getSearxngApiEndpoint();
console.log('Searxng API endpoint:', searxngURL);
const url = new URL(`${searxngURL}/search?format=json`);
url.searchParams.append('q', query);
const url = new URL(`${searxngURL}/search?format=json`);
url.searchParams.append('q', query);
console.log('Initial URL with query:', url.toString());
if (opts) {
Object.keys(opts).forEach((key) => {
if (Array.isArray(opts[key])) {
url.searchParams.append(key, opts[key].join(','));
return;
}
url.searchParams.append(key, opts[key]);
});
if (opts) {
Object.keys(opts).forEach((key) => {
if (Array.isArray(opts[key])) {
url.searchParams.append(key, opts[key].join(','));
} else {
url.searchParams.append(key, opts[key]);
}
console.log(`Added search option ${key}:`, opts[key]);
});
}
console.log('Final URL with all parameters:', url.toString());
const res = await axios.get(url.toString());
console.log('API response received');
const results: SearxngSearchResult[] = res.data.results;
const suggestions: string[] = res.data.suggestions;
console.log('Results and suggestions extracted from the response');
return { results, suggestions };
} catch (error) {
console.error('Error during Searxng search:', error);
throw error; // Re-throw the error after logging it
}
const res = await axios.get(url.toString());
const results: SearxngSearchResult[] = res.data.results;
const suggestions: string[] = res.data.suggestions;
return { results, suggestions };
};

View file

@@ -11,48 +11,58 @@ import {
const router = express.Router();
router.get('/', async (_, res) => {
const config = {};
try {
const config = {};
const providers = await getAvailableProviders();
const providers = await getAvailableProviders();
for (const provider in providers) {
delete providers[provider]['embeddings'];
for (const provider in providers) {
delete providers[provider]['embeddings'];
}
config['providers'] = {};
for (const provider in providers) {
config['providers'][provider] = Object.keys(providers[provider]);
}
config['selectedProvider'] = getChatModelProvider();
config['selectedChatModel'] = getChatModel();
config['openeaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
res.status(200).json(config);
} catch (error) {
console.error('Failed to retrieve configuration:', error);
res.status(500).json({ message: 'Internal Server Error' });
}
config['providers'] = {};
for (const provider in providers) {
config['providers'][provider] = Object.keys(providers[provider]);
}
config['selectedProvider'] = getChatModelProvider();
config['selectedChatModel'] = getChatModel();
config['openeaiApiKey'] = getOpenaiApiKey();
config['ollamaApiUrl'] = getOllamaApiEndpoint();
res.status(200).json(config);
});
router.post('/', async (req, res) => {
const config = req.body;
try {
const config = req.body;
const updatedConfig = {
GENERAL: {
CHAT_MODEL_PROVIDER: config.selectedProvider,
CHAT_MODEL: config.selectedChatModel,
},
API_KEYS: {
OPENAI: config.openeaiApiKey,
},
API_ENDPOINTS: {
OLLAMA: config.ollamaApiUrl,
},
};
const updatedConfig = {
GENERAL: {
CHAT_MODEL_PROVIDER: config.selectedProvider,
CHAT_MODEL: config.selectedChatModel,
},
API_KEYS: {
OPENAI: config.openeaiApiKey,
},
API_ENDPOINTS: {
OLLAMA: config.ollamaApiUrl,
},
};
updateConfig(updatedConfig);
updateConfig(updatedConfig);
res.status(200).json({ message: 'Config updated' });
res.status(200).json({ message: 'Config updated' });
} catch (error) {
console.error('Failed to update configuration:', error);
res.status(500).json({ message: 'Internal Server Error' });
}
});
export default router;

View file

@@ -11,6 +11,8 @@ router.post('/', async (req, res) => {
try {
let { query, chat_history } = req.body;
console.log('Received request with query:', query);
chat_history = chat_history.map((msg: any) => {
if (msg.role === 'user') {
return new HumanMessage(msg.content);
@@ -19,10 +21,14 @@ router.post('/', async (req, res) => {
}
});
console.log('Processed chat history:', chat_history);
const models = await getAvailableProviders();
const provider = getChatModelProvider();
const chatModel = getChatModel();
console.log(`Using provider: ${provider} and model: ${chatModel}`);
let llm: BaseChatModel | undefined;
if (models[provider] && models[provider][chatModel]) {
@@ -30,16 +36,22 @@ router.post('/', async (req, res) => {
}
if (!llm) {
console.error('Invalid LLM model selected');
res.status(500).json({ message: 'Invalid LLM model selected' });
return;
}
const images = await handleImageSearch({ query, chat_history }, llm);
res.status(200).json({ images });
try {
const images = await handleImageSearch({ query, chat_history }, llm);
res.status(200).json({ images });
console.log('Image search successful:', images);
} catch (error) {
console.error('Error during image search:', error);
res.status(500).json({ message: 'Error during image search' });
}
} catch (err) {
console.error('An error occurred in the main request handler:', err);
res.status(500).json({ message: 'An error has occurred.' });
console.log(err.message);
}
});

View file

@@ -6,33 +6,74 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models';
import type { Embeddings } from '@langchain/core/embeddings';
export const handleConnection = async (ws: WebSocket) => {
const models = await getAvailableProviders();
const provider = getChatModelProvider();
const chatModel = getChatModel();
try {
const models = await getAvailableProviders();
const provider = getChatModelProvider();
const chatModel = getChatModel();
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
let llm: BaseChatModel | undefined;
let embeddings: Embeddings | undefined;
if (models[provider] && models[provider][chatModel]) {
llm = models[provider][chatModel] as BaseChatModel | undefined;
embeddings = models[provider].embeddings as Embeddings | undefined;
}
if (models[provider] && models[provider][chatModel]) {
llm = models[provider][chatModel] as BaseChatModel | undefined;
embeddings = models[provider].embeddings as Embeddings | undefined;
}
if (!llm || !embeddings) {
// Separate checks for llm and embeddings
if (!llm && !embeddings) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid LLM and embeddings model selected',
}),
);
console.error('Invalid LLM and embeddings model selected');
} else if (!llm) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid LLM model selected',
}),
);
console.error('Invalid LLM model selected');
} else if (!embeddings) {
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid embeddings model selected',
}),
);
console.error('Invalid embeddings model selected');
}
if (!llm || !embeddings) {
ws.close();
return;
}
ws.on('message', async (message) => {
try {
await handleMessage(message.toString(), ws, llm, embeddings);
} catch (error) {
console.error('Error handling message:', error);
ws.send(
JSON.stringify({
type: 'error',
data: 'Error processing your message',
}),
);
}
});
ws.on('close', () => console.log('Connection closed'));
} catch (error) {
console.error('Failed to establish a connection:', error);
ws.send(
JSON.stringify({
type: 'error',
data: 'Invalid LLM or embeddings model selected',
data: 'Failed to establish a connection',
}),
);
ws.close();
}
ws.on(
'message',
async (message) =>
await handleMessage(message.toString(), ws, llm, embeddings),
);
ws.on('close', () => console.log('Connection closed'));
};

View file

@@ -66,30 +66,44 @@ export const handleMessage = async (
llm: BaseChatModel,
embeddings: Embeddings,
) => {
let parsedMessage: Message;
try {
const parsedMessage = JSON.parse(message) as Message;
const id = Math.random().toString(36).substring(7);
parsedMessage = JSON.parse(message) as Message;
console.log('Message parsed successfully:', parsedMessage);
} catch (error) {
console.error('Error parsing message:', message, error);
return ws.send(
JSON.stringify({ type: 'error', data: 'Invalid message format' }),
);
}
if (!parsedMessage.content)
return ws.send(
JSON.stringify({ type: 'error', data: 'Invalid message format' }),
);
const id = Math.random().toString(36).substring(7);
console.log('Generated message ID:', id);
const history: BaseMessage[] = parsedMessage.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
if (!parsedMessage.content) {
console.log('Message content is empty');
return ws.send(
JSON.stringify({ type: 'error', data: 'Invalid message format' }),
);
}
if (parsedMessage.type === 'message') {
const handler = searchHandlers[parsedMessage.focusMode];
if (handler) {
const history: BaseMessage[] = parsedMessage.history.map((msg) => {
if (msg[0] === 'human') {
return new HumanMessage({
content: msg[1],
});
} else {
return new AIMessage({
content: msg[1],
});
}
});
if (parsedMessage.type === 'message') {
const handler = searchHandlers[parsedMessage.focusMode];
if (handler) {
console.log('Handling message with focus mode:', parsedMessage.focusMode);
try {
const emitter = handler(
parsedMessage.content,
history,
@@ -97,12 +111,17 @@ export const handleMessage = async (
embeddings,
);
handleEmitterEvents(emitter, ws, id);
} else {
ws.send(JSON.stringify({ type: 'error', data: 'Invalid focus mode' }));
} catch (error) {
console.error(
'Error handling message with handler:',
parsedMessage.focusMode,
error,
);
ws.send(JSON.stringify({ type: 'error', data: 'Handler failure' }));
}
} else {
console.log('Invalid focus mode:', parsedMessage.focusMode);
ws.send(JSON.stringify({ type: 'error', data: 'Invalid focus mode' }));
}
} catch (error) {
console.error('Failed to handle message', error);
ws.send(JSON.stringify({ type: 'error', data: 'Invalid message format' }));
}
};