feat(agents): use ollama models

This commit is contained in:
ItzCrazyKns 2024-04-17 10:22:20 +05:30
parent 89340fcfff
commit 811822c03b
No known key found for this signature in database
GPG key ID: 8162927C7CCE3065
8 changed files with 65 additions and 41 deletions

View file

@@ -1,5 +1,5 @@
PORT=3001 PORT=3001
OPENAI_API_KEY= OLLAMA_URL=http://localhost:11434 # url of the ollama server
SIMILARITY_MEASURE=cosine # cosine or dot SIMILARITY_MEASURE=cosine # cosine or dot
SEARXNG_API_URL= # no need to fill this if using docker SEARXNG_API_URL= # no need to fill this if using docker
MODEL_NAME=gpt-3.5-turbo MODEL_NAME=llama2

View file

@@ -9,7 +9,9 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const embeddings = new OpenAIEmbeddings({ const embeddings = new OllamaEmbeddings({
modelName: 'text-embedding-3-large', model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicAcademicSearchRetrieverPrompt = ` const basicAcademicSearchRetrieverPrompt = `

View file

@@ -4,15 +4,15 @@ import {
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { PromptTemplate } from '@langchain/core/prompts'; import { PromptTemplate } from '@langchain/core/prompts';
import { OpenAI } from '@langchain/openai'; import { Ollama } from '@langchain/community/llms/ollama';
import formatChatHistoryAsString from '../utils/formatHistory'; import formatChatHistoryAsString from '../utils/formatHistory';
import { BaseMessage } from '@langchain/core/messages'; import { BaseMessage } from '@langchain/core/messages';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
}); });
const imageSearchChainPrompt = ` const imageSearchChainPrompt = `

View file

@@ -9,7 +9,9 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const embeddings = new OpenAIEmbeddings({ const embeddings = new OllamaEmbeddings({
modelName: 'text-embedding-3-large', model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicRedditSearchRetrieverPrompt = ` const basicRedditSearchRetrieverPrompt = `

View file

@@ -9,7 +9,9 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const embeddings = new OpenAIEmbeddings({ const embeddings = new OllamaEmbeddings({
modelName: 'text-embedding-3-large', model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicSearchRetrieverPrompt = ` const basicSearchRetrieverPrompt = `

View file

@@ -9,7 +9,8 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -17,14 +18,16 @@ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
import formatChatHistoryAsString from '../utils/formatHistory'; import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicWolframAlphaSearchRetrieverPrompt = ` const basicWolframAlphaSearchRetrieverPrompt = `

View file

@@ -4,13 +4,14 @@ import {
MessagesPlaceholder, MessagesPlaceholder,
} from '@langchain/core/prompts'; } from '@langchain/core/prompts';
import { RunnableSequence } from '@langchain/core/runnables'; import { RunnableSequence } from '@langchain/core/runnables';
import { ChatOpenAI } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import type { StreamEvent } from '@langchain/core/tracers/log_stream'; import type { StreamEvent } from '@langchain/core/tracers/log_stream';
import eventEmitter from 'events'; import eventEmitter from 'events';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });

View file

@@ -9,7 +9,9 @@ import {
RunnableMap, RunnableMap,
RunnableLambda, RunnableLambda,
} from '@langchain/core/runnables'; } from '@langchain/core/runnables';
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai'; import { ChatOllama } from '@langchain/community/chat_models/ollama';
import { Ollama } from '@langchain/community/llms/ollama';
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
import { StringOutputParser } from '@langchain/core/output_parsers'; import { StringOutputParser } from '@langchain/core/output_parsers';
import { Document } from '@langchain/core/documents'; import { Document } from '@langchain/core/documents';
import { searchSearxng } from '../core/searxng'; import { searchSearxng } from '../core/searxng';
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
import eventEmitter from 'events'; import eventEmitter from 'events';
import computeSimilarity from '../utils/computeSimilarity'; import computeSimilarity from '../utils/computeSimilarity';
const chatLLM = new ChatOpenAI({ const chatLLM = new ChatOllama({
modelName: process.env.MODEL_NAME, baseUrl: process.env.OLLAMA_URL,
model: process.env.MODEL_NAME,
temperature: 0.7, temperature: 0.7,
}); });
const llm = new OpenAI({ const llm = new Ollama({
temperature: 0, temperature: 0,
modelName: process.env.MODEL_NAME, model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const embeddings = new OpenAIEmbeddings({ const embeddings = new OllamaEmbeddings({
modelName: 'text-embedding-3-large', model: process.env.MODEL_NAME,
baseUrl: process.env.OLLAMA_URL,
}); });
const basicYoutubeSearchRetrieverPrompt = ` const basicYoutubeSearchRetrieverPrompt = `