feat(agents): use ollama models
This commit is contained in:
parent
89340fcfff
commit
811822c03b
8 changed files with 65 additions and 41 deletions
|
@@ -1,5 +1,5 @@
|
|||
PORT=3001
|
||||
OPENAI_API_KEY=
|
||||
OLLAMA_URL=http://localhost:11434 # url of the ollama server
|
||||
SIMILARITY_MEASURE=cosine # cosine or dot
|
||||
SEARXNG_API_URL= # no need to fill this if using docker
|
||||
MODEL_NAME=gpt-3.5-turbo
|
||||
MODEL_NAME=llama2
|
|
@@ -9,7 +9,9 @@ import {
|
|||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
|
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
|
|||
import eventEmitter from 'events';
|
||||
import computeSimilarity from '../utils/computeSimilarity';
|
||||
|
||||
const chatLLM = new ChatOpenAI({
|
||||
modelName: process.env.MODEL_NAME,
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new OpenAI({
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
modelName: process.env.MODEL_NAME,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const embeddings = new OpenAIEmbeddings({
|
||||
modelName: 'text-embedding-3-large',
|
||||
const embeddings = new OllamaEmbeddings({
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicAcademicSearchRetrieverPrompt = `
|
||||
|
|
|
@@ -4,15 +4,15 @@ import {
|
|||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { PromptTemplate } from '@langchain/core/prompts';
|
||||
import { OpenAI } from '@langchain/openai';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import { BaseMessage } from '@langchain/core/messages';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
|
||||
const llm = new OpenAI({
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
modelName: process.env.MODEL_NAME,
|
||||
model: process.env.MODEL_NAME,
|
||||
});
|
||||
|
||||
const imageSearchChainPrompt = `
|
||||
|
|
|
@@ -9,7 +9,9 @@ import {
|
|||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
|
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
|
|||
import eventEmitter from 'events';
|
||||
import computeSimilarity from '../utils/computeSimilarity';
|
||||
|
||||
const chatLLM = new ChatOpenAI({
|
||||
modelName: process.env.MODEL_NAME,
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new OpenAI({
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
modelName: process.env.MODEL_NAME,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const embeddings = new OpenAIEmbeddings({
|
||||
modelName: 'text-embedding-3-large',
|
||||
const embeddings = new OllamaEmbeddings({
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicRedditSearchRetrieverPrompt = `
|
||||
|
|
|
@@ -9,7 +9,9 @@ import {
|
|||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
|
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
|
|||
import eventEmitter from 'events';
|
||||
import computeSimilarity from '../utils/computeSimilarity';
|
||||
|
||||
const chatLLM = new ChatOpenAI({
|
||||
modelName: process.env.MODEL_NAME,
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new OpenAI({
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
modelName: process.env.MODEL_NAME,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const embeddings = new OpenAIEmbeddings({
|
||||
modelName: 'text-embedding-3-large',
|
||||
const embeddings = new OllamaEmbeddings({
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicSearchRetrieverPrompt = `
|
||||
|
|
|
@@ -9,7 +9,8 @@ import {
|
|||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
|
@@ -17,14 +18,16 @@ import type { StreamEvent } from '@langchain/core/tracers/log_stream';
|
|||
import formatChatHistoryAsString from '../utils/formatHistory';
|
||||
import eventEmitter from 'events';
|
||||
|
||||
const chatLLM = new ChatOpenAI({
|
||||
modelName: process.env.MODEL_NAME,
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new OpenAI({
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
modelName: process.env.MODEL_NAME,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicWolframAlphaSearchRetrieverPrompt = `
|
||||
|
|
|
@@ -4,13 +4,14 @@ import {
|
|||
MessagesPlaceholder,
|
||||
} from '@langchain/core/prompts';
|
||||
import { RunnableSequence } from '@langchain/core/runnables';
|
||||
import { ChatOpenAI } from '@langchain/openai';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import type { StreamEvent } from '@langchain/core/tracers/log_stream';
|
||||
import eventEmitter from 'events';
|
||||
|
||||
const chatLLM = new ChatOpenAI({
|
||||
modelName: process.env.MODEL_NAME,
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
|
|
|
@@ -9,7 +9,9 @@ import {
|
|||
RunnableMap,
|
||||
RunnableLambda,
|
||||
} from '@langchain/core/runnables';
|
||||
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from '@langchain/openai';
|
||||
import { ChatOllama } from '@langchain/community/chat_models/ollama';
|
||||
import { Ollama } from '@langchain/community/llms/ollama';
|
||||
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama';
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers';
|
||||
import { Document } from '@langchain/core/documents';
|
||||
import { searchSearxng } from '../core/searxng';
|
||||
|
@@ -18,18 +20,21 @@ import formatChatHistoryAsString from '../utils/formatHistory';
|
|||
import eventEmitter from 'events';
|
||||
import computeSimilarity from '../utils/computeSimilarity';
|
||||
|
||||
const chatLLM = new ChatOpenAI({
|
||||
modelName: process.env.MODEL_NAME,
|
||||
const chatLLM = new ChatOllama({
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
model: process.env.MODEL_NAME,
|
||||
temperature: 0.7,
|
||||
});
|
||||
|
||||
const llm = new OpenAI({
|
||||
const llm = new Ollama({
|
||||
temperature: 0,
|
||||
modelName: process.env.MODEL_NAME,
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const embeddings = new OpenAIEmbeddings({
|
||||
modelName: 'text-embedding-3-large',
|
||||
const embeddings = new OllamaEmbeddings({
|
||||
model: process.env.MODEL_NAME,
|
||||
baseUrl: process.env.OLLAMA_URL,
|
||||
});
|
||||
|
||||
const basicYoutubeSearchRetrieverPrompt = `
|
||||
|
|
Loading…
Add table
Reference in a new issue