Fix query appearing twice in chat history
The initial query appears twice in the prompt. This duplication is ignored by OpenAI models; however, it breaks Gemini models, as they fail with an error stating that AI and user prompts need to alternate. Tested all search modes with both OpenAI GPT-3.5 Turbo and Vertex Gemini 1.0, and this change now appears to function correctly with both.
This commit is contained in:
parent
2e58dab30a
commit
8b9b4085ea
6 changed files with 0 additions and 6 deletions
|
@ -209,7 +209,6 @@ const createBasicAcademicSearchAnsweringChain = (
|
||||||
ChatPromptTemplate.fromMessages([
|
ChatPromptTemplate.fromMessages([
|
||||||
['system', basicAcademicSearchResponsePrompt],
|
['system', basicAcademicSearchResponsePrompt],
|
||||||
new MessagesPlaceholder('chat_history'),
|
new MessagesPlaceholder('chat_history'),
|
||||||
['user', '{query}'],
|
|
||||||
]),
|
]),
|
||||||
llm,
|
llm,
|
||||||
strParser,
|
strParser,
|
||||||
|
|
|
@ -205,7 +205,6 @@ const createBasicRedditSearchAnsweringChain = (
|
||||||
ChatPromptTemplate.fromMessages([
|
ChatPromptTemplate.fromMessages([
|
||||||
['system', basicRedditSearchResponsePrompt],
|
['system', basicRedditSearchResponsePrompt],
|
||||||
new MessagesPlaceholder('chat_history'),
|
new MessagesPlaceholder('chat_history'),
|
||||||
['user', '{query}'],
|
|
||||||
]),
|
]),
|
||||||
llm,
|
llm,
|
||||||
strParser,
|
strParser,
|
||||||
|
|
|
@ -203,7 +203,6 @@ const createBasicWebSearchAnsweringChain = (
|
||||||
ChatPromptTemplate.fromMessages([
|
ChatPromptTemplate.fromMessages([
|
||||||
['system', basicWebSearchResponsePrompt],
|
['system', basicWebSearchResponsePrompt],
|
||||||
new MessagesPlaceholder('chat_history'),
|
new MessagesPlaceholder('chat_history'),
|
||||||
['user', '{query}'],
|
|
||||||
]),
|
]),
|
||||||
llm,
|
llm,
|
||||||
strParser,
|
strParser,
|
||||||
|
|
|
@ -165,7 +165,6 @@ const createBasicWolframAlphaSearchAnsweringChain = (llm: BaseChatModel) => {
|
||||||
ChatPromptTemplate.fromMessages([
|
ChatPromptTemplate.fromMessages([
|
||||||
['system', basicWolframAlphaSearchResponsePrompt],
|
['system', basicWolframAlphaSearchResponsePrompt],
|
||||||
new MessagesPlaceholder('chat_history'),
|
new MessagesPlaceholder('chat_history'),
|
||||||
['user', '{query}'],
|
|
||||||
]),
|
]),
|
||||||
llm,
|
llm,
|
||||||
strParser,
|
strParser,
|
||||||
|
|
|
@ -46,7 +46,6 @@ const createWritingAssistantChain = (llm: BaseChatModel) => {
|
||||||
ChatPromptTemplate.fromMessages([
|
ChatPromptTemplate.fromMessages([
|
||||||
['system', writingAssistantPrompt],
|
['system', writingAssistantPrompt],
|
||||||
new MessagesPlaceholder('chat_history'),
|
new MessagesPlaceholder('chat_history'),
|
||||||
['user', '{query}'],
|
|
||||||
]),
|
]),
|
||||||
llm,
|
llm,
|
||||||
strParser,
|
strParser,
|
||||||
|
|
|
@ -205,7 +205,6 @@ const createBasicYoutubeSearchAnsweringChain = (
|
||||||
ChatPromptTemplate.fromMessages([
|
ChatPromptTemplate.fromMessages([
|
||||||
['system', basicYoutubeSearchResponsePrompt],
|
['system', basicYoutubeSearchResponsePrompt],
|
||||||
new MessagesPlaceholder('chat_history'),
|
new MessagesPlaceholder('chat_history'),
|
||||||
['user', '{query}'],
|
|
||||||
]),
|
]),
|
||||||
llm,
|
llm,
|
||||||
strParser,
|
strParser,
|
||||||
|
|
Loading…
Add table
Reference in a new issue