You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I searched the LangChain.js documentation with the integrated search.
I used the GitHub search to find a similar question and didn't find it.
I am sure that this is a bug in LangChain.js rather than my code.
The bug is not resolved by updating to the latest stable version of LangChain (or the specific integration package).
Example Code
NextJS endpoint
import{HNSWLib}from'@langchain/community/vectorstores/hnswlib'import{BaseMessage}from'@langchain/core/messages'import{ChatPromptTemplate}from'@langchain/core/prompts'import{Runnable}from'@langchain/core/runnables'import{ChatOpenAI,OpenAIEmbeddings}from'@langchain/openai'import{StreamingTextResponse,LangChainStream}from'ai'import{createStuffDocumentsChain}from'langchain/chains/combine_documents'import{createRetrievalChain}from'langchain/chains/retrieval'import{Document}from'langchain/document'import{FunctionalTranslator,SelfQueryRetriever}from'langchain/retrievers/self_query'import{AttributeInfo}from'langchain/schema/query_constructor'import{NextResponse}from'next/server'import{z}from'zod'constQA_PROMPT_TEMPLATE=`You are a good assistant that answers questions. Your knowledge is strictly limited to the following piece of context. Use it to answer the question at the end. If the answer can't be found in the context, just say you don't know. *DO NOT* try to make up an answer. If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context. Give a response in the same language as the question. Context: """"{context}""" Question: """{input}""" Helpful answer in markdown:`typeRetrievalChainType=Runnable<{input: stringchat_history?: BaseMessage[]|string}&{[key: string]: unknown},{context: Document[]answer: any}&{[key: string]: unknown}>constgetDocumentsContents=async(chain: RetrievalChainType)=>{constresult=awaitchain.invoke({input: "Describe what's this content about in one sentence"})returnresult.answer}constgetSelfQueryDocs=async({vectorStore, prompt}: {vectorStore: HNSWLib;prompt: string})=>{constllm=newChatOpenAI({temperature: 0,openAIApiKey: process.env.OPENAI_API_KEY,modelName: 'gpt-4o-mini',})constquestionAnswerChain=awaitcreateStuffDocumentsChain({
llm,prompt: ChatPromptTemplate.fromTemplate(QA_PROMPT_TEMPLATE),})constchain=awaitcreateRetrievalChain({retriever: vectorStore.asRetriever(),combineDocsChain: questionAnswerChain,})constdocumentContents=awaitgetDocumentsContents(chain)if(!documentContents){return[]}constattributeInfo: AttributeInfo[]=[{name: 'version',description: 'The version number of the document, e.g., "v3.1", "4.0"',type: 'string',},]constretriever=awaitSelfQueryRetriever.fromLLM({
documentContents,
vectorStore,
llm,structuredQueryTranslator: newFunctionalTranslator(),
attributeInfo,})constselfQueryDocsResult=awaitretriever.invoke(prompt)returnselfQueryDocsResult}exportasyncfunctionPOST(request: Request){constbody=awaitrequest.json()constbodySchema=z.object({prompt: z.string(),})const{prompt}=bodySchema.parse(body)try{constembeddings=newOpenAIEmbeddings({openAIApiKey: process.env.OPENAI_API_KEY,})constvectorStore=awaitHNSWLib.load('vectorstore/rag-store.index',embeddings)const{stream, handlers}=LangChainStream()constllm=newChatOpenAI({temperature: 0,openAIApiKey: process.env.OPENAI_API_KEY,streaming: true,modelName: 'gpt-4o-mini',callbacks: [handlers],})constselfQueryDocs=awaitgetSelfQueryDocs({
vectorStore,
prompt,})constselfQueryRetriever=awaitHNSWLib.fromDocuments(selfQueryDocs,embeddings)constquestionAnswerChain=awaitcreateStuffDocumentsChain({
llm,prompt: ChatPromptTemplate.fromTemplate(QA_PROMPT_TEMPLATE),})constchain=awaitcreateRetrievalChain({retriever: selfQueryRetriever.asRetriever(),combineDocsChain: questionAnswerChain,})chain.invoke({input: prompt})returnnewStreamingTextResponse(stream)}catch(error){console.log('error',error)returnnewNextResponse(JSON.stringify({error}),{status: 500,headers: {'content-type': 'application/json'},})}}
Error Message and Stack Trace (if applicable)
Error message
Failed to import peggy. Please install peggy (i.e. "npm install peggy" or "yarn add peggy").
I'm trying to use SelfQueryRetriever but it's throwing an error of a missing package on some very specific cases.
Error: Failed to import peggy. Please install peggy (i.e. "npm install peggy" or "yarn add peggy").
It's thrown by /node_modules/langchain/dist/output_parsers/expression_type_handlers/base.js
Following are some examples of prompts that are breaking:
How do CAN and LIN signals join in automotive networks as of ISO 11898 version 2.0?
Where do Zigbee and Wi-Fi signals coexist in IoT devices as of IEEE 802.15.4?
How do OPC and MQTT protocols select data in IIoT as defined by IEC 62541 version 1.4?
Where do USB and Ethernet standards join in smart homes as per IEEE 802.3 version 1.0?
Looks like the trigger is sending a user prompt containing SQL keywords (as, where, select, from, join) along with a word matching the AttributeInfo (`version` in this case) at some specific length. Anything different from that works, which is very weird to me.
I got a repo set with a reproducible scenario: https://github.com/soutot/langchain-selfquery
You can pull it and follow the instructions to run the code and how to generate your vectorstore and reproduce the error.
I guess it should either include the peggy package as a LangChain dependency, or handle the prompt in a different way to prevent crashing — especially because this is very hard to catch, since we don't know what the users' prompts could be.
The error you're encountering with the SelfQueryRetriever in LangChain.js is due to the missing peggy package, which is required as a peer dependency for the self-querying retriever to function properly. This package is necessary for parsing capabilities, especially when processing queries that contain SQL keywords and the word "version" [1].
To resolve this issue, you need to install the peggy package in your project. You can do this by running:
npm install peggy
This should resolve the error and allow the SelfQueryRetriever to process user prompts correctly.
Checked other resources
Example Code
NextJS endpoint
Error Message and Stack Trace (if applicable)
Error message
Failed to import peggy. Please install peggy (i.e. "npm install peggy" or "yarn add peggy").
Stack trace
Description
I'm trying to use SelfQueryRetriever but it's throwing an error of a missing package on some very specific cases.
Error:
Failed to import peggy. Please install peggy (i.e. "npm install peggy" or "yarn add peggy").
It's thrown by
/node_modules/langchain/dist/output_parsers/expression_type_handlers/base.js
Following are some examples of prompts that are breaking:
Looks like the criteria is sending a user prompt with SQL words (
as
,where
,select
,from
,join
) along with a word matching the AttributeInfo (`version` in this case) at some specific length. Anything different from that works, which is very weird to me. I got a repo set up with a reproducible scenario: https://github.com/soutot/langchain-selfquery
You can pull it and follow the instructions to run the code and how to generate your vectorstore and reproduce the error.
I guess it should either include
peggy
package as a LangChain dependency, or handle the prompt in a different way to prevent crashing — especially because this is very hard to catch, since we don't know what the users' prompts could be.

System Info
[email protected] | MIT | deps: 12 | versions: 300
Typescript bindings for langchain
https://github.com/langchain-ai/langchainjs/tree/main/langchain/
keywords: llm, ai, gpt3, chain, prompt, prompt engineering, chatgpt, machine learning, ml, openai, embeddings, vectorstores
dist
.tarball: https://registry.npmjs.org/langchain/-/langchain-0.3.5.tgz
.shasum: 87b282454bc215b12b920d4dd5e35ed58030bad1
.integrity: sha512-Gq0xC45Sq6nszS8kQG9suCrmBsuXH0INMmiF7D2TwPb6mtG35Jiq4grCk9ykpwPsarTHdty3SzUbII/FqiYSSw==
.unpackedSize: 4.7 MB
dependencies:
@langchain/openai: >=0.1.0 <0.4.0
@langchain/textsplitters: >=0.0.0 <0.2.0
js-tiktoken: ^1.0.12
js-yaml: ^4.1.0
jsonpointer: ^5.0.1
langsmith: ^0.2.0
openapi-types: ^12.1.3
p-retry: 4
uuid: ^10.0.0
yaml: ^2.2.1
zod-to-json-schema: ^3.22.3
zod: ^3.22.4
maintainers:
dist-tags:
latest: 0.3.5
next: 0.3.2-rc.0
tag-for-publishing-older-releases: 0.2.20
published 2 weeks ago by basproul [email protected]
The text was updated successfully, but these errors were encountered: