from __future__ import annotations

from typing import Any, Optional

from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR


@deprecated(
    since="0.2.7",
    alternative=(
        "example in API reference with more detail: "
        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"  # noqa: E501
    ),
    removal="1.0",
)
class QAGenerationChain(Chain):
    """Base class for question-answer generation chains.

    This class is deprecated. See below for an alternative implementation.

    Advantages of this implementation include:

    - Supports async and streaming;
    - Surfaces prompt and text splitter for easier customization;
    - Use of JsonOutputParser supports JSONPatch operations in streaming mode,
      as well as robustness to markdown.

    .. code-block:: python

        from langchain.chains.qa_generation.prompt import CHAT_PROMPT as prompt
        # Note: import PROMPT if using a legacy non-chat model.
        from langchain_core.output_parsers import JsonOutputParser
        from langchain_core.runnables import (
            RunnableLambda,
            RunnableParallel,
            RunnablePassthrough,
        )
        from langchain_core.runnables.base import RunnableEach
        from langchain_openai import ChatOpenAI
        from langchain_text_splitters import RecursiveCharacterTextSplitter

        llm = ChatOpenAI()
        text_splitter = RecursiveCharacterTextSplitter(chunk_overlap=500)
        split_text = RunnableLambda(
            lambda x: text_splitter.create_documents([x])
        )

        chain = RunnableParallel(
            text=RunnablePassthrough(),
            questions=(
                split_text
                | RunnableEach(bound=prompt | llm | JsonOutputParser())
            ),
        )
    """

    llm_chain: LLMChain
    """LLM Chain that generates responses from user input and context."""
    text_splitter: TextSplitter = Field(
        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
    )
    """Text splitter that splits the input into chunks."""
    input_key: str = "text"
    """Key of the input to the chain."""
    output_key: str = "questions"
    """Key of the output of the chain."""
    k: Optional[int] = None
    """Number of questions to generate."""
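    # Usage sketch for the alternative (LCEL) implementation in the class
    # docstring above; illustrative only, not part of the original module.
    # Assumes the ``chain`` built there and a configured ``OPENAI_API_KEY``:
    #
    #     result = chain.invoke("... some long reference text ...")
    #     result["text"]       # the original input, passed through
    #     result["questions"]  # one parsed {"question": ..., "answer": ...}
    #                          # dict per chunk produced by the splitter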
    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> QAGenerationChain:
        """Create a QAGenerationChain from a language model.

        Args:
            llm: a language model
            prompt: a prompt template
            **kwargs: additional arguments

        Returns:
            a QAGenerationChain instance
        """
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        chain = LLMChain(llm=llm, prompt=_prompt)
        return cls(llm_chain=chain, **kwargs)
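

if __name__ == "__main__":
    # Minimal usage sketch for ``from_llm`` (illustrative, not part of the
    # original module). Assumes ``langchain_openai`` is installed and
    # ``OPENAI_API_KEY`` is set in the environment.
    from langchain_openai import ChatOpenAI

    qa_chain = QAGenerationChain.from_llm(llm=ChatOpenAI())
    result = qa_chain.invoke(
        {"text": "LangChain is a framework for building LLM applications."}
    )
    # ``result["questions"]`` holds the generated question-answer pairs,
    # one per chunk produced by the text splitter.
    print(result["questions"])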