Source code for langchain_core.document_loaders.base
"""Abstract interface for document loader implementations."""from__future__importannotationsfromabcimportABC,abstractmethodfromtypingimportTYPE_CHECKING,Optionalfromlangchain_core.runnablesimportrun_in_executorifTYPE_CHECKING:fromcollections.abcimportAsyncIterator,Iteratorfromlangchain_text_splittersimportTextSplitterfromlangchain_core.documentsimportDocumentfromlangchain_core.documents.baseimportBlob
class BaseLoader(ABC):  # noqa: B024
    """Interface for Document Loader.

    Implementations should implement the lazy-loading method using generators
    to avoid loading all Documents into memory at once.

    `load` is provided just for user convenience and should not be overridden.
    """

    # Sub-classes should not implement this method directly. Instead, they
    # should implement the lazy load method.
    def load(self) -> list[Document]:
        """Load data into Document objects."""
        return list(self.lazy_load())

    async def aload(self) -> list[Document]:
        """Load data into Document objects."""
        return [document async for document in self.alazy_load()]

    def load_and_split(
        self, text_splitter: Optional[TextSplitter] = None
    ) -> list[Document]:
        """Load Documents and split into chunks. Chunks are returned as Documents.

        Do not override this method. It should be considered to be deprecated!

        Args:
            text_splitter: TextSplitter instance to use for splitting documents.
                Defaults to RecursiveCharacterTextSplitter.

        Returns:
            List of Documents.
        """
        if text_splitter is None:
            try:
                from langchain_text_splitters import RecursiveCharacterTextSplitter
            except ImportError as e:
                msg = (
                    "Unable to import from langchain_text_splitters. Please specify "
                    "text_splitter or install langchain_text_splitters with "
                    "`pip install -U langchain-text-splitters`."
                )
                raise ImportError(msg) from e

            _text_splitter: TextSplitter = RecursiveCharacterTextSplitter()
        else:
            _text_splitter = text_splitter
        docs = self.load()
        return _text_splitter.split_documents(docs)

    # Attention: This method will be upgraded into an abstractmethod once it's
    #            implemented in all the existing subclasses.
    def lazy_load(self) -> Iterator[Document]:
        """A lazy loader for Documents."""
        if type(self).load != BaseLoader.load:
            return iter(self.load())
        msg = f"{self.__class__.__name__} does not implement lazy_load()"
        raise NotImplementedError(msg)

    async def alazy_load(self) -> AsyncIterator[Document]:
        """A lazy loader for Documents."""
        iterator = await run_in_executor(None, self.lazy_load)
        done = object()
        while True:
            doc = await run_in_executor(None, next, iterator, done)  # type: ignore[call-arg, arg-type]
            if doc is done:
                break
            yield doc  # type: ignore[misc]

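# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): a minimal subclass showing
# how the lazy-loading interface above is typically implemented in user code.
# The class name `SimpleTextLoader` and its `file_path` parameter are
# hypothetical, chosen only for illustration. Because `lazy_load` is a
# generator, the inherited methods work without further code: `load()`
# materializes the generator into a list, and `aload()`/`alazy_load()` run the
# synchronous iterator in an executor.

from collections.abc import Iterator

from langchain_core.document_loaders.base import BaseLoader
from langchain_core.documents import Document


class SimpleTextLoader(BaseLoader):
    """Yield one Document per line of a UTF-8 text file (illustrative only)."""

    def __init__(self, file_path: str) -> None:
        self.file_path = file_path

    def lazy_load(self) -> Iterator[Document]:
        # Documents are yielded one at a time, so the whole file is never
        # held in memory unless the caller explicitly uses `load()`.
        with open(self.file_path, encoding="utf-8") as f:
            for line_number, line in enumerate(f, start=1):
                yield Document(
                    page_content=line.rstrip("\n"),
                    metadata={"source": self.file_path, "line": line_number},
                )
# ---------------------------------------------------------------------------
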
class BaseBlobParser(ABC):
    """Abstract interface for blob parsers.

    A blob parser provides a way to parse raw data stored in a blob into one
    or more documents.

    The parser can be composed with blob loaders, making it easy to reuse
    a parser independent of how the blob was originally loaded.
    """

    @abstractmethod
    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        """Lazy parsing interface.

        Subclasses are required to implement this method.

        Args:
            blob: Blob instance

        Returns:
            Generator of documents
        """

    def parse(self, blob: Blob) -> list[Document]:
        """Eagerly parse the blob into a document or documents.

        This is a convenience method for interactive development environments.

        Production applications should favor the lazy_parse method instead.

        Subclasses should generally not override this parse method.

        Args:
            blob: Blob instance

        Returns:
            List of documents
        """
        return list(self.lazy_parse(blob))
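
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): a minimal parser showing how
# `lazy_parse` is typically implemented in user code. The class name
# `PlainTextBlobParser` is hypothetical. The inherited `parse` method then
# works as-is, e.g. `PlainTextBlobParser().parse(Blob.from_path("notes.txt"))`.

from collections.abc import Iterator

from langchain_core.document_loaders.base import BaseBlobParser
from langchain_core.documents import Document
from langchain_core.documents.base import Blob


class PlainTextBlobParser(BaseBlobParser):
    """Decode a blob's raw bytes into a single Document (illustrative only)."""

    def lazy_parse(self, blob: Blob) -> Iterator[Document]:
        # `Blob.as_string()` decodes the blob's data; the blob's `source`
        # (for example, a file path) is preserved as Document metadata.
        yield Document(
            page_content=blob.as_string(),
            metadata={"source": blob.source},
        )
# ---------------------------------------------------------------------------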