import asyncio
import logging
from typing import AsyncIterator, Iterator, List, Optional

from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utils.user_agent import get_user_agent

logger = logging.getLogger(__name__)


class AsyncChromiumLoader(BaseLoader):
    """Scrape HTML pages from URLs using a headless instance of Chromium."""
    def __init__(
        self,
        urls: List[str],
        *,
        headless: bool = True,
        user_agent: Optional[str] = None,
    ):
        """Initialize the loader with a list of URL paths.

        Args:
            urls: A list of URLs to scrape content from.
            headless: Whether to run the browser in headless mode.
            user_agent: The user agent to use for the browser.

        Raises:
            ImportError: If the required 'playwright' package is not installed.
        """
        self.urls = urls
        self.headless = headless
        self.user_agent = user_agent or get_user_agent()

        try:
            import playwright  # noqa: F401
        except ImportError:
            raise ImportError(
                "playwright is required for AsyncChromiumLoader. "
                "Please install it with `pip install playwright`."
            )
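
    # NOTE: Beyond the `playwright` Python package checked for above, a
    # browser binary must also be available; running `playwright install
    # chromium` once from the shell downloads the Chromium build that
    # Playwright drives.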
    async def ascrape_playwright(self, url: str) -> str:
        """Asynchronously scrape the content of a given URL using
        Playwright's async API.

        Args:
            url (str): The URL to scrape.

        Returns:
            str: The scraped HTML content or an error message if an
                exception occurs.
        """
        from playwright.async_api import async_playwright

        logger.info("Starting scraping...")
        results = ""
        async with async_playwright() as p:
            browser = await p.chromium.launch(headless=self.headless)
            try:
                page = await browser.new_page(user_agent=self.user_agent)
                await page.goto(url)
                results = await page.content()  # Simply get the HTML content
                logger.info("Content scraped")
            except Exception as e:
                results = f"Error: {e}"
            await browser.close()
        return results
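
    # NOTE: `ascrape_playwright` launches and closes a fresh Chromium
    # instance per call, so scraping N URLs starts N browsers. This keeps
    # each scrape isolated at the cost of per-URL startup overhead.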
    def lazy_load(self) -> Iterator[Document]:
        """Lazily load text content from the provided URLs.

        This method yields Documents one at a time as they're scraped,
        instead of waiting to scrape all URLs before returning.

        Yields:
            Document: The scraped content encapsulated within a Document
                object.
        """
        for url in self.urls:
            html_content = asyncio.run(self.ascrape_playwright(url))
            metadata = {"source": url}
            yield Document(page_content=html_content, metadata=metadata)
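
    # NOTE: `lazy_load` drives the async scraper via `asyncio.run`, which
    # raises a RuntimeError if an event loop is already running (e.g., in
    # a Jupyter notebook); prefer `alazy_load` from async contexts.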
    async def alazy_load(self) -> AsyncIterator[Document]:
        """Asynchronously load text content from the provided URLs.

        This method leverages asyncio to initiate the scraping of all
        provided URLs simultaneously, improving performance through
        concurrent requests. The scrapes are awaited as a batch, and a
        Document is then yielded for each URL with its scraped content.

        Yields:
            Document: A Document object containing the scraped content,
                along with its source URL as metadata.
        """
        tasks = [self.ascrape_playwright(url) for url in self.urls]
        results = await asyncio.gather(*tasks)
        for url, content in zip(self.urls, results):
            metadata = {"source": url}
            yield Document(page_content=content, metadata=metadata)
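

# --- Usage sketch ---
# A minimal, hypothetical example of driving the loader, shown here for
# illustration only. It assumes `pip install playwright` and
# `playwright install chromium` have already been run, and the URL below
# is purely illustrative.
if __name__ == "__main__":
    loader = AsyncChromiumLoader(["https://example.com"], headless=True)

    # Synchronous path: Documents are scraped one at a time, on demand.
    for doc in loader.lazy_load():
        print(doc.metadata["source"], len(doc.page_content))

    # Async path: all URLs are scraped concurrently and yielded after the
    # batch completes.
    async def amain() -> None:
        async for doc in loader.alazy_load():
            print(doc.metadata["source"], len(doc.page_content))

    asyncio.run(amain())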