class ContentFormat(str, Enum):
    """Enumerator of the content formats of Confluence page."""

    EDITOR = "body.editor"
    EXPORT_VIEW = "body.export_view"
    ANONYMOUS_EXPORT_VIEW = "body.anonymous_export_view"
    STORAGE = "body.storage"
    VIEW = "body.view"

    def get_content(self, page: dict) -> str:
        return page["body"][self.name.lower()]["value"]
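
# Illustrative sketch (not part of the upstream module): the enum *name*, lowercased,
# selects the sub-dict of page["body"], while the enum *value* is the "expand" key
# sent to the Confluence REST API. The `_example_page` dict below is an assumed,
# trimmed-down page payload used only to show the mapping.
_example_page = {"body": {"storage": {"value": "<p>Hello, Confluence</p>"}}}
assert ContentFormat.STORAGE.get_content(_example_page) == "<p>Hello, Confluence</p>"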

class ConfluenceLoader(BaseLoader):
    """Load `Confluence` pages.

    Port of https://llamahub.ai/l/confluence

    This currently supports username/api_key, OAuth2 login, personal access token
    or cookies authentication.

    Specify a list of page_ids and/or a space_key to load the corresponding pages
    into Document objects; if both are specified, the union of both sets is returned.

    You can also specify a boolean `include_attachments` to include attachments.
    This is set to False by default; if set to True, all attachments will be
    downloaded and ConfluenceLoader will extract the text from the attachments and
    add it to the Document object. Currently supported attachment types are:
    PDF, PNG, JPEG/JPG, SVG, Word and Excel.

    The Confluence API supports different formats of page content. The storage
    format is the raw XML representation for storage. The view format is the HTML
    representation for viewing, with macros rendered as they would appear to users.
    You can pass an enum `content_format` argument to specify the content format;
    this is set to `ContentFormat.STORAGE` by default. The supported values are:
    `ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
    `ContentFormat.ANONYMOUS_EXPORT_VIEW`, `ContentFormat.STORAGE`,
    and `ContentFormat.VIEW`.

    Hint: space_key and page_id can both be found in the URL of a page in
    Confluence - https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>

    Example:
        .. code-block:: python

            from langchain_community.document_loaders import ConfluenceLoader

            loader = ConfluenceLoader(
                url="https://yoursite.atlassian.com/wiki",
                username="me",
                api_key="12345",
                space_key="SPACE",
                limit=50,
            )
            documents = loader.load()

            # Server on prem
            loader = ConfluenceLoader(
                url="https://confluence.yoursite.com/",
                username="me",
                api_key="your_password",
                cloud=False,
                space_key="SPACE",
                limit=50,
            )
            documents = loader.load()

    :param url: Base URL of the Confluence instance
    :type url: str
    :param api_key: Confluence API key or password, defaults to None
    :type api_key: str, optional
    :param username: Confluence username, defaults to None
    :type username: str, optional
    :param oauth2: OAuth2 credentials dictionary, defaults to None
    :type oauth2: dict, optional
    :param token: Personal access token, defaults to None
    :type token: str, optional
    :param cloud: Whether the instance is Confluence Cloud, defaults to True
    :type cloud: bool, optional
    :param number_of_retries: How many times to retry, defaults to 3
    :type number_of_retries: Optional[int], optional
    :param min_retry_seconds: defaults to 2
    :type min_retry_seconds: Optional[int], optional
    :param max_retry_seconds: defaults to 10
    :type max_retry_seconds: Optional[int], optional
    :param confluence_kwargs: additional kwargs to initialize confluence with
    :type confluence_kwargs: dict, optional
    :param cookies: Cookies to use for authentication, defaults to None
    :type cookies: dict, optional
    :param space_key: Space key retrieved from a confluence URL, defaults to None
    :type space_key: Optional[str], optional
    :param page_ids: List of specific page IDs to load, defaults to None
    :type page_ids: Optional[List[str]], optional
    :param label: Get all pages with this label, defaults to None
    :type label: Optional[str], optional
    :param cql: CQL Expression, defaults to None
    :type cql: Optional[str], optional
    :param include_restricted_content: defaults to False
    :type include_restricted_content: bool, optional
    :param include_archived_content: Whether to include archived content,
        defaults to False
    :type include_archived_content: bool, optional
    :param include_attachments: defaults to False
    :type include_attachments: bool, optional
    :param attachment_filter_func: A function that takes the attachment
        information from Confluence and decides whether the attachment
        is processed.
    :type attachment_filter_func: Callable[[dict], bool], optional
    :param include_comments: defaults to False
    :type include_comments: bool, optional
    :param include_labels: Whether to include page labels in the document
        metadata, defaults to False
    :type include_labels: bool, optional
    :param content_format: Specify content format, defaults to
        ContentFormat.STORAGE, the supported values are:
        `ContentFormat.EDITOR`, `ContentFormat.EXPORT_VIEW`,
        `ContentFormat.ANONYMOUS_EXPORT_VIEW`, `ContentFormat.STORAGE`,
        and `ContentFormat.VIEW`.
    :type content_format: ContentFormat
    :param limit: Maximum number of pages to retrieve per request, defaults to 50
    :type limit: int, optional
    :param max_pages: Maximum number of pages to retrieve in total, defaults to 1000
    :type max_pages: int, optional
    :param ocr_languages: The languages to use for the Tesseract agent. To use a
        language, you'll first need to install the appropriate Tesseract language
        pack.
    :type ocr_languages: str, optional
    :param keep_markdown_format: Whether to keep the markdown format, defaults to
        False
    :type keep_markdown_format: bool
    :param keep_newlines: Whether to keep the newlines format, defaults to False
    :type keep_newlines: bool
    :raises ValueError: Errors while validating input
    :raises ImportError: Required dependencies not installed.
    """

    def __init__(
        self,
        url: str,
        api_key: Optional[str] = None,
        username: Optional[str] = None,
        session: Optional[requests.Session] = None,
        oauth2: Optional[dict] = None,
        token: Optional[str] = None,
        cloud: Optional[bool] = True,
        number_of_retries: Optional[int] = 3,
        min_retry_seconds: Optional[int] = 2,
        max_retry_seconds: Optional[int] = 10,
        confluence_kwargs: Optional[dict] = None,
        *,
        cookies: Optional[dict] = None,
        space_key: Optional[str] = None,
        page_ids: Optional[List[str]] = None,
        label: Optional[str] = None,
        cql: Optional[str] = None,
        include_restricted_content: bool = False,
        include_archived_content: bool = False,
        include_attachments: bool = False,
        include_comments: bool = False,
        include_labels: bool = False,
        content_format: ContentFormat = ContentFormat.STORAGE,
        limit: Optional[int] = 50,
        max_pages: Optional[int] = 1000,
        ocr_languages: Optional[str] = None,
        keep_markdown_format: bool = False,
        keep_newlines: bool = False,
        attachment_filter_func: Optional[Callable[[dict], bool]] = None,
    ):
        self.space_key = space_key
        self.page_ids = page_ids
        self.label = label
        self.cql = cql
        self.include_restricted_content = include_restricted_content
        self.include_archived_content = include_archived_content
        self.include_attachments = include_attachments
        self.include_comments = include_comments
        self.include_labels = include_labels
        self.content_format = content_format
        self.limit = limit
        self.max_pages = max_pages
        self.ocr_languages = ocr_languages
        self.keep_markdown_format = keep_markdown_format
        self.keep_newlines = keep_newlines
        self.attachment_filter_func = attachment_filter_func

        confluence_kwargs = confluence_kwargs or {}
        errors = ConfluenceLoader.validate_init_args(
            url=url,
            api_key=api_key,
            username=username,
            session=session,
            oauth2=oauth2,
            cookies=cookies,
            token=token,
        )
        if errors:
            raise ValueError(f"Error(s) while validating input: {errors}")
        try:
            from atlassian import Confluence
        except ImportError:
            raise ImportError(
                "`atlassian` package not found, please run "
                "`pip install atlassian-python-api`"
            )

        self.base_url = url
        self.number_of_retries = number_of_retries
        self.min_retry_seconds = min_retry_seconds
        self.max_retry_seconds = max_retry_seconds

        if session:
            self.confluence = Confluence(url=url, session=session, **confluence_kwargs)
        elif oauth2:
            self.confluence = Confluence(
                url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
            )
        elif token:
            self.confluence = Confluence(
                url=url, token=token, cloud=cloud, **confluence_kwargs
            )
        elif cookies:
            self.confluence = Confluence(
                url=url, cookies=cookies, cloud=cloud, **confluence_kwargs
            )
        else:
            self.confluence = Confluence(
                url=url,
                username=username,
                password=api_key,
                cloud=cloud,
                **confluence_kwargs,
            )

    @staticmethod
    def validate_init_args(
        url: Optional[str] = None,
        api_key: Optional[str] = None,
        username: Optional[str] = None,
        session: Optional[requests.Session] = None,
        oauth2: Optional[dict] = None,
        token: Optional[str] = None,
        cookies: Optional[dict] = None,
    ) -> Union[List, None]:
        """Validates proper combinations of init arguments"""
        errors = []
        if url is None:
            errors.append("Must provide `base_url`")

        if (api_key and not username) or (username and not api_key):
            errors.append(
                "If one of `api_key` or `username` is provided, "
                "the other must be as well."
            )

        non_null_creds = list(
            x is not None
            for x in ((api_key or username), session, oauth2, token, cookies)
        )
        if sum(non_null_creds) > 1:
            all_names = ("(api_key, username)", "session", "oauth2", "token", "cookies")
            provided = tuple(n for x, n in zip(non_null_creds, all_names) if x)
            errors.append(
                f"Cannot provide a value for more than one of: {all_names}. Received "
                f"values for: {provided}"
            )

        if (
            oauth2
            and set(oauth2.keys()) == {"token", "client_id"}
            and set(oauth2["token"].keys()) != {"access_token", "token_type"}
        ):
            # OAuth2 token authentication
            errors.append(
                "You have either omitted required keys or added extra "
                "keys to the oauth2 dictionary. key values should be "
                "`['client_id', 'token': ['access_token', 'token_type']]`"
            )
        if (
            oauth2
            and set(oauth2.keys())
            != {"access_token", "access_token_secret", "consumer_key", "key_cert"}
            and set(oauth2.keys()) != {"token", "client_id"}
        ):
            errors.append(
                "You have either omitted required keys or added extra "
                "keys to the oauth2 dictionary. key values should be "
                "`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']` "
                "or `['client_id', 'token': ['access_token', 'token_type']]`"
            )
        return errors or None

    def _resolve_param(self, param_name: str, kwargs: Any) -> Any:
        return kwargs[param_name] if param_name in kwargs else getattr(self, param_name)

    def _lazy_load(self, **kwargs: Any) -> Iterator[Document]:
        if kwargs:
            logger.warning(
                f"Received runtime arguments {kwargs}. Passing runtime args to `load`"
                f" is deprecated. Please pass arguments during initialization instead."
            )

        space_key = self._resolve_param("space_key", kwargs)
        page_ids = self._resolve_param("page_ids", kwargs)
        label = self._resolve_param("label", kwargs)
        cql = self._resolve_param("cql", kwargs)
        include_restricted_content = self._resolve_param(
            "include_restricted_content", kwargs
        )
        include_archived_content = self._resolve_param(
            "include_archived_content", kwargs
        )
        include_attachments = self._resolve_param("include_attachments", kwargs)
        include_comments = self._resolve_param("include_comments", kwargs)
        include_labels = self._resolve_param("include_labels", kwargs)
        content_format = self._resolve_param("content_format", kwargs)
        limit = self._resolve_param("limit", kwargs)
        max_pages = self._resolve_param("max_pages", kwargs)
        ocr_languages = self._resolve_param("ocr_languages", kwargs)
        keep_markdown_format = self._resolve_param("keep_markdown_format", kwargs)
        keep_newlines = self._resolve_param("keep_newlines", kwargs)
        expand = ",".join(
            [
                content_format.value,
                "version",
                *(["metadata.labels"] if include_labels else []),
            ]
        )

        if not space_key and not page_ids and not label and not cql:
            raise ValueError(
                "Must specify at least one among `space_key`, `page_ids`, "
                "`label`, `cql` parameters."
            )

        if space_key:
            pages = self.paginate_request(
                self.confluence.get_all_pages_from_space,
                space=space_key,
                limit=limit,
                max_pages=max_pages,
                status="any" if include_archived_content else "current",
                expand=expand,
            )
            yield from self.process_pages(
                pages,
                include_restricted_content,
                include_attachments,
                include_comments,
                include_labels,
                content_format,
                ocr_languages=ocr_languages,
                keep_markdown_format=keep_markdown_format,
                keep_newlines=keep_newlines,
            )

        if label:
            pages = self.paginate_request(
                self.confluence.get_all_pages_by_label,
                label=label,
                limit=limit,
                max_pages=max_pages,
            )
            ids_by_label = [page["id"] for page in pages]
            if page_ids:
                page_ids = list(set(page_ids + ids_by_label))
            else:
                page_ids = list(set(ids_by_label))

        if cql:
            pages = self.paginate_request(
                self._search_content_by_cql,
                cql=cql,
                limit=limit,
                max_pages=max_pages,
                include_archived_spaces=include_archived_content,
                expand=expand,
            )
            yield from self.process_pages(
                pages,
                include_restricted_content,
                include_attachments,
                include_comments,
                include_labels,
                content_format,
                ocr_languages,
                keep_markdown_format,
                keep_newlines=keep_newlines,
            )

        if page_ids:
            for page_id in page_ids:
                get_page = retry(
                    reraise=True,
                    stop=stop_after_attempt(
                        self.number_of_retries  # type: ignore[arg-type]
                    ),
                    wait=wait_exponential(
                        multiplier=1,  # type: ignore[arg-type]
                        min=self.min_retry_seconds,  # type: ignore[arg-type]
                        max=self.max_retry_seconds,  # type: ignore[arg-type]
                    ),
                    before_sleep=before_sleep_log(logger, logging.WARNING),
                )(self.confluence.get_page_by_id)
                page = get_page(
                    page_id=page_id,
                    expand=expand,
                )
                if not include_restricted_content and not self.is_public_page(page):
                    continue
                yield self.process_page(
                    page,
                    include_attachments,
                    include_comments,
                    include_labels,
                    content_format,
                    ocr_languages,
                    keep_markdown_format,
                )

    def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List:
        """Paginate the various methods to retrieve groups of pages.

        Unfortunately, due to page size, sometimes the Confluence API
        doesn't match the limit value. If `limit` is >100 confluence
        seems to cap the response to 100. Also, due to the Atlassian Python
        package, we don't get the "next" values from the "_links" key because
        they only return the value from the result key. So here, the pagination
        starts from 0 and goes until the max_pages, getting the `limit` number
        of pages with each request. We have to manually check if there
        are more docs based on the length of the returned list of pages, rather
        than just checking for the presence of a `next` key in the response like
        this page would have you do:
        https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/

        :param retrieval_method: Function used to retrieve docs
        :type retrieval_method: callable
        :return: List of documents
        :rtype: List
        """

        max_pages = kwargs.pop("max_pages")
        docs: List[dict] = []
        next_url: str = ""
        while len(docs) < max_pages:
            get_pages = retry(
                reraise=True,
                stop=stop_after_attempt(
                    self.number_of_retries  # type: ignore[arg-type]
                ),
                wait=wait_exponential(
                    multiplier=1,
                    min=self.min_retry_seconds,  # type: ignore[arg-type]
                    max=self.max_retry_seconds,  # type: ignore[arg-type]
                ),
                before_sleep=before_sleep_log(logger, logging.WARNING),
            )(retrieval_method)
            if self.cql:  # cursor pagination for CQL
                batch, next_url = get_pages(**kwargs, next_url=next_url)
                if not next_url:
                    docs.extend(batch)
                    break
            else:
                batch = get_pages(**kwargs, start=len(docs))
                if not batch:
                    break
            docs.extend(batch)
        return docs[:max_pages]

    def is_public_page(self, page: dict) -> bool:
        """Check if a page is publicly accessible."""
        if page["status"] != "current":
            return False

        restrictions = self.confluence.get_all_restrictions_for_content(page["id"])
        return (
            not restrictions["read"]["restrictions"]["user"]["results"]
            and not restrictions["read"]["restrictions"]["group"]["results"]
        )

    def process_pages(
        self,
        pages: List[dict],
        include_restricted_content: bool,
        include_attachments: bool,
        include_comments: bool,
        include_labels: bool,
        content_format: ContentFormat,
        ocr_languages: Optional[str] = None,
        keep_markdown_format: Optional[bool] = False,
        keep_newlines: bool = False,
    ) -> Iterator[Document]:
        """Process a list of pages into a list of documents."""
        for page in pages:
            if not include_restricted_content and not self.is_public_page(page):
                continue
            yield self.process_page(
                page,
                include_attachments,
                include_comments,
                include_labels,
                content_format,
                ocr_languages=ocr_languages,
                keep_markdown_format=keep_markdown_format,
                keep_newlines=keep_newlines,
            )

    def process_page(
        self,
        page: dict,
        include_attachments: bool,
        include_comments: bool,
        include_labels: bool,
        content_format: ContentFormat,
        ocr_languages: Optional[str] = None,
        keep_markdown_format: Optional[bool] = False,
        keep_newlines: bool = False,
    ) -> Document:
        if keep_markdown_format:
            try:
                from markdownify import markdownify
            except ImportError:
                raise ImportError(
                    "`markdownify` package not found, please run "
                    "`pip install markdownify`"
                )
        if include_comments or not keep_markdown_format:
            try:
                from bs4 import BeautifulSoup
            except ImportError:
                raise ImportError(
                    "`beautifulsoup4` package not found, please run "
                    "`pip install beautifulsoup4`"
                )
        if include_attachments:
            attachment_texts = self.process_attachment(page["id"], ocr_languages)
        else:
            attachment_texts = []

        content = content_format.get_content(page)
        if keep_markdown_format:
            # Use markdownify to keep the page Markdown style
            text = markdownify(content, heading_style="ATX") + "".join(
                attachment_texts
            )
        else:
            if keep_newlines:
                text = BeautifulSoup(
                    content.replace("</p>", "\n</p>").replace("<br />", "\n"),
                    "lxml",
                ).get_text(" ") + "".join(attachment_texts)
            else:
                text = BeautifulSoup(content, "lxml").get_text(
                    " ", strip=True
                ) + "".join(attachment_texts)

        if include_comments:
            comments = self.confluence.get_page_comments(
                page["id"], expand="body.view.value", depth="all"
            )["results"]
            comment_texts = [
                BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text(
                    " ", strip=True
                )
                for comment in comments
            ]
            text = text + "".join(comment_texts)

        if include_labels:
            labels = [
                label["name"]
                for label in page.get("metadata", {})
                .get("labels", {})
                .get("results", [])
            ]

        metadata = {
            "title": page["title"],
            "id": page["id"],
            "source": self.base_url.strip("/") + page["_links"]["webui"],
            **({"labels": labels} if include_labels else {}),
        }

        if "version" in page and "when" in page["version"]:
            metadata["when"] = page["version"]["when"]

        return Document(
            page_content=text,
            metadata=metadata,
        )

    def process_attachment(
        self,
        page_id: str,
        ocr_languages: Optional[str] = None,
    ) -> List[str]:
        try:
            from PIL import Image  # noqa: F401
        except ImportError:
            raise ImportError(
                "`Pillow` package not found, please run `pip install Pillow`"
            )

        # depending on setup you may also need to set the correct path for
        # poppler and tesseract
        attachments = self.confluence.get_attachments_from_content(page_id)["results"]
        texts = []
        for attachment in attachments:
            if self.attachment_filter_func and not self.attachment_filter_func(
                attachment
            ):
                continue

            media_type = attachment["metadata"]["mediaType"]
            absolute_url = self.base_url + attachment["_links"]["download"]
            title = attachment["title"]
            try:
                if media_type == "application/pdf":
                    text = title + self.process_pdf(absolute_url, ocr_languages)
                elif (
                    media_type == "image/png"
                    or media_type == "image/jpg"
                    or media_type == "image/jpeg"
                ):
                    text = title + self.process_image(absolute_url, ocr_languages)
                elif (
                    media_type == "application/vnd.openxmlformats-officedocument"
                    ".wordprocessingml.document"
                ):
                    text = title + self.process_doc(absolute_url)
                elif media_type == "application/vnd.ms-excel":
                    text = title + self.process_xls(absolute_url)
                elif media_type == "image/svg+xml":
                    text = title + self.process_svg(absolute_url, ocr_languages)
                else:
                    continue
                texts.append(text)
            except requests.HTTPError as e:
                if e.response.status_code == 404:
                    print(f"Attachment not found at {absolute_url}")  # noqa: T201
                    continue
                else:
                    raise

        return texts

    def process_pdf(
        self,
        link: str,
        ocr_languages: Optional[str] = None,
    ) -> str:
        try:
            import pytesseract
            from pdf2image import convert_from_bytes
        except ImportError:
            raise ImportError(
                "`pytesseract` or `pdf2image` package not found, "
                "please run `pip install pytesseract pdf2image`"
            )

        response = self.confluence.request(path=link, absolute=True)
        text = ""

        if (
            response.status_code != 200
            or response.content == b""
            or response.content is None
        ):
            return text
        try:
            images = convert_from_bytes(response.content)
        except ValueError:
            return text

        for i, image in enumerate(images):
            try:
                image_text = pytesseract.image_to_string(image, lang=ocr_languages)
                text += f"Page {i + 1}:\n{image_text}\n\n"
            except pytesseract.TesseractError as ex:
                logger.warning(f"TesseractError: {ex}")

        return text

    def process_image(
        self,
        link: str,
        ocr_languages: Optional[str] = None,
    ) -> str:
        try:
            import pytesseract
            from PIL import Image
        except ImportError:
            raise ImportError(
                "`pytesseract` or `Pillow` package not found, "
                "please run `pip install pytesseract Pillow`"
            )

        response = self.confluence.request(path=link, absolute=True)
        text = ""

        if (
            response.status_code != 200
            or response.content == b""
            or response.content is None
        ):
            return text
        try:
            image = Image.open(BytesIO(response.content))
        except OSError:
            return text

        return pytesseract.image_to_string(image, lang=ocr_languages)

    def process_doc(self, link: str) -> str:
        try:
            import docx2txt
        except ImportError:
            raise ImportError(
                "`docx2txt` package not found, please run `pip install docx2txt`"
            )

        response = self.confluence.request(path=link, absolute=True)
        text = ""

        if (
            response.status_code != 200
            or response.content == b""
            or response.content is None
        ):
            return text
        file_data = BytesIO(response.content)

        return docx2txt.process(file_data)

    def process_xls(self, link: str) -> str:
        import io
        import os

        try:
            import xlrd
        except ImportError:
            raise ImportError("`xlrd` package not found, please run `pip install xlrd`")

        try:
            import pandas as pd
        except ImportError:
            raise ImportError(
                "`pandas` package not found, please run `pip install pandas`"
            )

        response = self.confluence.request(path=link, absolute=True)
        text = ""

        if (
            response.status_code != 200
            or response.content == b""
            or response.content is None
        ):
            return text

        filename = os.path.basename(link)
        # Getting the whole content of the url after filename,
        # Example: ".csv?version=2&modificationDate=1631800010678&cacheVersion=1&api=v2"
        file_extension = os.path.splitext(filename)[1]

        # if the extension found in the url is ".csv"
        if file_extension.startswith(".csv"):
            content_string = response.content.decode("utf-8")
            df = pd.read_csv(io.StringIO(content_string))
            text += df.to_string(index=False, header=False) + "\n\n"
        else:
            workbook = xlrd.open_workbook(file_contents=response.content)
            for sheet in workbook.sheets():
                text += f"{sheet.name}:\n"
                for row in range(sheet.nrows):
                    for col in range(sheet.ncols):
                        text += f"{sheet.cell_value(row, col)}\t"
                    text += "\n"
                text += "\n"

        return text

    def process_svg(
        self,
        link: str,
        ocr_languages: Optional[str] = None,
    ) -> str:
        try:
            import pytesseract
            from PIL import Image
            from reportlab.graphics import renderPM
            from svglib.svglib import svg2rlg
        except ImportError:
            raise ImportError(
                "`pytesseract`, `Pillow`, `reportlab` or `svglib` package not found, "
                "please run `pip install pytesseract Pillow reportlab svglib`"
            )

        response = self.confluence.request(path=link, absolute=True)
        text = ""

        if (
            response.status_code != 200
            or response.content == b""
            or response.content is None
        ):
            return text

        drawing = svg2rlg(BytesIO(response.content))

        img_data = BytesIO()
        renderPM.drawToFile(drawing, img_data, fmt="PNG")
        img_data.seek(0)
        image = Image.open(img_data)

        return pytesseract.image_to_string(image, lang=ocr_languages)
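
# --- Illustrative sketches below; they are not part of the upstream module. ---


def keep_small_pdfs(attachment: dict) -> bool:
    """Example `attachment_filter_func`: keep only PDF attachments under ~5 MB.

    This is a hypothetical filter shown as a sketch. `metadata.mediaType` is the
    field the loader itself reads in `process_attachment`; `extensions.fileSize`
    (bytes) is an assumption about the Confluence attachment payload, so adjust
    the keys to whatever your instance actually returns.
    """
    media_type = attachment.get("metadata", {}).get("mediaType", "")
    file_size = attachment.get("extensions", {}).get("fileSize", 0)
    return media_type == "application/pdf" and file_size < 5 * 1024 * 1024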
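
if __name__ == "__main__":
    # Hedged usage sketch: personal access token authentication plus a CQL query,
    # as described in the class docstring. The URL, token and CQL are placeholders.
    # Pages are fetched `limit` at a time (the API may cap each batch; see
    # `paginate_request`) until `max_pages` results have been collected.
    # Attachment parsing requires the optional OCR/image dependencies.
    loader = ConfluenceLoader(
        url="https://yoursite.atlassian.com/wiki",
        token="<personal-access-token>",
        cql="space = SPACE and type = page",
        include_attachments=True,
        attachment_filter_func=keep_small_pdfs,  # example filter defined above
        content_format=ContentFormat.VIEW,
        limit=50,
        max_pages=200,
    )
    documents = loader.load()
    for doc in documents:
        print(doc.metadata["title"], len(doc.page_content))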