"""Fireworks chat wrapper."""from__future__importannotationsimportjsonimportloggingfromoperatorimportitemgetterfromtypingimport(Any,AsyncIterator,Callable,Dict,Iterator,List,Literal,Mapping,Optional,Sequence,Tuple,Type,TypedDict,Union,cast,)fromfireworks.clientimportAsyncFireworks,Fireworks# type: ignorefromlangchain_core._apiimportdeprecatedfromlangchain_core.callbacksimport(AsyncCallbackManagerForLLMRun,CallbackManagerForLLMRun,)fromlangchain_core.language_modelsimportLanguageModelInputfromlangchain_core.language_models.chat_modelsimport(BaseChatModel,LangSmithParams,agenerate_from_stream,generate_from_stream,)fromlangchain_core.messagesimport(AIMessage,AIMessageChunk,BaseMessage,BaseMessageChunk,ChatMessage,ChatMessageChunk,FunctionMessage,FunctionMessageChunk,HumanMessage,HumanMessageChunk,InvalidToolCall,SystemMessage,SystemMessageChunk,ToolCall,ToolMessage,ToolMessageChunk,)fromlangchain_core.messages.toolimport(ToolCallChunk,)fromlangchain_core.messages.toolimport(tool_call_chunkascreate_tool_call_chunk,)fromlangchain_core.output_parsersimportJsonOutputParser,PydanticOutputParserfromlangchain_core.output_parsers.baseimportOutputParserLikefromlangchain_core.output_parsers.openai_toolsimport(JsonOutputKeyToolsParser,PydanticToolsParser,make_invalid_tool_call,parse_tool_call,)fromlangchain_core.outputsimportChatGeneration,ChatGenerationChunk,ChatResultfromlangchain_core.runnablesimportRunnable,RunnableMap,RunnablePassthroughfromlangchain_core.toolsimportBaseToolfromlangchain_core.utilsimport(get_pydantic_field_names,)fromlangchain_core.utils.function_callingimport(convert_to_json_schema,convert_to_openai_function,convert_to_openai_tool,)fromlangchain_core.utils.pydanticimportis_basemodel_subclassfromlangchain_core.utils.utilsimport_build_model_kwargs,from_env,secret_from_envfrompydanticimport(BaseModel,ConfigDict,Field,SecretStr,model_validator,)fromtyping_extensionsimportSelflogger=logging.getLogger(__name__)def_convert_dict_to_message(_dict:Mapping[str,Any])->BaseMessage:"""Convert a dictionary to a LangChain message. Args: _dict: The dictionary. Returns: The LangChain message. """role=_dict.get("role")ifrole=="user":returnHumanMessage(content=_dict.get("content",""))elifrole=="assistant":# Fix for azure# Also Fireworks returns None for tool invocationscontent=_dict.get("content","")or""additional_kwargs:Dict={}iffunction_call:=_dict.get("function_call"):additional_kwargs["function_call"]=dict(function_call)tool_calls=[]invalid_tool_calls=[]ifraw_tool_calls:=_dict.get("tool_calls"):additional_kwargs["tool_calls"]=raw_tool_callsforraw_tool_callinraw_tool_calls:try:tool_calls.append(parse_tool_call(raw_tool_call,return_id=True))exceptExceptionase:invalid_tool_calls.append(dict(make_invalid_tool_call(raw_tool_call,str(e))))returnAIMessage(content=content,additional_kwargs=additional_kwargs,tool_calls=tool_calls,invalid_tool_calls=invalid_tool_calls,)elifrole=="system":returnSystemMessage(content=_dict.get("content",""))elifrole=="function":returnFunctionMessage(content=_dict.get("content",""),name=_dict.get("name",""))elifrole=="tool":additional_kwargs={}if"name"in_dict:additional_kwargs["name"]=_dict["name"]returnToolMessage(content=_dict.get("content",""),tool_call_id=_dict.get("tool_call_id",""),additional_kwargs=additional_kwargs,)else:returnChatMessage(content=_dict.get("content",""),role=roleor"")def_convert_message_to_dict(message:BaseMessage)->dict:"""Convert a LangChain message to a dictionary. Args: message: The LangChain message. Returns: The dictionary. 
"""message_dict:Dict[str,Any]ifisinstance(message,ChatMessage):message_dict={"role":message.role,"content":message.content}elifisinstance(message,HumanMessage):message_dict={"role":"user","content":message.content}elifisinstance(message,AIMessage):message_dict={"role":"assistant","content":message.content}if"function_call"inmessage.additional_kwargs:message_dict["function_call"]=message.additional_kwargs["function_call"]# If function call only, content is None not empty stringifmessage_dict["content"]=="":message_dict["content"]=Noneifmessage.tool_callsormessage.invalid_tool_calls:message_dict["tool_calls"]=[_lc_tool_call_to_fireworks_tool_call(tc)fortcinmessage.tool_calls]+[_lc_invalid_tool_call_to_fireworks_tool_call(tc)fortcinmessage.invalid_tool_calls]elif"tool_calls"inmessage.additional_kwargs:message_dict["tool_calls"]=message.additional_kwargs["tool_calls"]# If tool calls only, content is None not empty stringif"tool_calls"inmessage_dictandmessage_dict["content"]=="":message_dict["content"]=Noneelse:passelifisinstance(message,SystemMessage):message_dict={"role":"system","content":message.content}elifisinstance(message,FunctionMessage):message_dict={"role":"function","content":message.content,"name":message.name,}elifisinstance(message,ToolMessage):message_dict={"role":"tool","content":message.content,"tool_call_id":message.tool_call_id,}else:raiseTypeError(f"Got unknown type {message}")if"name"inmessage.additional_kwargs:message_dict["name"]=message.additional_kwargs["name"]returnmessage_dictdef_convert_chunk_to_message_chunk(chunk:Mapping[str,Any],default_class:Type[BaseMessageChunk])->BaseMessageChunk:choice=chunk["choices"][0]_dict=choice["delta"]role=cast(str,_dict.get("role"))content=cast(str,_dict.get("content")or"")additional_kwargs:Dict={}tool_call_chunks:List[ToolCallChunk]=[]if_dict.get("function_call"):function_call=dict(_dict["function_call"])if"name"infunction_callandfunction_call["name"]isNone:function_call["name"]=""additional_kwargs["function_call"]=function_callifraw_tool_calls:=_dict.get("tool_calls"):additional_kwargs["tool_calls"]=raw_tool_callsforrtcinraw_tool_calls:try:tool_call_chunks.append(create_tool_call_chunk(name=rtc["function"].get("name"),args=rtc["function"].get("arguments"),id=rtc.get("id"),index=rtc.get("index"),))exceptKeyError:passifrole=="user"ordefault_class==HumanMessageChunk:returnHumanMessageChunk(content=content)elifrole=="assistant"ordefault_class==AIMessageChunk:ifusage:=chunk.get("usage"):input_tokens=usage.get("prompt_tokens",0)output_tokens=usage.get("completion_tokens",0)usage_metadata={"input_tokens":input_tokens,"output_tokens":output_tokens,"total_tokens":usage.get("total_tokens",input_tokens+output_tokens),}else:usage_metadata=NonereturnAIMessageChunk(content=content,additional_kwargs=additional_kwargs,tool_call_chunks=tool_call_chunks,usage_metadata=usage_metadata,# type: ignore[arg-type])elifrole=="system"ordefault_class==SystemMessageChunk:returnSystemMessageChunk(content=content)elifrole=="function"ordefault_class==FunctionMessageChunk:returnFunctionMessageChunk(content=content,name=_dict["name"])elifrole=="tool"ordefault_class==ToolMessageChunk:returnToolMessageChunk(content=content,tool_call_id=_dict["tool_call_id"])elifroleordefault_class==ChatMessageChunk:returnChatMessageChunk(content=content,role=role)else:returndefault_class(content=content)# type: ignoreclass_FunctionCall(TypedDict):name:str# This is basically a copy and replace for ChatFireworks, except# - I needed to gut out tiktoken and some of the token estimation 
class _FunctionCall(TypedDict):
    name: str


# This is basically a copy-and-replace for ChatFireworks, except:
# - I needed to gut out tiktoken and some of the token estimation logic
#   (not sure how important it is)
# - The environment variable is different
# We should refactor this into some OpenAI-like base class in the future.
class ChatFireworks(BaseChatModel):
    """`Fireworks` Chat large language models API.

    To use, you should have the environment variable ``FIREWORKS_API_KEY``
    set with your API key.

    Any parameters that are valid to be passed to the fireworks.create call
    can be passed in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain_fireworks.chat_models import ChatFireworks

            fireworks = ChatFireworks(
                model_name="accounts/fireworks/models/mixtral-8x7b-instruct"
            )
    """

    @property
    def lc_secrets(self) -> Dict[str, str]:
        return {"fireworks_api_key": "FIREWORKS_API_KEY"}

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the LangChain object."""
        return ["langchain", "chat_models", "fireworks"]

    @property
    def lc_attributes(self) -> Dict[str, Any]:
        attributes: Dict[str, Any] = {}

        if self.fireworks_api_base:
            attributes["fireworks_api_base"] = self.fireworks_api_base

        return attributes

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this model can be serialized by LangChain."""
        return True

    client: Any = Field(default=None, exclude=True)  #: :meta private:
    async_client: Any = Field(default=None, exclude=True)  #: :meta private:
    model_name: str = Field(
        default="accounts/fireworks/models/mixtral-8x7b-instruct", alias="model"
    )
    """Model name to use."""
    temperature: float = 0.0
    """What sampling temperature to use."""
    stop: Optional[Union[str, List[str]]] = Field(default=None, alias="stop_sequences")
    """Default stop sequences."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for the `create` call not explicitly
    specified."""
    fireworks_api_key: SecretStr = Field(
        alias="api_key",
        default_factory=secret_from_env(
            "FIREWORKS_API_KEY",
            error_message=(
                "You must specify an api key. "
                "You can pass it as an argument as `api_key=...` or "
                "set the environment variable `FIREWORKS_API_KEY`."
            ),
        ),
    )
    """Fireworks API key.

    Automatically read from env variable `FIREWORKS_API_KEY` if not provided.
    """
    fireworks_api_base: Optional[str] = Field(
        alias="base_url", default_factory=from_env("FIREWORKS_API_BASE", default=None)
    )
    """Base URL path for API requests, leave blank if not using a proxy or service
    emulator."""
    request_timeout: Union[float, Tuple[float, float], Any, None] = Field(
        default=None, alias="timeout"
    )
    """Timeout for requests to the Fireworks completion API. Can be a float,
    httpx.Timeout, or None."""
    streaming: bool = False
    """Whether to stream the results or not."""
    n: int = 1
    """Number of chat completions to generate for each prompt."""
    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""
    max_retries: Optional[int] = None
    """Maximum number of retries to make when generating."""

    model_config = ConfigDict(
        populate_by_name=True,
    )

    @model_validator(mode="before")
    @classmethod
    def build_extra(cls, values: Dict[str, Any]) -> Any:
        """Build extra kwargs from additional params that were passed in."""
        all_required_field_names = get_pydantic_field_names(cls)
        values = _build_model_kwargs(values, all_required_field_names)
        return values

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        """Validate that the API key and Python package exist in the environment."""
        if self.n < 1:
            raise ValueError("n must be at least 1.")
        if self.n > 1 and self.streaming:
            raise ValueError("n must be 1 when streaming.")

        client_params = {
            "api_key": (
                self.fireworks_api_key.get_secret_value()
                if self.fireworks_api_key
                else None
            ),
            "base_url": self.fireworks_api_base,
            "timeout": self.request_timeout,
        }

        if not self.client:
            self.client = Fireworks(**client_params).chat.completions
        if not self.async_client:
            self.async_client = AsyncFireworks(**client_params).chat.completions
        if self.max_retries:
            self.client._max_retries = self.max_retries
            self.async_client._max_retries = self.max_retries
        return self

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the Fireworks API."""
        params = {
            "model": self.model_name,
            "stream": self.streaming,
            "n": self.n,
            "temperature": self.temperature,
            "stop": self.stop,
            **self.model_kwargs,
        }
        if self.max_tokens is not None:
            params["max_tokens"] = self.max_tokens
        return params

    def _get_ls_params(
        self, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> LangSmithParams:
        """Get standard params for tracing."""
        params = self._get_invocation_params(stop=stop, **kwargs)
        ls_params = LangSmithParams(
            ls_provider="fireworks",
            ls_model_name=self.model_name,
            ls_model_type="chat",
            ls_temperature=params.get("temperature", self.temperature),
        )
        if ls_max_tokens := params.get("max_tokens", self.max_tokens):
            ls_params["ls_max_tokens"] = ls_max_tokens
        if ls_stop := stop or params.get("stop", None):
            ls_params["ls_stop"] = ls_stop
        return ls_params

    def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
        overall_token_usage: dict = {}
        system_fingerprint = None
        for output in llm_outputs:
            if output is None:
                # Happens in streaming
                continue
            token_usage = output["token_usage"]
            if token_usage is not None:
                for k, v in token_usage.items():
                    if k in overall_token_usage:
                        overall_token_usage[k] += v
                    else:
                        overall_token_usage[k] = v
            if system_fingerprint is None:
                system_fingerprint = output.get("system_fingerprint")
        combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
        if system_fingerprint:
            combined["system_fingerprint"] = system_fingerprint
        return combined
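    # Worked example of how per-generation outputs merge (hypothetical
    # numbers; "model_name" comes from the instance):
    #
    #     llm._combine_llm_outputs([
    #         {"token_usage": {"prompt_tokens": 10, "completion_tokens": 4}},
    #         None,  # a streamed generation contributes no token usage
    #         {"token_usage": {"prompt_tokens": 10, "completion_tokens": 6}},
    #     ])
    #     # -> {"token_usage": {"prompt_tokens": 20, "completion_tokens": 10},
    #     #     "model_name": "accounts/fireworks/models/mixtral-8x7b-instruct"}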
    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs, "stream": True}

        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
        for chunk in self.client.create(messages=message_dicts, **params):
            if not isinstance(chunk, dict):
                chunk = chunk.model_dump()
            if len(chunk["choices"]) == 0:
                continue
            choice = chunk["choices"][0]
            message_chunk = _convert_chunk_to_message_chunk(chunk, default_chunk_class)
            generation_info = {}
            if finish_reason := choice.get("finish_reason"):
                generation_info["finish_reason"] = finish_reason
                generation_info["model_name"] = self.model_name
            logprobs = choice.get("logprobs")
            if logprobs:
                generation_info["logprobs"] = logprobs
            default_chunk_class = message_chunk.__class__
            generation_chunk = ChatGenerationChunk(
                message=message_chunk, generation_info=generation_info or None
            )
            if run_manager:
                run_manager.on_llm_new_token(
                    generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
                )
            yield generation_chunk

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        stream: Optional[bool] = None,
        **kwargs: Any,
    ) -> ChatResult:
        should_stream = stream if stream is not None else self.streaming
        if should_stream:
            stream_iter = self._stream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return generate_from_stream(stream_iter)
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {
            **params,
            **({"stream": stream} if stream is not None else {}),
            **kwargs,
        }
        response = self.client.create(messages=message_dicts, **params)
        return self._create_chat_result(response)

    def _create_message_dicts(
        self, messages: List[BaseMessage], stop: Optional[List[str]]
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        params = self._default_params
        if stop is not None:
            params["stop"] = stop
        message_dicts = [_convert_message_to_dict(m) for m in messages]
        return message_dicts, params

    def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
        generations = []
        if not isinstance(response, dict):
            response = response.model_dump()
        token_usage = response.get("usage", {})
        for res in response["choices"]:
            message = _convert_dict_to_message(res["message"])
            if token_usage and isinstance(message, AIMessage):
                message.usage_metadata = {
                    "input_tokens": token_usage.get("prompt_tokens", 0),
                    "output_tokens": token_usage.get("completion_tokens", 0),
                    "total_tokens": token_usage.get("total_tokens", 0),
                }
            generation_info = dict(finish_reason=res.get("finish_reason"))
            if "logprobs" in res:
                generation_info["logprobs"] = res["logprobs"]
            gen = ChatGeneration(
                message=message,
                generation_info=generation_info,
            )
            generations.append(gen)
        llm_output = {
            "token_usage": token_usage,
            "model_name": self.model_name,
            "system_fingerprint": response.get("system_fingerprint", ""),
        }
        return ChatResult(generations=generations, llm_output=llm_output)
    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs, "stream": True}

        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
        async for chunk in self.async_client.acreate(messages=message_dicts, **params):
            if not isinstance(chunk, dict):
                chunk = chunk.model_dump()
            if len(chunk["choices"]) == 0:
                continue
            choice = chunk["choices"][0]
            message_chunk = _convert_chunk_to_message_chunk(chunk, default_chunk_class)
            generation_info = {}
            if finish_reason := choice.get("finish_reason"):
                generation_info["finish_reason"] = finish_reason
                generation_info["model_name"] = self.model_name
            logprobs = choice.get("logprobs")
            if logprobs:
                generation_info["logprobs"] = logprobs
            default_chunk_class = message_chunk.__class__
            generation_chunk = ChatGenerationChunk(
                message=message_chunk, generation_info=generation_info or None
            )
            if run_manager:
                await run_manager.on_llm_new_token(
                    token=generation_chunk.text,
                    chunk=generation_chunk,
                    logprobs=logprobs,
                )
            yield generation_chunk

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        stream: Optional[bool] = None,
        **kwargs: Any,
    ) -> ChatResult:
        should_stream = stream if stream is not None else self.streaming
        if should_stream:
            stream_iter = self._astream(
                messages, stop=stop, run_manager=run_manager, **kwargs
            )
            return await agenerate_from_stream(stream_iter)
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {
            **params,
            **({"stream": stream} if stream is not None else {}),
            **kwargs,
        }
        response = await self.async_client.acreate(messages=message_dicts, **params)
        return self._create_chat_result(response)

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Get the identifying parameters."""
        return {"model_name": self.model_name, **self._default_params}

    def _get_invocation_params(
        self, stop: Optional[List[str]] = None, **kwargs: Any
    ) -> Dict[str, Any]:
        """Get the parameters used to invoke the model."""
        return {
            "model": self.model_name,
            **super()._get_invocation_params(stop=stop),
            **self._default_params,
            **kwargs,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of chat model."""
        return "fireworks-chat"
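    # Basic streaming usage sketch (assumes FIREWORKS_API_KEY is set in the
    # environment; the model name is illustrative):
    #
    #     llm = ChatFireworks(
    #         model="accounts/fireworks/models/mixtral-8x7b-instruct"
    #     )
    #     for chunk in llm.stream("Tell me a joke"):
    #         print(chunk.content, end="", flush=True)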
    @deprecated(
        since="0.2.1",
        alternative="langchain_fireworks.chat_models.ChatFireworks.bind_tools",
        removal="1.0.0",
    )
    def bind_functions(
        self,
        functions: Sequence[
            Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]
        ],
        function_call: Optional[
            Union[_FunctionCall, str, Literal["auto", "none"]]
        ] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind functions (and other objects) to this chat model.

        Assumes the model is compatible with the Fireworks function-calling API.

        NOTE: Using bind_tools is recommended instead, as the `functions` and
            `function_call` request parameters are officially marked as
            deprecated by Fireworks.

        Args:
            functions: A list of function definitions to bind to this chat model.
                Can be a dictionary, pydantic model, or callable. Pydantic
                models and callables will be automatically converted to
                their schema dictionary representation.
            function_call: Which function to require the model to call.
                Must be the name of the single provided function or
                "auto" to automatically determine which function to call
                (if any).
            **kwargs: Any additional parameters to pass to the
                :class:`~langchain.runnable.Runnable` constructor.
        """
        formatted_functions = [convert_to_openai_function(fn) for fn in functions]
        if function_call is not None:
            function_call = (
                {"name": function_call}
                if isinstance(function_call, str)
                and function_call not in ("auto", "none")
                else function_call
            )
            if isinstance(function_call, dict) and len(formatted_functions) != 1:
                raise ValueError(
                    "When specifying `function_call`, you must provide exactly one "
                    "function."
                )
            if (
                isinstance(function_call, dict)
                and formatted_functions[0]["name"] != function_call["name"]
            ):
                raise ValueError(
                    f"Function call {function_call} was specified, but the only "
                    f"provided function was {formatted_functions[0]['name']}."
                )
            kwargs = {**kwargs, "function_call": function_call}
        return super().bind(
            functions=formatted_functions,
            **kwargs,
        )
    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
        *,
        tool_choice: Optional[
            Union[dict, str, Literal["auto", "any", "none"], bool]
        ] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        Assumes the model is compatible with the Fireworks tool-calling API.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Supports any tool definition handled by
                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
            tool_choice: Which tool to require the model to call. Must be the
                name of the single provided function, "auto" to automatically
                determine which function to call (with the option to not call
                any function), "any" to enforce that some function is called,
                or a dict of the form:
                {"type": "function", "function": {"name": <<tool_name>>}}.
            **kwargs: Any additional parameters to pass to
                :meth:`~langchain_fireworks.chat_models.ChatFireworks.bind`
        """
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        if tool_choice is not None and tool_choice:
            if isinstance(tool_choice, str) and (
                tool_choice not in ("auto", "any", "none")
            ):
                tool_choice = {"type": "function", "function": {"name": tool_choice}}
            if isinstance(tool_choice, bool):
                if len(tools) > 1:
                    raise ValueError(
                        "tool_choice can only be True when there is one tool. "
                        f"Received {len(tools)} tools."
                    )
                tool_name = formatted_tools[0]["function"]["name"]
                tool_choice = {
                    "type": "function",
                    "function": {"name": tool_name},
                }
            kwargs["tool_choice"] = tool_choice
        return super().bind(tools=formatted_tools, **kwargs)
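    # Usage sketch for `bind_tools` (assumes FIREWORKS_API_KEY is set and a
    # tool-calling-capable model; names and output below are illustrative):
    #
    #     from pydantic import BaseModel, Field
    #
    #     class GetWeather(BaseModel):
    #         """Get the current weather in a given location."""
    #
    #         location: str = Field(..., description="City and state")
    #
    #     llm = ChatFireworks(
    #         model="accounts/fireworks/models/firefunction-v1", temperature=0
    #     )
    #     llm_with_tools = llm.bind_tools([GetWeather], tool_choice="GetWeather")
    #     msg = llm_with_tools.invoke("What's the weather in Paris?")
    #     msg.tool_calls
    #     # -> [{"name": "GetWeather", "args": {"location": "Paris"}, "id": "..."}]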
    def with_structured_output(
        self,
        schema: Optional[Union[Dict, Type[BaseModel]]] = None,
        *,
        method: Literal[
            "function_calling", "json_mode", "json_schema"
        ] = "function_calling",
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema: The output schema. Can be passed in as:

                - an OpenAI function/tool schema,
                - a JSON Schema,
                - a TypedDict class (support added in 0.1.7),
                - or a Pydantic class.

                If ``schema`` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will
                be validated by the Pydantic class. Otherwise the model output will
                be a dict and will not be validated. See
                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of
                schema fields when specifying a Pydantic or TypedDict class.

                .. versionchanged:: 0.1.7

                    Added support for TypedDict class.

            method: The method for steering model generation, one of:

                - "function_calling": Uses Fireworks's
                  `tool-calling features <https://docs.fireworks.ai/guides/function-calling>`_.
                - "json_schema": Uses Fireworks's
                  `structured output feature <https://docs.fireworks.ai/structured-responses/structured-response-formatting>`_.
                - "json_mode": Uses Fireworks's
                  `JSON mode feature <https://docs.fireworks.ai/structured-responses/structured-response-formatting>`_.

                .. versionchanged:: 0.2.8

                    Added support for ``"json_schema"``.

            include_raw: If False then only the parsed structured output is returned.
                If an error occurs during model output parsing it will be raised.
                If True then both the raw model response (a BaseMessage) and the
                parsed model response will be returned. If an error occurs during
                output parsing it will be caught and returned as well. The final
                output is always a dict with keys "raw", "parsed", and
                "parsing_error".

        Returns:
            A Runnable that takes the same inputs as a
            :class:`langchain_core.language_models.chat.BaseChatModel`.

            If ``include_raw`` is False and ``schema`` is a Pydantic class, the
            Runnable outputs an instance of ``schema`` (i.e., a Pydantic object).
            Otherwise, if ``include_raw`` is False then the Runnable outputs a dict.

            If ``include_raw`` is True, then the Runnable outputs a dict with keys:

            - ``"raw"``: BaseMessage
            - ``"parsed"``: None if there was a parsing error, otherwise the type
              depends on the ``schema`` as described above.
            - ``"parsing_error"``: Optional[BaseException]

        Example: schema=Pydantic class, method="function_calling", include_raw=False:
            .. code-block:: python

                from typing import Optional

                from langchain_fireworks import ChatFireworks
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    # If we provide default values and/or descriptions for fields, these will be passed
                    # to the model. This is an important part of improving a model's ability to
                    # correctly return structured outputs.
                    justification: Optional[str] = Field(
                        default=None, description="A justification for the answer."
                    )


                llm = ChatFireworks(
                    model="accounts/fireworks/models/firefunction-v1", temperature=0
                )
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound.
                #     The weight is the same, but the volume or density of the objects may differ.'
                # )

        Example: schema=Pydantic class, method="function_calling", include_raw=True:
            .. code-block:: python

                from langchain_fireworks import ChatFireworks
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = ChatFireworks(
                    model="accounts/fireworks/models/firefunction-v1", temperature=0
                )
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, include_raw=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
                #     'parsing_error': None
                # }

        Example: schema=TypedDict class, method="function_calling", include_raw=False:
            .. code-block:: python

                # IMPORTANT: If you are using Python <=3.8, you need to import Annotated
                # from typing_extensions, not from typing.
                from typing import Optional

                from typing_extensions import Annotated, TypedDict

                from langchain_fireworks import ChatFireworks


                class AnswerWithJustification(TypedDict):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Annotated[
                        Optional[str], None, "A justification for the answer."
                    ]


                llm = ChatFireworks(
                    model="accounts/fireworks/models/firefunction-v1", temperature=0
                )
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }

        Example: schema=OpenAI function schema, method="function_calling", include_raw=False:
            .. code-block:: python

                from langchain_fireworks import ChatFireworks

                oai_schema = {
                    'name': 'AnswerWithJustification',
                    'description': 'An answer to the user question along with justification for the answer.',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'answer': {'type': 'string'},
                            'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                        },
                        'required': ['answer']
                    }
                }

                llm = ChatFireworks(
                    model="accounts/fireworks/models/firefunction-v1", temperature=0
                )
                structured_llm = llm.with_structured_output(oai_schema)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }

        Example: schema=Pydantic class, method="json_mode", include_raw=True:
            .. code-block:: python

                from langchain_fireworks import ChatFireworks
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str


                llm = ChatFireworks(
                    model="accounts/fireworks/models/firefunction-v1", temperature=0
                )
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, method="json_mode", include_raw=True
                )

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'. "
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{"answer": "They are both the same weight.", "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight."}'),
                #     'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
                #     'parsing_error': None
                # }

        Example: schema=None, method="json_mode", include_raw=True:
            .. code-block:: python

                structured_llm = llm.with_structured_output(
                    method="json_mode", include_raw=True
                )

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'. "
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{"answer": "They are both the same weight.", "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight."}'),
                #     'parsed': {
                #         'answer': 'They are both the same weight.',
                #         'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'
                #     },
                #     'parsing_error': None
                # }
        """  # noqa: E501
        _ = kwargs.pop("strict", None)
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = _is_pydantic_class(schema)
        if method == "function_calling":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is 'function_calling'. "
                    "Received None."
                )
            formatted_tool = convert_to_openai_tool(schema)
            tool_name = formatted_tool["function"]["name"]
            llm = self.bind_tools(
                [schema],
                tool_choice=tool_name,
                ls_structured_output_format={
                    "kwargs": {"method": "function_calling"},
                    "schema": formatted_tool,
                },
            )
            if is_pydantic_schema:
                output_parser: OutputParserLike = PydanticToolsParser(
                    tools=[schema],  # type: ignore[list-item]
                    first_tool_only=True,  # type: ignore[list-item]
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_schema":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is 'json_schema'. "
                    "Received None."
                )
            formatted_schema = convert_to_json_schema(schema)
            llm = self.bind(
                response_format={"type": "json_object", "schema": formatted_schema},
                ls_structured_output_format={
                    "kwargs": {"method": "json_schema"},
                    "schema": schema,
                },
            )
            output_parser = (
                PydanticOutputParser(pydantic_object=schema)  # type: ignore[arg-type]
                if is_pydantic_schema
                else JsonOutputParser()
            )
        elif method == "json_mode":
            llm = self.bind(
                response_format={"type": "json_object"},
                ls_structured_output_format={
                    "kwargs": {"method": "json_mode"},
                    "schema": schema,
                },
            )
            output_parser = (
                PydanticOutputParser(pydantic_object=schema)  # type: ignore[type-var, arg-type]
                if is_pydantic_schema
                else JsonOutputParser()
            )
        else:
            raise ValueError(
                f"Unrecognized method argument. Expected one of 'function_calling', "
                f"'json_schema', or 'json_mode'. Received: '{method}'"
            )

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        else:
            return llm | output_parser
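
# The helper functions below are referenced above but fall outside this
# excerpt. These are minimal sketches reconstructed from their call sites
# (names and shapes inferred from usage, not copied from the original):


def _is_pydantic_class(obj: Any) -> bool:
    # `with_structured_output` only needs to know whether `schema` is a
    # Pydantic model class (as opposed to a dict or TypedDict schema).
    return isinstance(obj, type) and is_basemodel_subclass(obj)


def _lc_tool_call_to_fireworks_tool_call(tool_call: ToolCall) -> dict:
    # Serialize a parsed LangChain ToolCall into the OpenAI-style wire format
    # used by the Fireworks chat API; structured args are JSON-encoded.
    return {
        "type": "function",
        "id": tool_call["id"],
        "function": {
            "name": tool_call["name"],
            "arguments": json.dumps(tool_call["args"]),
        },
    }


def _lc_invalid_tool_call_to_fireworks_tool_call(
    invalid_tool_call: InvalidToolCall,
) -> dict:
    # Invalid tool calls keep their raw, unparsed argument string as-is.
    return {
        "type": "function",
        "id": invalid_tool_call["id"],
        "function": {
            "name": invalid_tool_call["name"],
            "arguments": invalid_tool_call["args"],
        },
    }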