class OpenAIResponsesModel(Model):
    """
    Implementation of `Model` that uses the OpenAI Responses API.
    """

    def __init__(
        self,
        model: str | ChatModel,
        openai_client: AsyncOpenAI,
    ) -> None:
        self.model = model
        self._client = openai_client

    def _non_null_or_not_given(self, value: Any) -> Any:
        # Map None to the OpenAI SDK's NOT_GIVEN sentinel so unset settings are
        # omitted from the request instead of being sent as explicit nulls.
        return value if value is not None else NOT_GIVEN

    async def get_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None,
        prompt: ResponsePromptParam | None = None,
    ) -> ModelResponse:
        with response_span(disabled=tracing.is_disabled()) as span_response:
            try:
                response = await self._fetch_response(
                    system_instructions,
                    input,
                    model_settings,
                    tools,
                    output_schema,
                    handoffs,
                    previous_response_id,
                    stream=False,
                    prompt=prompt,
                )

                if _debug.DONT_LOG_MODEL_DATA:
                    logger.debug("LLM responded")
                else:
                    logger.debug(
                        "LLM resp:\n"
                        f"""{
                            json.dumps(
                                [x.model_dump() for x in response.output],
                                indent=2,
                                ensure_ascii=False,
                            )
                        }\n"""
                    )

                # The API may omit usage data; fall back to an empty Usage in that case.
                usage = (
                    Usage(
                        requests=1,
                        input_tokens=response.usage.input_tokens,
                        output_tokens=response.usage.output_tokens,
                        total_tokens=response.usage.total_tokens,
                        input_tokens_details=response.usage.input_tokens_details,
                        output_tokens_details=response.usage.output_tokens_details,
                    )
                    if response.usage
                    else Usage()
                )

                if tracing.include_data():
                    span_response.span_data.response = response
                    span_response.span_data.input = input
            except Exception as e:
                span_response.set_error(
                    SpanError(
                        message="Error getting response",
                        data={
                            "error": str(e) if tracing.include_data() else e.__class__.__name__,
                        },
                    )
                )
                request_id = e.request_id if isinstance(e, APIStatusError) else None
                logger.error(f"Error getting response: {e}. (request_id: {request_id})")
                raise

        return ModelResponse(
            output=response.output,
            usage=usage,
            response_id=response.id,
        )

    async def stream_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None,
        prompt: ResponsePromptParam | None = None,
    ) -> AsyncIterator[ResponseStreamEvent]:
        """
        Yields a partial message as it is generated, as well as the usage information.
        """
        with response_span(disabled=tracing.is_disabled()) as span_response:
            try:
                stream = await self._fetch_response(
                    system_instructions,
                    input,
                    model_settings,
                    tools,
                    output_schema,
                    handoffs,
                    previous_response_id,
                    stream=True,
                    prompt=prompt,
                )

                final_response: Response | None = None

                # Forward every streamed event to the caller, keeping the final
                # Response around for tracing once the stream completes.
                async for chunk in stream:
                    if isinstance(chunk, ResponseCompletedEvent):
                        final_response = chunk.response
                    yield chunk

                if final_response and tracing.include_data():
                    span_response.span_data.response = final_response
                    span_response.span_data.input = input

            except Exception as e:
                span_response.set_error(
                    SpanError(
                        message="Error streaming response",
                        data={
                            "error": str(e) if tracing.include_data() else e.__class__.__name__,
                        },
                    )
                )
                logger.error(f"Error streaming response: {e}")
                raise

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None,
        stream: Literal[True],
        prompt: ResponsePromptParam | None = None,
    ) -> AsyncStream[ResponseStreamEvent]: ...

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None,
        stream: Literal[False],
        prompt: ResponsePromptParam | None = None,
    ) -> Response: ...

    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None,
        stream: Literal[True] | Literal[False] = False,
        prompt: ResponsePromptParam | None = None,
    ) -> Response | AsyncStream[ResponseStreamEvent]:
        list_input = ItemHelpers.input_to_new_input_list(input)

        # Only request parallel tool calls when tools are actually provided; an
        # explicit False is passed through, and anything else is left unset.
        parallel_tool_calls = (
            True
            if model_settings.parallel_tool_calls and tools and len(tools) > 0
            else False
            if model_settings.parallel_tool_calls is False
            else NOT_GIVEN
        )

        tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
        converted_tools = Converter.convert_tools(tools, handoffs)
        response_format = Converter.get_response_format(output_schema)

        include: list[ResponseIncludable] = converted_tools.includes
        if model_settings.response_include is not None:
            include = list({*include, *model_settings.response_include})

        if _debug.DONT_LOG_MODEL_DATA:
            logger.debug("Calling LLM")
        else:
            logger.debug(
                f"Calling LLM {self.model} with input:\n"
                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
                f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                f"Stream: {stream}\n"
                f"Tool choice: {tool_choice}\n"
                f"Response format: {response_format}\n"
                f"Previous response id: {previous_response_id}\n"
            )

        return await self._client.responses.create(
            previous_response_id=self._non_null_or_not_given(previous_response_id),
            instructions=self._non_null_or_not_given(system_instructions),
            model=self.model,
            input=list_input,
            include=include,
            tools=converted_tools.tools,
            prompt=self._non_null_or_not_given(prompt),
            temperature=self._non_null_or_not_given(model_settings.temperature),
            top_p=self._non_null_or_not_given(model_settings.top_p),
            truncation=self._non_null_or_not_given(model_settings.truncation),
            max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
            tool_choice=tool_choice,
            parallel_tool_calls=parallel_tool_calls,
            stream=stream,
            extra_headers={**_HEADERS, **(model_settings.extra_headers or {})},
            extra_query=model_settings.extra_query,
            extra_body=model_settings.extra_body,
            text=response_format,
            store=self._non_null_or_not_given(model_settings.store),
            reasoning=self._non_null_or_not_given(model_settings.reasoning),
            metadata=self._non_null_or_not_given(model_settings.metadata),
            **(model_settings.extra_args or {}),
        )

    def _get_client(self) -> AsyncOpenAI:
        if self._client is None:
            self._client = AsyncOpenAI()
        return self._client
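
# Usage sketch (illustrative only, not part of this module): the model is normally
# constructed with an explicit AsyncOpenAI client and handed to an Agent, which the
# Runner then drives. The `Agent`/`Runner` names and the "gpt-4o" model string are
# assumptions based on the public `agents` package layout, not taken from this file.
#
#     from openai import AsyncOpenAI
#     from agents import Agent, Runner
#
#     model = OpenAIResponsesModel(model="gpt-4o", openai_client=AsyncOpenAI())
#     agent = Agent(name="assistant", instructions="Be concise.", model=model)
#     result = await Runner.run(agent, "Hello!")  # run inside an async function
#     print(result.final_output)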