Movatterモバイル変換


[0]ホーム

URL:


Skip to content

OpenAI Responses model

OpenAIResponsesModel

Bases: Model

Implementation of Model that uses the OpenAI Responses API.

Source code in src/agents/models/openai_responses.py
class OpenAIResponsesModel(Model):
    """
    Implementation of `Model` that uses the OpenAI Responses API.

    Wraps an `AsyncOpenAI` client and translates the agent-framework call
    (instructions, input items, tools, handoffs, output schema) into a single
    `client.responses.create(...)` request, in both blocking and streaming form.
    """

    def __init__(
        self,
        model: str | ChatModel,
        openai_client: AsyncOpenAI,
    ) -> None:
        self.model = model
        self._client = openai_client

    def _non_null_or_not_given(self, value: Any) -> Any:
        # The OpenAI SDK distinguishes "omitted" (NOT_GIVEN) from "explicit null";
        # map our Python `None` to NOT_GIVEN so optional settings are omitted.
        return value if value is not None else NOT_GIVEN

    async def get_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None,
        prompt: ResponsePromptParam | None = None,
    ) -> ModelResponse:
        """Make a single (non-streaming) Responses API call.

        Records the request/response on a tracing span (respecting
        `tracing.include_data()`), converts the API usage block into a `Usage`,
        and re-raises any error after tagging the span with it.
        """
        with response_span(disabled=tracing.is_disabled()) as span_response:
            try:
                response = await self._fetch_response(
                    system_instructions,
                    input,
                    model_settings,
                    tools,
                    output_schema,
                    handoffs,
                    previous_response_id,
                    stream=False,
                    prompt=prompt,
                )

                if _debug.DONT_LOG_MODEL_DATA:
                    logger.debug("LLM responded")
                else:
                    logger.debug(
                        "LLM resp:\n"
                        f"{json.dumps([x.model_dump() for x in response.output], indent=2, ensure_ascii=False)}\n"
                    )

                # The API may omit usage entirely; fall back to an empty Usage.
                usage = (
                    Usage(
                        requests=1,
                        input_tokens=response.usage.input_tokens,
                        output_tokens=response.usage.output_tokens,
                        total_tokens=response.usage.total_tokens,
                        input_tokens_details=response.usage.input_tokens_details,
                        output_tokens_details=response.usage.output_tokens_details,
                    )
                    if response.usage
                    else Usage()
                )

                if tracing.include_data():
                    span_response.span_data.response = response
                    span_response.span_data.input = input
            except Exception as e:
                span_response.set_error(
                    SpanError(
                        message="Error getting response",
                        data={
                            # Only leak the error text into traces when allowed.
                            "error": str(e) if tracing.include_data() else e.__class__.__name__,
                        },
                    )
                )
                request_id = e.request_id if isinstance(e, APIStatusError) else None
                logger.error(f"Error getting response: {e}. (request_id: {request_id})")
                raise

        return ModelResponse(
            output=response.output,
            usage=usage,
            response_id=response.id,
        )

    async def stream_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None,
        prompt: ResponsePromptParam | None = None,
    ) -> AsyncIterator[ResponseStreamEvent]:
        """
        Yields a partial message as it is generated, as well as the usage information.
        """
        with response_span(disabled=tracing.is_disabled()) as span_response:
            try:
                stream = await self._fetch_response(
                    system_instructions,
                    input,
                    model_settings,
                    tools,
                    output_schema,
                    handoffs,
                    previous_response_id,
                    stream=True,
                    prompt=prompt,
                )

                final_response: Response | None = None

                async for chunk in stream:
                    # The completed event carries the full final Response object.
                    if isinstance(chunk, ResponseCompletedEvent):
                        final_response = chunk.response
                    yield chunk

                if final_response and tracing.include_data():
                    span_response.span_data.response = final_response
                    span_response.span_data.input = input
            except Exception as e:
                span_response.set_error(
                    SpanError(
                        message="Error streaming response",
                        data={
                            "error": str(e) if tracing.include_data() else e.__class__.__name__,
                        },
                    )
                )
                logger.error(f"Error streaming response: {e}")
                raise

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None,
        stream: Literal[True],
        prompt: ResponsePromptParam | None = None,
    ) -> AsyncStream[ResponseStreamEvent]: ...

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None,
        stream: Literal[False],
        prompt: ResponsePromptParam | None = None,
    ) -> Response: ...

    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None,
        stream: Literal[True] | Literal[False] = False,
        prompt: ResponsePromptParam | None = None,
    ) -> Response | AsyncStream[ResponseStreamEvent]:
        """Build and issue the actual `responses.create` call (shared by both paths)."""
        list_input = ItemHelpers.input_to_new_input_list(input)

        # Three-way: True when explicitly on AND tools exist, False when
        # explicitly off, otherwise omitted so the API default applies.
        parallel_tool_calls = (
            True
            if model_settings.parallel_tool_calls and tools and len(tools) > 0
            else False
            if model_settings.parallel_tool_calls is False
            else NOT_GIVEN
        )

        tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
        converted_tools = Converter.convert_tools(tools, handoffs)
        response_format = Converter.get_response_format(output_schema)

        include: list[ResponseIncludable] = converted_tools.includes
        if model_settings.response_include is not None:
            # De-duplicate tool-derived includes with caller-requested ones.
            include = list({*include, *model_settings.response_include})

        if _debug.DONT_LOG_MODEL_DATA:
            logger.debug("Calling LLM")
        else:
            logger.debug(
                f"Calling LLM {self.model} with input:\n"
                f"{json.dumps(list_input, indent=2, ensure_ascii=False)}\n"
                f"Tools:\n{json.dumps(converted_tools.tools, indent=2, ensure_ascii=False)}\n"
                f"Stream: {stream}\n"
                f"Tool choice: {tool_choice}\n"
                f"Response format: {response_format}\n"
                f"Previous response id: {previous_response_id}\n"
            )

        return await self._client.responses.create(
            previous_response_id=self._non_null_or_not_given(previous_response_id),
            instructions=self._non_null_or_not_given(system_instructions),
            model=self.model,
            input=list_input,
            include=include,
            tools=converted_tools.tools,
            prompt=self._non_null_or_not_given(prompt),
            temperature=self._non_null_or_not_given(model_settings.temperature),
            top_p=self._non_null_or_not_given(model_settings.top_p),
            truncation=self._non_null_or_not_given(model_settings.truncation),
            max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
            tool_choice=tool_choice,
            parallel_tool_calls=parallel_tool_calls,
            stream=stream,
            extra_headers={**_HEADERS, **(model_settings.extra_headers or {})},
            extra_query=model_settings.extra_query,
            extra_body=model_settings.extra_body,
            text=response_format,
            store=self._non_null_or_not_given(model_settings.store),
            reasoning=self._non_null_or_not_given(model_settings.reasoning),
            metadata=self._non_null_or_not_given(model_settings.metadata),
            **(model_settings.extra_args or {}),
        )

    def _get_client(self) -> AsyncOpenAI:
        # NOTE(review): `__init__` always sets `_client`, so this None branch
        # appears to be defensive only — confirm against callers.
        if self._client is None:
            self._client = AsyncOpenAI()
        return self._client

stream_response (async)

stream_response(system_instructions:str|None,input:str|list[TResponseInputItem],model_settings:ModelSettings,tools:list[Tool],output_schema:AgentOutputSchemaBase|None,handoffs:list[Handoff],tracing:ModelTracing,previous_response_id:str|None,prompt:ResponsePromptParam|None=None,)->AsyncIterator[ResponseStreamEvent]

Yields a partial message as it is generated, as well as the usage information.

Source code in src/agents/models/openai_responses.py
async def stream_response(
    self,
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    previous_response_id: str | None,
    prompt: ResponsePromptParam | None = None,
) -> AsyncIterator[ResponseStreamEvent]:
    """
    Yields a partial message as it is generated, as well as the usage information.

    Opens a streaming Responses API call via `_fetch_response(stream=True)`,
    forwards every event to the caller, and captures the final `Response`
    (from the `ResponseCompletedEvent`) onto the tracing span when allowed.
    Errors are recorded on the span and re-raised.
    """
    with response_span(disabled=tracing.is_disabled()) as span_response:
        try:
            stream = await self._fetch_response(
                system_instructions,
                input,
                model_settings,
                tools,
                output_schema,
                handoffs,
                previous_response_id,
                stream=True,
                prompt=prompt,
            )

            final_response: Response | None = None

            async for chunk in stream:
                # The completed event carries the full final Response object.
                if isinstance(chunk, ResponseCompletedEvent):
                    final_response = chunk.response
                yield chunk

            if final_response and tracing.include_data():
                span_response.span_data.response = final_response
                span_response.span_data.input = input
        except Exception as e:
            span_response.set_error(
                SpanError(
                    message="Error streaming response",
                    data={
                        # Only include error text in traces when permitted.
                        "error": str(e) if tracing.include_data() else e.__class__.__name__,
                    },
                )
            )
            logger.error(f"Error streaming response: {e}")
            raise

Converter

Source code in src/agents/models/openai_responses.py
class Converter:
    """Translates framework-level tool/schema settings into Responses API params."""

    @classmethod
    def convert_tool_choice(
        cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
    ) -> response_create_params.ToolChoice | NotGiven:
        """Map a tool-choice setting to the API's `tool_choice` parameter.

        `None` -> omitted; known hosted-tool names -> typed dicts; any other
        string -> a specific function tool by name.
        """
        if tool_choice is None:
            return NOT_GIVEN
        elif isinstance(tool_choice, MCPToolChoice):
            return {
                "server_label": tool_choice.server_label,
                "type": "mcp",
                "name": tool_choice.name,
            }
        elif tool_choice == "required":
            return "required"
        elif tool_choice == "auto":
            return "auto"
        elif tool_choice == "none":
            return "none"
        elif tool_choice == "file_search":
            return {
                "type": "file_search",
            }
        elif tool_choice == "web_search_preview":
            return {
                "type": "web_search_preview",
            }
        elif tool_choice == "computer_use_preview":
            return {
                "type": "computer_use_preview",
            }
        elif tool_choice == "image_generation":
            return {
                "type": "image_generation",
            }
        elif tool_choice == "code_interpreter":
            return {
                "type": "code_interpreter",
            }
        elif tool_choice == "mcp":
            # Note that this is still here for backwards compatibility,
            # but migrating to MCPToolChoice is recommended.
            return {"type": "mcp"}  # type: ignore [typeddict-item]
        else:
            # Any other string selects a specific function tool by name.
            return {
                "type": "function",
                "name": tool_choice,
            }

    @classmethod
    def get_response_format(
        cls, output_schema: AgentOutputSchemaBase | None
    ) -> ResponseTextConfigParam | NotGiven:
        """Build the `text` (response format) parameter from an output schema.

        Plain-text output requires no format constraint, so it is omitted.
        """
        if output_schema is None or output_schema.is_plain_text():
            return NOT_GIVEN
        else:
            return {
                "format": {
                    "type": "json_schema",
                    "name": "final_output",
                    "schema": output_schema.json_schema(),
                    "strict": output_schema.is_strict_json_schema(),
                }
            }

    @classmethod
    def convert_tools(
        cls,
        tools: list[Tool],
        handoffs: list[Handoff[Any, Any]],
    ) -> ConvertedTools:
        """Convert framework tools plus handoffs into API tool params.

        Raises:
            UserError: if more than one `ComputerTool` is supplied.
        """
        converted_tools: list[ToolParam] = []
        includes: list[ResponseIncludable] = []

        computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)]
        if len(computer_tools) > 1:
            raise UserError(f"You can only provide one computer tool. Got {len(computer_tools)}")

        for tool in tools:
            converted_tool, include = cls._convert_tool(tool)
            converted_tools.append(converted_tool)
            if include:
                includes.append(include)

        # Handoffs are exposed to the model as function tools.
        for handoff in handoffs:
            converted_tools.append(cls._convert_handoff_tool(handoff))

        return ConvertedTools(tools=converted_tools, includes=includes)

    @classmethod
    def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, ResponseIncludable | None]:
        """Returns converted tool and includes"""
        if isinstance(tool, FunctionTool):
            converted_tool: ToolParam = {
                "name": tool.name,
                "parameters": tool.params_json_schema,
                "strict": tool.strict_json_schema,
                "type": "function",
                "description": tool.description,
            }
            includes: ResponseIncludable | None = None
        elif isinstance(tool, WebSearchTool):
            ws: WebSearchToolParam = {
                "type": "web_search_preview",
                "user_location": tool.user_location,
                "search_context_size": tool.search_context_size,
            }
            converted_tool = ws
            includes = None
        elif isinstance(tool, FileSearchTool):
            converted_tool = {
                "type": "file_search",
                "vector_store_ids": tool.vector_store_ids,
            }
            # Optional knobs are only sent when set.
            if tool.max_num_results:
                converted_tool["max_num_results"] = tool.max_num_results
            if tool.ranking_options:
                converted_tool["ranking_options"] = tool.ranking_options
            if tool.filters:
                converted_tool["filters"] = tool.filters
            includes = "file_search_call.results" if tool.include_search_results else None
        elif isinstance(tool, ComputerTool):
            converted_tool = {
                "type": "computer_use_preview",
                "environment": tool.computer.environment,
                "display_width": tool.computer.dimensions[0],
                "display_height": tool.computer.dimensions[1],
            }
            includes = None
        elif isinstance(tool, HostedMCPTool):
            converted_tool = tool.tool_config
            includes = None
        elif isinstance(tool, ImageGenerationTool):
            converted_tool = tool.tool_config
            includes = None
        elif isinstance(tool, CodeInterpreterTool):
            converted_tool = tool.tool_config
            includes = None
        elif isinstance(tool, LocalShellTool):
            converted_tool = {
                "type": "local_shell",
            }
            includes = None
        else:
            raise UserError(f"Unknown tool type: {type(tool)}, tool")

        return converted_tool, includes

    @classmethod
    def _convert_handoff_tool(cls, handoff: Handoff) -> ToolParam:
        # A handoff is presented to the model as an ordinary function tool.
        return {
            "name": handoff.tool_name,
            "parameters": handoff.input_json_schema,
            "strict": handoff.strict_json_schema,
            "type": "function",
            "description": handoff.tool_description,
        }

[8]ページ先頭

©2009-2025 Movatter.jp