OpenAI Responses model

OpenAIResponsesModel

Bases: Model

Implementation of Model that uses the OpenAI Responses API.

Source code in src/agents/models/openai_responses.py
class OpenAIResponsesModel(Model):
    """
    Implementation of `Model` that uses the OpenAI Responses API.
    """

    def __init__(
        self,
        model: str | ChatModel,
        openai_client: AsyncOpenAI,
        *,
        model_is_explicit: bool = True,
    ) -> None:
        self.model = model
        self._model_is_explicit = model_is_explicit
        self._client = openai_client

    def _non_null_or_omit(self, value: Any) -> Any:
        return value if value is not None else omit

    async def get_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None = None,
        conversation_id: str | None = None,
        prompt: ResponsePromptParam | None = None,
    ) -> ModelResponse:
        with response_span(disabled=tracing.is_disabled()) as span_response:
            try:
                response = await self._fetch_response(
                    system_instructions,
                    input,
                    model_settings,
                    tools,
                    output_schema,
                    handoffs,
                    previous_response_id=previous_response_id,
                    conversation_id=conversation_id,
                    stream=False,
                    prompt=prompt,
                )

                if _debug.DONT_LOG_MODEL_DATA:
                    logger.debug("LLM responded")
                else:
                    logger.debug(
                        "LLM resp:\n"
                        f"""{
                            json.dumps(
                                [x.model_dump() for x in response.output],
                                indent=2,
                                ensure_ascii=False,
                            )
                        }\n"""
                    )

                usage = (
                    Usage(
                        requests=1,
                        input_tokens=response.usage.input_tokens,
                        output_tokens=response.usage.output_tokens,
                        total_tokens=response.usage.total_tokens,
                        input_tokens_details=response.usage.input_tokens_details,
                        output_tokens_details=response.usage.output_tokens_details,
                    )
                    if response.usage
                    else Usage()
                )

                if tracing.include_data():
                    span_response.span_data.response = response
                    span_response.span_data.input = input
            except Exception as e:
                span_response.set_error(
                    SpanError(
                        message="Error getting response",
                        data={
                            "error": str(e) if tracing.include_data() else e.__class__.__name__,
                        },
                    )
                )
                request_id = e.request_id if isinstance(e, APIStatusError) else None
                logger.error(f"Error getting response: {e}. (request_id: {request_id})")
                raise

        return ModelResponse(
            output=response.output,
            usage=usage,
            response_id=response.id,
        )

    async def stream_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None = None,
        conversation_id: str | None = None,
        prompt: ResponsePromptParam | None = None,
    ) -> AsyncIterator[ResponseStreamEvent]:
        """
        Yields a partial message as it is generated, as well as the usage information.
        """
        with response_span(disabled=tracing.is_disabled()) as span_response:
            try:
                stream = await self._fetch_response(
                    system_instructions,
                    input,
                    model_settings,
                    tools,
                    output_schema,
                    handoffs,
                    previous_response_id=previous_response_id,
                    conversation_id=conversation_id,
                    stream=True,
                    prompt=prompt,
                )

                final_response: Response | None = None

                async for chunk in stream:
                    if isinstance(chunk, ResponseCompletedEvent):
                        final_response = chunk.response
                    yield chunk

                if final_response and tracing.include_data():
                    span_response.span_data.response = final_response
                    span_response.span_data.input = input

            except Exception as e:
                span_response.set_error(
                    SpanError(
                        message="Error streaming response",
                        data={
                            "error": str(e) if tracing.include_data() else e.__class__.__name__,
                        },
                    )
                )
                logger.error(f"Error streaming response: {e}")
                raise

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None,
        conversation_id: str | None,
        stream: Literal[True],
        prompt: ResponsePromptParam | None = None,
    ) -> AsyncStream[ResponseStreamEvent]: ...

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None,
        conversation_id: str | None,
        stream: Literal[False],
        prompt: ResponsePromptParam | None = None,
    ) -> Response: ...

    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        previous_response_id: str | None = None,
        conversation_id: str | None = None,
        stream: Literal[True] | Literal[False] = False,
        prompt: ResponsePromptParam | None = None,
    ) -> Response | AsyncStream[ResponseStreamEvent]:
        list_input = ItemHelpers.input_to_new_input_list(input)
        list_input = _to_dump_compatible(list_input)

        if model_settings.parallel_tool_calls and tools:
            parallel_tool_calls: bool | Omit = True
        elif model_settings.parallel_tool_calls is False:
            parallel_tool_calls = False
        else:
            parallel_tool_calls = omit

        tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
        converted_tools = Converter.convert_tools(tools, handoffs)
        converted_tools_payload = _to_dump_compatible(converted_tools.tools)
        response_format = Converter.get_response_format(output_schema)

        should_omit_model = prompt is not None and not self._model_is_explicit
        model_param: str | ChatModel | Omit = self.model if not should_omit_model else omit
        should_omit_tools = prompt is not None and len(converted_tools_payload) == 0
        tools_param: list[ToolParam] | Omit = (
            converted_tools_payload if not should_omit_tools else omit
        )

        include_set: set[str] = set(converted_tools.includes)
        if model_settings.response_include is not None:
            include_set.update(model_settings.response_include)
        if model_settings.top_logprobs is not None:
            include_set.add("message.output_text.logprobs")
        include = cast(list[ResponseIncludable], list(include_set))

        if _debug.DONT_LOG_MODEL_DATA:
            logger.debug("Calling LLM")
        else:
            input_json = json.dumps(
                list_input,
                indent=2,
                ensure_ascii=False,
            )
            tools_json = json.dumps(
                converted_tools_payload,
                indent=2,
                ensure_ascii=False,
            )
            logger.debug(
                f"Calling LLM {self.model} with input:\n"
                f"{input_json}\n"
                f"Tools:\n{tools_json}\n"
                f"Stream: {stream}\n"
                f"Tool choice: {tool_choice}\n"
                f"Response format: {response_format}\n"
                f"Previous response id: {previous_response_id}\n"
                f"Conversation id: {conversation_id}\n"
            )

        extra_args = dict(model_settings.extra_args or {})
        if model_settings.top_logprobs is not None:
            extra_args["top_logprobs"] = model_settings.top_logprobs
        if model_settings.verbosity is not None:
            if response_format is not omit:
                response_format["verbosity"] = model_settings.verbosity  # type: ignore [index]
            else:
                response_format = {"verbosity": model_settings.verbosity}

        stream_param: Literal[True] | Omit = True if stream else omit

        response = await self._client.responses.create(
            previous_response_id=self._non_null_or_omit(previous_response_id),
            conversation=self._non_null_or_omit(conversation_id),
            instructions=self._non_null_or_omit(system_instructions),
            model=model_param,
            input=list_input,
            include=include,
            tools=tools_param,
            prompt=self._non_null_or_omit(prompt),
            temperature=self._non_null_or_omit(model_settings.temperature),
            top_p=self._non_null_or_omit(model_settings.top_p),
            truncation=self._non_null_or_omit(model_settings.truncation),
            max_output_tokens=self._non_null_or_omit(model_settings.max_tokens),
            tool_choice=tool_choice,
            parallel_tool_calls=parallel_tool_calls,
            stream=cast(Any, stream_param),
            extra_headers=self._merge_headers(model_settings),
            extra_query=model_settings.extra_query,
            extra_body=model_settings.extra_body,
            text=response_format,
            store=self._non_null_or_omit(model_settings.store),
            prompt_cache_retention=self._non_null_or_omit(model_settings.prompt_cache_retention),
            reasoning=self._non_null_or_omit(model_settings.reasoning),
            metadata=self._non_null_or_omit(model_settings.metadata),
            **extra_args,
        )
        return cast(Union[Response, AsyncStream[ResponseStreamEvent]], response)

    def _get_client(self) -> AsyncOpenAI:
        if self._client is None:
            self._client = AsyncOpenAI()
        return self._client

    def _merge_headers(self, model_settings: ModelSettings):
        return {
            **_HEADERS,
            **(model_settings.extra_headers or {}),
            **(_HEADERS_OVERRIDE.get() or {}),
        }
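
For example, the model can be constructed with an explicit AsyncOpenAI client and passed to an agent in place of a model name. The snippet below is a minimal, illustrative sketch only; it assumes the package's top-level Agent and Runner API and uses a placeholder model name.

import asyncio

from openai import AsyncOpenAI

from agents import Agent, Runner
from agents.models.openai_responses import OpenAIResponsesModel


async def main() -> None:
    # Construct the Responses-API-backed model with an explicit client.
    model = OpenAIResponsesModel(model="gpt-4o", openai_client=AsyncOpenAI())

    # Pass the model instance to an Agent instead of a model name string.
    agent = Agent(name="Assistant", instructions="Reply concisely.", model=model)

    result = await Runner.run(agent, "What is the Responses API?")
    print(result.final_output)


if __name__ == "__main__":
    asyncio.run(main())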

stream_response async

stream_response(
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    previous_response_id: str | None = None,
    conversation_id: str | None = None,
    prompt: ResponsePromptParam | None = None,
) -> AsyncIterator[ResponseStreamEvent]

Yields a partial message as it is generated, as well as the usage information.

Source code in src/agents/models/openai_responses.py
async def stream_response(
    self,
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    previous_response_id: str | None = None,
    conversation_id: str | None = None,
    prompt: ResponsePromptParam | None = None,
) -> AsyncIterator[ResponseStreamEvent]:
    """
    Yields a partial message as it is generated, as well as the usage information.
    """
    with response_span(disabled=tracing.is_disabled()) as span_response:
        try:
            stream = await self._fetch_response(
                system_instructions,
                input,
                model_settings,
                tools,
                output_schema,
                handoffs,
                previous_response_id=previous_response_id,
                conversation_id=conversation_id,
                stream=True,
                prompt=prompt,
            )

            final_response: Response | None = None

            async for chunk in stream:
                if isinstance(chunk, ResponseCompletedEvent):
                    final_response = chunk.response
                yield chunk

            if final_response and tracing.include_data():
                span_response.span_data.response = final_response
                span_response.span_data.input = input

        except Exception as e:
            span_response.set_error(
                SpanError(
                    message="Error streaming response",
                    data={
                        "error": str(e) if tracing.include_data() else e.__class__.__name__,
                    },
                )
            )
            logger.error(f"Error streaming response: {e}")
            raise
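
For illustration, the method can also be consumed directly as an async iterator of raw Responses API stream events. This is a minimal sketch, not part of the documented source: it assumes default ModelSettings, tracing disabled via ModelTracing.DISABLED, and that text deltas arrive as response.output_text.delta events.

from agents import ModelSettings
from agents.models.interface import ModelTracing


async def print_streamed_text(model: OpenAIResponsesModel, user_input: str) -> None:
    # Hypothetical low-level usage: iterate raw stream events and print text deltas.
    async for event in model.stream_response(
        system_instructions=None,
        input=user_input,
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
    ):
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)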

Converter

Source code in src/agents/models/openai_responses.py
class Converter:
    @classmethod
    def convert_tool_choice(
        cls, tool_choice: Literal["auto", "required", "none"] | str | MCPToolChoice | None
    ) -> response_create_params.ToolChoice | Omit:
        if tool_choice is None:
            return omit
        elif isinstance(tool_choice, MCPToolChoice):
            return {
                "server_label": tool_choice.server_label,
                "type": "mcp",
                "name": tool_choice.name,
            }
        elif tool_choice == "required":
            return "required"
        elif tool_choice == "auto":
            return "auto"
        elif tool_choice == "none":
            return "none"
        elif tool_choice == "file_search":
            return {
                "type": "file_search",
            }
        elif tool_choice == "web_search":
            return {
                # TODO: revist the type: ignore comment when ToolChoice is updated in the future
                "type": "web_search",  # type: ignore[misc, return-value]
            }
        elif tool_choice == "web_search_preview":
            return {
                "type": "web_search_preview",
            }
        elif tool_choice == "computer_use_preview":
            return {
                "type": "computer_use_preview",
            }
        elif tool_choice == "image_generation":
            return {
                "type": "image_generation",
            }
        elif tool_choice == "code_interpreter":
            return {
                "type": "code_interpreter",
            }
        elif tool_choice == "mcp":
            # Note that this is still here for backwards compatibility,
            # but migrating to MCPToolChoice is recommended.
            return {"type": "mcp"}  # type: ignore[misc, return-value]
        else:
            return {
                "type": "function",
                "name": tool_choice,
            }

    @classmethod
    def get_response_format(
        cls, output_schema: AgentOutputSchemaBase | None
    ) -> ResponseTextConfigParam | Omit:
        if output_schema is None or output_schema.is_plain_text():
            return omit
        else:
            return {
                "format": {
                    "type": "json_schema",
                    "name": "final_output",
                    "schema": output_schema.json_schema(),
                    "strict": output_schema.is_strict_json_schema(),
                }
            }

    @classmethod
    def convert_tools(
        cls,
        tools: list[Tool],
        handoffs: list[Handoff[Any, Any]],
    ) -> ConvertedTools:
        converted_tools: list[ToolParam] = []
        includes: list[ResponseIncludable] = []

        computer_tools = [tool for tool in tools if isinstance(tool, ComputerTool)]
        if len(computer_tools) > 1:
            raise UserError(f"You can only provide one computer tool. Got {len(computer_tools)}")

        for tool in tools:
            converted_tool, include = cls._convert_tool(tool)
            converted_tools.append(converted_tool)
            if include:
                includes.append(include)

        for handoff in handoffs:
            converted_tools.append(cls._convert_handoff_tool(handoff))

        return ConvertedTools(tools=converted_tools, includes=includes)

    @classmethod
    def _convert_tool(cls, tool: Tool) -> tuple[ToolParam, ResponseIncludable | None]:
        """Returns converted tool and includes"""

        if isinstance(tool, FunctionTool):
            converted_tool: ToolParam = {
                "name": tool.name,
                "parameters": tool.params_json_schema,
                "strict": tool.strict_json_schema,
                "type": "function",
                "description": tool.description,
            }
            includes: ResponseIncludable | None = None
        elif isinstance(tool, WebSearchTool):
            # TODO: revist the type: ignore comment when ToolParam is updated in the future
            converted_tool = {
                "type": "web_search",
                "filters": tool.filters.model_dump() if tool.filters is not None else None,  # type: ignore [typeddict-item]
                "user_location": tool.user_location,
                "search_context_size": tool.search_context_size,
            }
            includes = None
        elif isinstance(tool, FileSearchTool):
            converted_tool = {
                "type": "file_search",
                "vector_store_ids": tool.vector_store_ids,
            }
            if tool.max_num_results:
                converted_tool["max_num_results"] = tool.max_num_results
            if tool.ranking_options:
                converted_tool["ranking_options"] = tool.ranking_options
            if tool.filters:
                converted_tool["filters"] = tool.filters
            includes = "file_search_call.results" if tool.include_search_results else None
        elif isinstance(tool, ComputerTool):
            converted_tool = {
                "type": "computer_use_preview",
                "environment": tool.computer.environment,
                "display_width": tool.computer.dimensions[0],
                "display_height": tool.computer.dimensions[1],
            }
            includes = None
        elif isinstance(tool, HostedMCPTool):
            converted_tool = tool.tool_config
            includes = None
        elif isinstance(tool, ApplyPatchTool):
            converted_tool = cast(ToolParam, {"type": "apply_patch"})
            includes = None
        elif isinstance(tool, ShellTool):
            converted_tool = cast(ToolParam, {"type": "shell"})
            includes = None
        elif isinstance(tool, ImageGenerationTool):
            converted_tool = tool.tool_config
            includes = None
        elif isinstance(tool, CodeInterpreterTool):
            converted_tool = tool.tool_config
            includes = None
        elif isinstance(tool, LocalShellTool):
            converted_tool = {
                "type": "local_shell",
            }
            includes = None
        else:
            raise UserError(f"Unknown tool type: {type(tool)}, tool")

        return converted_tool, includes

    @classmethod
    def _convert_handoff_tool(cls, handoff: Handoff) -> ToolParam:
        return {
            "name": handoff.tool_name,
            "parameters": handoff.input_json_schema,
            "strict": handoff.strict_json_schema,
            "type": "function",
            "description": handoff.tool_description,
        }
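
As a quick illustration of the conversions above, convert_tool_choice passes the built-in string values through to the Responses API and treats any other string as the name of a function tool. A small sketch, assuming Converter is imported from the module documented on this page:

from agents.models.openai_responses import Converter

# Built-in choices map to the payloads shown in the source above.
assert Converter.convert_tool_choice("auto") == "auto"
assert Converter.convert_tool_choice("file_search") == {"type": "file_search"}

# Any other string is interpreted as the name of a specific function tool.
assert Converter.convert_tool_choice("my_tool") == {"type": "function", "name": "my_tool"}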
