
OpenAI Chat Completions model

OpenAIChatCompletionsModel

Bases: Model

Source code in src/agents/models/openai_chatcompletions.py
class OpenAIChatCompletionsModel(Model):
    def __init__(
        self,
        model: str | ChatModel,
        openai_client: AsyncOpenAI,
    ) -> None:
        self.model = model
        self._client = openai_client

    def _non_null_or_omit(self, value: Any) -> Any:
        return value if value is not None else omit

    async def get_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None = None,  # unused
        conversation_id: str | None = None,  # unused
        prompt: ResponsePromptParam | None = None,
    ) -> ModelResponse:
        with generation_span(
            model=str(self.model),
            model_config=model_settings.to_json_dict()
            | {"base_url": str(self._client.base_url)},
            disabled=tracing.is_disabled(),
        ) as span_generation:
            response = await self._fetch_response(
                system_instructions,
                input,
                model_settings,
                tools,
                output_schema,
                handoffs,
                span_generation,
                tracing,
                stream=False,
                prompt=prompt,
            )

            message: ChatCompletionMessage | None = None
            first_choice: Choice | None = None
            if response.choices and len(response.choices) > 0:
                first_choice = response.choices[0]
                message = first_choice.message

            if _debug.DONT_LOG_MODEL_DATA:
                logger.debug("Received model response")
            else:
                if message is not None:
                    logger.debug(
                        "LLM resp:\n%s\n",
                        json.dumps(message.model_dump(), indent=2, ensure_ascii=False),
                    )
                else:
                    finish_reason = first_choice.finish_reason if first_choice else "-"
                    logger.debug(f"LLM resp had no message. finish_reason: {finish_reason}")

            usage = (
                Usage(
                    requests=1,
                    input_tokens=response.usage.prompt_tokens,
                    output_tokens=response.usage.completion_tokens,
                    total_tokens=response.usage.total_tokens,
                    # BeforeValidator in Usage normalizes these from Chat Completions types
                    input_tokens_details=response.usage.prompt_tokens_details,  # type: ignore[arg-type]
                    output_tokens_details=response.usage.completion_tokens_details,  # type: ignore[arg-type]
                )
                if response.usage
                else Usage()
            )
            if tracing.include_data():
                span_generation.span_data.output = (
                    [message.model_dump()] if message is not None else []
                )
            span_generation.span_data.usage = {
                "input_tokens": usage.input_tokens,
                "output_tokens": usage.output_tokens,
            }

            items = Converter.message_to_output_items(message) if message is not None else []

            logprob_models = None
            if first_choice and first_choice.logprobs and first_choice.logprobs.content:
                logprob_models = ChatCmplHelpers.convert_logprobs_for_output_text(
                    first_choice.logprobs.content
                )
            if logprob_models:
                self._attach_logprobs_to_output(items, logprob_models)

            return ModelResponse(
                output=items,
                usage=usage,
                response_id=None,
            )

    def _attach_logprobs_to_output(
        self, output_items: list[ResponseOutputItem], logprobs: list[Logprob]
    ) -> None:
        for output_item in output_items:
            if not isinstance(output_item, ResponseOutputMessage):
                continue
            for content in output_item.content:
                if isinstance(content, ResponseOutputText):
                    content.logprobs = logprobs
                    return

    async def stream_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None = None,  # unused
        conversation_id: str | None = None,  # unused
        prompt: ResponsePromptParam | None = None,
    ) -> AsyncIterator[TResponseStreamEvent]:
        """
        Yields a partial message as it is generated, as well as the usage information.
        """
        with generation_span(
            model=str(self.model),
            model_config=model_settings.to_json_dict()
            | {"base_url": str(self._client.base_url)},
            disabled=tracing.is_disabled(),
        ) as span_generation:
            response, stream = await self._fetch_response(
                system_instructions,
                input,
                model_settings,
                tools,
                output_schema,
                handoffs,
                span_generation,
                tracing,
                stream=True,
                prompt=prompt,
            )

            final_response: Response | None = None
            async for chunk in ChatCmplStreamHandler.handle_stream(response, stream):
                yield chunk

                if chunk.type == "response.completed":
                    final_response = chunk.response

            if tracing.include_data() and final_response:
                span_generation.span_data.output = [final_response.model_dump()]

            if final_response and final_response.usage:
                span_generation.span_data.usage = {
                    "input_tokens": final_response.usage.input_tokens,
                    "output_tokens": final_response.usage.output_tokens,
                }

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        span: Span[GenerationSpanData],
        tracing: ModelTracing,
        stream: Literal[True],
        prompt: ResponsePromptParam | None = None,
    ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        span: Span[GenerationSpanData],
        tracing: ModelTracing,
        stream: Literal[False],
        prompt: ResponsePromptParam | None = None,
    ) -> ChatCompletion: ...

    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        span: Span[GenerationSpanData],
        tracing: ModelTracing,
        stream: bool = False,
        prompt: ResponsePromptParam | None = None,
    ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
        converted_messages = Converter.items_to_messages(input)

        if system_instructions:
            converted_messages.insert(
                0,
                {
                    "content": system_instructions,
                    "role": "system",
                },
            )
        converted_messages = _to_dump_compatible(converted_messages)

        if tracing.include_data():
            span.span_data.input = converted_messages

        if model_settings.parallel_tool_calls and tools:
            parallel_tool_calls: bool | Omit = True
        elif model_settings.parallel_tool_calls is False:
            parallel_tool_calls = False
        else:
            parallel_tool_calls = omit

        tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
        response_format = Converter.convert_response_format(output_schema)

        converted_tools = [Converter.tool_to_openai(tool) for tool in tools] if tools else []
        for handoff in handoffs:
            converted_tools.append(Converter.convert_handoff_tool(handoff))
        converted_tools = _to_dump_compatible(converted_tools)
        tools_param = converted_tools if converted_tools else omit

        if _debug.DONT_LOG_MODEL_DATA:
            logger.debug("Calling LLM")
        else:
            messages_json = json.dumps(
                converted_messages,
                indent=2,
                ensure_ascii=False,
            )
            tools_json = json.dumps(
                converted_tools,
                indent=2,
                ensure_ascii=False,
            )
            logger.debug(
                f"{messages_json}\n"
                f"Tools:\n{tools_json}\n"
                f"Stream: {stream}\n"
                f"Tool choice: {tool_choice}\n"
                f"Response format: {response_format}\n"
            )

        reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
        store = ChatCmplHelpers.get_store_param(self._get_client(), model_settings)

        stream_options = ChatCmplHelpers.get_stream_options_param(
            self._get_client(), model_settings, stream=stream
        )

        stream_param: Literal[True] | Omit = True if stream else omit

        ret = await self._get_client().chat.completions.create(
            model=self.model,
            messages=converted_messages,
            tools=tools_param,
            temperature=self._non_null_or_omit(model_settings.temperature),
            top_p=self._non_null_or_omit(model_settings.top_p),
            frequency_penalty=self._non_null_or_omit(model_settings.frequency_penalty),
            presence_penalty=self._non_null_or_omit(model_settings.presence_penalty),
            max_tokens=self._non_null_or_omit(model_settings.max_tokens),
            tool_choice=tool_choice,
            response_format=response_format,
            parallel_tool_calls=parallel_tool_calls,
            stream=cast(Any, stream_param),
            stream_options=self._non_null_or_omit(stream_options),
            store=self._non_null_or_omit(store),
            reasoning_effort=self._non_null_or_omit(reasoning_effort),
            verbosity=self._non_null_or_omit(model_settings.verbosity),
            top_logprobs=self._non_null_or_omit(model_settings.top_logprobs),
            prompt_cache_retention=self._non_null_or_omit(model_settings.prompt_cache_retention),
            extra_headers=self._merge_headers(model_settings),
            extra_query=model_settings.extra_query,
            extra_body=model_settings.extra_body,
            metadata=self._non_null_or_omit(model_settings.metadata),
            **(model_settings.extra_args or {}),
        )

        if isinstance(ret, ChatCompletion):
            return ret

        responses_tool_choice = OpenAIResponsesConverter.convert_tool_choice(
            model_settings.tool_choice
        )
        if responses_tool_choice is None or responses_tool_choice is omit:
            # For Responses API data compatibility with Chat Completions patterns,
            # we need to set "none" if tool_choice is absent.
            # Without this fix, you'll get the following error:
            # pydantic_core._pydantic_core.ValidationError: 4 validation errors for Response
            # tool_choice.literal['none','auto','required']
            #   Input should be 'none', 'auto' or 'required'
            # see also: https://github.com/openai/openai-agents-python/issues/980
            responses_tool_choice = "auto"

        response = Response(
            id=FAKE_RESPONSES_ID,
            created_at=time.time(),
            model=self.model,
            object="response",
            output=[],
            tool_choice=responses_tool_choice,  # type: ignore[arg-type]
            top_p=model_settings.top_p,
            temperature=model_settings.temperature,
            tools=[],
            parallel_tool_calls=parallel_tool_calls or False,
            reasoning=model_settings.reasoning,
        )
        return response, ret

    def _get_client(self) -> AsyncOpenAI:
        if self._client is None:
            self._client = AsyncOpenAI()
        return self._client

    def _merge_headers(self, model_settings: ModelSettings):
        return {
            **HEADERS,
            **(model_settings.extra_headers or {}),
            **(HEADERS_OVERRIDE.get() or {}),
        }
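
A minimal usage sketch (not part of the source above): wrapping an explicit AsyncOpenAI client in OpenAIChatCompletionsModel and attaching it to an Agent. The model name, instructions, and prompt are placeholders, and the top-level imports from the agents package are assumed.

import asyncio

from openai import AsyncOpenAI

from agents import Agent, OpenAIChatCompletionsModel, Runner


async def main() -> None:
    # Any Chat Completions-compatible endpoint works; configure base_url/api_key on the client as needed.
    client = AsyncOpenAI()
    agent = Agent(
        name="Assistant",
        instructions="Reply concisely.",
        model=OpenAIChatCompletionsModel(model="gpt-4o-mini", openai_client=client),
    )
    result = await Runner.run(agent, "Write a haiku about recursion.")
    print(result.final_output)


asyncio.run(main())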

stream_response async

stream_response(
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    previous_response_id: str | None = None,
    conversation_id: str | None = None,
    prompt: ResponsePromptParam | None = None,
) -> AsyncIterator[TResponseStreamEvent]

Yields a partial message as it is generated, as well as the usage information.

Source code in src/agents/models/openai_chatcompletions.py
async def stream_response(
    self,
    system_instructions: str | None,
    input: str | list[TResponseInputItem],
    model_settings: ModelSettings,
    tools: list[Tool],
    output_schema: AgentOutputSchemaBase | None,
    handoffs: list[Handoff],
    tracing: ModelTracing,
    previous_response_id: str | None = None,  # unused
    conversation_id: str | None = None,  # unused
    prompt: ResponsePromptParam | None = None,
) -> AsyncIterator[TResponseStreamEvent]:
    """
    Yields a partial message as it is generated, as well as the usage information.
    """
    with generation_span(
        model=str(self.model),
        model_config=model_settings.to_json_dict()
        | {"base_url": str(self._client.base_url)},
        disabled=tracing.is_disabled(),
    ) as span_generation:
        response, stream = await self._fetch_response(
            system_instructions,
            input,
            model_settings,
            tools,
            output_schema,
            handoffs,
            span_generation,
            tracing,
            stream=True,
            prompt=prompt,
        )

        final_response: Response | None = None
        async for chunk in ChatCmplStreamHandler.handle_stream(response, stream):
            yield chunk

            if chunk.type == "response.completed":
                final_response = chunk.response

        if tracing.include_data() and final_response:
            span_generation.span_data.output = [final_response.model_dump()]

        if final_response and final_response.usage:
            span_generation.span_data.usage = {
                "input_tokens": final_response.usage.input_tokens,
                "output_tokens": final_response.usage.output_tokens,
            }
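
A hedged sketch of consuming stream_response directly rather than through Runner.run_streamed. The import locations for ModelSettings and ModelTracing, the placeholder model name, and the "response.output_text.delta" event type (the Responses-style deltas produced by ChatCmplStreamHandler) are assumptions; the "response.completed" terminal event is taken from the source above.

import asyncio

from openai import AsyncOpenAI

from agents import ModelSettings, OpenAIChatCompletionsModel
from agents.models.interface import ModelTracing


async def main() -> None:
    model = OpenAIChatCompletionsModel(model="gpt-4o-mini", openai_client=AsyncOpenAI())
    async for event in model.stream_response(
        system_instructions="You are terse.",
        input="Say hello.",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
    ):
        # Text arrives incrementally; the final "response.completed" event carries
        # the assembled Response, including usage.
        if event.type == "response.output_text.delta":
            print(event.delta, end="", flush=True)


asyncio.run(main())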
