Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Tools

MCPToolApprovalFunctionmodule-attribute

# Signature of a callback that approves or rejects an MCP tool call.
MCPToolApprovalFunction = Callable[
    [MCPToolApprovalRequest], MaybeAwaitable[MCPToolApprovalFunctionResult]
]
"""A function that approves or rejects a tool call."""

A function that approves or rejects a tool call.

LocalShellExecutormodule-attribute

# Signature of a callback that runs a local shell command and returns its textual output.
LocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]
"""A function that executes a command on a shell."""

A function that executes a command on a shell.

ShellExecutormodule-attribute

# Signature of a callback for the next-generation shell tool; may return plain text
# or a structured ShellResult.
ShellExecutor = Callable[[ShellCommandRequest], MaybeAwaitable[Union[str, ShellResult]]]
"""Executes a shell command sequence and returns either text or structured output."""

Executes a shell command sequence and returns either text or structured output.

ToolOutputText

Bases:BaseModel

Represents a tool output that should be sent to the model as text.

Source code insrc/agents/tool.py
class ToolOutputText(BaseModel):
    """Represents a tool output that should be sent to the model as text."""

    # Discriminator for structured tool outputs.
    type: Literal["text"] = "text"
    # The text payload sent back to the model.
    text: str

ToolOutputTextDict

Bases:TypedDict

TypedDict variant for text tool outputs.

Source code insrc/agents/tool.py
class ToolOutputTextDict(TypedDict, total=False):
    """TypedDict variant for text tool outputs."""

    # Discriminator for structured tool outputs.
    type: Literal["text"]
    # The text payload sent back to the model.
    text: str

ToolOutputImage

Bases:BaseModel

Represents a tool output that should be sent to the model as an image.

You can provide either an image_url (URL or data URL) or a file_id for previously uploaded content. The optional detail can control vision detail.

Source code insrc/agents/tool.py
class ToolOutputImage(BaseModel):
    """Represents a tool output that should be sent to the model as an image.

    You can provide either an `image_url` (URL or data URL) or a `file_id` for previously uploaded
    content. The optional `detail` can control vision detail.
    """

    type: Literal["image"] = "image"
    image_url: str | None = None
    file_id: str | None = None
    detail: Literal["low", "high", "auto"] | None = None

    @model_validator(mode="after")
    def check_at_least_one_required_field(self) -> ToolOutputImage:
        """Validate that at least one of image_url or file_id is provided."""
        if self.image_url is None and self.file_id is None:
            raise ValueError("At least one of image_url or file_id must be provided")
        return self

check_at_least_one_required_field

check_at_least_one_required_field()->ToolOutputImage

Validate that at least one of image_url or file_id is provided.

Source code insrc/agents/tool.py
@model_validator(mode="after")
def check_at_least_one_required_field(self) -> ToolOutputImage:
    """Validate that at least one of image_url or file_id is provided."""
    if self.image_url is None and self.file_id is None:
        raise ValueError("At least one of image_url or file_id must be provided")
    return self

ToolOutputImageDict

Bases:TypedDict

TypedDict variant for image tool outputs.

Source code insrc/agents/tool.py
classToolOutputImageDict(TypedDict,total=False):"""TypedDict variant for image tool outputs."""type:Literal["image"]image_url:NotRequired[str]file_id:NotRequired[str]detail:NotRequired[Literal["low","high","auto"]]

ToolOutputFileContent

Bases:BaseModel

Represents a tool output that should be sent to the model as a file.

Provide one of file_data (base64), file_url, or file_id. You may also provide an optional filename when using file_data to hint file name.

Source code insrc/agents/tool.py
class ToolOutputFileContent(BaseModel):
    """Represents a tool output that should be sent to the model as a file.

    Provide one of `file_data` (base64), `file_url`, or `file_id`. You may also
    provide an optional `filename` when using `file_data` to hint file name.
    """

    type: Literal["file"] = "file"
    file_data: str | None = None
    file_url: str | None = None
    file_id: str | None = None
    filename: str | None = None

    @model_validator(mode="after")
    def check_at_least_one_required_field(self) -> ToolOutputFileContent:
        """Validate that at least one of file_data, file_url, or file_id is provided."""
        if self.file_data is None and self.file_url is None and self.file_id is None:
            raise ValueError("At least one of file_data, file_url, or file_id must be provided")
        return self

check_at_least_one_required_field

check_at_least_one_required_field()->(ToolOutputFileContent)

Validate that at least one of file_data, file_url, or file_id is provided.

Source code insrc/agents/tool.py
@model_validator(mode="after")
def check_at_least_one_required_field(self) -> ToolOutputFileContent:
    """Validate that at least one of file_data, file_url, or file_id is provided."""
    if self.file_data is None and self.file_url is None and self.file_id is None:
        raise ValueError("At least one of file_data, file_url, or file_id must be provided")
    return self

ToolOutputFileContentDict

Bases:TypedDict

TypedDict variant for file content tool outputs.

Source code insrc/agents/tool.py
classToolOutputFileContentDict(TypedDict,total=False):"""TypedDict variant for file content tool outputs."""type:Literal["file"]file_data:NotRequired[str]file_url:NotRequired[str]file_id:NotRequired[str]filename:NotRequired[str]

FunctionToolResultdataclass

Source code insrc/agents/tool.py
@dataclass
class FunctionToolResult:
    tool: FunctionTool
    """The tool that was run."""

    output: Any
    """The output of the tool."""

    run_item: RunItem
    """The run item that was produced as a result of the tool call."""

toolinstance-attribute

The tool that was run.

outputinstance-attribute

output:Any

The output of the tool.

run_iteminstance-attribute

run_item:RunItem

The run item that was produced as a result of the tool call.

FunctionTooldataclass

A tool that wraps a function. In most cases, you should use the function_tool helpers to create a FunctionTool, as they let you easily wrap a Python function.

Source code insrc/agents/tool.py
@dataclass
class FunctionTool:
    """A tool that wraps a function. In most cases, you should use the `function_tool` helpers to
    create a FunctionTool, as they let you easily wrap a Python function.
    """

    name: str
    """The name of the tool, as shown to the LLM. Generally the name of the function."""

    description: str
    """A description of the tool, as shown to the LLM."""

    params_json_schema: dict[str, Any]
    """The JSON schema for the tool's parameters."""

    on_invoke_tool: Callable[[ToolContext[Any], str], Awaitable[Any]]
    """A function that invokes the tool with the given context and parameters. The params passed
    are:
    1. The tool run context.
    2. The arguments from the LLM, as a JSON string.

    You must return one of the structured tool output types (e.g. ToolOutputText, ToolOutputImage,
    ToolOutputFileContent) or a string representation of the tool output, or a list of them,
    or something we can call `str()` on.
    In case of errors, you can either raise an Exception (which will cause the run to fail) or
    return a string error message (which will be sent back to the LLM).
    """

    strict_json_schema: bool = True
    """Whether the JSON schema is in strict mode. We **strongly** recommend setting this to True,
    as it increases the likelihood of correct JSON input."""

    is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True
    """Whether the tool is enabled. Either a bool or a Callable that takes the run context and agent
    and returns whether the tool is enabled. You can use this to dynamically enable/disable a tool
    based on your context/state."""

    # Tool-specific guardrails
    tool_input_guardrails: list[ToolInputGuardrail[Any]] | None = None
    """Optional list of input guardrails to run before invoking this tool."""

    tool_output_guardrails: list[ToolOutputGuardrail[Any]] | None = None
    """Optional list of output guardrails to run after invoking this tool."""

    def __post_init__(self):
        # Normalize the user-supplied schema up front so the LLM always sees a strict schema
        # when strict mode is requested.
        if self.strict_json_schema:
            self.params_json_schema = ensure_strict_json_schema(self.params_json_schema)

nameinstance-attribute

name:str

The name of the tool, as shown to the LLM. Generally the name of the function.

descriptioninstance-attribute

description:str

A description of the tool, as shown to the LLM.

params_json_schemainstance-attribute

params_json_schema:dict[str,Any]

The JSON schema for the tool's parameters.

on_invoke_toolinstance-attribute

on_invoke_tool:Callable[[ToolContext[Any],str],Awaitable[Any]]

A function that invokes the tool with the given context and parameters. The params passed are: 1. The tool run context. 2. The arguments from the LLM, as a JSON string.

You must return one of the structured tool output types (e.g. ToolOutputText, ToolOutputImage, ToolOutputFileContent) or a string representation of the tool output, or a list of them, or something we can call str() on. In case of errors, you can either raise an Exception (which will cause the run to fail) or return a string error message (which will be sent back to the LLM).

strict_json_schemaclass-attributeinstance-attribute

strict_json_schema:bool=True

Whether the JSON schema is in strict mode. We strongly recommend setting this to True, as it increases the likelihood of correct JSON input.

is_enabledclass-attributeinstance-attribute

is_enabled:(bool|Callable[[RunContextWrapper[Any],AgentBase],MaybeAwaitable[bool],])=True

Whether the tool is enabled. Either a bool or a Callable that takes the run context and agentand returns whether the tool is enabled. You can use this to dynamically enable/disable a toolbased on your context/state.

tool_input_guardrailsclass-attributeinstance-attribute

tool_input_guardrails:(list[ToolInputGuardrail[Any]]|None)=None

Optional list of input guardrails to run before invoking this tool.

tool_output_guardrailsclass-attributeinstance-attribute

tool_output_guardrails:(list[ToolOutputGuardrail[Any]]|None)=None

Optional list of output guardrails to run after invoking this tool.

FileSearchTooldataclass

A hosted tool that lets the LLM search through a vector store. Currently only supported withOpenAI models, using the Responses API.

Source code insrc/agents/tool.py
@dataclass
class FileSearchTool:
    """A hosted tool that lets the LLM search through a vector store. Currently only supported with
    OpenAI models, using the Responses API.
    """

    vector_store_ids: list[str]
    """The IDs of the vector stores to search."""

    max_num_results: int | None = None
    """The maximum number of results to return."""

    include_search_results: bool = False
    """Whether to include the search results in the output produced by the LLM."""

    ranking_options: RankingOptions | None = None
    """Ranking options for search."""

    filters: Filters | None = None
    """A filter to apply based on file attributes."""

    @property
    def name(self):
        # Fixed identifier reported to the Responses API.
        return "file_search"

vector_store_idsinstance-attribute

vector_store_ids:list[str]

The IDs of the vector stores to search.

max_num_resultsclass-attributeinstance-attribute

max_num_results:int|None=None

The maximum number of results to return.

include_search_resultsclass-attributeinstance-attribute

include_search_results:bool=False

Whether to include the search results in the output produced by the LLM.

ranking_optionsclass-attributeinstance-attribute

ranking_options:RankingOptions|None=None

Ranking options for search.

filtersclass-attributeinstance-attribute

filters:Filters|None=None

A filter to apply based on file attributes.

WebSearchTooldataclass

A hosted tool that lets the LLM search the web. Currently only supported with OpenAI models,using the Responses API.

Source code insrc/agents/tool.py
@dataclass
class WebSearchTool:
    """A hosted tool that lets the LLM search the web. Currently only supported with OpenAI models,
    using the Responses API.
    """

    user_location: UserLocation | None = None
    """Optional location for the search. Lets you customize results to be relevant to a location."""

    filters: WebSearchToolFilters | None = None
    """A filter to apply based on file attributes."""

    search_context_size: Literal["low", "medium", "high"] = "medium"
    """The amount of context to use for the search."""

    @property
    def name(self):
        # Fixed identifier reported to the Responses API.
        return "web_search"

user_locationclass-attributeinstance-attribute

user_location:UserLocation|None=None

Optional location for the search. Lets you customize results to be relevant to a location.

filtersclass-attributeinstance-attribute

filters: WebSearchToolFilters | None = None

A filter to apply based on file attributes.

search_context_sizeclass-attributeinstance-attribute

search_context_size:Literal["low","medium","high"]=("medium")

The amount of context to use for the search.

ComputerTooldataclass

A hosted tool that lets the LLM control a computer.

Source code insrc/agents/tool.py
@dataclass
class ComputerTool:
    """A hosted tool that lets the LLM control a computer."""

    computer: Computer | AsyncComputer
    """The computer implementation, which describes the environment and dimensions of the computer,
    as well as implements the computer actions like click, screenshot, etc.
    """

    on_safety_check: Callable[[ComputerToolSafetyCheckData], MaybeAwaitable[bool]] | None = None
    """Optional callback to acknowledge computer tool safety checks."""

    @property
    def name(self):
        # Fixed identifier reported to the Responses API.
        return "computer_use_preview"

computerinstance-attribute

The computer implementation, which describes the environment and dimensions of the computer,as well as implements the computer actions like click, screenshot, etc.

on_safety_checkclass-attributeinstance-attribute

on_safety_check:(Callable[[ComputerToolSafetyCheckData],MaybeAwaitable[bool]]|None)=None

Optional callback to acknowledge computer tool safety checks.

ComputerToolSafetyCheckDatadataclass

Information about a computer tool safety check.

Source code insrc/agents/tool.py
@dataclass
class ComputerToolSafetyCheckData:
    """Information about a computer tool safety check."""

    ctx_wrapper: RunContextWrapper[Any]
    """The run context."""

    agent: Agent[Any]
    """The agent performing the computer action."""

    tool_call: ResponseComputerToolCall
    """The computer tool call."""

    safety_check: PendingSafetyCheck
    """The pending safety check to acknowledge."""

ctx_wrapperinstance-attribute

ctx_wrapper:RunContextWrapper[Any]

The run context.

agentinstance-attribute

agent:Agent[Any]

The agent performing the computer action.

tool_callinstance-attribute

tool_call:ResponseComputerToolCall

The computer tool call.

safety_checkinstance-attribute

safety_check:PendingSafetyCheck

The pending safety check to acknowledge.

MCPToolApprovalRequestdataclass

A request to approve a tool call.

Source code insrc/agents/tool.py
@dataclass
class MCPToolApprovalRequest:
    """A request to approve a tool call."""

    ctx_wrapper: RunContextWrapper[Any]
    """The run context."""

    data: McpApprovalRequest
    """The data from the MCP tool approval request."""

ctx_wrapperinstance-attribute

ctx_wrapper:RunContextWrapper[Any]

The run context.

datainstance-attribute

data:McpApprovalRequest

The data from the MCP tool approval request.

MCPToolApprovalFunctionResult

Bases:TypedDict

The result of an MCP tool approval function.

Source code insrc/agents/tool.py
classMCPToolApprovalFunctionResult(TypedDict):"""The result of an MCP tool approval function."""approve:bool"""Whether to approve the tool call."""reason:NotRequired[str]"""An optional reason, if rejected."""

approveinstance-attribute

approve:bool

Whether to approve the tool call.

reasoninstance-attribute

reason:NotRequired[str]

An optional reason, if rejected.

HostedMCPTooldataclass

A tool that allows the LLM to use a remote MCP server. The LLM will automatically list and call tools, without requiring a round trip back to your code. If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible environment, or you just prefer to run tool calls locally, then you can instead use the servers in agents.mcp and pass Agent(mcp_servers=[...]) to the agent.

Source code insrc/agents/tool.py
@dataclass
class HostedMCPTool:
    """A tool that allows the LLM to use a remote MCP server. The LLM will automatically list and
    call tools, without requiring a round trip back to your code.

    If you want to run MCP servers locally via stdio, in a VPC or other non-publicly-accessible
    environment, or you just prefer to run tool calls locally, then you can instead use the servers
    in `agents.mcp` and pass `Agent(mcp_servers=[...])` to the agent."""

    tool_config: Mcp
    """The MCP tool config, which includes the server URL and other settings."""

    on_approval_request: MCPToolApprovalFunction | None = None
    """An optional function that will be called if approval is requested for an MCP tool. If not
    provided, you will need to manually add approvals/rejections to the input and call
    `Runner.run(...)` again."""

    @property
    def name(self):
        # Fixed identifier reported to the Responses API.
        return "hosted_mcp"

tool_configinstance-attribute

tool_config:Mcp

The MCP tool config, which includes the server URL and other settings.

on_approval_requestclass-attributeinstance-attribute

on_approval_request:MCPToolApprovalFunction|None=None

An optional function that will be called if approval is requested for an MCP tool. If not provided, you will need to manually add approvals/rejections to the input and call Runner.run(...) again.

CodeInterpreterTooldataclass

A tool that allows the LLM to execute code in a sandboxed environment.

Source code insrc/agents/tool.py
@dataclass
class CodeInterpreterTool:
    """A tool that allows the LLM to execute code in a sandboxed environment."""

    tool_config: CodeInterpreter
    """The tool config, which includes the container and other settings."""

    @property
    def name(self):
        # Fixed identifier reported to the Responses API.
        return "code_interpreter"

tool_configinstance-attribute

tool_config:CodeInterpreter

The tool config, which includes the container and other settings.

ImageGenerationTooldataclass

A tool that allows the LLM to generate images.

Source code insrc/agents/tool.py
@dataclass
class ImageGenerationTool:
    """A tool that allows the LLM to generate images."""

    tool_config: ImageGeneration
    """The tool config, which includes image generation settings."""

    @property
    def name(self):
        # Fixed identifier reported to the Responses API.
        return "image_generation"

tool_configinstance-attribute

tool_config:ImageGeneration

The tool config, which includes image generation settings.

LocalShellCommandRequestdataclass

A request to execute a command on a shell.

Source code insrc/agents/tool.py
@dataclass
class LocalShellCommandRequest:
    """A request to execute a command on a shell."""

    ctx_wrapper: RunContextWrapper[Any]
    """The run context."""

    data: LocalShellCall
    """The data from the local shell tool call."""

ctx_wrapperinstance-attribute

ctx_wrapper:RunContextWrapper[Any]

The run context.

datainstance-attribute

data:LocalShellCall

The data from the local shell tool call.

LocalShellTooldataclass

A tool that allows the LLM to execute commands on a shell.

For more details, see:https://platform.openai.com/docs/guides/tools-local-shell

Source code insrc/agents/tool.py
@dataclass
class LocalShellTool:
    """A tool that allows the LLM to execute commands on a shell.

    For more details, see:
    https://platform.openai.com/docs/guides/tools-local-shell
    """

    executor: LocalShellExecutor
    """A function that executes a command on a shell."""

    @property
    def name(self):
        # Fixed identifier reported to the Responses API.
        return "local_shell"

executorinstance-attribute

A function that executes a command on a shell.

ShellCallOutcomedataclass

Describes the terminal condition of a shell command.

Source code insrc/agents/tool.py
@dataclass
class ShellCallOutcome:
    """Describes the terminal condition of a shell command."""

    # Whether the command exited on its own or was cut off by a timeout.
    type: Literal["exit", "timeout"]
    # Process exit code; None when unavailable (e.g. on timeout).
    exit_code: int | None = None

ShellCommandOutputdataclass

Structured output for a single shell command execution.

Source code insrc/agents/tool.py
@dataclass
class ShellCommandOutput:
    """Structured output for a single shell command execution."""

    stdout: str = ""
    stderr: str = ""
    # Terminal condition; defaults via the module-level factory.
    outcome: ShellCallOutcome = field(default_factory=_default_shell_outcome)
    command: str | None = None
    provider_data: dict[str, Any] | None = None

    @property
    def exit_code(self) -> int | None:
        """Convenience accessor for the outcome's exit code."""
        return self.outcome.exit_code

    @property
    def status(self) -> Literal["completed", "timeout"]:
        """Coarse status derived from the outcome type."""
        if self.outcome.type == "timeout":
            return "timeout"
        return "completed"

ShellResultdataclass

Result returned by a shell executor.

Source code insrc/agents/tool.py
@dataclass
class ShellResult:
    """Result returned by a shell executor."""

    # Per-command structured outputs, in execution order.
    output: list[ShellCommandOutput]
    # Optional cap applied to the output length.
    max_output_length: int | None = None
    # Provider-specific passthrough data.
    provider_data: dict[str, Any] | None = None

ShellActionRequestdataclass

Action payload for a next-generation shell call.

Source code insrc/agents/tool.py
@dataclass
class ShellActionRequest:
    """Action payload for a next-generation shell call."""

    # Commands to run, in order.
    commands: list[str]
    # Optional per-call timeout in milliseconds.
    timeout_ms: int | None = None
    # Optional cap on captured output length.
    max_output_length: int | None = None

ShellCallDatadataclass

Normalized shell call data provided to shell executors.

Source code insrc/agents/tool.py
@dataclass
class ShellCallData:
    """Normalized shell call data provided to shell executors."""

    # Identifier correlating this call with its response.
    call_id: str
    # The requested commands and limits.
    action: ShellActionRequest
    # Lifecycle status, if reported by the provider.
    status: Literal["in_progress", "completed"] | None = None
    # Raw provider payload, kept for debugging/passthrough.
    raw: Any | None = None

ShellCommandRequestdataclass

A request to execute a modern shell call.

Source code insrc/agents/tool.py
@dataclass
class ShellCommandRequest:
    """A request to execute a modern shell call."""

    # The run context.
    ctx_wrapper: RunContextWrapper[Any]
    # Normalized shell call data.
    data: ShellCallData

ShellTooldataclass

Next-generation shell tool. LocalShellTool will be deprecated in favor of this.

Source code insrc/agents/tool.py
@dataclass
class ShellTool:
    """Next-generation shell tool. LocalShellTool will be deprecated in favor of this."""

    # Callback that actually runs the requested commands.
    executor: ShellExecutor
    name: str = "shell"

    @property
    def type(self) -> str:
        # Fixed type identifier reported to the Responses API.
        return "shell"

ApplyPatchTooldataclass

Hosted apply_patch tool. Lets the model request file mutations via unified diffs.

Source code insrc/agents/tool.py
@dataclass
class ApplyPatchTool:
    """Hosted apply_patch tool. Lets the model request file mutations via unified diffs."""

    # Callback object that applies the requested edits.
    editor: ApplyPatchEditor
    name: str = "apply_patch"

    @property
    def type(self) -> str:
        # Fixed type identifier reported to the Responses API.
        return "apply_patch"

default_tool_error_function

default_tool_error_function(ctx:RunContextWrapper[Any],error:Exception)->str

The default tool error function, which just returns a generic error message.

Source code insrc/agents/tool.py
def default_tool_error_function(ctx: RunContextWrapper[Any], error: Exception) -> str:
    """The default tool error function, which just returns a generic error message."""
    return f"An error occurred while running the tool. Please try again. Error: {str(error)}"

function_tool

function_tool(func:ToolFunction[...],*,name_override:str|None=None,description_override:str|None=None,docstring_style:DocstringStyle|None=None,use_docstring_info:bool=True,failure_error_function:ToolErrorFunction|None=None,strict_mode:bool=True,is_enabled:bool|Callable[[RunContextWrapper[Any],AgentBase],MaybeAwaitable[bool],]=True,)->FunctionTool
function_tool(*,name_override:str|None=None,description_override:str|None=None,docstring_style:DocstringStyle|None=None,use_docstring_info:bool=True,failure_error_function:ToolErrorFunction|None=None,strict_mode:bool=True,is_enabled:bool|Callable[[RunContextWrapper[Any],AgentBase],MaybeAwaitable[bool],]=True,)->Callable[[ToolFunction[...]],FunctionTool]
function_tool(func:ToolFunction[...]|None=None,*,name_override:str|None=None,description_override:str|None=None,docstring_style:DocstringStyle|None=None,use_docstring_info:bool=True,failure_error_function:ToolErrorFunction|None=default_tool_error_function,strict_mode:bool=True,is_enabled:bool|Callable[[RunContextWrapper[Any],AgentBase],MaybeAwaitable[bool],]=True,)->(FunctionTool|Callable[[ToolFunction[...]],FunctionTool])

Decorator to create a FunctionTool from a function. By default, we will: 1. Parse the function signature to create a JSON schema for the tool's parameters. 2. Use the function's docstring to populate the tool's description. 3. Use the function's docstring to populate argument descriptions. The docstring style is detected automatically, but you can override it.

If the function takes a RunContextWrapper as the first argument, it must match the context type of the agent that uses the tool.

Parameters:

NameTypeDescriptionDefault
funcToolFunction[...] | None

The function to wrap.

None
name_overridestr | None

If provided, use this name for the tool instead of the function's name.

None
description_overridestr | None

If provided, use this description for the tool instead of thefunction's docstring.

None
docstring_styleDocstringStyle | None

If provided, use this style for the tool's docstring. If not provided,we will attempt to auto-detect the style.

None
use_docstring_infobool

If True, use the function's docstring to populate the tool'sdescription and argument descriptions.

True
failure_error_functionToolErrorFunction | None

If provided, use this function to generate an error message whenthe tool call fails. The error message is sent to the LLM. If you pass None, then noerror message will be sent and instead an Exception will be raised.

default_tool_error_function
strict_modebool

Whether to enable strict mode for the tool's JSON schema. Westronglyrecommend setting this to True, as it increases the likelihood of correct JSON input.If False, it allows non-strict JSON schemas. For example, if a parameter has a defaultvalue, it will be optional, additional properties are allowed, etc. See here for more:https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#supported-schemas

True
is_enabledbool |Callable[[RunContextWrapper[Any],AgentBase],MaybeAwaitable[bool]]

Whether the tool is enabled. Can be a bool or a callable that takes the runcontext and agent and returns whether the tool is enabled. Disabled tools are hiddenfrom the LLM at runtime.

True
Source code insrc/agents/tool.py
def function_tool(
    func: ToolFunction[...] | None = None,
    *,
    name_override: str | None = None,
    description_override: str | None = None,
    docstring_style: DocstringStyle | None = None,
    use_docstring_info: bool = True,
    failure_error_function: ToolErrorFunction | None = default_tool_error_function,
    strict_mode: bool = True,
    is_enabled: bool | Callable[[RunContextWrapper[Any], AgentBase], MaybeAwaitable[bool]] = True,
) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]:
    """
    Decorator to create a FunctionTool from a function. By default, we will:
    1. Parse the function signature to create a JSON schema for the tool's parameters.
    2. Use the function's docstring to populate the tool's description.
    3. Use the function's docstring to populate argument descriptions.
    The docstring style is detected automatically, but you can override it.

    If the function takes a `RunContextWrapper` as the first argument, it *must* match the
    context type of the agent that uses the tool.

    Args:
        func: The function to wrap.
        name_override: If provided, use this name for the tool instead of the function's name.
        description_override: If provided, use this description for the tool instead of the
            function's docstring.
        docstring_style: If provided, use this style for the tool's docstring. If not provided,
            we will attempt to auto-detect the style.
        use_docstring_info: If True, use the function's docstring to populate the tool's
            description and argument descriptions.
        failure_error_function: If provided, use this function to generate an error message when
            the tool call fails. The error message is sent to the LLM. If you pass None, then no
            error message will be sent and instead an Exception will be raised.
        strict_mode: Whether to enable strict mode for the tool's JSON schema. We *strongly*
            recommend setting this to True, as it increases the likelihood of correct JSON input.
            If False, it allows non-strict JSON schemas. For example, if a parameter has a default
            value, it will be optional, additional properties are allowed, etc. See here for more:
            https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#supported-schemas
        is_enabled: Whether the tool is enabled. Can be a bool or a callable that takes the run
            context and agent and returns whether the tool is enabled. Disabled tools are hidden
            from the LLM at runtime.
    """

    def _create_function_tool(the_func: ToolFunction[...]) -> FunctionTool:
        # Derive the tool name/description/JSON schema from the wrapped function's
        # signature and docstring.
        schema = function_schema(
            func=the_func,
            name_override=name_override,
            description_override=description_override,
            docstring_style=docstring_style,
            use_docstring_info=use_docstring_info,
            strict_json_schema=strict_mode,
        )

        async def _on_invoke_tool_impl(ctx: ToolContext[Any], input: str) -> Any:
            # Parse the LLM-provided JSON arguments; an empty string means "no arguments".
            try:
                json_data: dict[str, Any] = json.loads(input) if input else {}
            except Exception as e:
                if _debug.DONT_LOG_TOOL_DATA:
                    logger.debug(f"Invalid JSON input for tool {schema.name}")
                else:
                    logger.debug(f"Invalid JSON input for tool {schema.name}: {input}")
                raise ModelBehaviorError(
                    f"Invalid JSON input for tool {schema.name}: {input}"
                ) from e

            if _debug.DONT_LOG_TOOL_DATA:
                logger.debug(f"Invoking tool {schema.name}")
            else:
                logger.debug(f"Invoking tool {schema.name} with input {input}")

            # Validate the parsed arguments against the generated pydantic model.
            try:
                parsed = (
                    schema.params_pydantic_model(**json_data)
                    if json_data
                    else schema.params_pydantic_model()
                )
            except ValidationError as e:
                raise ModelBehaviorError(f"Invalid JSON input for tool {schema.name}: {e}") from e

            args, kwargs_dict = schema.to_call_args(parsed)

            if not _debug.DONT_LOG_TOOL_DATA:
                logger.debug(f"Tool call args: {args}, kwargs: {kwargs_dict}")

            # Dispatch on sync-vs-async and whether the function takes a context argument.
            if inspect.iscoroutinefunction(the_func):
                if schema.takes_context:
                    result = await the_func(ctx, *args, **kwargs_dict)
                else:
                    result = await the_func(*args, **kwargs_dict)
            else:
                if schema.takes_context:
                    result = the_func(ctx, *args, **kwargs_dict)
                else:
                    result = the_func(*args, **kwargs_dict)

            if _debug.DONT_LOG_TOOL_DATA:
                logger.debug(f"Tool {schema.name} completed.")
            else:
                logger.debug(f"Tool {schema.name} returned {result}")

            return result

        async def _on_invoke_tool(ctx: ToolContext[Any], input: str) -> Any:
            # Wrap the real invocation so failures can be converted into an error message
            # for the LLM (via failure_error_function) instead of crashing the run.
            try:
                return await _on_invoke_tool_impl(ctx, input)
            except Exception as e:
                if failure_error_function is None:
                    raise
                result = failure_error_function(ctx, e)
                if inspect.isawaitable(result):
                    return await result

                _error_tracing.attach_error_to_current_span(
                    SpanError(
                        message="Error running tool (non-fatal)",
                        data={
                            "tool_name": schema.name,
                            "error": str(e),
                        },
                    )
                )
                if _debug.DONT_LOG_TOOL_DATA:
                    logger.debug(f"Tool {schema.name} failed")
                else:
                    logger.error(
                        f"Tool {schema.name} failed: {input} {e}",
                        exc_info=e,
                    )
                return result

        return FunctionTool(
            name=schema.name,
            description=schema.description or "",
            params_json_schema=schema.params_json_schema,
            on_invoke_tool=_on_invoke_tool,
            strict_json_schema=strict_mode,
            is_enabled=is_enabled,
        )

    # If func is actually a callable, we were used as @function_tool with no parentheses
    if callable(func):
        return _create_function_tool(func)

    # Otherwise, we were used as @function_tool(...), so return a decorator
    def decorator(real_func: ToolFunction[...]) -> FunctionTool:
        return _create_function_tool(real_func)

    return decorator

[8]ページ先頭

©2009-2025 Movatter.jp