Movatterモバイル変換


[0]ホーム

URL:


Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

feat: Add output function tracing#2191

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
alexmojaki merged 18 commits into pydantic:main from bitnahian:2108-bitnahian
Jul 16, 2025
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
18 commits
Select commit · Hold shift + click to select a range
0ae3c18
Add span generation to ObjectOutputProcessor
bitnahianJul 13, 2025
539cfd3
Add working version with non-ToolCallPart call
bitnahianJul 13, 2025
0bb1ba3
Add working version
bitnahianJul 13, 2025
16b9f4d
Working tests
bitnahianJul 13, 2025
491c856
Fix tool_response serialisation in tracing
bitnahianJul 13, 2025
e2b96a2
Add tests for TraceContext
bitnahianJul 13, 2025
2d736b1
Add tracing for TextOutputSchema
bitnahianJul 13, 2025
61b17de
Raise errors in unreachable code
bitnahianJul 13, 2025
ef8c0af
simplify
alexmojakiJul 15, 2025
eb7be2c
dedupe function schema call to one helper function
bitnahianJul 15, 2025
16e5aad
Merge branch 'main' into 2108-bitnahian
bitnahianJul 16, 2025
c3ee3ab
Add list snapshot for retry test
bitnahianJul 16, 2025
b210ee8
Make trace context non-nullable
bitnahianJul 16, 2025
ae7cd7e
Update tests/test_logfire.py
bitnahianJul 16, 2025
2fe4642
Fix PR comments
bitnahianJul 16, 2025
a3a25e8
Fix more with_calls
bitnahianJul 16, 2025
6fa702d
Fix more PR comments
bitnahianJul 16, 2025
b2bb60e
Move statements outside try block
bitnahianJul 16, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
PrevPrevious commit
NextNext commit
Add working version with non-ToolCallPart call
  • Loading branch information
@bitnahian
bitnahian committed Jul 13, 2025
commit 539cfd3c07a72228de4889c5ac4fcb114f797e22
3 changes: 2 additions & 1 deletion pydantic_ai_slim/pydantic_ai/_agent_graph.py
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -589,7 +589,8 @@ async def _handle_text_response(
try:
if isinstance(output_schema, _output.TextOutputSchema):
run_context = build_run_context(ctx)
result_data = await output_schema.process(text, run_context)
trace_context = _output.build_trace_context(ctx)
result_data = await output_schema.process(text, run_context, trace_context)
else:
m = _messages.RetryPromptPart(
content='Plain text responses are not permitted, please include your response in a tool call',
Expand Down
78 changes: 76 additions & 2 deletions pydantic_ai_slim/pydantic_ai/_output.py
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -97,6 +97,10 @@ def call(self) -> _messages.ToolCallPart:
raise UserError('No tool call is set in the trace context.')
return self._call

def has_call(self) -> bool:
"""Check if a tool call is set in the trace context."""
return self._call is not None

@contextmanager
def with_call(self, call: _messages.ToolCallPart) -> Iterator[None]:
"""Context manager to set the current tool call."""
Expand DownExpand Up@@ -736,7 +740,8 @@ async def process(
if self._function_schema:
try:
# Wraps the output function call in an OpenTelemetry span if trace_context is provided.
if trace_context:
if trace_context and trace_context.has_call():
# This is a tool call context, so we include tool call attributes
message = trace_context.call
span_attributes = {
'gen_ai.tool.name': message.tool_name,
Expand DownExpand Up@@ -774,6 +779,39 @@ async def process(
'tool_response',
str(output),
)
elif trace_context and not trace_context.has_call():
# This is not a tool call (e.g., PromptedOutput), so create a span without tool call attributes
function_name = getattr(self._function_schema.function, '__name__', 'output_function')
span_attributes = {
**({'tool_arguments': json.dumps(output)} if trace_context.include_content else {}),
'logfire.msg': f'running output function: {function_name}',
# add the JSON schema so these attributes are formatted nicely in Logfire
'logfire.json_schema': json.dumps(
{
'type': 'object',
'properties': {
**(
{
'tool_arguments': {'type': 'object'},
'tool_response': {'type': 'object'},
}
if trace_context.include_content
else {}
),
},
}
),
}
with trace_context.tracer.start_as_current_span(
'running output function',
attributes=span_attributes,
) as span:
output = await self._function_schema.call(output, run_context)
if trace_context.include_content and span.is_recording():
span.set_attribute(
'tool_response',
str(output),
)
else:
output = await self._function_schema.call(output, run_context)
except ModelRetry as r:
Expand DownExpand Up@@ -947,7 +985,43 @@ async def process(
args = {self._str_argument_name: data}

try:
output = await self._function_schema.call(args, run_context)
# Wraps the output function call in an OpenTelemetry span if trace_context is provided.
# Note: PlainTextOutputProcessor is used for text responses (not tool calls),
# so we don't have tool call attributes like gen_ai.tool.name or gen_ai.tool.call.id
if trace_context:
function_name = getattr(self._function_schema.function, '__name__', 'text_output_function')
span_attributes = {
**({'tool_arguments': json.dumps(args)} if trace_context.include_content else {}),
'logfire.msg': f'running text output function: {function_name}',
# add the JSON schema so these attributes are formatted nicely in Logfire
'logfire.json_schema': json.dumps(
{
'type': 'object',
'properties': {
**(
{
'tool_arguments': {'type': 'object'},
'tool_response': {'type': 'object'},
}
if trace_context.include_content
else {}
),
},
}
),
}
with trace_context.tracer.start_as_current_span(
'running text output function',
attributes=span_attributes,
) as span:
output = await self._function_schema.call(args, run_context)
if trace_context.include_content and span.is_recording():
span.set_attribute(
'tool_response',
str(output),
)
else:
output = await self._function_schema.call(args, run_context)
except ModelRetry as r:
if wrap_validation_errors:
m = _messages.RetryPromptPart(
Expand Down
146 changes: 85 additions & 61 deletions tests/test_logfire.py
View file
Open in desktop
Original file line numberDiff line numberDiff line change
Expand Up@@ -9,12 +9,13 @@
from pydantic import BaseModel
from typing_extensions import NotRequired, TypedDict

from pydantic_ai import Agent, RunContext
from pydantic_ai import Agent
from pydantic_ai._utils import get_traceparent
from pydantic_ai.messages import ModelMessage, ModelResponse, ToolCallPart
from pydantic_ai.messages import ModelMessage, ModelResponse, TextPart, ToolCallPart
from pydantic_ai.models.function import AgentInfo, FunctionModel
from pydantic_ai.models.instrumented import InstrumentationSettings, InstrumentedModel
from pydantic_ai.models.test import TestModel
from pydantic_ai.output import PromptedOutput, TextOutput

from .conftest import IsStr

Expand DownExpand Up@@ -778,90 +779,113 @@ def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:


def upcase_text(text: str) -> str:
    """Return *text* with every character converted to uppercase."""
    uppercased = str.upper(text)
    return uppercased


class OrderInfo(BaseModel):
    """Output model used by the output-function tracing tests."""

    # Order identifier, e.g. 'ORD-123'.
    order_id: str
    # Order total amount.
    total: float
    # Processing status label, e.g. 'processed'.
    status: str
@pytest.mark.skipif(not logfire_installed, reason='logfire not installed')
@pytest.mark.parametrize('include_content', [True, False])
def test_text_output_function_logfire_attributes(
    get_logfire_summary: Callable[[], LogfireSummary],
    include_content: bool,
) -> None:
    """Test logfire attributes for TextOutput functions (PlainTextOutputProcessor)."""

    def call_text_response(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
        # Return a plain text response (not a tool call) so the agent exercises the
        # plain-text output path. TextPart is imported at module level; no local import needed.
        return ModelResponse(parts=[TextPart(content='hello world')])

    instrumentation_settings = InstrumentationSettings(include_content=include_content)
    my_agent = Agent(model=FunctionModel(call_text_response), instrument=instrumentation_settings)

    result = my_agent.run_sync('Say hello', output_type=TextOutput(upcase_text))
    # The model returned 'hello world'; upcase_text must have been applied.
    assert result.output == 'HELLO WORLD'

    summary = get_logfire_summary()

    # Find the text output function span attributes.
    text_function_attributes = None
    for attributes in summary.attributes.values():
        if 'running text output function' in attributes.get('logfire.msg', ''):
            text_function_attributes = attributes
            break

    if include_content:
        assert text_function_attributes is not None
        # Verify the basic span attributes without tool call attributes.
        assert 'tool_arguments' in text_function_attributes
        assert 'tool_response' in text_function_attributes
        assert 'logfire.msg' in text_function_attributes
        assert 'logfire.json_schema' in text_function_attributes
        # These tool call specific attributes should NOT be present for a text response.
        assert 'gen_ai.tool.name' not in text_function_attributes
        assert 'gen_ai.tool.call.id' not in text_function_attributes
        # Verify the content.
        assert text_function_attributes['tool_response'] == 'HELLO WORLD'
        assert 'upcase_text' in text_function_attributes['logfire.msg']
    else:
        # When include_content=False, we might still have the span but without content.
        if text_function_attributes is not None:
            assert 'tool_arguments' not in text_function_attributes
            assert 'tool_response' not in text_function_attributes
            assert 'gen_ai.tool.name' not in text_function_attributes
            assert 'gen_ai.tool.call.id' not in text_function_attributes


@pytest.mark.skipif(not logfire_installed, reason='logfire not installed')
@pytest.mark.parametrize('include_content', [True, False])
def test_output_type_function_with_run_context_logfire_attributes(
def test_prompted_output_function_logfire_attributes(
get_logfire_summary: Callable[[], LogfireSummary],
include_content: bool,
) -> None:
"""Test logfire attributes for output functions that use RunContext."""
"""Test that spans are created for PromptedOutput functions with appropriate attributes."""

def process_order(ctx: RunContext[None], order_data: str, customer_id: int) -> OrderInfo:
# Function with RunContext as first parameter and multiple other parameters
return OrderInfo(order_id='ORD-123', total=59.99, status='processed')
def upcase_text(text: str) -> str:
return text.upper()

def call_tool(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
assert info.output_tools is not None
args_json = '{"order_data": "customer_order_details", "customer_id": 12345}'
return ModelResponse(parts=[ToolCallPart(info.output_tools[0].name, args_json)])
call_count = 0

def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
nonlocal call_count
call_count += 1
# Simulate the model returning JSON that will be parsed and used to call the function
return ModelResponse(parts=[TextPart(content='{"text": "hello world"}')])

instrumentation_settings = InstrumentationSettings(include_content=include_content)
my_agent = Agent(model=FunctionModel(call_tool), instrument=instrumentation_settings)
agent = Agent(
model=FunctionModel(call_tool), instrument=instrumentation_settings, output_type=PromptedOutput(upcase_text)
)

result = my_agent.run_sync('Process order', output_type=process_order)
assert result.output == OrderInfo(order_id='ORD-123', total=59.99, status='processed')
result = agent.run_sync('test')

# Check that the function was called and returned the expected result
assert result.output == 'HELLO WORLD'
assert call_count == 1

summary = get_logfire_summary()

# Find the output function span attributes
output_function_attributes = None
for attributes in summary.attributes.values():
if attributes.get('gen_ai.tool.name') == 'final_result':
if attributes.get('logfire.msg', '').startswith('running output function: upcase_text'):
output_function_attributes = attributes
break

assert output_function_attributes is not None

# Check that tool call attributes are NOT present (this is not a tool call)
assert 'gen_ai.tool.name' not in output_function_attributes
assert 'gen_ai.tool.call.id' not in output_function_attributes

# Check content inclusion based on include_content flag
if include_content:
assert output_function_attributes == snapshot(
{
'gen_ai.tool.name': 'final_result',
'gen_ai.tool.call.id': IsStr(),
'tool_arguments': '{"order_data": "customer_order_details", "customer_id": 12345}',
'tool_response': "order_id='ORD-123' total=59.99 status='processed'",
'logfire.msg': 'running tool: final_result',
'logfire.json_schema': IsJson(
snapshot(
{
'type': 'object',
'properties': {
'tool_arguments': {'type': 'object'},
'tool_response': {'type': 'object'},
'gen_ai.tool.name': {},
'gen_ai.tool.call.id': {},
},
}
)
),
'logfire.span_type': 'span',
}
)
assert 'tool_arguments' in output_function_attributes
assert 'tool_response' in output_function_attributes
# tool_arguments should contain the parsed JSON arguments
assert output_function_attributes['tool_arguments'] == '{"text": "hello world"}'
assert output_function_attributes['tool_response'] == 'HELLO WORLD'
else:
assert output_function_attributes == snapshot(
{
'gen_ai.tool.name': 'final_result',
'gen_ai.tool.call.id': IsStr(),
'logfire.msg': 'running tool: final_result',
'logfire.json_schema': IsJson(
snapshot(
{
'type': 'object',
'properties': {
'gen_ai.tool.name': {},
'gen_ai.tool.call.id': {},
},
}
)
),
'logfire.span_type': 'span',
}
)
assert 'tool_arguments' not in output_function_attributes
assert 'tool_response' not in output_function_attributes

[8]ページ先頭

©2009-2025 Movatter.jp