import base64
from collections.abc import Sequence
from typing import Literal

from . import exceptions, messages

try:
    from mcp import types as mcp_types
except ImportError as _import_error:
    raise ImportError(
        'Please install the `mcp` package to use the MCP server, '
        'you can use the `mcp` optional group — `pip install "pydantic-ai-slim[mcp]"`'
    ) from _import_error


def map_from_mcp_params(params: mcp_types.CreateMessageRequestParams) -> list[messages.ModelMessage]:
    """Convert from MCP create message request parameters to pydantic-ai messages."""
    pai_messages: list[messages.ModelMessage] = []
    request_parts: list[messages.ModelRequestPart] = []
    if params.systemPrompt:
        request_parts.append(messages.SystemPromptPart(content=params.systemPrompt))
    response_parts: list[messages.ModelResponsePart] = []
    for msg in params.messages:
        content = msg.content
        if msg.role == 'user':
            # if there are any response parts, add a response message wrapping them
            if response_parts:
                pai_messages.append(messages.ModelResponse(parts=response_parts))
                response_parts = []

            # TODO(Marcelo): We can reuse the `_map_tool_result_part` from the mcp module here.
            if isinstance(content, mcp_types.TextContent):
                user_part_content: str | Sequence[messages.UserContent] = content.text
            else:
                # image content
                user_part_content = [
                    messages.BinaryContent(data=base64.b64decode(content.data), media_type=content.mimeType)
                ]

            request_parts.append(messages.UserPromptPart(content=user_part_content))
        else:
            # role is assistant
            # if there are any request parts, add a request message wrapping them
            if request_parts:
                pai_messages.append(messages.ModelRequest(parts=request_parts))
                request_parts = []

            response_parts.append(map_from_sampling_content(content))

    if response_parts:
        pai_messages.append(messages.ModelResponse(parts=response_parts))
    if request_parts:
        pai_messages.append(messages.ModelRequest(parts=request_parts))
    return pai_messages
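# Example usage of `map_from_mcp_params` (illustrative sketch, not part of this module;
# the field values below are made up). A system prompt plus one user text message
# becomes a single `ModelRequest` holding a `SystemPromptPart` and a `UserPromptPart`:
#
#     params = mcp_types.CreateMessageRequestParams(
#         messages=[
#             mcp_types.SamplingMessage(
#                 role='user',
#                 content=mcp_types.TextContent(type='text', text='What is 2 + 2?'),
#             )
#         ],
#         systemPrompt='Answer concisely.',
#         maxTokens=100,
#     )
#     pai_messages = map_from_mcp_params(params)
#     # -> [ModelRequest(parts=[SystemPromptPart(...), UserPromptPart(...)])]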
def map_from_pai_messages(pai_messages: list[messages.ModelMessage]) -> tuple[str, list[mcp_types.SamplingMessage]]:
    """Convert from pydantic-ai messages to MCP sampling messages.

    Returns:
        A tuple containing the system prompt and a list of sampling messages.
    """
    sampling_msgs: list[mcp_types.SamplingMessage] = []

    def add_msg(
        role: Literal['user', 'assistant'],
        content: mcp_types.TextContent | mcp_types.ImageContent | mcp_types.AudioContent,
    ):
        sampling_msgs.append(mcp_types.SamplingMessage(role=role, content=content))

    system_prompt: list[str] = []
    for pai_message in pai_messages:
        if isinstance(pai_message, messages.ModelRequest):
            if pai_message.instructions is not None:
                system_prompt.append(pai_message.instructions)

            for part in pai_message.parts:
                if isinstance(part, messages.SystemPromptPart):
                    system_prompt.append(part.content)
                if isinstance(part, messages.UserPromptPart):
                    if isinstance(part.content, str):
                        add_msg('user', mcp_types.TextContent(type='text', text=part.content))
                    else:
                        for chunk in part.content:
                            if isinstance(chunk, str):
                                add_msg('user', mcp_types.TextContent(type='text', text=chunk))
                            elif isinstance(chunk, messages.BinaryContent) and chunk.is_image:
                                add_msg(
                                    'user',
                                    mcp_types.ImageContent(
                                        type='image',
                                        # `ImageContent.data` expects a base64-encoded string, so encode the raw bytes
                                        data=base64.b64encode(chunk.data).decode(),
                                        mimeType=chunk.media_type,
                                    ),
                                )
                            # TODO(Marcelo): Add support for audio content.
                            else:
                                raise NotImplementedError(f'Unsupported content type: {type(chunk)}')
        else:
            add_msg('assistant', map_from_model_response(pai_message))
    return ''.join(system_prompt), sampling_msgs
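# Example usage of `map_from_pai_messages` (illustrative sketch; message contents are
# made up). Instructions and system prompt parts are folded into the returned system
# prompt string, while user and assistant turns become MCP sampling messages:
#
#     system_prompt, sampling_msgs = map_from_pai_messages([
#         messages.ModelRequest(
#             parts=[messages.UserPromptPart(content='Hello')],
#             instructions='Be terse.',
#         ),
#         messages.ModelResponse(parts=[messages.TextPart(content='Hi!')]),
#     ])
#     # system_prompt == 'Be terse.'
#     # sampling_msgs -> [SamplingMessage(role='user', ...), SamplingMessage(role='assistant', ...)]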
def map_from_model_response(model_response: messages.ModelResponse) -> mcp_types.TextContent:
    """Convert from a model response to MCP text content."""
    text_parts: list[str] = []
    for part in model_response.parts:
        if isinstance(part, messages.TextPart):
            text_parts.append(part.content)
        # TODO(Marcelo): We should ignore ThinkingPart here.
        else:
            raise exceptions.UnexpectedModelBehavior(f'Unexpected part type: {type(part).__name__}, expected TextPart')
    return mcp_types.TextContent(type='text', text=''.join(text_parts))


def map_from_sampling_content(
    content: mcp_types.TextContent | mcp_types.ImageContent | mcp_types.AudioContent,
) -> messages.TextPart:
    """Convert from sampling content to a pydantic-ai text part."""
    if isinstance(content, mcp_types.TextContent):  # pragma: no branch
        return messages.TextPart(content=content.text)
    else:
        raise NotImplementedError('Image and Audio responses in sampling are not yet supported')