LiteLLM Models

LitellmModel

Bases: Model

This class enables using any model via LiteLLM. LiteLLM allows you to access OpenAI, Anthropic, Gemini, Mistral, and many other models. See the list of supported models here: litellm models (https://docs.litellm.ai/docs/providers).
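For orientation before the source listing, here is a minimal usage sketch: attach a LitellmModel to an agent and run it with the SDK's Runner. The model identifier and API key are placeholders; any provider/model string that LiteLLM supports should work the same way.

from agents import Agent, Runner
from agents.extensions.models.litellm_model import LitellmModel

# Hypothetical example: the model id and api_key values are placeholders.
agent = Agent(
    name="Assistant",
    instructions="You only respond in haikus.",
    model=LitellmModel(model="anthropic/claude-3-5-sonnet-20240620", api_key="your-api-key"),
)

result = Runner.run_sync(agent, "Write a haiku about recursion.")
print(result.final_output)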

Source code in src/agents/extensions/models/litellm_model.py
class LitellmModel(Model):
    """This class enables using any model via LiteLLM. LiteLLM allows you to access OpenAI,
    Anthropic, Gemini, Mistral, and many other models.
    See supported models here: [litellm models](https://docs.litellm.ai/docs/providers).
    """

    def __init__(
        self,
        model: str,
        base_url: str | None = None,
        api_key: str | None = None,
    ):
        self.model = model
        self.base_url = base_url
        self.api_key = api_key

    async def get_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None,
        prompt: Any | None = None,
    ) -> ModelResponse:
        with generation_span(
            model=str(self.model),
            model_config=model_settings.to_json_dict()
            | {"base_url": str(self.base_url or ""), "model_impl": "litellm"},
            disabled=tracing.is_disabled(),
        ) as span_generation:
            response = await self._fetch_response(
                system_instructions,
                input,
                model_settings,
                tools,
                output_schema,
                handoffs,
                span_generation,
                tracing,
                stream=False,
                prompt=prompt,
            )

            assert isinstance(response.choices[0], litellm.types.utils.Choices)

            if _debug.DONT_LOG_MODEL_DATA:
                logger.debug("Received model response")
            else:
                logger.debug(
                    f"""LLM resp:\n{
                        json.dumps(response.choices[0].message.model_dump(), indent=2, ensure_ascii=False)
                    }\n"""
                )

            if hasattr(response, "usage"):
                response_usage = response.usage
                usage = (
                    Usage(
                        requests=1,
                        input_tokens=response_usage.prompt_tokens,
                        output_tokens=response_usage.completion_tokens,
                        total_tokens=response_usage.total_tokens,
                        input_tokens_details=InputTokensDetails(
                            cached_tokens=getattr(
                                response_usage.prompt_tokens_details, "cached_tokens", 0
                            )
                            or 0
                        ),
                        output_tokens_details=OutputTokensDetails(
                            reasoning_tokens=getattr(
                                response_usage.completion_tokens_details, "reasoning_tokens", 0
                            )
                            or 0
                        ),
                    )
                    if response.usage
                    else Usage()
                )
            else:
                usage = Usage()
                logger.warning("No usage information returned from Litellm")

            if tracing.include_data():
                span_generation.span_data.output = [response.choices[0].message.model_dump()]
            span_generation.span_data.usage = {
                "input_tokens": usage.input_tokens,
                "output_tokens": usage.output_tokens,
            }

            items = Converter.message_to_output_items(
                LitellmConverter.convert_message_to_openai(response.choices[0].message)
            )

            return ModelResponse(
                output=items,
                usage=usage,
                response_id=None,
            )

    async def stream_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        tracing: ModelTracing,
        previous_response_id: str | None,
        prompt: Any | None = None,
    ) -> AsyncIterator[TResponseStreamEvent]:
        with generation_span(
            model=str(self.model),
            model_config=model_settings.to_json_dict()
            | {"base_url": str(self.base_url or ""), "model_impl": "litellm"},
            disabled=tracing.is_disabled(),
        ) as span_generation:
            response, stream = await self._fetch_response(
                system_instructions,
                input,
                model_settings,
                tools,
                output_schema,
                handoffs,
                span_generation,
                tracing,
                stream=True,
                prompt=prompt,
            )

            final_response: Response | None = None
            async for chunk in ChatCmplStreamHandler.handle_stream(response, stream):
                yield chunk

                if chunk.type == "response.completed":
                    final_response = chunk.response

            if tracing.include_data() and final_response:
                span_generation.span_data.output = [final_response.model_dump()]

            if final_response and final_response.usage:
                span_generation.span_data.usage = {
                    "input_tokens": final_response.usage.input_tokens,
                    "output_tokens": final_response.usage.output_tokens,
                }

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        span: Span[GenerationSpanData],
        tracing: ModelTracing,
        stream: Literal[True],
        prompt: Any | None = None,
    ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...

    @overload
    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        span: Span[GenerationSpanData],
        tracing: ModelTracing,
        stream: Literal[False],
        prompt: Any | None = None,
    ) -> litellm.types.utils.ModelResponse: ...

    async def _fetch_response(
        self,
        system_instructions: str | None,
        input: str | list[TResponseInputItem],
        model_settings: ModelSettings,
        tools: list[Tool],
        output_schema: AgentOutputSchemaBase | None,
        handoffs: list[Handoff],
        span: Span[GenerationSpanData],
        tracing: ModelTracing,
        stream: bool = False,
        prompt: Any | None = None,
    ) -> litellm.types.utils.ModelResponse | tuple[Response, AsyncStream[ChatCompletionChunk]]:
        converted_messages = Converter.items_to_messages(input)

        if system_instructions:
            converted_messages.insert(
                0,
                {
                    "content": system_instructions,
                    "role": "system",
                },
            )
        if tracing.include_data():
            span.span_data.input = converted_messages

        parallel_tool_calls = (
            True
            if model_settings.parallel_tool_calls and tools and len(tools) > 0
            else False
            if model_settings.parallel_tool_calls is False
            else None
        )
        tool_choice = Converter.convert_tool_choice(model_settings.tool_choice)
        response_format = Converter.convert_response_format(output_schema)

        converted_tools = [Converter.tool_to_openai(tool) for tool in tools] if tools else []

        for handoff in handoffs:
            converted_tools.append(Converter.convert_handoff_tool(handoff))

        if _debug.DONT_LOG_MODEL_DATA:
            logger.debug("Calling LLM")
        else:
            logger.debug(
                f"Calling Litellm model: {self.model}\n"
                f"{json.dumps(converted_messages, indent=2, ensure_ascii=False)}\n"
                f"Tools:\n{json.dumps(converted_tools, indent=2, ensure_ascii=False)}\n"
                f"Stream: {stream}\n"
                f"Tool choice: {tool_choice}\n"
                f"Response format: {response_format}\n"
            )

        reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None

        stream_options = None
        if stream and model_settings.include_usage is not None:
            stream_options = {"include_usage": model_settings.include_usage}

        extra_kwargs = {}
        if model_settings.extra_query:
            extra_kwargs["extra_query"] = model_settings.extra_query
        if model_settings.metadata:
            extra_kwargs["metadata"] = model_settings.metadata
        if model_settings.extra_body and isinstance(model_settings.extra_body, dict):
            extra_kwargs.update(model_settings.extra_body)

        # Add kwargs from model_settings.extra_args, filtering out None values
        if model_settings.extra_args:
            extra_kwargs.update(model_settings.extra_args)

        ret = await litellm.acompletion(
            model=self.model,
            messages=converted_messages,
            tools=converted_tools or None,
            temperature=model_settings.temperature,
            top_p=model_settings.top_p,
            frequency_penalty=model_settings.frequency_penalty,
            presence_penalty=model_settings.presence_penalty,
            max_tokens=model_settings.max_tokens,
            tool_choice=self._remove_not_given(tool_choice),
            response_format=self._remove_not_given(response_format),
            parallel_tool_calls=parallel_tool_calls,
            stream=stream,
            stream_options=stream_options,
            reasoning_effort=reasoning_effort,
            extra_headers={**HEADERS, **(model_settings.extra_headers or {})},
            api_key=self.api_key,
            base_url=self.base_url,
            **extra_kwargs,
        )

        if isinstance(ret, litellm.types.utils.ModelResponse):
            return ret

        response = Response(
            id=FAKE_RESPONSES_ID,
            created_at=time.time(),
            model=self.model,
            object="response",
            output=[],
            tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
            if tool_choice != NOT_GIVEN
            else "auto",
            top_p=model_settings.top_p,
            temperature=model_settings.temperature,
            tools=[],
            parallel_tool_calls=parallel_tool_calls or False,
            reasoning=model_settings.reasoning,
        )
        return response, ret

    def _remove_not_given(self, value: Any) -> Any:
        if isinstance(value, NotGiven):
            return None
        return value
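As the _fetch_response implementation above shows, ModelSettings fields are mapped onto the litellm.acompletion call: temperature, top_p, max_tokens, and the penalty settings are forwarded directly, extra_headers is merged with the SDK's default HEADERS, and extra_args is spread into the call as additional keyword arguments. Below is a hedged sketch of tuning these knobs at the agent level; the model id, header, and extra_args values are illustrative placeholders, not recommendations.

from agents import Agent, ModelSettings
from agents.extensions.models.litellm_model import LitellmModel

agent = Agent(
    name="Assistant",
    model=LitellmModel(model="gemini/gemini-2.0-flash", api_key="your-api-key"),  # placeholder model id
    model_settings=ModelSettings(
        temperature=0.2,                              # forwarded as-is to litellm.acompletion
        max_tokens=512,                               # forwarded as-is to litellm.acompletion
        extra_headers={"X-Example-Header": "docs"},   # merged with the default HEADERS
        extra_args={"drop_params": True},             # example kwarg spread in via **extra_kwargs
    ),
)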
