
OpenAI STT

OpenAISTTTranscriptionSession

Bases: StreamedTranscriptionSession

A transcription session for OpenAI's STT model.

Source code in src/agents/voice/models/openai_stt.py
```python
class OpenAISTTTranscriptionSession(StreamedTranscriptionSession):
    """A transcription session for OpenAI's STT model."""

    def __init__(
        self,
        input: StreamedAudioInput,
        client: AsyncOpenAI,
        model: str,
        settings: STTModelSettings,
        trace_include_sensitive_data: bool,
        trace_include_sensitive_audio_data: bool,
    ):
        self.connected: bool = False
        self._client = client
        self._model = model
        self._settings = settings
        self._turn_detection = settings.turn_detection or DEFAULT_TURN_DETECTION
        self._trace_include_sensitive_data = trace_include_sensitive_data
        self._trace_include_sensitive_audio_data = trace_include_sensitive_audio_data

        self._input_queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] = input.queue
        self._output_queue: asyncio.Queue[str | ErrorSentinel | SessionCompleteSentinel] = (
            asyncio.Queue()
        )
        self._websocket: websockets.ClientConnection | None = None
        self._event_queue: asyncio.Queue[dict[str, Any] | WebsocketDoneSentinel] = asyncio.Queue()
        self._state_queue: asyncio.Queue[dict[str, Any]] = asyncio.Queue()
        self._turn_audio_buffer: list[npt.NDArray[np.int16 | np.float32]] = []
        self._tracing_span: Span[TranscriptionSpanData] | None = None

        # tasks
        self._listener_task: asyncio.Task[Any] | None = None
        self._process_events_task: asyncio.Task[Any] | None = None
        self._stream_audio_task: asyncio.Task[Any] | None = None
        self._connection_task: asyncio.Task[Any] | None = None
        self._stored_exception: Exception | None = None

    def _start_turn(self) -> None:
        self._tracing_span = transcription_span(
            model=self._model,
            model_config={
                "temperature": self._settings.temperature,
                "language": self._settings.language,
                "prompt": self._settings.prompt,
                "turn_detection": self._turn_detection,
            },
        )
        self._tracing_span.start()

    def _end_turn(self, _transcript: str) -> None:
        if len(_transcript) < 1:
            return

        if self._tracing_span:
            if self._trace_include_sensitive_audio_data:
                self._tracing_span.span_data.input = _audio_to_base64(self._turn_audio_buffer)

            self._tracing_span.span_data.input_format = "pcm"

            if self._trace_include_sensitive_data:
                self._tracing_span.span_data.output = _transcript

            self._tracing_span.finish()
            self._turn_audio_buffer = []
            self._tracing_span = None

    async def _event_listener(self) -> None:
        assert self._websocket is not None, "Websocket not initialized"

        async for message in self._websocket:
            try:
                event = json.loads(message)

                if event.get("type") == "error":
                    raise STTWebsocketConnectionError(f"Error event: {event.get('error')}")

                if event.get("type") in [
                    "session.updated",
                    "transcription_session.updated",
                    "session.created",
                    "transcription_session.created",
                ]:
                    await self._state_queue.put(event)

                await self._event_queue.put(event)
            except Exception as e:
                await self._output_queue.put(ErrorSentinel(e))
                raise STTWebsocketConnectionError("Error parsing events") from e
        await self._event_queue.put(WebsocketDoneSentinel())

    async def _configure_session(self) -> None:
        assert self._websocket is not None, "Websocket not initialized"
        await self._websocket.send(
            json.dumps(
                {
                    "type": "transcription_session.update",
                    "session": {
                        "input_audio_format": "pcm16",
                        "input_audio_transcription": {"model": self._model},
                        "turn_detection": self._turn_detection,
                    },
                }
            )
        )

    async def _setup_connection(self, ws: websockets.ClientConnection) -> None:
        self._websocket = ws
        self._listener_task = asyncio.create_task(self._event_listener())

        try:
            event = await _wait_for_event(
                self._state_queue,
                ["session.created", "transcription_session.created"],
                SESSION_CREATION_TIMEOUT,
            )
        except TimeoutError as e:
            wrapped_err = STTWebsocketConnectionError(
                "Timeout waiting for transcription_session.created event"
            )
            await self._output_queue.put(ErrorSentinel(wrapped_err))
            raise wrapped_err from e
        except Exception as e:
            await self._output_queue.put(ErrorSentinel(e))
            raise e

        await self._configure_session()

        try:
            event = await _wait_for_event(
                self._state_queue,
                ["session.updated", "transcription_session.updated"],
                SESSION_UPDATE_TIMEOUT,
            )
            if _debug.DONT_LOG_MODEL_DATA:
                logger.debug("Session updated")
            else:
                logger.debug(f"Session updated: {event}")
        except TimeoutError as e:
            wrapped_err = STTWebsocketConnectionError(
                "Timeout waiting for transcription_session.updated event"
            )
            await self._output_queue.put(ErrorSentinel(wrapped_err))
            raise wrapped_err from e
        except Exception as e:
            await self._output_queue.put(ErrorSentinel(e))
            raise

    async def _handle_events(self) -> None:
        while True:
            try:
                event = await asyncio.wait_for(
                    self._event_queue.get(), timeout=EVENT_INACTIVITY_TIMEOUT
                )
                if isinstance(event, WebsocketDoneSentinel):
                    # processed all events and websocket is done
                    break

                event_type = event.get("type", "unknown")
                if event_type == "conversation.item.input_audio_transcription.completed":
                    transcript = cast(str, event.get("transcript", ""))
                    if len(transcript) > 0:
                        self._end_turn(transcript)
                        self._start_turn()
                        await self._output_queue.put(transcript)

                await asyncio.sleep(0)  # yield control
            except asyncio.TimeoutError:
                # No new events for a while. Assume the session is done.
                break
            except Exception as e:
                await self._output_queue.put(ErrorSentinel(e))
                raise e

        await self._output_queue.put(SessionCompleteSentinel())

    async def _stream_audio(
        self, audio_queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]]
    ) -> None:
        assert self._websocket is not None, "Websocket not initialized"
        self._start_turn()
        while True:
            buffer = await audio_queue.get()
            if buffer is None:
                break

            self._turn_audio_buffer.append(buffer)
            try:
                await self._websocket.send(
                    json.dumps(
                        {
                            "type": "input_audio_buffer.append",
                            "audio": base64.b64encode(buffer.tobytes()).decode("utf-8"),
                        }
                    )
                )
            except websockets.ConnectionClosed:
                break
            except Exception as e:
                await self._output_queue.put(ErrorSentinel(e))
                raise e

            await asyncio.sleep(0)  # yield control

    async def _process_websocket_connection(self) -> None:
        try:
            async with websockets.connect(
                "wss://api.openai.com/v1/realtime?intent=transcription",
                additional_headers={
                    "Authorization": f"Bearer {self._client.api_key}",
                    "OpenAI-Beta": "realtime=v1",
                    "OpenAI-Log-Session": "1",
                },
            ) as ws:
                await self._setup_connection(ws)
                self._process_events_task = asyncio.create_task(self._handle_events())
                self._stream_audio_task = asyncio.create_task(
                    self._stream_audio(self._input_queue)
                )
                self.connected = True
                if self._listener_task:
                    await self._listener_task
                else:
                    logger.error("Listener task not initialized")
                    raise AgentsException("Listener task not initialized")
        except Exception as e:
            await self._output_queue.put(ErrorSentinel(e))
            raise e

    def _check_errors(self) -> None:
        if self._connection_task and self._connection_task.done():
            exc = self._connection_task.exception()
            if exc and isinstance(exc, Exception):
                self._stored_exception = exc

        if self._process_events_task and self._process_events_task.done():
            exc = self._process_events_task.exception()
            if exc and isinstance(exc, Exception):
                self._stored_exception = exc

        if self._stream_audio_task and self._stream_audio_task.done():
            exc = self._stream_audio_task.exception()
            if exc and isinstance(exc, Exception):
                self._stored_exception = exc

        if self._listener_task and self._listener_task.done():
            exc = self._listener_task.exception()
            if exc and isinstance(exc, Exception):
                self._stored_exception = exc

    def _cleanup_tasks(self) -> None:
        if self._listener_task and not self._listener_task.done():
            self._listener_task.cancel()

        if self._process_events_task and not self._process_events_task.done():
            self._process_events_task.cancel()

        if self._stream_audio_task and not self._stream_audio_task.done():
            self._stream_audio_task.cancel()

        if self._connection_task and not self._connection_task.done():
            self._connection_task.cancel()

    async def transcribe_turns(self) -> AsyncIterator[str]:
        self._connection_task = asyncio.create_task(self._process_websocket_connection())

        while True:
            try:
                turn = await self._output_queue.get()
            except asyncio.CancelledError:
                break

            if (
                turn is None
                or isinstance(turn, ErrorSentinel)
                or isinstance(turn, SessionCompleteSentinel)
            ):
                self._output_queue.task_done()
                break

            yield turn
            self._output_queue.task_done()

        if self._tracing_span:
            self._end_turn("")

        if self._websocket:
            await self._websocket.close()

        self._check_errors()
        if self._stored_exception:
            raise self._stored_exception

    async def close(self) -> None:
        if self._websocket:
            await self._websocket.close()
        self._cleanup_tasks()
```

OpenAISTTModel

Bases: STTModel

A speech-to-text model for OpenAI.

Source code in src/agents/voice/models/openai_stt.py
```python
class OpenAISTTModel(STTModel):
    """A speech-to-text model for OpenAI."""

    def __init__(
        self,
        model: str,
        openai_client: AsyncOpenAI,
    ):
        """Create a new OpenAI speech-to-text model.

        Args:
            model: The name of the model to use.
            openai_client: The OpenAI client to use.
        """
        self.model = model
        self._client = openai_client

    @property
    def model_name(self) -> str:
        return self.model

    def _non_null_or_not_given(self, value: Any) -> Any:
        return value if value is not None else None  # NOT_GIVEN

    async def transcribe(
        self,
        input: AudioInput,
        settings: STTModelSettings,
        trace_include_sensitive_data: bool,
        trace_include_sensitive_audio_data: bool,
    ) -> str:
        """Transcribe an audio input.

        Args:
            input: The audio input to transcribe.
            settings: The settings to use for the transcription.

        Returns:
            The transcribed text.
        """
        with transcription_span(
            model=self.model,
            input=input.to_base64() if trace_include_sensitive_audio_data else "",
            input_format="pcm",
            model_config={
                "temperature": self._non_null_or_not_given(settings.temperature),
                "language": self._non_null_or_not_given(settings.language),
                "prompt": self._non_null_or_not_given(settings.prompt),
            },
        ) as span:
            try:
                response = await self._client.audio.transcriptions.create(
                    model=self.model,
                    file=input.to_audio_file(),
                    prompt=self._non_null_or_not_given(settings.prompt),
                    language=self._non_null_or_not_given(settings.language),
                    temperature=self._non_null_or_not_given(settings.temperature),
                )
                if trace_include_sensitive_data:
                    span.span_data.output = response.text
                return response.text
            except Exception as e:
                span.span_data.output = ""
                span.set_error(SpanError(message=str(e), data={}))
                raise e

    async def create_session(
        self,
        input: StreamedAudioInput,
        settings: STTModelSettings,
        trace_include_sensitive_data: bool,
        trace_include_sensitive_audio_data: bool,
    ) -> StreamedTranscriptionSession:
        """Create a new transcription session.

        Args:
            input: The audio input to transcribe.
            settings: The settings to use for the transcription.
            trace_include_sensitive_data: Whether to include sensitive data in traces.
            trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces.

        Returns:
            A new transcription session.
        """
        return OpenAISTTTranscriptionSession(
            input,
            self._client,
            self.model,
            settings,
            trace_include_sensitive_data,
            trace_include_sensitive_audio_data,
        )
```
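
Constructing the model is a one-liner; a quick sketch, assuming the import path matches the file location shown above and using a placeholder model name:

```python
from openai import AsyncOpenAI

from agents.voice.models.openai_stt import OpenAISTTModel

# "gpt-4o-transcribe" is an assumed name; pass whichever STT model you use.
stt_model = OpenAISTTModel(model="gpt-4o-transcribe", openai_client=AsyncOpenAI())
```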

__init__

__init__(model: str, openai_client: AsyncOpenAI)

Create a new OpenAI speech-to-text model.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `model` | `str` | The name of the model to use. | required |
| `openai_client` | `AsyncOpenAI` | The OpenAI client to use. | required |

Source code in src/agents/voice/models/openai_stt.py
```python
def __init__(
    self,
    model: str,
    openai_client: AsyncOpenAI,
):
    """Create a new OpenAI speech-to-text model.

    Args:
        model: The name of the model to use.
        openai_client: The OpenAI client to use.
    """
    self.model = model
    self._client = openai_client
```

transcribe async

transcribe(input: AudioInput, settings: STTModelSettings, trace_include_sensitive_data: bool, trace_include_sensitive_audio_data: bool) -> str

Transcribe an audio input.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `input` | `AudioInput` | The audio input to transcribe. | required |
| `settings` | `STTModelSettings` | The settings to use for the transcription. | required |

Returns:

| Type | Description |
| --- | --- |
| `str` | The transcribed text. |

Source code in src/agents/voice/models/openai_stt.py
```python
async def transcribe(
    self,
    input: AudioInput,
    settings: STTModelSettings,
    trace_include_sensitive_data: bool,
    trace_include_sensitive_audio_data: bool,
) -> str:
    """Transcribe an audio input.

    Args:
        input: The audio input to transcribe.
        settings: The settings to use for the transcription.

    Returns:
        The transcribed text.
    """
    with transcription_span(
        model=self.model,
        input=input.to_base64() if trace_include_sensitive_audio_data else "",
        input_format="pcm",
        model_config={
            "temperature": self._non_null_or_not_given(settings.temperature),
            "language": self._non_null_or_not_given(settings.language),
            "prompt": self._non_null_or_not_given(settings.prompt),
        },
    ) as span:
        try:
            response = await self._client.audio.transcriptions.create(
                model=self.model,
                file=input.to_audio_file(),
                prompt=self._non_null_or_not_given(settings.prompt),
                language=self._non_null_or_not_given(settings.language),
                temperature=self._non_null_or_not_given(settings.temperature),
            )
            if trace_include_sensitive_data:
                span.span_data.output = response.text
            return response.text
        except Exception as e:
            span.span_data.output = ""
            span.set_error(SpanError(message=str(e), data={}))
            raise e
```
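
A hedged sketch of a one-shot transcription call. The import paths and model name are assumptions; `AudioInput` holding an int16 PCM buffer is inferred from the `to_audio_file()` / `to_base64()` calls in the source above.

```python
# Sketch, assuming these paths and that AudioInput wraps an int16 PCM buffer.
import asyncio

import numpy as np
from openai import AsyncOpenAI

from agents.voice import AudioInput, STTModelSettings
from agents.voice.models.openai_stt import OpenAISTTModel


async def main() -> None:
    model = OpenAISTTModel(model="gpt-4o-transcribe", openai_client=AsyncOpenAI())
    buffer = np.zeros(24000, dtype=np.int16)  # stand-in: one second of silence
    text = await model.transcribe(
        input=AudioInput(buffer=buffer),
        settings=STTModelSettings(),
        trace_include_sensitive_data=False,
        trace_include_sensitive_audio_data=False,
    )
    print(text)


asyncio.run(main())
```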

create_session async

create_session(input: StreamedAudioInput, settings: STTModelSettings, trace_include_sensitive_data: bool, trace_include_sensitive_audio_data: bool) -> StreamedTranscriptionSession

Create a new transcription session.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `input` | `StreamedAudioInput` | The audio input to transcribe. | required |
| `settings` | `STTModelSettings` | The settings to use for the transcription. | required |
| `trace_include_sensitive_data` | `bool` | Whether to include sensitive data in traces. | required |
| `trace_include_sensitive_audio_data` | `bool` | Whether to include sensitive audio data in traces. | required |

Returns:

| Type | Description |
| --- | --- |
| `StreamedTranscriptionSession` | A new transcription session. |

Source code in src/agents/voice/models/openai_stt.py
```python
async def create_session(
    self,
    input: StreamedAudioInput,
    settings: STTModelSettings,
    trace_include_sensitive_data: bool,
    trace_include_sensitive_audio_data: bool,
) -> StreamedTranscriptionSession:
    """Create a new transcription session.

    Args:
        input: The audio input to transcribe.
        settings: The settings to use for the transcription.
        trace_include_sensitive_data: Whether to include sensitive data in traces.
        trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces.

    Returns:
        A new transcription session.
    """
    return OpenAISTTTranscriptionSession(
        input,
        self._client,
        self.model,
        settings,
        trace_include_sensitive_data,
        trace_include_sensitive_audio_data,
    )
```
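
The object returned here is the `OpenAISTTTranscriptionSession` documented at the top of this page, so it is consumed the same way; only the construction differs. A brief sketch under the same assumptions (import paths, placeholder model name) as the earlier examples:

```python
import asyncio

from openai import AsyncOpenAI

from agents.voice import STTModelSettings, StreamedAudioInput
from agents.voice.models.openai_stt import OpenAISTTModel


async def main() -> None:
    model = OpenAISTTModel(model="gpt-4o-transcribe", openai_client=AsyncOpenAI())
    session = await model.create_session(
        input=StreamedAudioInput(),
        settings=STTModelSettings(),
        trace_include_sensitive_data=False,
        trace_include_sensitive_audio_data=False,
    )
    # Feed audio and iterate session.transcribe_turns() exactly as in the
    # OpenAISTTTranscriptionSession sketch near the top of this page.
    await session.close()


asyncio.run(main())
```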
