from __future__ import annotations

from typing import Union
from typing_extensions import Literal

import httpx

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
    StreamedBinaryAPIResponse,
    AsyncStreamedBinaryAPIResponse,
    to_custom_streamed_response_wrapper,
    async_to_custom_streamed_response_wrapper,
)
from ...types.audio import speech_create_params
from ..._base_client import make_request_options
from ...types.audio.speech_model import SpeechModel

__all__ = ["Speech", "AsyncSpeech"]


class Speech(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> SpeechWithRawResponse:
        return SpeechWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> SpeechWithStreamingResponse:
        return SpeechWithStreamingResponse(self)

    def create(
        self,
        *,
        input: str,
        model: Union[str, SpeechModel],
        voice: Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"],
        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | NotGiven = NOT_GIVEN,
        speed: float | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> _legacy_response.HttpxBinaryResponseContent:
        """
        Generates audio from the input text.

        Args:
          input: The text to generate audio for. The maximum length is 4096 characters.

          model:
              One of the available [TTS models](https://platform.openai.com/docs/models/tts):
              `tts-1` or `tts-1-hd`

          voice: The voice to use when generating the audio. Supported voices are `alloy`,
              `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
              available in the
              [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).

          response_format: The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
              `wav`, and `pcm`.

          speed: The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
              the default.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        Acceptapplication/octet-stream/audio/speechr7   r8   r9   r&   r'   r(   r)   r*   r+   bodyoptionsZcast_to)_postr   r   SpeechCreateParamsr   r   HttpxBinaryResponseContent
r   r7   r8   r9   r&   r'   r(   r)   r*   r+   r    r    r!   create(   s"    ,
zSpeech.create__name__
__module____qualname__r   r"   r$   r   rG   r    r    r    r!   r      s   	c                   @  s`   e Zd ZeddddZeddddZeeddded	d
dddddddddd
ddZdS )r   AsyncSpeechWithRawResponser   c                 C  s   t | S r   )rL   r   r    r    r!   r"   i   s    zAsyncSpeech.with_raw_response AsyncSpeechWithStreamingResponsec                 C  s   t | S r   )rM   r   r    r    r!   r$   m   s    z#AsyncSpeech.with_streaming_responseNr%   r,   r-   r.   r/   r0   r1   r2   r3   r4   r5   r6   c       	   
   	     sP   ddi|pi }| j dt|||||dtjI dH t||||	dtjdI dH S )r:   r;   r<   r=   r>   Nr?   r@   )rC   r   r   rD   r   r   rE   rF   r    r    r!   rG   q   s"    ,
zAsyncSpeech.createrH   r    r    r    r!   r   h   s   	c                   @  s   e Zd ZdddddZdS )r   r   Nonespeechr   c                 C  s   || _ t|j| _d S r   )_speechr   Zto_raw_response_wrapperrG   r   rP   r    r    r!   __init__   s    zSpeechWithRawResponse.__init__NrI   rJ   rK   rS   r    r    r    r!   r      s   r   c                   @  s   e Zd ZdddddZdS )rL   r   rN   rO   c                 C  s   || _ t|j| _d S r   )rQ   r   Zasync_to_raw_response_wrapperrG   rR   r    r    r!   rS      s    z#AsyncSpeechWithRawResponse.__init__NrT   r    r    r    r!   rL      s   rL   c                   @  s   e Zd ZdddddZdS )r#   r   rN   rO   c                 C  s   || _ t|jt| _d S r   )rQ   r   rG   r   rR   r    r    r!   rS      s
    z$SpeechWithStreamingResponse.__init__NrT   r    r    r    r!   r#      s   r#   c                   @  s   e Zd ZdddddZdS )rM   r   rN   rO   c                 C  s   || _ t|jt| _d S r   )rQ   r   rG   r   rR   r    r    r!   rS      s
    z)AsyncSpeechWithStreamingResponse.__init__NrT   r    r    r    r!   rM      s   rM   ))
__future__r   typingr   typing_extensionsr   Zhttpx r   _typesr   r   r	   r
   r   Z_utilsr   r   _compatr   Z	_resourcer   r   	_responser   r   r   r   Ztypes.audior   Z_base_clientr   Ztypes.audio.speech_modelr   __all__r   r   r   rL   r#   rM   r    r    r    r!   <module>   s&   II		

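# Illustrative usage (a minimal sketch, kept in comments so the module itself is
# unchanged; it assumes an `OpenAI` client configured with an API key and an
# output path of your choosing):
#
#     from pathlib import Path
#     from openai import OpenAI
#
#     client = OpenAI()
#
#     # Non-streaming: Speech.create returns HttpxBinaryResponseContent holding
#     # the encoded audio bytes.
#     response = client.audio.speech.create(
#         model="tts-1",
#         voice="alloy",
#         input="Hello, world!",
#         response_format="mp3",
#     )
#     response.write_to_file(Path("speech.mp3"))
#
#     # Streaming: the with_streaming_response wrapper defined above yields a
#     # StreamedBinaryAPIResponse that can be written incrementally to disk.
#     with client.audio.speech.with_streaming_response.create(
#         model="tts-1",
#         voice="alloy",
#         input="Hello, world!",
#     ) as streamed:
#         streamed.stream_to_file(Path("speech_streamed.mp3"))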