
from __future__ import annotations

from typing import Dict, List, Union, Iterable, Optional, overload
from typing_extensions import Literal

import httpx

from .. import _legacy_response
from ..types import completion_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import (
    required_args,
    maybe_transform,
    async_maybe_transform,
)
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.completion import Completion
from ..types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens,
              as well as the chosen tokens. For example, if `logprobs` is 5, the API will
              return a list of the 5 most likely tokens. The API will always return the
              `logprob` of the sampled token, so there may be up to `logprobs+1` elements in
              the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
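
        Example (illustrative sketch; assumes an `OpenAI` client configured with an
        API key, neither of which is defined in this module):

            from openai import OpenAI

            client = OpenAI()
            completion = client.completions.create(
                model="gpt-3.5-turbo-instruct",
                prompt="Say this is a test",
                max_tokens=7,
            )
            print(completion.choices[0].text)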
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models/overview) for
              descriptions of them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens,
              as well as the chosen tokens. For example, if `logprobs` is 5, the API will
              return a list of the 5 most likely tokens. The API will always return the
              `logprob` of the sampled token, so there may be up to `logprobs+1` elements in
              the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
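
        Example (illustrative sketch; assumes an `OpenAI` client configured with an
        API key, neither of which is defined in this module):

            from openai import OpenAI

            client = OpenAI()
            stream = client.completions.create(
                model="gpt-3.5-turbo-instruct",
                prompt="Say this is a test",
                stream=True,
            )
            for chunk in stream:
                print(chunk.choices[0].text, end="")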
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        # Serialize the keyword arguments into the request body and POST to /completions.
        # When `stream=True` the result is returned as a `Stream[Completion]` that yields
        # chunks as server-sent events arrive; see the overloads above for parameter docs.
        return self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        return AsyncCompletionsWithStreamingResponse(self)

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        # Async counterpart of Completions.create; the parameter documentation in the
        # overloads above applies here as well. Streaming responses are returned as an
        # `AsyncStream[Completion]`.
        return await self._post(
            "/completions",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
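

# Usage sketch for the wrapper classes above (illustrative only; assumes an `OpenAI`
# client configured with an API key, which is not defined in this module):
#
#     client = OpenAI()
#     raw = client.completions.with_raw_response.create(
#         model="gpt-3.5-turbo-instruct",
#         prompt="Say this is a test",
#     )
#     completion = raw.parse()  # access to the raw HTTP response plus the parsed Completion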