
from __future__ import annotations

from typing import Dict, List, Union, Iterable, Optional, overload
from typing_extensions import Literal

import httpx

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._streaming import Stream, AsyncStream
from ...types.chat import completion_create_params
from ..._base_client import make_request_options
from ...types.chat_model import ChatModel
from ...types.chat.chat_completion import ChatCompletion
from ...types.chat.chat_completion_chunk import ChatCompletionChunk
from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ...types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from ...types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Extra per-request options forwarded to the underlying HTTP call.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
              during tool use.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
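
          A minimal illustrative example (a sketch, not part of the parameter
          reference above; it assumes the standard `OpenAI` client with an API key
          available in the environment):

              from openai import OpenAI

              client = OpenAI()  # reads OPENAI_API_KEY from the environment
              completion = client.chat.completions.create(
                  model="gpt-4o",
                  messages=[{"role": "user", "content": "Say hello in one sentence."}],
              )
              print(completion.choices[0].message.content)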
        """
        ...

    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: Literal[True],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Extra per-request options forwarded to the underlying HTTP call.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[ChatCompletionChunk]:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
              during tool use.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
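
          A minimal illustrative streaming example (a sketch, not part of the
          parameter reference above; it assumes the standard `OpenAI` client with an
          API key available in the environment):

              from openai import OpenAI

              client = OpenAI()  # reads OPENAI_API_KEY from the environment
              stream = client.chat.completions.create(
                  model="gpt-4o",
                  messages=[{"role": "user", "content": "Count to five."}],
                  stream=True,
              )
              for chunk in stream:
                  # each chunk is a ChatCompletionChunk; the content delta may be None
                  print(chunk.choices[0].delta.content or "", end="")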
        """
        ...

    @required_args(["messages", "model"], ["messages", "model", "stream"])
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        # Forward all arguments to the shared POST helper; `stream_cls` controls how
        # a streaming (SSE) response body is wrapped.
        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "presence_penalty": presence_penalty,
                    "response_format": response_format,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
            stream=stream or False,
            stream_cls=Stream[ChatCompletionChunk],
        )
__name__
__module____qualname__r   r.   r2   r	   r   rR   r   rP   r/   r-   r%   r%   $   sx   0 0 6 6  9BJSLU:C.7/8&//87@NW)2;D6?PY2;FO>G1:,5( )-$("&;D=m 7m %	m
 6m Hm Jm 8m ,m -m $m -m 5m Lm 'm  9!m" 4#m$ N%m& 0'm( D)m* <+m, /-m. */m0 1m6 &7m8 "9m:  ;m< 9=m> 
?m m^  9BJSLU:C.7/8&//87@NW)2;DPY2;FO>G1:,5( )-$("&;D=m 7m %	m
 m 6m Hm Jm 8m ,m -m $m -m 5m Lm  '!m" 9#m$ N%m& 0'm( D)m* <+m, /-m. */m0 1m6 &7m8 "9m:  ;m< 9=m> 
%?m m^  9BJSLU:C.7/8&//87@NW)2;DPY2;FO>G1:,5( )-$("&;D=m 7m %	m
 m 6m Hm Jm 8m ,m -m $m -m 5m Lm  '!m" 9#m$ N%m& 0'm( D)m* <+m, /-m. */m0 1m6 &7m8 "9m:  ;m< 9=m> 
6?m m^ J(*IJ 9BJSLU:C.7/8&//87@NW)2;DFOPY2;FO>G1:,5( )-$("&;D=C
 7C
 %	C

 6C
 HC
 JC
 8C
 ,C
 -C
 $C
 -C
 5C
 LC
 'C
  9!C
" D#C
$ N%C
& 0'C
( D)C
* <+C
, /-C
. */C
0 1C
6 &7C
8 "9C
:  ;C
< 9=C
> 
6?C
 KC
r/   c                     e Zd Zedd       Zedd       Zeeeeeeeeeeeeeeeeeeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zeeeeeeeeeeeeeeeeeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd	       Zeeeeeeeeeeeeeeeeeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd
       Z e	ddgg d      eeeeeeeeeeeeeeeeeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zy)r&   c                    t        |       S r)   )AsyncCompletionsWithRawResponser+   s    r-   r.   z"AsyncCompletions.with_raw_response  s    .t44r/   c                    t        |       S r)   )%AsyncCompletionsWithStreamingResponser+   s    r-   r2   z(AsyncCompletions.with_streaming_response  s    4T::r/   Nr3   rL   rM   c                  K   ywrO   rP   rQ   s                              r-   rR   zAsyncCompletions.create       \ 	   rT   c                  K   ywrV   rP   rW   s                              r-   rR   zAsyncCompletions.create=  r   r   c                  K   ywrV   rP   rW   s                              r-   rR   zAsyncCompletions.create  r   r   rY   c               F  K   | j                  dt        i d|d|d|d|d|d|d|d	|d
|	d|
d|d|d|d|d|d|d||||||dt        j                         d {   t	        ||||      t
        |xs dt        t                  d {   S 7 57 wr[   )r`   r   r   ra   r   r   r   r    rQ   s                              r-   rR   zAsyncCompletions.create  s[    B ZZ,U (): $]	
   !*  !*  *+> '(8 & D D f  %n!" ";#$ $/"$0" -0 )??3 6 )+Q[el #?U"#67E   #
 #
 #	
#
s$   A%B!'B
(0B!BB!B!)rb   r   )rb   r   rc   )6rL   rd   rM   re   r@   rw   r4   rf   r5   rg   r6   rh   r7   ri   r8   rj   r9   rk   r:   rk   r;   rl   r<   rf   r=   rm   r>   rk   r?   rn   rA   ro   rB   rf   rC   rp   rD   rq   rE   rk   rF   rf   rG   rr   rH   rs   rI   rt   rJ   ru   rK   rv   rb   z AsyncStream[ChatCompletionChunk])6rL   rd   rM   re   r@   rx   r4   rf   r5   rg   r6   rh   r7   ri   r8   rj   r9   rk   r:   rk   r;   rl   r<   rf   r=   rm   r>   rk   r?   rn   rA   ro   rB   rf   rC   rp   rD   rq   rE   rk   rF   rf   rG   rr   rH   rs   rI   rt   rJ   ru   rK   rv   rb   1ChatCompletion | AsyncStream[ChatCompletionChunk])6rL   rd   rM   re   r4   rf   r5   rg   r6   rh   r7   ri   r8   rj   r9   rk   r:   rk   r;   rl   r<   rf   r=   rm   r>   rk   r?   rn   r@   rz   rA   ro   rB   rf   rC   rp   rD   rq   rE   rk   rF   rf   rG   rr   rH   rs   rI   rt   rJ   ru   rK   rv   rb   r   r{   rP   r/   r-   r&   r&     sx   5 5 ; ;  9BJSLU:C.7/8&//87@NW)2;D6?PY2;FO>G1:,5( )-$("&;D=m 7m %	m
 6m Hm Jm 8m ,m -m $m -m 5m Lm 'm  9!m" 4#m$ N%m& 0'm( D)m* <+m, /-m. */m0 1m6 &7m8 "9m:  ;m< 9=m> 
?m m^  9BJSLU:C.7/8&//87@NW)2;DPY2;FO>G1:,5( )-$("&;D=m 7m %	m
 m 6m Hm Jm 8m ,m -m $m -m 5m Lm  '!m" 9#m$ N%m& 0'm( D)m* <+m, /-m. */m0 1m6 &7m8 "9m:  ;m< 9=m> 
*?m m^  9BJSLU:C.7/8&//87@NW)2;DPY2;FO>G1:,5( )-$("&;D=m 7m %	m
 m 6m Hm Jm 8m ,m -m $m -m 5m Lm  '!m" 9#m$ N%m& 0'm( D)m* <+m, /-m. */m0 1m6 &7m8 "9m:  ;m< 9=m> 
;?m m^ J(*IJ 9BJSLU:C.7/8&//87@NW)2;DFOPY2;FO>G1:,5( )-$("&;D=C
 7C
 %	C

 6C
 HC
 JC
 8C
 ,C
 -C
 $C
 -C
 5C
 LC
 'C
  9!C
" D#C
$ N%C
& 0'C
( D)C
* <+C
, /-C
. */C
0 1C
6 &7C
8 "9C
:  ;C
< 9=C
> 
;?C
 KC
r/   c                      e Zd ZddZy)r*   c                Z    || _         t        j                  |j                        | _        y r)   )_completionsr   to_raw_response_wrapperrR   r,   completionss     r-   __init__z#CompletionsWithRawResponse.__init__  s%    '&>>
r/   Nr   r%   rb   Noner|   r}   r~   r   rP   r/   r-   r*   r*         
r/   r*   c                      e Zd ZddZy)r   c                Z    || _         t        j                  |j                        | _        y r)   )r   r   async_to_raw_response_wrapperrR   r   s     r-   r   z(AsyncCompletionsWithRawResponse.__init__  s%    '&DD
r/   Nr   r&   rb   r   r   rP   r/   r-   r   r     r   r/   r   c                      e Zd ZddZy)r1   c                F    || _         t        |j                        | _        y r)   )r   r   rR   r   s     r-   r   z)CompletionsWithStreamingResponse.__init__  s    '2
r/   Nr   r   rP   r/   r-   r1   r1     r   r/   r1   c                      e Zd ZddZy)r   c                F    || _         t        |j                        | _        y r)   )r   r   rR   r   s     r-   r   z.AsyncCompletionsWithStreamingResponse.__init__   s    '8
r/   Nr   r   rP   r/   r-   r   r     r   r/   r   )<
__future__r   typingr   r   r   r   r   r	   typing_extensionsr
   httpx r   _typesr   r   r   r   r   _utilsr   r   r   _compatr   	_resourcer   r   	_responser   r   
_streamingr   r   
types.chatr   _base_clientr   types.chat_modelr   types.chat.chat_completionr    types.chat.chat_completion_chunkr    %types.chat.chat_completion_tool_paramr!   (types.chat.chat_completion_message_paramr"   /types.chat.chat_completion_stream_options_paramr#   3types.chat.chat_completion_tool_choice_option_paramr$   __all__r%   r&   r*   r   r1   r   rP   r/   r-   <module>r      s    # B B %    ? ? 
 ' : Y - 2 * 8 C L R _ f,
-]	
/ ]	
@]	
' ]	
@
 

 

 

 
r/   