from __future__ import annotations

import dataclasses
from typing import List, Iterable, Iterator, Optional, Union

import google.ai.generativelanguage as glm

from google.generativeai.client import get_default_text_client
from google.generativeai.types import text_types
from google.generativeai.types import model_types


def _make_text_prompt(prompt: Union[str, dict[str, str]]) -> glm.TextPrompt:
    if isinstance(prompt, str):
        return glm.TextPrompt(text=prompt)
    elif isinstance(prompt, dict):
        return glm.TextPrompt(prompt)
    else:
        raise TypeError("Expected string or dictionary for text prompt.")
r   models/chat-lamda-001modelr   temperaturecandidate_countmax_output_tokenstop_ptop_kstop_sequenceszmodel_types.ModelNameOptionszOptional[str]zOptional[float]zOptional[int]zUnion[str, Iterable[str]]zglm.GenerateTextRequest)	r   r   r   r   r   r   r   r   r   c              
   C  sJ   t | } t|d}t|tr$|g}|r0t|}tj| |||||||dS )Nr   r   )r
   make_model_namer   r   r   listr   ZGenerateTextRequestr   r   r   r   _make_generate_text_request&   s     


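

# Illustrative sketch (not part of the original module): `_make_generate_text_request`
# is a pure request builder. It resolves the model name, wraps the prompt in a
# `glm.TextPrompt`, and normalizes `stop_sequences` (a bare string becomes a
# one-element list). Building a request performs no network call, so it can be
# exercised locally. The function below is a hypothetical demo helper.
def _example_build_request() -> glm.GenerateTextRequest:
    request = _make_generate_text_request(
        model="models/text-bison-001",
        prompt="Tell me a story about a magic backpack.",
        temperature=0.5,
        stop_sequences="\n",  # normalized to ["\n"] by the builder
    )
    return request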
r"   zmodels/text-bison-001)r   r   r   r   r   r   r   clientz&Optional[model_types.ModelNameOptions]r   zOptional[glm.TextServiceClient]ztext_types.Completion)
r   r   r   r   r   r   r   r   r#   r   c        	   
   
   C  s$   t | |||||||d}	t||	dS )a  Calls the API and returns a `types.Completion` containing the response.

    Args:
        model: Which model to call, as a string or a `types.Model`.
        prompt: Free-form input text given to the model. Given a prompt, the model will
                generate text that completes the input text.
        temperature: Controls the randomness of the output. Must be positive.
            Typical values are in the range: `[0.0,1.0]`. Higher values produce a
            more random and varied response. A temperature of zero will be deterministic.
        candidate_count: The **maximum** number of generated response messages to return.
            This value must be between `[1, 8]`, inclusive. If unset, this
            will default to `1`.

            Note: Only unique candidates are returned. Higher temperatures are more
            likely to produce unique candidates. Setting `temperature=0.0` will always
            return 1 candidate regardless of the `candidate_count`.
        max_output_tokens: Maximum number of tokens to include in a candidate. Must be greater
                           than zero. If unset, will default to 64.
        top_k: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling.
            `top_k` sets the maximum number of tokens to sample from on each step.
        top_p: The API uses combined [nucleus](https://arxiv.org/abs/1904.09751) and top-k sampling.
            `top_p` configures the nucleus sampling. It sets the maximum cumulative
            probability of tokens to sample from.
            For example, if the sorted probabilities are
            `[0.5, 0.2, 0.1, 0.1, 0.05, 0.05]` a `top_p` of `0.8` will sample
            as `[0.625, 0.25, 0.125, 0, 0, 0]` (see the worked sketch after
            this function).
        stop_sequences: A set of up to 5 character sequences that will stop output generation.
          If specified, the API will stop at the first appearance of a stop
          sequence. The stop sequence will not be included as part of the response.
        client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.

    Returns:
        A `types.Completion` containing the model's text completion response.
    r   )r#   request)r"   _generate_response)
r   r   r   r   r   r   r   r   r#   r$   r   r   r   generate_textD   s    .r&   F)initc                   @  s   e Zd Zdd ZdS )
Completionc                 K  s>   |  D ]\}}t| || qd | _| jr:| jd d | _d S )Nr   output)itemssetattrresult
candidates)selfkwargskeyvaluer   r   r   __init__   s
    zCompletion.__init__N)__name__
__module____qualname__r2   r   r   r   r   r(      s   r(   zglm.TextServiceClient)r$   r#   r   c                 C  s8   |d krt  }|| }t||}tf d|i|S )NZ_client)r   r&   typeto_dictr(   )r$   r#   responser   r   r   r%      s
    
r%   )r   r   r#   c                 C  s`   | dkrd} n
t | } |dkr&t }tj| |d}||}t||}|d d |d< |S )a  Calls the API to create an embedding for the text passed in.

    Args:
        model: Which model to call, as a string or a `types.Model`.

        text: Free-form input text given to the model. Given a string, the model will
              generate an embedding based on the input text.

        client: If you're not relying on a default client, you pass a `glm.TextServiceClient` instead.

    Returns:
        Dictionary containing the embedding (list of float values) for the input text.
    Nr   )r   r   Z	embeddingr1   )r
   r    r   r   ZEmbedTextRequestZ
embed_textr6   r7   )r   r   r#   Zembedding_requestZembedding_responseZembedding_dictr   r   r   generate_embeddings   s    

r9   )N)N)
__future__r   Zdataclassestypingr   r   r   r   r   Zgoogle.ai.generativelanguageZaiZgenerativelanguager   Zgoogle.generativeai.clientr   Zgoogle.generativeai.typesr	   r
   r   r"   r&   Z	dataclassr(   r%   r9   r   r   r   r   <module>   s<   " $<
 
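

# Illustrative usage sketch (not part of the original module). The model name
# below is an assumption; any embedding-capable model the caller can access
# works. This hypothetical demo performs a real network call.
def _example_generate_embeddings() -> None:
    embedding_dict = generate_embeddings(
        model="models/embedding-gecko-001",
        text="What do squirrels eat?",
    )
    print(embedding_dict["embedding"])  # a list of float values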