
import math
from functools import partial
from typing import Callable, Dict, List, Optional

import mlx.core as mx


def make_sampler(
    temp: float = 0.0,
    top_p: float = 0.0,
    min_p: float = 0.0,
    min_tokens_to_keep: int = 1,
    top_k: int = -1,
    xtc_probability: float = 0.0,
    xtc_threshold: float = 0.0,
    xtc_special_tokens: List[int] = [],
) -> Callable[[mx.array], mx.array]:
    """
    Make a sampler function for use with ``generate_step``.

    Args:
        temp (float): The temperature for sampling; if 0, the argmax is used.
          Default: ``0``.
        top_p (float, optional): Nucleus sampling; higher values make the model
          consider more lower-probability words.
        min_p (float, optional): The minimum value (scaled by the top token's
          probability) that a token probability must have to be considered.
        min_tokens_to_keep (int, optional): Minimum number of tokens that cannot
          be filtered by min_p sampling.
        top_k (int, optional): The top k tokens ranked by probability to constrain
          the sampling to.
        xtc_probability (float, optional): The probability of applying XTC
            sampling.
        xtc_threshold (float, optional): The threshold the probs need to reach
            for being sampled.
        xtc_special_tokens (list(int), optional): List of special token IDs to
            be excluded from XTC sampling.


    Returns:
            Callable[[mx.array], mx.array]:
            A sampler which takes log-probabilities and returns tokens.
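
    Example (illustrative; assumes ``logprobs`` is an ``mx.array`` of
    log-probabilities for the next token)::

        sampler = make_sampler(temp=0.8, top_p=0.95)
        token = sampler(logprobs)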
    """
    if temp == 0:
        return lambda x: mx.argmax(x, axis=-1)

    sampling_methods = []
    if 0 < top_p < 1:
        sampling_methods.append(lambda x: apply_top_p(x, top_p))
    if min_p != 0:
        sampling_methods.append(lambda x: apply_min_p(x, min_p, min_tokens_to_keep))
    if xtc_probability > 0:
        sampling_methods.append(
            lambda x: apply_xtc(x, xtc_probability, xtc_threshold, xtc_special_tokens)
        )
    if top_k > 0:
        sampling_methods.append(lambda x: apply_top_k(x, top_k))

    def sampler(logprobs):
        for method in sampling_methods:
            logprobs = method(logprobs)
        return categorical_sampling(logprobs, temp)

    return sampler


def make_logits_processors(
    logit_bias: Optional[Dict[int, float]] = None,
    repetition_penalty: Optional[float] = None,
    repetition_context_size: Optional[int] = 20,
    presence_penalty: Optional[float] = None,
    presence_context_size: Optional[int] = 20,
    frequency_penalty: Optional[float] = None,
    frequency_context_size: Optional[int] = 20,
):
    """
    Make logits processors for use with ``generate_step``.

    Args:
        repetition_penalty (float, optional): A (sign-aware) multiplicative
          penalty for repeating tokens.
        repetition_context_size (int, optional): The number of tokens to
          consider for repetition penalty. Default: ``20``.
        presence_penalty (float, optional): An additive penalty to reduce
          repeating tokens.
        presence_context_size (int, optional): The number of tokens to consider
          for the presence penalty. Default: ``20``.
        frequency_penalty (float, optional): An additive penalty to reduce
          repeating tokens. The tokens are penalized proportionally to their
          frequency.
        frequency_context_size (int, optional): The number of tokens to consider
          for the frequency penalty. Default: ``20``.
        logit_bias (dictionary, optional): Additive logit bias.

    Returns:
        List[Callable[[mx.array, mx.array], mx.array]]:
            A list of logits processors. Each processor in the list is a
            callable which takes an array of tokens and an array of logits
            and returns the updated logits.
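
    Example (illustrative sketch; ``tokens`` holds the previously generated
    token ids and ``logits`` the logits for the current step)::

        processors = make_logits_processors(repetition_penalty=1.1)
        for processor in processors:
            logits = processor(tokens, logits)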
    """
    logits_processors = []
    if logit_bias:
        indices = mx.array(list(logit_bias.keys()))
        values = mx.array(list(logit_bias.values()))

        def logit_bias_processor(_, logits):
            return logits.at[:, indices].add(values)

        logits_processors.append(logit_bias_processor)

    repetition_penalties = [
        (make_repetition_penalty, repetition_penalty, repetition_context_size),
        (make_presence_penalty, presence_penalty, presence_context_size),
        (make_frequency_penalty, frequency_penalty, frequency_context_size),
    ]
    for make_penalty, penalty, context_size in repetition_penalties:
        if penalty is not None and penalty != 0:
            logits_processors.append(make_penalty(penalty, context_size))

    return logits_processors


@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
def apply_top_k(
    logprobs: mx.array,
    top_k: int,
) -> mx.array:
    """
    Sample from only the top K tokens ranked by probability.

    Args:
        logprobs: A vector of log probabilities.
        top_k (int): Top k tokens to sample from.
    """
    vocab_size = logprobs.shape[-1]
    if not isinstance(top_k, int) or not (0 < top_k < vocab_size):
        raise ValueError(
            f"`top_k` has to be an integer in the (0, {vocab_size}] interval,"
            f" but is {top_k}."
        )
    # Indices of everything outside the top_k largest log-probabilities
    mask_idx = mx.argpartition(-logprobs, kth=top_k - 1, axis=-1)[..., top_k:]
    masked_logprobs = mx.put_along_axis(
        logprobs, mask_idx, mx.array(-float("inf"), logprobs.dtype), axis=-1
    )
    return masked_logprobs
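
# Illustrative usage note (not part of the library API): `apply_top_k` keeps the
# `top_k` highest-scoring entries of `logprobs` and sets everything else to
# -inf, so a later categorical draw can only pick one of those tokens, e.g.
# `apply_top_k(logprobs, top_k=40)`.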


@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
def apply_min_p(
    logprobs: mx.array,
    min_p: float,
    min_tokens_to_keep: int = 1,
) -> mx.array:
    """
    Apply min-p sampling to the logprobs.

    Min-p keeps all tokens that are above a minimum probability, scaled by the
    probability of the most likely token. As a result, the filter is more
    aggressive given a very high-probability token.

    Args:
        logprobs: A vector of log probabilities.
        min_p (float): Minimum token probability. Typical values are in the
            0.01-0.2 range, comparably selective as setting `top_p` in the
            0.99-0.8 range.
        min_tokens_to_keep (int, optional): Minimum number of tokens that cannot
            be filtered. Default: ``1``.
    """
    if not (0 <= min_p <= 1.0):
        raise ValueError(
            f"`min_p` has to be a float in the [0, 1] interval, but is {min_p}"
        )
    if not isinstance(min_tokens_to_keep, int) or min_tokens_to_keep < 1:
        raise ValueError(
            f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}"
        )

    # Indices sorted in decreasing order of probability
    sorted_indices = mx.argsort(-logprobs, axis=-1)
    sorted_logprobs = mx.take_along_axis(logprobs, sorted_indices, axis=-1)

    # Top log-probability and the scaled min-p threshold (in log space)
    top_logprobs = sorted_logprobs[:, 0:1]
    scaled_min_p = top_logprobs + math.log(min_p)

    # Mask tokens below the threshold, always keeping the first
    # `min_tokens_to_keep` entries
    tokens_to_remove = sorted_logprobs < scaled_min_p
    tokens_to_remove[..., :min_tokens_to_keep] = False
    selected_logprobs = mx.where(tokens_to_remove, -float("inf"), sorted_logprobs)

    # Scatter the filtered values back to their original positions
    inverse_indices = mx.put_along_axis(
        mx.zeros_like(sorted_indices),
        sorted_indices,
        mx.arange(sorted_indices.shape[-1], dtype=sorted_indices.dtype),
        axis=-1,
    )
    original_order_logprobs = mx.take_along_axis(
        selected_logprobs, inverse_indices, axis=-1
    )
    return original_order_logprobs
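
# Worked example (illustrative): with `min_p=0.1` and a most likely token of
# probability 0.6, the cutoff is 0.1 * 0.6 = 0.06; in log space that is
# `top_logprobs + math.log(min_p)` as computed above. Tokens below the cutoff
# are set to -inf, except that the `min_tokens_to_keep` most likely tokens are
# always retained.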


@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
def apply_top_p(logprobs: mx.array, top_p: float) -> mx.array:
    """
    Apply top-p (nucleus) sampling to the logprobs.

    Args:
        logprobs: A vector of log probabilities.
        top_p: The cumulative probability threshold for top-p filtering.
    Returns:
        The filtered log probabilities, with tokens outside the top-p nucleus
        set to ``-inf``.
    """
    probs = mx.exp(logprobs)

    # Sort probabilities in ascending order and accumulate them
    sorted_indices = mx.argsort(logprobs, axis=-1)
    sorted_probs = mx.take_along_axis(probs, sorted_indices, axis=-1)
    cumulative_probs = mx.cumsum(sorted_probs, axis=-1)

    # Map the cumulative sums back to the original token order
    inverse_indices = mx.put_along_axis(
        mx.zeros_like(sorted_indices),
        sorted_indices,
        mx.arange(sorted_indices.shape[-1], dtype=sorted_indices.dtype),
        axis=-1,
    )
    cumulative_probs = mx.take_along_axis(cumulative_probs, inverse_indices, axis=-1)

    # Keep only the tokens inside the top-p nucleus
    return mx.where(
        cumulative_probs > 1 - top_p,
        logprobs,
        -float("inf"),
    )
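
# Illustrative note: with `top_p=0.9` only the smallest set of most likely
# tokens whose probabilities sum to at least 0.9 survives. Because the
# cumulative sum above runs over probabilities sorted in ascending order, a
# token is kept when the cumulative mass up to and including it exceeds
# 1 - top_p.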


@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
def apply_xtc(
    logits: mx.array,
    xtc_probability: float,
    xtc_threshold: float,
    xtc_special_tokens: List[int] = [],
) -> mx.array:
    """
    Apply XTC sampling to the logits.

    Args:
        logits: The logits from the model's output.
        xtc_probability (float): Probability of XTC sampling to happen for each token.
        xtc_threshold (float): The threshold the probs need to reach for being sampled.
        xtc_special_tokens (list(int)): List of special token IDs to be excluded from XTC sampling.
    """
    if not (0 <= xtc_threshold <= 0.5):
        raise ValueError(
            f"`threshold` has to be a float in the [0, 0.5] interval, but is {xtc_threshold}"
        )
    if not (0 <= xtc_probability <= 1):
        raise ValueError(
            f"`probability` has to be a float in the [0, 1] interval, but is {xtc_probability}"
        )

    probs = mx.softmax(logits, axis=-1)

    # Mask every token that is more likely than the least likely token
    # exceeding the threshold, leaving special tokens untouched
    mask = probs > mx.where(probs > xtc_threshold, probs, mx.inf).min()
    if xtc_special_tokens:
        mask[..., xtc_special_tokens] = False

    # Only apply the mask with probability `xtc_probability`
    return mx.where(
        mx.random.uniform(0, 1) > xtc_probability,
        logits,
        mx.where(mask, -mx.inf, logits),
    )


@partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
def categorical_sampling(logits, temp):
    return mx.random.categorical(logits * (1 / temp))
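
# Illustrative note: with `xtc_threshold=0.1` and token probabilities of
# 0.5, 0.3 and 0.15, all three clear the threshold; XTC then masks 0.5 and 0.3
# and keeps 0.15, the least likely of the "top choices", and it only does so
# with probability `xtc_probability` at each step. `categorical_sampling`
# divides the logits by `temp`, so values below 1 sharpen the distribution and
# values above 1 flatten it.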


def make_repetition_penalty(penalty: float, context_size: int = 20):
    """
    Make repetition penalty processor.

    Paper: https://arxiv.org/abs/1909.05858

    Args:
        penalty (float): The repetition penalty factor to be applied.
        context_size (int): The number of previous tokens to use.
            Default: ``20``.

    Returns:
        Callable[[mx.array, List[int]], mx.array]:
            The repetition penalty processor.
    """
    if penalty < 0 or not isinstance(penalty, (int, float)):
        raise ValueError(f"penalty must be a non-negative float, got {penalty}")

    def repetition_penalty_processor(tokens, logits):
        if len(tokens) > 0:
            tokens = tokens[-context_size:]
            selected_logits = logits[:, tokens]
            # Sign-aware penalty: shrink positive logits, push negative ones
            # further down, so repeated tokens always become less likely
            selected_logits = mx.where(
                selected_logits < 0,
                selected_logits * penalty,
                selected_logits / penalty,
            )
            logits[:, tokens] = selected_logits
        return logits

    return repetition_penalty_processor


def make_presence_penalty(penalty: float, context_size: int = 20):
    """
    Make a presence penalty processor.

    Corresponds to the OpenAI option with the same name. Namely, subtracts
    ``penalty`` from a logit if the token has occurred at least once in the
    ``context_size`` previous tokens.

    Args:
        penalty (float): The presence penalty to be applied.
        context_size (int): The number of previous tokens to use.
            Default: ``20``.

    Returns:
        Callable[[mx.array, List[int]], mx.array]
    """

    def presence_penalty_processor(tokens, logits):
        if len(tokens) > 0:
            tokens = tokens[-context_size:]
            logits[:, tokens] -= penalty
        return logits

    return presence_penalty_processor


def make_frequency_penalty(penalty: float, context_size: int = 20):
    """
    Make a frequency penalty processor.

    Corresponds to the OpenAI option with the same name. Namely, subtracts
    ``penalty`` from a logit for every time that the token has occurred in the
    ``context_size`` previous tokens.

    The difference with the presence penalty is that the more often a token
    occurs the more it will be penalized.

    Args:
        penalty (float): The frequency penalty to be applied.
        context_size (int): The number of previous tokens to use.
            Default: ``20``.

    Returns:
        Callable[[mx.array, List[int]], mx.array]
    """

    def frequency_penalty_processor(tokens, logits):
        if len(tokens) > 0:
            tokens = tokens[-context_size:]
            logits = logits.at[:, tokens].subtract(penalty)
        return logits

    return frequency_penalty_processor