from typing import cast, List, Optional, Tuple, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _device_dtype_check_for_fused,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _fused_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _stack_if_compiling,
    _use_grad_for_differentiable,
    _view_as_real,
    DeviceDict,
    Optimizer,
    ParamsT,
)

__all__ = ["Adam", "adam"]


class Adam(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        amsgrad: bool = False,
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ):
        if isinstance(lr, Tensor):
            if foreach and not capturable:
                raise ValueError(
                    "lr as a Tensor is not supported for capturable=False and foreach=True"
                )
            if lr.numel() != 1:
                raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            maximize=maximize,
            foreach=foreach,
            capturable=capturable,
            differentiable=differentiable,
            fused=fused,
        )
        super().__init__(params, defaults)

        if fused:
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            self._step_supports_amp_scaling = True
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("amsgrad", False)
            group.setdefault("maximize", False)
            group.setdefault("foreach", None)
            group.setdefault("capturable", False)
            group.setdefault("differentiable", False)
            fused = group.setdefault("fused", None)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val,
                            dtype=_get_scalar_dtype(is_fused=fused),
                            device=p.device,
                        )
                        if group["capturable"] or group["fused"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self,
        group,
        params_with_grad,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError(
                        "Adam does not support sparse gradients, please consider SparseAdam instead"
                    )
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    if group["fused"]:
                        _device_dtype_check_for_fused(p)
                    # Deliberately host `step` on CPU when both capturable and
                    # fused are off, since kernel launches are costly on CUDA/XLA.
                    state["step"] = (
                        torch.zeros(
                            (),
                            dtype=_get_scalar_dtype(is_fused=group["fused"]),
                            device=p.device,
                        )
                        if group["capturable"] or group["fused"]
                        else torch.tensor(0.0, dtype=_get_scalar_dtype())
                    )
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    if group["amsgrad"]:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state["max_exp_avg_sq"] = torch.zeros_like(
                            p, memory_format=torch.preserve_format
                        )

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])

                if group["amsgrad"]:
                    max_exp_avg_sqs.append(state["max_exp_avg_sq"])
                if group["differentiable"] and state["step"].requires_grad:
                    raise RuntimeError(
                        "`requires_grad` is not supported for `step` in differentiable mode"
                    )
                # Foreach without capturable does not support a tensor lr
                if (
                    group["foreach"]
                    and torch.is_tensor(group["lr"])
                    and not group["capturable"]
                ):
                    raise RuntimeError(
                        "lr as a Tensor is not supported for capturable=False and foreach=True"
                    )
                state_steps.append(state["step"])
        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            max_exp_avg_sqs: List[Tensor] = []
            state_steps: List[Tensor] = []
            beta1, beta2 = group["betas"]

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
            )

            adam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=group["amsgrad"],
                has_complex=has_complex,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
                foreach=group["foreach"],
                capturable=group["capturable"],
                differentiable=group["differentiable"],
                fused=group["fused"],
                grad_scale=getattr(self, "grad_scale", None),
                found_inf=getattr(self, "found_inf", None),
            )

        return loss


Adam.__doc__ = (
    r"""Implements Adam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \beta_1, \beta_2
                \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}          \\
            &\hspace{13mm}      \lambda \text{ (weight decay)},  \: \textit{amsgrad},
                \:\textit{maximize}                                                              \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\

            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\textbf{if} \: amsgrad                                                  \\
            &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max},
                \widehat{v_t})                                                                   \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big)                                 \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_.
    """
    + rf"""
    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR
            is not yet supported for all our implementations. Please use a float
            LR if you are not also specifying fused=True or capturable=True.
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}
        {_fused_doc}
    .. Note::
        A prototype implementation of Adam and AdamW for MPS supports `torch.float32` and `torch.float16`.
    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
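
    Example:
        >>> # Minimal illustrative sketch (not from the upstream docs); default
        >>> # hyperparameters are assumed.
        >>> import torch
        >>> param = torch.zeros(10, requires_grad=True)
        >>> optimizer = torch.optim.Adam([param], lr=1e-3)
        >>> loss = (param - torch.ones(10)).pow(2).sum()
        >>> loss.backward()
        >>> optimizer.step()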

    """
)


def _single_tensor_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
):
    assert grad_scale is None and found_inf is None

    if torch.jit.is_scripting():
        # TorchScript cannot resolve the float/Tensor overloads of the ops below,
        # so assert the common case of a float lr.
        assert isinstance(lr, float)

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # When compiling, the compiler handles the cudagraph checks.
        if not torch._utils.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

        # update step
        step_t += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)
            if amsgrad:
                max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i])
            param = torch.view_as_real(param)

        # Decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)

        if capturable or differentiable:
            step = step_t

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                if differentiable:
                    max_exp_avg_sq = max_exp_avg_sqs[i].clone()
                else:
                    max_exp_avg_sq = max_exp_avg_sqs[i]

                max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq))

                # Use the max. for normalizing the running avg. of the gradient;
                # eps and the step size are folded into the denominator.
                denom = (
                    max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)
            else:
                denom = (
                    exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)
                ).add_(eps / step_size_neg)

            param.addcdiv_(exp_avg, denom)
        else:
            step = _get_value(step_t)

            bias_correction1 = 1 - beta1**step
            bias_correction2 = 1 - beta2**step

            step_size = lr / bias_correction1
            bias_correction2_sqrt = bias_correction2**0.5

            if amsgrad:
                # Maintains the maximum of all 2nd moment running avg. till now
                torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])

                # Use the max. for normalizing the running avg. of the gradient
                denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps)
            else:
                denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)

            param.addcdiv_(exp_avg, denom, value=-step_size)

        # Lastly, switch back to complex view
        if amsgrad and torch.is_complex(params[i]):
            max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i])
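

# Illustrative reference (not part of the upstream module): a single
# bias-corrected Adam step on one tensor, matching the default path of
# `_single_tensor_adam` when amsgrad=False, maximize=False, weight_decay=0,
# capturable=False, and lr is a float. The helper name is hypothetical and
# exists only to make the update math concrete.
def _reference_adam_step(
    param: Tensor,
    grad: Tensor,
    exp_avg: Tensor,
    exp_avg_sq: Tensor,
    step: int,
    lr: float = 1e-3,
    beta1: float = 0.9,
    beta2: float = 0.999,
    eps: float = 1e-8,
) -> None:
    # m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
    exp_avg.lerp_(grad, 1 - beta1)
    # v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    bias_correction1 = 1 - beta1**step
    bias_correction2 = 1 - beta2**step
    # theta_t = theta_{t-1} - lr * m_hat / (sqrt(v_hat) + eps)
    denom = (exp_avg_sq / bias_correction2).sqrt().add_(eps)
    param.addcdiv_(exp_avg / bias_correction1, denom, value=-lr)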


def _multi_tensor_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
):
    if len(params) == 0:
        return

    if isinstance(lr, Tensor) and not capturable:
        raise RuntimeError(
            "lr as a Tensor is not supported for capturable=False and foreach=True"
        )

    # When compiling, the compiler handles the cudagraph checks.
    if not torch._utils.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."

    assert grad_scale is None and found_inf is None

    assert not differentiable, "_foreach ops don't support autograd"

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (
        device_params_,
        device_grads_,
        device_exp_avgs_,
        device_exp_avg_sqs_,
        device_max_exp_avg_sqs_,
        device_state_steps_,
    ), _ in grouped_tensors.values():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(List[Tensor], device_state_steps_)

        # Handle complex parameters
        if has_complex:
            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                _view_as_real(
                    device_params,
                    device_grads,
                    device_exp_avgs,
                    device_exp_avg_sqs,
                    device_max_exp_avg_sqs,
                )
            else:
                _view_as_real(
                    device_params, device_grads, device_exp_avgs, device_exp_avg_sqs
                )

        if maximize:
            device_grads = torch._foreach_neg(device_grads)

        # Update steps. If steps live on the CPU, foreach falls back to a slow
        # per-tensor loop that would re-wrap the scalar 1 on every iteration, so
        # wrap it into a tensor once here; alpha is required to hit the right
        # overload.
        if not torch._utils.is_compiling() and device_state_steps[0].is_cpu:
            torch._foreach_add_(
                device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(device_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (device_grads) already allocated
            # for maximize
            if maximize:
                torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
            else:
                device_grads = torch._foreach_add(
                    device_grads, device_params, alpha=weight_decay
                )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1)

        torch._foreach_mul_(device_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            device_exp_avg_sqs, device_grads, device_grads, 1 - beta2
        )

        # Delete the local intermediate to save on peak memory
        del device_grads

        if capturable:
            bias_correction1 = torch._foreach_pow(beta1, device_state_steps)
            bias_correction2 = torch._foreach_pow(beta2, device_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction1, 1)
            torch._foreach_sub_(bias_correction2, 1)
            # bias_correction1 is not negated here since it is negated later anyway
            torch._foreach_neg_(bias_correction2)

            # foreach_div doesn't allow a scalar as the first arg
            torch._foreach_div_(bias_correction1, lr)
            torch._foreach_reciprocal_(bias_correction1)

            torch._foreach_sqrt_(bias_correction2)

            # Re-assign for clarity; only minimal intermediates are kept:
            #   step_size = - lr / (1 - beta1 ^ t)
            #   bias_correction2_sqrt = sqrt(1 - beta2 ^ t)
            step_size = bias_correction1
            bias_correction2_sqrt = bias_correction2

            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                # Use the max. for normalizing the running avg. of the gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_div_(exp_avg_sq_sqrt, step_size)

            # At this point, exp_avg_sq_sqrt =
            #   -(1 - beta1^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr
            torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt)
        else:
            bias_correction1 = [
                1 - beta1 ** _get_value(step) for step in device_state_steps
            ]
            bias_correction2 = [
                1 - beta2 ** _get_value(step) for step in device_state_steps
            ]

            step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1])

            bias_correction2_sqrt = [bc**0.5 for bc in bias_correction2]

            if amsgrad:
                device_max_exp_avg_sqs = cast(List[Tensor], device_max_exp_avg_sqs_)
                # Maintains the maximum of all 2nd moment running avg. till now
                torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs)
                # Use the max. for normalizing the running avg. of the gradient
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs)
            else:
                exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)

            torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
            torch._foreach_add_(exp_avg_sq_sqrt, eps)
            torch._foreach_addcdiv_(
                device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size
            )


def _fused_adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    grad_scale: Optional[Tensor],
    found_inf: Optional[Tensor],
    *,
    amsgrad: bool,
    has_complex: bool,  # Needed for consistency.
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
    capturable: bool,  # Needed for consistency.
    differentiable: bool,
) -> None:
    if not params:
        return
    if differentiable:
        raise RuntimeError("Adam with fused=True does not support differentiable=True")

    grad_scale_dict: DeviceDict = (
        {grad_scale.device: grad_scale} if grad_scale is not None else {}
    )
    found_inf_dict: DeviceDict = (
        {found_inf.device: found_inf} if found_inf is not None else {}
    )

    # Only shuffle the lr across devices when it is a non-CPU tensor; otherwise
    # treat it as a scalar.
    lr_dict: Optional[DeviceDict] = (
        {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
    )
    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]
    )
    for (device, _), (
        (
            device_params_,
            device_grads_,
            device_exp_avgs_,
            device_exp_avg_sqs_,
            device_max_exp_avg_sqs,
            device_state_steps_,
        ),
        _,
    ) in grouped_tensors.items():
        device_params = cast(List[Tensor], device_params_)
        device_grads = cast(List[Tensor], device_grads_)
        device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
        device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
        device_state_steps = cast(List[Tensor], device_state_steps_)

        if device.type == "mps":
            assert found_inf is None and grad_scale is None

        device_grad_scale, device_found_inf = None, None
        if grad_scale is not None:
            device_grad_scale = grad_scale_dict.setdefault(
                device, grad_scale.to(device, non_blocking=True)
            )
        if found_inf is not None:
            device_found_inf = found_inf_dict.setdefault(
                device, found_inf.to(device, non_blocking=True)
            )
        if lr_dict is not None and device not in lr_dict:
            lr_dict[device] = lr.to(device=device, non_blocking=True)
            lr = lr_dict[device]
        torch._foreach_add_(device_state_steps, 1)
        torch._fused_adam_(
            device_params,
            device_grads,
            device_exp_avgs,
            device_exp_avg_sqs,
            device_max_exp_avg_sqs,
            device_state_steps,
            amsgrad=amsgrad,
            lr=lr,
            beta1=beta1,
            beta2=beta2,
            weight_decay=weight_decay,
            eps=eps,
            maximize=maximize,
            grad_scale=device_grad_scale,
            found_inf=device_found_inf,
        )
        if device_found_inf is not None:
            torch._foreach_sub_(
                device_state_steps, [device_found_inf] * len(device_state_steps)
            )


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adam)
def adam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    max_exp_avg_sqs: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript issue #70627; kept as kwargs for now since the functional API
    # is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    has_complex: bool = False,
    *,
    amsgrad: bool,
    beta1: float,
    beta2: float,
    lr: Union[float, Tensor],
    weight_decay: float,
    eps: float,
    maximize: bool,
):
    r"""Functional API that performs Adam algorithm computation.

    See :class:`~torch.optim.Adam` for details.
    """
    # Respect when the user inputs False/True for foreach or fused. Only change
    # the default when neither has been user-specified. Note that we default to
    # foreach and pass False to use_fused to give the fused implementation
    # bake-in time before making it the default, even if it is typically faster.
    if fused is None and foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )
        # Do not flip on foreach for the unsupported case where lr is a Tensor
        # and capturable=False.
        if foreach and isinstance(lr, Tensor) and not capturable:
            foreach = False
    if fused is None:
        fused = False
    if foreach is None:
        foreach = False

    # This check is slow during compilation, so it is skipped there; if strictly
    # needed it can be added back in dynamo.
    if not torch._utils.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if fused and not torch.jit.is_scripting():
        func = _fused_adam
    elif foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adam
    else:
        func = _single_tensor_adam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        max_exp_avg_sqs,
        state_steps,
        amsgrad=amsgrad,
        has_complex=has_complex,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
        differentiable=differentiable,
        grad_scale=grad_scale,
        found_inf=found_inf,
    )
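

# Illustrative smoke test (not part of the upstream module). Run as a module,
# e.g. `python -m torch.optim.adam`, so the relative imports above resolve.
# The toy objective, lr, and iteration count are arbitrary demo choices.
if __name__ == "__main__":
    target = torch.tensor([1.0, -2.0, 3.0])
    param = torch.zeros(3, requires_grad=True)
    optimizer = Adam([param], lr=0.1, foreach=True)
    for _ in range(500):
        optimizer.zero_grad()
        loss = (param - target).pow(2).sum()
        loss.backward()
        optimizer.step()
    # After enough steps the parameters should sit near the target.
    print(f"final loss: {loss.item():.6f}")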