
    yj                        d dl mZ d dlmZ d dlZd dlmZmZ d dlm	Z	 d dl
mZ erd dlmZ d dlmZ g Zej        	 	 ddd
dd            ZdS )    )annotations)TYPE_CHECKINGN)core	framework)gradients_with_optimizer)check_and_create_dir)Sequence)TensorF)dump_backward_graph_pathtensorsTensor | Sequence[Tensor]grad_tensors'Tensor | Sequence[Tensor | None] | Noneretain_graphboolr   
str | NonereturnNonec               L   dd} || d          } t          |           t          t          |                     k    s
J d	            |Kt          |t          t          f          s|g}|D ](}|$t          |t
          j                  s
J d            )ng }t          |          dk    r*t          |           t          |          k    s
J d            t          |t                    s
J d            t          |           t          j
                            | |||           d
S )a#
  
    Compute the backward gradients of given tensors.

    Args:
        tensors(list of Tensors): the tensors whose gradients are to be computed. The list must not contain the same tensor more than once.

        grad_tensors(list of Tensors or None, optional): the initial gradients of ``tensors``. If not None, it must have the same length as ``tensors``,
            and any element that is None uses the default gradient, which is filled with 1.0.
            If None, the gradients of all ``tensors`` use the default value filled with 1.0.
            Defaults to None.

        retain_graph(bool, optional): If False, the graph used to compute the grads will be freed. If you would
            like to add more ops to the built graph after calling this method (:code:`backward`), set the parameter
            :code:`retain_graph` to True so that the graph is retained. Setting it to False is much more memory-efficient.
            Defaults to False.
        dump_backward_graph_path(str, optional): the directory path for storing debug files.
            If this parameter is specified, the backward-related graph (in dot format)
            and the debugging call stack information will be generated in this directory
            (see the last snippet in the Examples below). Defaults to None.
    Returns:
        NoneType: None


    Examples:
        .. code-block:: python

            >>> import paddle
            >>> x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32', stop_gradient=False)
            >>> y = paddle.to_tensor([[3, 2], [3, 4]], dtype='float32')

            >>> grad_tensor1 = paddle.to_tensor([[1,2], [2, 3]], dtype='float32')
            >>> grad_tensor2 = paddle.to_tensor([[1,1], [1, 1]], dtype='float32')

            >>> z1 = paddle.matmul(x, y)
            >>> z2 = paddle.matmul(x, y)

            >>> paddle.autograd.backward([z1, z2], [grad_tensor1, grad_tensor2], True)
            >>> print(x.grad)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[12., 18.],
             [17., 25.]])


            >>> x.clear_grad()

            >>> paddle.autograd.backward([z1, z2], [grad_tensor1, None], True)
            >>> print(x.grad)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[12., 18.],
             [17., 25.]])

            >>> x.clear_grad()

            >>> paddle.autograd.backward([z1, z2])
            >>> print(x.grad)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[10., 14.],
             [10., 14.]])
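
            >>> # A minimal sketch of the dump_backward_graph_path debug option;
            >>> # the output directory below is only an illustrative placeholder.
            >>> z3 = paddle.matmul(x, y)
            >>> paddle.autograd.backward([z3], dump_backward_graph_path='./backward_debug')  # doctest: +SKIP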


    """

    def check_tensors(
        in_out_list: Sequence[Tensor] | Tensor, name: str
    ) -> Sequence[Tensor]:
        assert in_out_list is not None, f"{name} should not be None"

        if isinstance(in_out_list, (list, tuple)):
            assert len(in_out_list) > 0, f"{name} cannot be empty"
            for each_var in in_out_list:
                assert isinstance(
                    each_var, paddle.Tensor
                ), f"Elements of {name} must be paddle.Tensor"
            return in_out_list
        else:
            assert isinstance(
                in_out_list, paddle.Tensor
            ), f"{name} must be Tensor or list of Tensor"
            return [in_out_list]

    tensors = check_tensors(tensors, "tensors")

    assert len(tensors) == len(
        set(tensors)
    ), "The argument 'tensors' of paddle.autograd.backward contains duplicate paddle.Tensor object."

    if grad_tensors is not None:
        if not isinstance(grad_tensors, (list, tuple)):
            grad_tensors = [grad_tensors]

        for each_tensor in grad_tensors:
            if each_tensor is not None:
                assert isinstance(
                    each_tensor, paddle.Tensor
                ), "The argument 'grad_tensors' of paddle.autograd.backward is invalid, it can be 'None', 'paddle.Tensor' or 'list[None/paddle.Tensor]'."
    else:
        grad_tensors = []

    if len(grad_tensors) > 0:
        assert len(tensors) == len(
            grad_tensors
        ), "The length of grad_tensors must be equal to tensors"

    assert isinstance(retain_graph, bool), "retain_graph must be True or False"

    # Create the dump directory (if requested) before running the backward pass.
    if dump_backward_graph_path is not None:
        check_and_create_dir(dump_backward_graph_path)

    core.eager.run_backward(
        tensors, grad_tensors, retain_graph, dump_backward_graph_path
    )