
    yjJ                    &   d dl mZ d dlmZmZmZmZ d dlmZ d dl	Z	d dl
mZ erd dlmZ d dl	mZ g Z ed          Z G d	 d
          Z G d dej        j        e          Z G d de          Z G d dej        j        ee          ZddZdS )    )annotations)TYPE_CHECKINGAnyCallableTypeVar)ConcatenateN)core)Sequence)Tensor_RetTc                  v    e Zd ZU dZded<   ded<   ded<   ded<   ded<   ddZddZddZddZddZ	ddZ
dS )PyLayerContextaP  
    ``PyLayerContext`` can assist the :ref:`api_paddle_autograd_PyLayer` in implementing certain functionalities.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.autograd import PyLayer

            >>> class cus_tanh(PyLayer):
            ...     @staticmethod
            ...     def forward(ctx, x):
            ...         # ctx is an object of PyLayerContext.
            ...         y = paddle.tanh(x)
            ...         ctx.save_for_backward(y)
            ...         return y
            ...
            ...     @staticmethod
            ...     def backward(ctx, dy):
            ...         # ctx is an object of PyLayerContext.
            ...         y, = ctx.saved_tensor()
            ...         grad = dy * (1 - paddle.square(y))
            ...         return grad
    """

    container: tuple[Tensor, ...]
    not_inplace_tensors: tuple[Tensor, ...]
    non_differentiable: tuple[Tensor, ...]
    materialize_grads: bool
    grad_in_dtype_consistent: bool

    def set_grad_in_dtype_consistent(self, flag: bool) -> None:
        """
        Set whether to maintain gradient input dtype consistency between forward output and backward input.

        Note:
            This API should be called only inside `forward`.
            By default, backward input gradients are automatically cast to match the dtype of forward outputs.
            Set this to `False` to disable automatic casting and maintain original gradient dtypes in backward.

        Args:
            flag (bool): Whether to enable automatic dtype conversion in backward.
                - `True`:  Cast backward input gradient to match forward output dtype (default behavior)
                - `False`: Preserve original dtype of backward input gradient

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer
                >>> paddle.seed(2025)
                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         # The gradient input in the backward process
                ...         # will not be automatically cast to the dtype of the forward output.
                ...         ctx.set_grad_in_dtype_consistent(False)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad
                ...
                >>> class cus_tanh_cast_grad(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         # The gradient is manually cast to float16 here, and because
                ...         # cus_tanh disabled dtype consistency, it will not be cast back
                ...         # to the dtype of cus_tanh's forward output.
                ...         grad = paddle.cast(grad, paddle.float16)
                ...         return grad
                ...
                >>> x = paddle.randn([3,3]).astype("float32")
                >>> x.stop_gradient = False
                >>> y = cus_tanh.apply(x)
                >>> z = cus_tanh_cast_grad.apply(y)
                >>> z.sum().backward()

        N)r   )selfr   s     h/lsinfo/ai/hellotax_ai/data_center/backend/venv/lib/python3.11/site-packages/paddle/autograd/py_layer.pyset_grad_in_dtype_consistentz+PyLayerContext.set_grad_in_dtype_consistentD   s    F )-%%%    tensorsr   c                    || _         dS )a  
        Saves given tensors that backward need. Use ``saved_tensor`` in the `backward` to get the saved tensors.

        Note:
            This API should be called at most once, and only inside `forward`.

        Args:
            tensors(list of Tensors): Tensors to be stored.

        Returns:
            None

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer

                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         # ctx is a context object that stores some objects for backward.
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad

        Nr   )r   r   s     r   save_for_backwardz PyLayerContext.save_for_backward   s    H !r   c                    | j         S )af  
        Get the tensors stored by ``save_for_backward``.

        Returns:
            list of Tensors or None: If context contains tensors stored by `save_for_backward`,
            then return these tensors, otherwise return None.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer

                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         # ctx is a context object that stores some objects for backward.
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad
        """
        return self.container

    def mark_not_inplace(self, *args: Tensor) -> None:
        """
        Marks inputs as not inplace.
        This should be called at most once, only from inside the `forward` method,
        and all arguments should be Tensor inputs.

        If the Tensor returned by the `forward` method is the same Tensor as one of its inputs,
        and this Tensor is marked as not_inplace, then Paddle will create a new Tensor as the output,
        thereby preventing the auto grad information of the input Tensor from being overwritten.

        Examples:
            .. code-block:: python

                >>> import paddle

                >>> class Exp(paddle.autograd.PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         ctx.mark_not_inplace(x)
                ...         return x
                ...
                ...     @staticmethod
                ...     def backward(ctx, grad_output):
                ...         out = grad_output.exp()
                ...         return out

                >>> paddle.seed(2023)
                >>> x = paddle.randn((1, 1))
                >>> x.stop_gradient = False
                >>> attn_layers = []
                >>> for idx in range(0, 2):
                ...     attn_layers.append(Exp())

                >>> for step in range(0, 2):
                ...     a = x
                ...     for j in range(0,2):
                ...         a = attn_layers[j].apply(x)
                ...     a.backward()
        N)r   r   r$   s     r   mark_not_inplacezPyLayerContext.mark_not_inplace   s    N $(   r   c                    || _         dS )a  
        Marks outputs as non-differentiable.
        This should be called at most once, only from inside the `forward` method,
        and all arguments should be tensor outputs.

        This will mark outputs as not requiring gradients, increasing the
        efficiency of backward computation. You still need to accept a gradient
        for each output in `backward`, but it is always going to
        be a zero tensor with the same shape as the corresponding
        output.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer
                >>> import numpy as np

                >>> class Tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         a = x + x
                ...         b = x + x + x
                ...         ctx.mark_non_differentiable(a)
                ...         return a, b
                ...
                ...     @staticmethod
                ...     def backward(ctx, grad_a, grad_b):
                ...         assert np.equal(grad_a.numpy(), paddle.zeros([1]).numpy())
                ...         assert np.equal(grad_b.numpy(), paddle.ones([1], dtype="float64").numpy())
                ...         return grad_b

                >>> x = paddle.ones([1], dtype="float64")
                >>> x.stop_gradient = False
                >>> a, b = Tanh.apply(x)
                >>> b.sum().backward()
        N)r   r&   s     r   mark_non_differentiablez&PyLayerContext.mark_non_differentiable   s    L #'r   valuec                    || _         dS )a  
        Sets whether to materialize output grad tensors. Default is True.

        This should be called only from inside the `forward` method.

        If True, undefined output grad tensors will be expanded to tensors full
        of zeros prior to calling the `backward` method.

        If False, undefined output grad tensors will be None.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer
                >>> import numpy as np

                >>> class Tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         return x+x+x, x+x
                ...
                ...     @staticmethod
                ...     def backward(ctx, grad, grad2):
                ...         assert np.equal(grad2.numpy(), paddle.zeros([1]).numpy())
                ...         return grad

                >>> class Tanh2(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         ctx.set_materialize_grads(False)
                ...         return x+x+x, x+x
                ...
                ...     @staticmethod
                ...     def backward(ctx, grad, grad2):
                ...         assert grad2==None
                ...         return grad

                >>> x = paddle.ones([1], dtype="float64")
                >>> x.stop_gradient = False
                >>> Tanh.apply(x)[0].backward()

                >>> x2 = paddle.ones([1], dtype="float64")
                >>> x2.stop_gradient = False
                >>> Tanh2.apply(x2)[0].backward()
        N)r   )r   r*   s     r   set_materialize_gradsz$PyLayerContext.set_materialize_grads   s    ^ "'r   N)r   r   r   r   )r   r   r   r   )r   r   )r$   r   r   r   )r*   r   r   r   )__name__
__module____qualname____doc____annotations__r   r!   r#   r'   r)   r,    r   r   r   r   $   s          2 "!!!++++****""""C- C- C- C-J$! $! $! $!L   @'( '( '( '(R&' &' &' &'P/' /' /' /' /' /'r   r   c                      e Zd Zd ZdS )PyLayerBackwardc                (     | j         j        | g|R  S N)_forward_clsbackwardr&   s     r   r8   zPyLayerBackward.backwardS  s     )t )$66666r   N)r-   r.   r/   r8   r2   r   r   r4   r4   R  s#        7 7 7 7 7r   r4   c                       e Zd Z fdZ xZS )PyLayerMetac                    t          |dz   t          fd| i          | _        t                                          |||           d S )N	_backwardr7   )typer4   _backward_functionsuper__init__)clsnamebasesattrs	__class__s       r   r@   zPyLayerMeta.__init__X  sM    !%; 2^S4I"
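# Note on the machinery above: ``PyLayerMeta`` runs each time a ``PyLayer``
# subclass is defined, attaching a companion ``<name>_backward`` class (a
# ``PyLayerBackward`` subclass) whose ``_forward_cls`` points back at the
# user's class, so the backward node can dispatch to the user's static
# ``backward`` with itself acting as ``ctx``.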
 "
 	ue,,,,,r   )r-   r.   r/   r@   __classcell__)rE   s   @r   r:   r:   W  s8        - - - - - - - - -r   r:   c                  B    e Zd ZdZedd	            Zedd
            ZdS )PyLayera	  
    Paddle implements Python custom operators on the PaddlePaddle framework by creating a subclass of
    ``PyLayer``, which must comply with the following rules:

    1. The subclass must contain static ``forward`` and ``backward`` functions, with the first argument being
    :ref:`api_paddle_autograd_PyLayerContext`. If a returned value in ``backward`` corresponds to a ``Tensor`` that
    requires gradients in ``forward``, the returned value must be a ``Tensor``.

    2. Except for the first argument, other arguments of ``backward`` are gradients of the output ``Tensors``
    of ``forward``. Therefore, the number of input ``Tensor`` in ``backward`` must be the same as the number
    of output ``Tensor`` in ``forward``. If you need to use input ``Tensor`` from ``forward`` in ``backward``,
    you can save these ``Tensors`` by inputting them into :ref:`api_paddle_autograd_PyLayerContext`'s
    ``save_for_backward`` method and use them in ``backward`` later.

    3. The output of ``backward`` can be ``Tensor`` or ``list/tuple(Tensor)``, which are gradients of the
    output ``Tensor`` of ``forward``. Therefore, the number of output ``Tensor`` in ``backward`` is the same
    as the number of input ``Tensor`` in ``forward``.

    After building the custom operator, apply it by running the ``apply`` method.

    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.autograd import PyLayer

            >>> class cus_tanh(PyLayer):
            ...     @staticmethod
            ...     def forward(ctx, x):
            ...         y = paddle.tanh(x)
            ...         # Pass tensors to backward.
            ...         ctx.save_for_backward(y)
            ...         return y
            ...
            ...     @staticmethod
            ...     def backward(ctx, dy):
            ...         # Get the tensors passed by forward.
            ...         y, = ctx.saved_tensor()
            ...         grad = dy * (1 - paddle.square(y))
            ...         return grad

            >>> paddle.seed(2023)
            >>> data = paddle.randn([2, 3], dtype="float64")
            >>> data.stop_gradient = False
            >>> z = cus_tanh.apply(data)
            >>> z.mean().backward()

            >>> print(data.grad)
            Tensor(shape=[2, 3], dtype=float64, place=Place(cpu), stop_gradient=True,
            [[0.16604150, 0.05858341, 0.14051214],
             [0.15677770, 0.01564609, 0.02991660]])
    """

    @staticmethod
    def forward(
        ctx: PyLayerContext, *args: Any, **kwargs: Any
    ) -> Tensor | Sequence[Tensor]:
        """
        It is to be overloaded by subclasses. It must accept an object of :ref:`api_paddle_autograd_PyLayerContext` as
        the first argument, followed by any number of arguments (tensors or other types).
        `None` cannot be included in the returned result.

        Args:
            *args(tuple): input of PyLayer.
            **kwargs(dict): input of PyLayer.

        Returns:
            tensors or other types : output of PyLayer.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer

                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad
        """
        raise NotImplementedError(
            "You must implement the forward function for PyLayer."
        )

    @staticmethod
    def backward(
        ctx: PyLayerContext, *args: Any
    ) -> Tensor | Sequence[Tensor]:
        """
        This is a function to calculate the gradient. It is to be overloaded by subclasses.
        It must accept an object of :ref:`api_paddle_autograd_PyLayerContext` as the first
        argument, and the rest arguments are the gradient of forward's output tensors.
        Output tensors of backward are the gradient of forward's input tensors.

        Args:
            *args(tuple): The gradient of forward's output tensor(s).
            **kwargs(dict): The gradient of forward's output tensor(s).

        Returns:
            Tensor or list of Tensors: The gradient of forward's input tensor(s).

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> from paddle.autograd import PyLayer

                >>> class cus_tanh(PyLayer):
                ...     @staticmethod
                ...     def forward(ctx, x):
                ...         y = paddle.tanh(x)
                ...         # Pass tensors to backward.
                ...         ctx.save_for_backward(y)
                ...         return y
                ...
                ...     @staticmethod
                ...     def backward(ctx, dy):
                ...         # Get the tensors passed by forward.
                ...         y, = ctx.saved_tensor()
                ...         grad = dy * (1 - paddle.square(y))
                ...         return grad
        """
        raise NotImplementedError(
            "You must implement the backward function for PyLayer."
        )
r   rH   )	metaclassr8   1Callable[Concatenate[PyLayerContext, ...], _RetT]r   c                     d fd}|S )	NrI   r   r$   r   r   r   c                    t           j        j                                        5   | g|R  }d d d            n# 1 swxY w Y   |S r6   )paddlebasedygraphno_grad)rI   r$   outputsr8   s      r   wrapperz$once_differentiable.<locals>.wrapper  s    [ ((** 	+ 	+hs*T***G	+ 	+ 	+ 	+ 	+ 	+ 	+ 	+ 	+ 	+ 	+ 	+ 	+ 	+ 	+s   
;??)rI   r   r$   r   r   r   r2   )r8   r[   s   ` r   once_differentiabler\     s(         
 Nr   )r8   rS   r   rS   )
__future__r   typingr   r   r   r   typing_extensionsr   rV   paddle.baser	   collections.abcr
   r   __all__r   r   eagerrH   r4   r=   r:   r\   r2   r   r   <module>rd      s   # " " " " " 8 8 8 8 8 8 8 8 8 8 8 8 ) ) ) ) ) )        ((((((  	k' k' k' k' k' k' k' k'\	7 7 7 7 7dj(. 7 7 7
- - - - -$ - - -F
 F
 F
 F
 F
dj .K F
 F
 F
 F
R     r   
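

# A minimal usage sketch (illustrative only, not part of the module itself): a
# custom op whose backward is wrapped with ``once_differentiable``. The class
# name ``Square`` and the sample values below are assumptions for demonstration.
#
#     import paddle
#     from paddle.autograd import PyLayer
#     from paddle.autograd.py_layer import once_differentiable
#
#     class Square(PyLayer):
#         @staticmethod
#         def forward(ctx, x):
#             ctx.save_for_backward(x)
#             return x * x
#
#         @staticmethod
#         @once_differentiable
#         def backward(ctx, dy):
#             (x,) = ctx.saved_tensor()
#             return 2.0 * x * dy
#
#     x = paddle.ones([3])
#     x.stop_gradient = False
#     Square.apply(x).sum().backward()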