from __future__ import annotations

from typing import TYPE_CHECKING, Any

import numpy as np

import paddle
import paddle.distributed as dist
from paddle import framework
from paddle.distributed.communication import stream

from .serialization_utils import (
    convert_object_to_tensor,
    convert_tensor_to_object,
)

if TYPE_CHECKING:
    from collections.abc import Sequence

    from paddle import Tensor
    from paddle.base.core import task
    from paddle.distributed.communication.group import Group


def scatter(
    tensor: Tensor,
    tensor_list: Sequence[Tensor] | None = None,
    src: int = 0,
    group: Group | None = None,
    sync_op: bool = True,
) -> task | None:
    """

    Scatter a tensor to all participants. As shown below, each process runs on one GPU and the source of the
    scatter is GPU0. Through the scatter operator, the data on GPU0 is split and sent evenly to all GPUs.

    .. image:: https://githubraw.cdn.bcebos.com/PaddlePaddle/docs/develop/docs/api/paddle/distributed/img/scatter.png
        :width: 800
        :alt: scatter
        :align: center

    Args:
        tensor (Tensor): The output Tensor. Its data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16.
        tensor_list (list|tuple): A list/tuple of Tensors to scatter. Every element in the list must be a Tensor whose data type
            should be float16, float32, float64, int32, int64, int8, uint8, bool or bfloat16. Default value is None.
        src (int): The source rank id. Default value is 0.
        group (Group, optional): The group instance returned by new_group or None for the global default group.
        sync_op (bool, optional): Whether this op is a sync op. The default value is True.

    Returns:
        None if ``sync_op`` is True; otherwise a ``task`` handle for the asynchronous operation.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> if dist.get_rank() == 0:
            ...     data1 = paddle.to_tensor([7, 8, 9])
            ...     data2 = paddle.to_tensor([10, 11, 12])
            ...     dist.scatter(data1, src=1)
            ... else:
            ...     data1 = paddle.to_tensor([1, 2, 3])
            ...     data2 = paddle.to_tensor([4, 5, 6])
            ...     dist.scatter(data1, tensor_list=[data1, data2], src=1)
            >>> print(data1, data2)
            >>> # [1, 2, 3] [10, 11, 12] (2 GPUs, out for rank 0)
            >>> # [4, 5, 6] [4, 5, 6] (2 GPUs, out for rank 1)
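
            >>> # A minimal sketch of the asynchronous form (assumes, as with the
            >>> # other communication ops in this module, that the returned task
            >>> # exposes wait()):
            >>> task = dist.scatter(data1, tensor_list=[data1, data2], src=1, sync_op=False)
            >>> task.wait()  # block until the scattered tensor has arrived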
    """
    return stream.scatter(tensor, tensor_list, src, group, sync_op)


def scatter_object_list(
    out_object_list: list[Any],
    in_object_list: list[Any] | None = None,
    src: int = 0,
    group: Group | None = None,
) -> None:
    """

    Scatter picklable objects from the source rank to all other ranks. Similar to scatter(), but Python objects can be passed in.

    Args:
        out_object_list (list): The list that receives the scattered objects.
        in_object_list (list): The list of objects to scatter. Only objects on the src rank will be scattered.
        src (int): The source rank in global view.
        group (Group): The group instance returned by new_group or None for the global default group.

    Returns:
        None.

    Warning:
        This API only supports the dygraph mode.

    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env: DISTRIBUTED)
            >>> import paddle.distributed as dist

            >>> dist.init_parallel_env()
            >>> out_object_list = [] # type: ignore
            >>> if dist.get_rank() == 0:
            ...     in_object_list = [{'foo': [1, 2, 3]}, {'foo': [4, 5, 6]}]
            ... else:
            ...     in_object_list = [{'bar': [1, 2, 3]}, {'bar': [4, 5, 6]}]
            >>> dist.scatter_object_list(out_object_list, in_object_list, src=1)
            >>> print(out_object_list)
            >>> # [{'bar': [1, 2, 3]}] (2 GPUs, out for rank 0)
            >>> # [{'bar': [4, 5, 6]}] (2 GPUs, out for rank 1)
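
            >>> # Internally each object is pickled into a uint8 tensor plus its
            >>> # length; a minimal sketch of that round trip (assumes the private
            >>> # serialization_utils helpers keep this interface):
            >>> from paddle.distributed.communication.serialization_utils import (
            ...     convert_object_to_tensor,
            ...     convert_tensor_to_object,
            ... )
            >>> obj_tensor, obj_size = convert_object_to_tensor({'foo': [1, 2, 3]})
            >>> restored = convert_tensor_to_object(obj_tensor, obj_size)
            >>> # restored == {'foo': [1, 2, 3]}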
    """
    assert (
        framework.in_dynamic_mode()
    ), "scatter_object_list doesn't support static graph mode."

    rank = dist.get_rank()
    in_obj_tensors = []
    in_obj_sizes = []
    if rank == src:
        # Serialize every object into a uint8 tensor and record its length.
        for obj in in_object_list:
            obj_tensor, obj_size = convert_object_to_tensor(obj)
            in_obj_tensors.append(obj_tensor)
            in_obj_sizes.append(obj_size)
        max_obj_size_tensor = max(in_obj_sizes)
    else:
        max_obj_size_tensor = paddle.empty([1], dtype="int64")
    # Broadcast the largest serialized size so that every rank can allocate a
    # receive buffer of the same length.
    stream.broadcast(max_obj_size_tensor, src)
    max_obj_size = int(max_obj_size_tensor.item())

    # Pad every serialized tensor to the same size; scatter requires all
    # shards to have identical shapes.
    in_tensor_list = []
    for tensor in in_obj_tensors:
        numpy_data = tensor.numpy()
        numpy_data = np.resize(numpy_data, [max_obj_size])
        in_tensor = paddle.to_tensor(numpy_data)
        in_tensor_list.append(in_tensor)
    out_tensor = paddle.empty([max_obj_size], dtype="uint8")
    scatter(out_tensor, in_tensor_list if rank == src else None, src, group)

    # Scatter the true (unpadded) sizes as well, so each receiver can strip
    # the padding before deserializing.
    out_tensor_size = paddle.empty([1], dtype="int64")
    scatter(out_tensor_size, in_obj_sizes if rank == src else None, src, group)

    out_object_list.clear()
    out_object_list.append(
        convert_tensor_to_object(out_tensor, out_tensor_size.item())
    )