o
    4/iy                  &   @   s  d Z ddlmZ ddlZddlmZmZmZmZm	Z	m
Z
 ddlZddlmZ ddlmZmZmZ ddlmZ dd	lmZmZmZmZmZmZmZ dd
lmZ dd ZeddG dd dZedddddZ edddddZ!e e!gZ"eddG dd dZ#e#e dde#e dde#e!ddgZ$dej%dej%de&de&de&dej%d e&d!ej%fd"d#Z'ej(d$d% Z)ej(d&d' Z*ej(d(d) Z+G d*d+ d+Z,e			,	,				-d>dej%dej%d.ej%d/e&de&de&d0ej%d1ej%d2e&de&d3eej% d4e	e-ej%f d5e	e-ej%f d6eeej%  d7ee- d8e.dB d9e/d!ej%f$d:d;Z0e		,	,		d?dej%dej%d.ej%d/e&de&de&d0ej%d1ej%d2e&d3eej% d4e	e-ej%f d5e	e-ej%f d6eeej%  d8e.dB d!ej%fd<d=Z1dS )@a3  
Copyright (c) 2023 by FlashInfer team.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
    )	dataclassN)ListLiteralOptionalTupleUnionoverload   )flashinfer_api)gen_batch_mla_modulegen_trtllm_gen_fmha_modulesetup_cubin_loader)gen_mla_module)MaskModecheck_shape_dtype_devicedetermine_mla_backenddevice_support_pdlget_compute_capabilityget_device_sm_countlog2e)xqa_mlac                 C   s  | j dkrtd| j  |j dkrtd|j  |j dkr'td|j  |j dkr4td|j  | j\}}}|jd }|dkrJtd	| ||ksR|d
kr\td| d| |j\}}	|jd }
||krwtd| d| d| |	d|
  dkrtd|	d|
d S )N   z"Expected q_nope_pe.ndim == 3, got z&Expected ckv_kpe_cache.ndim == 3, got r	   zExpected kv_len.ndim == 1, got    z#Expected page_table.ndim == 2, got    z&Expected 128 heads for q_nope_pe, got i@  z;Expected head dim 576 for q_nope_pe and ckv_kpe_cache, got  and Expected batch size z$ for q_nope_pe and block_table, got r   <Expected block_num % (128 / block_size) == 0, got block_num= and block_size=)ndim
ValueErrorshape)	q_nope_peckv_kpe_cachekv_len
page_tableB_qHD_qD_ckvB_block_table	block_num
block_size r,   [/lsinfo/ai/hellotax_ai/llm_service/venv_vllm/lib/python3.10/site-packages/flashinfer/mla.py_check_cutlass_shape&   s6   






r.   T)frozenc                   @   s2   e Zd ZU dZeed< eed< eed< eed< dS )MLAHeadDimensionsa  
    The dimensions of a single MLA head.

    Args:
        qk_nope_head_dim (int): The number of input channels without positional information in non-absorb mode.
        qk_rope_head_dim (int): The number of channels carrying positional information for both absorb and non-absorb modes.
        v_head_dim (int): The number of value channels, which is also the output head dimension in non-absorb mode.
        kv_lora_rank (int): The dimension of the compressed key-value representation across heads.
    qk_nope_head_dimqk_rope_head_dim
v_head_dimkv_lora_rankN)__name__
__module____qualname____doc__int__annotations__r,   r,   r,   r-   r0   C   s   
 
r0   r   @   i   )r1   r2   r3   r4      c                   @   s"   e Zd ZU dZeed< eed< dS )MLALayerDimensionsz
    The dimensions of an MLA layer.

    Args:
        head_dimensions (MLAHeadDimensions): The dimensions of a single MLA head.
        num_heads (int): The number of heads in the MLA layer.
    head_dimensions	num_headsN)r5   r6   r7   r8   r0   r:   r9   r,   r,   r,   r-   r=   f   s   
 r=   )r>   r?       querykv_cacher4   r2   sparse_mla_top_kr$   	page_sizereturnc                 C   sd  | j dkrtd| j  |j dkr|d}n|j dkr%td|j  |tjko.|tjk}|tjko8|tjk}|sJ|sJtd| d| dt | j\}	}
}}|jd }|| }||ksb||krotd	| d
| d| |dkr|j}||	|
|fkrtd| |S |j\}}|}|	|krtd|	 d|	 d| |d|  dkrtd|d||S )N   zExpected query.ndim == 4, got r   r	   z&Expected kv_cache.ndim == 3 or 4, got z-Unsupported MLA dimensions, got kv_lora_rank=z and qk_rope_head_dim=z, supported dimensions are: zExpected head dim z for query and kv_cache, got r   r   zKExpected page_table.shape == (num_seqs, num_tokens, sparse_mla_top_k), got r   z  for query and block_table, got r   r   r   )	r   r   	unsqueezedeepseek_mla_dimensionsr4   r2   smaller_mla_dimensionssupported_mla_head_dimensionsr    )rA   rB   r4   r2   rC   r$   rD   is_deepseek_dimensionsis_smaller_mla_dimensionsnum_seqs
num_tokens_qk_head_dimckv_dimexpected_qk_head_dimpage_table_shaper)   r*   r+   r,   r,   r-   _check_trtllm_gen_mla_shape   sR   
	





rT   c                  C   s   t  } |  }t|   |S N)r   build_and_loadr   get_library_path)modopr,   r,   r-   get_trtllm_gen_fmha_module   s   rZ   c                   C   s
   t   S rU   )r   rV   r,   r,   r,   r-   get_mla_module   s   
r[   c                 G   s   t | g|R   S rU   )r   rV   )backendargsr,   r,   r-   get_batch_mla_module   s   r^   c                   @   sD  e Zd ZdZe						d*dejdedeej deej d	eej d
eej de	ddfddZ
e	d+dejdejd	ejd
ejdededededededejdejdeddfddZe							d,dejdejdejdejdeej deej d ed d!eej d"eej d#eej d$edejfd%d&Ze			'				d-dejdejdejdejdeej deej d ed' d!eej d"eej d#eej d$edeejejf fd(d&Ze							d,dejdejdejdejdeej deej d ed!eej d"eej d#eej d$edeejeejejf f fd)d&ZdS ).BatchMLAPagedAttentionWrappera
  Wrapper class for MLA (`Multi-head Latent Attention <https://arxiv.org/abs/2405.04434>`_)
    PagedAttention on DeepSeek models. This kernel can be used in decode, and incremental prefill
    and should be used together with `Matrix Absorption trick
    <https://github.com/madsys-dev/deepseekv2-profile/blob/main/workspace/blog/optimizing-mla.md>`_:
    where :math:`W_{UQ}` is absorbed with :math:`W_{UK}`, and :math:`W_{UV}` is
    absorbed with :math:`W_{O}`.
    For MLA attention without Matrix Absorption (``head_dim_qk=192`` and ``head_dim_vo=128``, which is
    used in prefilling self-attention stage), please use
    :class:`flashinfer.prefill.BatchPrefillWithRaggedKVCacheWrapper`.

    More information about The Paged KV-Cache layout in MLA is explained in our tutorial
    :ref:`MLA Page Layout <mla-page-layout>`.

    For more details about the MLA computation, Matrix Absorption and FlashInfer's MLA implementation,
    please refer to our `blog post <http://flashinfer.ai/2025/02/10/flashinfer-deepseek-mla.html>`_.

    Example
    -------
    >>> import torch
    >>> import flashinfer
    >>> num_local_heads = 128
    >>> batch_size = 114
    >>> head_dim_ckv = 512
    >>> head_dim_kpe = 64
    >>> page_size = 1
    >>> mla_wrapper = flashinfer.mla.BatchMLAPagedAttentionWrapper(
    ...     torch.empty(128 * 1024 * 1024, dtype=torch.int8).to(0),
    ...     backend="fa2"
    ... )
    >>> q_indptr = torch.arange(0, batch_size + 1).to(0).int() # for decode, each query length is 1
    >>> kv_lens = torch.full((batch_size,), 999, dtype=torch.int32).to(0)
    >>> kv_indptr = torch.arange(0, batch_size + 1).to(0).int() * 999
    >>> kv_indices = torch.arange(0, batch_size * 999).to(0).int()
    >>> q_nope = torch.randn(
    ...     batch_size * 1, num_local_heads, head_dim_ckv, dtype=torch.bfloat16, device="cuda"
    ... )
    >>> q_pe = torch.zeros(
    ...     batch_size * 1, num_local_heads, head_dim_kpe, dtype=torch.bfloat16, device="cuda"
    ... )
    >>> ckv = torch.randn(
    ...     batch_size * 999, 1, head_dim_ckv, dtype=torch.bfloat16, device="cuda"
    ... )
    >>> kpe = torch.zeros(
    ...     batch_size * 999, 1, head_dim_kpe, dtype=torch.bfloat16, device="cuda"
    ... )
    >>> sm_scale = 1.0 / ((128 + 64) ** 0.5)  # use head dimension before matrix absorption
    >>> mla_wrapper.plan(
    ...     q_indptr,
    ...     kv_indptr,
    ...     kv_indices,
    ...     kv_lens,
    ...     num_local_heads,
    ...     head_dim_ckv,
    ...     head_dim_kpe,
    ...     page_size,
    ...     False,  # causal
    ...     sm_scale,
    ...     q_nope.dtype,
    ...     ckv.dtype,
    ... )
    >>> o = mla_wrapper.run(q_nope, q_pe, ckv, kpe, return_lse=False)
    >>> o.shape
    torch.Size([114, 128, 512])
    FNautofloat_workspace_bufferuse_cuda_graph	qo_indptr	kv_indptr
kv_indices
kv_len_arrr\   rE   c                 C   s   || _ |j| _|dkr|| _dS tjdtj| jd| _tj| jj| jjddd| _	|| _
|| _|| _|| _|| _|dkrDt| j| _dS || _dS )	an  Constructor for BatchMLAPagedAttentionWrapper.

        Parameters
        ----------
        float_workspace_buffer : torch.Tensor
            The user reserved workspace buffer used to store intermediate attention results in
            split-k algorithm. The recommended size is 128MB, the device of the workspace buffer
            should be the same as the device of the input tensors.
        use_cuda_graph : bool, optional
            Whether to enable CUDA graph capture for the prefill kernels, if enabled, the
            auxiliary data structures will be stored in provided buffers. The ``batch_size``
            cannot change during the lifecycle of this wrapper when CUDAGraph is enabled.
        qo_indptr_buf : Optional[torch.Tensor]
            The user reserved buffer to store the ``qo_indptr`` array, the size of the buffer
            should be ``[batch_size + 1]``.
            This argument is only effective when ``use_cuda_graph`` is ``True``.
        kv_indptr_buf : Optional[torch.Tensor]
            The user reserved buffer to store the ``kv_indptr`` array, the size of the buffer
            should be ``[batch_size + 1]``.
            This argument is only effective when ``use_cuda_graph`` is ``True``.
        kv_indices_buf : Optional[torch.Tensor]
            The user reserved buffer to store the ``kv_indices`` array.
            This argument is only effective when ``use_cuda_graph`` is ``True``.
        kv_len_arr_buf : Optional[torch.Tensor]
            The user reserved buffer to store the ``kv_len_arr`` array, the size of the buffer
            should be ``[batch_size]``.
            This argument is only effective when ``use_cuda_graph`` is ``True``.
        backend : str
            The implementation backend, could be ``auto``/``fa2`` or ``fa3``. Defaults to ``auto``.
            If set to ``auto``, the function will automatically choose the backend based on the
            device architecture and kernel availability. If ``cutlass`` is provided, the MLA
            kernels will be generated by CUTLASS and only float_workspace_buffer is required and
            other arguments are ignored.
        cutlassN)   dtypedeviceTcpu)rj   
pin_memoryrk   r`   )_float_workspace_bufferrk   _backendtorchemptyuint8_int_workspace_bufferr    rj    _pin_memory_int_workspace_buffer_use_cuda_graph_qo_indptr_buf_kv_indptr_buf_kv_indices_buf_kv_len_arr_bufr   )selfra   rb   rc   rd   re   rf   r\   r,   r,   r-   __init__  s,   -

z&BatchMLAPagedAttentionWrapper.__init__r?   head_dim_ckvhead_dim_kperD   causalsm_scaleq_data_typekv_data_typeuse_profilerc                 C   s  t | j||||j|||| _|d}|d}|d}| jrG| jj|dd | jj|dd | j	dt
| j|dd | jj|dd n$|j| jdd| _|j| jdd| _|j| jdd| _	|j| jdd| _|	| _|| _|
| _|| _| j| j| j| j||||||		| _dS )a  Plan the MLA attention computation.

        Parameters
        ----------
        qo_indptr : torch.IntTensor
            The indptr of the query/output tensor, shape: ``[batch_size + 1]``.
            For decoding attention, the length of each query is 1, and the content
            of the tensor should be ``[0, 1, 2, ..., batch_size]``.
        kv_indptr : torch.IntTensor
            The indptr of the paged kv-cache, shape: ``[batch_size + 1]``.
        kv_indices : torch.IntTensor
            The page indices of the paged kv-cache, shape: ``[kv_indptr[-1]]`` or larger.
        kv_len_arr : torch.IntTensor
            The query length of each request, shape: ``[batch_size]``.
        num_heads : int
            The number of heads in query/output tensor.
        head_dim_ckv : int
            The head dimension of compressed-kv.
        head_dim_kpe : int
            The head dimension for rope k-cache.
        page_size : int
            The page size of the paged kv-cache.
        causal : bool
            Whether to use causal attention.
        sm_scale : float
            The scale factor for softmax operation.
        q_data_type : torch.dtype
            The data type of the query tensor.
        kv_data_type : torch.dtype
            The data type of the kv-cache tensor.
        use_profiler : bool, optional
            Whether to enable intra-kernel profiler, default is False.
        rl   T)non_blockingN)r^   ro   rj   _cached_moduletoru   rv   copy_rw   rx   lenry   rk   _causal
_page_size	_sm_scale_use_profilerplanrn   rs   rt   
_plan_info)rz   rc   rd   re   rf   r?   r|   r}   rD   r~   r   r   r   r   qo_indptr_hostkv_indptr_hostkv_len_arr_hostr,   r,   r-   r   Y  sJ   2




z"BatchMLAPagedAttentionWrapper.planq_nopeq_pe	ckv_cache	kpe_cacheoutlse
return_lseprofiler_bufferr#   r$   return_lse_base_on_ec                 C      d S rU   r,   rz   r   r   r   r   r   r   r   r   r#   r$   r   r,   r,   r-   run     z!BatchMLAPagedAttentionWrapper.runTc                 C   r   rU   r,   r   r,   r,   r-   r     r   c                 C   s  | j dkr^|rtd|durtdt | _|du r!t|}nt||j|j|j	d tj
||gdd}tj
||gdd}t|||	|
 tjdtj| j	d	}| j| j|||||	|
 |S |du ri| jritd
|jd }| j}| j}| j}|r}tjjntjj}| j	}|du rt|}nt||j|j|j	d |r|du rtj|jdd tj|d	}nt||jdd tj|j	d | jr|fnd}| jj| j| j| j||||| j|||||||g|R   |r||fS |S )a  Run the MLA attention computation.

        Parameters
        ----------
        q_nope : torch.Tensor
            The query tensor without rope, shape: ``[batch_size, num_heads, head_dim_ckv]``.
        q_pe : torch.Tensor
            The rope part of the query tensor, shape: ``[batch_size, num_heads, head_dim_kpe]``.
        ckv_cache : torch.Tensor
            The compressed kv-cache tensor (without rope), shape: ``[num_pages, page_size, head_dim_ckv]``.
            ``head_dim_ckv`` is 512 in DeepSeek v2/v3 models.
        kpe_cache : torch.Tensor
            The rope part of the kv-cache tensor, shape: ``[num_pages, page_size, head_dim_kpe]``.
            ``head_dim_kpe`` is 64 in DeepSeek v2/v3 models.
        out : Optional[torch.Tensor]
            The output tensor, if not provided, will be allocated internally.
        lse : Optional[torch.Tensor]
            The log-sum-exp of attention logits, if not provided, will be allocated internally.
        return_lse : bool, optional
            Whether to return the log-sum-exp value, default is False.
        profiler_buffer : Optional[torch.Tensor]
            The buffer to store the profiler data.
        kv_len : Optional[torch.Tensor]
            The query length of each request, shape: ``[batch_size]``. Required when ``backend`` is ``cutlass``.
        page_table : Optional[torch.Tensor]
            The page table of the paged kv-cache, shape: ``[batch_size, num_pages]``. Required when ``backend`` is ``cutlass``.
        rg   z4return_lse does not support cutlass backend for now.Nz9profiler_buffer does not support cutlass backend for now.r   )dimr   ri   z5Profiler is enabled, profiler_buffer must be providedr	   r   r   r,   )ro   r   r[   r   rp   
empty_liker   r    rj   rk   catr.   rq   float32cutlass_mla_paged_attentionrn   r   r   r   r   r   CAUSALvalue
NON_CAUSALr   rs   r   rx   )rz   r   r   r   r   r   r   r   r   r#   r$   r   r!   r"   r?   rD   r   r~   	mask_moderk   profiler_argsr,   r,   r-   r     s   
*	
)FNNNNr`   )F)NNFNNNF)NNTNNNF)r5   r6   r7   r8   r
   rp   Tensorboolr   strr{   r9   floatrj   r   r   r   r   r   r   r,   r,   r,   r-   r_      s8   A	F	
Z	
	
	
r_         ?r`   workspace_bufferr1   block_tablesseq_lensmax_seq_lenr   
bmm1_scale
bmm2_scalesinks#skip_softmax_threshold_scale_factor
enable_pdlr\   c                 C   sf  |dkrt | jd dkrdnd}t|tjr#|jtjksJ |t }t|tjr1|jtjks1J |dkrt | jd dksJ| jtjksJ|jtjkrVt	d| j d|j |d	ur^t	d
| 
ddkrot	d| 
d |d	urwt	dt| ||d||||||
||||S |dkr+|d	u rt| jn|}t j}t| j}|
d}|dkr|dkrt	d| |d	ur|	dkrt	dt| ||||	||}|
d	u r| jd	d |f }tj|tj| jd}
n| j\}}}}t|
|||gtj| jd | 
d}| 
d}| dd} ||
d	| |||||||||ddd|d|	||| |  |d	| |
S t	d| d)a  
    Parameters
    ----------
    query: [batch_size, q_len_per_request, num_heads, head_dim_qk], head_dim_qk = qk_nope_head_dim (kv_lora_rank) + qk_rope_head_dim, should be concated q_nope + q_rope; q_len_per_request is the MTP query length.
    kv_cache: [num_pages, page_size, head_dim_ckv + head_dim_kpe] or [num_pages, 1, page_size, head_dim_ckv + head_dim_kpe], should be concated ckv_cache + kpe_cache. Both 3D and 4D formats are supported for backward compatibility.
    workspace_buffer: [num_semaphores, 4], used for multi_block mode. Must be initialized to 0 for its first use.
    qk_nope_head_dim: qk_nope_head_dim, must be 128 or 64
    kv_lora_rank: kv_lora_rank, must be 512 or 256
    qk_rope_head_dim: qk_rope_head_dim, must be 64
    sparse_mla_top_k: sparse MLA top k, must be 0 for non-sparse MLA.
    block_tables: page_table of kv cache, [batch_size, num_pages]
    seq_lens: query_len
    max_seq_len: max sequence length for kv_cache
    out: output tensor, if not provided, will be allocated internally
    bmm1_scale: fused scale for mla bmm1 input.
        when using trtllm-gen backend, it can be a torch.Tensor with dtype torch.float32.
    bmm2_scale: fused scale for mla bmm2 input.
        when using trtllm-gen backend, it can be a torch.Tensor with dtype torch.float32.
    sinks: additional value per head in the denominator of the softmax.
    skip_softmax_threshold_scale_factor: threshold scale factor for skipping softmax operations.
        Providing a value for this parameter enables skip-softmax sparsity as described in: https://arxiv.org/abs/2512.12087
        If no value is provided, then standard attention is used.
        Setting the threshold to a higher value generally increases kernel performance at the cost of accuracy degradation.
        The actual threshold value equals the provided threshold_scale_factor divided by the context length.
    backend : str = "auto"
        The implementation backend, could be ``auto``/``xqa`` or ``trtllm-gen``. Defaults to ``auto``.
        When set to ``auto``, the backend will be chosen based on the device architecture and kernel availability.
        For sm_100 and sm_103 (blackwell architecture), ``auto`` will choose ``trtllm-gen`` backend.
        For sm_120 (blackwell architecture), ``auto`` will choose ``xqa`` backend.

    Note
    ----
    In MLA, the actual BMM1 and BMM2 scales applied would be fused as:
    bmm1_scale = q_scale * k_scale * sm_scale / (head_dim_qk ** 0.5)
    bmm2_scale = v_scale * o_scale
    or,
    bmm1_scale = torch.Tensor([q_scale * k_scale * sm_scale / (head_dim_qk ** 0.5))
    bmm2_scale = torch.Tensor([v_scale * o_scale])

    The two scale factors should be static constant for cuda graph capture.
    Either (bmm1_scale, bmm2_scale) or (bmm1_scale_log2_tensor, bmm2_scale_tensor) should be provided.

    For static constant scale factors, the scale factors should be provided as float.
        - (bmm1_scale, bmm2_scale)
    For on-device fused scale tensors, which could dynamically change, the scale factors should be provided as torch.Tensor.
        - (bmm1_scale_log2_tensor, bmm2_scale_tensor)
        - Currently, only fp8 tensor core operation supports this mode.
    When both are provided, the dynamic scale factor tensors will be used.
    r`   r   
   z
trtllm-genxqa   z=XQA MLA only supports fp8 operation on SM120/SM121 GPUs, got r   NXQA MLA does not support sinksr	   2XQA MLA only supports q_len_per_request == 1, got z-skip_softmax is not supported for XQA backendr   r@   r;   z(Supported block_size are 32 and 64, got z,skip_softmax is not supported for sparse MLAri   r   zBackend z not supported)r   rk   
isinstancerp   r   rj   r   r   float8_e4m3fnr   size"xqa_batch_decode_with_kv_cache_mlar   rZ   trtllm_paged_attention_decoder   rT   r    rq   bfloat16r   flattennumelelement_size)rA   rB   r   r1   r4   r2   r   r   r   rC   r   r   r   r   r   r   r\   run_funcsm_countr+   	out_shape
batch_sizerO   num_q_heads	max_q_lenr,   r,   r-   %trtllm_batch_decode_with_kv_cache_mlaK  s   E





r   c                 C   s\  |du r	t | jn|}t| j}|d}| d}|dkr%td| | jtjks1|jtjkr=td| j d|j |durEtdt| |||d||}|	du rh| j	dd	 |f }tj
|tj| jd
}	n| j	\}}}}t|	|||gtj| jd |tj}|dd }|dd }|dd}|d}t| |||||	||||
|||d |	S )a  
    Parameters:
    query: [batch_size, q_len_per_request, num_heads, head_dim_qk], head_dim_qk = qk_nope_head_dim (kv_lora_rank) + qk_rope_head_dim, should be concated q_nope + q_rope; q_len_per_request is the MTP query length.
    kv_cache: [num_pages, page_size, head_dim_ckv + head_dim_kpe] or [num_pages, 1, page_size, head_dim_ckv + head_dim_kpe], should be concated ckv_cache + kpe_cache. Both 3D and 4D formats are supported for backward compatibility.
    workspace_buffer: torch.Tensor. Must be initialized to 0 for its first use.
    qk_nope_head_dim: qk_nope_head_dim, must be 128
    kv_lora_rank: kv_lora_rank, must be 512
    qk_rope_head_dim: qk_rope_head_dim, must be 64
    block_tables: page_table of kv cache, [batch_size, num_pages]
    seq_lens: query_len
    max_seq_len: max sequence length for kv_cache
    out: output tensor, if not provided, will be allocated internally
    bmm1_scale: fused scale for mla bmm1 input. Can be a float or a torch.Tensor.
    bmm2_scale: fused scale for mla bmm2 input. Can be a float or a torch.Tensor.
    sinks: additional value per head in the denominator of the softmax.

    Note:
    In MLA, the actual BMM1 and BMM2 scales applied would be fused as:
    bmm1_scale = q_scale * k_scale * sm_scale / (head_dim_qk ** 0.5)
    bmm2_scale = v_scale * o_scale

    The two scale factors should be static constant for cuda graph capture.
    Either (bmm1_scale, bmm2_scale) or (bmm1_scale_log2_tensor, bmm2_scale_tensor) should be provided.

    For static constant scale factors, the scale factors should be provided as float.
        - (bmm1_scale, bmm2_scale)
    For on-device fused scale tensors, which could dynamically change, the scale factors should be provided as torch.Tensor.
        - (bmm1_scale_log2_tensor, bmm2_scale_tensor)
        - Currently, only fp8 tensor core operation supports this mode.
    When both are provided, the dynamic scale factor tensors will be used.
    Nr   r	   r   z5XQA MLA only supports fp8 tensor core operation, got r   r   r   r   ri   r   rh   r   )q_scalekv_scaler   r   )r   rk   r   r   r   rj   rp   r   rT   r    rq   r   r   viewrr   squeezerG   r   )rA   rB   r   r1   r4   r2   r   r   r   r   r   r   r   r   r   r+   q_len_per_requestr   r   rO   r   workspace_u8	semaphorescratchkv_cache_newseq_lens_newr,   r,   r-   r     sn   0




r   )r   Nr   r   NNNr`   )Nr   r   NN)2r8   dataclassesr   	functoolstypingr   r   r   r   r   r   rp   api_loggingr
   jitr   r   r   jit.mlar   utilsr   r   r   r   r   r   r   r   r   r.   r0   rH   rI   rJ   r=   supported_mla_layer_dimensionsr   r9   rT   cacherZ   r[   r^   r_   r   r   r   r   r   r,   r,   r,   r-   <module>   s(    $	
=


  }	
 <	
