o
    :/ixL                    @   s  d dl Z d dlmZmZmZ d dlmZ d dlmZm	Z	 d dl
Z
d dlmZ d dlmZ d dlmZ d dlmZmZ d dlmZmZmZmZ d d	lmZmZmZmZmZmZ d d
l m!Z! d dl"m#Z#m$Z$m%Z%m&Z&m'Z' d dl(m)Z)m*Z* d dl+m,Z, d dl-m.Z.m/Z/m0Z0m1Z1 d dl2m3Z3 d dl4m5Z5m6Z6m7Z7m8Z8m9Z9m:Z:m;Z;m<Z< d dl=m>Z> d dl?m@Z@mAZAmBZBmCZCmDZDmEZE d dlFmGZG d dlHmIZI d dlJmKZK d dlLmMZMmNZNmOZOmPZPmQZQ d dlRmSZS d dlTmUZU d dlVmWZWmXZX d dlYmZZZm[Z[m\Z\ d dl]m^Z^m_Z_m`Z` d dlambZb d dlcmdZd d dlemfZf d dlgmhZh d d limjZj d d!lkmlZl d d"lmmnZn d d#lompZp d d$lqmrZr er;d d%lsmtZt eGeuZved&ePeOB ePeOB d'Zwed(e`eUB dB d)Zxed*e	d+ZyG d,d- d-ZzdS ).    N)CallableIterableSequence)Path)TYPE_CHECKINGAny)ValidationError)tqdm)TypeVaroverload)BeamSearchInstanceBeamSearchOutputBeamSearchSequencecreate_sort_beams_key_function)AttentionConfigCompilationConfigPoolerConfigProfilerConfigStructuredOutputsConfigis_init_field)CompilationMode)ConvertOptionHfOverrides
ModelDTypeRunnerOptionTokenizerMode)WeightTransferInitRequestWeightTransferUpdateRequest)
EngineArgs)ChatCompletionMessageParamChatTemplateConfigChatTemplateContentFormatOptionload_chat_template)init_pooling_io_processors)	ScoreDataScoreMultiModalParam_cosine_similaritycompress_token_type_idscompute_maxsim_scoreget_score_promptscore_data_to_promptsvalidate_score_input)log_non_default_args)
DataPromptProcessorInputs
PromptTypeSingletonPrompt
TextPromptTokensPrompt)init_logger)LoRARequest)QuantizationMethods)ClassificationRequestOutputEmbeddingRequestOutputPoolingRequestOutputRequestOutputScoringRequestOutput)current_platform)PoolingParams)
ChatParamsmerge_kwargs)conversation_to_seqparse_model_promptprompt_to_seq)BeamSearchParamsRequestOutputKindSamplingParams)PoolingTask)TokenizerLike)UsageContext)Counter)is_mistral_tokenizer)
maybe_tqdm)	PauseMode)	LLMEngine)LogitsProcessor)Metric_O)bounddefault_P)rP   _R)rQ   c                K   @   s  e Zd ZdZddddddddddddddddddddddddddddddddddd	"d
ededededB deeB dededede	e dB de
dededB dedB dedB deeB dB de
dedede
de
de
dee dB d ed!ed"ed#eeB dB d$edB d%eeef dB d&edB d'eeef eB dB d(eeef eB dB d)eeef eB dB d*e
dB d+e
eeef B eB dB d,e	eee B  dB d-ed.dfJd/d0Zd.efd1d2Zdd4ed.e
fd5d6Zdd7d8Zd.efd9d:Z 	dd3dddd;d<e!e"e! B d=ee"e B dB d>ee#d?e$f B d@e"e% e%B dB dAe	e
 dB dBeeef dB d.e	e& fdCdDZ'				3	dd<e!e"e! B d=ee"e B dB d@e"e% e%B dB dAe	e
 dB d>ee#d?e$f B dBeeef dB d.e	e fdEdFZ(e)d3dGd>ee#d?e$f B d.e	e&e*B  fdHdIZ+e)d3dGdJee, e-ee, d?f B d>ee#d?e$f B d.e	e, fdKdIZ+	dd3dGdJee e-ee d?f B dB d>ee#d?e$f B d.e	e fdLdIZ+dMe.d@e%dB d.e%dB fdNdOZ/		P	ddQee#d?e0f B dRedB dSe-d-eeef dB d.e	e0 f
dTdUZ1dVe#e2j3ge0f d.e	e0 fdWdXZ4			dd<e	e5e6B  dYe7d@e	e% e%B dB d>edZe
dB d.e	e8 fd[d\Z9	dd<e"e! dBeeef dB d.e"e. fd]d^Z:	ddMe!dBeeef dB d.e.fd_d`Z;				3				ddae"e	e<  dedB dbe=dceeef dB ddedeedfe	eeef  dB dBeeef dB d%eeef dB d.e"e. fdgdhZ>				3				ddie	e< dedB dbe=dceeef dB ddedeedfe	eeef  dB dBeeef dB d%eeef dB d.e.fdjdkZ?		3				3					ddle	e< e"e	e<  B d=ee"e B dB d>ee#d?e$f B d@e"e% e%B dB dedB dbe=ddedeedfe	eeef  dB dceeef dB dBeeef dB d%eeef dB d.e	e& fdmdnZ@	dd3ddddod<e!e"e! B eAB dpeBe"eB B dB d>ee#d?e$f B d@e	e% e%B dB dqeCdB dBeeef dB d.e	e* fdrdsZDd3ddddtd<e!e"e! B d>ee#d?e$f B dpeBe"eB B dB d@e	e% e%B dB dBeeef dB d.e	eE fdudvZFdd3dddwd<e!e"e! B dpeBe"eB B dB d>ee#d?e$f B d@e	e% e%B dB dBeeef dB d.e	eG fdxdyZHdd3dddwd<e!e"e! B dpeBe"eB B dB d>ee#d?e$f B d@e	e% e%B dB dBeeef dB d.e	e* fdzd{ZId|e	eJ d}e	eJ d>ee#d?e$f B dpeBdB d@e	e% e%B dB dBeeef d.e	eK fd~dZLd|e	eJ d}e	eJ d>ee#d?e$f B dpeBdB d@e	e% e%B dB dBeeef d.e	eK fddZMd|e	eJ d}e	eJ d>ee#d?e$f B dpeBdB d@e	e% e%B dB dBeeef dedB d.e	eK fddZNd3dddddd|eOe"eO B ePB e	eP B d}eOe"eO B ePB e	eP B d>ee#d?e$f B dpeBdB d@e	e% e%B dB dBeeef dB dedB d.e	eK fddZQddedB d.dfddZRdddZS	ddeded.efddZTdde
deUfddZVdde	e dB fddZWd.e	d fddZXdYeYe"eY B de
d.e"eY fddZZd@e%dB e"e%dB  B de
d.e"e%dB  fddZ[dAe	e
 dB de
d.e"e
 fddZ\d3dddd;d<e!e"e! B dYeeBB e"eeBB  B d>ee#d?e$f B d@e"e% e%B dB dAe	e
 dB dBeeef dB d.e	e fddZ]d3dddd;d<e!e"e! B dYeeBB e"eeBB  B dJee, d>ee#d?e$f B d@e"e% e%B dB dAe	e
 dB dBeeef dB fddZ^d3dddd3dddddd
dle	e< e"e	e<  B dYeeBB e"eeBB  B dJee, d>ee#d?e$f B d@e"e% e%B dB dedB dbe=ddedeedfe	eeef  dB dceeef dB dBeeef dB d%eeef dB fddZ_ddd3dd<e`e. dYe"eeBB  dJee, de"e%dB  dB de"e
 dB d>ee#d?e$f B fddZadddd<e`e. dYe"eeBB  de"e%dB  dB de"e
 dB d.e	e f
ddZb		ddMe.dYeeBB d@e%dB dAe
d.ef
ddZcd3dGdJee, e-ee, d?f B d>ee#d?e$f B d.e	e, fddZddeeeB d.dfddZfdegeB d.dfddZhd.efddZidS )LLMax  An LLM for generating texts from given prompts and sampling parameters.

    This class includes a tokenizer, a language model (possibly distributed
    across multiple GPUs), and GPU memory space allocated for intermediate
    states (aka KV cache). Given a batch of prompts and sampling parameters,
    this class generates texts from the model, using an intelligent batching
    mechanism and efficient memory management.

    Args:
        model: The name or path of a HuggingFace Transformers model.
        tokenizer: The name or path of a HuggingFace Transformers tokenizer.
        tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
            if available, and "slow" will always use the slow tokenizer.
        skip_tokenizer_init: If true, skip initialization of tokenizer and
            detokenizer. Expect valid prompt_token_ids and None for prompt
            from the input.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        allowed_local_media_path: Allowing API requests to read local images
            or videos from directories specified by the server file system.
            This is a security risk. Should only be enabled in trusted
            environments.
        allowed_media_domains: If set, only media URLs that belong to this
            domain can be used for multi-modal inputs.
        tensor_parallel_size: The number of GPUs to use for distributed
            execution with tensor parallelism.
        dtype: The data type for the model weights and activations. Currently,
            we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
            the `dtype` attribute of the Transformers model's config. However,
            if the `dtype` in the config is `float32`, we will use `float16` instead.
        quantization: The method used to quantize the model weights. Currently,
            we support "awq", "gptq", and "fp8" (experimental).
            If None, we first check the `quantization_config` attribute in the
            model config file. If that is None, we assume the model weights are
            not quantized and use `dtype` to determine the data type of
            the weights.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id.
        chat_template: The chat template to apply.
        seed: The seed to initialize the random number generator for sampling.
        gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
            reserve for the model weights, activations, and KV cache. Higher
            values will increase the KV cache size and thus improve the model's
            throughput. However, if the value is too high, it may cause out-of-
            memory (OOM) errors.
        kv_cache_memory_bytes: Size of KV Cache per GPU in bytes. By default,
            this is set to None and vllm can automatically infer the kv cache
            size based on gpu_memory_utilization. However, users may want to
            manually specify the kv cache memory size. kv_cache_memory_bytes
            allows more fine-grain control of how much memory gets used when
            compared with using gpu_memory_utilization. Note that
            kv_cache_memory_bytes (when not-None) ignores
            gpu_memory_utilization
        cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
            the model weights. This virtually increases the GPU memory space
            you can use to hold the model weights, at the cost of CPU-GPU data
            transfer for every forward pass.
        offload_group_size: Prefetch offloading: Group every N layers
            together. Offload last `offload_num_in_group` layers of each group.
            Default is 0 (disabled).
        offload_num_in_group: Prefetch offloading: Number of layers to
            offload per group. Default is 1.
        offload_prefetch_step: Prefetch offloading: Number of layers to
            prefetch ahead. Higher values hide more latency but use more GPU
            memory. Default is 1.
        offload_params: Prefetch offloading: Set of parameter name segments
            to selectively offload. Only parameters whose names contain one of
            these segments will be offloaded (e.g., {"gate_up_proj", "down_proj"}
            for MLP weights, or {"w13_weight", "w2_weight"} for MoE expert
            weights). If None or empty, all parameters are offloaded.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid.
        enable_return_routed_experts: Whether to return routed experts.
        disable_custom_all_reduce: See
            [ParallelConfig][vllm.config.ParallelConfig].
        hf_token: The token to use as HTTP bearer authorization for remote files
            . If `True`, will use the token generated when running
            `hf auth login` (stored in `~/.cache/huggingface/token`).
        hf_overrides: If a dictionary, contains arguments to be forwarded to the
            HuggingFace config. If a callable, it is called to update the
            HuggingFace config.
        mm_processor_kwargs: Arguments to be forwarded to the model's processor
            for multi-modal data, e.g., image processor. Overrides for the
            multi-modal processor obtained from `AutoProcessor.from_pretrained`.
            The available overrides depend on the model that is being run.
            For example, for Phi-3-Vision: `{"num_crops": 4}`.
        pooler_config: Initialize non-default pooling config for the pooling model,
            e.g., `PoolerConfig(seq_pooling_type="MEAN", use_activation=False)`.
        compilation_config: Either an integer or a dictionary. If it is an
            integer, it is used as the mode of compilation optimization. If it
            is a dictionary, it can specify the full compilation configuration.
        attention_config: Configuration for attention mechanisms. Can be a
            dictionary or an AttentionConfig instance. If a dictionary, it will
            be converted to an AttentionConfig. Allows specifying the attention
            backend and other attention-related settings.
        **kwargs: Arguments for [`EngineArgs`][vllm.EngineArgs].

    Note:
        This class is intended to be used for offline inference. For online
        serving, use the [AsyncLLMEngine][vllm.AsyncLLMEngine] class instead.
    autoNF    r   g?)"runnerconvert	tokenizertokenizer_modeskip_tokenizer_inittrust_remote_codeallowed_local_media_pathallowed_media_domainstensor_parallel_sizedtypequantizationrevisiontokenizer_revisionchat_templateseedgpu_memory_utilizationcpu_offload_gboffload_group_sizeoffload_num_in_groupoffload_prefetch_stepoffload_paramsenforce_eagerenable_return_routed_expertsdisable_custom_all_reducehf_tokenhf_overridesmm_processor_kwargspooler_configstructured_outputs_configprofiler_configattention_configkv_cache_memory_bytescompilation_configlogits_processorsmodelrX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   rr   rs   rt   ru   rv   rw   rx   ry   kwargsreturnc       "   3   
   K   s  d|$v r|$ d ddl}%|%jdtdd d|$vrd|$d< d	|$v r1|$d	 }&t|&tr1t|&|$d	< d
|$v rmt|$d
 trmddl	m
}' |$d
 }(z|'d@i |(|$d
< W n tyl }) ztd|(|) td|) |)d})~)ww |du rsi }dtdtt dtfdd}*t|"trtt|"d}+n|*|"t}+|*|t},|*|t}-|*| t}.t|$dd}/|$d}0|/dkr|0dkst std|/ dtd@i d|d|d|d|d|d|d |d!|d"|	d#|
d$|d%|d&|d'|d(|d)|d*|!d+|d,|d-|d.|d/|pt d0|d1|d2|d3|d4|d5|d6|d7|,d8|-d9|.d:|+d;|#|$}1t|1 tj|1tj d<| _!t| j!| _"t# | _$d| _%| j!& }2t'd=|2 |2| _(| j!j)| _)| j!j*| _*t+|| _,| j!j-| _-| j!j.| _.t/| j,d>| _0t1|2| j)| j*| j0d?| _2d| _3dS d0|d1|d2|d3|d4|d5|d6|d7|,d8|-d9|.d:|+d;|#|$}1t|1 tj|1tj d<| _!t| j!| _"t# | _$d| _%| j!& }2t'd=|2 |2| _(| j!j)| _)| j!j*| _*t+|| _,| j!j-| _-| j!j.| _.t/| j,d>| _0t1|2| j)| j*| j0d?| _2d| _3dS )AzLLM constructor.
swap_spacer   Nz]The 'swap_space' parameter is deprecated and ignored. It will be removed in a future version.   )
stackleveldisable_log_statsT
worker_clskv_transfer_config)KVTransferConfigz[Failed to convert 'kv_transfer_config' dict to KVTransferConfig object. Dict: %s. Error: %sz'Invalid 'kv_transfer_config' provided: valueclsr|   c                    s<   | du r  S t | tr di  fdd|  D S | S )z0Convert dict/None/instance to a config instance.Nc                    s    i | ]\}}t  |r||qS  )r   ).0kvr   r   a/lsinfo/ai/hellotax_ai/llm_service/venv_vllm/lib/python3.10/site-packages/vllm/entrypoints/llm.py
<dictcomp>5  s     z6LLM.__init__.<locals>._make_config.<locals>.<dictcomp>r   )
isinstancedictitems)r   r   r   r   r   _make_config0  s
   
 z"LLM.__init__.<locals>._make_config)modedata_parallel_sizerW   distributed_executor_backendexternal_launcherzLLM(data_parallel_size=z) is not supported for single-process usage and may hang. Please use the explicit multi-process data-parallel example at 'examples/offline_inference/data_parallel.py'.rz   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   rf   rg   rw   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   rr   rs   rt   ru   rv   rx   ry   )engine_argsusage_contextzSupported tasks: %s)re   )supported_tasksmodel_configrendererchat_template_configr   )4popwarningswarnDeprecationWarningr   typecloudpickledumpsr   vllm.config.kv_transferr   r   loggererror
ValueErrorr   rS   intr   r   r   r   r   getr;   is_tpur   setr,   rL   from_engine_argsrG   	LLM_CLASS
llm_engineengine_classrH   request_counterdefault_sampling_paramsget_supported_tasksinfor   r   r   r"   re   io_processorinput_processorr    r   r#   pooling_io_processors_cached_repr)3selfrz   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   rm   rn   ro   rp   rq   rr   rs   rt   ru   rv   rw   rx   ry   r{   r   r   r   raw_config_dicter   compilation_config_instancestructured_outputs_instanceprofiler_config_instanceattention_config_instance_dp_size_distributed_executor_backendr   r   r   r   r   __init__   s  ,







	
 !"&






 !"&






zLLM.__init__c                 C   
   | j  S N)r   get_tokenizerr   r   r   r   r     s   
zLLM.get_tokenizerT
include_dpc                 C   s   | j jj}|r
|jS |jS )a  Get the world size from the parallel config.

        Args:
            include_dp: If True (default), returns the world size including
                data parallelism (TP * PP * DP). If False, returns the world
                size without data parallelism (TP * PP).

        Returns:
            The world size (tensor_parallel_size * pipeline_parallel_size),
            optionally multiplied by data_parallel_size if include_dp is True.
        )r   vllm_configparallel_configworld_size_across_dp
world_size)r   r   r   r   r   r   get_world_size  s   
zLLM.get_world_sizec                 C   s   | j   | j  d S r   )r   clear_mm_cacher   reset_mm_cacher   r   r   r   r     s   
zLLM.reset_mm_cachec                 C   s4   | j d u r| j | _ | j rtjdi | j S t S )Nr   )r   r   get_diff_sampling_paramrD   from_optionalr   r   r   r   get_default_sampling_params  s
   
zLLM.get_default_sampling_params)use_tqdmlora_requestprioritytokenization_kwargspromptssampling_paramsr   .r   r   r   c             	   C   s@   | j j}|dkrtd|du r|  }| j||t||||dS )a  Generates the completions for the input prompts.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompt.
            sampling_params: The sampling parameters for text generation. If
                None, we use the default sampling parameters.
                When it is a single value, it is applied to every prompt.
                When it is a list, the list must have the same length as the
                prompts and it is paired one by one with the prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            priority: The priority of the requests, if any.
                Only applicable when priority scheduling policy is enabled.
                If provided, must be a list of integers matching the length
                of `prompts`, where each priority value corresponds to the prompt
                at the same index.
            tokenization_kwargs: Overrides for `tokenizer.encode`.

        Returns:
            A list of `RequestOutput` objects containing the
            generated completions in the same order as the input prompts.
        generatezLLM.generate() is only supported for generative models. Try passing `--runner generate` to use the model as a generative model.N)r   paramsoutput_typer   r   r   r   )r   runner_typer   r   _run_completionr9   )r   r   r   r   r   r   r   r   r   r   r   r     s    )zLLM.generatec                 C   s>   | j j}|dkrtd|du r|  }| j||||||dS )a  Enqueue prompts for generation without waiting for completion.

        This method adds requests to the engine queue but does not start
        processing them. Use wait_for_completion() to process the queued
        requests and get results.

        Args:
            prompts: The prompts to the LLM. See generate() for details.
            sampling_params: The sampling parameters for text generation.
            lora_request: LoRA request to use for generation, if any.
            priority: The priority of the requests, if any.
            use_tqdm: If True, shows a tqdm progress bar while adding requests.
            tokenization_kwargs: Overrides for `tokenizer.encode`.

        Returns:
            A list of request IDs for the enqueued requests.
        r   z6LLM.enqueue() is only supported for generative models.Nr   r   r   r   r   r   )r   r   r   r   _add_completion_requests)r   r   r   r   r   r   r   r   r   r   r   enqueue  s   zLLM.enqueuer   c                C      d S r   r   )r   r   r   r   r   wait_for_completion   s   zLLM.wait_for_completionr   c                C   r   r   r   r   r   r   r   r   r   r   '  s   c                C   s   |du rt tf}| j||dS )a  Wait for all enqueued requests to complete and return results.

        This method processes all requests currently in the engine queue
        and returns their outputs. Use after enqueue() to get results.

        Args:
            output_type: The expected output type, defaults to RequestOutput.
            use_tqdm: If True, shows a tqdm progress bar.

        Returns:
            A list of output objects for all completed requests.
        Nr   )r9   r8   _run_enginer   r   r   r   r   /  s   promptc           
      C   s   |d dkr|S | j jj}|d u rd n|j}|s|S |d  }t|| }|s-|S t|dkr;t	d| |S |
 }|| }t||d }	|rZ|j|	krXt	d |S t||	|S )Nr   
multimodalmm_placeholdersrW   zMultiple modality specific loras were registered and would be used by a single prompt consuming several modalities; currently we only support one lora per request; as such, lora(s) registered with modalities: %s will be skippedzA modality with a registered lora and a lora_request with a different ID were provided; falling back to the lora_request as we only apply one LoRARequest per prompt)r   r   lora_configdefault_mm_loraskeysr   intersectionlenr   warningr   sortedindexlora_int_idr4   )
r   r   r   r   r   prompt_modalitiesr   modality_namemodality_lora_pathmodality_lora_idr   r   r   _resolve_mm_loraF  s<   

zLLM._resolve_mm_lorar   methodtimeoutargsc                 C   s   | j ||||S )a  
        Execute an RPC call on all workers.

        Args:
            method: Name of the worker method to execute, or a callable that
                is serialized and sent to all workers to execute.

                If the method is a callable, it should accept an additional
                `self` argument, in addition to the arguments passed in `args`
                and `kwargs`. The `self` argument will be the worker object.
            timeout: Maximum time in seconds to wait for execution. Raises a
                [`TimeoutError`][] on timeout. `None` means wait indefinitely.
            args: Positional arguments to pass to the worker method.
            kwargs: Keyword arguments to pass to the worker method.

        Returns:
            A list containing the results from each worker.

        Note:
            It is recommended to use this API to only pass control messages,
            and set up data-plane communication to pass data.
        )r   collective_rpc)r   r   r   r   r{   r   r   r   r   z  s   zLLM.collective_rpcfuncc                 C   s   | j |S )a  
        Run a function directly on the model inside each worker,
        returning the result for each of them.

        !!! warning
            To reduce the overhead of data transfer, avoid returning large
            arrays or tensors from this method. If you must return them,
            make sure you move them to CPU first to avoid taking up additional
            VRAM!
        )r   apply_model)r   r   r   r   r   r     s   zLLM.apply_modelr   concurrency_limitc           ,      C   s  |j }|j}|j}|j}	|j}
| j }|j}t||
}| 	|}| 
|t|}|r6|dur6td d}|du r>t|}td| d|dd}g }t||D ]\}}|d d	kr]td
|t||dd qOtdt||D ]}||||  }t|}|rt|dddd}td |D ]}ttdd |D g }dgttdd |D  }tt|dd |dd }t|dkr n| jdd |D | |t|tdd |D dd}t||D ]m\\}}}g }t||D ]Q} ||  }!||  }"|"jd jdur@|"jd jd }#|# D ]1\}$}%t|!j |!j!|$g |!j|#g |!j"|!j#|%j$ d}&|$|kr9|	s9|j%|& q||& qqt&||dd}'|'d| |_'qqqpg }(|D ]-}|j%(|j' t&|j%|dd})|)d| }*|*D ]
}+|)|+j!|+_*qo|(t+|*d qV|(S )a  
        Generate sequences using beam search.

        Args:
            prompts: A list of prompts. Each prompt can be a string or a list
                of token IDs.
            params: The beam search parameters.
            lora_request: LoRA request to use for generation, if any.
            use_tqdm: Whether to use tqdm to display the progress bar.
            concurrency_limit: The maximum number of concurrent requests.
                If None, the number of concurrent requests is unlimited.
        NzSProgress bar is not supported when using concurrency_limit. Disabling progress bar.Fr~   rW   T)logprobs
max_tokenstemperature
skip_cloner   embedsz.Embedding prompt not supported for beam search)r   r   r   zBeam searchtoken)descunit
unit_scalezThe progress bar shows the upper bound on token steps and may finish early due to stopping conditions. It does not reflect instance-level progress.c                 s   s    | ]}|j V  qd S r   )beamsr   instancer   r   r   	<genexpr>  s    z"LLM.beam_search.<locals>.<genexpr>c                 s       | ]}t |jV  qd S r   )r   r  r  r   r   r   r        

c                 s   s    | ]}|  V  qd S r   )
get_promptr   beamr   r   r   r    s    c                 S   s   g | ]}|j qS r   )r   r  r   r   r   
<listcomp>  s    z#LLM.beam_search.<locals>.<listcomp>r   r   r   lora_requestsr   )tokensr   r   cum_logprob)keyreverse)	sequences),
beam_widthr   r   
ignore_eoslength_penaltyr   r   eos_token_idr   _preprocess_cmpl_lora_request_to_seqr   r   r   rD   zipNotImplementedErrorappendr   ranger	   listsum	itertools
accumulate_render_and_run_requests_params_to_seqr9   outputsr   r   r   orig_promptr  r   r  logprob	completedr   r  extenddecodetextr   ),r   r   r   r   r   r   r  r   r   r  r  rZ   r  sort_beams_keyengine_promptsr  r   	instanceslora_reqr   prompt_startinstances_batch
token_iter_	all_beamsposinstance_start_and_endoutputstartendr  instance_new_beamsicurrent_beamresultr   token_idlogprob_objnew_beamsorted_beamsr%  sorted_completed
best_beamsr  r   r   r   beam_search  s   





	!zLLM.beam_searchc                    s@   | j }| j  fdd|D }|jjdi |pi }|||S )a@  
        Convert prompt inputs from LLM APIs (other than [LLM.chat][]) into
        a format that can be passed to `_add_request`.

        Refer to [LLM.generate][] for a complete description of the arguments.

        Returns:
            A list of `ProcessorInputs` objects ready to be passed into LLMEngine.
        c                    s   g | ]}t  |qS r   )r@   r   r   r   r   r   r  U  s    
z(LLM._preprocess_cmpl.<locals>.<listcomp>Nr   )r   r   default_cmpl_tok_paramswith_kwargsrender_cmpl)r   r   r   r   parsed_prompts
tok_paramsr   rF  r   r  D  s   

zLLM._preprocess_cmplc                 C   s   |  |g|\}|S r   )r  )r   r   r   engine_promptr   r   r   _preprocess_cmpl_one^  s   zLLM._preprocess_cmpl_oneconversationschat_template_content_formatchat_template_kwargsadd_generation_promptcontinue_final_messagetoolsc
                 C   s`   | j }
t||t|t|||t|
jdd}|
jjdi |pi }|
j|||d|	id\}}|S )a2  
        Convert a list of conversations into prompts so that they can then
        be used as input for other LLM APIs.

        Refer to [LLM.chat][] for a complete description of the arguments.

        Returns:
            A list of `ProcessorInputs` objects ready to be passed into LLMEngine.
        )rQ  rR  rS  tokenize)re   rO  rP  rr   )prompt_extrasNr   )	r   r=   r>   r   rI   rZ   default_chat_tok_paramsrH  render_chat)r   rN  re   rO  rP  rQ  rR  rS  r   rr   r   chat_paramsrK  r3  r-  r   r   r   _preprocess_chatf  s0   

zLLM._preprocess_chatconversationc
                 C   s$   | j |g||||||||	d	\}
|
S )Nre   rO  rP  rQ  rR  rS  r   rr   )rY  )r   rZ  re   rO  rP  rQ  rR  rS  r   rr   rL  r   r   r   _preprocess_chat_one  s   zLLM._preprocess_chat_onemessagesc                 C   sP   | j }|j}|dkrtd|du r|  }| j||t|||||
|||	||dS )a	  
        Generate responses for a chat conversation.

        The chat conversation is converted into a text prompt using the
        tokenizer and calls the [generate][vllm.LLM.generate] method to generate
        the responses.

        Multi-modal inputs can be passed in the same way you would pass them
        to the OpenAI API.

        Args:
            messages: A sequence of conversations or a single conversation.

                - Each conversation is represented as a list of messages.
                - Each message is a dictionary with 'role' and 'content' keys.

            sampling_params: The sampling parameters for text generation.
                If None, we use the default sampling parameters. When it
                is a single value, it is applied to every prompt. When it
                is a list, the list must have the same length as the
                prompts and it is paired one by one with the prompt.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            chat_template: The template to use for structuring the chat.
                If not provided, the model's default chat template will be used.
            chat_template_content_format: The format to render message content.

                - "string" will render the content as a string.
                  Example: `"Who are you?"`
                - "openai" will render the content as a list of dictionaries,
                  similar to OpenAI schema.
                  Example: `[{"type": "text", "text": "Who are you?"}]`

            add_generation_prompt: If True, adds a generation template
                to each message.
            continue_final_message: If True, continues the final message in
                the conversation instead of starting a new one. Cannot be
                `True` if `add_generation_prompt` is also `True`.
            chat_template_kwargs: Additional kwargs to pass to the chat
                template.
            tokenization_kwargs: Overrides for `tokenizer.encode`.
            mm_processor_kwargs: Overrides for `processor.__call__`.

        Returns:
            A list of `RequestOutput` objects containing the generated
            responses in the same order as the input messages.
        r   z{LLM.chat() is only supported for generative models. Try passing `--runner generate` to use the model as a generative model.N)r]  r   r   r   r   re   rO  rP  rQ  rR  rS  r   rr   )r   r   r   r   	_run_chatr9   )r   r]  r   r   r   re   rO  rQ  rR  rS  rP  r   rr   r   r   r   r   r   chat  s.   BzLLM.chat)r   r   pooling_taskr   pooling_paramsr`  c                   s  |du rt d j}|j}|dkrt dt|trd|v r jdu r(t d|d}	|	du r5t d j|	}
 jj|
d}t	|} fd	d
 
|t|D }|D ]
}|jdu rad|_qW j||t|||d} jdustJ  j|}tt d|t|ddg ddgS |du rt }t	|} 
|t|}|D ]}|jdu r||_q|j|krd|jd|d}t |q| jv r j| }|||} |t|} dt|} j||||d  j|td}||}|S  j||t|||d}|S )a  Apply pooling to the hidden states corresponding to the input
        prompts.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompt.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            pooling_task: Override the pooling task to use.
            tokenization_kwargs: Overrides for `tokenizer.encode`.

        Returns:
            A list of `PoolingRequestOutput` objects containing the
            pooled hidden states in the same order as the input prompts.
        Na  pooling_task required for `LLM.encode`
Please use one of the more specific methods or set the pooling_task when using `LLM.encode`:
  - For embeddings, use `LLM.embed(...)` or `pooling_task="embed"`.
  - For classification logits, use `LLM.classify(...)` or `pooling_task="classify"`.
  - For similarity scores, use `LLM.score(...)`.
  - For rewards, use `LLM.reward(...)` or `pooling_task="token_classify"`
  - For token classification, use `pooling_task="token_classify"`
  - For multi-vector retrieval, use `pooling_task="token_embed"`poolingzvLLM.encode() is only supported for pooling models. Try passing `--runner pooling` to use the model as a pooling model.datazNo IOProcessor plugin installed. Please refer to the documentation and to the 'prithvi_geospatial_mae_io_processor' offline inference example for more details.zThe 'data' field of the prompt is expected to contain the prompt data and it cannot be None. Refer to the documentation of the IOProcessor in use for more details.)r   c                    s   g | ]} j |qS r   )r   merge_pooling_params)r   paramr   r   r   r  e  s    
zLLM.encode.<locals>.<listcomp>plugin)r   r   r   r   r   r   rV   num_cached_tokensr   T)
request_idr%  rg  prompt_token_idsfinishedz You cannot overwrite param.task=z with pooling_task=!r   r   r  
prioritiesr   r   )r   r   r   r   r   r   r   
parse_datapre_processrA   r$  r   taskr   r8   post_processr   getattrr<   r   pre_process_offliner  _priority_to_seq_render_and_add_requestsr   post_process_offline)r   r   ra  r   r   r`  r   r   r   prompt_datavalidated_promptprompts_seq
params_seqpr%  processed_outputsre  msgr   processor_inputsseq_lora_requestsseq_priorityr   r   r   encode  s   $










z
LLM.encoder   ra  r   r   c                C   s6   d| j vr	td| j||||d|d}dd |D S )a  
        Generate an embedding vector for each prompt.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompt.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            tokenization_kwargs: Overrides for `tokenizer.encode`.

        Returns:
            A list of `EmbeddingRequestOutput` objects containing the
            embedding vectors in the same order as the input prompts.
        embedz_Embedding API is not supported by this model. Try converting the model using `--convert embed`.r   ra  r   r`  r   c                 S      g | ]}t |qS r   )r7   	from_baser   itemr   r   r   r        zLLM.embed.<locals>.<listcomp>r   r   r  )r   r   r   ra  r   r   r   r   r   r   r       
!	z	LLM.embed)ra  r   r   r   c                C   s6   d| j vr	td| j||||d|d}dd |D S )a  
        Generate class logits for each prompt.

        This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your prompts
        into a single list and pass it to this method.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompt.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            tokenization_kwargs: Overrides for `tokenizer.encode`.

        Returns:
            A list of `ClassificationRequestOutput` objects containing the
            embedding vectors in the same order as the input prompts.
        classifyzgClassification API is not supported by this model. Try converting the model using `--convert classify`.r  c                 S   r  r   )r6   r  r  r   r   r   r    r  z LLM.classify.<locals>.<listcomp>r  )r   r   ra  r   r   r   r   r   r   r   r    r  zLLM.classifyc               C   s   | j ||||d|dS )a  
        Generate rewards for each prompt.

        Args:
            prompts: The prompts to the LLM. You may pass a sequence of prompts
                for batch inference. See [PromptType][vllm.inputs.PromptType]
                for more details about the format of each prompt.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            tokenization_kwargs: Overrides for `tokenizer.encode`.

        Returns:
            A list of `PoolingRequestOutput` objects containing the
            pooled hidden states in the same order as the input prompts.
        token_classifyr   r   ra  r`  r   )r  )r   r   ra  r   r   r   r   r   r   reward  s   z
LLM.rewarddata_1data_2c                C   s   |   }g }|| D ]}	t|	tstd||	 q
| j||||d|d}
|
dt| }|
t|d  }t|dkrB|t| }t|||d}dd |D S )	Nz;Embedding scores currently do not support multimodal input.r  r  r   rW   )rZ   embed_1embed_2c                 S   r  r   r:   r  r  r   r   r   r  m  r  z(LLM._embedding_score.<locals>.<listcomp>)r   r   strr  r  r  r   r&   )r   r  r  r   ra  r   r   rZ   input_textsr+  encoded_outputencoded_output_1encoded_output_2scoresr   r   r   _embedding_scoreD  s4   

	zLLM._embedding_scorec             
   C   s   ddl m} |  }| j}	t|d|	}
t|d|	}| j|
| |||d|d}|dt|
 }|t|
d }t|dkrB|t| }g }g }|j }durP|g}t||D ]3\}}|j	j
}|j	j
}t||}|j| |j }|t|j d	|j ||d
||j|j dd qUdd |D S )z
        Late interaction scoring (ColBERT MaxSim).

        Encodes queries and documents into per-token embeddings, then computes
        MaxSim: sum over query tokens of max similarity to any document token.
        r   )PoolingOutputquerydocumenttoken_embedr  NrW   r3  )rc  T)rh  r%  ri  rg  rj  c                 S   r  r   r  r  r   r   r   r    r  z/LLM._late_interaction_score.<locals>.<listcomp>)vllm.outputsr  r   r   r*   r  r   pad_token_idr  r%  rc  r(   ri  r  r8   rh  rg  )r   r  r  r   ra  r   r   r  rZ   r   	prompts_1	prompts_2r  r  r  r  paddingr  emb_1emb_2q_embd_embmaxsim_scorer  r   r   r   _late_interaction_scoreo  sH   	


zLLM._late_interaction_scorescore_templatec             	   C   s  | j }|  }	t|	rtdt|dkr|t| }|d u r%tdd}n|jd u r-d|_tt  }
tt  }dd t	||D }|D ]5\}}t
||||	||d\}}|dd  }rn| }t|}d	|i|_|
| n|
| || qC| j||
t||d
}dd |D S )Nz0Score API is not supported for Mistral tokenizerrW   score)rq  c                 S   s   g | ]\}}||fqS r   r   )r   t1t2r   r   r   r    s    z-LLM._cross_encoding_score.<locals>.<listcomp>)r   r  r  rZ   r   r  token_type_idscompressed_token_type_ids)r   r   r   r   r   c                 S   r  r   r  r  r   r   r   r    r  )r   r   rI   r   r   r<   rq  r  r/   r  r)   r   cloner'   extra_kwargsr  r   r8   )r   r  r  r   ra  r   r   r  r   rZ   pooling_params_listr   input_pairsqdr3  rL  r  r   
compressedr%  r   r   r   _cross_encoding_score  sJ   



	

zLLM._cross_encoding_score)r   ra  r   r   re   c            	      s  | j }|j}	|	dkrtd| j | j j}
|
dk}|
dk}|s.t fdddD r.td|r=t|jd	d
dkr=td|sG|durGtd|j}|j	}t
||||d\}}| j}|jjdi |pbi }| }|rw| j|||||||dS |r| j||||||dS | j||||||dS )a]  Generate similarity scores for all pairs `<text,text_pair>` or
          `<multi-modal data, multi-modal data pair>`.

        The inputs can be `1 -> 1`, `1 -> N` or `N -> N`.
        In the `1 - N` case the `data_1` input will be replicated `N`
        times to pair with the `data_2` inputs.
        The input pairs are used to build a list of prompts for the
        cross encoder model. This class automatically batches the prompts,
        considering the memory constraint. For the best performance, put all
        of your inputs into a single list and pass it to this method.

        Supports both text and multi-modal data (images, etc.) when used with
        appropriate multi-modal models. For multi-modal inputs, ensure the
        prompt structure matches the model's expected input format.

        Args:
            data_1: Can be a single prompt, a list of prompts or
                `ScoreMultiModalParam`, which can contain either text or
                multi-modal data. When a list, it must have the same length as
                the `data_2` list.
            data_2: The data to pair with the query to form the input to
                the LLM. Can be text or multi-modal data. See [PromptType]
                [vllm.inputs.PromptType] for more details about the format of
                each prompt.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            use_tqdm: If `True`, shows a tqdm progress bar.
                If a callable (e.g., `functools.partial(tqdm, leave=False)`),
                it is used to create the progress bar.
                If `False`, no progress bar is created.
            lora_request: LoRA request to use for generation, if any.
            chat_template: The chat template to use for the scoring. If None, we
                use the model's default chat template.
            tokenization_kwargs: Overrides for `tokenizer.encode`.
        Returns:
            A list of `ScoringRequestOutput` objects containing the
            generated scores in the same order as the input prompts.
        rb  zuLLM.score() is only supported for pooling models. Try passing `--runner pooling` to use the model as a pooling model.zlate-interactionzcross-encoderc                 3   s    | ]}| vV  qd S r   r   )r   tr   r   r   r  8  s    
zLLM.score.<locals>.<genexpr>)r  r  zsScore API is not supported by this model. Try converting the model using `--convert embed` or `--convert classify`.
num_labelsr   rW   z.Score API is only enabled for num_labels == 1.Nz9chat_template is only supported for cross-encoder models.)is_multimodal_modelarchitecture)r   ra  r   r   r  r  r   )r   r   r   r   
score_typeallrs  	hf_configr  r  r+   r   rG  rH  get_encode_kwargsr  r  r  )r   r  r  r   ra  r   r   re   r   r   r  is_late_interactionis_cross_encoderr  r  score_data_1score_data_2r   rK  encode_kwargsr   r  r   r    s|   8

		z	LLM.scoreprofile_prefixc                 C      | j | dS )a<  Start profiling with optional custom trace prefix.

        Args:
            profile_prefix: Optional prefix for the trace file names. If provided,
                           trace files will be named as "<prefix>_dp<X>_pp<Y>_tp<Z>".
                           If not provided, default naming will be used.
        N)r   start_profile)r   r  r   r   r   r  v  s   zLLM.start_profilec                 C   s   | j   d S r   )r   stop_profiler   r   r   r   r    s   zLLM.stop_profilereset_running_requestsreset_connectorc                 C   s   | j ||S r   )r   reset_prefix_cache)r   r  r  r   r   r   r    s   zLLM.reset_prefix_cacheabortlevelr   c                 C   s   | j j||d dS )a  
        Put the engine to sleep. The engine should not process any requests.
        The caller should guarantee that no requests are being processed
        during the sleep period, before `wake_up` is called.

        Args:
            level: The sleep level.
                - Level 0: Pause scheduling but continue accepting requests.
                           Requests are queued but not processed.
                - Level 1: Offload model weights to CPU, discard KV cache.
                           The content of kv cache is forgotten. Good for
                           sleeping and waking up the engine to run the same
                           model again. Please make sure there's enough CPU
                           memory to store the model weights.
                - Level 2: Discard all GPU memory (weights + KV cache).
                           Good for sleeping and waking up the engine to run
                           a different model or update the model, where
                           previous model weights are not needed. It reduces
                           CPU memory pressure.
            mode: How to handle any existing requests, can be "abort", "wait",
                or "keep".
        )r  r   N)r   sleep)r   r  r   r   r   r   r    s   z	LLM.sleeptagsc                 C   r  )a(  
        Wake up the engine from sleep mode. See the [sleep][vllm.LLM.sleep]
        method for more details.

        Args:
            tags: An optional list of tags to reallocate the engine memory
                for specific memory allocations. Values must be in
                `("weights", "kv_cache", "scheduling")`. If None, all memory
                is reallocated. wake_up should be called with all tags
                (or None) before the engine is used again.
                Use tags=["scheduling"] to resume from level 0 sleep.
        N)r   wake_up)r   r  r   r   r   r    s   zLLM.wake_uprN   c                 C   r   )a  Return a snapshot of aggregated metrics from Prometheus.

        Returns:
            A `MetricSnapshot` instance capturing the current state
            of all aggregated metrics from Prometheus.

        Note:
            This method is only available with the V1 LLM engine.
        )r   get_metricsr   r   r   r   r    s   

zLLM.get_metricsnum_requestsc                 C   s>   t |trt||krtd| dt| d|S |g| S )NThe lengths of prompts (z) and params () must be the same.r   r   r   r   )r   r   r  r   r   r   r$       

zLLM._params_to_seqc                 C   s>   t |trt||krtd| dt| d|S |g| S )Nr  z) and lora_request (r  r  )r   r   r  r   r   r   r    r  zLLM._lora_request_to_seqc                 C   s<   |d urt ||krtd| dt | d|S dg| S )Nr  z) and priority (r  r   )r   r   )r   r   r  r   r   r   ru    s   
zLLM._priority_to_seqc                   sd   t |} |t|} |t|}	 |t|}
 j fddt||ddD ||	|
dS )Nc                 3   s    | ]	}  |V  qd S r   )rM  rE  r   r   r   r   r     s
    

z/LLM._add_completion_requests.<locals>.<genexpr>zRendering promptsr   r   rl  )rA   r$  r   r  ru  rv  rJ   )r   r   r   r   r   r   r   seq_prompts
seq_paramsr  r  r   r  r   r     s    zLLM._add_completion_requestsc                C   s$   | j ||||||d | j||dS )Nr   rn  )r   r   )r   r   r   r   r   r   r   r   r   r   r   r     s   zLLM._run_completion)
r   r   re   rO  rQ  rR  rS  rP  r   rr   c       
      
      sd   t |}|t|}|t|}j f	ddt||ddD ||||dS )Nc                 3   s,    | ]}j | d 	V  qdS )r[  N)r\  )r   rZ  	rQ  re   rO  rP  rR  rr   r   r   rS  r   r   r  =  s    
z LLM._run_chat.<locals>.<genexpr>zRendering conversationsr  r  )r?   r$  r   r  r#  rJ   )r   r]  r   r   r   r   re   rO  rQ  rR  rS  rP  r   rr   	seq_convsr  r  r   r  r   r^  $  s    zLLM._run_chat)r  rm  r   r  rm  c                C   s8   t |ttfrtd | j||||d | j||dS )Na>  Rendering all prompts before adding them to the engine is less efficient than performing both on the same prompt before processing the next prompt. You should instead pass a generator that renders one prompt per iteration, as that allows engine execution to begin for the first prompt while processing the next prompt.rl  r   )r   r  tupler   warning_oncerv  r   )r   r   r   r   r  rm  r   r   r   r   r#  U  s   
	zLLM._render_and_run_requests)r  rm  c          
   
   C   s   g }z0t |D ](\}}| j||| | ||d u rd n|| |d u r$dn|| d}|| qW |S  tyJ }	 z|rD| jj|dd |	d }	~	ww )Nr   r   r   T)internal)	enumerate_add_requestr   r  	Exceptionr   abort_request)
r   r   r   r  rm  added_request_idsr;  r   rh  r   r   r   r   rv  r  s*   	zLLM._render_and_add_requestsc                 C   s6   t |tr	tj|_tt| j}| jj	|||||dS )Nr  )
r   rD   rC   
FINAL_ONLYoutput_kindr  nextr   r   add_request)r   r   r   r   r   rh  r   r   r   r    s   
zLLM._add_requestc             	   C   sZ  |r | j  }t|r|nt}||dddddddddd}g }d}d}| j  r| j  }	|	D ]g}
t|
|s;J |
jr||
 |rt|
t	rt
|
j}|
jd usVJ |t
|
j| 7 }||jd	  }|td
d |
jD 7 }||jd	  }d|dd|dd|_|| n|d |j|kr|  q2| j  s+|r|  t|dd dS )NzProcessed promptsTzest. speed input: r   z.2fz toks/s, output: z toks/s)totalr   dynamic_ncolspostfixelapsedc                 s   r  r   )r   	token_ids)r   stpr   r   r   r    r  z"LLM._run_engine.<locals>.<genexpr>rW   c                 S   s
   t | jS r   )r   rh  )xr   r   r   <lambda>  s   
 z!LLM._run_engine.<locals>.<lambda>)r  )r   get_num_unfinished_requestscallabler	   has_unfinished_requestsstepr   rj  r  r9   r   r%  ri  format_dictr   r  updatenrefreshcloser   )r   r   r   r  	tqdm_funcpbarr%  total_in_tokstotal_out_toksstep_outputsr7  r  in_spdout_spdr   r   r   r     sR   











zLLM._run_enginerequestc                 C   0   t |tr	|d n|j}| jjdd|id dS )z
        Initialize weight transfer for RL training.

        Args:
            request: Weight transfer initialization request with backend-specific info
        	init_infoinit_weight_transfer_enginer{   N)r   r   r  r   r   )r   r  init_info_dictr   r   r   r    s
   

zLLM.init_weight_transfer_enginec                 C   r  )z
        Update the weights of the model.

        Args:
            request: Weight update request with backend-specific update info
        update_infoupdate_weightsr	  N)r   r   r  r   r   )r   r  update_info_dictr   r   r   r    s
   
zLLM.update_weightsc                 C   sB   | j du r| jd}|r|d | _ | j S d| jjd| _ | j S )z;Return a transformers-style hierarchical view of the model.Nget_model_inspectionr   z
LLM(model=))r   r   r   r   rz   )r   resultsr   r   r   __repr__  s   

zLLM.__repr__)T)r|   Nr   )NNNTN)Nr   N)NFN)NrU   NTFNNN)NTNNrU   TFNNNN)FF)rW   r  )Nr   )j__name__
__module____qualname____doc__r  r   r   r   boolr  r   r   r5   r   floatr   r   r   r   r   r   r   r   r   r   rM   r   rF   r   r   r   rD   r   r/   r   r   r	   r4   r9   r   r   r   r8   r   rO   r  r.   r   rS   r   nnModuler   r2   r1   rB   r   rD  r  rM  r   r!   rY  r\  r_  r-   r<   rE   r  r7   r  r6   r  r  r$   r:   r  r  r  r0   r%   r  r  r  r  rK   r  r  r  rR   r$  r  ru  r   r   r^  r   r#  rv  r  r   r   r  r   r  r  r   r   r   r   rT   o   sF   m	





 #$%&'()
 A



	
A


*
	

7
" 

 !


	

4	


	

a	
 /

6

7
	
'
	
+
	
D
	

I
 











	

'

	


 

	

7


"


!

7
rT   ){r!  collections.abcr   r   r   pathlibr   typingr   r   r   torch.nnr  pydanticr   	tqdm.autor	   typing_extensionsr
   r   vllm.beam_searchr   r   r   r   vllm.configr   r   r   r   r   r   vllm.config.compilationr   vllm.config.modelr   r   r   r   r   %vllm.distributed.weight_transfer.baser   r   vllm.engine.arg_utilsr   vllm.entrypoints.chat_utilsr   r    r!   r"   /vllm.entrypoints.pooling.io_processor_factoriesr#   $vllm.entrypoints.pooling.score.utilsr$   r%   r&   r'   r(   r)   r*   r+   vllm.entrypoints.utilsr,   vllm.inputs.datar-   r.   r/   r0   r1   r2   vllm.loggerr3   vllm.lora.requestr4   'vllm.model_executor.layers.quantizationr5   r  r6   r7   r8   r9   r:   vllm.platformsr;   vllm.pooling_paramsr<   vllm.renderersr=   r>    vllm.renderers.inputs.preprocessr?   r@   rA   vllm.sampling_paramsrB   rC   rD   
vllm.tasksrE   vllm.tokenizersrF   vllm.usage.usage_librG   vllm.utils.counterrH   vllm.utils.mistralrI   vllm.utils.tqdm_utilsrJ   vllm.v1.enginerK   vllm.v1.engine.llm_enginerL   vllm.v1.sample.logits_processorrM   vllm.v1.metrics.readerrN   r  r   rO   rR   rS   rT   r   r   r   r   <module>   sb    (
 