
from paddlex.utils.device import get_default_device, parse_device

from ._constants import (
    DEFAULT_CPU_THREADS,
    DEFAULT_DEVICE,
    DEFAULT_ENABLE_MKLDNN,
    DEFAULT_MKLDNN_CACHE_CAPACITY,
    DEFAULT_PRECISION,
    DEFAULT_USE_TENSORRT,
    SUPPORTED_PRECISION_LIST,
    DEFAULT_USE_CINN,
)
from ._utils.cli import str2bool

SUPPORTED_INFERENCE_ENGINE_LIST = [
    "paddle",
    "paddle_static",
    "paddle_dynamic",
    "transformers",
]


def parse_common_args(kwargs, default_enable_hpi):
    # Fill in defaults for the shared inference arguments, reject unknown or
    # invalid values, and normalize the TensorRT-related option names.
    default_vals = {
        "device": DEFAULT_DEVICE,
        "engine": None,
        "engine_config": None,
        "enable_hpi": default_enable_hpi,
        "use_tensorrt": DEFAULT_USE_TENSORRT,
        "precision": DEFAULT_PRECISION,
        "enable_mkldnn": DEFAULT_ENABLE_MKLDNN,
        "mkldnn_cache_capacity": DEFAULT_MKLDNN_CACHE_CAPACITY,
        "cpu_threads": DEFAULT_CPU_THREADS,
        "enable_cinn": DEFAULT_USE_CINN,
    }
    unknown_names = kwargs.keys() - default_vals.keys()
    for name in unknown_names:
        raise ValueError(f"Unknown argument: {name}")
    kwargs = {**default_vals, **kwargs}
    if (
        kwargs["engine"] is not None
        and kwargs["engine"] not in SUPPORTED_INFERENCE_ENGINE_LIST
    ):
        raise ValueError(
            f"Invalid engine: {kwargs['engine']}. Supported values are: {SUPPORTED_INFERENCE_ENGINE_LIST}."
        )
    if kwargs["precision"] not in SUPPORTED_PRECISION_LIST:
        raise ValueError(
            f"Invalid precision: {kwargs['precision']}. Supported values are: {SUPPORTED_PRECISION_LIST}."
        )
    # Rename the user-facing TensorRT options to the names used internally.
    kwargs["use_pptrt"] = kwargs.pop("use_tensorrt")
    kwargs["pptrt_precision"] = kwargs.pop("precision")
    return kwargs


def _build_paddle_static_engine_config(common_args, device_type):
    # Translate the common arguments into a Paddle static-graph engine config.
    cfg = {}
    if device_type == "gpu":
        if common_args["use_pptrt"]:
            if common_args["pptrt_precision"] == "fp32":
                cfg["run_mode"] = "trt_fp32"
            else:
                assert (
                    common_args["pptrt_precision"] == "fp16"
                ), common_args["pptrt_precision"]
                cfg["run_mode"] = "trt_fp16"
        else:
            cfg["run_mode"] = "paddle"
    elif device_type == "cpu":
        if common_args["enable_mkldnn"]:
            cfg["mkldnn_cache_capacity"] = common_args["mkldnn_cache_capacity"]
        else:
            cfg["run_mode"] = "paddle"
        cfg["cpu_threads"] = common_args["cpu_threads"]
    else:
        cfg["run_mode"] = "paddle"
    cfg["enable_cinn"] = common_args["enable_cinn"]
    return cfg


def prepare_common_init_args(model_name, common_args):
    # Build the keyword arguments shared by the model wrappers from the
    # already-normalized common arguments.
    device = common_args["device"]
    if device is None:
        device = get_default_device()
    device_type, _ = parse_device(device)

    init_kwargs = {}
    init_kwargs["device"] = device
    init_kwargs["use_hpip"] = common_args["enable_hpi"]

    engine = common_args["engine"]
    init_kwargs["engine"] = engine
    user_engine_config = common_args["engine_config"]
    built = _build_paddle_static_engine_config(common_args, device_type)
    if user_engine_config is not None:
        init_kwargs["engine_config"] = user_engine_config
    elif engine == "paddle_static":
        init_kwargs["engine_config"] = built
    elif engine in (None, "paddle"):
        init_kwargs["engine_config"] = {"paddle_static": built}
    else:
        init_kwargs["engine_config"] = None
    return init_kwargs


def add_common_cli_opts(parser, default_enable_hpi, allow_multiple_devices):
    # Register the shared inference options on an argparse parser.
    if allow_multiple_devices:
        help_ = (
            "Device(s) to use for inference, e.g., `cpu`, `gpu`, `npu`, `gpu:0`, "
            "`gpu:0,1`. If multiple devices are specified, inference will be "
            "performed in parallel. Note that parallel inference is not always "
            "supported. By default, GPU 0 will be used if available; otherwise, "
            "the CPU will be used."
        )
    else:
        help_ = (
            "Device to use for inference, e.g., `cpu`, `gpu`, `npu`, `gpu:0`. "
            "By default, GPU 0 will be used if available; otherwise, the CPU "
            "will be used."
        )
    parser.add_argument(
        "--device",
        type=str,
        default=DEFAULT_DEVICE,
        help=help_,
    )
    parser.add_argument(
        "--engine",
        type=str,
        choices=SUPPORTED_INFERENCE_ENGINE_LIST,
        help="Inference engine to use. For CLI, engine-specific configuration "
        "should be set in the PaddleX YAML config file.",
    )
    parser.add_argument(
        "--enable_hpi",
        type=str2bool,
        default=default_enable_hpi,
        help="Enable the high performance inference.",
    )
    parser.add_argument(
        "--use_tensorrt",
        type=str2bool,
        default=DEFAULT_USE_TENSORRT,
        help="Whether to use the Paddle Inference TensorRT subgraph engine. "
        "If the model does not support TensorRT acceleration, even if this "
        "flag is set, acceleration will not be used.",
    )
    parser.add_argument(
        "--precision",
        type=str,
        default=DEFAULT_PRECISION,
        choices=SUPPORTED_PRECISION_LIST,
        help="Precision for TensorRT when using the Paddle Inference TensorRT "
        "subgraph engine.",
    )
    parser.add_argument(
        "--enable_mkldnn",
        type=str2bool,
        default=DEFAULT_ENABLE_MKLDNN,
        help="Enable MKL-DNN acceleration for inference. If MKL-DNN is "
        "unavailable or the model does not support it, acceleration will not "
        "be used even if this flag is set.",
    )
    parser.add_argument(
        "--mkldnn_cache_capacity",
        type=int,
        default=DEFAULT_MKLDNN_CACHE_CAPACITY,
        help="MKL-DNN cache capacity.",
    )
    parser.add_argument(
        "--cpu_threads",
        type=int,
        default=DEFAULT_CPU_THREADS,
        help="Number of threads to use for inference on CPUs.",
    )
    parser.add_argument(
        "--enable_cinn",
        type=str2bool,
        default=DEFAULT_USE_CINN,
        help="Whether to use the CINN compiler.",
    )
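

# A minimal usage sketch of the helpers above, under the signatures as written
# in this module; it is illustrative only, not part of the library's public
# API. The model name "PP-OCRv5_mobile_det" is just an example value, and
# running the sketch requires PaddleX to be importable.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Common inference options demo")
    add_common_cli_opts(parser, default_enable_hpi=False, allow_multiple_devices=False)
    cli_args = vars(parser.parse_args())

    # Validate and normalize the raw CLI values, then turn them into the
    # keyword arguments shared by the model wrappers.
    common_args = parse_common_args(cli_args, default_enable_hpi=False)
    init_kwargs = prepare_common_init_args("PP-OCRv5_mobile_det", common_args)
    print(init_kwargs)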