#!/usr/bin/env bash
# Start Qwen3-VL-8B-Instruct via vLLM on port 8400 (tp=2, BF16)
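#
# Once the server is healthy it speaks the standard OpenAI chat-completions
# API. Request sketch (the image URL is a placeholder; model name and API
# key match the flags below):
#
#   curl -s http://localhost:8400/v1/chat/completions \
#     -H "Authorization: Bearer sk-local" \
#     -H "Content-Type: application/json" \
#     -d '{
#           "model": "Qwen3-VL-8B-Instruct",
#           "messages": [{
#             "role": "user",
#             "content": [
#               {"type": "image_url", "image_url": {"url": "https://example.com/scan.png"}},
#               {"type": "text", "text": "Extract all text from this document."}
#             ]
#           }]
#         }'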

set -euo pipefail

MODEL_PATH="/lsinfo/ai/hellotax_ai/llm_service/base_models/Qwen3-VL-8B-Instruct"
PORT=8400
LOG_FILE="/lsinfo/ai/hellotax_ai/llm_service/logs/ocr-${PORT}.log"
PID_FILE="/lsinfo/ai/hellotax_ai/llm_service/logs/ocr-${PORT}.pid"

mkdir -p "$(dirname "$LOG_FILE")"

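# Idempotent start: if the PID file points at a live process, do nothing.
# (kill -0 only checks that the process exists; it sends no signal.)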
if [[ -f "$PID_FILE" ]] && kill -0 "$(cat "$PID_FILE")" 2>/dev/null; then
  echo "OCR service already running (PID $(cat "$PID_FILE"))"
  exit 0
fi

echo "Starting vLLM — Qwen3-VL-8B-Instruct on port ${PORT}..."

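# Cap OpenMP threads so CPU-side ops don't oversubscribe cores.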
export OMP_NUM_THREADS=4
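# Launch the OpenAI-compatible server in the background. Notes on the less
# obvious flags (reading of intent, not vLLM documentation):
#   --gpu-memory-utilization 0.12: unusually low for an 8B BF16 model;
#     presumably leaves the rest of each GPU to co-located services.
#   --max-num-seqs 2: caps concurrent sequences, trading throughput for a
#     small, predictable memory footprint.
#   --limit-mm-per-prompt image=5,video=1: at most 5 images and 1 video
#     per request.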
nohup /lsinfo/ai/hellotax_ai/llm_service/venv_vllm/bin/python -m vllm.entrypoints.openai.api_server \
  --model "${MODEL_PATH}" \
  --host 0.0.0.0 \
  --port "${PORT}" \
  --served-model-name Qwen3-VL-8B-Instruct \
  --tensor-parallel-size 2 \
  --dtype bfloat16 \
  --gpu-memory-utilization 0.12 \
  --max-model-len 8192 \
  --max-num-seqs 2 \
  --trust-remote-code \
  --api-key sk-local \
  --limit-mm-per-prompt image=5,video=1 \
  > "${LOG_FILE}" 2>&1 &

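# Record the background PID for the already-running check above (and any
# matching stop script).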
echo $! > "${PID_FILE}"
echo "PID $(cat "$PID_FILE") — log: ${LOG_FILE}"

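# Poll vLLM's /health endpoint. Loading the model's BF16 weights across two
# GPUs can take a while, so allow up to 60 tries x 5 s = 5 minutes.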
echo -n "Waiting for OCR service to be ready"
for _ in $(seq 1 60); do
  if curl -sf "http://localhost:${PORT}/health" > /dev/null 2>&1; then
    echo " ready."
    exit 0
  fi
  # Fail fast if the server died during startup instead of burning the
  # full timeout.
  if ! kill -0 "$(cat "$PID_FILE")" 2>/dev/null; then
    echo " server process exited. Check log: ${LOG_FILE}"
    exit 1
  fi
  echo -n "."
  sleep 5
done
echo " timeout. Check log: ${LOG_FILE}"
exit 1
