import asyncio
import random
import re
from urllib.parse import urljoin

import httpx
from bs4 import BeautifulSoup

from app.services.tax_data_processor.adapters.base import (
    BaseSourceAdapter,
    DocumentItem,
    ParsedDocument,
)
from common_logging import get_logger

logger = get_logger(__name__)

# Listing endpoint of the chinatax.gov.cn manuscript-list channel this adapter scrapes.
_LIST_URL = 'http://www.chinatax.gov.cn/chinatax/manuscriptList/n3255681'
# Fixed query parameters the CMS list endpoint expects; the page number is
# merged in per request (see get_total_pages / list_documents).
_LIST_PARAMS = {'_isAgg': 'false', '_pageSize': '20', '_template': 'index', '_channelName': '', '_keyWH': 'wenhao'}
# Desktop-browser User-Agent; presumably required so the site serves normal
# HTML rather than rejecting non-browser clients — TODO confirm.
_HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'}

def _parse_cn_date(raw: str) -> str | None:
    m = re.search('(\\d{4})年(\\d{1,2})月(\\d{1,2})日', raw)
    return f'{m.group(1)}-{int(m.group(2)):02d}-{int(m.group(3)):02d}' if m else None

class QAChinataxMessageAdapter(BaseSourceAdapter):
    """Adapter for the chinatax.gov.cn taxpayer Q&A ("纳税咨询") message board.

    Scrapes the paginated listing at ``_LIST_URL`` and parses each detail
    page into a question/answer :class:`ParsedDocument`.
    """

    async def get_total_pages(self) -> int:
        """Return the total page count reported by the listing.

        Fetches page 1 and looks for a "共N页" marker in the page text;
        falls back to 1 when the marker is absent.
        """
        async with httpx.AsyncClient(timeout=30, headers=_HEADERS, follow_redirects=True) as client:
            resp = await client.get(_LIST_URL, params={**_LIST_PARAMS, 'page': 1})
            resp.raise_for_status()
        page_text = BeautifulSoup(resp.text, 'html.parser').get_text()
        m = re.search(r'共(\d+)页', page_text)
        return int(m.group(1)) if m else 1

    async def list_documents(self, page: int = 1) -> list[DocumentItem]:
        """Return the documents listed on *page*, retrying transient failures.

        Retries up to 3 attempts with exponential backoff (1s, 2s) on any
        ``httpx.HTTPError`` — this covers timeouts and bad HTTP statuses
        (as before) plus connection/read failures, which the previous
        narrower tuple silently let escape on the first attempt.
        """
        for attempt in range(3):
            try:
                async with httpx.AsyncClient(timeout=30, headers=_HEADERS, follow_redirects=True) as client:
                    resp = await client.get(_LIST_URL, params={**_LIST_PARAMS, 'page': page})
                    resp.raise_for_status()
            except httpx.HTTPError:
                if attempt == 2:
                    raise
                await asyncio.sleep(2 ** attempt)
                continue
            soup = BeautifulSoup(resp.text, 'html.parser')
            items: list[DocumentItem] = []
            for li in soup.select('ul.list li'):
                anchor = li.find('a')
                if not anchor:
                    continue
                span = li.find('span')
                # The span holds the date wrapped in brackets, e.g. "[2023年5月7日]".
                date_raw = span.get_text(strip=True).strip('[]') if span else ''
                items.append(DocumentItem(
                    # List hrefs may be relative; resolve against the list URL so
                    # parse_document can fetch them (absolute hrefs pass through).
                    url=urljoin(_LIST_URL, anchor['href']),
                    title=anchor.get_text(strip=True),
                    date=date_raw,
                ))
            return items
        return []  # unreachable: the third failed attempt re-raises

    async def parse_document(self, item: DocumentItem) -> ParsedDocument | None:
        """Fetch and parse one Q&A detail page.

        Returns None when neither answer text nor answer images are found.
        Sleeps a random configured delay before returning, to rate-limit
        crawling.
        """
        async with httpx.AsyncClient(timeout=30, headers=_HEADERS, follow_redirects=True) as client:
            resp = await client.get(item.url)
            resp.raise_for_status()
        soup = BeautifulSoup(resp.text, 'html.parser')
        page_text = soup.get_text('\n')

        reply_date = self._extract_reply_date(page_text) or item.date
        answer = self._extract_answer(soup, page_text)
        img_markdown = self._image_markdown(soup, item.url)
        if not answer:
            if not img_markdown:
                return None
            # Some answers are published only as images.
            answer = img_markdown.strip()

        question = item.title
        content_text = f'答复机构：国家税务总局\n答复时间：{reply_date}\n\n问：{question}\n\n答：{answer}'
        content_markdown = content_text
        # If the page has no textual "答：" marker at all, the markdown variant
        # carries only the header plus the answer images.
        if img_markdown and not re.search(r'答[：:]', page_text):
            content_markdown = f'答复机构：国家税务总局\n答复时间：{reply_date}\n\n{img_markdown}'

        await self._rate_limit_sleep()
        return ParsedDocument(
            source_url=item.url,
            title=item.title,
            doc_type='chinatax_message',
            region_code='CN',
            qa_question=question,
            qa_answer=answer,
            content_text=content_text,
            content_markdown=content_markdown,
            content_html=resp.text,
            content_hash=ParsedDocument.compute_hash(content_text),
            issuing_authority='国家税务总局',
            issue_date=reply_date,
        )

    @staticmethod
    def _extract_reply_date(page_text: str) -> str | None:
        """Pull the ISO reply date out of a "答复时间：YYYY年M月D日" marker, if any."""
        m = re.search(r'答复时间[：:]\s*(\d{4}年\d{1,2}月\d{1,2}日)', page_text)
        return _parse_cn_date(m.group(1)) if m else None

    @staticmethod
    def _extract_answer(soup: BeautifulSoup, page_text: str) -> str:
        """Extract the answer body following a "答：" marker.

        Prefers the dedicated content node (id="zoomcon" or
        class="article-content"); falls back to the whole page text.
        Returns '' when no marker is found.
        """
        candidates: list[str] = []
        content_node = soup.find(id='zoomcon') or soup.find(class_='article-content')
        if content_node:
            candidates.append(content_node.get_text('\n'))
        candidates.append(page_text)
        for text in candidates:
            m = re.search(r'答[：:]\s*(.+)', text, re.DOTALL)
            if m:
                return m.group(1).strip()
        return ''

    @staticmethod
    def _image_markdown(soup: BeautifulSoup, page_url: str) -> str:
        """Render page-relative <img> srcs as markdown image lines.

        Only srcs that are relative (not starting with '/' or 'http') are
        kept; they are resolved against the detail page's directory.
        Returns '' when there are no such images.
        """
        srcs = [
            img['src']
            for img in soup.find_all('img')
            if img.get('src', '')
            and not img['src'].startswith('/')
            and not img['src'].startswith('http')
        ]
        if not srcs:
            return ''
        base = page_url.rsplit('/', 1)[0] + '/'
        return ''.join(f'![答复图片]({base}{src})\n' for src in srcs)

    async def _rate_limit_sleep(self) -> None:
        """Sleep a random delay drawn from the source's configured bounds.

        Treats only ``None`` as "unset" (defaults 1.0–3.0s), so an explicitly
        configured delay of 0 is honoured — the previous ``or`` fallback
        silently replaced 0 with the default.
        """
        lo = self.source.request_delay_min
        hi = self.source.request_delay_max
        lo = 1.0 if lo is None else lo
        hi = 3.0 if hi is None else hi
        await asyncio.sleep(random.uniform(lo, hi))
