import hashlib
import math
import random
import re
from collections.abc import AsyncIterator

import httpx
from bs4 import BeautifulSoup

from app.services.tax_data_processor.adapters.base import (

    BaseSourceAdapter,
    DocumentItem,
    ParsedDocument,
)

from common_logging import get_logger

logger = get_logger(__name__)

# Endpoint of the chinatax.gov.cn full-text search service used to enumerate documents.
SEARCH_API = 'https://www.chinatax.gov.cn/search5/search/s'
# Site code passed to the search API to select the collection being queried.
SITE_CODE = 'bm29000002'
# Results per search page; also used to derive page counts from the API's totals.
PAGE_SIZE = 10
# The three interpretation forms (text / image / video) this adapter crawls.
# 'xxgkResolveType' is the API-side filter value; 'referer' is the listing page
# sent as the Referer header when fetching detail pages of that form.
_FORMS = [{'xxgkResolveType': '文字', 'form': 'text', 'referer': 'https://fgk.chinatax.gov.cn/zcfgk/c100015/list_zcjd.html'}, {'xxgkResolveType': '图片', 'form': 'image', 'referer': 'https://fgk.chinatax.gov.cn/zcfgk/c100016/list_zcjd.html'}, {'xxgkResolveType': '视频', 'form': 'video', 'referer': 'https://fgk.chinatax.gov.cn/'}]
# Browser-like default headers; parse_document overrides Referer per form.
_HEADERS = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36', 'Referer': 'https://fgk.chinatax.gov.cn/zcfgk/c100014/list_zcjd.html'}

async def _search(client: httpx.AsyncClient, resolve_type: str, page_num: int) -> dict:
    """Query the chinatax site-search API for one page of results.

    Args:
        client: shared httpx client to issue the request on.
        resolve_type: form filter value (one of the ``xxgkResolveType`` values in ``_FORMS``).
        page_num: zero-based page index.

    Returns the decoded JSON payload; raises ``httpx.HTTPStatusError`` on a
    non-2xx response.
    """
    query = {
        'siteCode': SITE_CODE,
        'searchWord': '',
        'type': 1,
        'xxgkResolveType': resolve_type,
        'pageNum': page_num,
        'pageSize': PAGE_SIZE,
        'cwrqStart': '',
        'cwrqEnd': '',
        'column': '政策解读',
        'likeDoc': 0,
        'wordPlace': 0,
        'videoreSolveType': '',
    }
    response = await client.get(SEARCH_API, params=query, headers=_HEADERS, timeout=30)
    response.raise_for_status()
    return response.json()

def _parse_date(raw: str) -> str | None:
    if not raw:
        return None
    m = re.search('(\\d{4})-(\\d{1,2})-(\\d{1,2})', raw)
    return f'{m.group(1)}-{int(m.group(2)):02d}-{int(m.group(3)):02d}' if m else None

def _normalize_url(url: str) -> str:
    return url.replace('http://fgk.chinatax.gov.cn', 'https://fgk.chinatax.gov.cn').replace('http://www.chinatax.gov.cn', 'https://fgk.chinatax.gov.cn').replace('https://www.chinatax.gov.cn', 'https://fgk.chinatax.gov.cn')

def _handle_image(soup: BeautifulSoup, api_content: str, enable_ocr: bool) -> str:
    """Return placeholder body text for an image-form interpretation page.

    OCR is not implemented yet: soup, api_content and enable_ocr are accepted
    for interface parity with the video handler but are currently unused.
    """
    return '[图解解读：图片内容待 OCR 处理]'

async def _download_inline_images(soup: BeautifulSoup, save_dir: str) -> list:
    from pathlib import Path

    import httpx as _httpx
    images = []
    seen = set()
    Path(save_dir).mkdir(parents=True, exist_ok=True)
    for idx, img in enumerate(soup.find_all('img', src=True)):
        src = img['src']
        if src.startswith('data:') or src in seen:
            continue
        if not src.startswith('http'):
            src = 'https://fgk.chinatax.gov.cn' + src
        seen.add(src)
        ext = Path(src.split('?')[0]).suffix.lower() or '.jpg'
        local_path = str(Path(save_dir) / f'img_{idx:03d}{ext}')
        try:
            async with _httpx.AsyncClient(timeout=15) as client:
                r = await client.get(src, headers={'User-Agent': 'Mozilla/5.0'})
                if r.status_code == 200:
                    Path(local_path).write_bytes(r.content)
                    images.append({'src_original': src, 'path': local_path, 'alt': img.get('alt', ''), 'ocr_text': ''})
        except Exception as e:
            logger.warning(f'[chinatax_zcjd] 图片下载失败: {src} — {e}')
    return images

def _handle_video(api_content: str, enable_transcript: bool) -> str:
    return f'[视频解读：字幕待 ASR 处理]\n\n{api_content}'.strip() if api_content else '[视频解读：字幕待 ASR 处理]'

async def _download_inline_videos(soup: BeautifulSoup, save_dir: str) -> list:
    from pathlib import Path

    import httpx as _httpx
    videos = []
    seen = set()
    Path(save_dir).mkdir(parents=True, exist_ok=True)
    for idx, tag in enumerate(soup.find_all(['video', 'source', 'iframe'])):
        src = tag.get('src') or tag.get('data-src', '')
        if not src or src in seen:
            continue
        if not src.startswith('http'):
            src = 'https://fgk.chinatax.gov.cn' + src
        seen.add(src)
        ext = Path(src.split('?')[0]).suffix.lower() or '.mp4'
        local_path = str(Path(save_dir) / f'vid_{idx:03d}{ext}')
        try:
            async with _httpx.AsyncClient(timeout=60) as client:
                r = await client.get(src, headers={'User-Agent': 'Mozilla/5.0'})
                if r.status_code == 200:
                    Path(local_path).write_bytes(r.content)
                    videos.append({'src_original': src, 'path': local_path, 'transcript': '', 'keyframes': []})
        except Exception as e:
            logger.warning(f'[chinatax_zcjd] 视频下载失败: {src} — {e}')
    return videos

class ChinataxZcjdAdapter(BaseSourceAdapter):
    """Source adapter for 政策解读 (policy interpretation) pages on chinatax.gov.cn.

    Documents are enumerated through the site-search API, one pass per form in
    ``_FORMS`` (text / image / video). Text pages keep the API summary or page
    text as content; image and video pages get their inline assets downloaded
    to disk and a placeholder body for later OCR / ASR enrichment.
    """

    def __init__(self, source, engine):
        """Read optional feature flags and request-delay bounds off *source*."""
        super().__init__(source, engine)
        # adapter_config may be None; treat it as an empty config.
        cfg = source.adapter_config or {}
        # Flags forwarded to the image/video placeholder handlers (off by default).
        self._enable_image_ocr: bool = cfg.get('enable_image_ocr', False)
        self._enable_video_transcript: bool = cfg.get('enable_video_transcript', False)
        # Bounds (seconds) of the random politeness delay before each page fetch.
        self._delay_min: float = getattr(source, 'request_delay_min', 3.0)
        self._delay_max: float = getattr(source, 'request_delay_max', 8.0)

    async def get_total_pages(self) -> int:
        """Sum the search-API page counts across all three forms.

        A failed lookup for one form is logged and contributes zero pages.
        Always returns at least 1.
        """
        total_pages = 0
        async with httpx.AsyncClient() as client:
            for form_cfg in _FORMS:
                try:
                    # Page 0 is fetched only to read the reported total.
                    data = await _search(client, form_cfg['xxgkResolveType'], 0)
                    total = data.get('searchResultAll', {}).get('total', 0)
                    total_pages += math.ceil(int(total) / PAGE_SIZE)
                except Exception as e:
                    logger.warning(f"[chinatax_zcjd] 获取总页数失败 {form_cfg['form']}: {e}")
        return max(total_pages, 1)

    async def list_documents(self, page: int=1) -> list[DocumentItem]:
        """Single-page listing is not supported; enumeration goes through
        list_all_documents, so this always returns an empty list."""
        return []

    async def list_all_documents(self, start_page: int | None=None, end_page: int | None=None) -> AsyncIterator[DocumentItem]:
        """Yield a DocumentItem for every search hit, across all three forms.

        NOTE(review): start_page/end_page are accepted for interface parity
        with the base adapter but are not applied here — the full result set
        is always walked. Confirm whether range support is expected.
        """
        async with httpx.AsyncClient() as client:
            for form_cfg in _FORMS:
                resolve_type = form_cfg['xxgkResolveType']
                form = form_cfg['form']
                page_num = 0  # the search API is zero-indexed
                total = None
                while True:
                    try:
                        data = await _search(client, resolve_type, page_num)
                        result_all = data.get('searchResultAll', {})
                        if total is None:
                            # The total is only reported once; read it on the first page.
                            total = int(result_all.get('total', 0))
                            logger.info(f'[chinatax_zcjd] {resolve_type}解读 共 {total} 条')
                        items = result_all.get('searchTotal', [])
                    except Exception as e:
                        # A failed page aborts this form but not the other forms.
                        logger.error(f'[chinatax_zcjd] 搜索 API 失败 {resolve_type} page={page_num}: {e}')
                        break
                    for item in items:
                        # Prefer the canonical URL; fall back to the search snapshot URL.
                        url = _normalize_url(item.get('url', '') or item.get('snapshotUrl', ''))
                        if not url:
                            continue
                        title = item.get('title', '').strip()
                        date = _parse_date(item.get('cwrq', ''))
                        yield DocumentItem(url=url, title=title, date=date, extra={'form': form, 'content': item.get('content', ''), 'pubName': item.get('pubName', '国家税务总局'), 'referer': form_cfg['referer']})
                    page_num += 1
                    # Stop on an empty page or once all reported results are consumed.
                    if not items or page_num * PAGE_SIZE >= (total or 0):
                        break

    async def parse_document(self, item: DocumentItem) -> ParsedDocument | None:
        """Fetch one interpretation page and build a ParsedDocument from it.

        Returns None on a non-200 response or a transport failure. The content
        text depends on the item's form: for 'text' it is the API summary (or
        the page text as fallback); for 'image'/'video' it is a placeholder,
        with the inline assets downloaded next to the raw data.
        """
        import asyncio

        from app.services.tax_data_processor.document_cleaner import DocumentCleaner
        cleaner = DocumentCleaner()
        # Politeness delay so consecutive fetches don't hammer the site.
        await asyncio.sleep(random.uniform(self._delay_min, self._delay_max))
        form = item.extra.get('form', 'text')
        api_content = item.extra.get('content', '')
        pub_name = item.extra.get('pubName', '国家税务总局')
        # Each form's listing page is sent as Referer (see _FORMS).
        referer = item.extra.get('referer', _FORMS[0]['referer'])
        try:
            async with httpx.AsyncClient(timeout=30, follow_redirects=True, headers={**_HEADERS, 'Referer': referer}) as client:
                resp = await client.get(item.url)
                if resp.status_code != 200:
                    logger.warning(f'[chinatax_zcjd] HTTP {resp.status_code}: {item.url}')
                    return None
                html = resp.text
        except Exception as e:
            logger.error(f'[chinatax_zcjd] 请求失败: {item.url} — {e}')
            return None
        soup = BeautifulSoup(html, 'html.parser')
        from app.services.tax_data_processor.parsers.html_parser import HTMLParser
        cleaned_html = HTMLParser().extract_content(soup, item.url)
        # Fall back to the raw page when content extraction yields nothing.
        content_html_str = cleaned_html or html
        content_markdown = cleaner.html_to_markdown(content_html_str)
        inline_images = None
        inline_videos = None
        if form == 'text':
            content_text = api_content or soup.get_text('\n', strip=True)
        elif form == 'image':
            from app.config import get_settings as _get_settings
            _settings = _get_settings()
            # Assets go under raw_data_dir/chinatax_zcjd/<12-char url hash>/images.
            img_dir = str(_settings.raw_data_dir / 'chinatax_zcjd' / hashlib.sha256(item.url.encode()).hexdigest()[:12] / 'images')
            inline_images = await _download_inline_images(soup, img_dir)
            content_text = _handle_image(soup, api_content, self._enable_image_ocr)
        else:
            from app.config import get_settings as _get_settings


            _settings = _get_settings()
            # Assets go under raw_data_dir/chinatax_zcjd/<12-char url hash>/videos.
            vid_dir = str(_settings.raw_data_dir / 'chinatax_zcjd' / hashlib.sha256(item.url.encode()).hexdigest()[:12] / 'videos')
            inline_videos = await _download_inline_videos(soup, vid_dir)
            content_text = _handle_video(api_content, self._enable_video_transcript)
        # Hash of the raw HTML — presumably used by the caller for change
        # detection / dedup; confirm against the ParsedDocument consumer.
        content_hash = hashlib.sha256(html.encode()).hexdigest()
        title = item.title
        if not title:
            # Search results may lack a title; recover one from the page itself.
            h = soup.select_one('h1, .article-title, .content-title')
            title = h.get_text(strip=True) if h else '政策解读'
        logger.info(f'[chinatax_zcjd] [{form}] {title[:40]}')
        return ParsedDocument(source_url=item.url, title=title, doc_type='政策解读', region_code='CN', content_html=html, content_markdown=content_markdown, content_text=content_text, content_hash=content_hash, issue_date=item.date, issuing_authority=pub_name, interpretation_form=form, inline_images=inline_images, inline_videos=inline_videos)
