import asyncio
import hashlib
import re
from collections.abc import Callable
from pathlib import Path

import httpx
from bs4 import BeautifulSoup

from app.config import settings
from app.services.tax_data_processor.anti_blocking import AntiBlockingStrategy
from app.services.tax_data_processor.converters.doc_converter import DocConverter
from app.services.tax_data_processor.document_cleaner import DocumentCleaner
from app.services.tax_data_processor.parsers.attachment_parser import AttachmentParser
from app.services.tax_data_processor.parsers.html_parser import HTMLParser
from app.services.tax_data_processor.parsers.metadata_extractor import MetadataExtractor
from app.services.tax_data_processor.parsers.relationship_extractor import RelationshipExtractor
from app.services.tax_data_processor.retry_policy import RetryPolicy
from common_logging import get_logger

logger = get_logger(__name__)


def _safe_attachment_path(save_dir: str, subdir: str, name: str) -> str:
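    """Return a save path under `<save_dir>/<subdir>/attachments/`, truncating
    the filename to at most 200 UTF-8 bytes while preserving its extension."""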
    max_bytes = 200
    if len(name.encode('utf-8')) > max_bytes:
        ext = Path(name).suffix.lower()
        ext_bytes = len(ext.encode('utf-8'))
        stem = Path(name).stem
        stem_bytes = stem.encode('utf-8')[:max_bytes - ext_bytes]
        safe_stem = stem_bytes.decode('utf-8', errors='ignore')
        name = safe_stem + ext
    att_dir = Path(save_dir) / subdir / 'attachments'
    att_dir.mkdir(parents=True, exist_ok=True)
    return str(att_dir / name)

def _default_doc_status(category_id: int, parsed_status: str | None) -> str:
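    """Return the parsed status when present, otherwise default to 'effective'.
    (`category_id` is currently unused.)"""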
    return parsed_status or 'effective'

class ProcessorEngine:
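    """Fetches, parses, and converts tax documents into Markdown and RAG-ready
    text, handling anti-blocking warm-ups, retries, attachments, and inline
    images."""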

    def __init__(self, timeout: int=30, max_retries: int=3, delay_min: int=1, delay_max: int=5):
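        """Wire up parser/converter collaborators. `timeout` and `max_retries`
        are shared with the attachment parser; `delay_min`/`delay_max` bound the
        anti-blocking delay between requests."""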
        self.timeout = timeout
        self.max_retries = max_retries
        self.retry_policy = RetryPolicy(max_retries=max_retries, base_delay=2.0, max_delay=60.0, jitter=0.3)
        self.anti_blocking = AntiBlockingStrategy(delay_min=delay_min, delay_max=delay_max)
        self.html_parser = HTMLParser()
        self.attachment_parser = AttachmentParser(timeout=timeout, max_retries=max_retries)
        self.metadata_extractor = MetadataExtractor()
        self.relationship_extractor = RelationshipExtractor()
        self.document_cleaner = DocumentCleaner()
        self.doc_converter = DocConverter()
        # Whether the most recent fetch_url call succeeded only after a warm-up.
        self._last_fetch_used_warmup = False

    async def fetch_url(self, url: str, headers: dict | None=None, category_id: int=0, warmup_fn: Callable | None=None) -> str | None:
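        """Fetch `url` and return its decoded HTML, or None on failure.

        On a 403, retries once after a warm-up: either the caller-supplied
        `warmup_fn(client)` or, when `category_id` is given, a request to the
        category's list page followed by a retry with that page as Referer.
        """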
        self.anti_blocking.category_id = category_id
        await self.anti_blocking.before_request(url=url)
        request_headers = self.anti_blocking.get_headers()
        if headers:
            request_headers.update(headers)
        self._last_fetch_used_warmup = False
        async with httpx.AsyncClient(timeout=self.timeout, follow_redirects=True) as client:
            try:
                response = await self.retry_policy.execute(client.get, url, headers=request_headers)
                if response.status_code == 403:
                    # Warm up, then retry the original URL once.
                    retried = None
                    if warmup_fn:
                        await warmup_fn(client)
                        retried = await client.get(url, headers=request_headers)
                    elif category_id:
                        retried = await self._warmup_via_list_page(client, url, request_headers, category_id)
                    if retried is not None:
                        response = retried
                        if response.status_code == 200:
                            self._last_fetch_used_warmup = True
                return self._decode_content(response, url)
            except httpx.HTTPStatusError as e:
                if e.response.status_code == 403:
                    try:
                        retried = None
                        if warmup_fn:
                            await warmup_fn(client)
                            retried = await client.get(url, headers=request_headers)
                        elif category_id:
                            retried = await self._warmup_via_list_page(client, url, request_headers, category_id)
                        if retried is not None:
                            if retried.status_code < 400:
                                self._last_fetch_used_warmup = True
                                return self._decode_content(retried, url)
                            logger.error(f'[fetch_url] still failing after warm-up: {url}, status={retried.status_code}')
                    except Exception as warmup_err:
                        logger.error(f'[fetch_url] warm-up failed: {url}, error: {warmup_err}')
                logger.error(f'Failed to fetch URL: {url}, status={e.response.status_code}, body={e.response.text[:200]}')
                return None
            except Exception as e:
                logger.error(f'Failed to fetch URL: {url}, error: {e}')
                return None

    async def _warmup_via_list_page(self, client: httpx.AsyncClient, url: str, request_headers: dict, category_id: int) -> httpx.Response | None:
        """Fetch the category's list page to pick up cookies, then retry `url`
        with the list page as Referer. Returns None when the category has no
        configuration."""
        # Local import to avoid a potential circular dependency.
        from app.services.tax_data_processor.category_processor import CategoryProcessor
        config = CategoryProcessor().get_category_config(category_id)
        if not config:
            return None
        list_url = config['list_url']
        logger.warning(f'[fetch_url] got 403, warming up via list page: {list_url}')
        await client.get(list_url, headers=request_headers)
        await asyncio.sleep(1)
        return await client.get(url, headers={**request_headers, 'Referer': list_url})

    @staticmethod
    def _decode_content(response: httpx.Response, url: str) -> str:
        """Decode a response body, trying the declared encoding first, then
        common Chinese encodings, with a lossy UTF-8 fallback."""
        content = response.content
        for enc in [response.encoding or 'utf-8', 'utf-8', 'gbk', 'gb2312']:
            try:
                text = content.decode(enc)
                logger.info(f'Fetched URL: {url} (encoding: {enc}, status={response.status_code})')
                return text
            except UnicodeDecodeError:
                continue
        logger.warning(f'Decoded with replacement characters: {url}')
        return content.decode('utf-8', errors='replace')

    async def process_document(self, doc_url: str, category_id: int, save_dir: str) -> dict | None:
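        """Fetch, parse, and persist a single document.

        Returns a result dict with metadata, Markdown/plain-text content,
        attachments, and inline images, or {'success': False, 'error': ...}
        on failure.
        """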
        logger.info(f'Processing document: {doc_url}')
        try:
            html_content = await self.fetch_url(doc_url, category_id=category_id)
            if not html_content:
                return {'success': False, 'error': 'Failed to fetch document content'}
            parsed_data = self.html_parser.parse(html_content, doc_url)
            full_title = parsed_data['title']
            subtitle = parsed_data.get('subtitle', '')
            if subtitle:
                full_title = f'{full_title}{subtitle}'
            metadata = self.metadata_extractor.extract(full_title, parsed_data['content_html'], category_id)
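            # Dates parsed from the page take precedence over extractor output;
            # effective_date falls back to issue_date when absent.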
            if parsed_data.get('issue_date'):
                metadata['issue_date'] = parsed_data['issue_date']
            if parsed_data.get('effective_date'):
                metadata['effective_date'] = parsed_data['effective_date']
            elif not metadata.get('effective_date') and metadata.get('issue_date'):
                metadata['effective_date'] = metadata['issue_date']
            markdown_content = self.document_cleaner.html_to_markdown(parsed_data['content_html'])
            content_hash = hashlib.sha256(html_content.encode('utf-8')).hexdigest()
            directory_name = self.metadata_extractor.generate_directory_name(metadata.get('issue_date'), doc_url)
            filename = self.metadata_extractor.generate_filename(metadata['title'], metadata.get('doc_number'), metadata.get('issue_date'), category_id=category_id)
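            # Download attachments into <save_dir>/<directory_name>/attachments/.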
            attachments = []
            if parsed_data.get('attachments'):
                for attachment in parsed_data['attachments']:
                    attachment_result = await self.attachment_parser.download_attachment(
                        attachment['url'],
                        _safe_attachment_path(save_dir, directory_name, attachment['name']),
                        headers=self.anti_blocking.get_headers(),
                        base_dir=settings.project_root,
                    )
                    if attachment_result['success']:
                        attachments.append({
                            'name': attachment['name'],
                            'url': attachment['url'],
                            'path': attachment_result['path'],
                            'size': attachment_result['size'],
                            'type': attachment['type'],
                        })
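            # Download inline images from the article body and rewrite their
            # <img src> attributes to point at the local copies.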
            inline_images = []
            soup = BeautifulSoup(parsed_data['content_html'], 'lxml')
            media = self.html_parser.extract_media(soup, doc_url)
            if media['images']:
                img_dir = Path(save_dir) / directory_name / 'images'
                img_dir.mkdir(parents=True, exist_ok=True)
                for idx, img_info in enumerate(media['images']):
                    src = img_info['src']
                    ext = Path(src.split('?')[0]).suffix.lower() or '.jpg'
                    dl = await self.attachment_parser.download_attachment(
                        src,
                        str(img_dir / f'img_{idx:03d}{ext}'),
                        headers=self.anti_blocking.get_headers(),
                        base_dir=settings.project_root,
                    )
                    if dl['success']:
                        soup_img = soup.find('img', src=img_info.get('src_raw', img_info['src']))
                        if soup_img:
                            soup_img['src'] = dl['path']
                        inline_images.append({'src_original': src, 'path': dl['path'], 'alt': img_info['alt'], 'ocr_text': ''})
                if inline_images:
                    markdown_content = self.document_cleaner.html_to_markdown(str(soup))
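            # Append a section linking any downloaded attachments that the
            # Markdown body does not already reference.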
            if attachments:
                new_atts = [a for a in attachments if a['url'] not in markdown_content]
                if new_atts:
                    lines = ['\n\n---\n\n## 附件']
                    for att in new_atts:
                        lines.append(f"- [{att['name']}]({att['url']})")
                    markdown_content += '\n'.join(lines)
            content_text = self.document_cleaner.markdown_to_rag_text(markdown_content)
            rel = self.relationship_extractor.extract(markdown_content)
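            # Record file_path relative to the project root so stored records
            # stay portable across deployments.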
            try:
                save_dir_path = Path(save_dir)
                if save_dir_path.is_absolute():
                    rel_dir = save_dir_path.relative_to(settings.project_root)
                else:
                    rel_dir = save_dir_path
                relative_file_path = str(rel_dir / directory_name / 'content.md')
            except ValueError:
                relative_file_path = None
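            # Persist the cleaned Markdown next to any downloaded assets.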
            try:
                doc_dir = Path(save_dir) / directory_name
                doc_dir.mkdir(parents=True, exist_ok=True)
                (doc_dir / 'content.md').write_text(markdown_content, encoding='utf-8')
            except Exception as e:
                logger.warning(f'Failed to save content.md: {doc_url}, error: {e}')
            return {
                'success': True,
                'source_url': doc_url,
                'category_id': category_id,
                'title': metadata['title'],
                'doc_number': metadata.get('doc_number'),
                'issuing_authority': metadata.get('issuing_authority'),
                'issue_date': metadata.get('issue_date'),
                'effective_date': metadata.get('effective_date'),
                'doc_status': _default_doc_status(category_id, parsed_data.get('doc_status')),
                'content_html': html_content,
                'content_markdown': markdown_content,
                'content_text': content_text,
                'content_hash': content_hash,
                'attachments': attachments,
                'inline_images': inline_images or None,
                'inline_videos': None,
                'filename': filename,
                'file_directory': directory_name,
                'file_path': relative_file_path,
                'fetch_strategy': 'warmup' if getattr(self, '_last_fetch_used_warmup', False) else 'normal',
                'supersedes': rel['supersedes'] or None,
                'references': rel['references'] or None,
            }
        except Exception as e:
            logger.exception(f'Failed to process document: {doc_url}, error: {e}')
            return {'success': False, 'error': str(e)}

    async def _ocr_inline_images(self, markdown_content: str, content_html: str) -> str:
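        """OCR remote <img> sources from the article body and append any
        recognized text to `markdown_content` as an extra section."""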
        try:
            # BeautifulSoup is already imported at module level; import the OCR
            # helper lazily so the dependency is only loaded when needed.
            from app.services.tax_data_processor.ocr.image_ocr import ocr_image_url
            soup = BeautifulSoup(content_html, 'lxml')
            img_tags = soup.find_all('img')
            if not img_tags:
                return markdown_content
            headers = self.anti_blocking.get_headers()
            ocr_sections = []
            for i, img in enumerate(img_tags, 1):
                src = img.get('src', '')
                if not src or src.startswith('data:'):
                    continue
                ocr_text = await ocr_image_url(src, headers=headers)
                if ocr_text.strip():
                    label = f'图{i}' if len(img_tags) > 1 else '图片'
                    logger.info(f'[ProcessorEngine] body image {label} OCR succeeded: {len(ocr_text)} characters')
                    ocr_sections.append(f'### {label}\n\n{ocr_text}')
            if ocr_sections:
                markdown_content += '\n\n---\n\n## 正文图片内容\n\n' + '\n\n'.join(ocr_sections)
        except Exception as e:
            logger.warning(f'[ProcessorEngine] body image OCR error: {e}')
        return markdown_content

    async def process_list_page(self, list_url: str, category_id: int) -> list | None:
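        """Derive the page number from `list_url` and fetch that page's
        document list via CategoryProcessor. Returns the list, or None on
        failure."""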
        logger.info(f'Fetching list page documents: category_id={category_id} url={list_url}')
        try:
            # Local import to avoid a potential circular dependency.
            from app.services.tax_data_processor.category_processor import CategoryProcessor

            # List URLs may carry a page suffix such as `..._2.html`; default to page 1.
            page = 1
            m = re.search(r'_(\d+)\.html$', list_url)
            if m:
                page = int(m.group(1))
            category_processor = CategoryProcessor()
            documents = await category_processor.fetch_document_list(category_id, page, extra_headers=self.anti_blocking.get_headers())
            logger.info(f'List page fetched: {len(documents)} documents')
            return documents
        except Exception as e:
            logger.exception(f'Failed to fetch list page: category_id={category_id}, error: {e}')
            return None

    async def reprocess_from_html(self, raw_html: str, doc_url: str, category_id: int, save_dir: str) -> dict | None:
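        """Rebuild a document record from previously saved HTML, without any
        network fetches or attachment downloads. Returns a result dict similar
        to process_document's, with attachments set to None."""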
        try:
            parsed_data = self.html_parser.parse(raw_html, doc_url)
            full_title = parsed_data['title']
            subtitle = parsed_data.get('subtitle', '')
            if subtitle:
                full_title = f'{full_title}{subtitle}'
            metadata = self.metadata_extractor.extract(full_title, parsed_data['content_html'], category_id)
            if parsed_data.get('issue_date'):
                metadata['issue_date'] = parsed_data['issue_date']
            if parsed_data.get('effective_date'):
                metadata['effective_date'] = parsed_data['effective_date']
            elif not metadata.get('effective_date') and metadata.get('issue_date'):
                metadata['effective_date'] = metadata['issue_date']
            markdown_content = self.document_cleaner.html_to_markdown(parsed_data['content_html'])
            content_hash = hashlib.sha256(raw_html.encode('utf-8')).hexdigest()
            filename = self.metadata_extractor.generate_filename(metadata['title'], metadata.get('doc_number'), metadata.get('issue_date'), category_id=category_id)
            directory_name = self.metadata_extractor.generate_directory_name(metadata.get('issue_date'), doc_url)
            rel = self.relationship_extractor.extract(markdown_content)
            content_text = self.document_cleaner.markdown_to_rag_text(markdown_content)
            try:
                save_dir_path = Path(save_dir)
                if save_dir_path.is_absolute():
                    rel_dir = save_dir_path.relative_to(settings.project_root)
                else:
                    rel_dir = save_dir_path
                relative_file_path = str(rel_dir / directory_name / 'content.md')
            except ValueError:
                relative_file_path = None
            return {
                'success': True,
                'source_url': doc_url,
                'category_id': category_id,
                'title': metadata['title'],
                'doc_number': metadata.get('doc_number'),
                'issuing_authority': metadata.get('issuing_authority'),
                'issue_date': metadata.get('issue_date'),
                'effective_date': metadata.get('effective_date'),
                'doc_status': _default_doc_status(category_id, parsed_data.get('doc_status')),
                'content_html': raw_html,
                'content_markdown': markdown_content,
                'content_text': content_text,
                'content_hash': content_hash,
                'attachments': None,
                'filename': filename,
                'file_directory': directory_name,
                'file_path': relative_file_path,
                'supersedes': rel['supersedes'] or None,
                'references': rel['references'] or None,
            }
        except Exception as e:
            logger.exception(f'Local reprocessing failed: {doc_url}, error: {e}')
            return {'success': False, 'error': str(e)}
