from fastapi import APIRouter, Depends, HTTPException, Query, Request
from pydantic import BaseModel
from sqlalchemy.orm import Session, defer, joinedload

from app.api.deps import get_current_user
from app.api.permissions import require_permission, require_read
from app.core.exceptions import (
    DocumentNotFoundError,
    InsufficientPermissionError,
)
from app.core.i18n import get_translator
from app.db.session import get_db
from app.models import (
    DocumentTag,
    DocumentVersion,
    KnowledgeBase,
    KnowledgeCategory,
    KnowledgeDocument,
    User,
)
from app.schemas.knowledge_document import DocumentCreate, DocumentResponse, DocumentUpdate
from app.schemas.knowledge_document_list import DocumentListResponse
from common_logging import get_logger, log_errors

from .helpers import get_all_child_category_ids

logger = get_logger(__name__)
router = APIRouter()


@router.get(
    "/documents",
    response_model=dict,
    summary="获取文档列表",
    description="获取知识库文档列表，支持权限过滤、分类筛选（含子分类）和分页",
    responses={
        200: {"description": "成功返回文档列表"},
        403: {"description": "无权限访问"},
        500: {"description": "服务器错误"},
    },
)
def get_documents(
    knowledge_base_id: int | None = None,
    category_id: int | None = None,
    status: str | None = None,
    search: str | None = None,
    skip: int = Query(0, ge=0, description="跳过记录数"),
    limit: int = Query(100, ge=1, le=1000, description="每页记录数"),
    db: Session = Depends(get_db),
    current_user: User = Depends(require_read("knowledge_bases")),
):
    """Return a paginated list of knowledge documents.

    Filters are applied in order: tenant scoping (documents whose category
    belongs to the shared ``public`` schema or the caller's tenant schema),
    knowledge-base filter, category filter (including descendants), status,
    and title search. The heavy ``content`` column is deferred so the list
    query stays cheap.
    """
    # Function-local imports kept to match the original layout (avoids an
    # import cycle with the tenant context module).
    from sqlalchemy import text as _text

    from app.core.tenant_context import get_current_tenant_id

    query = db.query(KnowledgeDocument)

    tenant_id = get_current_tenant_id()
    if tenant_id:
        # Schema names cannot be bound as SQL parameters, so they must be
        # interpolated. int() makes injection structurally impossible even if
        # tenant_id ever arrives as an attacker-controlled string.
        schema_name = f"tenant_{int(tenant_id)}"
        rows = db.execute(
            _text(
                f"""
                SELECT c.id FROM public.knowledge_categories c
                JOIN public.knowledge_bases kb ON c.knowledge_base_id = kb.id
                WHERE kb.tenant_id = 0
                UNION
                SELECT c.id FROM {schema_name}.knowledge_categories c
                JOIN {schema_name}.knowledge_bases kb ON c.knowledge_base_id = kb.id
                """
            )
        ).fetchall()
        allowed_category_ids = [row[0] for row in rows]
        if not allowed_category_ids:
            # No visible categories for this tenant -> nothing to list.
            return {"items": [], "total": 0, "skip": skip, "limit": limit}
        # Subsequent ORM queries must resolve against the shared schema.
        db.execute(_text("SET search_path TO public"))
        query = query.filter(KnowledgeDocument.category_id.in_(allowed_category_ids))

    if knowledge_base_id and knowledge_base_id > 0:
        kb_category_ids = [
            row[0]
            for row in db.query(KnowledgeCategory.id)
            .filter(KnowledgeCategory.knowledge_base_id == knowledge_base_id)
            .all()
        ]
        if kb_category_ids:
            query = query.filter(KnowledgeDocument.category_id.in_(kb_category_ids))
        else:
            # Knowledge base has no categories: force an empty result set.
            query = query.filter(KnowledgeDocument.id == -1)

    if category_id:
        # Includes the category itself plus all of its descendants.
        all_category_ids = get_all_child_category_ids(db, category_id)
        query = query.filter(KnowledgeDocument.category_id.in_(all_category_ids))
    if status:
        query = query.filter(KnowledgeDocument.status == status)
    if search:
        query = query.filter(KnowledgeDocument.title.ilike(f"%{search}%"))

    total = query.count()
    documents = (
        query.options(
            joinedload(KnowledgeDocument.category),
            joinedload(KnowledgeDocument.tags),
            defer(KnowledgeDocument.content),  # skip large bodies in list view
        )
        .order_by(KnowledgeDocument.created_at.desc())
        .offset(skip)
        .limit(limit)
        .all()
    )
    items = [
        DocumentListResponse(
            id=doc.id,
            title=doc.title,
            summary=doc.summary,
            category_id=doc.category_id,
            category_name=doc.category.name if doc.category else None,
            author_id=doc.author_id,
            status=doc.status,
            is_vectorized=doc.is_vectorized,
            view_count=doc.view_count,
            segmentation_mode=doc.segmentation_mode,
            splitter_type=doc.splitter_type,
            chunk_size=doc.chunk_size,
            chunk_overlap=doc.chunk_overlap,
            tags=[{"id": tag.id, "name": tag.name} for tag in doc.tags] if doc.tags else [],
            created_at=doc.created_at,
            updated_at=doc.updated_at,
        )
        for doc in documents
    ]
    return {"items": items, "total": total, "skip": skip, "limit": limit}


@router.get("/documents/{document_id}", response_model=DocumentResponse)
def get_document(
    request: Request,
    document_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(require_read("knowledge_bases")),
):
    """Return a single document with full content and bump its view counter.

    Responds 404 both when the document does not exist and when it belongs to
    a knowledge base outside the caller's tenant, so cross-tenant existence
    is never leaked.
    """
    t = get_translator(request)
    document = (
        db.query(KnowledgeDocument)
        .options(joinedload(KnowledgeDocument.category))
        .filter(KnowledgeDocument.id == document_id)
        .first()
    )
    if not document:
        raise HTTPException(status_code=404, detail=t.t("document.not_found"))

    from app.core.tenant_context import get_current_tenant_id

    tenant_id = get_current_tenant_id()
    if tenant_id and document.category:
        kb = (
            db.query(KnowledgeBase)
            .filter(KnowledgeBase.id == document.category.knowledge_base_id)
            .first()
        )
        # 404 rather than 403 for cross-tenant access (no existence leak).
        if not kb or (hasattr(kb, "tenant_id") and kb.tenant_id != tenant_id):
            raise HTTPException(status_code=404, detail=t.t("document.not_found"))

    # NOTE(review): read-modify-write; concurrent requests may lose increments.
    document.view_count += 1
    db.commit()

    category_name = document.category.name if document.category else None
    # getattr with a default replaces the original mixed hasattr ternaries for
    # optional columns — identical behavior, uniform style.
    return {
        "id": document.id,
        "title": document.title,
        "content": document.content,
        "summary": document.summary,
        "category_id": document.category_id,
        "category_name": category_name,
        "author_id": document.author_id,
        "source": document.source,
        "reference_url": document.reference_url,
        "status": document.status,
        "is_public": document.is_public,
        "view_count": document.view_count,
        "is_vectorized": document.is_vectorized,
        "vector_model": document.vector_model,
        "file_type": document.file_type,
        "segmentation_mode": document.segmentation_mode,
        "chunk_size": getattr(document, "chunk_size", 1000),
        "chunk_overlap": getattr(document, "chunk_overlap", 200),
        "splitter_type": getattr(document, "splitter_type", "recursive"),
        "character_count": document.character_count,
        "recall_count": document.recall_count,
        "created_at": document.created_at,
        "updated_at": document.updated_at,
        "tax_category": getattr(document, "tax_category", None),
        "doc_number": getattr(document, "doc_number", None),
        "doc_number_year": getattr(document, "doc_number_year", None),
        "doc_number_serial": getattr(document, "doc_number_serial", None),
        "issuing_authority": getattr(document, "issuing_authority", None),
        "issue_date": getattr(document, "issue_date", None),
        "effective_date": getattr(document, "effective_date", None),
        "expire_date": getattr(document, "expire_date", None),
        "doc_status": getattr(document, "doc_status", None),
        "supersedes_doc_ids": getattr(document, "supersedes_doc_ids", None),
        "superseded_by_doc_id": getattr(document, "superseded_by_doc_id", None),
        "tax_type_tags": getattr(document, "tax_type_tags", None),
        "has_attachment": getattr(document, "has_attachment", False),
        "attachment_types": getattr(document, "attachment_types", None),
        "parse_quality_score": getattr(document, "parse_quality_score", None),
        "content_hash": getattr(document, "content_hash", None),
        "version_number": getattr(document, "version_number", None),
    }


@router.post("/documents", response_model=DocumentResponse)
@log_errors
def create_document(
    request: Request,
    document: DocumentCreate,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
    _: None = Depends(require_permission("knowledge_bases", "create")),
):
    """Create a document, its tag associations, and an initial version record.

    Rejects titles/content that look like XSS payloads, stamps the caller's
    tenant on the row (falling back to the shared tenant 0), and records
    version 1 in the history table within the same transaction.
    """
    from app.core.security_utils import check_xss

    if check_xss(document.title):
        raise HTTPException(
            status_code=400, detail="Document title contains potentially malicious content"
        )
    if document.content and check_xss(document.content):
        raise HTTPException(
            status_code=400, detail="Document content contains potentially malicious content"
        )

    doc_data = document.model_dump(exclude={"tag_ids"})
    db_document = KnowledgeDocument(**doc_data, author_id=current_user.id)
    # Tenant 0 is the shared/public tenant when the user carries none.
    db_document.tenant_id = (
        current_user.tenant_id
        if hasattr(current_user, "tenant_id") and current_user.tenant_id is not None
        else 0
    )
    db.add(db_document)
    db.flush()  # obtain db_document.id before creating dependent rows

    if document.tag_ids:
        for tag_id in document.tag_ids:
            db.add(DocumentTag(document_id=db_document.id, tag_id=tag_id))

    version = DocumentVersion(
        document_id=db_document.id,
        version_number=1,
        title=document.title,
        content=document.content,
        change_summary="初始版本",
        editor_id=current_user.id,
    )
    db.add(version)
    db.commit()
    db.refresh(db_document)
    # Fixed log field: the value is the category id, not a knowledge-base id.
    logger.bind(doc_id=db_document.id, category_id=document.category_id).info("Document created")
    return db_document


@router.put("/documents/{document_id}", response_model=DocumentResponse)
@log_errors
def update_document(
    request: Request,
    document_id: int,
    document: DocumentUpdate,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
    _: None = Depends(require_permission("knowledge_bases", "update")),
):
    """Update a document, replace its tag links, and append a version record.

    customer_user accounts may only edit their own documents. A new version
    row is appended whenever the title or content is part of the update.
    """
    db_document = db.query(KnowledgeDocument).filter(KnowledgeDocument.id == document_id).first()
    if not db_document:
        raise DocumentNotFoundError(document_id)
    if current_user.role == "customer_user" and db_document.author_id != current_user.id:
        raise InsufficientPermissionError("document", "edit")

    latest_version = (
        db.query(DocumentVersion)
        .filter(DocumentVersion.document_id == document_id)
        .order_by(DocumentVersion.version_number.desc())
        .first()
    )
    next_version = latest_version.version_number + 1 if latest_version else 1

    update_data = document.model_dump(exclude_unset=True, exclude={"tag_ids"})
    for key, value in update_data.items():
        setattr(db_document, key, value)

    if document.tag_ids is not None:
        # Full replacement: drop all existing links, then recreate.
        db.query(DocumentTag).filter(DocumentTag.document_id == document_id).delete()
        for tag_id in document.tag_ids:
            db.add(DocumentTag(document_id=document_id, tag_id=tag_id))

    # NOTE(review): truthiness check means setting title/content to "" does
    # not create a version — confirm this is intended.
    if document.title or document.content:
        version = DocumentVersion(
            document_id=document_id,
            version_number=next_version,
            title=document.title or db_document.title,
            content=document.content or db_document.content,
            change_summary="更新文档",
            editor_id=current_user.id,
        )
        db.add(version)

    db.commit()
    db.refresh(db_document)
    logger.bind(doc_id=document_id).info("Document updated")
    return db_document


@router.delete("/documents/{document_id}")
@log_errors
def delete_document(
    request: Request,
    document_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
    _: None = Depends(require_permission("knowledge_bases", "delete")),
):
    """Delete a document, best-effort purging all relational rows that reference it.

    Each dependent table is cleared in its own try/except so one failure
    (e.g. a missing table in an older schema) never blocks the delete itself.
    customer_user accounts may only delete their own documents.
    """
    t = get_translator(request)
    db_document = db.query(KnowledgeDocument).filter(KnowledgeDocument.id == document_id).first()
    if not db_document:
        raise DocumentNotFoundError(document_id)
    if current_user.role == "customer_user" and db_document.author_id != current_user.id:
        raise InsufficientPermissionError("document", "delete")

    # Capture before delete+commit: after commit the instance is expired and
    # detached, so attribute access in the log line below could raise or
    # return stale data.
    category_id = db_document.category_id

    # TODO(review): embeddings in the external vector store are not removed
    # here; only the relational metadata below is purged.
    try:
        from app.models.knowledge_base import DocumentVector

        db.query(DocumentVector).filter(DocumentVector.document_id == document_id).delete(
            synchronize_session=False
        )
    except Exception as e:
        logger.bind(doc_id=document_id).error("Failed to delete vector metadata", error=str(e))
    try:
        from app.models.knowledge_base import DocumentTag

        db.query(DocumentTag).filter(DocumentTag.document_id == document_id).delete(
            synchronize_session=False
        )
    except Exception as e:
        logger.bind(doc_id=document_id).error("Failed to delete tag associations", error=str(e))
    try:
        from app.models.knowledge_base import DocumentVersion

        db.query(DocumentVersion).filter(DocumentVersion.document_id == document_id).delete(
            synchronize_session=False
        )
    except Exception as e:
        logger.bind(doc_id=document_id).error("Failed to delete version history", error=str(e))
    try:
        from app.models.knowledge_base import DocumentMetadataValue

        db.query(DocumentMetadataValue).filter(
            DocumentMetadataValue.document_id == document_id
        ).delete(synchronize_session=False)
    except Exception as e:
        logger.bind(doc_id=document_id).error("Failed to delete metadata values", error=str(e))

    db.delete(db_document)
    db.commit()
    # Fixed log field: the value is the category id, not a knowledge-base id.
    logger.bind(doc_id=document_id, category_id=category_id).info("Document deleted")
    return {"success": True, "message": t.t("knowledge.document_deleted")}


class RevectorizeRequest(BaseModel):
    """Request body for POST /documents/{document_id}/revectorize."""

    # Full replacement text; its SHA-256 is compared against the stored
    # content_hash to decide whether re-vectorization is needed.
    new_content: str

@router.post("/documents/{document_id}/revectorize")
def revectorize_document(document_id: int, body: RevectorizeRequest, db: Session = Depends(get_db)):
    """Incrementally re-vectorize a document from caller-supplied content.

    Skips the work entirely when the SHA-256 of the new content matches the
    stored content hash. On failure the session is rolled back and a 500 is
    returned.

    NOTE(review): unlike the sibling endpoints this route has no permission
    or authentication dependency — confirm whether that is intentional.
    """
    import hashlib

    doc = db.query(KnowledgeDocument).filter(KnowledgeDocument.id == document_id).first()
    if not doc:
        raise HTTPException(status_code=404, detail="文档不存在")
    new_content = body.new_content
    if not new_content:
        return {"success": False, "error": "new_content 不能为空", "document_id": document_id}

    new_hash = hashlib.sha256(new_content.encode("utf-8")).hexdigest()
    if doc.content_hash and doc.content_hash == new_hash:
        return {
            "success": True,
            "document_id": document_id,
            "message": "内容未变化，无需重向量化",
            "unchanged": True,
        }
    try:
        from app.services.knowledge.version_diff import VersionDiffService

        svc = VersionDiffService(db)
        result = svc.update_document_vectors(
            document_id=document_id,
            new_content=new_content,
            chunk_strategy=doc.splitter_type or "tax_adaptive",
            chunk_size=doc.chunk_size or 1000,
            chunk_overlap=doc.chunk_overlap or 200,
        )
        db.commit()
        logger.bind(doc_id=document_id, status="completed").info("Document revectorized")
        return {
            "success": result.get("success", False),
            "document_id": document_id,
            "unchanged": False,
            # Diff statistics pass through verbatim when present; "unchanged"
            # from the service overrides the default above.
            **{
                k: result.get(k)
                for k in ["unchanged", "changed", "added", "removed", "total_new", "error"]
                if k in result
            },
        }
    except Exception as e:
        db.rollback()  # don't leave a half-applied vector update in the session
        logger.bind(doc_id=document_id).error("Revectorization failed", error=str(e))
        raise HTTPException(status_code=500, detail=f"增量重向量化失败: {e}") from None


def _purge_document_relations(db: Session, document_id: int) -> None:
    """Best-effort deletion of all relational rows referencing *document_id*.

    Each table is cleared independently; a failure is logged and skipped so a
    single bad table never blocks deleting the document itself.
    """
    try:
        from app.models.knowledge_base import (
            DocumentMetadataValue,
            DocumentTag,
            DocumentVector,
            DocumentVersion,
        )
    except Exception as e:
        logger.bind(doc_id=document_id).error("Failed to load relation models", error=str(e))
        return

    targets = (
        (DocumentVector, "Failed to delete vector metadata"),
        (DocumentTag, "Failed to delete tag associations"),
        (DocumentVersion, "Failed to delete version history"),
        (DocumentMetadataValue, "Failed to delete metadata values"),
    )
    for model, failure_msg in targets:
        try:
            db.query(model).filter(model.document_id == document_id).delete(
                synchronize_session=False
            )
        except Exception as e:
            logger.bind(doc_id=document_id).error(failure_msg, error=str(e))


@router.post("/documents/batch-delete")
def batch_delete_documents(
    request: Request,
    document_ids: list[int],
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_user),
    _: None = Depends(require_permission("knowledge_bases", "delete")),
):
    """Delete up to 100 documents, reporting per-id success/failure counts.

    Missing documents and documents a customer_user does not own count as
    failures instead of aborting the batch. Each document is committed
    individually so one failure cannot roll back earlier deletes.
    """
    t = get_translator(request)
    if not document_ids:
        raise HTTPException(status_code=400, detail=t.t("knowledge.no_documents_selected"))
    if len(document_ids) > 100:
        raise HTTPException(status_code=400, detail=t.t("knowledge.batch_delete_limit_exceeded"))

    deleted_count = 0
    failed_count = 0
    failed_ids = []
    for document_id in document_ids:
        try:
            db_document = (
                db.query(KnowledgeDocument).filter(KnowledgeDocument.id == document_id).first()
            )
            if not db_document or (
                current_user.role == "customer_user" and db_document.author_id != current_user.id
            ):
                failed_count += 1
                failed_ids.append(document_id)
                continue
            _purge_document_relations(db, document_id)
            db.delete(db_document)
            db.commit()
            deleted_count += 1
        except Exception as e:
            db.rollback()
            logger.bind(doc_id=document_id).error("Failed to delete document", error=str(e))
            failed_count += 1
            failed_ids.append(document_id)
    return {
        "success": True,
        "deleted_count": deleted_count,
        "failed_count": failed_count,
        "failed_ids": failed_ids,
        "message": t.t(
            "knowledge.batch_delete_completed", deleted=deleted_count, failed=failed_count
        ),
    }
