#!/usr/bin/env python3
"""
Main pipeline orchestrator for content generation.
Coordinates topic selection, article generation, fact-checking, and publishing.
"""

import json
import logging
import subprocess
import sys
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Optional

# Add project root to path
sys.path.insert(0, str(Path(__file__).parent))

from config.settings import get_settings
from database.models import (
    ArticleRepository,
    PublishedContentRepository,
    TopicRepository,
    TopicStatus,
)
from modules import (
    article_generator,
    content_validator,
    fact_checker,
    html_converter,
    internal_link_injector,
    thumbnail_generator,
    topic_selector,
    wordpress_publisher,
)

logger = logging.getLogger(__name__)


@dataclass
class PipelineResult:
    """Outcome of a single pipeline run.

    Attributes:
        success: True only when an article was generated and published.
        topic_id: DB id of the topic that was processed (if one was selected).
        article_id: DB id of the saved article (if generation got that far).
        wp_post_id: WordPress post id after publishing.
        post_url: Public URL of the published post.
        error: Human-readable failure reason for hard failures.
        skipped_reason: Reason when the run was skipped rather than failed.
        stats: Free-form per-run metrics (timings, costs, verdicts).
    """

    success: bool
    topic_id: Optional[int] = None
    article_id: Optional[int] = None
    wp_post_id: Optional[int] = None
    post_url: Optional[str] = None
    error: Optional[str] = None
    skipped_reason: Optional[str] = None
    # default_factory is the idiomatic dataclass way to get a fresh dict per
    # instance (the old `dict = None` default lied to type checkers).
    stats: dict = field(default_factory=dict)

    def __post_init__(self):
        # Kept for backward compatibility: callers may still pass stats=None
        # explicitly and expect an empty dict.
        if self.stats is None:
            self.stats = {}


def run_pipeline(max_retries: int = 3) -> PipelineResult:
    """
    Run the complete content pipeline.

    Steps:
    1. Select highest-priority pending topic
    2. Check semantic deduplication
    3. Generate article with Gemini Flash + web search
    4. Validate content (word count, error patterns)
    5. Fact-check with Gemini Pro (max 2 attempts)
    6. Generate thumbnail
    7. Publish to WordPress
    8. Update database and invalidate caches

    Args:
        max_retries: Maximum topic retries on deduplication skip

    Returns:
        PipelineResult with outcome details
    """
    settings = get_settings()
    stats = {"started_at": datetime.now().isoformat()}

    # 1. Select topic
    logger.info("Step 1: Selecting next topic")
    topic = topic_selector.get_next_topic()

    if not topic:
        logger.info("No pending topics available")
        return PipelineResult(
            success=False,
            skipped_reason="No pending topics",
            stats=stats,
        )

    logger.info(f"Selected topic: {topic.title} (priority: {topic.priority_score})")
    stats["topic_id"] = topic.id
    stats["topic_title"] = topic.title

    # 2. Semantic deduplication check
    logger.info("Step 2: Checking semantic deduplication")
    dedupe_result = topic_selector.semantic_dedupe_check(topic)
    stats["dedupe_verdict"] = dedupe_result.verdict.value

    if dedupe_result.verdict.value == "duplicate":
        logger.warning(f"Topic skipped (duplicate): {dedupe_result.reason}")
        topic_selector.mark_status(
            topic.id,
            TopicStatus.SKIPPED,
            reason=f"Duplicate: {dedupe_result.reason}",
        )

        # Try next topic (recursion depth is bounded by max_retries)
        if max_retries > 0:
            logger.info(f"Trying next topic ({max_retries} retries left)")
            return run_pipeline(max_retries - 1)

        return PipelineResult(
            success=False,
            topic_id=topic.id,
            skipped_reason="All topics are duplicates",
            stats=stats,
        )

    # Mark topic as in_progress
    topic_selector.mark_status(topic.id, TopicStatus.IN_PROGRESS)

    # 3. Generate article
    logger.info("Step 3: Generating article")
    gen_result = article_generator.generate_article(topic)
    # Track total generation spend across the initial attempt and any retry,
    # so stats and the DB record reflect what was actually spent.
    total_gen_cost = gen_result.cost_usd
    stats["generation_cost_usd"] = total_gen_cost
    stats["grounding_used"] = gen_result.grounding_used

    if not gen_result.success:
        logger.error(f"Article generation failed: {gen_result.error}")
        topic_selector.mark_status(
            topic.id, TopicStatus.FAILED, reason=f"Generation failed: {gen_result.error}"
        )
        return PipelineResult(
            success=False,
            topic_id=topic.id,
            error=gen_result.error,
            stats=stats,
        )

    logger.info(f"Generated: {gen_result.word_count} words")

    # 4. Validate content
    logger.info("Step 4: Validating content")
    validation = content_validator.validate(gen_result.content_markdown)
    stats["word_count"] = validation.word_count
    stats["validation_warnings"] = validation.warnings

    if not validation.valid:
        logger.warning(f"Validation failed: {validation.reason}, retrying generation")

        # Retry generation once; accumulate the additional cost.
        gen_result = article_generator.generate_article(topic)
        total_gen_cost += gen_result.cost_usd
        stats["generation_cost_usd"] = total_gen_cost

        # FIX: the retry result's success flag was previously never checked,
        # so a failed regeneration could crash validate() on empty content.
        if not gen_result.success:
            logger.error(f"Article generation failed: {gen_result.error}")
            topic_selector.mark_status(
                topic.id,
                TopicStatus.FAILED,
                reason=f"Generation failed: {gen_result.error}",
            )
            return PipelineResult(
                success=False,
                topic_id=topic.id,
                error=gen_result.error,
                stats=stats,
            )

        validation = content_validator.validate(gen_result.content_markdown)
        # Refresh stats so they describe the retried content, not the
        # discarded first attempt.
        stats["word_count"] = validation.word_count
        stats["validation_warnings"] = validation.warnings

        if not validation.valid:
            logger.error(f"Validation failed after retry: {validation.reason}")
            topic_selector.mark_status(
                topic.id, TopicStatus.FAILED, reason=f"Validation: {validation.reason}"
            )
            return PipelineResult(
                success=False,
                topic_id=topic.id,
                error=validation.reason,
                stats=stats,
            )

    # 5. Fact-check
    logger.info("Step 5: Fact-checking article")
    fact_passed, final_content, fact_log = fact_checker.fact_check_with_retry(
        gen_result.content_markdown,
        topic,
        max_attempts=2,
    )
    stats["fact_check_log"] = fact_log

    if not fact_passed:
        logger.error(f"Fact-check failed: {fact_log.get('final_verdict')}")
        topic_selector.mark_status(
            topic.id,
            TopicStatus.FAILED,
            reason=f"Fact-check: {fact_log.get('final_verdict')}",
        )
        return PipelineResult(
            success=False,
            topic_id=topic.id,
            error="Fact-check failed",
            stats=stats,
        )

    # 5.5. Inject internal links
    logger.info("Step 5.5: Injecting internal links")
    final_content, link_count = internal_link_injector.inject_internal_links(
        content=final_content,
        topic_keywords=topic.target_keywords,
        content_cluster=topic.content_cluster,
        exclude_wp_id=None,  # New article, no WP ID yet
        word_count=validation.word_count,
    )
    stats["internal_links_injected"] = link_count
    logger.info(f"Internal links: {link_count}")

    # 6. Generate thumbnail (non-fatal on failure)
    logger.info("Step 6: Generating thumbnail")
    thumbnail_result = thumbnail_generator.generate_thumbnail(topic)
    stats["thumbnail_provider"] = thumbnail_result.provider
    stats["thumbnail_cost_usd"] = thumbnail_result.cost_usd

    if not thumbnail_result.success:
        logger.warning(f"Thumbnail generation failed: {thumbnail_result.error}")
        # Continue without thumbnail (non-fatal)

    # 7. Convert to HTML
    logger.info("Step 7: Converting to HTML with CTA")
    content_html = html_converter.convert_with_cta(final_content, content_cluster=topic.content_cluster)

    # 8. Save article to database
    logger.info("Step 8: Saving article to database")
    article_id = ArticleRepository.insert(
        topic_id=topic.id,
        title=gen_result.title,
        slug=gen_result.slug,
        content_markdown=final_content,
        content_html=content_html,
        meta_description=gen_result.meta_description,
        word_count=validation.word_count,
        fact_check_passed=True,
        fact_check_log=json.dumps(fact_log),
        fact_check_attempts=len(fact_log.get("attempts", [])),
        # FIX: include the cost of a discarded first generation attempt too.
        generation_cost_usd=total_gen_cost + thumbnail_result.cost_usd,
    )
    stats["article_id"] = article_id

    if thumbnail_result.success:
        ArticleRepository.update_thumbnail(article_id, thumbnail_result.file_path)

    # 9. Publish to WordPress
    logger.info("Step 9: Publishing to WordPress")
    article = ArticleRepository.get_by_id(article_id)

    if settings.dry_run:
        logger.info("DRY RUN: Skipping WordPress publish")
        publish_result = wordpress_publisher.PublishResult(
            success=True, wp_post_id=0, post_url="[DRY RUN]"
        )
    else:
        publish_result = wordpress_publisher.publish_article(
            article,
            thumbnail_path=thumbnail_result.file_path if thumbnail_result.success else None,
            category_slug="blog",
        )

    if not publish_result.success:
        logger.error(f"WordPress publish failed: {publish_result.error}")
        topic_selector.mark_status(
            topic.id, TopicStatus.FAILED, reason=f"Publish: {publish_result.error}"
        )
        return PipelineResult(
            success=False,
            topic_id=topic.id,
            article_id=article_id,
            error=publish_result.error,
            stats=stats,
        )

    # 10. Update article with WP IDs
    from modules.wordpress_publisher import WordPressClient

    wp_client = WordPressClient()
    author_id = wp_client.get_next_author_id()

    ArticleRepository.update_wordpress_ids(
        article_id,
        publish_result.wp_post_id,
        publish_result.wp_media_id,
        author_id,
    )

    # 11. Add to published content for future dedupe
    # Extract additional linkable keywords from content for better cross-article linking
    enhanced_keywords = internal_link_injector.extract_linkable_keywords(
        final_content, topic.target_keywords, title=gen_result.title
    )
    PublishedContentRepository.insert(
        source="pipeline",
        title=gen_result.title,
        slug=gen_result.slug,
        summary=gen_result.meta_description,
        main_keywords=enhanced_keywords,
        wp_post_id=publish_result.wp_post_id,
        published_at=datetime.now(),
        content_cluster=topic.content_cluster,
    )

    # 12. Mark topic as published
    topic_selector.mark_status(topic.id, TopicStatus.PUBLISHED, article_id=article_id)

    # 13. Invalidate WordPress caches
    if not settings.dry_run:
        logger.info("Step 13: Invalidating WordPress caches")
        _invalidate_wp_caches()

    stats["completed_at"] = datetime.now().isoformat()
    logger.info(f"Pipeline complete: {publish_result.post_url}")

    return PipelineResult(
        success=True,
        topic_id=topic.id,
        article_id=article_id,
        wp_post_id=publish_result.wp_post_id,
        post_url=publish_result.post_url,
        stats=stats,
    )


def _invalidate_wp_caches(
    container: str = "parketry_wp",
    script_path: str = "/var/www/project/dev/cronjobs/posts/cache_post_data_high_frequency.php",
    timeout: int = 60,
) -> None:
    """Trigger WordPress cache invalidation via the container's PHP cron script.

    Best-effort: any failure (timeout, non-zero exit, missing docker binary)
    is logged as a warning and never propagated to the caller, so a cache
    refresh problem cannot fail an otherwise successful pipeline run.

    Args:
        container: Name of the WordPress docker container to exec into.
        script_path: Path (inside the container) of the cache-refresh script.
        timeout: Seconds to wait for the script before giving up.
    """
    try:
        # List form (shell=False) — no shell injection surface.
        subprocess.run(
            ["docker", "exec", container, "php", script_path],
            timeout=timeout,
            check=True,
            capture_output=True,
        )
        logger.info("WordPress caches invalidated")
    except subprocess.TimeoutExpired:
        logger.warning("Cache invalidation timed out")
    except subprocess.CalledProcessError as e:
        # capture_output=True guarantees e.stderr is bytes, so decode is safe.
        logger.warning(f"Cache invalidation failed: {e.stderr.decode()}")
    except FileNotFoundError:
        logger.warning("Docker not available for cache invalidation")


def main():
    """CLI entry point: configure logging, run one pipeline pass, return an exit code."""
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    result = run_pipeline()

    # Guard clause: report the failure first, then the happy path.
    if not result.success:
        logger.error(f"FAILED: {result.error or result.skipped_reason}")
        return 1

    logger.info(f"SUCCESS: Published article to {result.post_url}")
    return 0


# Script entry point: exit status mirrors main()'s return code (0 ok, 1 failed).
if __name__ == "__main__":
    sys.exit(main())
