#!/usr/bin/env python3
"""
Simple page inspector to understand Tokopedia page structure
"""

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time

def inspect_page_structure():
    """Inspect the structure of a Tokopedia shop/product page.

    Prompts the user for a URL, renders it in headless Chrome, parses the
    resulting HTML with BeautifulSoup, and prints a summary of elements
    that look review-related (by tag, class name, data-testid, or text
    content). Purely diagnostic: all output goes to stdout, nothing is
    returned.
    """
    url = input("Masukkan URL toko/produk Tokopedia: ").strip()
    if not url:
        print("URL tidak boleh kosong!")
        return

    def _mentions_review(value):
        """True if an attribute value contains 'review' or 'ulasan' (case-insensitive)."""
        return bool(value) and ('review' in value.lower() or 'ulasan' in value.lower())

    # Chrome options: headless, plus flags that make the automated browser
    # look more like a regular user session (Tokopedia blocks obvious bots).
    options = Options()
    options.add_argument("--start-maximized")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")
    options.add_argument("--disable-blink-features=AutomationControlled")
    options.add_experimental_option("excludeSwitches", ["enable-automation"])
    options.add_experimental_option('useAutomationExtension', False)
    # Add user agent to appear more like a real browser
    options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36")
    # Run in headless mode to avoid UI issues
    options.add_argument("--headless")

    # Initialized before the try so the finally block can tell whether
    # browser startup succeeded (webdriver.Chrome may raise).
    driver = None
    try:
        print("Membuka browser...")
        driver = webdriver.Chrome(options=options)
        driver.get(url)

        # Fixed wait for the JS-rendered content to settle.
        # NOTE(review): a WebDriverWait on a known element would be more
        # reliable than a blind sleep — confirm a stable selector first.
        print("Menunggu halaman dimuat...")
        time.sleep(5)

        soup = BeautifulSoup(driver.page_source, "html.parser")

        print("\n=== PAGE STRUCTURE ANALYSIS ===")
        print(f"Page title: {soup.title.text if soup.title else 'No title'}")

        # Tokopedia review cards are typically <article> elements.
        articles = soup.find_all('article')
        print(f"\nFound {len(articles)} article elements")

        # Elements whose class names mention reviews (English or Indonesian).
        review_elements = soup.find_all(class_=_mentions_review)
        print(f"Found {len(review_elements)} elements with 'review' or 'ulasan' in class names")

        # Elements tagged for testing hooks that mention reviews.
        data_testid_elements = soup.find_all(attrs={'data-testid': _mentions_review})
        print(f"Found {len(data_testid_elements)} elements with 'review' or 'ulasan' in data-testid")

        print("\n=== SAMPLE ARTICLE ELEMENTS ===")
        for i, article in enumerate(articles[:3]):
            print(f"\nArticle {i+1}:")
            print(f"  Class: {article.get('class', 'No class')}")
            print(f"  Text preview: {article.get_text(strip=True)[:100]}...")

        print("\n=== SAMPLE REVIEW-LIKE ELEMENTS ===")
        for i, elem in enumerate(review_elements[:3]):
            print(f"\nElement {i+1}:")
            print(f"  Tag: {elem.name}")
            print(f"  Class: {elem.get('class', 'No class')}")
            print(f"  Text preview: {elem.get_text(strip=True)[:100]}...")

        # Heuristic sweep of the raw page text for sentiment words that
        # commonly appear in Indonesian product reviews.
        print("\n=== LOOKING FOR REVIEW-LIKE TEXT ===")
        sentiment_words = ('bagus', 'jelek', 'puas', 'kecewa')
        review_lines = []
        for line in soup.get_text().split('\n'):
            stripped = line.strip()
            if len(stripped) > 20 and any(w in stripped.lower() for w in sentiment_words):
                review_lines.append(stripped)
        print(f"Found {len(review_lines)} lines that might be reviews:")
        for i, line in enumerate(review_lines[:5]):
            print(f"  {i+1}. {line[:100]}...")

    except Exception as e:
        print(f"Terjadi kesalahan: {e}")
    finally:
        # Best-effort shutdown: only if the browser actually started, and
        # never let a quit() failure mask the real error above.
        if driver is not None:
            try:
                driver.quit()
                print("Browser ditutup.")
            except Exception:
                pass

# Run the interactive inspector only when executed as a script, not on import.
if __name__ == "__main__":
    inspect_page_structure()