Add validation mode, structured logging, and CLI args to all scrapers

- Replace print() with Python logging module across all 6 scrapers
  for configurable log levels (DEBUG/INFO/WARNING/ERROR)
- Add --max-pages, --max-properties, and --log-level CLI arguments
  to each scraper via argparse for limiting scrape scope
- Add validation Make targets (validation, validation-local,
  validation-local-debug) for quick test runs with limited data
- Update run_all.sh to parse and forward CLI args to all scrapers
- Update mapa_bytu.html with latest scrape results

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
Commit 09a853aa05 (parent 5207c48890), authored by Jan Novak on 2026-02-14 23:12:59 +01:00, committed by kacerr.
9 changed files with 720 additions and 999 deletions.

View File

@@ -6,7 +6,9 @@ Výstup: byty_idnes.json
"""
from __future__ import annotations
import argparse
import json
import logging
import math
import re
import time
@@ -15,6 +17,8 @@ import urllib.parse
from html.parser import HTMLParser
from pathlib import Path
logger = logging.getLogger(__name__)
# ── Konfigurace ─────────────────────────────────────────────────────────────
MAX_PRICE = 13_500_000
@@ -51,17 +55,21 @@ def fetch_url(url: str) -> str:
"""Fetch URL and return HTML string with retry logic."""
for attempt in range(MAX_RETRIES):
try:
logger.debug(f"HTTP GET request (attempt {attempt + 1}/{MAX_RETRIES}): {url}")
logger.debug(f"Headers: {HEADERS}")
req = urllib.request.Request(url, headers=HEADERS)
resp = urllib.request.urlopen(req, timeout=30)
data = resp.read()
logger.debug(f"HTTP response: status={resp.status}, size={len(data)} bytes")
return data.decode("utf-8")
except (ConnectionResetError, ConnectionError, urllib.error.URLError,
OSError) as e:
if attempt < MAX_RETRIES - 1:
wait = (attempt + 1) * 3 # 3, 6, 9, 12s
print(f" Retry {attempt + 1}/{MAX_RETRIES} (wait {wait}s): {e}")
logger.warning(f"Connection error (retry {attempt + 1}/{MAX_RETRIES} after {wait}s): {e}")
time.sleep(wait)
else:
logger.error(f"HTTP request failed after {MAX_RETRIES} attempts: {e}", exc_info=True)
raise
@@ -269,38 +277,47 @@ def load_cache(json_path: str = "byty_idnes.json") -> dict[str, dict]:
return {}
def scrape():
def scrape(max_pages: int | None = None, max_properties: int | None = None):
cache = load_cache()
print("=" * 60)
print("Stahuji inzeráty z Reality iDNES")
print(f"Cena: do {format_price(MAX_PRICE)}")
print(f"Min. plocha: {MIN_AREA}")
print(f"Patro: od {MIN_FLOOR}. NP")
print(f"Region: Praha")
logger.info("=" * 60)
logger.info("Stahuji inzeráty z Reality iDNES")
logger.info(f"Cena: do {format_price(MAX_PRICE)}")
logger.info(f"Min. plocha: {MIN_AREA}")
logger.info(f"Patro: od {MIN_FLOOR}. NP")
logger.info(f"Region: Praha")
if cache:
print(f"Cache: {len(cache)} bytů z minulého běhu")
print("=" * 60)
logger.info(f"Cache: {len(cache)} bytů z minulého běhu")
if max_pages:
logger.info(f"Max. stran: {max_pages}")
if max_properties:
logger.info(f"Max. bytů: {max_properties}")
logger.info("=" * 60)
# Step 1: Fetch listing pages
print("\nFáze 1: Stahování seznamu inzerátů...")
logger.info("\nFáze 1: Stahování seznamu inzerátů...")
all_listings = {} # id -> listing dict
page = 0
total = None
while True:
if max_pages and page >= max_pages:
logger.debug(f"Max pages limit reached: {max_pages}")
break
url = build_list_url(page)
print(f" Strana {page + 1} ...")
logger.info(f"Strana {page + 1} ...")
html = fetch_url(url)
if total is None:
total = parse_total_count(html)
total_pages = math.ceil(total / PER_PAGE) if total > 0 else 1
print(f" → Celkem {total} inzerátů, ~{total_pages} stran")
logger.info(f"→ Celkem {total} inzerátů, ~{total_pages} stran")
listings = parse_listings(html)
logger.debug(f"Page {page}: found {len(listings)} listings")
if not listings:
logger.debug(f"No listings found on page {page}, stopping")
break
for item in listings:
@@ -313,7 +330,7 @@ def scrape():
break
time.sleep(1.0)
print(f"\n Staženo: {len(all_listings)} unikátních inzerátů")
logger.info(f"\nStaženo: {len(all_listings)} unikátních inzerátů")
# Step 2: Pre-filter by price and area from list data
pre_filtered = []
@@ -322,40 +339,49 @@ def scrape():
excluded_disp = 0
for item in all_listings.values():
item_id = item["id"]
if item["price"] <= 0 or item["price"] > MAX_PRICE:
excluded_price += 1
logger.debug(f"Filter: id={item_id} - excluded (price {item['price']})")
continue
if item["area"] is not None and item["area"] < MIN_AREA:
excluded_area += 1
logger.debug(f"Filter: id={item_id} - excluded (area {item['area']} m²)")
continue
if item["disposition"] == "?":
excluded_disp += 1
logger.debug(f"Filter: id={item_id} - excluded (unknown disposition)")
continue
pre_filtered.append(item)
print(f"\nPo předfiltraci:")
print(f" Vyloučeno (cena): {excluded_price}")
print(f" Vyloučeno (plocha): {excluded_area}")
print(f" Vyloučeno (dispozice): {excluded_disp}")
print(f" Zbývá: {len(pre_filtered)}")
logger.info(f"\nPo předfiltraci:")
logger.info(f" Vyloučeno (cena): {excluded_price}")
logger.info(f" Vyloučeno (plocha): {excluded_area}")
logger.info(f" Vyloučeno (dispozice): {excluded_disp}")
logger.info(f" Zbývá: {len(pre_filtered)}")
# Step 3: Fetch details for GPS, floor, construction
print(f"\nFáze 2: Stahování detailů ({len(pre_filtered)} bytů)...")
logger.info(f"\nFáze 2: Stahování detailů ({len(pre_filtered)} bytů)...")
results = []
excluded_panel = 0
excluded_floor = 0
excluded_no_gps = 0
excluded_detail = 0
cache_hits = 0
properties_fetched = 0
for i, item in enumerate(pre_filtered):
if max_properties and properties_fetched >= max_properties:
logger.debug(f"Max properties limit reached: {max_properties}")
break
# Check cache — if hash_id exists and price unchanged, reuse
cached = cache.get(str(item["id"]))
if cached and cached.get("price") == item["price"]:
cache_hits += 1
logger.debug(f"Cache hit for id={item['id']}")
results.append(cached)
continue
@@ -365,34 +391,39 @@ def scrape():
try:
html = fetch_url(url)
except Exception as e:
print(f" Warning: detail failed for {item['id']}: {e}")
excluded_detail += 1
logger.warning(f"Detail failed for id={item['id']}: {e}")
continue
detail = parse_detail(html)
logger.debug(f"Detail parsed for id={item['id']}: lat={detail.get('lat')}, lon={detail.get('lon')}, floor={detail.get('floor')}")
# Must have GPS
if not detail.get("lat") or not detail.get("lon"):
excluded_no_gps += 1
logger.debug(f"Filter: id={item['id']} - excluded (no GPS)")
continue
# Check construction — exclude panel
construction = detail.get("construction", "")
if "panel" in construction:
excluded_panel += 1
print(f" ✗ Vyloučen {item['id'][:12]}...: panel ({construction})")
logger.debug(f"Filter: id={item['id']} - excluded (panel construction)")
logger.info(f"✗ Vyloučen {item['id'][:12]}...: panel ({construction})")
continue
# Check for sídliště in construction/description
if "sídliště" in construction or "sidliste" in construction:
excluded_panel += 1
print(f" ✗ Vyloučen {item['id'][:12]}...: sídliště")
logger.debug(f"Filter: id={item['id']} - excluded (housing estate)")
logger.info(f"✗ Vyloučen {item['id'][:12]}...: sídliště")
continue
# Check floor
floor = detail.get("floor")
if floor is not None and floor < MIN_FLOOR:
excluded_floor += 1
logger.debug(f"Filter: id={item['id']} - excluded (floor {floor})")
continue
# Map construction to Czech label
@@ -429,27 +460,44 @@ def scrape():
"image": "",
}
results.append(result)
properties_fetched += 1
if (i + 1) % 20 == 0:
print(f" Zpracováno {i + 1}/{len(pre_filtered)} ...")
logger.info(f"Zpracováno {i + 1}/{len(pre_filtered)} ...")
print(f"\n{'=' * 60}")
print(f"Výsledky Reality iDNES:")
print(f" Předfiltrováno: {len(pre_filtered)}")
print(f" Z cache (přeskočeno): {cache_hits}")
print(f" Vyloučeno (panel/síd): {excluded_panel}")
print(f" Vyloučeno (patro): {excluded_floor}")
print(f" Vyloučeno (bez GPS): {excluded_no_gps}")
print(f" Vyloučeno (bez detailu): {excluded_detail}")
print(f" ✓ Vyhovující byty: {len(results)}")
print(f"{'=' * 60}")
logger.info(f"\n{'=' * 60}")
logger.info(f"Výsledky Reality iDNES:")
logger.info(f" Předfiltrováno: {len(pre_filtered)}")
logger.info(f" Z cache (přeskočeno): {cache_hits}")
logger.info(f" Vyloučeno (panel/síd): {excluded_panel}")
logger.info(f" Vyloučeno (patro): {excluded_floor}")
logger.info(f" Vyloučeno (bez GPS): {excluded_no_gps}")
logger.info(f" Vyloučeno (bez detailu): {excluded_detail}")
logger.info(f" ✓ Vyhovující byty: {len(results)}")
logger.info(f"{'=' * 60}")
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Scrape apartments from Reality iDNES")
parser.add_argument("--max-pages", type=int, default=None,
help="Maximum number of listing pages to scrape")
parser.add_argument("--max-properties", type=int, default=None,
help="Maximum number of properties to fetch details for")
parser.add_argument("--log-level", type=str, default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR"],
help="Logging level (default: INFO)")
args = parser.parse_args()
# Configure logging
logging.basicConfig(
level=getattr(logging, args.log_level),
format="[%(levelname)s] %(asctime)s - %(name)s - %(message)s",
handlers=[logging.StreamHandler()]
)
start = time.time()
estates = scrape()
estates = scrape(max_pages=args.max_pages, max_properties=args.max_properties)
if estates:
json_path = Path("byty_idnes.json")
@@ -458,7 +506,7 @@ if __name__ == "__main__":
encoding="utf-8",
)
elapsed = time.time() - start
print(f"\n✓ Data uložena: {json_path.resolve()}")
print(f"⏱ Celkový čas: {elapsed:.0f} s")
logger.info(f"\n✓ Data uložena: {json_path.resolve()}")
logger.info(f"⏱ Celkový čas: {elapsed:.0f} s")
else:
print("\nŽádné byty z Reality iDNES neodpovídají kritériím :(")
logger.info("\nŽádné byty z Reality iDNES neodpovídají kritériím :(")