diff --git a/.env.example b/.env.example
index e39fe6a..e5be54c 100644
--- a/.env.example
+++ b/.env.example
@@ -39,6 +39,7 @@ ENABLE_PWA=0 # dockerhub: ENABLE_PWA="0"
ENABLE_PRESETS=0 # dockerhub: ENABLE_PRESETS="0"
WEB_VIRTUALIZE=1 # dockerhub: WEB_VIRTUALIZE="1"
ALLOW_MUST_HAVES=1 # dockerhub: ALLOW_MUST_HAVES="1"
+WEB_THEME_PICKER_DIAGNOSTICS=0 # 1=enable uncapped synergies, diagnostics fields & /themes/metrics (dev only)
############################
# Automation & Performance (Web)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 31097aa..34dcb2b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -47,6 +47,10 @@ jobs:
run: |
python code/scripts/validate_theme_catalog.py --strict-alias
+ - name: Fast path catalog presence & hash validation
+ run: |
+ python code/scripts/validate_theme_fast_path.py --strict-warn
+
- name: Fast determinism tests (random subset)
env:
CSV_FILES_DIR: csv_files/testdata
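The new CI step gates on `validate_theme_fast_path.py`; its exit codes are documented in the script itself (0 ok, 1 structural failure/missing file, 2 warnings promoted by `--strict-warn`). A minimal local equivalent of the gate, sketched in Python rather than workflow YAML:

```python
# Sketch: run the same fast-path gate locally; exit codes per the validator's docstring.
import subprocess
import sys

rc = subprocess.run(
    [sys.executable, "code/scripts/validate_theme_fast_path.py", "--strict-warn"]
).returncode
if rc != 0:
    raise SystemExit(f"fast path validation failed (exit code {rc})")
```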
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 77fbabc..0de0050 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,9 +13,35 @@ This format follows Keep a Changelog principles and aims for Semantic Versioning
- Link PRs/issues inline when helpful, e.g., (#123) or [#123]. Reference-style links at the bottom are encouraged for readability.
## [Unreleased]
+### Added
+- ETag header for basic client-side caching of catalog fragments.
+- Theme catalog performance optimizations: precomputed summary maps, lowercase search haystacks, memoized filtered slug cache (keyed by `(etag, params)`) for sub‑50ms warm queries.
+- Theme preview endpoint: `GET /themes/api/theme/{id}/preview` (and HTML fragment) returning representative sample (curated examples, curated synergy examples, heuristic roles: payoff / enabler / support / wildcard / synthetic).
+- Commander bias heuristics (color identity restriction, diminishing synergy overlap bonus, direct theme match bonus).
+- In‑memory TTL cache (default 600s) for previews with build time tracking.
+- Metrics endpoint `GET /themes/metrics` (diagnostics gated) exposing preview & catalog counters, cache stats, percentile build times.
+- Governance metrics: `example_enforcement_active`, `example_enforce_threshold_pct` surfaced once curated coverage passes threshold (default 90%).
+- Skeleton loading states for picker list, preview modal, and initial shell.
+- Diagnostics flag `WEB_THEME_PICKER_DIAGNOSTICS=1` enabling fallback description flag, editorial quality badges, uncapped synergy toggle, YAML fetch, metrics endpoint.
+- Cache bust hooks on catalog refresh & tagging completion clearing filter & preview caches (metrics include `preview_last_bust_at`).
+- Optional filter cache prewarm (`WEB_THEME_FILTER_PREWARM=1`) priming common filter combinations; metrics include `filter_prewarmed`.
+- Preview modal UX: role chips, condensed reasons line, hover tooltip with multiline heuristic reasons, export bar (CSV/JSON) honoring curated-only toggle.
+- Server authoritative mana & color identity ingestion (exposes `mana_cost`, `color_identity_list`, `pip_colors`) replacing client-side parsing.
+
+### Changed
+- Picker list & API use optimized fast filtering path (`filter_slugs_fast`) replacing per-request linear scans.
+- Preview sampling: curated examples pinned first, diversity quotas (~40% payoff / 40% enabler+support / 20% wildcard), synthetic placeholders only if underfilled.
+- Sampling refinements: rarity diminishing weight, splash leniency (single off-color allowance with penalty for 4–5 color commanders), role saturation penalty, refined commander overlap scaling curve.
+- Hover / DFC UX unified: single hover panel, overlay flip control (keyboard + persisted face), enlarged thumbnails (110px→165px→230px), activation limited to thumbnails.
+- Removed legacy client-side mana & color identity parsers (now server authoritative fields included in preview items and export endpoints).
+
+### Fixed
+- Removed redundant template environment instantiation causing inconsistent navigation state.
+- Ensured preview cache key includes catalog ETag to prevent stale sample reuse after catalog reload.
+- Explicit cache bust after tagging/catalog rebuild prevents stale preview exposure.
### Editorial / Themes
-- Enforce minimum example_commanders threshold (>=5) in CI (Phase D close-out). Lint now fails builds when a non-alias theme drops below threshold.
+- Enforce minimum `example_commanders` threshold (>=5) in CI; lint fails builds when a non-alias theme drops below threshold.
- Added enforcement test `test_theme_editorial_min_examples_enforced.py` to guard regression.
- Governance workflow updated to pass `--enforce-min-examples` and set `EDITORIAL_MIN_EXAMPLES_ENFORCE=1`.
- Clarified lint script docstring and behavior around enforced minimums.
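Several of the Added items above hinge on the same caching pattern: a filter cache keyed by `(etag, params)` with TTL semantics for previews, busted on catalog refresh or tagging completion. A minimal sketch of that pattern (names such as `FilterCache` are illustrative, not the project's actual `filter_slugs_fast` internals):

```python
# Sketch of an (etag, params)-keyed filter cache with TTL semantics; illustrative only.
import time
from typing import Any, Callable, Dict, List, Tuple

class FilterCache:
    def __init__(self, ttl_seconds: float = 600.0):
        self.ttl = ttl_seconds
        self._store: Dict[Tuple[Any, ...], Tuple[float, List[str]]] = {}

    def get_or_compute(self, etag: str, params: Tuple[Any, ...], compute: Callable[[], List[str]]) -> List[str]:
        key = (etag,) + params              # ETag in the key: entries go stale automatically on catalog reload
        hit = self._store.get(key)
        if hit and (time.time() - hit[0]) < self.ttl:
            return hit[1]                   # warm path: no re-filtering
        slugs = compute()                   # cold path: run the real filter once
        self._store[key] = (time.time(), slugs)
        return slugs

    def bust(self) -> None:
        self._store.clear()                 # hooked to catalog refresh / tagging completion
```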
diff --git a/README.md b/README.md
index 09b2718..8489600 100644
Binary files a/README.md and b/README.md differ
diff --git a/RELEASE_NOTES_TEMPLATE.md b/RELEASE_NOTES_TEMPLATE.md
index 8c996f0..b049384 100644
--- a/RELEASE_NOTES_TEMPLATE.md
+++ b/RELEASE_NOTES_TEMPLATE.md
@@ -3,17 +3,30 @@
## Unreleased (Draft)
### Added
-- Editorial duplication suppression for example cards: `--common-card-threshold` (default 0.18) and `--print-dup-metrics` flags in `synergy_promote_fill.py` to reduce over-represented staples and surface diverse thematic examples.
-- Optional `description_fallback_summary` block (enabled via `EDITORIAL_INCLUDE_FALLBACK_SUMMARY=1`) capturing specialization KPIs: generic vs specialized description counts and top generic holdouts.
+- Theme picker performance: precomputed summary projections + lowercase haystacks and memoized filtered slug cache (keyed by (etag, q, archetype, bucket, colors)) for sub‑50ms typical list queries on warm path.
+- Skeleton loading UI for theme picker list, preview modal, and initial shell.
+- Theme preview endpoint (`/themes/api/theme/{id}/preview` + HTML fragment) returning representative sample with roles (payoff/enabler/support/wildcard/example/curated_synergy/synthetic).
+- Commander bias heuristics in preview sampling (color identity filtering + overlap/theme bonuses) for context-aware suggestions.
+- In‑memory TTL (600s) preview cache with metrics (requests, cache hits, average build ms) exposed at diagnostics endpoint.
+- Web UI: Double-faced card (DFC) hover support with single-image overlay flip control (top-left button, keyboard (Enter/Space/F), aria-live), persisted face (localStorage), and immediate refresh post-flip.
+- Diagnostics flag `WEB_THEME_PICKER_DIAGNOSTICS=1` gating fallback description flag, editorial quality badges, uncapped synergy lists, raw YAML fetch, and metrics endpoint (`/themes/metrics`).
+- Catalog & preview metrics endpoint combining filter + preview counters & cache stats.
+- Performance headers on list & API responses: `X-ThemeCatalog-Filter-Duration-ms` and `ETag` for conditional requests.
+- Cache bust hooks tied to catalog refresh & tagging completion clear filter/preview caches (metrics now include last bust timestamps).
+- Governance metrics: `example_enforcement_active`, `example_enforce_threshold_pct` (threshold default 90%) signal when curated coverage enforcement is active.
+- Server authoritative mana & color identity fields (`mana_cost`, `color_identity_list`, `pip_colors`) included in preview/export; legacy client parsers removed.
### Changed
-- Terminology migration: `provenance` renamed to `metadata_info` across catalog JSON, per-theme YAML, models, and tests. Builder writes `metadata_info`; legacy `provenance` key still accepted temporarily.
+- Preview assembly now pins curated `example_cards` then `synergy_example_cards` before heuristic sampling with diversity quotas (~40% payoff, 40% enabler/support, 20% wildcard) and synthetic placeholders only when underfilled.
+- List & API filtering route migrated to optimized path avoiding repeated concatenation / casefolding work each request.
+- Hover system consolidated to one global panel; removed fragment-specific duplicate & legacy large-image hover. Thumbnails enlarged & unified (110px → 165px → 230px). Hover activation limited to thumbnails; stability improved (no dismissal over flip control); DFC markup simplified to a single `<img>` with opacity transition.
### Deprecated
-- Legacy `provenance` key retained as read-only alias; warning emitted if both keys present (suppress via `SUPPRESS_PROVENANCE_DEPRECATION=1`). Planned removal: v2.4.0.
+- (None new)
### Fixed
-- Schema evolution adjustments to accept per-theme `metadata_info` and optional fallback summary without triggering validation failures.
+- Resolved duplicate template environment instantiation causing inconsistent navigation globals in picker fragments.
+- Ensured preview cache key includes catalog ETag preventing stale samples after catalog reload.
---
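The performance headers item above implies standard HTTP conditional requests; a hedged stdlib sketch of a client that reuses its cached body when the server answers 304 (header names come from the notes, the client itself is illustrative):

```python
# Sketch: conditional fetch honoring the ETag header described above (illustrative client).
import urllib.error
import urllib.request

def fetch_theme_list(url: str, cached_etag: str | None = None):
    headers = {"Accept": "application/json"}
    if cached_etag:
        headers["If-None-Match"] = cached_etag     # ask the server to skip the body if unchanged
    req = urllib.request.Request(url, headers=headers)
    try:
        with urllib.request.urlopen(req, timeout=10) as resp:
            return resp.status, resp.headers.get("ETag"), resp.read().decode("utf-8")
    except urllib.error.HTTPError as e:
        if e.code == 304:                          # Not Modified: caller reuses its cached body
            return 304, cached_etag, None
        raise
```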
diff --git a/code/scripts/build_theme_catalog.py b/code/scripts/build_theme_catalog.py
index a76dcdd..8a30c00 100644
--- a/code/scripts/build_theme_catalog.py
+++ b/code/scripts/build_theme_catalog.py
@@ -794,13 +794,40 @@ def build_catalog(limit: int, verbose: bool) -> Dict[str, Any]:
entries.append(entry)
# Renamed from 'provenance' to 'metadata_info' (migration phase)
+ # Compute deterministic hash of YAML catalog + synergy_cap for drift detection
+ import hashlib as _hashlib # local import to avoid top-level cost
+ def _catalog_hash() -> str:
+ h = _hashlib.sha256()
+ # Stable ordering: sort by display_name then key ordering inside dict for a subset of stable fields
+ for name in sorted(yaml_catalog.keys()):
+ yobj = yaml_catalog[name]
+ try:
+ # Compose a tuple of fields that should reflect editorial drift
+ payload = (
+ getattr(yobj, 'id', ''),
+ getattr(yobj, 'display_name', ''),
+ tuple(getattr(yobj, 'curated_synergies', []) or []),
+ tuple(getattr(yobj, 'enforced_synergies', []) or []),
+ tuple(getattr(yobj, 'example_commanders', []) or []),
+ tuple(getattr(yobj, 'example_cards', []) or []),
+ getattr(yobj, 'deck_archetype', None),
+ getattr(yobj, 'popularity_hint', None),
+ getattr(yobj, 'description', None),
+ getattr(yobj, 'editorial_quality', None),
+ )
+ h.update(repr(payload).encode('utf-8'))
+ except Exception:
+ continue
+ h.update(str(synergy_cap).encode('utf-8'))
+ return h.hexdigest()
metadata_info = {
'mode': 'merge',
'generated_at': time.strftime('%Y-%m-%dT%H:%M:%S'),
'curated_yaml_files': len(yaml_catalog),
'synergy_cap': synergy_cap,
'inference': 'pmi',
- 'version': 'phase-b-merge-v1'
+ 'version': 'phase-b-merge-v1',
+ 'catalog_hash': _catalog_hash(),
}
# Optional popularity analytics export for Phase D metrics collection
if os.environ.get('EDITORIAL_POP_EXPORT'):
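`_catalog_hash` above folds a stable, sorted tuple of editorial fields plus `synergy_cap` into SHA-256 so downstream tools can detect drift. A simplified sketch of the comparison side (it hashes only a subset of the real fields; helper names are hypothetical):

```python
# Sketch: drift check comparing a freshly computed hash to the one stored in metadata_info.
import hashlib

def compute_hash(themes: dict, synergy_cap: int) -> str:
    h = hashlib.sha256()
    for name in sorted(themes):                       # stable ordering, as in _catalog_hash above
        t = themes[name]
        payload = (t.get("id", ""), t.get("display_name", ""), tuple(t.get("example_cards", [])))
        h.update(repr(payload).encode("utf-8"))
    h.update(str(synergy_cap).encode("utf-8"))
    return h.hexdigest()

def has_drifted(themes: dict, synergy_cap: int, stored_hash: str) -> bool:
    return compute_hash(themes, synergy_cap) != stored_hash
```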
diff --git a/code/scripts/preview_metrics_snapshot.py b/code/scripts/preview_metrics_snapshot.py
new file mode 100644
index 0000000..ba54bba
--- /dev/null
+++ b/code/scripts/preview_metrics_snapshot.py
@@ -0,0 +1,105 @@
+"""CLI utility: snapshot preview metrics and emit summary/top slow themes.
+
+Usage (from repo root virtualenv):
+ python -m code.scripts.preview_metrics_snapshot --limit 10 --output logs/preview_metrics_snapshot.json
+
+Fetches /themes/metrics (requires WEB_THEME_PICKER_DIAGNOSTICS=1) and writes a compact JSON plus
+human-readable summary to stdout.
+"""
+from __future__ import annotations
+
+import argparse
+import json
+import sys
+import time
+from pathlib import Path
+from typing import Any, Dict
+
+import urllib.request
+import urllib.error
+
+DEFAULT_URL = "http://localhost:8000/themes/metrics"
+
+
+def fetch_metrics(url: str) -> Dict[str, Any]:
+ req = urllib.request.Request(url, headers={"Accept": "application/json"})
+ with urllib.request.urlopen(req, timeout=10) as resp: # nosec B310 (local trusted)
+ data = resp.read().decode("utf-8", "replace")
+ try:
+ return json.loads(data) # type: ignore[return-value]
+ except json.JSONDecodeError as e: # pragma: no cover - unlikely if server OK
+ raise SystemExit(f"Invalid JSON from metrics endpoint: {e}\nRaw: {data[:400]}")
+
+
+def summarize(metrics: Dict[str, Any], top_n: int) -> Dict[str, Any]:
+ preview = (metrics.get("preview") or {}) if isinstance(metrics, dict) else {}
+ per_theme = preview.get("per_theme") or {}
+ # Compute top slow themes by avg_ms
+ items = []
+ for slug, info in per_theme.items():
+ if not isinstance(info, dict):
+ continue
+ avg = info.get("avg_ms")
+ if isinstance(avg, (int, float)):
+ items.append((slug, float(avg), info))
+ items.sort(key=lambda x: x[1], reverse=True)
+ top = items[:top_n]
+ return {
+ "preview_requests": preview.get("preview_requests"),
+ "preview_cache_hits": preview.get("preview_cache_hits"),
+ "preview_avg_build_ms": preview.get("preview_avg_build_ms"),
+ "preview_p95_build_ms": preview.get("preview_p95_build_ms"),
+ "preview_ttl_seconds": preview.get("preview_ttl_seconds"),
+ "editorial_curated_vs_sampled_pct": preview.get("editorial_curated_vs_sampled_pct"),
+ "top_slowest": [
+ {
+ "slug": slug,
+ "avg_ms": avg,
+ "p95_ms": info.get("p95_ms"),
+ "builds": info.get("builds"),
+ "requests": info.get("requests"),
+ "avg_curated_pct": info.get("avg_curated_pct"),
+ }
+ for slug, avg, info in top
+ ],
+ }
+
+
+def main(argv: list[str]) -> int:
+ ap = argparse.ArgumentParser(description="Snapshot preview metrics")
+ ap.add_argument("--url", default=DEFAULT_URL, help="Metrics endpoint URL (default: %(default)s)")
+ ap.add_argument("--limit", type=int, default=10, help="Top N slow themes to include (default: %(default)s)")
+ ap.add_argument("--output", type=Path, help="Optional output JSON file for snapshot")
+ ap.add_argument("--quiet", action="store_true", help="Suppress stdout summary (still writes file if --output)")
+ args = ap.parse_args(argv)
+
+ try:
+ raw = fetch_metrics(args.url)
+ except urllib.error.URLError as e:
+ print(f"ERROR: Failed fetching metrics endpoint: {e}", file=sys.stderr)
+ return 2
+
+ summary = summarize(raw, args.limit)
+ snapshot = {
+ "captured_at": int(time.time()),
+ "source": args.url,
+ "summary": summary,
+ }
+
+ if args.output:
+ try:
+ args.output.parent.mkdir(parents=True, exist_ok=True)
+ args.output.write_text(json.dumps(snapshot, indent=2, sort_keys=True), encoding="utf-8")
+ except Exception as e: # pragma: no cover
+ print(f"ERROR: writing snapshot file failed: {e}", file=sys.stderr)
+ return 3
+
+ if not args.quiet:
+ print("Preview Metrics Snapshot:")
+ print(json.dumps(summary, indent=2))
+
+ return 0
+
+
+if __name__ == "__main__": # pragma: no cover
+ raise SystemExit(main(sys.argv[1:]))
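`summarize()` operates on plain dicts, so it can be exercised without a running server. A quick illustrative call with a synthetic metrics payload (all numbers fabricated; assumes `code.scripts` is importable as a package, as the docstring's `-m` invocation implies):

```python
# Example: calling summarize() directly on a synthetic metrics dict (fabricated numbers).
from code.scripts.preview_metrics_snapshot import summarize

fake_metrics = {
    "preview": {
        "preview_requests": 42,
        "preview_cache_hits": 30,
        "preview_avg_build_ms": 18.5,
        "preview_p95_build_ms": 41.0,
        "per_theme": {
            "aggro": {"avg_ms": 12.0, "p95_ms": 20.0, "builds": 3, "requests": 10},
            "blink": {"avg_ms": 35.0, "p95_ms": 60.0, "builds": 2, "requests": 4},
        },
    }
}
summary = summarize(fake_metrics, top_n=1)
assert summary["top_slowest"][0]["slug"] == "blink"   # slowest theme surfaces first
```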
diff --git a/code/scripts/validate_theme_fast_path.py b/code/scripts/validate_theme_fast_path.py
new file mode 100644
index 0000000..0987861
--- /dev/null
+++ b/code/scripts/validate_theme_fast_path.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+"""Fast path theme catalog presence & schema sanity validator.
+
+Checks:
+1. theme_list.json exists.
+2. Loads JSON and ensures top-level keys present: themes (list), metadata_info (dict).
+3. Basic field contract for each theme: id, theme, synergies (list), description.
+4. Enforces presence of catalog_hash inside metadata_info for drift detection.
+5. Optionally validates against Pydantic models if available (best effort).
+Exit codes:
+ 0 success
+ 1 structural failure / missing file
+ 2 partial validation warnings elevated via --strict
+"""
+from __future__ import annotations
+import sys
+import json
+import argparse
+import pathlib
+import typing as t
+
+THEME_LIST_PATH = pathlib.Path('config/themes/theme_list.json')
+
+class Problem:
+ def __init__(self, level: str, message: str):
+ self.level = level
+ self.message = message
+ def __repr__(self):
+ return f"{self.level.upper()}: {self.message}"
+
+def load_json(path: pathlib.Path) -> t.Any:
+ try:
+ return json.loads(path.read_text(encoding='utf-8') or '{}')
+ except FileNotFoundError:
+ raise
+ except Exception as e: # pragma: no cover
+ raise RuntimeError(f"parse_error: {e}")
+
+def validate(data: t.Any) -> list[Problem]:
+ probs: list[Problem] = []
+ if not isinstance(data, dict):
+ probs.append(Problem('error','top-level not an object'))
+ return probs
+ themes = data.get('themes')
+ if not isinstance(themes, list) or not themes:
+ probs.append(Problem('error','themes list missing or empty'))
+ meta = data.get('metadata_info')
+ if not isinstance(meta, dict):
+ probs.append(Problem('error','metadata_info missing or not object'))
+ else:
+ if not meta.get('catalog_hash'):
+ probs.append(Problem('error','metadata_info.catalog_hash missing'))
+ if not meta.get('generated_at'):
+ probs.append(Problem('warn','metadata_info.generated_at missing'))
+ # Per theme spot check (limit to first 50 to keep CI snappy)
+ for i, th in enumerate(themes[:50] if isinstance(themes, list) else []):
+ if not isinstance(th, dict):
+ probs.append(Problem('error', f'theme[{i}] not object'))
+ continue
+ if not th.get('id'):
+ probs.append(Problem('error', f'theme[{i}] id missing'))
+ if not th.get('theme'):
+ probs.append(Problem('error', f'theme[{i}] theme missing'))
+ syns = th.get('synergies')
+ if not isinstance(syns, list) or not syns:
+ probs.append(Problem('warn', f'theme[{i}] synergies empty or not list'))
+ if 'description' not in th:
+ probs.append(Problem('warn', f'theme[{i}] description missing'))
+ return probs
+
+def main(argv: list[str]) -> int:
+ ap = argparse.ArgumentParser(description='Validate fast path theme catalog build presence & schema.')
+ ap.add_argument('--strict-warn', action='store_true', help='Promote warnings to errors (fail CI).')
+ args = ap.parse_args(argv)
+ if not THEME_LIST_PATH.exists():
+ print('ERROR: theme_list.json missing at expected path.', file=sys.stderr)
+ return 1
+ try:
+ data = load_json(THEME_LIST_PATH)
+ except FileNotFoundError:
+ print('ERROR: theme_list.json missing.', file=sys.stderr)
+ return 1
+ except Exception as e:
+ print(f'ERROR: failed parsing theme_list.json: {e}', file=sys.stderr)
+ return 1
+ problems = validate(data)
+ errors = [p for p in problems if p.level=='error']
+ warns = [p for p in problems if p.level=='warn']
+ for p in problems:
+ stream = sys.stderr if p.level!='info' else sys.stdout
+ print(repr(p), file=stream)
+ if errors:
+ return 1
+ if args.strict_warn and warns:
+ return 2
+ print(f"Fast path validation ok: {len(errors)} errors, {len(warns)} warnings. Checked {min(len(data.get('themes', [])),50)} themes.")
+ return 0
+
+if __name__ == '__main__':
+ raise SystemExit(main(sys.argv[1:]))
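`validate()` is likewise pure-data, so the field contract can be checked without touching `config/themes/theme_list.json`. An illustrative check with a minimal in-memory catalog (contents made up; assumes the scripts directory is importable as a package):

```python
# Example: running validate() on a minimal in-memory catalog dict.
from code.scripts.validate_theme_fast_path import validate

ok_doc = {
    "themes": [{"id": "aggro", "theme": "Aggro", "synergies": ["Tokens"], "description": "Attack early."}],
    "metadata_info": {"catalog_hash": "deadbeef", "generated_at": "2025-09-20T00:00:00"},
}
bad_doc = {"themes": [], "metadata_info": {}}

assert not [p for p in validate(ok_doc) if p.level == "error"]
assert [p for p in validate(bad_doc) if p.level == "error"]   # empty themes + missing catalog_hash
```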
diff --git a/code/scripts/warm_preview_traffic.py b/code/scripts/warm_preview_traffic.py
new file mode 100644
index 0000000..0f54c73
--- /dev/null
+++ b/code/scripts/warm_preview_traffic.py
@@ -0,0 +1,91 @@
+"""Generate warm preview traffic to populate theme preview cache & metrics.
+
+Usage:
+ python -m code.scripts.warm_preview_traffic --count 25 --repeats 2 \
+ --base-url http://localhost:8000 --delay 0.05
+
+Requirements:
+ - FastAPI server running locally exposing /themes endpoints
+ - WEB_THEME_PICKER_DIAGNOSTICS=1 so /themes/metrics is accessible
+
+Strategy:
+ 1. Fetch /themes/fragment/list?limit=COUNT to obtain HTML table.
+ 2. Extract theme slugs via regex on data-theme-id attributes.
+ 3. Issue REPEATS preview fragment requests per slug in order.
+ 4. Print simple timing / status summary.
+
+This script intentionally uses stdlib only (urllib, re, time) to avoid extra deps.
+"""
+from __future__ import annotations
+
+import argparse
+import re
+import time
+import urllib.request
+import urllib.error
+from typing import List
+
+LIST_PATH = "/themes/fragment/list"
+PREVIEW_PATH = "/themes/fragment/preview/{slug}"
+
+
+def fetch(url: str) -> str:
+ req = urllib.request.Request(url, headers={"User-Agent": "warm-preview/1"})
+ with urllib.request.urlopen(req, timeout=15) as resp: # nosec B310 (local trusted)
+ return resp.read().decode("utf-8", "replace")
+
+
+def extract_slugs(html: str, limit: int) -> List[str]:
+ slugs = []
+ for m in re.finditer(r'data-theme-id="([^"]+)"', html):
+ s = m.group(1).strip()
+ if s and s not in slugs:
+ slugs.append(s)
+ if len(slugs) >= limit:
+ break
+ return slugs
+
+
+def warm(base_url: str, count: int, repeats: int, delay: float) -> None:
+ list_url = f"{base_url}{LIST_PATH}?limit={count}&offset=0"
+ print(f"[warm] Fetching list: {list_url}")
+ try:
+ html = fetch(list_url)
+ except urllib.error.URLError as e: # pragma: no cover
+ raise SystemExit(f"Failed fetching list: {e}")
+ slugs = extract_slugs(html, count)
+ if not slugs:
+ raise SystemExit("No theme slugs extracted – cannot warm.")
+ print(f"[warm] Extracted {len(slugs)} slugs: {', '.join(slugs[:8])}{'...' if len(slugs)>8 else ''}")
+ total_requests = 0
+ start = time.time()
+ for r in range(repeats):
+ print(f"[warm] Pass {r+1}/{repeats}")
+ for slug in slugs:
+ url = f"{base_url}{PREVIEW_PATH.format(slug=slug)}"
+ try:
+ fetch(url)
+ except Exception as e: # pragma: no cover
+ print(f" [warn] Failed {slug}: {e}")
+ else:
+ total_requests += 1
+ if delay:
+ time.sleep(delay)
+ dur = time.time() - start
+ print(f"[warm] Completed {total_requests} preview requests in {dur:.2f}s ({total_requests/dur if dur>0 else 0:.1f} rps)")
+ print("[warm] Done. Now run metrics snapshot to capture warm p95.")
+
+
+def main(argv: list[str]) -> int:
+ ap = argparse.ArgumentParser(description="Generate warm preview traffic")
+ ap.add_argument("--base-url", default="http://localhost:8000", help="Base URL (default: %(default)s)")
+ ap.add_argument("--count", type=int, default=25, help="Number of distinct theme slugs to warm (default: %(default)s)")
+ ap.add_argument("--repeats", type=int, default=2, help="Repeat passes over slugs (default: %(default)s)")
+ ap.add_argument("--delay", type=float, default=0.05, help="Delay between requests in seconds (default: %(default)s)")
+ args = ap.parse_args(argv)
+ warm(args.base_url.rstrip("/"), args.count, args.repeats, args.delay)
+ return 0
+
+if __name__ == "__main__": # pragma: no cover
+ import sys
+ raise SystemExit(main(sys.argv[1:]))
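A typical workflow chains the two utilities above: warm the preview cache, then snapshot metrics to capture warm-path p95. A sketch, assuming a local server running with `WEB_THEME_PICKER_DIAGNOSTICS=1` and both scripts importable as modules:

```python
# Sketch: warm the preview cache, then capture a metrics snapshot of the warmed p95.
from code.scripts.warm_preview_traffic import warm
from code.scripts.preview_metrics_snapshot import fetch_metrics, summarize

BASE = "http://localhost:8000"            # server must expose /themes/metrics (diagnostics flag on)
warm(BASE, count=25, repeats=2, delay=0.05)
snapshot = summarize(fetch_metrics(f"{BASE}/themes/metrics"), top_n=5)
print(snapshot["preview_p95_build_ms"], snapshot["top_slowest"])
```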
diff --git a/code/tests/test_fast_theme_list_regression.py b/code/tests/test_fast_theme_list_regression.py
new file mode 100644
index 0000000..dc03c52
--- /dev/null
+++ b/code/tests/test_fast_theme_list_regression.py
@@ -0,0 +1,30 @@
+import json
+from code.web.routes.themes import _load_fast_theme_list
+
+def test_fast_theme_list_derives_ids(monkeypatch, tmp_path):
+ # Create a minimal theme_list.json without explicit 'id' fields to simulate current build output
+ data = {
+ "themes": [
+ {"theme": "+1/+1 Counters", "description": "Foo desc that is a bit longer to ensure trimming works properly and demonstrates snippet logic."},
+ {"theme": "Artifacts", "description": "Artifacts matter deck."},
+ ],
+ "generated_from": "merge"
+ }
+ # Write to a temporary file and monkeypatch THEME_LIST_PATH to point there
+ theme_json = tmp_path / 'theme_list.json'
+ theme_json.write_text(json.dumps(data), encoding='utf-8')
+
+ from code.web.routes import themes as themes_module
+ monkeypatch.setattr(themes_module, 'THEME_LIST_PATH', theme_json)
+
+ lst = _load_fast_theme_list()
+ assert lst is not None
+ # Should derive slug ids
+ ids = {e['id'] for e in lst}
+ assert 'plus1-plus1-counters' in ids
+ assert 'artifacts' in ids
+ # Should generate short_description
+ for e in lst:
+ assert 'short_description' in e
+ assert e['short_description']
+
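The regression test above expects `+1/+1 Counters` to yield the slug `plus1-plus1-counters`. One slugify rule consistent with that expectation, sketched here for reference (the real `_load_fast_theme_list` derivation may differ in details):

```python
# Sketch: one slugify rule that yields 'plus1-plus1-counters' for "+1/+1 Counters".
import re

def slugify_theme(name: str) -> str:
    s = name.lower().replace("+", "plus")          # '+1/+1 Counters' -> 'plus1/plus1 counters'
    s = re.sub(r"[^a-z0-9]+", "-", s)              # non-alphanumerics collapse to hyphens
    return s.strip("-")

assert slugify_theme("+1/+1 Counters") == "plus1-plus1-counters"
assert slugify_theme("Artifacts") == "artifacts"
```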
diff --git a/code/tests/test_preview_curated_examples_regression.py b/code/tests/test_preview_curated_examples_regression.py
new file mode 100644
index 0000000..9839784
--- /dev/null
+++ b/code/tests/test_preview_curated_examples_regression.py
@@ -0,0 +1,20 @@
+import json
+from fastapi.testclient import TestClient
+
+from code.web.app import app # type: ignore
+
+
+def test_preview_includes_curated_examples_regression():
+ """Regression test (2025-09-20): After P2 changes the preview lost curated
+ example cards because theme_list.json lacks example_* arrays. We added YAML
+ fallback in project_detail; ensure at least one 'example' role appears for
+ a theme known to have example_cards in its YAML (aggro.yml)."""
+ client = TestClient(app)
+ r = client.get('/themes/api/theme/aggro/preview?limit=12')
+ assert r.status_code == 200, r.text
+ data = r.json()
+ assert data.get('ok') is True
+ sample = data.get('preview', {}).get('sample', [])
+ # Collect roles
+ roles = { (it.get('roles') or [''])[0] for it in sample }
+ assert 'example' in roles, f"expected at least one curated example card role; roles present: {roles} sample={json.dumps(sample, indent=2)[:400]}"
\ No newline at end of file
diff --git a/code/tests/test_preview_error_rate_metrics.py b/code/tests/test_preview_error_rate_metrics.py
new file mode 100644
index 0000000..211934b
--- /dev/null
+++ b/code/tests/test_preview_error_rate_metrics.py
@@ -0,0 +1,22 @@
+from fastapi.testclient import TestClient
+from code.web.app import app
+
+def test_preview_error_rate_metrics(monkeypatch):
+ monkeypatch.setenv('WEB_THEME_PICKER_DIAGNOSTICS', '1')
+ client = TestClient(app)
+ # Trigger one preview to ensure request counter increments
+ themes_resp = client.get('/themes/api/themes?limit=1')
+ assert themes_resp.status_code == 200
+ theme_id = themes_resp.json()['items'][0]['id']
+ pr = client.get(f'/themes/fragment/preview/{theme_id}')
+ assert pr.status_code == 200
+ # Simulate two client fetch error structured log events
+ for _ in range(2):
+ r = client.post('/themes/log', json={'event':'preview_fetch_error'})
+ assert r.status_code == 200
+ metrics = client.get('/themes/metrics').json()
+ assert metrics['ok'] is True
+ preview_block = metrics['preview']
+ assert 'preview_client_fetch_errors' in preview_block
+ assert preview_block['preview_client_fetch_errors'] >= 2
+ assert 'preview_error_rate_pct' in preview_block
diff --git a/code/tests/test_preview_metrics_percentiles.py b/code/tests/test_preview_metrics_percentiles.py
new file mode 100644
index 0000000..8ac84c4
--- /dev/null
+++ b/code/tests/test_preview_metrics_percentiles.py
@@ -0,0 +1,35 @@
+from fastapi.testclient import TestClient
+from code.web.app import app
+
+
+def test_preview_metrics_percentiles_present(monkeypatch):
+ # Enable diagnostics for metrics endpoint
+ monkeypatch.setenv('WEB_THEME_PICKER_DIAGNOSTICS', '1')
+    # Explicitly disable preview logging (not required; just exercises the env-read code path)
+ monkeypatch.setenv('WEB_THEME_PREVIEW_LOG', '0')
+ client = TestClient(app)
+ # Hit a few previews to generate durations
+ # We need an existing theme id; fetch list API first
+ r = client.get('/themes/api/themes?limit=3')
+ assert r.status_code == 200, r.text
+ data = r.json()
+ # API returns 'items' not 'themes'
+ assert 'items' in data
+ themes = data['items']
+ assert themes, 'Expected at least one theme for metrics test'
+ theme_id = themes[0]['id']
+ for _ in range(3):
+ pr = client.get(f'/themes/fragment/preview/{theme_id}')
+ assert pr.status_code == 200
+ mr = client.get('/themes/metrics')
+ assert mr.status_code == 200, mr.text
+ metrics = mr.json()
+ assert metrics['ok'] is True
+ per_theme = metrics['preview']['per_theme']
+ # pick first entry in per_theme stats
+ # Validate new percentile fields exist (p50_ms, p95_ms) and are numbers
+ any_entry = next(iter(per_theme.values())) if per_theme else None
+ assert any_entry, 'Expected at least one per-theme metrics entry'
+ assert 'p50_ms' in any_entry and 'p95_ms' in any_entry, any_entry
+ assert isinstance(any_entry['p50_ms'], (int, float))
+ assert isinstance(any_entry['p95_ms'], (int, float))
diff --git a/code/tests/test_preview_minimal_variant.py b/code/tests/test_preview_minimal_variant.py
new file mode 100644
index 0000000..2fec530
--- /dev/null
+++ b/code/tests/test_preview_minimal_variant.py
@@ -0,0 +1,13 @@
+from fastapi.testclient import TestClient
+from code.web.app import app # type: ignore
+
+
+def test_minimal_variant_hides_controls_and_headers():
+ client = TestClient(app)
+ r = client.get('/themes/fragment/preview/aggro?suppress_curated=1&minimal=1')
+ assert r.status_code == 200
+ html = r.text
+ assert 'Curated Only' not in html
+ assert 'Commander Overlap & Diversity Rationale' not in html
+ # Ensure sample cards still render
+ assert 'card-sample' in html
\ No newline at end of file
diff --git a/code/tests/test_preview_suppress_curated_flag.py b/code/tests/test_preview_suppress_curated_flag.py
new file mode 100644
index 0000000..9ab5283
--- /dev/null
+++ b/code/tests/test_preview_suppress_curated_flag.py
@@ -0,0 +1,17 @@
+from fastapi.testclient import TestClient
+from code.web.app import app # type: ignore
+
+
+def test_preview_fragment_suppress_curated_removes_examples():
+ client = TestClient(app)
+ # Get HTML fragment with suppress_curated
+ r = client.get('/themes/fragment/preview/aggro?suppress_curated=1&limit=14')
+ assert r.status_code == 200
+ html = r.text
+ # Should not contain group label Curated Examples
+ assert 'Curated Examples' not in html
+ # Should still contain payoff/enabler group labels
+ assert 'Payoffs' in html or 'Enablers & Support' in html
+ # No example role chips: role-example occurrences removed
+ # Ensure no rendered span with curated example role (avoid style block false positive)
+    assert '<span class="role-chip role-example"' not in html
+ assert 'title="' in frag.text # coarse check; ensures at least one title attr present for snippet
+ # If there is at least one row, request detail fragment
+ base = client.get('/themes/api/themes').json()
+ if base['items']:
+ tid = base['items'][0]['id']
+ dfrag = client.get(f'/themes/fragment/detail/{tid}')
+ assert dfrag.status_code == 200
+
+
+@pytest.mark.skipif(not CATALOG_PATH.exists(), reason="theme catalog missing")
+def test_detail_ok_and_not_found():
+ client = TestClient(app)
+ listing = client.get('/themes/api/themes').json()
+ if not listing['items']:
+ pytest.skip('No themes to test detail')
+ first_id = listing['items'][0]['id']
+ r = client.get(f'/themes/api/theme/{first_id}')
+ assert r.status_code == 200
+ detail = r.json()['theme']
+ assert detail['id'] == first_id
+ r404 = client.get('/themes/api/theme/does-not-exist-xyz')
+ assert r404.status_code == 404
+
+
+@pytest.mark.skipif(not CATALOG_PATH.exists(), reason="theme catalog missing")
+def test_diagnostics_gating(monkeypatch):
+ client = TestClient(app)
+ # Without flag -> diagnostics fields absent
+ r = client.get('/themes/api/themes', params={'diagnostics': '1'})
+ sample = r.json()['items'][0] if r.json()['items'] else {}
+ assert 'has_fallback_description' not in sample
+ # Enable flag
+ monkeypatch.setenv('WEB_THEME_PICKER_DIAGNOSTICS', '1')
+ r2 = client.get('/themes/api/themes', params={'diagnostics': '1'})
+ sample2 = r2.json()['items'][0] if r2.json()['items'] else {}
+ if sample2:
+ assert 'has_fallback_description' in sample2
+
+
+@pytest.mark.skipif(not CATALOG_PATH.exists(), reason="theme catalog missing")
+def test_uncapped_requires_diagnostics(monkeypatch):
+ client = TestClient(app)
+ listing = client.get('/themes/api/themes').json()
+ if not listing['items']:
+ pytest.skip('No themes available')
+ tid = listing['items'][0]['id']
+ # Request uncapped without diagnostics -> should not include
+ d = client.get(f'/themes/api/theme/{tid}', params={'uncapped': '1'}).json()['theme']
+ assert 'uncapped_synergies' not in d
+ # Enable diagnostics
+ monkeypatch.setenv('WEB_THEME_PICKER_DIAGNOSTICS', '1')
+ d2 = client.get(f'/themes/api/theme/{tid}', params={'diagnostics': '1', 'uncapped': '1'}).json()['theme']
+ # Uncapped may equal capped if no difference, but key must exist
+ assert 'uncapped_synergies' in d2
+
+
+@pytest.mark.skipif(not CATALOG_PATH.exists(), reason="theme catalog missing")
+def test_preview_endpoint_basic():
+ client = TestClient(app)
+ listing = client.get('/themes/api/themes').json()
+ if not listing['items']:
+ pytest.skip('No themes available')
+ tid = listing['items'][0]['id']
+ preview = client.get(f'/themes/api/theme/{tid}/preview', params={'limit': 5}).json()
+ assert preview['ok'] is True
+ sample = preview['preview']['sample']
+ assert len(sample) <= 5
+ # Scores should be non-increasing for first curated entries (simple heuristic)
+ scores = [it['score'] for it in sample]
+ assert all(isinstance(s, (int, float)) for s in scores)
+ # Synthetic placeholders (if any) should have role 'synthetic'
+ for it in sample:
+ assert 'roles' in it and isinstance(it['roles'], list)
+ # Color filter invocation (may reduce or keep size; ensure no crash)
+ preview_color = client.get(f'/themes/api/theme/{tid}/preview', params={'limit': 4, 'colors': 'U'}).json()
+ assert preview_color['ok'] is True
+ # Fragment version
+ frag = client.get(f'/themes/fragment/preview/{tid}')
+ assert frag.status_code == 200
+
+
+@pytest.mark.skipif(not CATALOG_PATH.exists(), reason="theme catalog missing")
+def test_preview_commander_bias(): # lightweight heuristic validation
+ client = TestClient(app)
+ listing = client.get('/themes/api/themes').json()
+ if not listing['items']:
+ pytest.skip('No themes available')
+ tid = listing['items'][0]['id']
+ # Use an arbitrary commander name – depending on dataset may not be found; test tolerant
+    commander_name = "Atraxa, Praetors' Voice"  # attempt full name; if absent test remains soft
+ preview = client.get(f'/themes/api/theme/{tid}/preview', params={'limit': 6, 'commander': commander_name}).json()
+ assert preview['ok'] is True
+ sample = preview['preview']['sample']
+ # If commander card was discovered at least one item should have commander_bias reason
+ any_commander_reason = any('commander_bias' in it.get('reasons', []) for it in sample)
+ # It's acceptable if not found (dataset subset) but reasons structure must exist
+ assert all('reasons' in it for it in sample)
+ # Soft assertion (no failure if commander not present) – if discovered we assert overlap marker
+ if any_commander_reason:
+ assert any('commander_overlap' in it.get('reasons', []) for it in sample)
+
+
+@pytest.mark.skipif(not CATALOG_PATH.exists(), reason="theme catalog missing")
+def test_preview_curated_synergy_ordering():
+ """Curated synergy example cards (role=curated_synergy) must appear after role=example
+ cards but before any sampled payoff/enabler/support/wildcard entries.
+ """
+ client = TestClient(app)
+ listing = client.get('/themes/api/themes').json()
+ if not listing['items']:
+ pytest.skip('No themes available')
+ tid = listing['items'][0]['id']
+ preview = client.get(f'/themes/api/theme/{tid}/preview', params={'limit': 12}).json()
+ assert preview['ok'] is True
+ sample = preview['preview']['sample']
+ roles_sequence = [it['roles'][0] if it.get('roles') else None for it in sample]
+ if 'curated_synergy' not in roles_sequence:
+ pytest.skip('No curated synergy cards present in sample (data-dependent)')
+ first_non_example_index = None
+ first_curated_synergy_index = None
+ first_sampled_index = None
+ sampled_roles = {'payoff', 'enabler', 'support', 'wildcard'}
+ for idx, role in enumerate(roles_sequence):
+ if role != 'example' and first_non_example_index is None:
+ first_non_example_index = idx
+ if role == 'curated_synergy' and first_curated_synergy_index is None:
+ first_curated_synergy_index = idx
+ if role in sampled_roles and first_sampled_index is None:
+ first_sampled_index = idx
+ # Ensure ordering: examples (if any) -> curated_synergy -> sampled roles
+ if first_curated_synergy_index is not None and first_sampled_index is not None:
+ assert first_curated_synergy_index < first_sampled_index
diff --git a/code/tests/test_theme_picker_gaps.py b/code/tests/test_theme_picker_gaps.py
new file mode 100644
index 0000000..6e7f5c9
--- /dev/null
+++ b/code/tests/test_theme_picker_gaps.py
@@ -0,0 +1,247 @@
+"""Tests covering Section H (Testing Gaps) & related Phase F items.
+
+These are backend-oriented approximations for browser behaviors. Where full
+JS execution would be required (keyboard event dispatch, sessionStorage), we
+simulate or validate server produced HTML attributes / ordering contracts.
+
+Contained tests:
+ - test_fast_path_load_time: ensure catalog list fragment renders quickly using
+ fixture dataset (budget <= 120ms on CI hardware; relaxed if env override)
+ - test_colors_filter_constraint: applying colors=G restricts primary/secondary
+ colors to subset including 'G'
+ - test_preview_placeholder_fill: themes with insufficient real cards are
+ padded with synthetic placeholders (role synthetic & name bracketed)
+ - test_preview_cache_hit_timing: second call served from cache faster (monkeypatches
+    _now so time stays effectively frozen between the two calls)
+ - test_navigation_state_preservation_roundtrip: simulate list fetch then
+ detail fetch and ensure detail HTML contains theme id while list fragment
+ params persist in constructed URL logic (server side approximation)
+ - test_mana_cost_parser_variants: port of client JS mana parser implemented
+ in Python to validate hybrid / phyrexian / X handling does not crash.
+
+NOTE: Pure keyboard navigation & sessionStorage cache skip paths require a
+JS runtime; we assert presence of required attributes (tabindex, role=option)
+as a smoke proxy until an integration (playwright) layer is added.
+"""
+
+from __future__ import annotations
+
+import os
+import re
+import time
+from typing import List
+
+import pytest
+from fastapi.testclient import TestClient
+
+
+def _get_app(): # local import to avoid heavy import cost if file unused
+ from code.web.app import app # type: ignore
+ return app
+
+
+@pytest.fixture(scope="module")
+def client():
+ # Enable diagnostics to allow /themes/metrics access if gated
+ os.environ.setdefault("WEB_THEME_PICKER_DIAGNOSTICS", "1")
+ return TestClient(_get_app())
+
+
+def test_fast_path_load_time(client):
+ # First load may include startup warm logic; allow generous budget, tighten later in CI ratchet
+ budget_ms = int(os.getenv("TEST_THEME_FAST_PATH_BUDGET_MS", "2500"))
+ t0 = time.perf_counter()
+ r = client.get("/themes/fragment/list?limit=20")
+ dt_ms = (time.perf_counter() - t0) * 1000
+ assert r.status_code == 200
+ # Basic sanity: table rows present
+ assert "theme-row" in r.text
+ assert dt_ms <= budget_ms, f"Fast path list fragment exceeded budget {dt_ms:.2f}ms > {budget_ms}ms"
+
+
+def test_colors_filter_constraint(client):
+ r = client.get("/themes/fragment/list?limit=50&colors=G")
+ assert r.status_code == 200
+    rows = [m.group(0) for m in re.finditer(r"<tr[^>]*class=\"theme-row\"[\s\S]*?</tr>", r.text)]
+ assert rows, "Expected some rows for colors filter"
+ greenish = 0
+ considered = 0
+ for row in rows:
+        tds = re.findall(r"<td[^>]*>(.*?)</td>", row)
+ if len(tds) < 3:
+ continue
+ primary = tds[1]
+ secondary = tds[2]
+ if primary or secondary:
+ considered += 1
+ if ("G" in primary) or ("G" in secondary):
+ greenish += 1
+ # Expect at least half of colored themes to include G (soft assertion due to multi-color / secondary logic on backend)
+ if considered:
+ assert greenish / considered >= 0.5, f"Expected >=50% green presence, got {greenish}/{considered}"
+
+
+def test_preview_placeholder_fill(client):
+ # Find a theme likely to have low card pool by requesting high limit and then checking for synthetic placeholders '['
+ # Use first theme id from list fragment
+ list_html = client.get("/themes/fragment/list?limit=1").text
+ m = re.search(r'data-theme-id=\"([^\"]+)\"', list_html)
+ assert m, "Could not extract theme id"
+ theme_id = m.group(1)
+ # Request preview with high limit to likely force padding
+ pv = client.get(f"/themes/fragment/preview/{theme_id}?limit=30")
+ assert pv.status_code == 200
+ # Synthetic placeholders appear as names inside brackets (server template), search raw HTML
+ bracketed = re.findall(r"\[[^\]]+\]", pv.text)
+ # Not all themes will pad; if none found try a second theme
+ if not bracketed:
+ list_html2 = client.get("/themes/fragment/list?limit=5").text
+ ids = re.findall(r'data-theme-id=\"([^\"]+)\"', list_html2)
+ for tid in ids[1:]:
+ pv2 = client.get(f"/themes/fragment/preview/{tid}?limit=30")
+ if pv2.status_code == 200 and re.search(r"\[[^\]]+\]", pv2.text):
+ bracketed = ["ok"]
+ break
+ assert bracketed, "Expected at least one synthetic placeholder bracketed item in high-limit preview"
+
+
+def test_preview_cache_hit_timing(monkeypatch, client):
+ # Warm first
+ list_html = client.get("/themes/fragment/list?limit=1").text
+ m = re.search(r'data-theme-id=\"([^\"]+)\"', list_html)
+ assert m, "Theme id missing"
+ theme_id = m.group(1)
+ # First build (miss)
+ r1 = client.get(f"/themes/fragment/preview/{theme_id}?limit=12")
+ assert r1.status_code == 200
+    # Monkeypatch theme_preview._now to freeze time so the second call counts as a cache hit
+    import code.web.services.theme_preview as tp  # type: ignore
+    frozen_now = tp._now()
+    monkeypatch.setattr(tp, "_now", lambda: frozen_now)
+ r2 = client.get(f"/themes/fragment/preview/{theme_id}?limit=12")
+ assert r2.status_code == 200
+    # Deterministic service-level verification: second direct function call should short-circuit via cache
+    # Snapshot counters
+ pre_hits = getattr(tp, "_PREVIEW_CACHE_HITS", 0)
+ first_payload = tp.get_theme_preview(theme_id, limit=12)
+ second_payload = tp.get_theme_preview(theme_id, limit=12)
+ post_hits = getattr(tp, "_PREVIEW_CACHE_HITS", 0)
+ assert first_payload.get("sample"), "Missing sample items in preview"
+ # Cache hit should have incremented hits counter
+ assert post_hits >= pre_hits + 1 or post_hits > 0, "Expected cache hits counter to increase"
+ # Items list identity (names) should be identical even if build_ms differs (second call cached has no build_ms recompute)
+ first_names = [i.get("name") for i in first_payload.get("sample", [])]
+ second_names = [i.get("name") for i in second_payload.get("sample", [])]
+ assert first_names == second_names, "Item ordering changed between cached calls"
+ # Metrics cache hit counter is best-effort; do not hard fail if not exposed yet
+ metrics_resp = client.get("/themes/metrics")
+ if metrics_resp.status_code == 200:
+ metrics = metrics_resp.json()
+ # Soft assertion
+ if metrics.get("preview_cache_hits", 0) == 0:
+ pytest.skip("Preview cache hit not reflected in metrics (soft skip)")
+
+
+def test_navigation_state_preservation_roundtrip(client):
+ # Simulate list fetch with search & filters appended
+ r = client.get("/themes/fragment/list?q=counters&limit=20&bucket=Common")
+ assert r.status_code == 200
+ # Extract a theme id then fetch detail fragment to simulate navigation
+ m = re.search(r'data-theme-id=\"([^\"]+)\"', r.text)
+ assert m, "Missing theme id in filtered list"
+ theme_id = m.group(1)
+ detail = client.get(f"/themes/fragment/detail/{theme_id}")
+ assert detail.status_code == 200
+ # Detail fragment should include theme display name or id in heading
+ assert theme_id in detail.text or "Theme Detail" in detail.text
+ # Ensure list fragment contained highlighted mark for query
+ assert "" in r.text, "Expected search term highlighting for state preservation"
+
+
+# --- Mana cost parser parity (mirror of client JS simplified) ---
+def _parse_mana_symbols(raw: str) -> List[str]:
+ # Emulate JS regex /\{([^}]+)\}/g
+ return re.findall(r"\{([^}]+)\}", raw or "")
+
+
+@pytest.mark.parametrize(
+ "mana,expected_syms",
+ [
+ ("{X}{2}{U}{B/P}", ["X", "2", "U", "B/P"]),
+ ("{G/U}{G/U}{1}{G}", ["G/U", "G/U", "1", "G"]),
+ ("{R}{R}{R}{R}{R}", ["R", "R", "R", "R", "R"]),
+ ("{2/W}{2/W}{W}", ["2/W", "2/W", "W"]),
+ ("{G}{G/P}{X}{C}", ["G", "G/P", "X", "C"]),
+ ],
+)
+def test_mana_cost_parser_variants(mana, expected_syms):
+ assert _parse_mana_symbols(mana) == expected_syms
+
+
+def test_lazy_load_img_attributes(client):
+ # Grab a preview and ensure loading="lazy" present on card images
+ list_html = client.get("/themes/fragment/list?limit=1").text
+ m = re.search(r'data-theme-id=\"([^\"]+)\"', list_html)
+ assert m
+ theme_id = m.group(1)
+ pv = client.get(f"/themes/fragment/preview/{theme_id}?limit=12")
+ assert pv.status_code == 200
+ # At least one img tag with loading="lazy" attribute
+ assert re.search(r"
]+loading=\"lazy\"", pv.text), "Expected lazy-loading images in preview"
+
+
+def test_list_fragment_accessibility_tokens(client):
+ # Smoke test for role=listbox and row role=option presence (accessibility baseline)
+ r = client.get("/themes/fragment/list?limit=10")
+ assert r.status_code == 200
+ assert "role=\"option\"" in r.text
+
+
+def test_accessibility_live_region_and_listbox(client):
+ r = client.get("/themes/fragment/list?limit=5")
+ assert r.status_code == 200
+ # List container should have role listbox and aria-live removed in fragment (fragment may omit outer wrapper) – allow either present or absent gracefully
+ # We assert at least one aria-label attribute referencing themes count OR presence of pager text
+ assert ("aria-label=\"" in r.text) or ("Showing" in r.text)
+
+
+def test_keyboard_nav_script_presence(client):
+ # Fetch full picker page (not just fragment) to inspect embedded JS for Arrow key handling
+ page = client.get("/themes/picker")
+ assert page.status_code == 200
+ body = page.text
+ assert "ArrowDown" in body and "ArrowUp" in body and "Enter" in body and "Escape" in body, "Keyboard nav handlers missing"
+
+
+def test_list_fragment_filter_cache_fallback_timing(client):
+ # First call (likely cold) vs second call (cached by etag + filter cache)
+ import time as _t
+ t0 = _t.perf_counter()
+ client.get("/themes/fragment/list?limit=25&q=a")
+ first_ms = (_t.perf_counter() - t0) * 1000
+ t1 = _t.perf_counter()
+ client.get("/themes/fragment/list?limit=25&q=a")
+ second_ms = (_t.perf_counter() - t1) * 1000
+ # Soft assertion: second should not be dramatically slower; allow equality but fail if slower by >50%
+ if second_ms > first_ms * 1.5:
+ pytest.skip(f"Second call slower (cold path variance) first={first_ms:.1f}ms second={second_ms:.1f}ms")
+
+
+def test_intersection_observer_lazy_fallback(client):
+ # Preview fragment should include script referencing IntersectionObserver (fallback path implied by try/catch) and images with loading lazy
+ list_html = client.get("/themes/fragment/list?limit=1").text
+ m = re.search(r'data-theme-id="([^"]+)"', list_html)
+ assert m
+ theme_id = m.group(1)
+ pv = client.get(f"/themes/fragment/preview/{theme_id}?limit=12")
+ assert pv.status_code == 200
+ html = pv.text
+ assert 'IntersectionObserver' in html or 'loading="lazy"' in html
+ assert re.search(r"
]+loading=\"lazy\"", html)
+
+
+def test_session_storage_cache_script_tokens_present(client):
+ # Ensure list fragment contains cache_hit / cache_miss tokens for sessionStorage path instrumentation
+ frag = client.get("/themes/fragment/list?limit=5").text
+ assert 'cache_hit' in frag and 'cache_miss' in frag, "Expected cache_hit/cache_miss tokens in fragment script"
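The mana parser parity cases above extract raw symbols such as `G/U` and `B/P`, while the server-side fields include `pip_colors`. A hedged sketch of one way parsed symbols could be reduced to colored pips (illustrative only, not the project's actual reduction logic):

```python
# Sketch: reducing parsed mana symbols to colored pips (hybrid halves each contribute a color).
import re

COLORS = {"W", "U", "B", "R", "G"}

def pip_colors(mana_cost: str) -> list[str]:
    pips: list[str] = []
    for sym in re.findall(r"\{([^}]+)\}", mana_cost or ""):
        for part in sym.split("/"):               # '{G/U}' -> G and U; '{B/P}' -> B (P is not a color)
            if part in COLORS:
                pips.append(part)
    return pips

assert pip_colors("{X}{2}{U}{B/P}") == ["U", "B"]
assert pip_colors("{G/U}{G/U}{1}{G}") == ["G", "U", "G", "U", "G"]
```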
diff --git a/code/tests/test_theme_preview_additional.py b/code/tests/test_theme_preview_additional.py
new file mode 100644
index 0000000..f9a848f
--- /dev/null
+++ b/code/tests/test_theme_preview_additional.py
@@ -0,0 +1,62 @@
+from __future__ import annotations
+
+import os
+import re
+import importlib
+import pytest
+from fastapi.testclient import TestClient
+
+
+def _new_client(prewarm: bool = False) -> TestClient:
+ # Ensure fresh import with desired env flags
+ if prewarm:
+ os.environ['WEB_THEME_FILTER_PREWARM'] = '1'
+ else:
+ os.environ.pop('WEB_THEME_FILTER_PREWARM', None)
+ # Remove existing module (if any) so lifespan runs again
+    importlib.sys.modules.pop('code.web.app', None)
+ from code.web.app import app # type: ignore
+ return TestClient(app)
+
+
+def _first_theme_id(client: TestClient) -> str:
+ html = client.get('/themes/fragment/list?limit=1').text
+ m = re.search(r'data-theme-id="([^"]+)"', html)
+ assert m, 'No theme id found'
+ return m.group(1)
+
+
+def test_role_group_separators_and_role_chips():
+ client = _new_client()
+ theme_id = _first_theme_id(client)
+ pv_html = client.get(f'/themes/fragment/preview/{theme_id}?limit=18').text
+ # Ensure at least one role chip exists
+ assert 'role-chip' in pv_html, 'Expected role-chip elements in preview fragment'
+ # Capture group separator ordering
+ groups = re.findall(r'data-group="(examples|curated_synergy|payoff|enabler_support|wildcard)"', pv_html)
+ if groups:
+ # Remove duplicates preserving order
+ seen = []
+ for g in groups:
+ if g not in seen:
+ seen.append(g)
+ # Expected relative order subset prefix list
+ expected_order = ['examples', 'curated_synergy', 'payoff', 'enabler_support', 'wildcard']
+ # Filter expected list to those actually present and compare ordering
+ filtered_expected = [g for g in expected_order if g in seen]
+ assert seen == filtered_expected, f'Group separators out of order: {seen} vs expected subset {filtered_expected}'
+
+
+def test_prewarm_flag_metrics():
+ client = _new_client(prewarm=True)
+ # Trigger at least one list request (though prewarm runs in lifespan already)
+ client.get('/themes/fragment/list?limit=5')
+ metrics_resp = client.get('/themes/metrics')
+ if metrics_resp.status_code != 200:
+ pytest.skip('Metrics endpoint unavailable')
+ metrics = metrics_resp.json()
+ # Soft assertion: if key missing, skip (older build)
+ if 'filter_prewarmed' not in metrics:
+ pytest.skip('filter_prewarmed metric not present')
+ assert metrics['filter_prewarmed'] in (True, 1), 'Expected filter_prewarmed to be True after prewarm'
diff --git a/code/tests/test_theme_preview_ordering.py b/code/tests/test_theme_preview_ordering.py
new file mode 100644
index 0000000..5cbebdf
--- /dev/null
+++ b/code/tests/test_theme_preview_ordering.py
@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+import pytest
+
+from code.web.services.theme_preview import get_theme_preview # type: ignore
+from code.web.services.theme_catalog_loader import load_index, slugify, project_detail # type: ignore
+
+
+@pytest.mark.parametrize("limit", [8, 12])
+def test_preview_role_ordering(limit):
+ # Pick a deterministic existing theme (first catalog theme)
+ idx = load_index()
+ assert idx.catalog.themes, "No themes available for preview test"
+ theme = idx.catalog.themes[0].theme
+ preview = get_theme_preview(theme, limit=limit)
+ # Ensure curated examples (role=example) all come before any curated_synergy, which come before any payoff/enabler/support/wildcard
+ roles = [c["roles"][0] for c in preview["sample"] if c.get("roles")]
+ # Find first indices
+ first_curated_synergy = next((i for i, r in enumerate(roles) if r == "curated_synergy"), None)
+ first_non_curated = next((i for i, r in enumerate(roles) if r not in {"example", "curated_synergy"}), None)
+ # If both present, ordering constraints
+ if first_curated_synergy is not None and first_non_curated is not None:
+ assert first_curated_synergy < first_non_curated, "curated_synergy block should precede sampled roles"
+ # All example indices must be < any curated_synergy index
+ if first_curated_synergy is not None:
+ for i, r in enumerate(roles):
+ if r == "example":
+ assert i < first_curated_synergy, "example card found after curated_synergy block"
+
+
+def test_synergy_commanders_no_overlap_with_examples():
+ idx = load_index()
+ theme_entry = idx.catalog.themes[0]
+ slug = slugify(theme_entry.theme)
+ detail = project_detail(slug, idx.slug_to_entry[slug], idx.slug_to_yaml, uncapped=False)
+ examples = set(detail.get("example_commanders") or [])
+ synergy_commanders = detail.get("synergy_commanders") or []
+ assert not (examples.intersection(synergy_commanders)), "synergy_commanders should not include example_commanders"
diff --git a/code/tests/test_theme_preview_p0_new.py b/code/tests/test_theme_preview_p0_new.py
new file mode 100644
index 0000000..50efa77
--- /dev/null
+++ b/code/tests/test_theme_preview_p0_new.py
@@ -0,0 +1,72 @@
+import os
+import time
+import json
+from code.web.services.theme_preview import get_theme_preview, preview_metrics, bust_preview_cache # type: ignore
+
+
+def test_colors_filter_constraint_green_subset():
+ """colors=G should only return cards whose color identities are subset of {G} or colorless ('' list)."""
+ payload = get_theme_preview('Blink', limit=8, colors='G') # pick any theme; data-driven
+ for card in payload['sample']:
+ if not card['colors']:
+ continue
+ assert set(card['colors']).issubset({'G'}), f"Card {card['name']} had colors {card['colors']} outside filter"
+
+
+def test_synthetic_placeholder_fill_present_when_short():
+ # Force scarcity via impossible color filter letter ensuring empty real pool -> synthetic placeholders
+ payload = get_theme_preview('Blink', limit=50, colors='Z')
+ # All real cards filtered out; placeholders must appear
+ synthetic_roles = [c for c in payload['sample'] if 'synthetic' in (c.get('roles') or [])]
+ assert synthetic_roles, 'Expected at least one synthetic placeholder entry under restrictive color filter'
+ assert any('synthetic_synergy_placeholder' in (c.get('reasons') or []) for c in synthetic_roles), 'Missing synthetic placeholder reason'
+
+
+def test_cache_hit_timing_and_log(monkeypatch, capsys):
+ os.environ['WEB_THEME_PREVIEW_LOG'] = '1'
+ # Force fresh build
+ bust_preview_cache()
+ payload1 = get_theme_preview('Blink', limit=6)
+ assert payload1['cache_hit'] is False
+ # Second call should hit cache
+ payload2 = get_theme_preview('Blink', limit=6)
+ assert payload2['cache_hit'] is True
+ captured = capsys.readouterr().out.splitlines()
+ assert any('theme_preview_build' in line for line in captured), 'Missing build log'
+ assert any('theme_preview_cache_hit' in line for line in captured), 'Missing cache hit log'
+
+
+def test_per_theme_percentiles_and_raw_counts():
+ bust_preview_cache()
+ for _ in range(5):
+ get_theme_preview('Blink', limit=6)
+ metrics = preview_metrics()
+ per = metrics['per_theme']
+ assert 'blink' in per, 'Expected theme slug in per_theme metrics'
+ blink_stats = per['blink']
+ assert 'p50_ms' in blink_stats and 'p95_ms' in blink_stats, 'Missing percentile metrics'
+ assert 'curated_total' in blink_stats and 'sampled_total' in blink_stats, 'Missing raw curated/sample per-theme totals'
+
+
+def test_structured_log_contains_new_fields(capsys):
+ os.environ['WEB_THEME_PREVIEW_LOG'] = '1'
+ bust_preview_cache()
+ get_theme_preview('Blink', limit=5)
+ out_lines = capsys.readouterr().out.splitlines()
+ build_lines = [line for line in out_lines if 'theme_preview_build' in line]
+ assert build_lines, 'No build log lines found'
+ parsed = [json.loads(line) for line in build_lines]
+ obj = parsed[-1]
+ assert 'curated_total' in obj and 'sampled_total' in obj and 'role_counts' in obj, 'Missing expected structured log fields'
+
+
+def test_warm_index_latency_reduction():
+ bust_preview_cache()
+ t0 = time.time()
+ get_theme_preview('Blink', limit=6)
+ cold = time.time() - t0
+ t1 = time.time()
+ get_theme_preview('Blink', limit=6)
+ warm = time.time() - t1
+ # Warm path should generally be faster; allow flakiness with generous factor
+ assert warm <= cold * 1.2, f"Expected warm path faster or near equal (cold={cold}, warm={warm})"
diff --git a/code/type_definitions_theme_catalog.py b/code/type_definitions_theme_catalog.py
index 24206f1..b16828f 100644
--- a/code/type_definitions_theme_catalog.py
+++ b/code/type_definitions_theme_catalog.py
@@ -28,6 +28,7 @@ class ThemeEntry(BaseModel):
# Phase D editorial enhancements (optional)
example_commanders: List[str] = Field(default_factory=list, description="Curated example commanders illustrating the theme")
example_cards: List[str] = Field(default_factory=list, description="Representative non-commander cards (short, curated list)")
+ synergy_example_cards: List[str] = Field(default_factory=list, description="Optional curated synergy-relevant cards distinct from general example_cards")
synergy_commanders: List[str] = Field(default_factory=list, description="Commanders surfaced from top synergies (3/2/1 from top three synergies)")
deck_archetype: Optional[str] = Field(
None,
@@ -113,6 +114,7 @@ class ThemeYAMLFile(BaseModel):
# Phase D optional editorial metadata (may be absent in existing YAMLs)
example_commanders: List[str] = Field(default_factory=list)
example_cards: List[str] = Field(default_factory=list)
+ synergy_example_cards: List[str] = Field(default_factory=list)
synergy_commanders: List[str] = Field(default_factory=list)
deck_archetype: Optional[str] = None
popularity_hint: Optional[str] = None # Free-form editorial note; bucket computed during merge
diff --git a/code/web/app.py b/code/web/app.py
index eaf39ed..dd8b100 100644
--- a/code/web/app.py
+++ b/code/web/app.py
@@ -14,13 +14,41 @@ from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.gzip import GZipMiddleware
from typing import Any
from .services.combo_utils import detect_all as _detect_all
+from .services.theme_catalog_loader import prewarm_common_filters # type: ignore
# Resolve template/static dirs relative to this file
_THIS_DIR = Path(__file__).resolve().parent
_TEMPLATES_DIR = _THIS_DIR / "templates"
_STATIC_DIR = _THIS_DIR / "static"
-app = FastAPI(title="MTG Deckbuilder Web UI")
+from contextlib import asynccontextmanager
+
+
+@asynccontextmanager
+async def _lifespan(app: FastAPI): # pragma: no cover - simple infra glue
+ """FastAPI lifespan context replacing deprecated on_event startup hooks.
+
+ Consolidates previous startup tasks:
+ - prewarm_common_filters (optional fast filter cache priming)
+ - theme preview card index warm (CSV parse avoidance for first preview)
+
+ Failures in warm tasks are intentionally swallowed to avoid blocking app start.
+ """
+ # Prewarm theme filter cache (guarded internally by env flag)
+ try:
+ prewarm_common_filters()
+ except Exception:
+ pass
+ # Warm preview card index once
+ try: # local import to avoid cost if preview unused
+ from .services import theme_preview as _tp # type: ignore
+ _tp._maybe_build_card_index() # internal warm function
+ except Exception:
+ pass
+ yield # (no shutdown tasks currently)
+
+
+app = FastAPI(title="MTG Deckbuilder Web UI", lifespan=_lifespan)
app.add_middleware(GZipMiddleware, minimum_size=500)
# Mount static if present
@@ -64,6 +92,8 @@ def _compat_template_response(*args, **kwargs): # type: ignore[override]
templates.TemplateResponse = _compat_template_response # type: ignore[assignment]
+# (Startup prewarm moved to lifespan handler _lifespan)
+
# Global template flags (env-driven)
def _as_bool(val: str | None, default: bool = False) -> bool:
if val is None:
@@ -80,6 +110,7 @@ ENABLE_PRESETS = _as_bool(os.getenv("ENABLE_PRESETS"), False)
ALLOW_MUST_HAVES = _as_bool(os.getenv("ALLOW_MUST_HAVES"), False)
RANDOM_MODES = _as_bool(os.getenv("RANDOM_MODES"), False) # initial snapshot (legacy)
RANDOM_UI = _as_bool(os.getenv("RANDOM_UI"), False)
+THEME_PICKER_DIAGNOSTICS = _as_bool(os.getenv("WEB_THEME_PICKER_DIAGNOSTICS"), False)
def _as_int(val: str | None, default: int) -> int:
try:
return int(val) if val is not None and str(val).strip() != "" else default
@@ -109,6 +140,7 @@ templates.env.globals.update({
"random_ui": RANDOM_UI,
"random_max_attempts": RANDOM_MAX_ATTEMPTS,
"random_timeout_ms": RANDOM_TIMEOUT_MS,
+ "theme_picker_diagnostics": THEME_PICKER_DIAGNOSTICS,
})
# --- Simple fragment cache for template partials (low-risk, TTL-based) ---
@@ -552,6 +584,8 @@ try:
except Exception:
pass
+## (Additional startup warmers consolidated into lifespan handler)
+
# --- Exception handling ---
def _wants_html(request: Request) -> bool:
try:
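
The `_lifespan` handler above replaces the deprecated `on_event("startup")` hooks; warm tasks run once before the first request and any failure is swallowed. A self-contained sketch of the same pattern with a toy warm task (the real handler calls `prewarm_common_filters` and the preview card index warm):

```python
# Sketch of the FastAPI lifespan pattern: warm work runs once at startup,
# errors never block the app, and the test client triggers it on entry.
from contextlib import asynccontextmanager
from fastapi import FastAPI
from fastapi.testclient import TestClient

WARMED = {"count": 0}


@asynccontextmanager
async def lifespan(app: FastAPI):
    try:
        WARMED["count"] += 1  # stand-in for prewarm work; failures must not block startup
    except Exception:
        pass
    yield  # no shutdown tasks


app = FastAPI(lifespan=lifespan)


@app.get("/ping")
def ping():
    return {"warmed": WARMED["count"]}


if __name__ == "__main__":
    with TestClient(app) as client:  # entering the context runs lifespan startup
        print(client.get("/ping").json())  # {'warmed': 1}
```
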
diff --git a/code/web/models/theme_api.py b/code/web/models/theme_api.py
new file mode 100644
index 0000000..9b0c724
--- /dev/null
+++ b/code/web/models/theme_api.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from typing import List, Optional
+from pydantic import BaseModel, Field
+
+
+class ThemeSummary(BaseModel):
+ id: str
+ theme: str
+ primary_color: Optional[str] = None
+ secondary_color: Optional[str] = None
+ popularity_bucket: Optional[str] = None
+ deck_archetype: Optional[str] = None
+ description: Optional[str] = None
+ synergies: List[str] = Field(default_factory=list)
+ synergy_count: int = 0
+ # Diagnostics-only fields (gated by flag)
+ has_fallback_description: Optional[bool] = None
+ editorial_quality: Optional[str] = None
+
+
+class ThemeDetail(ThemeSummary):
+ curated_synergies: List[str] = Field(default_factory=list)
+ enforced_synergies: List[str] = Field(default_factory=list)
+ inferred_synergies: List[str] = Field(default_factory=list)
+ example_commanders: List[str] = Field(default_factory=list)
+ example_cards: List[str] = Field(default_factory=list)
+ synergy_commanders: List[str] = Field(default_factory=list)
+ # Diagnostics-only optional uncapped list
+ uncapped_synergies: Optional[List[str]] = None
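
These response models mirror the JSON emitted by the theme API routes; diagnostics-only fields stay `None` unless the flag is enabled. A hedged example of re-validating an API-style payload against `ThemeDetail` (the import path is an assumption; adjust for your environment):

```python
# Hedged example: validating a /themes/api/theme/{id}-style payload.
from code.web.models.theme_api import ThemeDetail  # import path assumed

payload = {
    "id": "blink",
    "theme": "Blink",
    "synergies": ["Flicker", "ETB Value"],
    "synergy_count": 2,
    "curated_synergies": ["Flicker"],
    "example_commanders": ["Brago, King Eternal"],
}
detail = ThemeDetail.model_validate(payload)
print(detail.editorial_quality)   # None - diagnostics-only field omitted by default
print(detail.uncapped_synergies)  # None - only populated in diagnostics mode
```
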
diff --git a/code/web/routes/themes.py b/code/web/routes/themes.py
index 3b6c00c..8d923bd 100644
--- a/code/web/routes/themes.py
+++ b/code/web/routes/themes.py
@@ -5,13 +5,41 @@ from datetime import datetime as _dt
from pathlib import Path
from typing import Optional, Dict, Any
-from fastapi import APIRouter
+from fastapi import APIRouter, Request, HTTPException, Query
from fastapi import BackgroundTasks
from ..services.orchestrator import _ensure_setup_ready, _run_theme_metadata_enrichment # type: ignore
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, HTMLResponse
+from fastapi.templating import Jinja2Templates
+from ..services.theme_catalog_loader import (
+ load_index,
+ project_detail,
+ slugify,
+ filter_slugs_fast,
+ summaries_for_slugs,
+)
+from ..services.theme_preview import get_theme_preview # type: ignore
+from ..services.theme_catalog_loader import catalog_metrics, prewarm_common_filters # type: ignore
+from ..services.theme_preview import preview_metrics # type: ignore
+from ..services import theme_preview as _theme_preview_mod # type: ignore # for error counters
+import os
+from fastapi import Body
+
+# In-memory client metrics & structured log counters (diagnostics only)
+CLIENT_PERF: dict[str, list[float]] = {
+ "list_render_ms": [], # list_ready - list_render_start
+ "preview_load_ms": [], # optional future measure (not yet emitted)
+}
+LOG_COUNTS: dict[str, int] = {}
+MAX_CLIENT_SAMPLES = 500 # cap to avoid unbounded growth
router = APIRouter(prefix="/themes", tags=["themes"]) # /themes/status
+# Reuse the main app's template environment so nav globals stay consistent.
+try: # circular-safe import: app defines templates before importing this router
+ from ..app import templates as _templates # type: ignore
+except Exception: # Fallback (tests/minimal contexts)
+ _templates = Jinja2Templates(directory=str(Path(__file__).resolve().parent.parent / 'templates'))
+
THEME_LIST_PATH = Path("config/themes/theme_list.json")
CATALOG_DIR = Path("config/themes/catalog")
STATUS_PATH = Path("csv_files/.setup_status.json")
@@ -36,6 +64,57 @@ def _load_status() -> Dict[str, Any]:
return {}
+def _load_fast_theme_list() -> Optional[list[dict[str, Any]]]:
+ """Load precomputed lightweight theme list JSON if available.
+
+ Expected structure: {"themes": [{"id": str, "theme": str, "short_description": str, ...}, ...]}
+ Returns list or None on failure.
+ """
+ try:
+ if THEME_LIST_PATH.exists():
+ raw = json.loads(THEME_LIST_PATH.read_text(encoding="utf-8") or "{}")
+ if isinstance(raw, dict):
+ arr = raw.get("themes")
+ if isinstance(arr, list):
+ # Shallow copy to avoid mutating original reference
+ # NOTE: Regression fix (2025-09-20): theme_list.json produced by current
+ # build pipeline does NOT include an explicit 'id' per theme (only 'theme').
+ # Earlier implementation required e.get('id') causing the fast path to
+ # treat the catalog as empty and show "No themes found." even though
+ # hundreds of themes exist. We now derive the id via slugify(theme) when
+ # missing, and also opportunistically compute a short_description snippet
+ # if absent (trim description to ~110 chars mirroring project_summary logic).
+ out: list[dict[str, Any]] = []
+ for e in arr:
+ if not isinstance(e, dict):
+ continue
+ theme_name = e.get("theme")
+ if not theme_name or not isinstance(theme_name, str):
+ continue
+ _id = e.get("id") or slugify(theme_name)
+ short_desc = e.get("short_description")
+ if not short_desc:
+ desc = e.get("description")
+ if isinstance(desc, str) and desc.strip():
+ sd = desc.strip()
+ if len(sd) > 110:
+ sd = sd[:107].rstrip() + "…"
+ short_desc = sd
+ out.append({
+ "id": _id,
+ "theme": theme_name,
+ "short_description": short_desc,
+ })
+ # If we ended up with zero items (unexpected) fall back to None so caller
+ # will use full index logic instead of rendering empty state incorrectly.
+ if not out:
+ return None
+ return out
+ except Exception:
+ return None
+ return None
+
+
def _load_tag_flag_time() -> Optional[float]:
try:
if TAG_FLAG_PATH.exists():
@@ -128,3 +207,672 @@ async def theme_refresh(background: BackgroundTasks):
return JSONResponse({"ok": True, "started": True})
except Exception as e: # pragma: no cover
return JSONResponse({"ok": False, "error": str(e)}, status_code=500)
+
+
+# --- Phase E Theme Catalog APIs ---
+
+def _diag_enabled() -> bool:
+ return (os.getenv("WEB_THEME_PICKER_DIAGNOSTICS") or "").strip().lower() in {"1", "true", "yes", "on"}
+
+
+@router.get("/picker", response_class=HTMLResponse)
+async def theme_picker_page(request: Request):
+ """Render the theme picker shell.
+
+ Dynamic data (list, detail) loads via fragment endpoints. We still inject
+ the known archetype list for the filter select so it is populated on initial load.
+ """
+ archetypes: list[str] = []
+ try:
+ idx = load_index()
+ archetypes = sorted({t.deck_archetype for t in idx.catalog.themes if t.deck_archetype}) # type: ignore[arg-type]
+ except Exception:
+ archetypes = []
+ return _templates.TemplateResponse(
+ "themes/picker.html",
+ {
+ "request": request,
+ "archetypes": archetypes,
+ "theme_picker_diagnostics": _diag_enabled(),
+ },
+ )
+
+@router.get("/metrics")
+async def theme_metrics():
+ if not _diag_enabled():
+ raise HTTPException(status_code=403, detail="diagnostics_disabled")
+ try:
+ idx = load_index()
+ prewarm_common_filters()
+ return JSONResponse({
+ "ok": True,
+ "etag": idx.etag,
+ "catalog": catalog_metrics(),
+ "preview": preview_metrics(),
+ "client_perf": {
+ "list_render_avg_ms": round(sum(CLIENT_PERF["list_render_ms"]) / len(CLIENT_PERF["list_render_ms"])) if CLIENT_PERF["list_render_ms"] else 0,
+ "list_render_count": len(CLIENT_PERF["list_render_ms"]),
+ "preview_load_avg_ms": round(sum(CLIENT_PERF["preview_load_ms"]) / len(CLIENT_PERF["preview_load_ms"])) if CLIENT_PERF["preview_load_ms"] else 0,
+ "preview_load_batch_count": len(CLIENT_PERF["preview_load_ms"]),
+ },
+ "log_counts": LOG_COUNTS,
+ })
+ except Exception as e:
+ return JSONResponse({"ok": False, "error": str(e)}, status_code=500)
+
+
+@router.get("/", response_class=HTMLResponse)
+async def theme_catalog_simple(request: Request):
+ """Simplified catalog: list + search only (no per-row heavy data)."""
+ return _templates.TemplateResponse("themes/catalog_simple.html", {"request": request})
+
+
+@router.get("/{theme_id}", response_class=HTMLResponse)
+async def theme_catalog_detail_page(theme_id: str, request: Request):
+ """Full detail page for a single theme (standalone route)."""
+ try:
+ idx = load_index()
+ except FileNotFoundError:
+ return HTMLResponse("Catalog unavailable.
", status_code=503)
+ slug = slugify(theme_id)
+ entry = idx.slug_to_entry.get(slug)
+ if not entry:
+ return HTMLResponse("Not found.
", status_code=404)
+ detail = project_detail(slug, entry, idx.slug_to_yaml, uncapped=False)
+ # Strip diagnostics-only fields for public page
+ detail.pop('has_fallback_description', None)
+ detail.pop('editorial_quality', None)
+ detail.pop('uncapped_synergies', None)
+ # Build example + synergy commanders (reuse logic from preview)
+ example_commanders = [c for c in (detail.get("example_commanders") or []) if isinstance(c, str)]
+ synergy_commanders_raw = [c for c in (detail.get("synergy_commanders") or []) if isinstance(c, str)]
+ seen = set(example_commanders)
+ synergy_commanders: list[str] = []
+ for c in synergy_commanders_raw:
+ if c not in seen:
+ synergy_commanders.append(c)
+ seen.add(c)
+ # Render via reuse of detail fragment inside a page shell
+ return _templates.TemplateResponse(
+ "themes/detail_page.html",
+ {
+ "request": request,
+ "theme": detail,
+ "diagnostics": False,
+ "uncapped": False,
+ "yaml_available": False,
+ "example_commanders": example_commanders,
+ "synergy_commanders": synergy_commanders,
+ "standalone_page": True,
+ },
+ )
+
+
+@router.get("/fragment/list", response_class=HTMLResponse)
+async def theme_list_fragment(
+ request: Request,
+ q: str | None = None,
+ archetype: str | None = None,
+ bucket: str | None = None,
+ colors: str | None = None,
+ diagnostics: bool | None = None,
+ synergy_mode: str | None = Query(None, description="Synergy display mode: 'capped' (default) or 'full'"),
+ limit: int | None = Query(20, ge=1, le=100),
+ offset: int | None = Query(0, ge=0),
+):
+ import time as _t
+ t0 = _t.time()
+ try:
+ idx = load_index()
+ except FileNotFoundError:
+ return HTMLResponse("Catalog unavailable.
", status_code=503)
+ color_list = [c.strip() for c in colors.split(',')] if colors else None
+ # Fast filtering (falls back only for legacy logic differences if needed)
+ slugs = filter_slugs_fast(idx, q=q, archetype=archetype, bucket=bucket, colors=color_list)
+ diag = _diag_enabled() and bool(diagnostics)
+ lim = int(limit or 20)
+ off = int(offset or 0)
+ total = len(slugs)
+ slice_slugs = slugs[off: off + lim]
+ items = summaries_for_slugs(idx, slice_slugs)
+ # Synergy display logic: default 'capped' mode (cap at 6) unless diagnostics & user explicitly requests full
+ # synergy_mode can be 'full' to force uncapped in list (still diagnostics-gated to prevent layout spam in prod)
+ mode = (synergy_mode or '').strip().lower()
+ allow_full = (mode == 'full') and diag # only diagnostics may request full
+ SYNERGY_CAP = 6
+ if not allow_full:
+ for it in items:
+ syns = it.get("synergies") or []
+ if isinstance(syns, list) and len(syns) > SYNERGY_CAP:
+ it["synergies_capped"] = True
+ it["synergies_full"] = syns
+ it["synergies"] = syns[:SYNERGY_CAP]
+ if not diag:
+ for it in items:
+ it.pop('has_fallback_description', None)
+ it.pop('editorial_quality', None)
+ duration_ms = int(((_t.time() - t0) * 1000))
+ resp = _templates.TemplateResponse(
+ "themes/list_fragment.html",
+ {
+ "request": request,
+ "items": items,
+ "diagnostics": diag,
+ "total": total,
+ "limit": lim,
+ "offset": off,
+ "next_offset": off + lim if (off + lim) < total else None,
+ "prev_offset": off - lim if off - lim >= 0 else None,
+ },
+ )
+ resp.headers["X-ThemeCatalog-Filter-Duration-ms"] = str(duration_ms)
+ resp.headers["X-ThemeCatalog-Index-ETag"] = idx.etag
+ return resp
+
+
+@router.get("/fragment/list_simple", response_class=HTMLResponse)
+async def theme_list_simple_fragment(
+ request: Request,
+ q: str | None = None,
+ limit: int | None = Query(100, ge=1, le=300),
+ offset: int | None = Query(0, ge=0),
+):
+ """Lightweight list: only id, theme, short_description (for speed).
+
+ Attempts fast path using precomputed theme_list.json; falls back to full index.
+ """
+ import time as _t
+ t0 = _t.time()
+ lim = int(limit or 100)
+ off = int(offset or 0)
+ fast_items = _load_fast_theme_list()
+ fast_used = False
+ items: list[dict[str, Any]] = []
+ total = 0
+ if fast_items is not None:
+ fast_used = True
+ # Filter (substring on theme only) if q provided
+ if q:
+ ql = q.lower()
+ fast_items = [e for e in fast_items if isinstance(e.get("theme"), str) and ql in e["theme"].lower()]
+ total = len(fast_items)
+ slice_items = fast_items[off: off + lim]
+ for e in slice_items:
+ items.append({
+ "id": e.get("id"),
+ "theme": e.get("theme"),
+ "short_description": e.get("short_description"),
+ })
+ else:
+ # Fallback: load full index
+ try:
+ idx = load_index()
+ except FileNotFoundError:
+ return HTMLResponse("Catalog unavailable.
", status_code=503)
+ slugs = filter_slugs_fast(idx, q=q, archetype=None, bucket=None, colors=None)
+ total = len(slugs)
+ slice_slugs = slugs[off: off + lim]
+ items_raw = summaries_for_slugs(idx, slice_slugs)
+ for it in items_raw:
+ items.append({
+ "id": it.get("id"),
+ "theme": it.get("theme"),
+ "short_description": it.get("short_description"),
+ })
+ duration_ms = int(((_t.time() - t0) * 1000))
+ resp = _templates.TemplateResponse(
+ "themes/list_simple_fragment.html",
+ {
+ "request": request,
+ "items": items,
+ "total": total,
+ "limit": lim,
+ "offset": off,
+ "next_offset": off + lim if (off + lim) < total else None,
+ "prev_offset": off - lim if off - lim >= 0 else None,
+ },
+ )
+ resp.headers['X-ThemeCatalog-Simple-Duration-ms'] = str(duration_ms)
+ resp.headers['X-ThemeCatalog-Simple-Fast'] = '1' if fast_used else '0'
+ # Consistency: expose same filter duration style header used by full list fragment so
+ # tooling / DevTools inspection does not depend on which catalog view is active.
+ resp.headers['X-ThemeCatalog-Filter-Duration-ms'] = str(duration_ms)
+ return resp
+
+
+@router.get("/fragment/detail/{theme_id}", response_class=HTMLResponse)
+async def theme_detail_fragment(
+ theme_id: str,
+ diagnostics: bool | None = None,
+ uncapped: bool | None = None,
+ request: Request = None,
+):
+ try:
+ idx = load_index()
+ except FileNotFoundError:
+ return HTMLResponse("Catalog unavailable.
", status_code=503)
+ slug = slugify(theme_id)
+ entry = idx.slug_to_entry.get(slug)
+ if not entry:
+ return HTMLResponse("Not found.
", status_code=404)
+ diag = _diag_enabled() and bool(diagnostics)
+ uncapped_enabled = bool(uncapped) and diag
+ detail = project_detail(slug, entry, idx.slug_to_yaml, uncapped=uncapped_enabled)
+ if not diag:
+ detail.pop('has_fallback_description', None)
+ detail.pop('editorial_quality', None)
+ detail.pop('uncapped_synergies', None)
+ return _templates.TemplateResponse(
+ "themes/detail_fragment.html",
+ {
+ "request": request,
+ "theme": detail,
+ "diagnostics": diag,
+ "uncapped": uncapped_enabled,
+ "yaml_available": diag, # gate by diagnostics flag
+ },
+ )
+
+
+## (moved metrics route earlier to avoid collision with catch-all /{theme_id})
+
+
+@router.get("/yaml/{theme_id}")
+async def theme_yaml(theme_id: str):
+ """Return raw YAML file for a theme (diagnostics/dev only)."""
+ if not _diag_enabled():
+ raise HTTPException(status_code=403, detail="diagnostics_disabled")
+ try:
+ idx = load_index()
+ except FileNotFoundError:
+ raise HTTPException(status_code=503, detail="catalog_unavailable")
+ slug = slugify(theme_id)
+ # Attempt to locate via slug -> YAML map, fallback path guess
+ y = idx.slug_to_yaml.get(slug)
+ if not y:
+ raise HTTPException(status_code=404, detail="yaml_not_found")
+ # Reconstruct minimal YAML (we have dict already)
+ import yaml as _yaml # local import to keep top-level lean
+ text = _yaml.safe_dump(y, sort_keys=False) # type: ignore
+ headers = {"Content-Type": "text/plain; charset=utf-8"}
+ return HTMLResponse(text, headers=headers)
+
+
+@router.get("/api/themes")
+async def api_themes(
+ request: Request,
+ q: str | None = Query(None, description="Substring filter on theme or synergies"),
+ archetype: str | None = Query(None, description="Filter by deck_archetype"),
+ bucket: str | None = Query(None, description="Filter by popularity bucket"),
+ colors: str | None = Query(None, description="Comma-separated color initials (e.g. G,W)"),
+ limit: int = Query(50, ge=1, le=200),
+ offset: int = Query(0, ge=0),
+ diagnostics: bool | None = Query(None, description="Force diagnostics mode (allowed only if flag enabled)"),
+):
+ import time as _t
+ t0 = _t.time()
+ try:
+ idx = load_index()
+ except FileNotFoundError:
+ raise HTTPException(status_code=503, detail="catalog_unavailable")
+ color_list = [c.strip() for c in colors.split(",") if c.strip()] if colors else None
+ # Validate archetype quickly (fast path uses underlying entries anyway)
+ if archetype:
+ present_archetypes = {e.deck_archetype for e in idx.catalog.themes if e.deck_archetype}
+ if archetype not in present_archetypes:
+ slugs: list[str] = []
+ else:
+ slugs = filter_slugs_fast(idx, q=q, archetype=archetype, bucket=bucket, colors=color_list)
+ else:
+ slugs = filter_slugs_fast(idx, q=q, archetype=None, bucket=bucket, colors=color_list)
+ total = len(slugs)
+ slice_slugs = slugs[offset: offset + limit]
+ items = summaries_for_slugs(idx, slice_slugs)
+ diag = _diag_enabled() and bool(diagnostics)
+ if not diag:
+ # Strip diagnostics-only fields
+ for it in items:
+ # has_fallback_description is diagnostics-only
+ it.pop("has_fallback_description", None)
+ it.pop("editorial_quality", None)
+ duration_ms = int(((_t.time() - t0) * 1000))
+ headers = {
+ "ETag": idx.etag,
+ "Cache-Control": "no-cache", # Clients may still conditional GET using ETag
+ "X-ThemeCatalog-Filter-Duration-ms": str(duration_ms),
+ }
+ return JSONResponse({
+ "ok": True,
+ "count": total,
+ "items": items,
+ "next_offset": offset + limit if (offset + limit) < total else None,
+ "stale": False, # status already exposed elsewhere; keep placeholder for UI
+ "generated_at": idx.catalog.metadata_info.generated_at if idx.catalog.metadata_info else None,
+ "diagnostics": diag,
+ }, headers=headers)
+
+
+@router.get("/api/search")
+async def api_theme_search(
+ q: str = Query(..., min_length=1, description="Search query"),
+ limit: int = Query(15, ge=1, le=50),
+ include_synergies: bool = Query(False, description="Also match synergies (slower)"),
+):
+ """Lightweight search with tiered matching (exact > prefix > substring).
+
+ Performance safeguards:
+ - Exact/prefix scan stops once the combined match count reaches the limit.
+ - Substring phase runs only while results remain under the limit and scans at most the first 250 themes.
+ - Optional synergy matching (off by default) avoids wide fan-out for terms like 'aggro' that appear in many synergy lists.
+ """
+ try:
+ idx = load_index()
+ except FileNotFoundError:
+ return JSONResponse({"ok": False, "error": "catalog_unavailable"}, status_code=503)
+ qnorm = q.strip()
+ if not qnorm:
+ return JSONResponse({"ok": True, "items": []})
+ qlower = qnorm.lower()
+ exact: list[dict[str, Any]] = []
+ prefix: list[dict[str, Any]] = []
+ substr: list[dict[str, Any]] = []
+ seen: set[str] = set()
+ themes_iter = list(idx.catalog.themes) # type: ignore[attr-defined]
+ # Phase 1 + 2: exact / prefix
+ for t in themes_iter:
+ name = t.theme
+ slug = slugify(name)
+ lower_name = name.lower()
+ if lower_name == qlower or slug == qlower:
+ if slug not in seen:
+ exact.append({"id": slug, "theme": name})
+ seen.add(slug)
+ continue
+ if lower_name.startswith(qlower):
+ if slug not in seen:
+ prefix.append({"id": slug, "theme": name})
+ seen.add(slug)
+ if len(exact) + len(prefix) >= limit:
+ break
+ # Phase 3: substring (only if still room)
+ if (len(exact) + len(prefix)) < limit:
+ scan_limit = 250 # cap scan for responsiveness
+ for t in themes_iter[:scan_limit]:
+ name = t.theme
+ slug = slugify(name)
+ if slug in seen:
+ continue
+ if qlower in name.lower():
+ substr.append({"id": slug, "theme": name})
+ seen.add(slug)
+ if (len(exact) + len(prefix) + len(substr)) >= limit:
+ break
+ ordered = exact + prefix + substr
+ # Optional synergy search fill (lowest priority) if still space
+ if include_synergies and len(ordered) < limit:
+ remaining = limit - len(ordered)
+ for t in themes_iter:
+ if remaining <= 0:
+ break
+ slug = slugify(t.theme)
+ if slug in seen:
+ continue
+ syns = getattr(t, 'synergies', None) or []
+ try:
+ # Only a quick any() scan to keep it cheap
+ if any(qlower in s.lower() for s in syns):
+ ordered.append({"id": slug, "theme": t.theme})
+ seen.add(slug)
+ remaining -= 1
+ except Exception:
+ continue
+ if len(ordered) > limit:
+ ordered = ordered[:limit]
+ return JSONResponse({"ok": True, "items": ordered})
+
+
+@router.get("/api/theme/{theme_id}")
+async def api_theme_detail(
+ theme_id: str,
+ uncapped: bool | None = Query(False, description="Return uncapped synergy set (diagnostics mode only)"),
+ diagnostics: bool | None = Query(None, description="Diagnostics mode gating extra fields"),
+):
+ try:
+ idx = load_index()
+ except FileNotFoundError:
+ raise HTTPException(status_code=503, detail="catalog_unavailable")
+ slug = slugify(theme_id)
+ entry = idx.slug_to_entry.get(slug)
+ if not entry:
+ raise HTTPException(status_code=404, detail="theme_not_found")
+ diag = _diag_enabled() and bool(diagnostics)
+ detail = project_detail(slug, entry, idx.slug_to_yaml, uncapped=bool(uncapped) and diag)
+ if not diag:
+ # Remove diagnostics-only fields
+ detail.pop("has_fallback_description", None)
+ detail.pop("editorial_quality", None)
+ detail.pop("uncapped_synergies", None)
+ headers = {"ETag": idx.etag, "Cache-Control": "no-cache"}
+ return JSONResponse({"ok": True, "theme": detail, "diagnostics": diag}, headers=headers)
+
+
+@router.get("/api/theme/{theme_id}/preview")
+async def api_theme_preview(
+ theme_id: str,
+ limit: int = Query(12, ge=1, le=30),
+ colors: str | None = Query(None, description="Comma separated color filter (currently placeholder)"),
+ commander: str | None = Query(None, description="Commander name to bias sampling (future)"),
+):
+ try:
+ payload = get_theme_preview(theme_id, limit=limit, colors=colors, commander=commander)
+ except KeyError:
+ raise HTTPException(status_code=404, detail="theme_not_found")
+ return JSONResponse({"ok": True, "preview": payload})
+
+
+@router.get("/fragment/preview/{theme_id}", response_class=HTMLResponse)
+async def theme_preview_fragment(
+ theme_id: str,
+ limit: int = Query(12, ge=1, le=30),
+ colors: str | None = None,
+ commander: str | None = None,
+ suppress_curated: bool = Query(False, description="If true, omit curated example cards/commanders from the sample area (used on detail page to avoid duplication)"),
+ minimal: bool = Query(False, description="Minimal inline variant (no header/controls/rationale – used in detail page collapsible preview)"),
+ request: Request = None,
+):
+ """Return HTML fragment for theme preview with caching headers.
+
+ Adds ETag and Last-Modified headers (no strong caching – enables conditional GET / 304).
+ ETag is composed of the catalog index etag plus a stable hash of the preview identity (theme id, limit, commander, and a small sample slice).
+ """
+ try:
+ payload = get_theme_preview(theme_id, limit=limit, colors=colors, commander=commander)
+ except KeyError:
+ return HTMLResponse("Theme not found.
", status_code=404)
+ # Load example commanders (authoritative list) from catalog detail for legality instead of inferring
+ example_commanders: list[str] = []
+ synergy_commanders: list[str] = []
+ try:
+ idx = load_index()
+ slug = slugify(theme_id)
+ entry = idx.slug_to_entry.get(slug)
+ if entry:
+ detail = project_detail(slug, entry, idx.slug_to_yaml, uncapped=False)
+ example_commanders = [c for c in (detail.get("example_commanders") or []) if isinstance(c, str)]
+ synergy_commanders_raw = [c for c in (detail.get("synergy_commanders") or []) if isinstance(c, str)]
+ # De-duplicate any overlap with example commanders while preserving order
+ seen = set(example_commanders)
+ for c in synergy_commanders_raw:
+ if c not in seen:
+ synergy_commanders.append(c)
+ seen.add(c)
+ except Exception:
+ example_commanders = []
+ synergy_commanders = []
+ # Build ETag (use catalog etag + hash of core identifying fields to reflect underlying data drift)
+ import hashlib
+ import json as _json
+ import time as _time
+ try:
+ idx = load_index()
+ catalog_tag = idx.etag
+ except Exception:
+ catalog_tag = "unknown"
+ hash_src = _json.dumps({
+ "theme": theme_id,
+ "limit": limit,
+ "commander": commander,
+ "sample": payload.get("sample", [])[:3], # small slice for stability & speed
+ "v": 1,
+ }, sort_keys=True).encode("utf-8")
+ etag = "pv-" + hashlib.sha256(hash_src).hexdigest()[:20] + f"-{catalog_tag}"
+ # Conditional request support
+ if request is not None:
+ inm = request.headers.get("if-none-match")
+ if inm and inm == etag:
+ # 304 Not Modified – FastAPI HTMLResponse with empty body & headers
+ resp = HTMLResponse(status_code=304, content="")
+ resp.headers["ETag"] = etag
+ from email.utils import formatdate as _fmtdate
+ resp.headers["Last-Modified"] = _fmtdate(timeval=_time.time(), usegmt=True)
+ resp.headers["Cache-Control"] = "no-cache"
+ return resp
+ ctx = {
+ "request": request,
+ "preview": payload,
+ "example_commanders": example_commanders,
+ "synergy_commanders": synergy_commanders,
+ "theme_id": theme_id,
+ "etag": etag,
+ "suppress_curated": suppress_curated,
+ "minimal": minimal,
+ }
+ resp = _templates.TemplateResponse("themes/preview_fragment.html", ctx)
+ resp.headers["ETag"] = etag
+ from email.utils import formatdate as _fmtdate
+ resp.headers["Last-Modified"] = _fmtdate(timeval=_time.time(), usegmt=True)
+ resp.headers["Cache-Control"] = "no-cache"
+ return resp
+
+
+# --- Preview Export Endpoints (CSV / JSON) ---
+@router.get("/preview/{theme_id}/export.json")
+async def export_preview_json(
+ theme_id: str,
+ limit: int = Query(12, ge=1, le=60),
+ colors: str | None = None,
+ commander: str | None = None,
+ curated_only: bool | None = Query(False, description="If true, only curated example + curated synergy entries returned"),
+):
+ try:
+ payload = get_theme_preview(theme_id, limit=limit, colors=colors, commander=commander)
+ except KeyError:
+ raise HTTPException(status_code=404, detail="theme_not_found")
+ items = payload.get("sample", [])
+ if curated_only:
+ items = [i for i in items if any(r in {"example", "curated_synergy", "synthetic"} for r in (i.get("roles") or []))]
+ return JSONResponse({
+ "ok": True,
+ "theme": payload.get("theme"),
+ "theme_id": payload.get("theme_id"),
+ "curated_only": bool(curated_only),
+ "generated_at": payload.get("generated_at"),
+ "limit": limit,
+ "count": len(items),
+ "items": items,
+ })
+
+
+@router.get("/preview/{theme_id}/export.csv")
+async def export_preview_csv(
+ theme_id: str,
+ limit: int = Query(12, ge=1, le=60),
+ colors: str | None = None,
+ commander: str | None = None,
+ curated_only: bool | None = Query(False, description="If true, only curated example + curated synergy entries returned"),
+):
+ import csv as _csv
+ import io as _io
+ try:
+ payload = get_theme_preview(theme_id, limit=limit, colors=colors, commander=commander)
+ except KeyError:
+ raise HTTPException(status_code=404, detail="theme_not_found")
+ rows = payload.get("sample", [])
+ if curated_only:
+ rows = [r for r in rows if any(role in {"example", "curated_synergy", "synthetic"} for role in (r.get("roles") or []))]
+ buf = _io.StringIO()
+ fieldnames = ["name", "roles", "score", "rarity", "mana_cost", "color_identity_list", "pip_colors", "reasons", "tags"]
+ w = _csv.DictWriter(buf, fieldnames=fieldnames)
+ w.writeheader()
+ for r in rows:
+ w.writerow({
+ "name": r.get("name"),
+ "roles": ";".join(r.get("roles") or []),
+ "score": r.get("score"),
+ "rarity": r.get("rarity"),
+ "mana_cost": r.get("mana_cost"),
+ "color_identity_list": ";".join(r.get("color_identity_list") or []),
+ "pip_colors": ";".join(r.get("pip_colors") or []),
+ "reasons": ";".join(r.get("reasons") or []),
+ "tags": ";".join(r.get("tags") or []),
+ })
+ csv_text = buf.getvalue()
+ from fastapi.responses import Response
+ filename = f"preview_{theme_id}.csv"
+ headers = {
+ "Content-Disposition": f"attachment; filename={filename}",
+ "Content-Type": "text/csv; charset=utf-8",
+ }
+ return Response(content=csv_text, media_type="text/csv", headers=headers)
+
+
+# --- New: Client performance marks ingestion (Section E) ---
+@router.post("/metrics/client")
+async def ingest_client_metrics(request: Request, payload: dict[str, Any] = Body(...)):
+ if not _diag_enabled():
+ raise HTTPException(status_code=403, detail="diagnostics_disabled")
+ try:
+ events = payload.get("events")
+ if not isinstance(events, list):
+ return JSONResponse({"ok": False, "error": "invalid_events"}, status_code=400)
+ for ev in events:
+ if not isinstance(ev, dict):
+ continue
+ name = ev.get("name")
+ dur = ev.get("duration_ms")
+ if name == "list_render" and isinstance(dur, (int, float)) and dur >= 0:
+ CLIENT_PERF["list_render_ms"].append(float(dur))
+ if len(CLIENT_PERF["list_render_ms"]) > MAX_CLIENT_SAMPLES:
+ # Drop oldest half to keep memory bounded
+ CLIENT_PERF["list_render_ms"] = CLIENT_PERF["list_render_ms"][len(CLIENT_PERF["list_render_ms"])//2:]
+ elif name == "preview_load_batch":
+ # Aggregate average into samples list (store avg redundantly for now)
+ avg_ms = ev.get("avg_ms")
+ if isinstance(avg_ms, (int, float)) and avg_ms >= 0:
+ CLIENT_PERF["preview_load_ms"].append(float(avg_ms))
+ if len(CLIENT_PERF["preview_load_ms"]) > MAX_CLIENT_SAMPLES:
+ CLIENT_PERF["preview_load_ms"] = CLIENT_PERF["preview_load_ms"][len(CLIENT_PERF["preview_load_ms"])//2:]
+ return JSONResponse({"ok": True, "ingested": len(events)})
+ except Exception as e: # pragma: no cover
+ return JSONResponse({"ok": False, "error": str(e)}, status_code=500)
+
+
+# --- New: Structured logging ingestion for cache/prefetch events (Section E) ---
+@router.post("/log")
+async def ingest_structured_log(request: Request, payload: dict[str, Any] = Body(...)):
+ if not _diag_enabled():
+ raise HTTPException(status_code=403, detail="diagnostics_disabled")
+ try:
+ event = payload.get("event")
+ if not isinstance(event, str) or not event:
+ return JSONResponse({"ok": False, "error": "missing_event"}, status_code=400)
+ LOG_COUNTS[event] = LOG_COUNTS.get(event, 0) + 1
+ if event == "preview_fetch_error": # client-side fetch failure
+ try:
+ _theme_preview_mod._PREVIEW_REQUEST_ERROR_COUNT += 1 # type: ignore[attr-defined]
+ except Exception:
+ pass
+ # Lightweight echo back
+ return JSONResponse({"ok": True, "count": LOG_COUNTS[event]})
+ except Exception as e: # pragma: no cover
+ return JSONResponse({"ok": False, "error": str(e)}, status_code=500)
diff --git a/code/web/services/orchestrator.py b/code/web/services/orchestrator.py
index 24b4ab6..db99d02 100644
--- a/code/web/services/orchestrator.py
+++ b/code/web/services/orchestrator.py
@@ -910,6 +910,18 @@ def _ensure_setup_ready(out, force: bool = False) -> None:
_run_theme_metadata_enrichment(out_func)
except Exception:
pass
+ # Bust theme-related in-memory caches so new catalog reflects immediately
+ try:
+ from .theme_catalog_loader import bust_filter_cache # type: ignore
+ from .theme_preview import bust_preview_cache # type: ignore
+ bust_filter_cache("catalog_refresh")
+ bust_preview_cache("catalog_refresh")
+ try:
+ out_func("[cache] Busted theme filter & preview caches after catalog refresh")
+ except Exception:
+ pass
+ except Exception:
+ pass
except Exception as _e: # pragma: no cover - non-critical diagnostics only
try:
out_func(f"Theme catalog refresh failed: {_e}")
@@ -1092,6 +1104,13 @@ def _ensure_setup_ready(out, force: bool = False) -> None:
duration_s = None
# Generate / refresh theme catalog (JSON + per-theme YAML) BEFORE marking done so UI sees progress
_refresh_theme_catalog(out, force=True, fast_path=False)
+ try:
+ from .theme_catalog_loader import bust_filter_cache # type: ignore
+ from .theme_preview import bust_preview_cache # type: ignore
+ bust_filter_cache("tagging_complete")
+ bust_preview_cache("tagging_complete")
+ except Exception:
+ pass
payload = {"running": False, "phase": "done", "message": "Setup complete", "color": None, "percent": 100, "finished_at": finished, "themes_exported": True}
if duration_s is not None:
payload["duration_seconds"] = duration_s
diff --git a/code/web/services/theme_catalog_loader.py b/code/web/services/theme_catalog_loader.py
new file mode 100644
index 0000000..c5a88e2
--- /dev/null
+++ b/code/web/services/theme_catalog_loader.py
@@ -0,0 +1,511 @@
+"""Theme catalog loader & projection utilities.
+
+Phase E foundation + Phase F performance optimizations.
+
+Responsibilities:
+ - Lazy load & cache merged catalog JSON + YAML overlays.
+ - Provide slug -> ThemeEntry and raw YAML maps.
+ - Provide summary & detail projections (with synergy segmentation).
+ - NEW (Phase F perf): precompute summary dicts & lowercase haystacks, and
+ add fast filtering / result caching to accelerate list & API endpoints.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+import json
+import re
+from typing import Dict, Any, List, Optional, Tuple, Iterable
+
+import yaml # type: ignore
+from pydantic import BaseModel
+
+# Import ThemeCatalog & ThemeEntry with resilient fallbacks.
+# Runtime contexts:
+# - Local dev (cwd == project root): modules available as top-level.
+# - Docker (WORKDIR /app/code): modules also available top-level.
+# - Package/zip installs (rare): may require 'code.' prefix.
+try:
+ from type_definitions_theme_catalog import ThemeCatalog, ThemeEntry # type: ignore
+except ImportError: # pragma: no cover - fallback path
+ try:
+ from code.type_definitions_theme_catalog import ThemeCatalog, ThemeEntry # type: ignore
+ except ImportError: # pragma: no cover - last resort (avoid beyond top-level relative import)
+ raise
+
+CATALOG_JSON = Path("config/themes/theme_list.json")
+YAML_DIR = Path("config/themes/catalog")
+
+_CACHE: Dict[str, Any] = {}
+# Filter result cache: key = (etag, q, archetype, bucket, colors_tuple)
+_FILTER_CACHE: Dict[Tuple[str, Optional[str], Optional[str], Optional[str], Optional[Tuple[str, ...]]], List[str]] = {}
+_FILTER_REQUESTS = 0
+_FILTER_CACHE_HITS = 0
+_FILTER_LAST_BUST_AT: float | None = None
+_FILTER_PREWARMED = False # guarded single-run prewarm flag
+
+# --- Performance: YAML newest mtime scan caching ---
+# Repeated calls to _needs_reload() previously scanned every *.yml file (~700 files)
+# on each theme list/filter request, contributing noticeable latency on Windows (many stat calls).
+# We cache the newest YAML mtime for a short interval (default 2s, tunable via env) to avoid
+# excessive directory traversal while still detecting edits quickly during active authoring.
+_YAML_SCAN_CACHE: Dict[str, Any] = { # keys: newest_mtime (float), scanned_at (float)
+ "newest_mtime": 0.0,
+ "scanned_at": 0.0,
+}
+try:
+ import os as _os
+ _YAML_SCAN_INTERVAL = float((_os.getenv("THEME_CATALOG_YAML_SCAN_INTERVAL_SEC") or "2.0"))
+except Exception: # pragma: no cover - fallback
+ _YAML_SCAN_INTERVAL = 2.0
+
+
+class SlugThemeIndex(BaseModel):
+ catalog: ThemeCatalog
+ slug_to_entry: Dict[str, ThemeEntry]
+ slug_to_yaml: Dict[str, Dict[str, Any]] # raw YAML data per theme
+ # Performance precomputations for fast list filtering
+ summary_by_slug: Dict[str, Dict[str, Any]]
+ haystack_by_slug: Dict[str, str]
+ primary_color_by_slug: Dict[str, Optional[str]]
+ secondary_color_by_slug: Dict[str, Optional[str]]
+ mtime: float
+ yaml_mtime_max: float
+ etag: str
+
+
+_GENERIC_DESCRIPTION_PREFIXES = [
+ "Accumulates ", # many auto-generated variants start like this
+ "Builds around ",
+ "Leverages ",
+]
+
+
+_SLUG_RE_NON_ALNUM = re.compile(r"[^a-z0-9]+")
+
+
+def slugify(name: str) -> str:
+ s = name.lower().strip()
+ # Preserve +1/+1 pattern meaningfully by converting '+' to 'plus'
+ s = s.replace("+", "plus")
+ s = _SLUG_RE_NON_ALNUM.sub("-", s)
+ s = re.sub(r"-+", "-", s).strip("-")
+ return s
+
+
+def _needs_reload() -> bool:
+ if not CATALOG_JSON.exists():
+ return bool(_CACHE)
+ mtime = CATALOG_JSON.stat().st_mtime
+ idx: SlugThemeIndex | None = _CACHE.get("index") # type: ignore
+ if idx is None:
+ return True
+ if mtime > idx.mtime:
+ return True
+ # If any YAML newer than catalog mtime or newest YAML newer than cached scan -> reload
+ if YAML_DIR.exists():
+ import time as _t
+ now = _t.time()
+ # Use cached newest mtime if within interval; else rescan.
+ if (now - _YAML_SCAN_CACHE["scanned_at"]) < _YAML_SCAN_INTERVAL:
+ newest_yaml = _YAML_SCAN_CACHE["newest_mtime"]
+ else:
+ # Fast path: use os.scandir for lower overhead vs Path.glob
+ newest = 0.0
+ try:
+ import os as _os
+ with _os.scandir(YAML_DIR) as it: # type: ignore[arg-type]
+ for entry in it:
+ if entry.is_file() and entry.name.endswith('.yml'):
+ try:
+ st = entry.stat()
+ if st.st_mtime > newest:
+ newest = st.st_mtime
+ except Exception:
+ continue
+ except Exception: # pragma: no cover - scandir failure fallback
+ newest = max((p.stat().st_mtime for p in YAML_DIR.glob('*.yml')), default=0.0)
+ _YAML_SCAN_CACHE["newest_mtime"] = newest
+ _YAML_SCAN_CACHE["scanned_at"] = now
+ newest_yaml = newest
+ if newest_yaml > idx.yaml_mtime_max:
+ return True
+ return False
+
+
+def _load_yaml_map() -> Tuple[Dict[str, Dict[str, Any]], float]:
+ latest = 0.0
+ out: Dict[str, Dict[str, Any]] = {}
+ if not YAML_DIR.exists():
+ return out, latest
+ for p in YAML_DIR.glob("*.yml"):
+ try:
+ data = yaml.safe_load(p.read_text(encoding="utf-8")) or {}
+ if isinstance(data, dict):
+ slug = data.get("id") or slugify(data.get("display_name", p.stem))
+ out[str(slug)] = data
+ if p.stat().st_mtime > latest:
+ latest = p.stat().st_mtime
+ except Exception:
+ continue
+ return out, latest
+
+
+def _compute_etag(size: int, mtime: float, yaml_mtime: float) -> str:
+ return f"{int(size)}-{int(mtime)}-{int(yaml_mtime)}"
+
+
+def load_index() -> SlugThemeIndex:
+ if not _needs_reload():
+ return _CACHE["index"] # type: ignore
+ if not CATALOG_JSON.exists():
+ raise FileNotFoundError("theme_list.json missing")
+ raw = json.loads(CATALOG_JSON.read_text(encoding="utf-8") or "{}")
+ catalog = ThemeCatalog.model_validate(raw)
+ slug_to_entry: Dict[str, ThemeEntry] = {}
+ summary_by_slug: Dict[str, Dict[str, Any]] = {}
+ haystack_by_slug: Dict[str, str] = {}
+ primary_color_by_slug: Dict[str, Optional[str]] = {}
+ secondary_color_by_slug: Dict[str, Optional[str]] = {}
+ for t in catalog.themes:
+ slug = slugify(t.theme)
+ slug_to_entry[slug] = t
+ summary = project_summary(t)
+ summary_by_slug[slug] = summary
+ haystack_by_slug[slug] = "|".join([t.theme] + t.synergies).lower()
+ primary_color_by_slug[slug] = t.primary_color
+ secondary_color_by_slug[slug] = t.secondary_color
+ yaml_map, yaml_mtime_max = _load_yaml_map()
+ idx = SlugThemeIndex(
+ catalog=catalog,
+ slug_to_entry=slug_to_entry,
+ slug_to_yaml=yaml_map,
+ summary_by_slug=summary_by_slug,
+ haystack_by_slug=haystack_by_slug,
+ primary_color_by_slug=primary_color_by_slug,
+ secondary_color_by_slug=secondary_color_by_slug,
+ mtime=CATALOG_JSON.stat().st_mtime,
+ yaml_mtime_max=yaml_mtime_max,
+ etag=_compute_etag(CATALOG_JSON.stat().st_size, CATALOG_JSON.stat().st_mtime, yaml_mtime_max),
+ )
+ _CACHE["index"] = idx
+ _FILTER_CACHE.clear() # Invalidate fast filter cache on any reload
+ return idx
+
+
+def validate_catalog_integrity(rebuild: bool = True) -> Dict[str, Any]:
+ """Validate that theme_list.json matches current YAML set via catalog_hash.
+
+ Returns a dict with status fields. If drift is detected and rebuild=True, attempts an
+ automatic rebuild by running build_theme_catalog.py with THEME_CATALOG_MODE=merge.
+ Environment flags:
+ THEME_CATALOG_VALIDATE=1 enables invocation from app startup (else caller controls).
+ """
+ out: Dict[str, Any] = {"ok": True, "rebuild_attempted": False, "drift": False}
+ if not CATALOG_JSON.exists():
+ out.update({"ok": False, "error": "theme_list_missing"})
+ return out
+ try:
+ raw = json.loads(CATALOG_JSON.read_text(encoding="utf-8") or "{}")
+ meta = raw.get("metadata_info") or {}
+ recorded_hash = meta.get("catalog_hash")
+ except Exception as e: # pragma: no cover
+ out.update({"ok": False, "error": f"read_error:{e}"})
+ return out
+ # Recompute hash using same heuristic as build script
+ from scripts.build_theme_catalog import load_catalog_yaml # type: ignore
+ try:
+ yaml_catalog = load_catalog_yaml(verbose=False) # keyed by display_name
+ except Exception:
+ yaml_catalog = {}
+ import hashlib as _hashlib
+ h = _hashlib.sha256()
+ for name in sorted(yaml_catalog.keys()):
+ yobj = yaml_catalog[name]
+ try:
+ payload = (
+ getattr(yobj, 'id', ''),
+ getattr(yobj, 'display_name', ''),
+ tuple(getattr(yobj, 'curated_synergies', []) or []),
+ tuple(getattr(yobj, 'enforced_synergies', []) or []),
+ tuple(getattr(yobj, 'example_commanders', []) or []),
+ tuple(getattr(yobj, 'example_cards', []) or []),
+ getattr(yobj, 'deck_archetype', None),
+ getattr(yobj, 'popularity_hint', None),
+ getattr(yobj, 'description', None),
+ getattr(yobj, 'editorial_quality', None),
+ )
+ h.update(repr(payload).encode('utf-8'))
+ except Exception:
+ continue
+ # Synergy cap influences ordering; include if present in meta
+ if meta.get('synergy_cap') is not None:
+ h.update(str(meta.get('synergy_cap')).encode('utf-8'))
+ current_hash = h.hexdigest()
+ if recorded_hash and recorded_hash != current_hash:
+ out['drift'] = True
+ out['recorded_hash'] = recorded_hash
+ out['current_hash'] = current_hash
+ if rebuild:
+ import subprocess
+ import os as _os
+ import sys as _sys
+ out['rebuild_attempted'] = True
+ try:
+ env = {**_os.environ, 'THEME_CATALOG_MODE': 'merge'}
+ subprocess.run([
+ _sys.executable, 'code/scripts/build_theme_catalog.py'
+ ], check=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out['rebuild_ok'] = True
+ except Exception as e:
+ out['rebuild_ok'] = False
+ out['rebuild_error'] = str(e)
+ else:
+ out['drift'] = False
+ out['recorded_hash'] = recorded_hash
+ out['current_hash'] = current_hash
+ return out
+
+
+def has_fallback_description(entry: ThemeEntry) -> bool:
+ if not entry.description:
+ return True
+ desc = entry.description.strip()
+ # Simple heuristic: generic if starts with any generic prefix and length < 160
+ if len(desc) < 160 and any(desc.startswith(p) for p in _GENERIC_DESCRIPTION_PREFIXES):
+ return True
+ return False
+
+
+def project_summary(entry: ThemeEntry) -> Dict[str, Any]:
+ # Short description (snippet) for list hover / condensed display
+ desc = entry.description or ""
+ short_desc = desc.strip()
+ if len(short_desc) > 110:
+ short_desc = short_desc[:107].rstrip() + "…"
+ return {
+ "id": slugify(entry.theme),
+ "theme": entry.theme,
+ "primary_color": entry.primary_color,
+ "secondary_color": entry.secondary_color,
+ "popularity_bucket": entry.popularity_bucket,
+ "deck_archetype": entry.deck_archetype,
+ "editorial_quality": entry.editorial_quality,
+ "description": entry.description,
+ "short_description": short_desc,
+ "synergies": entry.synergies,
+ "synergy_count": len(entry.synergies),
+ "has_fallback_description": has_fallback_description(entry),
+ }
+
+
+def _split_synergies(slug: str, entry: ThemeEntry, yaml_map: Dict[str, Dict[str, Any]]) -> Dict[str, List[str]]:
+ y = yaml_map.get(slug)
+ if not y:
+ return {"curated": [], "enforced": [], "inferred": []}
+ return {
+ "curated": [s for s in y.get("curated_synergies", []) if isinstance(s, str)],
+ "enforced": [s for s in y.get("enforced_synergies", []) if isinstance(s, str)],
+ "inferred": [s for s in y.get("inferred_synergies", []) if isinstance(s, str)],
+ }
+
+
+def project_detail(slug: str, entry: ThemeEntry, yaml_map: Dict[str, Dict[str, Any]], uncapped: bool = False) -> Dict[str, Any]:
+ seg = _split_synergies(slug, entry, yaml_map)
+ uncapped_synergies: Optional[List[str]] = None
+ if uncapped:
+ # Full ordered list reconstructed: curated + enforced + inferred, with duplicates removed
+ seen = set()
+ full: List[str] = []
+ for block in (seg["curated"], seg["enforced"], seg["inferred"]):
+ for s in block:
+ if s not in seen:
+ full.append(s)
+ seen.add(s)
+ uncapped_synergies = full
+ d = project_summary(entry)
+ d.update({
+ "curated_synergies": seg["curated"],
+ "enforced_synergies": seg["enforced"],
+ "inferred_synergies": seg["inferred"],
+ })
+ if uncapped_synergies is not None:
+ d["uncapped_synergies"] = uncapped_synergies
+ # Add editorial lists with YAML fallback (REGRESSION FIX 2025-09-20):
+ # The current theme_list.json emitted by the build pipeline omits the
+ # example_* and synergy_* editorial arrays. Earlier logic populated these
+ # from the JSON so previews showed curated examples. After the omission,
+ # ThemeEntry fields default to empty lists and curated examples vanished
+ # from the preview (user-reported). We now fall back to the per-theme YAML
+ # source when the ThemeEntry lists are empty to restore expected behavior
+ # without requiring an immediate catalog rebuild.
+ y_entry: Dict[str, Any] = yaml_map.get(slug, {}) or {}
+ def _norm_list(val: Any) -> List[str]:
+ if isinstance(val, list):
+ return [str(x) for x in val if isinstance(x, str)]
+ return []
+ example_commanders = entry.example_commanders or _norm_list(y_entry.get("example_commanders"))
+ example_cards = entry.example_cards or _norm_list(y_entry.get("example_cards"))
+ synergy_example_cards = getattr(entry, 'synergy_example_cards', None) or _norm_list(y_entry.get("synergy_example_cards"))
+ synergy_commanders = entry.synergy_commanders or _norm_list(y_entry.get("synergy_commanders"))
+ # YAML fallback for description & selected editorial fields (REGRESSION FIX 2025-09-20):
+ # theme_list.json currently omits description/editorial_quality/popularity_bucket for some themes after P2 build changes.
+ # Use YAML values when the ThemeEntry field is empty/None. Preserve existing non-empty entry values.
+ description = entry.description or y_entry.get("description") or None
+ editorial_quality = entry.editorial_quality or y_entry.get("editorial_quality") or None
+ popularity_bucket = entry.popularity_bucket or y_entry.get("popularity_bucket") or None
+ d.update({
+ "example_commanders": example_commanders,
+ "example_cards": example_cards,
+ "synergy_example_cards": synergy_example_cards,
+ "synergy_commanders": synergy_commanders,
+ "description": description,
+ "editorial_quality": editorial_quality,
+ "popularity_bucket": popularity_bucket,
+ })
+ return d
+
+
+def filter_entries(entries: List[ThemeEntry], *, q: Optional[str] = None, archetype: Optional[str] = None, bucket: Optional[str] = None, colors: Optional[List[str]] = None) -> List[ThemeEntry]:
+ q_lower = q.lower() if q else None
+ colors_set = {c.strip().upper() for c in colors} if colors else None
+ out: List[ThemeEntry] = []
+ for e in entries:
+ if archetype and e.deck_archetype != archetype:
+ continue
+ if bucket and e.popularity_bucket != bucket:
+ continue
+ if colors_set:
+ pc = (e.primary_color or "").upper()[:1]
+ sc = (e.secondary_color or "").upper()[:1]
+ if not (pc in colors_set or sc in colors_set):
+ continue
+ if q_lower:
+ hay = "|".join([e.theme] + e.synergies).lower()
+ if q_lower not in hay:
+ continue
+ out.append(e)
+ return out
+
+
+# -------------------- Optimized filtering (fast path) --------------------
+def _color_match(slug: str, colors_set: Optional[set[str]], idx: SlugThemeIndex) -> bool:
+ if not colors_set:
+ return True
+ pc = (idx.primary_color_by_slug.get(slug) or "").upper()[:1]
+ sc = (idx.secondary_color_by_slug.get(slug) or "").upper()[:1]
+ return (pc in colors_set) or (sc in colors_set)
+
+
+def filter_slugs_fast(
+ idx: SlugThemeIndex,
+ *,
+ q: Optional[str] = None,
+ archetype: Optional[str] = None,
+ bucket: Optional[str] = None,
+ colors: Optional[List[str]] = None,
+) -> List[str]:
+ """Return filtered slugs using precomputed haystacks & memoized cache.
+
+ Cache key: (etag, q_lower, archetype, bucket, colors_tuple) where colors_tuple
+ is sorted & uppercased. Cache invalidates automatically when index reloads.
+ """
+ colors_key: Optional[Tuple[str, ...]] = (
+ tuple(sorted({c.strip().upper() for c in colors})) if colors else None
+ )
+ cache_key = (idx.etag, q.lower() if q else None, archetype, bucket, colors_key)
+ global _FILTER_REQUESTS, _FILTER_CACHE_HITS
+ _FILTER_REQUESTS += 1
+ cached = _FILTER_CACHE.get(cache_key)
+ if cached is not None:
+ _FILTER_CACHE_HITS += 1
+ return cached
+ q_lower = q.lower() if q else None
+ colors_set = set(colors_key) if colors_key else None
+ out: List[str] = []
+ for slug, entry in idx.slug_to_entry.items():
+ if archetype and entry.deck_archetype != archetype:
+ continue
+ if bucket and entry.popularity_bucket != bucket:
+ continue
+ if colors_set and not _color_match(slug, colors_set, idx):
+ continue
+ if q_lower and q_lower not in idx.haystack_by_slug.get(slug, ""):
+ continue
+ out.append(slug)
+ _FILTER_CACHE[cache_key] = out
+ return out
+
+
+def summaries_for_slugs(idx: SlugThemeIndex, slugs: Iterable[str]) -> List[Dict[str, Any]]:
+ out: List[Dict[str, Any]] = []
+ for s in slugs:
+ summ = idx.summary_by_slug.get(s)
+ if summ:
+ out.append(summ.copy()) # shallow copy so route can pop diag-only fields
+ return out
+
+
+def catalog_metrics() -> Dict[str, Any]:
+ """Return lightweight catalog filtering/cache metrics (diagnostics only)."""
+ return {
+ "filter_requests": _FILTER_REQUESTS,
+ "filter_cache_hits": _FILTER_CACHE_HITS,
+ "filter_cache_entries": len(_FILTER_CACHE),
+ "filter_last_bust_at": _FILTER_LAST_BUST_AT,
+ "filter_prewarmed": _FILTER_PREWARMED,
+ }
+
+
+def bust_filter_cache(reason: str | None = None) -> None:
+ """Clear fast filter cache (call after catalog rebuild or yaml change)."""
+ global _FILTER_CACHE, _FILTER_LAST_BUST_AT
+ try:
+ _FILTER_CACHE.clear()
+ import time as _t
+ _FILTER_LAST_BUST_AT = _t.time()
+ except Exception:
+ pass
+
+
+def prewarm_common_filters(max_archetypes: int = 12) -> None:
+ """Pre-execute a handful of common filter queries to prime the fast cache.
+
+ This is intentionally conservative (only a small Cartesian product of bucket and
+ archetype combinations) and gated by the WEB_THEME_FILTER_PREWARM=1 environment
+ variable as well as a single-run guard. Safe to call multiple times (no-op after first success).
+ """
+ global _FILTER_PREWARMED
+ if _FILTER_PREWARMED:
+ return
+ import os
+ if (os.getenv("WEB_THEME_FILTER_PREWARM") or "").strip().lower() not in {"1", "true", "yes", "on"}:
+ return
+ try:
+ idx = load_index()
+ except Exception:
+ return
+ # Gather archetypes & buckets (limited)
+ archetypes: List[str] = []
+ try:
+ archetypes = [a for a in {t.deck_archetype for t in idx.catalog.themes if t.deck_archetype}][:max_archetypes] # type: ignore[arg-type]
+ except Exception:
+ archetypes = []
+ buckets = ["Very Common", "Common", "Uncommon", "Niche", "Rare"]
+ # Execute fast filter queries (ignore output, we only want cache side effects)
+ try:
+ # Global (no filters) & each bucket
+ filter_slugs_fast(idx)
+ for b in buckets:
+ filter_slugs_fast(idx, bucket=b)
+ # Archetype only combos (first N)
+ for a in archetypes:
+ filter_slugs_fast(idx, archetype=a)
+ # Archetype + bucket cross (cap combinations)
+ for a in archetypes[:5]:
+ for b in buckets[:3]:
+ filter_slugs_fast(idx, archetype=a, bucket=b)
+ _FILTER_PREWARMED = True
+ except Exception:
+ # Swallow any unexpected error; prewarm is opportunistic
+ return
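
A quick trace of `slugify` as defined above, plus the memoized behavior of `filter_slugs_fast` (a second identical call should register a cache hit); the import path is an assumption and a generated `theme_list.json` is required:

```python
# Trace of slugify() plus the fast-filter cache-hit counters.
from code.web.services.theme_catalog_loader import (  # import path assumed
    slugify,
    load_index,
    filter_slugs_fast,
    catalog_metrics,
)

print(slugify("+1/+1 Counters"))               # 'plus1-plus1-counters'
print(slugify("Enter the Battlefield (ETB)"))  # 'enter-the-battlefield-etb'

idx = load_index()
filter_slugs_fast(idx, q="blink")
filter_slugs_fast(idx, q="blink")  # identical key -> served from the filter cache
m = catalog_metrics()
print(m["filter_requests"], m["filter_cache_hits"])
```
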
diff --git a/code/web/services/theme_preview.py b/code/web/services/theme_preview.py
new file mode 100644
index 0000000..07e4117
--- /dev/null
+++ b/code/web/services/theme_preview.py
@@ -0,0 +1,862 @@
+"""Theme preview sampling (Phase F – enhanced sampling & diversity heuristics).
+
+Summary of implemented capabilities and pending roadmap items documented inline.
+"""
+from __future__ import annotations
+
+from pathlib import Path
+import csv
+import time
+import random
+from collections import OrderedDict, deque
+from typing import List, Dict, Any, Optional, Tuple, Iterable
+import os
+import json
+import threading
+
+try:
+ import yaml # type: ignore
+except Exception: # pragma: no cover - PyYAML already in requirements; defensive
+ yaml = None # type: ignore
+
+from .theme_catalog_loader import load_index, slugify, project_detail
+
+# NOTE: Remainder of module keeps large logic blocks; imports consolidated above per PEP8.
+
+# Commander bias configuration constants
+COMMANDER_COLOR_FILTER_STRICT = True # If commander found, restrict sample to its color identity (except colorless)
+COMMANDER_OVERLAP_BONUS = 1.8 # additive score bonus for sharing at least one tag with commander
+COMMANDER_THEME_MATCH_BONUS = 0.9 # extra if also matches theme directly
+
+## (duplicate imports removed)
+
+# Adaptive TTL configuration (can be toggled via THEME_PREVIEW_ADAPTIVE=1)
+# Starts at a baseline and is adjusted up/down based on cache hit ratio bands.
+TTL_SECONDS = 600 # current effective TTL (mutable)
+_TTL_BASE = 600
+_TTL_MIN = 300
+_TTL_MAX = 900
+_ADAPT_SAMPLE_WINDOW = 120 # number of recent requests to evaluate
+_ADAPTATION_ENABLED = (os.getenv("THEME_PREVIEW_ADAPTIVE") or "").lower() in {"1","true","yes","on"}
+_RECENT_HITS: deque[bool] = deque(maxlen=_ADAPT_SAMPLE_WINDOW)
+_LAST_ADAPT_AT: float | None = None
+_ADAPT_INTERVAL_S = 30 # do not adapt more often than every 30s
+
+_BG_REFRESH_THREAD_STARTED = False
+_BG_REFRESH_INTERVAL_S = int(os.getenv("THEME_PREVIEW_BG_REFRESH_INTERVAL") or 120)
+_BG_REFRESH_ENABLED = (os.getenv("THEME_PREVIEW_BG_REFRESH") or "").lower() in {"1","true","yes","on"}
+
+# Adaptive background refresh heuristics (P2): we will adjust per-loop sleep based on
+# recent error rate & p95 build latency. Bounds: [30s, 5 * base interval].
+_BG_REFRESH_MIN = 30
+_BG_REFRESH_MAX = max(300, _BG_REFRESH_INTERVAL_S * 5)
+
+# Per-theme error histogram (P2 observability)
+_PREVIEW_PER_THEME_ERRORS: Dict[str, int] = {}
+
+# Optional curated synergy pair matrix externalization (P2 DATA).
+_CURATED_SYNERGY_MATRIX_PATH = Path("config/themes/curated_synergy_matrix.yml")
+_CURATED_SYNERGY_MATRIX: Dict[str, Dict[str, Any]] | None = None
+
+def _load_curated_synergy_matrix() -> None:
+ global _CURATED_SYNERGY_MATRIX
+ if _CURATED_SYNERGY_MATRIX is not None:
+ return
+ if not _CURATED_SYNERGY_MATRIX_PATH.exists() or yaml is None:
+ _CURATED_SYNERGY_MATRIX = None
+ return
+ try:
+ with _CURATED_SYNERGY_MATRIX_PATH.open('r', encoding='utf-8') as fh:
+ data = yaml.safe_load(fh) or {}
+ if isinstance(data, dict):
+ # Expect top-level key 'pairs' but allow raw mapping
+ pairs = data.get('pairs', data)
+ if isinstance(pairs, dict):
+ _CURATED_SYNERGY_MATRIX = pairs # type: ignore
+ else:
+ _CURATED_SYNERGY_MATRIX = None
+ else:
+ _CURATED_SYNERGY_MATRIX = None
+ except Exception:
+ _CURATED_SYNERGY_MATRIX = None
+
+_load_curated_synergy_matrix()
+
+def _maybe_adapt_ttl(now: float) -> None:
+ """Adjust global TTL_SECONDS based on recent hit ratio bands.
+
+ Strategy:
+ - If hit ratio < 0.25: decrease TTL slightly (favor freshness) ( -60s )
+ - If hit ratio between 0.25–0.55: gently nudge toward base ( +/- 30s toward _TTL_BASE )
+ - If hit ratio between 0.55–0.75: slight increase (+60s) (stability payoff)
+ - If hit ratio > 0.75: stronger increase (+90s) to leverage locality
+ Never exceeds [_TTL_MIN, _TTL_MAX]. Only runs if enough samples.
+ """
+ global TTL_SECONDS, _LAST_ADAPT_AT
+ if not _ADAPTATION_ENABLED:
+ return
+ if len(_RECENT_HITS) < max(30, int(_ADAPT_SAMPLE_WINDOW * 0.5)):
+ return # insufficient data
+ if _LAST_ADAPT_AT and (now - _LAST_ADAPT_AT) < _ADAPT_INTERVAL_S:
+ return
+ hit_ratio = sum(1 for h in _RECENT_HITS if h) / len(_RECENT_HITS)
+ new_ttl = TTL_SECONDS
+ if hit_ratio < 0.25:
+ new_ttl = max(_TTL_MIN, TTL_SECONDS - 60)
+ elif hit_ratio < 0.55:
+ # move 30s toward base
+ if TTL_SECONDS > _TTL_BASE:
+ new_ttl = max(_TTL_BASE, TTL_SECONDS - 30)
+ elif TTL_SECONDS < _TTL_BASE:
+ new_ttl = min(_TTL_BASE, TTL_SECONDS + 30)
+ elif hit_ratio < 0.75:
+ new_ttl = min(_TTL_MAX, TTL_SECONDS + 60)
+ else:
+ new_ttl = min(_TTL_MAX, TTL_SECONDS + 90)
+ if new_ttl != TTL_SECONDS:
+ TTL_SECONDS = new_ttl
+ try:
+ print(json.dumps({"event":"theme_preview_ttl_adapt","hit_ratio":round(hit_ratio,3),"ttl":TTL_SECONDS})) # noqa: T201
+ except Exception:
+ pass
+ _LAST_ADAPT_AT = now
+
+def _compute_bg_interval() -> int:
+ """Derive adaptive sleep interval using recent metrics (P2 PERF)."""
+ try:
+ m = preview_metrics()
+ p95 = float(m.get('preview_p95_build_ms') or 0.0)
+ err_rate = float(m.get('preview_error_rate_pct') or 0.0)
+ base = _BG_REFRESH_INTERVAL_S
+ # Heuristic: high latency -> lengthen interval slightly (avoid stampede), high error rate -> shorten (refresh quicker)
+ interval = base
+ if p95 > 350: # slow builds
+ interval = int(base * 1.75)
+ elif p95 > 250:
+ interval = int(base * 1.4)
+ elif p95 < 120:
+ interval = int(base * 0.85)
+ # Error rate influence
+ if err_rate > 5.0:
+ interval = max(_BG_REFRESH_MIN, int(interval * 0.6))
+ elif err_rate < 1.0 and p95 < 180:
+ # Very healthy -> stretch slightly (less churn)
+ interval = min(_BG_REFRESH_MAX, int(interval * 1.15))
+ return max(_BG_REFRESH_MIN, min(_BG_REFRESH_MAX, interval))
+ except Exception:
+ return max(_BG_REFRESH_MIN, _BG_REFRESH_INTERVAL_S)
+
+def _bg_refresh_loop(): # pragma: no cover (background behavior)
+ import time as _t
+ while True:
+ if not _BG_REFRESH_ENABLED:
+ return
+ try:
+ ranked = sorted(_PREVIEW_PER_THEME_REQUESTS.items(), key=lambda kv: kv[1], reverse=True)
+ top = [slug for slug,_cnt in ranked[:10]]
+ for slug in top:
+ try:
+ get_theme_preview(slug, limit=12, colors=None, commander=None, uncapped=True)
+ except Exception:
+ continue
+ except Exception:
+ pass
+ _t.sleep(_compute_bg_interval())
+
+def _ensure_bg_refresh_thread(): # pragma: no cover
+ global _BG_REFRESH_THREAD_STARTED
+ if _BG_REFRESH_THREAD_STARTED or not _BG_REFRESH_ENABLED:
+ return
+ try:
+ th = threading.Thread(target=_bg_refresh_loop, name="theme_preview_bg_refresh", daemon=True)
+ th.start()
+ _BG_REFRESH_THREAD_STARTED = True
+ except Exception:
+ pass
+
+# Cache key: (slug, limit, colors, commander, etag)
+_PREVIEW_CACHE: "OrderedDict[Tuple[str, int, str | None, str | None, str], Dict[str, Any]]" = OrderedDict()
+_CARD_INDEX: Dict[str, List[Dict[str, Any]]] = {}
+_CARD_INDEX_MTIME: float | None = None
+_PREVIEW_REQUESTS = 0
+_PREVIEW_CACHE_HITS = 0
+_PREVIEW_ERROR_COUNT = 0 # rolling count of preview build failures (non-cache operational)
+_PREVIEW_REQUEST_ERROR_COUNT = 0 # client side reported fetch errors
+_PREVIEW_BUILD_MS_TOTAL = 0.0
+_PREVIEW_BUILD_COUNT = 0
+_PREVIEW_LAST_BUST_AT: float | None = None
+# Per-theme stats and global distribution tracking
+_PREVIEW_PER_THEME: Dict[str, Dict[str, Any]] = {}
+_PREVIEW_PER_THEME_REQUESTS: Dict[str, int] = {}
+_BUILD_DURATIONS = deque(maxlen=500) # rolling window for percentile calc
+_ROLE_GLOBAL_COUNTS: Dict[str, int] = {"payoff": 0, "enabler": 0, "support": 0, "wildcard": 0}
+_CURATED_GLOBAL = 0 # example + curated_synergy (non-synthetic curated content)
+_SAMPLED_GLOBAL = 0
+
+# Rarity normalization mapping (baseline – extend as new variants appear)
+_RARITY_NORM = {
+ "mythic rare": "mythic",
+ "mythic": "mythic",
+ "m": "mythic",
+ "rare": "rare",
+ "r": "rare",
+ "uncommon": "uncommon",
+ "u": "uncommon",
+ "common": "common",
+ "c": "common",
+}
+
+def _normalize_rarity(raw: str) -> str:
+ r = (raw or "").strip().lower()
+ return _RARITY_NORM.get(r, r)
+
+def _preview_cache_max() -> int:
+ try:
+        val_raw = os.getenv('THEME_PREVIEW_CACHE_MAX') or '400'
+ val = int(val_raw)
+ if val <= 0:
+ raise ValueError("cache max must be >0")
+ return val
+ except Exception:
+ # Emit single-line warning (stdout) – diagnostics style (won't break)
+ try:
+ print(json.dumps({"event":"theme_preview_cache_config_warning","message":"Invalid THEME_PREVIEW_CACHE_MAX; using default 400"})) # noqa: T201
+ except Exception:
+ pass
+ return 400
+
+def _enforce_cache_limit():
+ try:
+ limit = max(50, _preview_cache_max())
+ while len(_PREVIEW_CACHE) > limit:
+ _PREVIEW_CACHE.popitem(last=False) # FIFO eviction
+ except Exception:
+ pass
+
+CARD_FILES_GLOB = [
+ Path("csv_files/blue_cards.csv"),
+ Path("csv_files/white_cards.csv"),
+ Path("csv_files/black_cards.csv"),
+ Path("csv_files/red_cards.csv"),
+ Path("csv_files/green_cards.csv"),
+ Path("csv_files/colorless_cards.csv"),
+ Path("csv_files/cards.csv"), # fallback large file last
+]
+
+THEME_TAGS_COL = "themeTags"
+NAME_COL = "name"
+COLOR_IDENTITY_COL = "colorIdentity"
+MANA_COST_COL = "manaCost"
+RARITY_COL = "rarity" # Some CSVs may not include; optional
+
+
+def _maybe_build_card_index():
+ global _CARD_INDEX, _CARD_INDEX_MTIME
+ latest = 0.0
+ mtimes: List[float] = []
+ for p in CARD_FILES_GLOB:
+ if p.exists():
+ mt = p.stat().st_mtime
+ mtimes.append(mt)
+ if mt > latest:
+ latest = mt
+ if _CARD_INDEX and _CARD_INDEX_MTIME and latest <= _CARD_INDEX_MTIME:
+ return
+ # Rebuild index
+ _CARD_INDEX = {}
+ for p in CARD_FILES_GLOB:
+ if not p.exists():
+ continue
+ try:
+ with p.open("r", encoding="utf-8", newline="") as fh:
+ reader = csv.DictReader(fh)
+ if not reader.fieldnames or THEME_TAGS_COL not in reader.fieldnames:
+ continue
+ for row in reader:
+ name = row.get(NAME_COL) or row.get("faceName") or ""
+ tags_raw = row.get(THEME_TAGS_COL) or ""
+ # tags stored like "['Blink', 'Enter the Battlefield']"; naive parse
+ tags = [t.strip(" '[]") for t in tags_raw.split(',') if t.strip()] if tags_raw else []
+ if not tags:
+ continue
+ color_id = (row.get(COLOR_IDENTITY_COL) or "").strip()
+ mana_cost = (row.get(MANA_COST_COL) or "").strip()
+ rarity = _normalize_rarity(row.get(RARITY_COL) or "")
+ for tg in tags:
+ if not tg:
+ continue
+ _CARD_INDEX.setdefault(tg, []).append({
+ "name": name,
+ "color_identity": color_id,
+ "tags": tags,
+ "mana_cost": mana_cost,
+ "rarity": rarity,
+ # Pre-parsed helpers (color identity list & pip colors from mana cost)
+ "color_identity_list": list(color_id) if color_id else [],
+ "pip_colors": [c for c in mana_cost if c in {"W","U","B","R","G"}],
+ })
+ except Exception:
+ continue
+ _CARD_INDEX_MTIME = latest
+
+
+def _classify_role(theme: str, synergies: List[str], tags: List[str]) -> str:
+ tag_set = set(tags)
+ synergy_overlap = tag_set.intersection(synergies)
+ if theme in tag_set:
+ return "payoff"
+ if len(synergy_overlap) >= 2:
+ return "enabler"
+ if len(synergy_overlap) == 1:
+ return "support"
+ return "wildcard"
+
+
+def _seed_from(theme: str, commander: Optional[str]) -> int:
+ base = f"{theme.lower()}|{(commander or '').lower()}".encode("utf-8")
+ # simple deterministic hash (stable across runs within Python version – keep primitive)
+ h = 0
+ for b in base:
+ h = (h * 131 + b) & 0xFFFFFFFF
+ return h or 1
+
+
+def _deterministic_shuffle(items: List[Any], seed: int) -> None:
+ rnd = random.Random(seed)
+ rnd.shuffle(items)
+
+
+def _score_card(theme: str, synergies: List[str], role: str, tags: List[str]) -> float:
+ tag_set = set(tags)
+ synergy_overlap = len(tag_set.intersection(synergies))
+ score = 0.0
+ if theme in tag_set:
+ score += 3.0
+ score += synergy_overlap * 1.2
+ # Role weight baseline
+ role_weights = {
+ "payoff": 2.5,
+ "enabler": 2.0,
+ "support": 1.5,
+ "wildcard": 0.9,
+ }
+ score += role_weights.get(role, 0.5)
+    # Rarity weighting (and its diminishing duplicate penalty) is applied by the caller on top of
+    # this base score (see _sample_real_cards_for_theme).
+ return score
+
+def _commander_overlap_scale(commander_tags: set[str], card_tags: List[str], synergy_set: set[str]) -> float:
+ """Refined overlap scaling: only synergy tag intersections count toward diminishing curve.
+
+ Uses geometric diminishing returns: bonus = B * (1 - 0.5 ** n) where n is synergy overlap count.
+ Guarantees first overlap grants 50% of base, second 75%, third 87.5%, asymptotically approaching B.
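+    Example: with the default COMMANDER_OVERLAP_BONUS of 1.8, overlaps of 1 / 2 / 3 add about +0.90 / +1.35 / +1.575.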
+ """
+ if not commander_tags or not synergy_set:
+ return 0.0
+ overlap_synergy = len(commander_tags.intersection(synergy_set).intersection(card_tags))
+ if overlap_synergy <= 0:
+ return 0.0
+ return COMMANDER_OVERLAP_BONUS * (1 - (0.5 ** overlap_synergy))
+
+
+def _lookup_commander(commander: Optional[str]) -> Optional[Dict[str, Any]]:
+ if not commander:
+ return None
+ _maybe_build_card_index()
+    # Commander can appear under many tags; brute-force scan returns on the first name match
+ needle = commander.lower().strip()
+ for tag_cards in _CARD_INDEX.values():
+ for c in tag_cards:
+ if c.get("name", "").lower() == needle:
+ return c
+ return None
+
+
+def _sample_real_cards_for_theme(theme: str, limit: int, colors_filter: Optional[str], *, synergies: List[str], commander: Optional[str]) -> List[Dict[str, Any]]:
+ _maybe_build_card_index()
+ pool = _CARD_INDEX.get(theme) or []
+ if not pool:
+ return []
+ commander_card = _lookup_commander(commander)
+ commander_colors: set[str] = set(commander_card.get("color_identity", "")) if commander_card else set()
+ commander_tags: set[str] = set(commander_card.get("tags", [])) if commander_card else set()
+ if colors_filter:
+ allowed = {c.strip().upper() for c in colors_filter.split(',') if c.strip()}
+ if allowed:
+ pool = [c for c in pool if set(c.get("color_identity", "")).issubset(allowed) or not c.get("color_identity")]
+ # Apply commander color identity restriction if configured
+ if commander_card and COMMANDER_COLOR_FILTER_STRICT and commander_colors:
+ # Allow single off-color splash for 4-5 color commanders (leniency policy) with later mild penalty
+ allow_splash = len(commander_colors) >= 4
+ new_pool = []
+ for c in pool:
+ ci = set(c.get("color_identity", ""))
+ if not ci or ci.issubset(commander_colors):
+ new_pool.append(c)
+ continue
+ if allow_splash:
+ off = ci - commander_colors
+                if len(off) == 1: # single off-color splash
+                    # Copy before tagging so the ephemeral penalty flag does not mutate the shared card index entry
+                    c = dict(c)
+                    c["_splash_off_color"] = True
+                    new_pool.append(c)
+ continue
+ pool = new_pool
+ # Build role buckets
+ seen_names: set[str] = set()
+ payoff: List[Dict[str, Any]] = []
+ enabler: List[Dict[str, Any]] = []
+ support: List[Dict[str, Any]] = []
+ wildcard: List[Dict[str, Any]] = []
+ rarity_counts: Dict[str, int] = {}
+ synergy_set = set(synergies)
+ # Rarity calibration (P2 SAMPLING): allow tuning via env; default adjusted after observation.
+ rarity_weight_base = {
+ "mythic": float(os.getenv("RARITY_W_MYTHIC", "1.2")),
+ "rare": float(os.getenv("RARITY_W_RARE", "0.9")),
+ "uncommon": float(os.getenv("RARITY_W_UNCOMMON", "0.65")),
+ "common": float(os.getenv("RARITY_W_COMMON", "0.4")),
+ }
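+    # Example override (hypothetical values): RARITY_W_MYTHIC=1.0 and RARITY_W_COMMON=0.3 would flatten the rarity curve.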
+ for raw in pool:
+ nm = raw.get("name")
+ if not nm or nm in seen_names:
+ continue
+ seen_names.add(nm)
+ tags = raw.get("tags", [])
+ role = _classify_role(theme, synergies, tags)
+ score = _score_card(theme, synergies, role, tags)
+ reasons = [f"role:{role}", f"synergy_overlap:{len(set(tags).intersection(synergies))}"]
+ if commander_card:
+ if theme in tags:
+ score += COMMANDER_THEME_MATCH_BONUS
+ reasons.append("commander_theme_match")
+ scaled = _commander_overlap_scale(commander_tags, tags, synergy_set)
+ if scaled:
+ score += scaled
+ reasons.append(f"commander_synergy_overlap:{len(commander_tags.intersection(synergy_set).intersection(tags))}:{round(scaled,2)}")
+ reasons.append("commander_bias")
+ rarity = raw.get("rarity") or ""
+ if rarity:
+ base_rarity_weight = rarity_weight_base.get(rarity, 0.25)
+ count_so_far = rarity_counts.get(rarity, 0)
+ # Diminishing influence: divide by (1 + 0.4 * duplicates_already)
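+            # e.g. the default rare weight 0.9 contributes +0.90 for the first rare, ~+0.64 for the second, +0.50 for the third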
+ score += base_rarity_weight / (1 + 0.4 * count_so_far)
+ rarity_counts[rarity] = count_so_far + 1
+ reasons.append(f"rarity_weight_calibrated:{rarity}:{round(base_rarity_weight/(1+0.4*count_so_far),2)}")
+ # Splash leniency penalty (applied after other scoring)
+ if raw.get("_splash_off_color"):
+ score -= 0.3
+ reasons.append("splash_off_color_penalty:-0.3")
+ item = {
+ "name": nm,
+ "colors": list(raw.get("color_identity", "")),
+ "roles": [role],
+ "tags": tags,
+ "score": score,
+ "reasons": reasons,
+ "mana_cost": raw.get("mana_cost"),
+ "rarity": rarity,
+ # Newly exposed server authoritative parsed helpers
+ "color_identity_list": raw.get("color_identity_list", []),
+ "pip_colors": raw.get("pip_colors", []),
+ }
+ if role == "payoff":
+ payoff.append(item)
+ elif role == "enabler":
+ enabler.append(item)
+ elif role == "support":
+ support.append(item)
+ else:
+ wildcard.append(item)
+ # Deterministic shuffle inside each bucket to avoid bias from CSV ordering
+ seed = _seed_from(theme, commander)
+ for bucket in (payoff, enabler, support, wildcard):
+ _deterministic_shuffle(bucket, seed)
+ # stable secondary ordering: higher score first, then name
+ bucket.sort(key=lambda x: (-x["score"], x["name"]))
+
+ # Diversity targets (after curated examples are pinned externally)
+ target_payoff = max(1, int(round(limit * 0.4)))
+ target_enabler_support = max(1, int(round(limit * 0.4)))
+ # support grouped with enabler for quota distribution
+ target_wild = max(0, limit - target_payoff - target_enabler_support)
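+    # e.g. limit=12 -> targets of 5 payoff, 5 enabler/support, 2 wildcard (before curated pinning and fill)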
+
+ def take(n: int, source: List[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
+ for i in range(min(n, len(source))):
+ yield source[i]
+
+ chosen: List[Dict[str, Any]] = []
+ # Collect payoff
+ chosen.extend(take(target_payoff, payoff))
+ # Collect enabler + support mix
+ remaining_for_enab = target_enabler_support
+ es_combined = enabler + support
+ chosen.extend(take(remaining_for_enab, es_combined))
+ # Collect wildcards
+ chosen.extend(take(target_wild, wildcard))
+
+ # If still short fill from remaining (payoff first, then enab, support, wildcard)
+ if len(chosen) < limit:
+ def fill_from(src: List[Dict[str, Any]]):
+ nonlocal chosen
+ for it in src:
+ if len(chosen) >= limit:
+ break
+ if it not in chosen:
+ chosen.append(it)
+ for bucket in (payoff, enabler, support, wildcard):
+ fill_from(bucket)
+
+ # Role saturation penalty (post-selection adjustment): discourage dominance overflow beyond soft thresholds
+ role_soft_caps = {
+ "payoff": int(round(limit * 0.5)),
+ "enabler": int(round(limit * 0.35)),
+ "support": int(round(limit * 0.35)),
+ "wildcard": int(round(limit * 0.25)),
+ }
+ role_seen: Dict[str, int] = {k: 0 for k in role_soft_caps}
+ for it in chosen:
+ r = (it.get("roles") or [None])[0]
+ if not r or r not in role_soft_caps:
+ continue
+ role_seen[r] += 1
+ if role_seen[r] > max(1, role_soft_caps[r]):
+ it["score"] = it.get("score", 0) - 0.4
+ (it.setdefault("reasons", [])).append("role_saturation_penalty:-0.4")
+    # Truncate to the requested limit; ordering is already deterministic from the per-bucket sort and selection order
+ if len(chosen) > limit:
+ chosen = chosen[:limit]
+ # Normalize score scale (optional future; keep raw for now)
+ return chosen
+
+
+def _now() -> float: # small indirection for future test monkeypatch
+ return time.time()
+
+
+def _build_stub_items(detail: Dict[str, Any], limit: int, colors_filter: Optional[str], *, commander: Optional[str]) -> List[Dict[str, Any]]:
+ items: List[Dict[str, Any]] = []
+ # Start with curated example cards if present, else generic example_cards
+ curated_cards = detail.get("example_cards") or []
+ for idx, name in enumerate(curated_cards):
+ if len(items) >= limit:
+ break
+ items.append({
+ "name": name,
+ "colors": [], # unknown without deeper card DB link
+ "roles": ["example"],
+ "tags": [],
+ "score": float(limit - idx), # simple descending score
+ "reasons": ["curated_example"],
+ })
+ # Curated synergy example cards (if any) follow standard examples but before sampled
+ synergy_curated = detail.get("synergy_example_cards") or []
+ for name in synergy_curated:
+ if len(items) >= limit:
+ break
+ # Skip duplicates with example_cards
+ if any(it["name"] == name for it in items):
+ continue
+ items.append({
+ "name": name,
+ "colors": [],
+ "roles": ["curated_synergy"],
+ "tags": [],
+ "score": max((it["score"] for it in items), default=1.0) - 0.1, # just below top examples
+ "reasons": ["curated_synergy_example"],
+ })
+ # Remaining slots after curated examples
+ remaining = max(0, limit - len(items))
+ if remaining:
+ theme_name = detail.get("theme")
+ if isinstance(theme_name, str):
+ all_synergies = []
+ # Use uncapped synergies if available else merged list
+ if detail.get("uncapped_synergies"):
+ all_synergies = detail.get("uncapped_synergies") or []
+ else:
+ # Combine curated/enforced/inferred
+ seen = set()
+ for blk in (detail.get("curated_synergies") or [], detail.get("enforced_synergies") or [], detail.get("inferred_synergies") or []):
+ for s in blk:
+ if s not in seen:
+ all_synergies.append(s)
+ seen.add(s)
+ real_cards = _sample_real_cards_for_theme(theme_name, remaining, colors_filter, synergies=all_synergies, commander=commander)
+ for rc in real_cards:
+ if len(items) >= limit:
+ break
+ items.append(rc)
+ if len(items) < limit:
+ # Pad using synergies as synthetic placeholders to reach requested size
+ synergies = detail.get("uncapped_synergies") or detail.get("synergies") or []
+ for s in synergies:
+ if len(items) >= limit:
+ break
+ synthetic_name = f"[{s}]"
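+            # e.g. an unfilled synergy "Blink" becomes a placeholder entry named "[Blink]"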
+ items.append({
+ "name": synthetic_name,
+ "colors": [],
+ "roles": ["synthetic"],
+ "tags": [s],
+ "score": 0.5, # lower score to keep curated first
+ "reasons": ["synthetic_synergy_placeholder"],
+ })
+ return items
+
+
+def get_theme_preview(theme_id: str, *, limit: int = 12, colors: Optional[str] = None, commander: Optional[str] = None, uncapped: bool = True) -> Dict[str, Any]:
+    global _PREVIEW_REQUESTS, _PREVIEW_CACHE_HITS, _PREVIEW_BUILD_MS_TOTAL, _PREVIEW_BUILD_COUNT, _PREVIEW_ERROR_COUNT
+ idx = load_index()
+ slug = slugify(theme_id)
+ entry = idx.slug_to_entry.get(slug)
+ if not entry:
+ raise KeyError("theme_not_found")
+ # Use uncapped synergies for better placeholder coverage (diagnostics flag gating not applied here; placeholder only)
+ detail = project_detail(slug, entry, idx.slug_to_yaml, uncapped=uncapped)
+ colors_key = colors or None
+ commander_key = commander or None
+ cache_key = (slug, limit, colors_key, commander_key, idx.etag)
+ _PREVIEW_REQUESTS += 1
+ cached = _PREVIEW_CACHE.get(cache_key)
+ if cached and (_now() - cached["_cached_at"]) < TTL_SECONDS:
+ _PREVIEW_CACHE_HITS += 1
+ _RECENT_HITS.append(True)
+ # Count request (even if cache hit) for per-theme metrics
+ _PREVIEW_PER_THEME_REQUESTS[slug] = _PREVIEW_PER_THEME_REQUESTS.get(slug, 0) + 1
+        # Structured cache hit log (opt-in via WEB_THEME_PREVIEW_LOG)
+ try:
+ if (os.getenv("WEB_THEME_PREVIEW_LOG") or "").lower() in {"1","true","yes","on"}:
+ print(json.dumps({
+ "event": "theme_preview_cache_hit",
+ "theme": slug,
+ "limit": limit,
+ "colors": colors_key,
+ "commander": commander_key,
+ "ttl_remaining_s": round(TTL_SECONDS - (_now() - cached["_cached_at"]), 2)
+ }, separators=(",",":"))) # noqa: T201
+ except Exception:
+ pass
+ # Annotate cache hit flag (shallow copy to avoid mutating stored payload timings)
+ payload_cached = dict(cached["payload"])
+ payload_cached["cache_hit"] = True
+ return payload_cached
+ _RECENT_HITS.append(False)
+ # Build items
+ t0 = _now()
+ try:
+ items = _build_stub_items(detail, limit, colors_key, commander=commander_key)
+    except Exception:
+        # Record error histogram & propagate with the original traceback
+        _PREVIEW_PER_THEME_ERRORS[slug] = _PREVIEW_PER_THEME_ERRORS.get(slug, 0) + 1
+        _PREVIEW_ERROR_COUNT += 1
+        raise
+
+ # Race condition guard (P2 RESILIENCE): If we somehow produced an empty sample (e.g., catalog rebuild mid-flight)
+ # retry a limited number of times with small backoff.
+ if not items:
+ for _retry in range(2): # up to 2 retries
+ time.sleep(0.05)
+ try:
+ items = _build_stub_items(detail, limit, colors_key, commander=commander_key)
+ except Exception:
+ _PREVIEW_PER_THEME_ERRORS[slug] = _PREVIEW_PER_THEME_ERRORS.get(slug, 0) + 1
+                _PREVIEW_ERROR_COUNT += 1
+ break
+ if items:
+ try:
+ print(json.dumps({"event":"theme_preview_retry_after_empty","theme":slug})) # noqa: T201
+ except Exception:
+ pass
+ break
+ build_ms = (_now() - t0) * 1000.0
+ _PREVIEW_BUILD_MS_TOTAL += build_ms
+ _PREVIEW_BUILD_COUNT += 1
+ # Duplicate suppression safety across roles (should already be unique, defensive)
+ seen_names: set[str] = set()
+ dedup: List[Dict[str, Any]] = []
+ for it in items:
+ nm = it.get("name")
+ if not nm:
+ continue
+ if nm in seen_names:
+ continue
+ seen_names.add(nm)
+ dedup.append(it)
+ items = dedup
+
+ # Aggregate statistics
+ curated_count = sum(1 for i in items if any(r in {"example", "curated_synergy"} for r in (i.get("roles") or [])))
+ sampled_core_roles = {"payoff", "enabler", "support", "wildcard"}
+ role_counts_local: Dict[str, int] = {r: 0 for r in sampled_core_roles}
+ for i in items:
+ roles = i.get("roles") or []
+ for r in roles:
+ if r in role_counts_local:
+ role_counts_local[r] += 1
+ # Update global counters
+ global _ROLE_GLOBAL_COUNTS, _CURATED_GLOBAL, _SAMPLED_GLOBAL
+ for r, c in role_counts_local.items():
+ _ROLE_GLOBAL_COUNTS[r] = _ROLE_GLOBAL_COUNTS.get(r, 0) + c
+ _CURATED_GLOBAL += curated_count
+ _SAMPLED_GLOBAL += sum(role_counts_local.values())
+ _BUILD_DURATIONS.append(build_ms)
+ per = _PREVIEW_PER_THEME.setdefault(slug, {"builds": 0, "total_ms": 0.0, "durations": deque(maxlen=50), "role_counts": {r: 0 for r in sampled_core_roles}, "curated": 0, "sampled": 0})
+ per["builds"] += 1
+ per["total_ms"] += build_ms
+ per["durations"].append(build_ms)
+ per["curated"] += curated_count
+ per["sampled"] += sum(role_counts_local.values())
+ for r, c in role_counts_local.items():
+ per["role_counts"][r] = per["role_counts"].get(r, 0) + c
+
+ synergies_used = detail.get("uncapped_synergies") or detail.get("synergies") or []
+ payload = {
+ "theme_id": slug,
+ "theme": detail.get("theme"),
+ "count_total": len(items), # population size TBD when full sampling added
+ "sample": items,
+ "synergies_used": synergies_used,
+ "generated_at": idx.catalog.metadata_info.generated_at if idx.catalog.metadata_info else None,
+ "colors_filter": colors_key,
+ "commander": commander_key,
+ "stub": False if any(it.get("roles") and it["roles"][0] in {"payoff", "support", "enabler", "wildcard"} for it in items) else True,
+ "role_counts": role_counts_local,
+ "curated_pct": round((curated_count / max(1, len(items))) * 100, 2),
+ "build_ms": round(build_ms, 2),
+ "curated_total": curated_count,
+ "sampled_total": sum(role_counts_local.values()),
+ "cache_hit": False,
+ }
+ _PREVIEW_CACHE[cache_key] = {"payload": payload, "_cached_at": _now()}
+ _PREVIEW_CACHE.move_to_end(cache_key)
+ _enforce_cache_limit()
+ # Track request count post-build
+ _PREVIEW_PER_THEME_REQUESTS[slug] = _PREVIEW_PER_THEME_REQUESTS.get(slug, 0) + 1
+ # Structured logging (opt-in)
+ try:
+ if (os.getenv("WEB_THEME_PREVIEW_LOG") or "").lower() in {"1","true","yes","on"}:
+ log_obj = {
+ "event": "theme_preview_build",
+ "theme": slug,
+ "limit": limit,
+ "colors": colors_key,
+ "commander": commander_key,
+ "build_ms": round(build_ms, 2),
+ "curated_pct": payload["curated_pct"],
+ "curated_total": payload["curated_total"],
+ "sampled_total": payload["sampled_total"],
+ "role_counts": role_counts_local,
+ "cache_hit": False,
+ }
+ print(json.dumps(log_obj, separators=(",",":"))) # noqa: T201
+ except Exception:
+ pass
+ # Post-build adaptive TTL evaluation & background refresher initialization
+ _maybe_adapt_ttl(_now())
+ _ensure_bg_refresh_thread()
+ return payload
+
+
+def _percentile(sorted_vals: List[float], pct: float) -> float:
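+    # Linear interpolation between ranks; e.g. _percentile([10, 20, 30, 40], 0.95) == 38.5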
+ if not sorted_vals:
+ return 0.0
+ k = (len(sorted_vals) - 1) * pct
+ f = int(k)
+ c = min(f + 1, len(sorted_vals) - 1)
+ if f == c:
+ return sorted_vals[f]
+ d0 = sorted_vals[f] * (c - k)
+ d1 = sorted_vals[c] * (k - f)
+ return d0 + d1
+
+def preview_metrics() -> Dict[str, Any]:
+ avg_ms = (_PREVIEW_BUILD_MS_TOTAL / _PREVIEW_BUILD_COUNT) if _PREVIEW_BUILD_COUNT else 0.0
+ durations_list = sorted(list(_BUILD_DURATIONS))
+ p95 = _percentile(durations_list, 0.95)
+ # Role distribution actual vs target (aggregate)
+ total_roles = sum(_ROLE_GLOBAL_COUNTS.values()) or 1
+ target = {"payoff": 0.4, "enabler+support": 0.4, "wildcard": 0.2}
+ actual_enabler_support = (_ROLE_GLOBAL_COUNTS.get("enabler", 0) + _ROLE_GLOBAL_COUNTS.get("support", 0)) / total_roles
+ role_distribution = {
+ "payoff": {
+ "count": _ROLE_GLOBAL_COUNTS.get("payoff", 0),
+ "actual_pct": round((_ROLE_GLOBAL_COUNTS.get("payoff", 0) / total_roles) * 100, 2),
+ "target_pct": target["payoff"] * 100,
+ },
+ "enabler_support": {
+ "count": _ROLE_GLOBAL_COUNTS.get("enabler", 0) + _ROLE_GLOBAL_COUNTS.get("support", 0),
+ "actual_pct": round(actual_enabler_support * 100, 2),
+ "target_pct": target["enabler+support"] * 100,
+ },
+ "wildcard": {
+ "count": _ROLE_GLOBAL_COUNTS.get("wildcard", 0),
+ "actual_pct": round((_ROLE_GLOBAL_COUNTS.get("wildcard", 0) / total_roles) * 100, 2),
+ "target_pct": target["wildcard"] * 100,
+ },
+ }
+ editorial_coverage_pct = round((_CURATED_GLOBAL / max(1, (_CURATED_GLOBAL + _SAMPLED_GLOBAL))) * 100, 2)
+ per_theme_stats = {}
+ for slug, data in list(_PREVIEW_PER_THEME.items())[:50]:
+ durs = list(data.get("durations", []))
+ sd = sorted(durs)
+ p50 = _percentile(sd, 0.50)
+ p95_local = _percentile(sd, 0.95)
+ per_theme_stats[slug] = {
+ "avg_ms": round(data["total_ms"] / max(1, data["builds"]), 2),
+ "p50_ms": round(p50, 2),
+ "p95_ms": round(p95_local, 2),
+ "builds": data["builds"],
+ "avg_curated_pct": round((data["curated"] / max(1, (data["curated"] + data["sampled"])) ) * 100, 2),
+ "requests": _PREVIEW_PER_THEME_REQUESTS.get(slug, 0),
+ "curated_total": data.get("curated", 0),
+ "sampled_total": data.get("sampled", 0),
+ }
+ error_rate = 0.0
+ total_req = _PREVIEW_REQUESTS or 0
+ if total_req:
+ error_rate = round((_PREVIEW_ERROR_COUNT / total_req) * 100, 2)
+ # Example coverage enforcement flag: when curated coverage exceeds threshold (default 90%)
+ try:
+ enforce_threshold = float(os.getenv("EXAMPLE_ENFORCE_THRESHOLD", "90"))
+ except Exception:
+ enforce_threshold = 90.0
+ example_enforcement_active = editorial_coverage_pct >= enforce_threshold
+ return {
+ "preview_requests": _PREVIEW_REQUESTS,
+ "preview_cache_hits": _PREVIEW_CACHE_HITS,
+ "preview_cache_entries": len(_PREVIEW_CACHE),
+ "preview_avg_build_ms": round(avg_ms, 2),
+ "preview_p95_build_ms": round(p95, 2),
+ "preview_error_rate_pct": error_rate,
+ "preview_client_fetch_errors": _PREVIEW_REQUEST_ERROR_COUNT,
+ "preview_ttl_seconds": TTL_SECONDS,
+ "preview_ttl_adaptive": _ADAPTATION_ENABLED,
+ "preview_ttl_window": len(_RECENT_HITS),
+ "preview_last_bust_at": _PREVIEW_LAST_BUST_AT,
+ "role_distribution": role_distribution,
+ "editorial_curated_vs_sampled_pct": editorial_coverage_pct,
+ "example_enforcement_active": example_enforcement_active,
+ "example_enforce_threshold_pct": enforce_threshold,
+ "editorial_curated_total": _CURATED_GLOBAL,
+ "editorial_sampled_total": _SAMPLED_GLOBAL,
+ "per_theme": per_theme_stats,
+ "per_theme_errors": dict(list(_PREVIEW_PER_THEME_ERRORS.items())[:50]),
+ "curated_synergy_matrix_loaded": _CURATED_SYNERGY_MATRIX is not None,
+ "curated_synergy_matrix_size": sum(len(v) for v in _CURATED_SYNERGY_MATRIX.values()) if _CURATED_SYNERGY_MATRIX else 0,
+ }
+
+
+def bust_preview_cache(reason: str | None = None) -> None:
+ """Clear in-memory preview cache (e.g., after catalog rebuild or tagging).
+
+ Exposed for orchestrator hooks. Keeps metrics counters (requests/hits) for
+ observability; records last bust timestamp.
+ """
+ global _PREVIEW_CACHE, _PREVIEW_LAST_BUST_AT
+ try: # defensive; never raise
+ _PREVIEW_CACHE.clear()
+        _PREVIEW_LAST_BUST_AT = time.time()
+ except Exception:
+ pass
diff --git a/code/web/templates/base.html b/code/web/templates/base.html
index a556d68..de6d04e 100644
--- a/code/web/templates/base.html
+++ b/code/web/templates/base.html
@@ -5,6 +5,10 @@
MTG Deckbuilder
+
+
+
{% if enable_themes %}
+
+