From 49f1f8b2ebe6ba839e73b49188afec8822727ded Mon Sep 17 00:00:00 2001 From: matt Date: Fri, 26 Sep 2025 18:15:52 -0700 Subject: [PATCH] feat(random): finalize multi-theme telemetry and polish - document random theme exclusions, perf guard tooling, and roadmap completion - tighten random reroll UX: strict theme persistence, throttle handling, export parity, diagnostics updates - add regression coverage for telemetry counters, multi-theme flows, and locked rerolls; refresh README and notes Tests: pytest -q (fast random + telemetry suites) --- CHANGELOG.md | 16 + README.md | Bin 107740 -> 109268 bytes RELEASE_NOTES_TEMPLATE.md | 25 +- code/deck_builder/random_entrypoint.py | 1275 ++++++++++++++++- code/scripts/check_random_theme_perf.py | 118 ++ code/scripts/profile_multi_theme_filter.py | 136 ++ code/scripts/report_random_theme_pool.py | 193 +++ code/tests/test_random_build_api.py | 120 ++ .../test_random_metrics_and_seed_history.py | 82 +- .../test_random_multi_theme_filtering.py | 236 +++ .../test_random_multi_theme_seed_stability.py | 46 + .../tests/test_random_multi_theme_webflows.py | 204 +++ code/tests/test_random_reroll_endpoints.py | 69 +- .../test_random_reroll_locked_artifacts.py | 2 +- .../test_random_reroll_locked_commander.py | 4 +- ...est_random_reroll_locked_commander_form.py | 4 +- ...ndom_reroll_locked_no_duplicate_exports.py | 2 +- code/tests/test_random_reroll_throttle.py | 65 + .../test_random_surprise_reroll_behavior.py | 178 +++ .../test_random_theme_stats_diagnostics.py | 37 + code/tests/test_random_theme_tag_cache.py | 39 + code/web/app.py | 1065 +++++++++++++- code/web/routes/build.py | 71 +- code/web/templates/diagnostics/index.html | 116 ++ .../web/templates/partials/random_result.html | 100 +- code/web/templates/random/index.html | 842 +++++++++-- config/random_theme_exclusions.yml | 35 + docs/random_theme_exclusions.md | 59 + 28 files changed, 4888 insertions(+), 251 deletions(-) create mode 100644 code/scripts/check_random_theme_perf.py 
create mode 100644 code/scripts/profile_multi_theme_filter.py create mode 100644 code/scripts/report_random_theme_pool.py create mode 100644 code/tests/test_random_multi_theme_filtering.py create mode 100644 code/tests/test_random_multi_theme_seed_stability.py create mode 100644 code/tests/test_random_multi_theme_webflows.py create mode 100644 code/tests/test_random_reroll_throttle.py create mode 100644 code/tests/test_random_surprise_reroll_behavior.py create mode 100644 code/tests/test_random_theme_stats_diagnostics.py create mode 100644 code/tests/test_random_theme_tag_cache.py create mode 100644 config/random_theme_exclusions.yml create mode 100644 docs/random_theme_exclusions.md diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c47d31..80fe055 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,17 @@ This format follows Keep a Changelog principles and aims for Semantic Versioning ## [Unreleased] ### Added +- Tests: added `test_random_reroll_throttle.py` to enforce reroll throttle behavior and `test_random_metrics_and_seed_history.py` to validate opt-in telemetry counters plus seed history exposure. +- Random Mode curated theme pool now documents manual exclusions (`config/random_theme_exclusions.yml`) and ships a reporting script `code/scripts/report_random_theme_pool.py` (`--write-exclusions` emits Markdown/JSON) alongside `docs/random_theme_exclusions.md`. Diagnostics now show manual categories and tag index telemetry. +- Performance guard: `code/scripts/check_random_theme_perf.py` compares the multi-theme profiler output to `config/random_theme_perf_baseline.json` and fails if timings regress beyond configurable thresholds (`--update-baseline` refreshes the file). +- Random Modes UI/API: separate auto-fill controls for Secondary and Tertiary themes with full session, permalink, HTMX, and JSON API support (per-slot state persists across rerolls and exports, and Tertiary auto-fill now automatically enables Secondary to keep combinations valid). 
+- Random Mode UI gains a lightweight “Clear themes” button that resets all theme inputs and stored preferences in one click for fast Surprise Me reruns. +- Diagnostics: `/status/random_theme_stats` exposes cached commander theme token metrics and the diagnostics dashboard renders indexed commander coverage plus top tokens for multi-theme debugging. +- Random Mode sidecar metadata now records multi-theme details (`primary_theme`, `secondary_theme`, `tertiary_theme`, `resolved_themes`, `combo_fallback`, `synergy_fallback`, `fallback_reason`, plus legacy aliases) in both the summary payload and exported `.summary.json` files. +- Tests: added `test_random_multi_theme_filtering.py` covering triple success, fallback tiers (P+S, P+T, Primary-only, synergy, full pool) and sidecar metadata emission for multi-theme builds. +- Tests: added `test_random_multi_theme_webflows.py` to exercise reroll-same-commander caching and permalink roundtrips for multi-theme runs across HTMX and API layers. - Random Mode multi-theme groundwork: backend now supports `primary_theme`, `secondary_theme`, `tertiary_theme` with deterministic AND-combination cascade (P+S+T → P+S → P+T → P → synergy-overlap → full pool). Diagnostics fields (`resolved_themes`, `combo_fallback`, `synergy_fallback`, `fallback_reason`) added to `RandomBuildResult` (UI wiring pending). +- Tests: added `test_random_surprise_reroll_behavior.py` covering Surprise Me input preservation and locked commander reroll cache reuse. - Locked commander reroll path now produces full artifact parity (CSV, TXT, compliance JSON, summary JSON) identical to Surprise builds. - Random reroll tests for: commander lock invariance, artifact presence, duplicate export prevention, and form vs JSON submission. - Roadmap document `logs/roadmaps/random_multi_theme_roadmap.md` capturing design, fallback strategy, diagnostics, and incremental delivery plan. 
@@ -47,10 +57,15 @@ This format follows Keep a Changelog principles and aims for Semantic Versioning - Optional multi-pass performance CI variant (`preview_perf_ci_check.py --multi-pass`) to collect cold vs warm pass stats when diagnosing divergence. ### Changed +- Random theme pool builder loads manual exclusions and always emits `auto_filled_themes` as a list (empty when unused), while enhanced metadata powers diagnostics telemetry. +- Random build summaries normalize multi-theme metadata before embedding in summary payloads and sidecar exports (trimming whitespace, deduplicating/normalizing resolved theme lists). +- Random Mode strict-theme toggle is now fully stateful: the checkbox and hidden field keep session/local storage in sync, HTMX rerolls reuse the flag, and API/full-build responses plus permalinks carry `strict_theme_match` through exports and sidecars. +- Multi-theme filtering now pre-caches lowercase tag lists and builds a reusable token index so AND-combos and synergy fallback avoid repeated pandas `.apply` passes; profiling via `code/scripts/profile_multi_theme_filter.py` shows mean ~9.3 ms / p95 ~21 ms for cascade checks (seed 42, 300 iterations). - Random reroll (locked commander) export flow: now reuses builder-exported artifacts when present and records `last_csv_path` / `last_txt_path` inside the headless runner to avoid duplicate suffixed files. - Summary sidecars for random builds include `locked_commander` flag when rerolling same commander. - Splash analytics recognize both static and adaptive penalty reasons (shared prefix handling), so existing dashboards continue to work when `SPLASH_ADAPTIVE=1`. - Random full builds now internally force `RANDOM_BUILD_SUPPRESS_INITIAL_EXPORT=1` (if unset) ensuring only the orchestrated export path executes (eliminates historical duplicate `*_1.csv` / `*_1.txt`). Set `RANDOM_BUILD_SUPPRESS_INITIAL_EXPORT=0` to intentionally restore the legacy double-export (not recommended outside debugging). 
+- Multi-theme Random UI polish: fallback notices now surface high-contrast icons, focus outlines, and aria-friendly copy; diagnostics badges gain icons/labels; help tooltip converted to an accessible popover with keyboard support; Secondary/Tertiary inputs persist across sessions. - Picker list & API use optimized fast filtering path (`filter_slugs_fast`) replacing per-request linear scans. - Preview sampling: curated examples pinned first, diversity quotas (~40% payoff / 40% enabler+support / 20% wildcard), synthetic placeholders only if underfilled. - Sampling refinements: rarity diminishing weight, splash leniency (single off-color allowance with penalty for 4–5 color commanders), role saturation penalty, refined commander overlap scaling curve. @@ -63,6 +78,7 @@ This format follows Keep a Changelog principles and aims for Semantic Versioning - Performance gating formalized: CI fails if warm p95 regression > configured threshold (default 5%). Baseline refresh policy: only update committed warm baseline when (a) intentional performance improvement >10% p95, or (b) unavoidable drift exceeds threshold and is justified in CHANGELOG entry. ### Fixed +- Random UI Surprise Me rerolls now keep user-supplied theme inputs instead of adopting fallback combinations, and reroll-same-commander builds reuse cached resolved themes without re-running the filter cascade. - Removed redundant template environment instantiation causing inconsistent navigation state. - Ensured preview cache key includes catalog ETag to prevent stale sample reuse after catalog reload. - Explicit cache bust after tagging/catalog rebuild prevents stale preview exposure. 
diff --git a/README.md b/README.md index bbca6f3d3bd7fe075d2dc54a887a8c0983b51191..8cbe75c817a3e2dbee4a1d11c0710b7ea544130d 100644 GIT binary patch delta 1279 zcmb7EO=}ZT6ul2habaxJaTv6M6Wp}WBr7Q);G(WvNCQQ|g=B_&)WLjAXA)zv;6iX; z-eNWq5W4nbCAblnx>i9@!5`qF{sPZ=&%_9^AVV@U@1A?^*SYiM-QtIji!UDMi@xm0 zP`bE6Nn~GqNhOk|w4`B9Qk>W)ShIRft!{Q<9KhJbk9S|zB#;>H(UhhR%N^i|4P*nD zKp!tSx8|G!*@Eu?-Zq7Km;{(q`qGhmcxuWlCS?SDyzRPe*hAR`avc$4ybUGQ`Jbyx z^!B9Qe?`rh(&tBFZt26r9kMj|yFe$TsLYPUD zN@aL(n})&w&P(Y_YvyG^BZ@UsfEavecukva9j zPR-dda=2YsdKf6R5P{_`tQ<4P+oUp9D*EQi>mmz>D)8Si|KzyfUp1`M}Zb0QNNW`&$`vS6&vvd<5>OMO&s>#C!U zA#&k{{4EfRu=`_s%`tLaW1!hRt9$0n8p}AZ$Yo$=Jy{($*Y+xvjne>3+nR-AdBjsiBQ?=kSS8gwJ;f^z_J{P3$?*)m0 nL#*uO6LrzGpCL+(+h{*dt?VtbSAkx-;;QvWq4O$q)Puuc+YA^m delta 91 zcmca|mF><+whe89lY|o{+l5OAmosECioL+(ccR-SD`^Q$ qZkj8`slZUqkin43kT*HeOO#uIA)g@`2um4?nj:`. + - Tests: added `test_random_reroll_throttle.py` to guard reroll throttle behavior and `test_random_metrics_and_seed_history.py` to verify opt-in telemetry counters and seed history API output. - Analytics: splash penalty counters recognize both static and adaptive reasons; compare deltas with the flag toggled. -- Theme picker performance: precomputed summary projections + lowercase haystacks and memoized filtered slug cache (keyed by (etag, q, archetype, bucket, colors)) for sub‑50ms typical list queries on warm path. -- Skeleton loading UI for theme picker list, preview modal, and initial shell. -- Theme preview endpoint (`/themes/api/theme/{id}/preview` + HTML fragment) returning representative sample with roles (payoff/enabler/support/wildcard/example/curated_synergy/synthetic). -- Commander bias heuristics in preview sampling (color identity filtering + overlap/theme bonuses) for context-aware suggestions. -- In‑memory TTL (600s) preview cache with metrics (requests, cache hits, average build ms) exposed at diagnostics endpoint. 
-- Web UI: Double-faced card (DFC) hover support with single-image overlay flip control (top-left button, keyboard (Enter/Space/F), aria-live), persisted face (localStorage), and immediate refresh post-flip. -- Diagnostics flag `WEB_THEME_PICKER_DIAGNOSTICS=1` gating fallback description flag, editorial quality badges, uncapped synergy lists, raw YAML fetch, and metrics endpoint (`/themes/metrics`). -- Catalog & preview metrics endpoint combining filter + preview counters & cache stats. -- Performance headers on list & API responses: `X-ThemeCatalog-Filter-Duration-ms` and `ETag` for conditional requests. +- Random Mode curated pool now loads manual exclusions (`config/random_theme_exclusions.yml`), includes reporting helpers (`code/scripts/report_random_theme_pool.py --write-exclusions`), and ships documentation (`docs/random_theme_exclusions.md`). Diagnostics cards show manual categories and tag index telemetry. +- Added `code/scripts/check_random_theme_perf.py` guard that compares the multi-theme profiler (`code/scripts/profile_multi_theme_filter.py`) against `config/random_theme_perf_baseline.json` with optional `--update-baseline`. +- Random Mode UI adds a “Clear themes” control that resets Primary/Secondary/Tertiary inputs plus local persistence in a single click. + - Diagnostics: Added `/status/random_theme_stats` and a diagnostics dashboard card surfacing commander/theme token coverage and top tokens for multi-theme debugging. - Cache bust hooks tied to catalog refresh & tagging completion clear filter/preview caches (metrics now include last bust timestamps). - Governance metrics: `example_enforcement_active`, `example_enforce_threshold_pct` (threshold default 90%) signal when curated coverage enforcement is active. - Server authoritative mana & color identity fields (`mana_cost`, `color_identity_list`, `pip_colors`) included in preview/export; legacy client parsers removed. 
### Changed -- Random reroll export logic deduplicated by persisting `last_csv_path` / `last_txt_path` from headless runs; avoids creation of `*_1` suffixed artifacts on reroll. +### Added +- Tests: added `test_random_multi_theme_webflows.py` validating reroll-same-commander caching and permalink roundtrips for multi-theme runs across HTMX and API layers. +- Multi-theme filtering now reuses a cached lowercase tag column and builds a reusable token index so combination checks and synergy fallback avoid repeated pandas `.apply` passes; new script `code/scripts/profile_multi_theme_filter.py` reports mean ~9.3 ms / p95 ~21 ms cascade timings on the current catalog (seed 42, 300 iterations). - Splash analytics updated to count both static and adaptive penalty reasons via a shared prefix, keeping historical dashboards intact. - Random full builds internally auto-set `RANDOM_BUILD_SUPPRESS_INITIAL_EXPORT=1` (unless explicitly provided) to eliminate duplicate suffixed decklists. - Preview assembly now pins curated `example_cards` then `synergy_example_cards` before heuristic sampling with diversity quotas (~40% payoff, 40% enabler/support, 20% wildcard) and synthetic placeholders only when underfilled. @@ -45,6 +37,7 @@ ### Added - Theme whitelist governance (`config/themes/theme_whitelist.yml`) with normalization, enforced synergies, and synergy cap (5). - Expanded curated synergy matrix plus PMI-based inferred synergies (data-driven) blended with curated anchors. +- Random UI polish: fallback notices gain accessible icons, focus outlines, and aria copy; diagnostics badges now include icons/labels; the theme help tooltip is an accessible popover with keyboard controls; secondary/tertiary theme inputs persist via localStorage so repeat builds start with previous choices. - Test: `test_theme_whitelist_and_synergy_cap.py` validates enforced synergy presence and cap compliance. - PyYAML dependency for governance parsing. 
diff --git a/code/deck_builder/random_entrypoint.py b/code/deck_builder/random_entrypoint.py index 5cb2f9b..83d6f55 100644 --- a/code/deck_builder/random_entrypoint.py +++ b/code/deck_builder/random_entrypoint.py @@ -1,14 +1,374 @@ from __future__ import annotations -from dataclasses import dataclass -from typing import Any, Dict, List, Optional +from dataclasses import dataclass, fields +from pathlib import Path +from typing import Any, Dict, Iterable, List, Optional import time import pandas as pd +import yaml from deck_builder import builder_constants as bc from random_util import get_random, generate_seed +_THEME_STATS_CACHE: Dict[str, Any] | None = None +_THEME_STATS_CACHE_TS: float = 0.0 +_THEME_STATS_TTL_S = 60.0 +_RANDOM_THEME_POOL_CACHE: Dict[str, Any] | None = None +_RANDOM_THEME_POOL_TS: float = 0.0 +_RANDOM_THEME_POOL_TTL_S = 60.0 + +_PROJECT_ROOT = Path(__file__).resolve().parents[2] +_MANUAL_EXCLUSIONS_PATH = _PROJECT_ROOT / "config" / "random_theme_exclusions.yml" +_MANUAL_EXCLUSIONS_CACHE: Dict[str, Dict[str, Any]] | None = None +_MANUAL_EXCLUSIONS_META: List[Dict[str, Any]] | None = None +_MANUAL_EXCLUSIONS_MTIME: float = 0.0 + +_TAG_INDEX_TELEMETRY: Dict[str, Any] = { + "builds": 0, + "last_build_ts": 0.0, + "token_count": 0, + "lookups": 0, + "hits": 0, + "misses": 0, + "substring_checks": 0, + "substring_hits": 0, +} + +_KINDRED_KEYWORDS: tuple[str, ...] = ( + "kindred", + "tribal", + "tribe", + "clan", + "family", + "pack", +) +_GLOBAL_THEME_KEYWORDS: tuple[str, ...] = ( + "goodstuff", + "good stuff", + "all colors", + "omnicolor", +) +_GLOBAL_THEME_PATTERNS: tuple[tuple[str, str], ...] 
= ( + ("legend", "matter"), + ("legendary", "matter"), + ("historic", "matter"), +) + +_OVERREPRESENTED_SHARE_THRESHOLD: float = 0.30 # 30% of the commander catalog + + +def _sanitize_manual_category(value: Any) -> str: + try: + text = str(value).strip().lower() + except Exception: + text = "manual" + return text.replace(" ", "_") or "manual" + + +def _load_manual_theme_exclusions(refresh: bool = False) -> tuple[Dict[str, Dict[str, Any]], List[Dict[str, Any]]]: + global _MANUAL_EXCLUSIONS_CACHE, _MANUAL_EXCLUSIONS_META, _MANUAL_EXCLUSIONS_MTIME + + path = _MANUAL_EXCLUSIONS_PATH + if not path.exists(): + _MANUAL_EXCLUSIONS_CACHE = {} + _MANUAL_EXCLUSIONS_META = [] + _MANUAL_EXCLUSIONS_MTIME = 0.0 + return {}, [] + + try: + mtime = path.stat().st_mtime + except Exception: + mtime = 0.0 + + if ( + not refresh + and _MANUAL_EXCLUSIONS_CACHE is not None + and _MANUAL_EXCLUSIONS_META is not None + and _MANUAL_EXCLUSIONS_MTIME == mtime + ): + return dict(_MANUAL_EXCLUSIONS_CACHE), list(_MANUAL_EXCLUSIONS_META) + + try: + raw_data = yaml.safe_load(path.read_text(encoding="utf-8")) + except FileNotFoundError: + raw_data = None + except Exception: + raw_data = None + + groups = [] + if isinstance(raw_data, dict): + manual = raw_data.get("manual_exclusions") + if isinstance(manual, list): + groups = manual + elif isinstance(raw_data, list): + groups = raw_data + + manual_map: Dict[str, Dict[str, Any]] = {} + manual_meta: List[Dict[str, Any]] = [] + + for group in groups: + if not isinstance(group, dict): + continue + tokens = group.get("tokens") + if not isinstance(tokens, (list, tuple)): + continue + category = _sanitize_manual_category(group.get("category")) + summary = str(group.get("summary", "")).strip() + notes_raw = group.get("notes") + notes = str(notes_raw).strip() if notes_raw is not None else "" + display_tokens: List[str] = [] + for token in tokens: + try: + display = str(token).strip() + except Exception: + continue + if not display: + continue + norm = 
display.lower() + manual_map[norm] = { + "display": display, + "category": category, + "summary": summary, + "notes": notes, + } + display_tokens.append(display) + if display_tokens: + manual_meta.append( + { + "category": category, + "summary": summary, + "notes": notes, + "tokens": display_tokens, + } + ) + + _MANUAL_EXCLUSIONS_CACHE = manual_map + _MANUAL_EXCLUSIONS_META = manual_meta + _MANUAL_EXCLUSIONS_MTIME = mtime + return dict(manual_map), list(manual_meta) + + +def _record_index_build(token_count: int) -> None: + _TAG_INDEX_TELEMETRY["builds"] = int(_TAG_INDEX_TELEMETRY.get("builds", 0) or 0) + 1 + _TAG_INDEX_TELEMETRY["last_build_ts"] = time.time() + _TAG_INDEX_TELEMETRY["token_count"] = int(max(0, token_count)) + + +def _record_index_lookup(token: Optional[str], hit: bool, *, substring: bool = False) -> None: + _TAG_INDEX_TELEMETRY["lookups"] = int(_TAG_INDEX_TELEMETRY.get("lookups", 0) or 0) + 1 + key = "hits" if hit else "misses" + _TAG_INDEX_TELEMETRY[key] = int(_TAG_INDEX_TELEMETRY.get(key, 0) or 0) + 1 + if substring: + _TAG_INDEX_TELEMETRY["substring_checks"] = int(_TAG_INDEX_TELEMETRY.get("substring_checks", 0) or 0) + 1 + if hit: + _TAG_INDEX_TELEMETRY["substring_hits"] = int(_TAG_INDEX_TELEMETRY.get("substring_hits", 0) or 0) + 1 + + +def _get_index_telemetry_snapshot() -> Dict[str, Any]: + lookups = float(_TAG_INDEX_TELEMETRY.get("lookups", 0) or 0) + hits = float(_TAG_INDEX_TELEMETRY.get("hits", 0) or 0) + hit_rate = round(hits / lookups, 6) if lookups else 0.0 + snapshot = { + "builds": int(_TAG_INDEX_TELEMETRY.get("builds", 0) or 0), + "token_count": int(_TAG_INDEX_TELEMETRY.get("token_count", 0) or 0), + "lookups": int(lookups), + "hits": int(hits), + "misses": int(_TAG_INDEX_TELEMETRY.get("misses", 0) or 0), + "hit_rate": hit_rate, + "substring_checks": int(_TAG_INDEX_TELEMETRY.get("substring_checks", 0) or 0), + "substring_hits": int(_TAG_INDEX_TELEMETRY.get("substring_hits", 0) or 0), + } + last_ts = 
_TAG_INDEX_TELEMETRY.get("last_build_ts") + if last_ts: + try: + snapshot["last_build_iso"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(float(last_ts))) + except Exception: + pass + return snapshot + + +def _is_kindred_token(token: str) -> bool: + norm = token.strip().lower() + if not norm: + return False + if norm.startswith("tribal ") or norm.endswith(" tribe"): + return True + for keyword in _KINDRED_KEYWORDS: + if keyword in norm: + return True + return False + + +def _is_global_token(token: str) -> bool: + norm = token.strip().lower() + if not norm: + return False + for keyword in _GLOBAL_THEME_KEYWORDS: + if keyword in norm: + return True + for prefix, suffix in _GLOBAL_THEME_PATTERNS: + if prefix in norm and suffix in norm: + return True + return False + + +def _build_random_theme_pool(df: pd.DataFrame, *, include_details: bool = False) -> tuple[set[str], Dict[str, Any]]: + """Build a curated pool of theme tokens eligible for auto-fill assistance.""" + + _ensure_theme_tag_cache(df) + index_map = df.attrs.get("_ltag_index") or {} + manual_map, manual_meta = _load_manual_theme_exclusions() + manual_applied: Dict[str, Dict[str, Any]] = {} + allowed: set[str] = set() + excluded: Dict[str, list[str]] = {} + counts: Dict[str, int] = {} + try: + total_rows = int(len(df.index)) + except Exception: + total_rows = 0 + total_rows = max(0, total_rows) + for token, values in index_map.items(): + reasons: list[str] = [] + count = 0 + try: + count = int(len(values)) if values is not None else 0 + except Exception: + count = 0 + counts[token] = count + if count < 5: + reasons.append("insufficient_samples") + if _is_global_token(token): + reasons.append("global_theme") + if _is_kindred_token(token): + reasons.append("kindred_theme") + if total_rows > 0: + try: + share = float(count) / float(total_rows) + except Exception: + share = 0.0 + if share >= _OVERREPRESENTED_SHARE_THRESHOLD: + reasons.append("overrepresented_theme") + manual_entry = manual_map.get(token) + if 
manual_entry: + category = _sanitize_manual_category(manual_entry.get("category")) + if category: + reasons.append(f"manual_category:{category}") + reasons.append("manual_exclusion") + manual_applied[token] = { + "display": manual_entry.get("display", token), + "category": category, + "summary": manual_entry.get("summary", ""), + "notes": manual_entry.get("notes", ""), + } + + if reasons: + excluded[token] = reasons + continue + allowed.add(token) + + excluded_counts: Dict[str, int] = {} + excluded_samples: Dict[str, list[str]] = {} + for token, reasons in excluded.items(): + for reason in reasons: + excluded_counts[reason] = excluded_counts.get(reason, 0) + 1 + bucket = excluded_samples.setdefault(reason, []) + if len(bucket) < 8: + bucket.append(token) + + total_tokens = len(counts) + try: + coverage_ratio = float(len(allowed)) / float(total_tokens) if total_tokens else 0.0 + except Exception: + coverage_ratio = 0.0 + + try: + manual_source = _MANUAL_EXCLUSIONS_PATH.relative_to(_PROJECT_ROOT) + manual_source_str = str(manual_source) + except Exception: + manual_source_str = str(_MANUAL_EXCLUSIONS_PATH) + + metadata: Dict[str, Any] = { + "pool_size": len(allowed), + "total_commander_count": total_rows, + "coverage_ratio": round(float(coverage_ratio), 6), + "excluded_counts": excluded_counts, + "excluded_samples": excluded_samples, + "generated_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "rules": { + "min_commander_tags": 5, + "excluded_keywords": list(_GLOBAL_THEME_KEYWORDS), + "excluded_patterns": [" ".join(p) for p in _GLOBAL_THEME_PATTERNS], + "kindred_keywords": list(_KINDRED_KEYWORDS), + "overrepresented_share_threshold": _OVERREPRESENTED_SHARE_THRESHOLD, + "manual_exclusions_source": manual_source_str, + "manual_exclusions": manual_meta, + "manual_category_count": len({entry.get("category") for entry in manual_meta}), + }, + } + if manual_applied: + metadata["manual_exclusion_detail"] = manual_applied + metadata["manual_exclusion_token_count"] 
= len(manual_applied) + if include_details: + metadata["excluded_detail"] = { + token: list(reasons) + for token, reasons in excluded.items() + } + return allowed, metadata + + +def _get_random_theme_pool_cached(refresh: bool = False, df: Optional[pd.DataFrame] = None) -> tuple[set[str], Dict[str, Any]]: + global _RANDOM_THEME_POOL_CACHE, _RANDOM_THEME_POOL_TS + + now = time.time() + if ( + not refresh + and _RANDOM_THEME_POOL_CACHE is not None + and (now - _RANDOM_THEME_POOL_TS) < _RANDOM_THEME_POOL_TTL_S + ): + cached_allowed = _RANDOM_THEME_POOL_CACHE.get("allowed", set()) + cached_meta = _RANDOM_THEME_POOL_CACHE.get("metadata", {}) + return set(cached_allowed), dict(cached_meta) + + dataset = df if df is not None else _load_commanders_df() + allowed, metadata = _build_random_theme_pool(dataset) + _RANDOM_THEME_POOL_CACHE = {"allowed": set(allowed), "metadata": dict(metadata)} + _RANDOM_THEME_POOL_TS = now + return set(allowed), dict(metadata) + + +def get_random_theme_pool(*, refresh: bool = False) -> Dict[str, Any]: + """Public helper exposing the curated auto-fill theme pool.""" + + allowed, metadata = _get_random_theme_pool_cached(refresh=refresh) + rules = dict(metadata.get("rules", {})) + if not rules: + rules = { + "min_commander_tags": 5, + "excluded_keywords": list(_GLOBAL_THEME_KEYWORDS), + "excluded_patterns": [" ".join(p) for p in _GLOBAL_THEME_PATTERNS], + "kindred_keywords": list(_KINDRED_KEYWORDS), + "overrepresented_share_threshold": _OVERREPRESENTED_SHARE_THRESHOLD, + } + metadata = dict(metadata) + metadata["rules"] = dict(rules) + payload = { + "allowed_tokens": sorted(allowed), + "metadata": metadata, + "rules": rules, + } + return payload + + +def token_allowed_for_random(token: Optional[str]) -> bool: + if token is None: + return False + norm = token.strip().lower() + if not norm: + return False + allowed, _meta = _get_random_theme_pool_cached(refresh=False) + return norm in allowed + class RandomBuildError(Exception): pass @@ -21,6 +381,12 
@@ class RandomConstraintsImpossibleError(RandomBuildError): self.pool_size = int(pool_size or 0) +class RandomThemeNoMatchError(RandomBuildError): + def __init__(self, message: str, *, diagnostics: Optional[Dict[str, Any]] = None): + super().__init__(message) + self.diagnostics = diagnostics or {} + + @dataclass class RandomBuildResult: seed: int @@ -41,6 +407,13 @@ class RandomBuildResult: attempts_tried: int = 0 timeout_hit: bool = False retries_exhausted: bool = False + display_themes: List[str] | None = None + auto_fill_secondary_enabled: bool = False + auto_fill_tertiary_enabled: bool = False + auto_fill_enabled: bool = False + auto_fill_applied: bool = False + auto_filled_themes: List[str] | None = None + strict_theme_match: bool = False def to_dict(self) -> Dict[str, Any]: return { @@ -56,7 +429,387 @@ def _load_commanders_df() -> pd.DataFrame: Uses bc.COMMANDER_CSV_PATH and bc.COMMANDER_CONVERTERS for consistency. """ - return pd.read_csv(bc.COMMANDER_CSV_PATH, converters=getattr(bc, "COMMANDER_CONVERTERS", None)) + df = pd.read_csv(bc.COMMANDER_CSV_PATH, converters=getattr(bc, "COMMANDER_CONVERTERS", None)) + return _ensure_theme_tag_cache(df) + + +def _ensure_theme_tag_cache(df: pd.DataFrame) -> pd.DataFrame: + """Attach a lower-cased theme tag cache column and prebuilt index.""" + + if "_ltags" not in df.columns: + + def _normalize_tag_list(raw: Any) -> List[str]: + result: List[str] = [] + if raw is None: + return result + try: + iterable = list(raw) if isinstance(raw, (list, tuple, set)) else raw + except Exception: + iterable = [] + seen: set[str] = set() + for item in iterable: + try: + token = str(item).strip().lower() + except Exception: + continue + if not token: + continue + if token in seen: + continue + seen.add(token) + result.append(token) + return result + + try: + df["_ltags"] = df.get("themeTags").apply(_normalize_tag_list) + except Exception: + df["_ltags"] = [[] for _ in range(len(df))] + + _ensure_theme_tag_index(df) + return df + + 
+def _ensure_theme_tag_index(df: pd.DataFrame) -> None: + """Populate a cached mapping of theme tag -> DataFrame index for fast lookups.""" + + if "_ltag_index" in df.attrs: + return + + index_map: Dict[str, List[Any]] = {} + tags_series = df.get("_ltags") + if tags_series is None: + df.attrs["_ltag_index"] = {} + return + + for idx, tags in tags_series.items(): + if not tags: + continue + for token in tags: + index_map.setdefault(token, []).append(idx) + + built_index = {token: pd.Index(values) for token, values in index_map.items()} + df.attrs["_ltag_index"] = built_index + try: + _record_index_build(len(built_index)) + except Exception: + pass + + +def _fallback_display_token(token: str) -> str: + parts = [segment for segment in token.strip().split() if segment] + if not parts: + return token.strip() or token + return " ".join(piece.capitalize() for piece in parts) + + +def _resolve_display_tokens(tokens: Iterable[str], *frames: pd.DataFrame) -> List[str]: + order: List[str] = [] + display_map: Dict[str, Optional[str]] = {} + for raw in tokens: + try: + norm = str(raw).strip().lower() + except Exception: + continue + if not norm or norm in display_map: + continue + display_map[norm] = None + order.append(norm) + if not order: + return [] + + def _harvest(frame: pd.DataFrame) -> None: + try: + tags_series = frame.get("themeTags") + except Exception: + tags_series = None + if not isinstance(tags_series, pd.Series): + return + for tags in tags_series: + if not tags: + continue + try: + iterator = list(tags) if isinstance(tags, (list, tuple, set)) else [] + except Exception: + iterator = [] + for tag in iterator: + try: + text = str(tag).strip() + except Exception: + continue + if not text: + continue + key = text.lower() + if key in display_map and display_map[key] is None: + display_map[key] = text + if all(display_map[k] is not None for k in order): + return + + for frame in frames: + if isinstance(frame, pd.DataFrame): + _harvest(frame) + if all(display_map[k] 
is not None for k in order): + break + + return [display_map.get(norm) or _fallback_display_token(norm) for norm in order] + + +def _auto_fill_missing_themes( + df: pd.DataFrame, + commander: str, + rng, + *, + primary_theme: Optional[str], + secondary_theme: Optional[str], + tertiary_theme: Optional[str], + allowed_pool: set[str], + fill_secondary: bool, + fill_tertiary: bool, +) -> tuple[Optional[str], Optional[str], list[str]]: + """Given a commander, auto-fill secondary/tertiary themes from curated pool.""" + + def _norm(value: Optional[str]) -> Optional[str]: + if value is None: + return None + try: + text = str(value).strip().lower() + except Exception: + return None + return text if text else None + + secondary_result = secondary_theme if secondary_theme else None + tertiary_result = tertiary_theme if tertiary_theme else None + auto_filled: list[str] = [] + + missing_secondary = bool(fill_secondary) and (secondary_result is None or _norm(secondary_result) is None) + missing_tertiary = bool(fill_tertiary) and (tertiary_result is None or _norm(tertiary_result) is None) + if not missing_secondary and not missing_tertiary: + return secondary_result, tertiary_result, auto_filled + + try: + subset = df[df["name"].astype(str) == str(commander)] + if subset.empty: + return secondary_result, tertiary_result, auto_filled + row = subset.iloc[0] + raw_tags = row.get("themeTags", []) or [] + except Exception: + return secondary_result, tertiary_result, auto_filled + + seen_norms: set[str] = set() + candidates: list[tuple[str, str]] = [] + + primary_norm = _norm(primary_theme) + secondary_norm = _norm(secondary_result) + tertiary_norm = _norm(tertiary_result) + existing_norms = {n for n in (primary_norm, secondary_norm, tertiary_norm) if n} + + for raw in raw_tags: + try: + text = str(raw).strip() + except Exception: + continue + if not text: + continue + norm = text.lower() + if norm in seen_norms: + continue + seen_norms.add(norm) + if norm in existing_norms: + continue 
+ if norm not in allowed_pool: + continue + candidates.append((text, norm)) + + if not candidates: + return secondary_result, tertiary_result, auto_filled + + order = list(range(len(candidates))) + try: + rng.shuffle(order) + except Exception: + order = list(range(len(candidates))) + + shuffled = [candidates[i] for i in order] + used_norms = set(existing_norms) + + for text, norm in shuffled: + if missing_secondary and norm not in used_norms: + secondary_result = text + missing_secondary = False + used_norms.add(norm) + auto_filled.append(text) + continue + if missing_tertiary and norm not in used_norms: + tertiary_result = text + missing_tertiary = False + used_norms.add(norm) + auto_filled.append(text) + if not missing_secondary and not missing_tertiary: + break + + return secondary_result, tertiary_result, auto_filled + + +def _build_theme_tag_stats(df: pd.DataFrame) -> Dict[str, Any]: + stats: Dict[str, Any] = { + "commanders": 0, + "with_tags": 0, + "without_tags": 0, + "unique_tokens": 0, + "total_assignments": 0, + "avg_tokens_per_commander": 0.0, + "median_tokens_per_commander": 0.0, + "top_tokens": [], + "cache_ready": False, + "generated_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + } + + try: + total_rows = int(len(df.index)) + except Exception: + total_rows = 0 + stats["commanders"] = total_rows + + try: + tags_series = df.get("_ltags") + except Exception: + tags_series = None + + lengths: list[int] = [] + if tags_series is not None: + try: + for item in tags_series.tolist(): + if isinstance(item, list): + lengths.append(len(item)) + else: + lengths.append(0) + except Exception: + lengths = [] + + if lengths: + with_tags = sum(1 for length in lengths if length > 0) + else: + with_tags = 0 + stats["with_tags"] = with_tags + stats["without_tags"] = max(0, total_rows - with_tags) + + index_map = df.attrs.get("_ltag_index") or {} + stats["cache_ready"] = bool(index_map) + + try: + unique_tokens = len(index_map) + except Exception: + 
unique_tokens = 0 + stats["unique_tokens"] = unique_tokens + + total_assignments = 0 + if isinstance(index_map, dict): + try: + for values in index_map.values(): + try: + total_assignments += int(len(values)) + except Exception: + continue + except Exception: + total_assignments = 0 + stats["total_assignments"] = total_assignments + + avg_tokens = 0.0 + if total_rows > 0: + try: + avg_tokens = total_assignments / float(total_rows) + except Exception: + avg_tokens = 0.0 + stats["avg_tokens_per_commander"] = round(float(avg_tokens), 3) + + if lengths: + try: + sorted_lengths = sorted(lengths) + mid = len(sorted_lengths) // 2 + if len(sorted_lengths) % 2 == 0: + median_val = (sorted_lengths[mid - 1] + sorted_lengths[mid]) / 2.0 + else: + median_val = float(sorted_lengths[mid]) + except Exception: + median_val = 0.0 + stats["median_tokens_per_commander"] = round(float(median_val), 3) + + top_tokens: list[Dict[str, Any]] = [] + if isinstance(index_map, dict) and index_map: + try: + pairs = [ + (token, int(len(idx))) + for token, idx in index_map.items() + if idx is not None + ] + pairs.sort(key=lambda item: item[1], reverse=True) + for token, count in pairs[:10]: + top_tokens.append({"token": token, "count": count}) + except Exception: + top_tokens = [] + stats["top_tokens"] = top_tokens + + try: + pool_allowed, pool_meta = _build_random_theme_pool(df) + except Exception: + pool_allowed, pool_meta = set(), {} + rules_meta = pool_meta.get("rules") or { + "min_commander_tags": 5, + "excluded_keywords": list(_GLOBAL_THEME_KEYWORDS), + "excluded_patterns": [" ".join(p) for p in _GLOBAL_THEME_PATTERNS], + "kindred_keywords": list(_KINDRED_KEYWORDS), + "overrepresented_share_threshold": _OVERREPRESENTED_SHARE_THRESHOLD, + } + stats["random_pool"] = { + "size": len(pool_allowed), + "coverage_ratio": pool_meta.get("coverage_ratio"), + "total_commander_count": pool_meta.get("total_commander_count"), + "excluded_counts": dict(pool_meta.get("excluded_counts", {})), + 
"excluded_samples": { + reason: list(tokens) + for reason, tokens in (pool_meta.get("excluded_samples", {}) or {}).items() + }, + "rules": dict(rules_meta), + "manual_exclusion_detail": dict(pool_meta.get("manual_exclusion_detail", {})), + "manual_exclusion_token_count": pool_meta.get("manual_exclusion_token_count", 0), + } + + try: + stats["index_telemetry"] = _get_index_telemetry_snapshot() + except Exception: + stats["index_telemetry"] = { + "builds": 0, + "token_count": 0, + "lookups": 0, + "hits": 0, + "misses": 0, + "hit_rate": 0.0, + "substring_checks": 0, + "substring_hits": 0, + } + + return stats + + +def get_theme_tag_stats(*, refresh: bool = False) -> Dict[str, Any]: + """Return cached commander theme tag statistics for diagnostics.""" + + global _THEME_STATS_CACHE, _THEME_STATS_CACHE_TS + + now = time.time() + if ( + not refresh + and _THEME_STATS_CACHE is not None + and (now - _THEME_STATS_CACHE_TS) < _THEME_STATS_TTL_S + ): + return dict(_THEME_STATS_CACHE) + + df = _load_commanders_df() + df = _ensure_theme_tag_cache(df) + stats = _build_theme_tag_stats(df) + + _THEME_STATS_CACHE = dict(stats) + _THEME_STATS_CACHE_TS = now + return stats def _normalize_tag(value: Optional[str]) -> Optional[str]: @@ -66,6 +819,29 @@ def _normalize_tag(value: Optional[str]) -> Optional[str]: return v if v else None +def _normalize_meta_value(value: Optional[str]) -> Optional[str]: + if value is None: + return None + text = str(value).strip() + return text if text else None + + +def _normalize_meta_list(values: Optional[Iterable[Optional[str]]]) -> List[str]: + normalized: List[str] = [] + seen: set[str] = set() + if not values: + return normalized + for value in values: + norm = _normalize_meta_value(value) + if norm: + lowered = norm.lower() + if lowered in seen: + continue + seen.add(lowered) + normalized.append(lowered) + return normalized + + def _filter_multi(df: pd.DataFrame, primary: Optional[str], secondary: Optional[str], tertiary: Optional[str]) -> 
tuple[pd.DataFrame, Dict[str, Any]]: """Return filtered commander dataframe based on ordered fallback strategy. @@ -95,13 +871,42 @@ def _filter_multi(df: pd.DataFrame, primary: Optional[str], secondary: Optional[ s = _normalize_tag(secondary) t = _normalize_tag(tertiary) # Helper to test AND-combo + def _get_index_map(current_df: pd.DataFrame) -> Dict[str, pd.Index]: + _ensure_theme_tag_cache(current_df) + index_map = current_df.attrs.get("_ltag_index") + if index_map is None: + _ensure_theme_tag_index(current_df) + index_map = current_df.attrs.get("_ltag_index") or {} + return index_map # type: ignore[return-value] + + index_map_all = _get_index_map(df) + def and_filter(req: List[str]) -> pd.DataFrame: if not req: return df req_l = [r.lower() for r in req] try: - mask = df.get("themeTags").apply(lambda tags: all(any(str(x).strip().lower() == r for x in (tags or [])) for r in req_l)) - return df[mask] + matching_indices: Optional[pd.Index] = None + for token in req_l: + token_matches = index_map_all.get(token) + hit = False + if token_matches is not None: + try: + hit = len(token_matches) > 0 + except Exception: + hit = False + try: + _record_index_lookup(token, hit) + except Exception: + pass + if not hit: + return df.iloc[0:0] + matching_indices = token_matches if matching_indices is None else matching_indices.intersection(token_matches) + if matching_indices is not None and matching_indices.empty: + return df.iloc[0:0] + if matching_indices is None or matching_indices.empty: + return df.iloc[0:0] + return df.loc[matching_indices] except Exception: return df.iloc[0:0] @@ -140,22 +945,63 @@ def _filter_multi(df: pd.DataFrame, primary: Optional[str], secondary: Optional[ return p_only, diag # 5. 
Synergy fallback based on primary token overlaps if p: - words = [w for w in p.replace('-', ' ').split() if w] + words = [w.lower() for w in p.replace('-', ' ').split() if w] if words: try: - mask = df.get("themeTags").apply( - lambda tags: any( - any(w == str(x).strip().lower() or w in str(x).strip().lower() for w in words) - for x in (tags or []) - ) - ) - synergy_df = df[mask] - if len(synergy_df) > 0: - diag["resolved_themes"] = words # approximate overlap tokens - diag["combo_fallback"] = True - diag["synergy_fallback"] = True - diag["fallback_reason"] = "Primary theme had no direct matches; using synergy overlap" - return synergy_df, diag + direct_hits = pd.Index([]) + matched_tokens: set[str] = set() + matched_order: List[str] = [] + for token in words: + matches = index_map_all.get(token) + hit = False + if matches is not None: + try: + hit = len(matches) > 0 + except Exception: + hit = False + try: + _record_index_lookup(token, hit) + except Exception: + pass + if hit: + if token not in matched_tokens: + matched_tokens.add(token) + matched_order.append(token) + direct_hits = direct_hits.union(matches) + + # If no direct hits, attempt substring matches using cached index keys + if len(direct_hits) == 0: + for word in words: + for token_value, matches in index_map_all.items(): + if word in token_value: + hit = False + if matches is not None: + try: + hit = len(matches) > 0 + except Exception: + hit = False + try: + _record_index_lookup(token_value, hit, substring=True) + except Exception: + pass + if hit: + token_key = str(token_value).strip().lower() + if token_key and token_key not in matched_tokens: + matched_tokens.add(token_key) + matched_order.append(token_key) + direct_hits = direct_hits.union(matches) + + if len(direct_hits) > 0: + synergy_df = df.loc[direct_hits] + if len(synergy_df) > 0: + display_tokens = _resolve_display_tokens(matched_order or words, synergy_df, df) + if not display_tokens: + display_tokens = [_fallback_display_token(word) for 
word in words] + diag["resolved_themes"] = display_tokens + diag["combo_fallback"] = True + diag["synergy_fallback"] = True + diag["fallback_reason"] = "Primary theme had no direct matches; using synergy overlap" + return synergy_df, diag except Exception: pass # 6. Full pool fallback @@ -220,6 +1066,10 @@ def build_random_deck( primary_theme: Optional[str] = None, secondary_theme: Optional[str] = None, tertiary_theme: Optional[str] = None, + auto_fill_missing: bool = False, + auto_fill_secondary: Optional[bool] = None, + auto_fill_tertiary: Optional[bool] = None, + strict_theme_match: bool = False, ) -> RandomBuildResult: """Thin wrapper for random selection of a commander, deterministic when seeded. @@ -252,9 +1102,23 @@ def build_random_deck( if primary_theme is None: primary_theme = theme # legacy single theme becomes primary df_all = _load_commanders_df() + df_all = _ensure_theme_tag_cache(df_all) df, multi_diag = _filter_multi(df_all, primary_theme, secondary_theme, tertiary_theme) + strict_flag = bool(strict_theme_match) + if strict_flag: + if df.empty: + raise RandomThemeNoMatchError( + "No commanders matched the requested themes", + diagnostics=dict(multi_diag or {}), + ) + if bool(multi_diag.get("combo_fallback")) or bool(multi_diag.get("synergy_fallback")): + raise RandomThemeNoMatchError( + "No commanders matched the requested themes", + diagnostics=dict(multi_diag or {}), + ) used_fallback = False original_theme = None + resolved_before_auto = list(multi_diag.get("resolved_themes") or []) if multi_diag.get("combo_fallback") or multi_diag.get("synergy_fallback"): # For legacy fields used_fallback = bool(multi_diag.get("combo_fallback")) @@ -293,6 +1157,73 @@ def build_random_deck( # Timeout/attempts exhausted; choose deterministically based on seed modulo pick = names[resolved_seed % len(names)] + display_themes: List[str] = list(multi_diag.get("resolved_themes") or []) + auto_filled_themes: List[str] = [] + + fill_secondary = bool(auto_fill_secondary 
if auto_fill_secondary is not None else auto_fill_missing) + fill_tertiary = bool(auto_fill_tertiary if auto_fill_tertiary is not None else auto_fill_missing) + auto_fill_enabled_flag = bool(fill_secondary or fill_tertiary) + + if auto_fill_enabled_flag and pick: + try: + allowed_pool, _pool_meta = _get_random_theme_pool_cached(refresh=False, df=df_all) + except Exception: + allowed_pool = set() + try: + secondary_new, tertiary_new, filled = _auto_fill_missing_themes( + df_all, + pick, + rng, + primary_theme=primary_theme, + secondary_theme=secondary_theme, + tertiary_theme=tertiary_theme, + allowed_pool=allowed_pool, + fill_secondary=fill_secondary, + fill_tertiary=fill_tertiary, + ) + except Exception: + secondary_new, tertiary_new, filled = secondary_theme, tertiary_theme, [] + secondary_theme = secondary_new + tertiary_theme = tertiary_new + auto_filled_themes = list(filled or []) + + if auto_filled_themes: + multi_diag.setdefault("filter_resolved_themes", resolved_before_auto) + if not display_themes: + display_themes = [ + value + for value in (primary_theme, secondary_theme, tertiary_theme) + if value + ] + existing_norms = { + str(item).strip().lower() + for item in display_themes + if isinstance(item, str) and str(item).strip() + } + for value in auto_filled_themes: + try: + text = str(value).strip() + except Exception: + continue + if not text: + continue + key = text.lower() + if key in existing_norms: + continue + display_themes.append(text) + existing_norms.add(key) + multi_diag["resolved_themes"] = list(display_themes) + + if not display_themes: + display_themes = list(multi_diag.get("resolved_themes") or []) + + multi_diag["auto_fill_secondary_enabled"] = bool(fill_secondary) + multi_diag["auto_fill_tertiary_enabled"] = bool(fill_tertiary) + multi_diag["auto_fill_enabled"] = bool(auto_fill_enabled_flag) + multi_diag["auto_fill_applied"] = bool(auto_filled_themes) + multi_diag["auto_filled_themes"] = list(auto_filled_themes) + 
multi_diag["strict_theme_match"] = strict_flag + return RandomBuildResult( seed=int(resolved_seed), commander=pick, @@ -302,6 +1233,13 @@ def build_random_deck( secondary_theme=secondary_theme, tertiary_theme=tertiary_theme, resolved_themes=list(multi_diag.get("resolved_themes") or []), + display_themes=list(display_themes), + auto_fill_secondary_enabled=bool(fill_secondary), + auto_fill_tertiary_enabled=bool(fill_tertiary), + auto_fill_enabled=bool(auto_fill_enabled_flag), + auto_fill_applied=bool(auto_filled_themes), + auto_filled_themes=list(auto_filled_themes or []), + strict_theme_match=strict_flag, combo_fallback=bool(multi_diag.get("combo_fallback")), synergy_fallback=bool(multi_diag.get("synergy_fallback")), fallback_reason=multi_diag.get("fallback_reason"), @@ -316,6 +1254,7 @@ def build_random_deck( __all__ = [ "RandomBuildResult", "build_random_deck", + "get_theme_tag_stats", ] @@ -336,15 +1275,158 @@ def build_random_full_deck( seed: Optional[int | str] = None, attempts: int = 5, timeout_s: float = 5.0, + *, + primary_theme: Optional[str] = None, + secondary_theme: Optional[str] = None, + tertiary_theme: Optional[str] = None, + auto_fill_missing: bool = False, + auto_fill_secondary: Optional[bool] = None, + auto_fill_tertiary: Optional[bool] = None, + strict_theme_match: bool = False, ) -> RandomFullBuildResult: """Select a commander deterministically, then run a full deck build via DeckBuilder. Returns a compact result including the seed, commander, and a summarized decklist. 
""" t0 = time.time() - base = build_random_deck(theme=theme, constraints=constraints, seed=seed, attempts=attempts, timeout_s=timeout_s) + + # Align legacy single-theme input with multi-theme fields + if primary_theme is None and theme is not None: + primary_theme = theme + if primary_theme is not None and theme is None: + theme = primary_theme + + base = build_random_deck( + theme=theme, + constraints=constraints, + seed=seed, + attempts=attempts, + timeout_s=timeout_s, + primary_theme=primary_theme, + secondary_theme=secondary_theme, + tertiary_theme=tertiary_theme, + auto_fill_missing=auto_fill_missing, + auto_fill_secondary=auto_fill_secondary, + auto_fill_tertiary=auto_fill_tertiary, + strict_theme_match=strict_theme_match, + ) + + def _resolve_theme_choices_for_headless(commander_name: str, base_result: RandomBuildResult) -> tuple[int, Optional[int], Optional[int]]: + """Translate resolved theme names into DeckBuilder menu selections. + + The headless runner expects numeric indices for primary/secondary/tertiary selections + based on the commander-specific theme menu. We mirror the CLI ordering so the + automated run picks the same combination that triggered the commander selection. 
+ """ + + try: + df = _load_commanders_df() + row = df[df["name"].astype(str) == str(commander_name)] + if row.empty: + return 1, None, None + raw_tags = row.iloc[0].get("themeTags", []) or [] + except Exception: + return 1, None, None + + cleaned_tags: List[str] = [] + seen_tags: set[str] = set() + for tag in raw_tags: + try: + tag_str = str(tag).strip() + except Exception: + continue + if not tag_str: + continue + key = tag_str.lower() + if key in seen_tags: + continue + seen_tags.add(key) + cleaned_tags.append(tag_str) + + if not cleaned_tags: + return 1, None, None + + resolved_list: List[str] = [] + for item in (base_result.resolved_themes or [])[:3]: + try: + text = str(item).strip() + except Exception: + continue + if text: + resolved_list.append(text) + + def _norm(value: Optional[str]) -> str: + return str(value).strip().lower() if isinstance(value, str) else "" + + def _collect_candidates(*values: Optional[str]) -> List[str]: + collected: List[str] = [] + seen: set[str] = set() + for val in values: + if not val: + continue + text = str(val).strip() + if not text: + continue + key = text.lower() + if key in seen: + continue + seen.add(key) + collected.append(text) + return collected + + def _match(options: List[str], candidates: List[str]) -> Optional[int]: + for candidate in candidates: + cand_norm = candidate.lower() + for idx, option in enumerate(options, start=1): + if option.strip().lower() == cand_norm: + return idx + return None + + primary_candidates = _collect_candidates( + resolved_list[0] if resolved_list else None, + base_result.primary_theme, + ) + primary_idx = _match(cleaned_tags, primary_candidates) + if primary_idx is None: + primary_idx = 1 + + def _remove_index(options: List[str], idx: Optional[int]) -> List[str]: + if idx is None: + return list(options) + return [opt for position, opt in enumerate(options, start=1) if position != idx] + + remaining_after_primary = _remove_index(cleaned_tags, primary_idx) + + secondary_idx: Optional[int] 
= None + tertiary_idx: Optional[int] = None + + if len(resolved_list) >= 2 and remaining_after_primary: + second_token = resolved_list[1] + secondary_candidates = _collect_candidates( + second_token, + base_result.secondary_theme if _norm(base_result.secondary_theme) == _norm(second_token) else None, + base_result.tertiary_theme if _norm(base_result.tertiary_theme) == _norm(second_token) else None, + ) + secondary_idx = _match(remaining_after_primary, secondary_candidates) + if secondary_idx is not None: + remaining_after_secondary = _remove_index(remaining_after_primary, secondary_idx) + if len(resolved_list) >= 3 and remaining_after_secondary: + third_token = resolved_list[2] + tertiary_candidates = _collect_candidates( + third_token, + base_result.tertiary_theme if _norm(base_result.tertiary_theme) == _norm(third_token) else None, + ) + tertiary_idx = _match(remaining_after_secondary, tertiary_candidates) + elif len(resolved_list) >= 3: + # Multi-theme fallback kept extra tokens but we could not match a secondary; + # in that case avoid forcing tertiary selection. 
+ tertiary_idx = None + + return int(primary_idx), int(secondary_idx) if secondary_idx is not None else None, int(tertiary_idx) if tertiary_idx is not None else None # Run the full headless build with the chosen commander and the same seed + primary_choice_idx, secondary_choice_idx, tertiary_choice_idx = _resolve_theme_choices_for_headless(base.commander, base) + try: from headless_runner import run as _run # type: ignore except Exception as e: @@ -353,6 +1435,20 @@ def build_random_full_deck( commander=base.commander, theme=base.theme, constraints=base.constraints or {}, + primary_theme=getattr(base, "primary_theme", None), + secondary_theme=getattr(base, "secondary_theme", None), + tertiary_theme=getattr(base, "tertiary_theme", None), + resolved_themes=list(getattr(base, "resolved_themes", []) or []), + strict_theme_match=bool(getattr(base, "strict_theme_match", False)), + combo_fallback=bool(getattr(base, "combo_fallback", False)), + synergy_fallback=bool(getattr(base, "synergy_fallback", False)), + fallback_reason=getattr(base, "fallback_reason", None), + display_themes=list(getattr(base, "display_themes", []) or []), + auto_fill_secondary_enabled=bool(getattr(base, "auto_fill_secondary_enabled", False)), + auto_fill_tertiary_enabled=bool(getattr(base, "auto_fill_tertiary_enabled", False)), + auto_fill_enabled=bool(getattr(base, "auto_fill_enabled", False)), + auto_fill_applied=bool(getattr(base, "auto_fill_applied", False)), + auto_filled_themes=list(getattr(base, "auto_filled_themes", []) or []), decklist=None, diagnostics={"error": f"headless runner unavailable: {e}"}, ) @@ -366,7 +1462,13 @@ def build_random_full_deck( _os.environ['RANDOM_BUILD_SUPPRESS_INITIAL_EXPORT'] = '1' except Exception: pass - builder = _run(command_name=base.commander, seed=base.seed) + builder = _run( + command_name=base.commander, + seed=base.seed, + primary_choice=primary_choice_idx, + secondary_choice=secondary_choice_idx, + tertiary_choice=tertiary_choice_idx, + ) # Build 
summary (may fail gracefully) summary: Dict[str, Any] | None = None @@ -376,6 +1478,80 @@ def build_random_full_deck( except Exception: summary = None + primary_theme_clean = _normalize_meta_value(getattr(base, "primary_theme", None)) + secondary_theme_clean = _normalize_meta_value(getattr(base, "secondary_theme", None)) + tertiary_theme_clean = _normalize_meta_value(getattr(base, "tertiary_theme", None)) + resolved_themes_clean = _normalize_meta_list(getattr(base, "resolved_themes", []) or []) + fallback_reason_clean = _normalize_meta_value(getattr(base, "fallback_reason", None)) + display_themes_clean = _normalize_meta_list(getattr(base, "display_themes", []) or []) + auto_filled_clean = _normalize_meta_list(getattr(base, "auto_filled_themes", []) or []) + + random_meta_fields = { + "primary_theme": primary_theme_clean, + "secondary_theme": secondary_theme_clean, + "tertiary_theme": tertiary_theme_clean, + "resolved_themes": resolved_themes_clean, + "combo_fallback": bool(getattr(base, "combo_fallback", False)), + "synergy_fallback": bool(getattr(base, "synergy_fallback", False)), + "fallback_reason": fallback_reason_clean, + "display_themes": display_themes_clean, + "auto_fill_secondary_enabled": bool(getattr(base, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(base, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(base, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(base, "auto_fill_applied", False)), + "auto_filled_themes": auto_filled_clean, + } + + if isinstance(summary, dict): + try: + existing_meta = summary.get("meta") if isinstance(summary.get("meta"), dict) else {} + except Exception: + existing_meta = {} + merged_meta = dict(existing_meta or {}) + merged_meta.update({k: v for k, v in random_meta_fields.items()}) + summary["meta"] = merged_meta + + def _build_sidecar_meta(csv_path_val: Optional[str], txt_path_val: Optional[str]) -> Dict[str, Any]: + commander_name = 
getattr(builder, 'commander_name', '') or getattr(builder, 'commander', '') + try: + selected_tags = list(getattr(builder, 'selected_tags', []) or []) + except Exception: + selected_tags = [] + if not selected_tags: + selected_tags = [t for t in [getattr(builder, 'primary_tag', None), getattr(builder, 'secondary_tag', None), getattr(builder, 'tertiary_tag', None)] if t] + meta_payload: Dict[str, Any] = { + "commander": commander_name, + "tags": selected_tags, + "bracket_level": getattr(builder, 'bracket_level', None), + "csv": csv_path_val, + "txt": txt_path_val, + "random_seed": base.seed, + "random_theme": base.theme, + "random_constraints": base.constraints or {}, + } + meta_payload.update(random_meta_fields) + # Legacy keys for backward compatibility + meta_payload.setdefault("random_primary_theme", meta_payload.get("primary_theme")) + meta_payload.setdefault("random_secondary_theme", meta_payload.get("secondary_theme")) + meta_payload.setdefault("random_tertiary_theme", meta_payload.get("tertiary_theme")) + meta_payload.setdefault("random_resolved_themes", meta_payload.get("resolved_themes")) + meta_payload.setdefault("random_combo_fallback", meta_payload.get("combo_fallback")) + meta_payload.setdefault("random_synergy_fallback", meta_payload.get("synergy_fallback")) + meta_payload.setdefault("random_fallback_reason", meta_payload.get("fallback_reason")) + meta_payload.setdefault("random_display_themes", meta_payload.get("display_themes")) + meta_payload.setdefault("random_auto_fill_secondary_enabled", meta_payload.get("auto_fill_secondary_enabled")) + meta_payload.setdefault("random_auto_fill_tertiary_enabled", meta_payload.get("auto_fill_tertiary_enabled")) + meta_payload.setdefault("random_auto_fill_enabled", meta_payload.get("auto_fill_enabled")) + meta_payload.setdefault("random_auto_fill_applied", meta_payload.get("auto_fill_applied")) + meta_payload.setdefault("random_auto_filled_themes", meta_payload.get("auto_filled_themes")) + try: + custom_base = 
getattr(builder, 'custom_export_base', None) + except Exception: + custom_base = None + if isinstance(custom_base, str) and custom_base.strip(): + meta_payload["name"] = custom_base.strip() + return meta_payload + # Attempt to reuse existing export performed inside builder (headless run already exported) csv_path: str | None = None txt_path: str | None = None @@ -409,22 +1585,7 @@ def build_random_full_deck( if summary: sidecar = base_path + '.summary.json' if not _os.path.isfile(sidecar): - meta = { - "commander": getattr(builder, 'commander_name', '') or getattr(builder, 'commander', ''), - "tags": list(getattr(builder, 'selected_tags', []) or []) or [t for t in [getattr(builder, 'primary_tag', None), getattr(builder, 'secondary_tag', None), getattr(builder, 'tertiary_tag', None)] if t], - "bracket_level": getattr(builder, 'bracket_level', None), - "csv": csv_path, - "txt": txt_path, - "random_seed": base.seed, - "random_theme": base.theme, - "random_constraints": base.constraints or {}, - } - try: - custom_base = getattr(builder, 'custom_export_base', None) - except Exception: - custom_base = None - if isinstance(custom_base, str) and custom_base.strip(): - meta["name"] = custom_base.strip() + meta = _build_sidecar_meta(csv_path, txt_path) try: with open(sidecar, 'w', encoding='utf-8') as f: _json.dump({"meta": meta, "summary": summary}, f, ensure_ascii=False, indent=2) @@ -481,16 +1642,7 @@ def build_random_full_deck( if summary: sidecar = base_path + '.summary.json' if not _os.path.isfile(sidecar): - meta = { - "commander": getattr(builder, 'commander_name', '') or getattr(builder, 'commander', ''), - "tags": list(getattr(builder, 'selected_tags', []) or []) or [t for t in [getattr(builder, 'primary_tag', None), getattr(builder, 'secondary_tag', None), getattr(builder, 'tertiary_tag', None)] if t], - "bracket_level": getattr(builder, 'bracket_level', None), - "csv": csv_path, - "txt": txt_path, - "random_seed": base.seed, - "random_theme": base.theme, - 
"random_constraints": base.constraints or {}, - } + meta = _build_sidecar_meta(csv_path, txt_path) with open(sidecar, 'w', encoding='utf-8') as f: _json.dump({"meta": meta, "summary": summary}, f, ensure_ascii=False, indent=2) except Exception: @@ -521,16 +1673,23 @@ def build_random_full_deck( "timeout_hit": bool(getattr(base, "timeout_hit", False)), "retries_exhausted": bool(getattr(base, "retries_exhausted", False)), } - return RandomFullBuildResult( - seed=base.seed, - commander=base.commander, - theme=base.theme, - constraints=base.constraints or {}, - decklist=deck_items, - diagnostics=diags, - summary=summary, - csv_path=csv_path, - txt_path=txt_path, - compliance=compliance, + diags.update( + { + "resolved_themes": list(getattr(base, "resolved_themes", []) or []), + "combo_fallback": bool(getattr(base, "combo_fallback", False)), + "synergy_fallback": bool(getattr(base, "synergy_fallback", False)), + "fallback_reason": getattr(base, "fallback_reason", None), + } ) + base_kwargs = {f.name: getattr(base, f.name) for f in fields(RandomBuildResult)} + base_kwargs.update({ + "decklist": deck_items, + "diagnostics": diags, + "summary": summary, + "csv_path": csv_path, + "txt_path": txt_path, + "compliance": compliance, + }) + return RandomFullBuildResult(**base_kwargs) + diff --git a/code/scripts/check_random_theme_perf.py b/code/scripts/check_random_theme_perf.py new file mode 100644 index 0000000..5b739e5 --- /dev/null +++ b/code/scripts/check_random_theme_perf.py @@ -0,0 +1,118 @@ +"""Opt-in guard that compares multi-theme filter performance to a stored baseline. + +Run inside the project virtual environment: + + python -m code.scripts.check_random_theme_perf --baseline config/random_theme_perf_baseline.json + +The script executes the same profiling loop as `profile_multi_theme_filter` and fails +if the observed mean or p95 timings regress more than the allowed threshold. 
+""" +from __future__ import annotations + +import argparse +import json +import sys +from pathlib import Path +from typing import Any, Dict, Tuple + +PROJECT_ROOT = Path(__file__).resolve().parents[2] +DEFAULT_BASELINE = PROJECT_ROOT / "config" / "random_theme_perf_baseline.json" + +if str(PROJECT_ROOT) not in sys.path: + sys.path.append(str(PROJECT_ROOT)) + +from code.scripts.profile_multi_theme_filter import run_profile # type: ignore # noqa: E402 + + +def _load_baseline(path: Path) -> Dict[str, Any]: + if not path.exists(): + raise FileNotFoundError(f"Baseline file not found: {path}") + data = json.loads(path.read_text(encoding="utf-8")) + return data + + +def _extract(metric: Dict[str, Any], key: str) -> float: + try: + value = float(metric.get(key, 0.0)) + except Exception: + value = 0.0 + return value + + +def _check_section(name: str, actual: Dict[str, Any], baseline: Dict[str, Any], threshold: float) -> Tuple[bool, str]: + a_mean = _extract(actual, "mean_ms") + b_mean = _extract(baseline, "mean_ms") + a_p95 = _extract(actual, "p95_ms") + b_p95 = _extract(baseline, "p95_ms") + + allowed_mean = b_mean * (1.0 + threshold) + allowed_p95 = b_p95 * (1.0 + threshold) + + mean_ok = a_mean <= allowed_mean or b_mean == 0.0 + p95_ok = a_p95 <= allowed_p95 or b_p95 == 0.0 + + status = mean_ok and p95_ok + + def _format_row(label: str, actual_val: float, baseline_val: float, allowed_val: float, ok: bool) -> str: + trend = ((actual_val - baseline_val) / baseline_val * 100.0) if baseline_val else 0.0 + trend_str = f"{trend:+.1f}%" if baseline_val else "n/a" + limit_str = f"≤ {allowed_val:.3f}ms" if baseline_val else "n/a" + return f" {label:<6} actual={actual_val:.3f}ms baseline={baseline_val:.3f}ms ({trend_str}), limit {limit_str} -> {'OK' if ok else 'FAIL'}" + + rows = [f"Section: {name}"] + rows.append(_format_row("mean", a_mean, b_mean, allowed_mean, mean_ok)) + rows.append(_format_row("p95", a_p95, b_p95, allowed_p95, p95_ok)) + return status, "\n".join(rows) + + 
+def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser(description="Check multi-theme filtering performance against a baseline") + parser.add_argument("--baseline", type=Path, default=DEFAULT_BASELINE, help="Baseline JSON file (default: config/random_theme_perf_baseline.json)") + parser.add_argument("--iterations", type=int, default=400, help="Number of iterations to sample (default: 400)") + parser.add_argument("--seed", type=int, default=None, help="Optional RNG seed for reproducibility") + parser.add_argument("--threshold", type=float, default=0.15, help="Allowed regression threshold as a fraction (default: 0.15 = 15%)") + parser.add_argument("--update-baseline", action="store_true", help="Overwrite the baseline file with the newly collected metrics") + args = parser.parse_args(argv) + + baseline_path = args.baseline if args.baseline else DEFAULT_BASELINE + if args.update_baseline and not baseline_path.parent.exists(): + baseline_path.parent.mkdir(parents=True, exist_ok=True) + + if not args.update_baseline: + baseline = _load_baseline(baseline_path) + else: + baseline = {} + + results = run_profile(args.iterations, args.seed) + + cascade_status, cascade_report = _check_section("cascade", results.get("cascade", {}), baseline.get("cascade", {}), args.threshold) + synergy_status, synergy_report = _check_section("synergy", results.get("synergy", {}), baseline.get("synergy", {}), args.threshold) + + print("Iterations:", results.get("iterations")) + print("Seed:", results.get("seed")) + print(cascade_report) + print(synergy_report) + + overall_ok = cascade_status and synergy_status + + if args.update_baseline: + payload = { + "iterations": results.get("iterations"), + "seed": results.get("seed"), + "cascade": results.get("cascade"), + "synergy": results.get("synergy"), + } + baseline_path.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8") + print(f"Baseline updated → {baseline_path}") + return 0 + + if not overall_ok: + 
print(f"FAIL: performance regressions exceeded {args.threshold * 100:.1f}% threshold", file=sys.stderr) + return 1 + + print("PASS: performance within allowed threshold") + return 0 + + +if __name__ == "__main__": # pragma: no cover + raise SystemExit(main()) diff --git a/code/scripts/profile_multi_theme_filter.py b/code/scripts/profile_multi_theme_filter.py new file mode 100644 index 0000000..2af36c0 --- /dev/null +++ b/code/scripts/profile_multi_theme_filter.py @@ -0,0 +1,136 @@ +"""Profile helper for multi-theme commander filtering. + +Run within the project virtual environment: + + python code/scripts/profile_multi_theme_filter.py --iterations 500 + +Outputs aggregate timing for combination and synergy fallback scenarios. +""" + +from __future__ import annotations + +import argparse +import json +import statistics +import sys +import time +from pathlib import Path +from typing import Any, Dict, List, Tuple + +import pandas as pd + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.append(str(PROJECT_ROOT)) + +from deck_builder.random_entrypoint import _ensure_theme_tag_cache, _filter_multi, _load_commanders_df # noqa: E402 + + +def _sample_combinations(tags: List[str], iterations: int) -> List[Tuple[str | None, str | None, str | None]]: + import random + + combos: List[Tuple[str | None, str | None, str | None]] = [] + if not tags: + return combos + for _ in range(iterations): + primary = random.choice(tags) + secondary = random.choice(tags) if random.random() < 0.45 else None + tertiary = random.choice(tags) if random.random() < 0.25 else None + combos.append((primary, secondary, tertiary)) + return combos + + +def _collect_tag_pool(df: pd.DataFrame) -> List[str]: + tag_pool: set[str] = set() + for tags in df.get("_ltags", []): # type: ignore[assignment] + if not tags: + continue + for token in tags: + tag_pool.add(token) + return sorted(tag_pool) + + +def _summarize(values: List[float]) -> Dict[str, float]: + 
mean_ms = statistics.mean(values) * 1000 + if len(values) >= 20: + p95_ms = statistics.quantiles(values, n=20)[18] * 1000 + else: + p95_ms = max(values) * 1000 if values else 0.0 + return { + "mean_ms": round(mean_ms, 6), + "p95_ms": round(p95_ms, 6), + "samples": len(values), + } + + +def run_profile(iterations: int, seed: int | None = None) -> Dict[str, Any]: + if iterations <= 0: + raise ValueError("Iterations must be a positive integer") + + df = _load_commanders_df() + df = _ensure_theme_tag_cache(df) + tag_pool = _collect_tag_pool(df) + if not tag_pool: + raise RuntimeError("No theme tags available in dataset; ensure commander catalog is populated") + + combos = _sample_combinations(tag_pool, iterations) + if not combos: + raise RuntimeError("Failed to generate theme combinations for profiling") + + timings: List[float] = [] + synergy_timings: List[float] = [] + + for primary, secondary, tertiary in combos: + start = time.perf_counter() + _filter_multi(df, primary, secondary, tertiary) + timings.append(time.perf_counter() - start) + + improbable_primary = f"{primary or 'aggro'}_unlikely_value" + start_synergy = time.perf_counter() + _filter_multi(df, improbable_primary, secondary, tertiary) + synergy_timings.append(time.perf_counter() - start_synergy) + + return { + "iterations": iterations, + "seed": seed, + "cascade": _summarize(timings), + "synergy": _summarize(synergy_timings), + } + + +def main() -> None: + parser = argparse.ArgumentParser(description="Profile multi-theme filtering performance") + parser.add_argument("--iterations", type=int, default=400, help="Number of random theme combinations to evaluate") + parser.add_argument("--seed", type=int, default=None, help="Optional RNG seed for repeatability") + parser.add_argument("--json", type=Path, help="Optional path to write the raw metrics as JSON") + args = parser.parse_args() + + if args.seed is not None: + import random + + random.seed(args.seed) + + results = run_profile(args.iterations, 
args.seed) + + def _print(label: str, stats: Dict[str, float]) -> None: + mean_ms = stats.get("mean_ms", 0.0) + p95_ms = stats.get("p95_ms", 0.0) + samples = stats.get("samples", 0) + print(f"{label}: mean={mean_ms:.4f}ms p95={p95_ms:.4f}ms (n={samples})") + + _print("AND-combo cascade", results.get("cascade", {})) + _print("Synergy fallback", results.get("synergy", {})) + + if args.json: + payload = { + "iterations": results.get("iterations"), + "seed": results.get("seed"), + "cascade": results.get("cascade"), + "synergy": results.get("synergy"), + } + args.json.parent.mkdir(parents=True, exist_ok=True) + args.json.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8") + + +if __name__ == "__main__": + main() diff --git a/code/scripts/report_random_theme_pool.py b/code/scripts/report_random_theme_pool.py new file mode 100644 index 0000000..1b3833f --- /dev/null +++ b/code/scripts/report_random_theme_pool.py @@ -0,0 +1,193 @@ +"""Summarize the curated random theme pool and exclusion rules. + +Usage examples: + + python -m code.scripts.report_random_theme_pool --format markdown + python -m code.scripts.report_random_theme_pool --output logs/random_theme_pool.json + +The script refreshes the commander catalog, rebuilds the curated random +pool using the same heuristics as Random Mode auto-fill, and prints a +summary (JSON by default). 
+""" +from __future__ import annotations + +import argparse +import json +import sys +from pathlib import Path +from typing import Any, Dict, List + +PROJECT_ROOT = Path(__file__).resolve().parents[1] +if str(PROJECT_ROOT) not in sys.path: + sys.path.append(str(PROJECT_ROOT)) + +from deck_builder.random_entrypoint import ( # type: ignore # noqa: E402 + _build_random_theme_pool, + _ensure_theme_tag_cache, + _load_commanders_df, + _OVERREPRESENTED_SHARE_THRESHOLD, +) + + +def build_report(refresh: bool = False) -> Dict[str, Any]: + df = _load_commanders_df() + if refresh: + # Force re-cache of tag structures + df = _ensure_theme_tag_cache(df) + else: + try: + df = _ensure_theme_tag_cache(df) + except Exception: + pass + allowed, metadata = _build_random_theme_pool(df, include_details=True) + detail = metadata.pop("excluded_detail", {}) + report = { + "allowed_tokens": sorted(allowed), + "allowed_count": len(allowed), + "metadata": metadata, + "excluded_detail": detail, + } + return report + + +def format_markdown(report: Dict[str, Any], *, limit: int = 20) -> str: + lines: List[str] = [] + meta = report.get("metadata", {}) + rules = meta.get("rules", {}) + lines.append("# Curated Random Theme Pool") + lines.append("") + lines.append(f"- Allowed tokens: **{report.get('allowed_count', 0)}**") + total_commander_count = meta.get("total_commander_count") + if total_commander_count is not None: + lines.append(f"- Commander entries analyzed: **{total_commander_count}**") + coverage = meta.get("coverage_ratio") + if coverage is not None: + pct = round(float(coverage) * 100.0, 2) + lines.append(f"- Coverage: **{pct}%** of catalog tokens") + if rules: + thresh = rules.get("overrepresented_share_threshold", _OVERREPRESENTED_SHARE_THRESHOLD) + thresh_pct = round(float(thresh) * 100.0, 2) + lines.append("- Exclusion rules:") + lines.append(" - Minimum commander coverage: 5 unique commanders") + lines.append(f" - Kindred filter keywords: {', '.join(rules.get('kindred_keywords', 
[]))}") + lines.append(f" - Global theme keywords: {', '.join(rules.get('excluded_keywords', []))}") + pattern_str = ", ".join(rules.get("excluded_patterns", [])) + if pattern_str: + lines.append(f" - Global theme patterns: {pattern_str}") + lines.append(f" - Over-represented threshold: ≥ {thresh_pct}% of commanders") + manual_src = rules.get("manual_exclusions_source") + manual_groups = rules.get("manual_exclusions") or [] + if manual_src or manual_groups: + lines.append(f" - Manual exclusion config: {manual_src or 'config/random_theme_exclusions.yml'}") + if manual_groups: + lines.append(f" - Manual categories: {len(manual_groups)} tracked groups") + counts = meta.get("excluded_counts", {}) or {} + if counts: + lines.append("") + lines.append("## Excluded tokens by reason") + lines.append("Reason | Count") + lines.append("------ | -----") + for reason, count in sorted(counts.items(), key=lambda item: item[0]): + lines.append(f"{reason} | {count}") + samples = meta.get("excluded_samples", {}) or {} + if samples: + lines.append("") + lines.append("## Sample tokens per exclusion reason") + for reason, tokens in sorted(samples.items(), key=lambda item: item[0]): + subset = tokens[:limit] + more = "" if len(tokens) <= limit else f" … (+{len(tokens) - limit})" + lines.append(f"- **{reason}**: {', '.join(subset)}{more}") + detail = report.get("excluded_detail", {}) or {} + if detail: + lines.append("") + lines.append("## Detailed exclusions (first few)") + for token, reasons in list(sorted(detail.items()))[:limit]: + lines.append(f"- {token}: {', '.join(reasons)}") + if len(detail) > limit: + lines.append(f"… (+{len(detail) - limit} more tokens)") + manual_detail = meta.get("manual_exclusion_detail", {}) or {} + if manual_detail: + lines.append("") + lines.append("## Manual exclusions applied") + for token, info in sorted(manual_detail.items(), key=lambda item: item[0]): + display = info.get("display", token) + category = info.get("category") + summary = 
info.get("summary") + notes = info.get("notes") + descriptors: List[str] = [] + if category: + descriptors.append(f"category={category}") + if summary: + descriptors.append(summary) + if notes: + descriptors.append(notes) + suffix = f" — {'; '.join(descriptors)}" if descriptors else "" + lines.append(f"- {display}{suffix}") + + if rules.get("manual_exclusions"): + lines.append("") + lines.append("## Manual exclusion categories") + for group in rules["manual_exclusions"]: + if not isinstance(group, dict): + continue + category = group.get("category", "manual") + summary = group.get("summary") + tokens = group.get("tokens", []) or [] + notes = group.get("notes") + lines.append(f"- **{category}** — {summary or 'no summary provided'}") + if notes: + lines.append(f" - Notes: {notes}") + if tokens: + token_list = tokens[:limit] + more = "" if len(tokens) <= limit else f" … (+{len(tokens) - limit})" + lines.append(f" - Tokens: {', '.join(token_list)}{more}") + + return "\n".join(lines) + + +def write_output(path: Path, payload: Dict[str, Any]) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("w", encoding="utf-8") as handle: + json.dump(payload, handle, indent=2, sort_keys=True) + handle.write("\n") + + +def write_manual_exclusions(path: Path, report: Dict[str, Any]) -> None: + meta = report.get("metadata", {}) or {} + rules = meta.get("rules", {}) or {} + detail = meta.get("manual_exclusion_detail", {}) or {} + payload = { + "source": rules.get("manual_exclusions_source"), + "categories": rules.get("manual_exclusions", []), + "tokens": detail, + } + write_output(path, payload) + + +def main(argv: List[str] | None = None) -> int: + parser = argparse.ArgumentParser(description="Report the curated random theme pool heuristics") + parser.add_argument("--format", choices={"json", "markdown"}, default="json", help="Output format (default: json)") + parser.add_argument("--output", type=Path, help="Optional path to write the structured report (JSON 
regardless of --format)") + parser.add_argument("--limit", type=int, default=20, help="Max sample tokens per reason when printing markdown (default: 20)") + parser.add_argument("--refresh", action="store_true", help="Bypass caches when rebuilding commander stats") + parser.add_argument("--write-exclusions", type=Path, help="Optional path for writing manual exclusion tokens + metadata (JSON)") + args = parser.parse_args(argv) + + report = build_report(refresh=args.refresh) + + if args.output: + write_output(args.output, report) + + if args.write_exclusions: + write_manual_exclusions(args.write_exclusions, report) + + if args.format == "markdown": + print(format_markdown(report, limit=max(1, args.limit))) + else: + print(json.dumps(report, indent=2, sort_keys=True)) + + return 0 + + +if __name__ == "__main__": # pragma: no cover + raise SystemExit(main()) diff --git a/code/tests/test_random_build_api.py b/code/tests/test_random_build_api.py index b5685cb..aa91bd8 100644 --- a/code/tests/test_random_build_api.py +++ b/code/tests/test_random_build_api.py @@ -11,6 +11,7 @@ def test_random_build_api_commander_and_seed(monkeypatch): monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) app_module = importlib.import_module('code.web.app') + app_module = importlib.reload(app_module) client = TestClient(app_module.app) payload = {"seed": 12345, "theme": "Goblin Kindred"} @@ -20,3 +21,122 @@ def test_random_build_api_commander_and_seed(monkeypatch): assert data["seed"] == 12345 assert isinstance(data.get("commander"), str) assert data.get("commander") + assert "auto_fill_enabled" in data + assert "auto_fill_secondary_enabled" in data + assert "auto_fill_tertiary_enabled" in data + assert "auto_fill_applied" in data + assert "auto_filled_themes" in data + assert "display_themes" in data + + +def test_random_build_api_auto_fill_toggle(monkeypatch): + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", 
"testdata")) + + app_module = importlib.import_module('code.web.app') + client = TestClient(app_module.app) + + payload = {"seed": 54321, "primary_theme": "Aggro", "auto_fill_enabled": True} + r = client.post('/api/random_build', json=payload) + assert r.status_code == 200, r.text + data = r.json() + assert data["seed"] == 54321 + assert data.get("auto_fill_enabled") is True + assert data.get("auto_fill_secondary_enabled") is True + assert data.get("auto_fill_tertiary_enabled") is True + assert data.get("auto_fill_applied") in (True, False) + assert isinstance(data.get("auto_filled_themes"), list) + assert isinstance(data.get("display_themes"), list) + + +def test_random_build_api_partial_auto_fill(monkeypatch): + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) + + app_module = importlib.import_module('code.web.app') + client = TestClient(app_module.app) + + payload = { + "seed": 98765, + "primary_theme": "Aggro", + "auto_fill_secondary_enabled": True, + "auto_fill_tertiary_enabled": False, + } + r = client.post('/api/random_build', json=payload) + assert r.status_code == 200, r.text + data = r.json() + assert data["seed"] == 98765 + assert data.get("auto_fill_enabled") is True + assert data.get("auto_fill_secondary_enabled") is True + assert data.get("auto_fill_tertiary_enabled") is False + assert data.get("auto_fill_applied") in (True, False) + assert isinstance(data.get("auto_filled_themes"), list) + + +def test_random_build_api_tertiary_requires_secondary(monkeypatch): + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) + + app_module = importlib.import_module('code.web.app') + client = TestClient(app_module.app) + + payload = { + "seed": 192837, + "primary_theme": "Aggro", + "auto_fill_secondary_enabled": False, + "auto_fill_tertiary_enabled": True, + } + r = client.post('/api/random_build', json=payload) + assert 
r.status_code == 200, r.text + data = r.json() + assert data["seed"] == 192837 + assert data.get("auto_fill_enabled") is True + assert data.get("auto_fill_secondary_enabled") is True + assert data.get("auto_fill_tertiary_enabled") is True + assert data.get("auto_fill_applied") in (True, False) + assert isinstance(data.get("auto_filled_themes"), list) + + +def test_random_build_api_reports_auto_filled_themes(monkeypatch): + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) + + import code.web.app as app_module + import code.deck_builder.random_entrypoint as random_entrypoint + import deck_builder.random_entrypoint as random_entrypoint_pkg + + def fake_auto_fill( + df, + commander, + rng, + *, + primary_theme, + secondary_theme, + tertiary_theme, + allowed_pool, + fill_secondary, + fill_tertiary, + ): + return "Tokens", "Sacrifice", ["Tokens", "Sacrifice"] + + monkeypatch.setattr(random_entrypoint, "_auto_fill_missing_themes", fake_auto_fill) + monkeypatch.setattr(random_entrypoint_pkg, "_auto_fill_missing_themes", fake_auto_fill) + + client = TestClient(app_module.app) + + payload = { + "seed": 654321, + "primary_theme": "Aggro", + "auto_fill_enabled": True, + "auto_fill_secondary_enabled": True, + "auto_fill_tertiary_enabled": True, + } + r = client.post('/api/random_build', json=payload) + assert r.status_code == 200, r.text + data = r.json() + assert data["seed"] == 654321 + assert data.get("auto_fill_enabled") is True + assert data.get("auto_fill_applied") is True + assert data.get("auto_fill_secondary_enabled") is True + assert data.get("auto_fill_tertiary_enabled") is True + assert data.get("auto_filled_themes") == ["Tokens", "Sacrifice"] diff --git a/code/tests/test_random_metrics_and_seed_history.py b/code/tests/test_random_metrics_and_seed_history.py index 96ae72d..b3c000b 100644 --- a/code/tests/test_random_metrics_and_seed_history.py +++ b/code/tests/test_random_metrics_and_seed_history.py 
@@ -1,32 +1,66 @@ from __future__ import annotations + import os + from fastapi.testclient import TestClient + def test_metrics_and_seed_history(monkeypatch): - monkeypatch.setenv('RANDOM_MODES', '1') - monkeypatch.setenv('RANDOM_UI', '1') - monkeypatch.setenv('RANDOM_TELEMETRY', '1') - monkeypatch.setenv('CSV_FILES_DIR', os.path.join('csv_files', 'testdata')) - from code.web.app import app - client = TestClient(app) + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("RANDOM_UI", "1") + monkeypatch.setenv("RANDOM_TELEMETRY", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) - # Build + reroll to generate metrics and seed history - r1 = client.post('/api/random_full_build', json={'seed': 9090}) - assert r1.status_code == 200, r1.text - r2 = client.post('/api/random_reroll', json={'seed': 9090}) - assert r2.status_code == 200, r2.text + import code.web.app as app_module - # Metrics - m = client.get('/status/random_metrics') - assert m.status_code == 200, m.text - mj = m.json() - assert mj.get('ok') is True - metrics = mj.get('metrics') or {} - assert 'full_build' in metrics and 'reroll' in metrics + # Reset in-memory telemetry so assertions are deterministic + app_module.RANDOM_TELEMETRY = True + app_module.RATE_LIMIT_ENABLED = False + for bucket in app_module._RANDOM_METRICS.values(): + for key in bucket: + bucket[key] = 0 + for key in list(app_module._RANDOM_USAGE_METRICS.keys()): + app_module._RANDOM_USAGE_METRICS[key] = 0 + for key in list(app_module._RANDOM_FALLBACK_METRICS.keys()): + app_module._RANDOM_FALLBACK_METRICS[key] = 0 + app_module._RANDOM_FALLBACK_REASONS.clear() + app_module._RL_COUNTS.clear() - # Seed history - sh = client.get('/api/random/seeds') - assert sh.status_code == 200 - sj = sh.json() - seeds = sj.get('seeds') or [] - assert any(s == 9090 for s in seeds) and sj.get('last') in seeds + prev_ms = app_module.RANDOM_REROLL_THROTTLE_MS + prev_seconds = app_module._REROLL_THROTTLE_SECONDS + 
app_module.RANDOM_REROLL_THROTTLE_MS = 0 + app_module._REROLL_THROTTLE_SECONDS = 0.0 + + try: + with TestClient(app_module.app) as client: + # Build + reroll to generate metrics and seed history + r1 = client.post("/api/random_full_build", json={"seed": 9090, "primary_theme": "Aggro"}) + assert r1.status_code == 200, r1.text + r2 = client.post("/api/random_reroll", json={"seed": 9090}) + assert r2.status_code == 200, r2.text + + # Metrics + m = client.get("/status/random_metrics") + assert m.status_code == 200, m.text + mj = m.json() + assert mj.get("ok") is True + metrics = mj.get("metrics") or {} + assert "full_build" in metrics and "reroll" in metrics + + usage = mj.get("usage") or {} + modes = usage.get("modes") or {} + fallbacks = usage.get("fallbacks") or {} + assert set(modes.keys()) >= {"theme", "reroll", "surprise", "reroll_same_commander"} + assert modes.get("theme", 0) >= 2 + assert "none" in fallbacks + assert isinstance(usage.get("fallback_reasons"), dict) + + # Seed history + sh = client.get("/api/random/seeds") + assert sh.status_code == 200 + sj = sh.json() + seeds = sj.get("seeds") or [] + assert any(s == 9090 for s in seeds) and sj.get("last") in seeds + finally: + app_module.RANDOM_REROLL_THROTTLE_MS = prev_ms + app_module._REROLL_THROTTLE_SECONDS = prev_seconds diff --git a/code/tests/test_random_multi_theme_filtering.py b/code/tests/test_random_multi_theme_filtering.py new file mode 100644 index 0000000..8c37760 --- /dev/null +++ b/code/tests/test_random_multi_theme_filtering.py @@ -0,0 +1,236 @@ +from __future__ import annotations + +import json +from pathlib import Path +from typing import Iterable, Sequence + +import pandas as pd + +from deck_builder import random_entrypoint + + +def _patch_commanders(monkeypatch, rows: Sequence[dict[str, object]]) -> None: + df = pd.DataFrame(rows) + monkeypatch.setattr(random_entrypoint, "_load_commanders_df", lambda: df) + + +def _make_row(name: str, tags: Iterable[str]) -> dict[str, object]: + return 
{"name": name, "themeTags": list(tags)} + + +def test_random_multi_theme_exact_triple_success(monkeypatch) -> None: + _patch_commanders( + monkeypatch, + [_make_row("Triple Threat", ["aggro", "tokens", "equipment"])], + ) + + res = random_entrypoint.build_random_deck( + primary_theme="aggro", + secondary_theme="tokens", + tertiary_theme="equipment", + seed=1313, + ) + + assert res.commander == "Triple Threat" + assert res.resolved_themes == ["aggro", "tokens", "equipment"] + assert res.combo_fallback is False + assert res.synergy_fallback is False + assert res.fallback_reason is None + + +def test_random_multi_theme_fallback_to_ps(monkeypatch) -> None: + _patch_commanders( + monkeypatch, + [ + _make_row("PrimarySecondary", ["Aggro", "Tokens"]), + _make_row("Other Commander", ["Tokens", "Equipment"]), + ], + ) + + res = random_entrypoint.build_random_deck( + primary_theme="Aggro", + secondary_theme="Tokens", + tertiary_theme="Equipment", + seed=2024, + ) + + assert res.commander == "PrimarySecondary" + assert res.resolved_themes == ["Aggro", "Tokens"] + assert res.combo_fallback is True + assert res.synergy_fallback is False + assert "Primary+Secondary" in (res.fallback_reason or "") + + +def test_random_multi_theme_fallback_to_pt(monkeypatch) -> None: + _patch_commanders( + monkeypatch, + [ + _make_row("PrimaryTertiary", ["Aggro", "Equipment"]), + _make_row("Tokens Only", ["Tokens"]), + ], + ) + + res = random_entrypoint.build_random_deck( + primary_theme="Aggro", + secondary_theme="Tokens", + tertiary_theme="Equipment", + seed=777, + ) + + assert res.commander == "PrimaryTertiary" + assert res.resolved_themes == ["Aggro", "Equipment"] + assert res.combo_fallback is True + assert res.synergy_fallback is False + assert "Primary+Tertiary" in (res.fallback_reason or "") + + +def test_random_multi_theme_fallback_primary_only(monkeypatch) -> None: + _patch_commanders( + monkeypatch, + [ + _make_row("PrimarySolo", ["Aggro"]), + _make_row("Tokens Solo", ["Tokens"]), + ], 
+ ) + + res = random_entrypoint.build_random_deck( + primary_theme="Aggro", + secondary_theme="Tokens", + tertiary_theme="Equipment", + seed=9090, + ) + + assert res.commander == "PrimarySolo" + assert res.resolved_themes == ["Aggro"] + assert res.combo_fallback is True + assert res.synergy_fallback is False + assert "Primary only" in (res.fallback_reason or "") + + +def test_random_multi_theme_synergy_fallback(monkeypatch) -> None: + _patch_commanders( + monkeypatch, + [ + _make_row("Synergy Commander", ["aggro surge"]), + _make_row("Unrelated", ["tokens"]), + ], + ) + + res = random_entrypoint.build_random_deck( + primary_theme="aggro swarm", + secondary_theme="treasure", + tertiary_theme="artifacts", + seed=5150, + ) + + assert res.commander == "Synergy Commander" + assert res.resolved_themes == ["aggro", "swarm"] + assert res.combo_fallback is True + assert res.synergy_fallback is True + assert "synergy overlap" in (res.fallback_reason or "") + + +def test_random_multi_theme_full_pool_fallback(monkeypatch) -> None: + _patch_commanders( + monkeypatch, + [_make_row("Any Commander", ["control"])], + ) + + res = random_entrypoint.build_random_deck( + primary_theme="nonexistent", + secondary_theme="made up", + tertiary_theme="imaginary", + seed=6060, + ) + + assert res.commander == "Any Commander" + assert res.resolved_themes == [] + assert res.combo_fallback is True + assert res.synergy_fallback is True + assert "full commander pool" in (res.fallback_reason or "") + + +def test_random_multi_theme_sidecar_fields_present(monkeypatch, tmp_path) -> None: + export_dir = tmp_path / "exports" + export_dir.mkdir() + + commander_name = "Tri Commander" + _patch_commanders( + monkeypatch, + [_make_row(commander_name, ["Aggro", "Tokens", "Equipment"])], + ) + + import headless_runner + + def _fake_run( + command_name: str, + seed: int | None = None, + primary_choice: int | None = None, + secondary_choice: int | None = None, + tertiary_choice: int | None = None, + ): + 
base_path = export_dir / command_name.replace(" ", "_") + csv_path = base_path.with_suffix(".csv") + txt_path = base_path.with_suffix(".txt") + csv_path.write_text("Name\nCard\n", encoding="utf-8") + txt_path.write_text("Decklist", encoding="utf-8") + + class DummyBuilder: + def __init__(self) -> None: + self.commander_name = command_name + self.commander = command_name + self.selected_tags = ["Aggro", "Tokens", "Equipment"] + self.primary_tag = "Aggro" + self.secondary_tag = "Tokens" + self.tertiary_tag = "Equipment" + self.bracket_level = 3 + self.last_csv_path = str(csv_path) + self.last_txt_path = str(txt_path) + self.custom_export_base = command_name + + def build_deck_summary(self) -> dict[str, object]: + return {"meta": {"existing": True}, "counts": {"total": 100}} + + def compute_and_print_compliance(self, base_stem: str | None = None): + return {"ok": True} + + return DummyBuilder() + + monkeypatch.setattr(headless_runner, "run", _fake_run) + + result = random_entrypoint.build_random_full_deck( + primary_theme="Aggro", + secondary_theme="Tokens", + tertiary_theme="Equipment", + seed=4242, + ) + + assert result.summary is not None + meta = result.summary.get("meta") + assert meta is not None + assert meta["primary_theme"] == "Aggro" + assert meta["secondary_theme"] == "Tokens" + assert meta["tertiary_theme"] == "Equipment" + assert meta["resolved_themes"] == ["aggro", "tokens", "equipment"] + assert meta["combo_fallback"] is False + assert meta["synergy_fallback"] is False + assert meta["fallback_reason"] is None + + assert result.csv_path is not None + sidecar_path = Path(result.csv_path).with_suffix(".summary.json") + assert sidecar_path.is_file() + + payload = json.loads(sidecar_path.read_text(encoding="utf-8")) + sidecar_meta = payload["meta"] + assert sidecar_meta["primary_theme"] == "Aggro" + assert sidecar_meta["secondary_theme"] == "Tokens" + assert sidecar_meta["tertiary_theme"] == "Equipment" + assert sidecar_meta["resolved_themes"] == ["aggro", 
"tokens", "equipment"] + assert sidecar_meta["random_primary_theme"] == "Aggro" + assert sidecar_meta["random_resolved_themes"] == ["aggro", "tokens", "equipment"] + + # cleanup + sidecar_path.unlink(missing_ok=True) + Path(result.csv_path).unlink(missing_ok=True) + txt_candidate = Path(result.csv_path).with_suffix(".txt") + txt_candidate.unlink(missing_ok=True) \ No newline at end of file diff --git a/code/tests/test_random_multi_theme_seed_stability.py b/code/tests/test_random_multi_theme_seed_stability.py new file mode 100644 index 0000000..3fa4114 --- /dev/null +++ b/code/tests/test_random_multi_theme_seed_stability.py @@ -0,0 +1,46 @@ +from __future__ import annotations + +import os + +from deck_builder.random_entrypoint import build_random_deck + + +def _use_testdata(monkeypatch) -> None: + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) + + +def test_multi_theme_same_seed_same_result(monkeypatch) -> None: + _use_testdata(monkeypatch) + kwargs = { + "primary_theme": "Goblin Kindred", + "secondary_theme": "Token Swarm", + "tertiary_theme": "Treasure Support", + "seed": 4040, + } + res_a = build_random_deck(**kwargs) + res_b = build_random_deck(**kwargs) + + assert res_a.seed == res_b.seed == 4040 + assert res_a.commander == res_b.commander + assert res_a.resolved_themes == res_b.resolved_themes + + +def test_legacy_theme_and_primary_equivalence(monkeypatch) -> None: + _use_testdata(monkeypatch) + + legacy = build_random_deck(theme="Goblin Kindred", seed=5151) + multi = build_random_deck(primary_theme="Goblin Kindred", seed=5151) + + assert legacy.commander == multi.commander + assert legacy.seed == multi.seed == 5151 + + +def test_string_seed_coerces_to_int(monkeypatch) -> None: + _use_testdata(monkeypatch) + + result = build_random_deck(primary_theme="Goblin Kindred", seed="6262") + + assert result.seed == 6262 + # Sanity check that commander selection remains deterministic once coerced + repeat = 
build_random_deck(primary_theme="Goblin Kindred", seed="6262") + assert repeat.commander == result.commander diff --git a/code/tests/test_random_multi_theme_webflows.py b/code/tests/test_random_multi_theme_webflows.py new file mode 100644 index 0000000..2bc4ef1 --- /dev/null +++ b/code/tests/test_random_multi_theme_webflows.py @@ -0,0 +1,204 @@ +from __future__ import annotations + +import base64 +import json +import os +from typing import Any, Dict, Iterator, List +from urllib.parse import urlencode + +import importlib +import pytest +from fastapi.testclient import TestClient + +from deck_builder.random_entrypoint import RandomFullBuildResult + + +def _decode_state_token(token: str) -> Dict[str, Any]: + pad = "=" * (-len(token) % 4) + raw = base64.urlsafe_b64decode((token + pad).encode("ascii")).decode("utf-8") + return json.loads(raw) + + +@pytest.fixture() +def client(monkeypatch: pytest.MonkeyPatch) -> Iterator[TestClient]: + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("RANDOM_UI", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) + + web_app_module = importlib.import_module("code.web.app") + web_app_module = importlib.reload(web_app_module) + from code.web.services import tasks + + tasks._SESSIONS.clear() + with TestClient(web_app_module.app) as test_client: + yield test_client + tasks._SESSIONS.clear() + + +def _make_full_result(seed: int) -> RandomFullBuildResult: + return RandomFullBuildResult( + seed=seed, + commander=f"Commander-{seed}", + theme="Aggro", + constraints={}, + primary_theme="Aggro", + secondary_theme="Tokens", + tertiary_theme="Equipment", + resolved_themes=["aggro", "tokens", "equipment"], + combo_fallback=False, + synergy_fallback=False, + fallback_reason=None, + decklist=[{"name": "Sample Card", "count": 1}], + diagnostics={"elapsed_ms": 5}, + summary={"meta": {"existing": True}}, + csv_path=None, + txt_path=None, + compliance=None, + ) + + +def 
test_random_multi_theme_reroll_same_commander_preserves_resolved(client: TestClient, monkeypatch: pytest.MonkeyPatch) -> None: + import deck_builder.random_entrypoint as random_entrypoint + import headless_runner + from code.web.services import tasks + + build_calls: List[Dict[str, Any]] = [] + + def fake_build_random_full_deck(*, theme, constraints, seed, attempts, timeout_s, primary_theme, secondary_theme, tertiary_theme): + build_calls.append( + { + "theme": theme, + "primary": primary_theme, + "secondary": secondary_theme, + "tertiary": tertiary_theme, + "seed": seed, + } + ) + return _make_full_result(int(seed)) + + monkeypatch.setattr(random_entrypoint, "build_random_full_deck", fake_build_random_full_deck) + + class DummyBuilder: + def __init__(self, commander: str, seed: int) -> None: + self.commander_name = commander + self.commander = commander + self.deck_list_final: List[Dict[str, Any]] = [] + self.last_csv_path = None + self.last_txt_path = None + self.custom_export_base = commander + + def build_deck_summary(self) -> Dict[str, Any]: + return {"meta": {"rebuild": True}} + + def export_decklist_csv(self) -> str: + return "deck_files/placeholder.csv" + + def export_decklist_text(self, filename: str | None = None) -> str: + return "deck_files/placeholder.txt" + + def compute_and_print_compliance(self, base_stem: str | None = None) -> Dict[str, Any]: + return {"ok": True} + + reroll_runs: List[Dict[str, Any]] = [] + + def fake_run(command_name: str, seed: int | None = None): + reroll_runs.append({"commander": command_name, "seed": seed}) + return DummyBuilder(command_name, seed or 0) + + monkeypatch.setattr(headless_runner, "run", fake_run) + + tasks._SESSIONS.clear() + + resp1 = client.post( + "/hx/random_reroll", + json={ + "mode": "surprise", + "primary_theme": "Aggro", + "secondary_theme": "Tokens", + "tertiary_theme": "Equipment", + "seed": 1010, + }, + ) + assert resp1.status_code == 200, resp1.text + assert build_calls and build_calls[0]["primary"] 
== "Aggro" + assert "value=\"aggro||tokens||equipment\"" in resp1.text + + sid = client.cookies.get("sid") + assert sid + session = tasks.get_session(sid) + resolved_list = session.get("random_build", {}).get("resolved_theme_info", {}).get("resolved_list") + assert resolved_list == ["aggro", "tokens", "equipment"] + + commander = f"Commander-{build_calls[0]['seed']}" + form_payload = [ + ("mode", "reroll_same_commander"), + ("commander", commander), + ("seed", str(build_calls[0]["seed"])), + ("resolved_themes", "aggro||tokens||equipment"), + ] + encoded = urlencode(form_payload, doseq=True) + resp2 = client.post( + "/hx/random_reroll", + content=encoded, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + assert resp2.status_code == 200, resp2.text + assert len(build_calls) == 1 + assert reroll_runs and reroll_runs[0]["commander"] == commander + assert "value=\"aggro||tokens||equipment\"" in resp2.text + + session_after = tasks.get_session(sid) + resolved_after = session_after.get("random_build", {}).get("resolved_theme_info", {}).get("resolved_list") + assert resolved_after == ["aggro", "tokens", "equipment"] + + +def test_random_multi_theme_permalink_roundtrip(client: TestClient, monkeypatch: pytest.MonkeyPatch) -> None: + import deck_builder.random_entrypoint as random_entrypoint + from code.web.services import tasks + + seeds_seen: List[int] = [] + + def fake_build_random_full_deck(*, theme, constraints, seed, attempts, timeout_s, primary_theme, secondary_theme, tertiary_theme): + seeds_seen.append(int(seed)) + return _make_full_result(int(seed)) + + monkeypatch.setattr(random_entrypoint, "build_random_full_deck", fake_build_random_full_deck) + + tasks._SESSIONS.clear() + + resp = client.post( + "/api/random_full_build", + json={ + "seed": 4242, + "primary_theme": "Aggro", + "secondary_theme": "Tokens", + "tertiary_theme": "Equipment", + }, + ) + assert resp.status_code == 200, resp.text + body = resp.json() + assert body["primary_theme"] == 
"Aggro" + assert body["secondary_theme"] == "Tokens" + assert body["tertiary_theme"] == "Equipment" + assert body["resolved_themes"] == ["aggro", "tokens", "equipment"] + permalink = body["permalink"] + assert permalink and permalink.startswith("/build/from?state=") + + visit = client.get(permalink) + assert visit.status_code == 200 + + state_resp = client.get("/build/permalink") + assert state_resp.status_code == 200, state_resp.text + state_payload = state_resp.json() + token = state_payload["permalink"].split("state=", 1)[1] + decoded = _decode_state_token(token) + random_section = decoded.get("random") or {} + assert random_section.get("primary_theme") == "Aggro" + assert random_section.get("secondary_theme") == "Tokens" + assert random_section.get("tertiary_theme") == "Equipment" + assert random_section.get("resolved_themes") == ["aggro", "tokens", "equipment"] + requested = random_section.get("requested_themes") or {} + assert requested.get("primary") == "Aggro" + assert requested.get("secondary") == "Tokens" + assert requested.get("tertiary") == "Equipment" + assert seeds_seen == [4242] \ No newline at end of file diff --git a/code/tests/test_random_reroll_endpoints.py b/code/tests/test_random_reroll_endpoints.py index 26cc901..8ef13e7 100644 --- a/code/tests/test_random_reroll_endpoints.py +++ b/code/tests/test_random_reroll_endpoints.py @@ -32,9 +32,76 @@ def test_api_random_reroll_increments_seed(client: TestClient): assert data2.get("permalink") +def test_api_random_reroll_auto_fill_metadata(client: TestClient): + r1 = client.post("/api/random_full_build", json={"seed": 555, "primary_theme": "Aggro"}) + assert r1.status_code == 200, r1.text + + r2 = client.post( + "/api/random_reroll", + json={"seed": 555, "primary_theme": "Aggro", "auto_fill_enabled": True}, + ) + assert r2.status_code == 200, r2.text + data = r2.json() + assert data.get("auto_fill_enabled") is True + assert data.get("auto_fill_secondary_enabled") is True + assert 
data.get("auto_fill_tertiary_enabled") is True + assert data.get("auto_fill_applied") in (True, False) + assert isinstance(data.get("auto_filled_themes"), list) + assert data.get("requested_themes", {}).get("auto_fill_enabled") is True + assert data.get("requested_themes", {}).get("auto_fill_secondary_enabled") is True + assert data.get("requested_themes", {}).get("auto_fill_tertiary_enabled") is True + assert "display_themes" in data + + +def test_api_random_reroll_secondary_only_auto_fill(client: TestClient): + r1 = client.post( + "/api/random_reroll", + json={ + "seed": 777, + "primary_theme": "Aggro", + "auto_fill_secondary_enabled": True, + "auto_fill_tertiary_enabled": False, + }, + ) + assert r1.status_code == 200, r1.text + data = r1.json() + assert data.get("auto_fill_enabled") is True + assert data.get("auto_fill_secondary_enabled") is True + assert data.get("auto_fill_tertiary_enabled") is False + assert data.get("auto_fill_applied") in (True, False) + assert isinstance(data.get("auto_filled_themes"), list) + requested = data.get("requested_themes", {}) + assert requested.get("auto_fill_enabled") is True + assert requested.get("auto_fill_secondary_enabled") is True + assert requested.get("auto_fill_tertiary_enabled") is False + + +def test_api_random_reroll_tertiary_requires_secondary(client: TestClient): + r1 = client.post( + "/api/random_reroll", + json={ + "seed": 778, + "primary_theme": "Aggro", + "auto_fill_secondary_enabled": False, + "auto_fill_tertiary_enabled": True, + }, + ) + assert r1.status_code == 200, r1.text + data = r1.json() + assert data.get("auto_fill_enabled") is True + assert data.get("auto_fill_secondary_enabled") is True + assert data.get("auto_fill_tertiary_enabled") is True + assert data.get("auto_fill_applied") in (True, False) + assert isinstance(data.get("auto_filled_themes"), list) + requested = data.get("requested_themes", {}) + assert requested.get("auto_fill_enabled") is True + assert 
requested.get("auto_fill_secondary_enabled") is True + assert requested.get("auto_fill_tertiary_enabled") is True + + def test_hx_random_reroll_returns_html(client: TestClient): headers = {"HX-Request": "true", "Content-Type": "application/json"} - r = client.post("/hx/random_reroll", data=json.dumps({"seed": 42}), headers=headers) + r = client.post("/hx/random_reroll", content=json.dumps({"seed": 42}), headers=headers) assert r.status_code == 200, r.text # Accept either HTML fragment or JSON fallback content_type = r.headers.get("content-type", "") diff --git a/code/tests/test_random_reroll_locked_artifacts.py b/code/tests/test_random_reroll_locked_artifacts.py index 808d668..6dd134f 100644 --- a/code/tests/test_random_reroll_locked_artifacts.py +++ b/code/tests/test_random_reroll_locked_artifacts.py @@ -35,7 +35,7 @@ def test_locked_reroll_generates_summary_and_compliance(): start = time.time() # Locked reroll via HTMX path (form style) form_body = f"seed={seed}&commander={commander}&mode=reroll_same_commander" - r2 = c.post('/hx/random_reroll', data=form_body, headers={'Content-Type':'application/x-www-form-urlencoded'}) + r2 = c.post('/hx/random_reroll', content=form_body, headers={'Content-Type':'application/x-www-form-urlencoded'}) assert r2.status_code == 200, r2.text # Look for new sidecar/compliance created after start diff --git a/code/tests/test_random_reroll_locked_commander.py b/code/tests/test_random_reroll_locked_commander.py index c2752d0..439419a 100644 --- a/code/tests/test_random_reroll_locked_commander.py +++ b/code/tests/test_random_reroll_locked_commander.py @@ -23,14 +23,14 @@ def test_reroll_keeps_commander(): # First reroll with commander lock headers = {'Content-Type': 'application/json'} body = json.dumps({'seed': seed, 'commander': commander, 'mode': 'reroll_same_commander'}) - r2 = client.post('/hx/random_reroll', data=body, headers=headers) + r2 = client.post('/hx/random_reroll', content=body, headers=headers) assert r2.status_code == 
200 html1 = r2.text assert commander in html1 # Second reroll should keep same commander (seed increments so prior +1 used on server) body2 = json.dumps({'seed': seed + 1, 'commander': commander, 'mode': 'reroll_same_commander'}) - r3 = client.post('/hx/random_reroll', data=body2, headers=headers) + r3 = client.post('/hx/random_reroll', content=body2, headers=headers) assert r3.status_code == 200 html2 = r3.text assert commander in html2 diff --git a/code/tests/test_random_reroll_locked_commander_form.py b/code/tests/test_random_reroll_locked_commander_form.py index 93958be..781f34d 100644 --- a/code/tests/test_random_reroll_locked_commander_form.py +++ b/code/tests/test_random_reroll_locked_commander_form.py @@ -20,12 +20,12 @@ def test_reroll_keeps_commander_form_encoded(): seed = data1['seed'] form_body = f"seed={seed}&commander={quote_plus(commander)}&mode=reroll_same_commander" - r2 = client.post('/hx/random_reroll', data=form_body, headers={'Content-Type': 'application/x-www-form-urlencoded'}) + r2 = client.post('/hx/random_reroll', content=form_body, headers={'Content-Type': 'application/x-www-form-urlencoded'}) assert r2.status_code == 200 assert commander in r2.text # second reroll with incremented seed form_body2 = f"seed={seed+1}&commander={quote_plus(commander)}&mode=reroll_same_commander" - r3 = client.post('/hx/random_reroll', data=form_body2, headers={'Content-Type': 'application/x-www-form-urlencoded'}) + r3 = client.post('/hx/random_reroll', content=form_body2, headers={'Content-Type': 'application/x-www-form-urlencoded'}) assert r3.status_code == 200 assert commander in r3.text \ No newline at end of file diff --git a/code/tests/test_random_reroll_locked_no_duplicate_exports.py b/code/tests/test_random_reroll_locked_no_duplicate_exports.py index a76831b..da33845 100644 --- a/code/tests/test_random_reroll_locked_no_duplicate_exports.py +++ b/code/tests/test_random_reroll_locked_no_duplicate_exports.py @@ -19,7 +19,7 @@ def 
test_locked_reroll_single_export(): commander = r.json()['commander'] before_csvs = set(glob.glob('deck_files/*.csv')) form_body = f"seed={seed}&commander={commander}&mode=reroll_same_commander" - r2 = c.post('/hx/random_reroll', data=form_body, headers={'Content-Type':'application/x-www-form-urlencoded'}) + r2 = c.post('/hx/random_reroll', content=form_body, headers={'Content-Type':'application/x-www-form-urlencoded'}) assert r2.status_code == 200 after_csvs = set(glob.glob('deck_files/*.csv')) new_csvs = after_csvs - before_csvs diff --git a/code/tests/test_random_reroll_throttle.py b/code/tests/test_random_reroll_throttle.py new file mode 100644 index 0000000..7a0b97d --- /dev/null +++ b/code/tests/test_random_reroll_throttle.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import os +import time + +import pytest +from fastapi.testclient import TestClient + + +@pytest.fixture() +def throttle_client(monkeypatch): + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("RANDOM_UI", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) + + import code.web.app as app_module + + # Ensure feature flags and globals reflect the test configuration + app_module.RANDOM_MODES = True + app_module.RANDOM_UI = True + app_module.RATE_LIMIT_ENABLED = False + + # Keep existing values so we can restore after the test + prev_ms = app_module.RANDOM_REROLL_THROTTLE_MS + prev_seconds = app_module._REROLL_THROTTLE_SECONDS + + app_module.RANDOM_REROLL_THROTTLE_MS = 50 + app_module._REROLL_THROTTLE_SECONDS = 0.05 + + app_module._RL_COUNTS.clear() + + with TestClient(app_module.app) as client: + yield client, app_module + + # Restore globals for other tests + app_module.RANDOM_REROLL_THROTTLE_MS = prev_ms + app_module._REROLL_THROTTLE_SECONDS = prev_seconds + app_module._RL_COUNTS.clear() + + +def test_random_reroll_session_throttle(throttle_client): + client, app_module = throttle_client + + # First reroll succeeds and seeds the session 
timestamp + first = client.post("/api/random_reroll", json={"seed": 5000}) + assert first.status_code == 200, first.text + assert "sid" in client.cookies + + # Immediate follow-up should hit the throttle guard + second = client.post("/api/random_reroll", json={"seed": 5001}) + assert second.status_code == 429 + retry_after = second.headers.get("Retry-After") + assert retry_after is not None + assert int(retry_after) >= 1 + + # After waiting slightly longer than the throttle window, requests succeed again + time.sleep(0.06) + third = client.post("/api/random_reroll", json={"seed": 5002}) + assert third.status_code == 200, third.text + assert int(third.json().get("seed")) >= 5002 + + # Telemetry shouldn't record fallback for the throttle rejection + metrics_snapshot = app_module._RANDOM_METRICS.get("reroll") + assert metrics_snapshot is not None + assert metrics_snapshot.get("error", 0) == 0 \ No newline at end of file diff --git a/code/tests/test_random_surprise_reroll_behavior.py b/code/tests/test_random_surprise_reroll_behavior.py new file mode 100644 index 0000000..2c08438 --- /dev/null +++ b/code/tests/test_random_surprise_reroll_behavior.py @@ -0,0 +1,178 @@ +from __future__ import annotations + +import importlib +import itertools +import os +from typing import Any + +from fastapi.testclient import TestClient + + +def _make_stub_result(seed: int | None, theme: Any, primary: Any, secondary: Any = None, tertiary: Any = None): + class _Result: + pass + + res = _Result() + res.seed = int(seed) if seed is not None else 0 + res.commander = f"Commander-{res.seed}" + res.decklist = [] + res.theme = theme + res.primary_theme = primary + res.secondary_theme = secondary + res.tertiary_theme = tertiary + res.resolved_themes = [t for t in [primary, secondary, tertiary] if t] + res.combo_fallback = True if primary and primary != theme else False + res.synergy_fallback = False + res.fallback_reason = "fallback" if res.combo_fallback else None + res.constraints = {} + 
res.diagnostics = {} + res.summary = None + res.theme_fallback = bool(res.combo_fallback or res.synergy_fallback) + res.csv_path = None + res.txt_path = None + res.compliance = None + res.original_theme = theme + return res + + +def test_surprise_reuses_requested_theme(monkeypatch): + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("RANDOM_UI", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) + + random_util = importlib.import_module("random_util") + seed_iter = itertools.count(1000) + monkeypatch.setattr(random_util, "generate_seed", lambda: next(seed_iter)) + + random_entrypoint = importlib.import_module("deck_builder.random_entrypoint") + build_calls: list[dict[str, Any]] = [] + + def fake_build_random_full_deck(*, theme, constraints, seed, attempts, timeout_s, primary_theme, secondary_theme, tertiary_theme): + build_calls.append({ + "theme": theme, + "primary": primary_theme, + "secondary": secondary_theme, + "tertiary": tertiary_theme, + "seed": seed, + }) + return _make_stub_result(seed, theme, "ResolvedTokens") + + monkeypatch.setattr(random_entrypoint, "build_random_full_deck", fake_build_random_full_deck) + + web_app_module = importlib.import_module("code.web.app") + web_app_module = importlib.reload(web_app_module) + + client = TestClient(web_app_module.app) + + # Initial surprise request with explicit theme + resp1 = client.post("/hx/random_reroll", json={"mode": "surprise", "primary_theme": "Tokens"}) + assert resp1.status_code == 200 + assert build_calls[0]["primary"] == "Tokens" + assert build_calls[0]["theme"] == "Tokens" + + # Subsequent surprise request without providing themes should reuse requested input, not resolved fallback + resp2 = client.post("/hx/random_reroll", json={"mode": "surprise"}) + assert resp2.status_code == 200 + assert len(build_calls) == 2 + assert build_calls[1]["primary"] == "Tokens" + assert build_calls[1]["theme"] == "Tokens" + + +def 
test_reroll_same_commander_uses_resolved_cache(monkeypatch): + monkeypatch.setenv("RANDOM_MODES", "1") + monkeypatch.setenv("RANDOM_UI", "1") + monkeypatch.setenv("CSV_FILES_DIR", os.path.join("csv_files", "testdata")) + + random_util = importlib.import_module("random_util") + seed_iter = itertools.count(2000) + monkeypatch.setattr(random_util, "generate_seed", lambda: next(seed_iter)) + + random_entrypoint = importlib.import_module("deck_builder.random_entrypoint") + build_calls: list[dict[str, Any]] = [] + + def fake_build_random_full_deck(*, theme, constraints, seed, attempts, timeout_s, primary_theme, secondary_theme, tertiary_theme): + build_calls.append({ + "theme": theme, + "primary": primary_theme, + "seed": seed, + }) + return _make_stub_result(seed, theme, "ResolvedArtifacts") + + monkeypatch.setattr(random_entrypoint, "build_random_full_deck", fake_build_random_full_deck) + + headless_runner = importlib.import_module("headless_runner") + locked_runs: list[dict[str, Any]] = [] + + class DummyBuilder: + def __init__(self, commander: str): + self.commander_name = commander + self.commander = commander + self.deck_list_final: list[Any] = [] + self.last_csv_path = None + self.last_txt_path = None + self.custom_export_base = None + + def build_deck_summary(self): + return None + + def export_decklist_csv(self): + return None + + def export_decklist_text(self, filename: str | None = None): # pragma: no cover - optional path + return None + + def compute_and_print_compliance(self, base_stem: str | None = None): # pragma: no cover - optional path + return None + + def fake_run(command_name: str, seed: int | None = None): + locked_runs.append({"commander": command_name, "seed": seed}) + return DummyBuilder(command_name) + + monkeypatch.setattr(headless_runner, "run", fake_run) + + web_app_module = importlib.import_module("code.web.app") + web_app_module = importlib.reload(web_app_module) + from code.web.services import tasks + + tasks._SESSIONS.clear() + client = 
TestClient(web_app_module.app) + + # Initial surprise build to populate session cache + resp1 = client.post("/hx/random_reroll", json={"mode": "surprise", "primary_theme": "Artifacts"}) + assert resp1.status_code == 200 + assert build_calls[0]["primary"] == "Artifacts" + commander_name = f"Commander-{build_calls[0]['seed']}" + first_seed = build_calls[0]["seed"] + + form_payload = [ + ("mode", "reroll_same_commander"), + ("commander", commander_name), + ("seed", str(first_seed)), + ("primary_theme", "ResolvedArtifacts"), + ("primary_theme", "UserOverride"), + ("resolved_themes", "ResolvedArtifacts"), + ] + + from urllib.parse import urlencode + + encoded = urlencode(form_payload, doseq=True) + resp2 = client.post( + "/hx/random_reroll", + content=encoded, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + ) + assert resp2.status_code == 200 + assert resp2.request.headers.get("Content-Type") == "application/x-www-form-urlencoded" + assert len(locked_runs) == 1 # headless runner invoked once + assert len(build_calls) == 1 # no additional filter build + + # Hidden input should reflect resolved theme, not user override + assert 'id="current-primary-theme"' in resp2.text + assert 'value="ResolvedArtifacts"' in resp2.text + assert "UserOverride" not in resp2.text + + sid = client.cookies.get("sid") + assert sid + session = tasks.get_session(sid) + requested = session.get("random_build", {}).get("requested_themes") or {} + assert requested.get("primary") == "Artifacts" diff --git a/code/tests/test_random_theme_stats_diagnostics.py b/code/tests/test_random_theme_stats_diagnostics.py new file mode 100644 index 0000000..5602ba4 --- /dev/null +++ b/code/tests/test_random_theme_stats_diagnostics.py @@ -0,0 +1,37 @@ +import sys +from pathlib import Path + +from fastapi.testclient import TestClient + +from code.web import app as web_app # type: ignore +from code.web.app import app # type: ignore + +# Ensure project root on sys.path for absolute imports +ROOT = 
Path(__file__).resolve().parents[2] +if str(ROOT) not in sys.path: + sys.path.insert(0, str(ROOT)) + + +def _make_client() -> TestClient: + return TestClient(app) + + +def test_theme_stats_requires_diagnostics_flag(monkeypatch): + monkeypatch.setattr(web_app, "SHOW_DIAGNOSTICS", False) + client = _make_client() + resp = client.get("/status/random_theme_stats") + assert resp.status_code == 404 + + +def test_theme_stats_payload_includes_core_fields(monkeypatch): + monkeypatch.setattr(web_app, "SHOW_DIAGNOSTICS", True) + client = _make_client() + resp = client.get("/status/random_theme_stats") + assert resp.status_code == 200 + payload = resp.json() + assert payload.get("ok") is True + stats = payload.get("stats") or {} + assert "commanders" in stats + assert "unique_tokens" in stats + assert "total_assignments" in stats + assert isinstance(stats.get("top_tokens"), list) \ No newline at end of file diff --git a/code/tests/test_random_theme_tag_cache.py b/code/tests/test_random_theme_tag_cache.py new file mode 100644 index 0000000..2f7fb1c --- /dev/null +++ b/code/tests/test_random_theme_tag_cache.py @@ -0,0 +1,39 @@ +import pandas as pd + +from deck_builder.random_entrypoint import _ensure_theme_tag_cache, _filter_multi + + +def _build_df() -> pd.DataFrame: + data = { + "name": ["Alpha", "Beta", "Gamma"], + "themeTags": [ + ["Aggro", "Tokens"], + ["LifeGain", "Control"], + ["Artifacts", "Combo"], + ], + } + df = pd.DataFrame(data) + return _ensure_theme_tag_cache(df) + + +def test_and_filter_uses_cached_index(): + df = _build_df() + filtered, diag = _filter_multi(df, "Aggro", "Tokens", None) + + assert list(filtered["name"].values) == ["Alpha"] + assert diag["resolved_themes"] == ["Aggro", "Tokens"] + assert not diag["combo_fallback"] + assert "aggro" in df.attrs["_ltag_index"] + assert "tokens" in df.attrs["_ltag_index"] + + +def test_synergy_fallback_partial_match_uses_index_union(): + df = _build_df() + + filtered, diag = _filter_multi(df, "Life Gain", None, None) 
+ + assert list(filtered["name"].values) == ["Beta"] + assert diag["combo_fallback"] + assert diag["synergy_fallback"] + assert diag["resolved_themes"] == ["life", "gain"] + assert diag["fallback_reason"] is not None diff --git a/code/web/app.py b/code/web/app.py index 4730e6c..b89755b 100644 --- a/code/web/app.py +++ b/code/web/app.py @@ -10,9 +10,10 @@ import json as _json import time import uuid import logging +import math from starlette.exceptions import HTTPException as StarletteHTTPException from starlette.middleware.gzip import GZipMiddleware -from typing import Any, Optional, Dict +from typing import Any, Optional, Dict, Iterable, Mapping from contextlib import asynccontextmanager from .services.combo_utils import detect_all as _detect_all from .services.theme_catalog_loader import prewarm_common_filters # type: ignore @@ -124,6 +125,7 @@ RATE_LIMIT_RANDOM = _as_int(os.getenv("RANDOM_RATE_LIMIT_RANDOM"), 10) RATE_LIMIT_BUILD = _as_int(os.getenv("RANDOM_RATE_LIMIT_BUILD"), 10) RATE_LIMIT_SUGGEST = _as_int(os.getenv("RANDOM_RATE_LIMIT_SUGGEST"), 30) RANDOM_STRUCTURED_LOGS = _as_bool(os.getenv("RANDOM_STRUCTURED_LOGS"), False) +RANDOM_REROLL_THROTTLE_MS = _as_int(os.getenv("RANDOM_REROLL_THROTTLE_MS"), 350) # Simple theme input validation constraints _THEME_MAX_LEN = 60 @@ -153,6 +155,71 @@ def _sanitize_theme(raw: Optional[str]) -> Optional[str]: return None return s + +def _sanitize_bool(raw: Any, *, default: Optional[bool] = None) -> Optional[bool]: + """Coerce assorted truthy/falsey payloads into booleans. + + Accepts booleans, ints, and common string forms ("1", "0", "true", "false", "on", "off"). + Returns `default` when the value is None or cannot be interpreted. 
+ """ + + if raw is None: + return default + if isinstance(raw, bool): + return raw + if isinstance(raw, (int, float)): + if raw == 0: + return False + if raw == 1: + return True + try: + text = str(raw).strip().lower() + except Exception: + return default + if text in {"1", "true", "yes", "on", "y"}: + return True + if text in {"0", "false", "no", "off", "n", ""}: + return False + return default + + +def _parse_auto_fill_flags( + source: Mapping[str, Any] | None, + *, + default_enabled: Optional[bool] = None, + default_secondary: Optional[bool] = None, + default_tertiary: Optional[bool] = None, +) -> tuple[bool, bool, bool]: + """Resolve auto-fill booleans from payload with graceful fallbacks.""" + + data: Mapping[str, Any] = source or {} + enabled_raw = _sanitize_bool(data.get("auto_fill_enabled"), default=default_enabled) + secondary_raw = _sanitize_bool(data.get("auto_fill_secondary_enabled"), default=None) + tertiary_raw = _sanitize_bool(data.get("auto_fill_tertiary_enabled"), default=None) + + def _resolve(value: Optional[bool], fallback: Optional[bool]) -> bool: + if value is None: + if enabled_raw is not None: + return bool(enabled_raw) + if fallback is not None: + return bool(fallback) + return False + return bool(value) + + secondary = _resolve(secondary_raw, default_secondary) + tertiary = _resolve(tertiary_raw, default_tertiary) + + if tertiary and not secondary: + secondary = True + if not secondary: + tertiary = False + + if enabled_raw is None: + enabled = bool(secondary or tertiary) + else: + enabled = bool(enabled_raw) + return enabled, secondary, tertiary + # Theme default from environment: THEME=light|dark|system (case-insensitive). Defaults to system. 
_THEME_ENV = (os.getenv("THEME") or "").strip().lower() DEFAULT_THEME = "system" @@ -174,6 +241,7 @@ templates.env.globals.update({ "random_ui": RANDOM_UI, "random_max_attempts": RANDOM_MAX_ATTEMPTS, "random_timeout_ms": RANDOM_TIMEOUT_MS, + "random_reroll_throttle_ms": int(RANDOM_REROLL_THROTTLE_MS), "theme_picker_diagnostics": THEME_PICKER_DIAGNOSTICS, }) @@ -200,6 +268,62 @@ _RANDOM_METRICS: dict[str, dict[str, int]] = { "reroll": {"success": 0, "fallback": 0, "constraints_impossible": 0, "error": 0}, } +_REROLL_THROTTLE_SECONDS = max(0.0, max(0, int(RANDOM_REROLL_THROTTLE_MS)) / 1000.0) +_RANDOM_USAGE_METRICS: dict[str, int] = { + "surprise": 0, + "theme": 0, + "reroll": 0, + "reroll_same_commander": 0, +} +_RANDOM_FALLBACK_METRICS: dict[str, int] = { + "none": 0, + "combo": 0, + "synergy": 0, + "combo_and_synergy": 0, +} +_RANDOM_FALLBACK_REASONS: dict[str, int] = {} + + +def _record_random_usage_event(mode: str, combo_fallback: bool, synergy_fallback: bool, fallback_reason: Any) -> None: + if not RANDOM_TELEMETRY: + return + try: + key = mode or "unknown" + _RANDOM_USAGE_METRICS[key] = int(_RANDOM_USAGE_METRICS.get(key, 0)) + 1 + fallback_key = "none" + if combo_fallback and synergy_fallback: + fallback_key = "combo_and_synergy" + elif combo_fallback: + fallback_key = "combo" + elif synergy_fallback: + fallback_key = "synergy" + _RANDOM_FALLBACK_METRICS[fallback_key] = int(_RANDOM_FALLBACK_METRICS.get(fallback_key, 0)) + 1 + if fallback_reason: + reason = str(fallback_reason) + if len(reason) > 80: + reason = reason[:80] + _RANDOM_FALLBACK_REASONS[reason] = int(_RANDOM_FALLBACK_REASONS.get(reason, 0)) + 1 + except Exception: + pass + + +def _classify_usage_mode(mode: Optional[str], theme_values: Iterable[Optional[str]], locked_commander: Optional[str]) -> str: + has_theme = False + try: + has_theme = any(bool((val or "").strip()) for val in theme_values) + except Exception: + has_theme = False + normalized_mode = (mode or "").strip().lower() + if 
locked_commander: + return "reroll_same_commander" + if has_theme: + return "theme" + if normalized_mode.startswith("reroll"): + return "reroll" + if normalized_mode == "theme": + return "theme" + return "surprise" + def _record_random_event(kind: str, *, success: bool = False, fallback: bool = False, constraints_impossible: bool = False, error: bool = False) -> None: if not RANDOM_TELEMETRY: return @@ -254,6 +378,35 @@ def _client_ip(request: Request) -> str: pass return "unknown" + +def _enforce_random_session_throttle(request: Request) -> None: + if _REROLL_THROTTLE_SECONDS <= 0: + return + sid = request.cookies.get("sid") + if not sid: + return + try: + sess = get_session(sid) + except Exception: + return + rb = sess.get("random_build") if isinstance(sess, dict) else None + if not isinstance(rb, dict): + return + last_ts = rb.get("last_random_request_ts") + if last_ts is None: + return + try: + last_time = float(last_ts) + except Exception: + return + now = time.time() + delta = now - last_time + if delta < _REROLL_THROTTLE_SECONDS: + retry_after = max(1, int(math.ceil(_REROLL_THROTTLE_SECONDS - delta))) + raise HTTPException(status_code=429, detail="random_mode_throttled", headers={ + "Retry-After": str(retry_after), + }) + def rate_limit_check(request: Request, group: str) -> tuple[int, int] | None: """Check and increment rate limit for (ip, group). 
@@ -328,31 +481,275 @@ def _ensure_session(request: Request) -> tuple[str, dict[str, Any], bool]: return sid, sess, had_cookie -def _update_random_session(request: Request, *, seed: int, theme: Any, constraints: Any) -> tuple[str, bool]: - """Update session with latest random build seed/theme/constraints and maintain a bounded recent list.""" +def _update_random_session( + request: Request, + *, + seed: int, + theme: Any, + constraints: Any, + requested_themes: dict[str, Any] | None = None, + resolved_themes: Any = None, + auto_fill_enabled: Optional[bool] = None, + auto_fill_secondary_enabled: Optional[bool] = None, + auto_fill_tertiary_enabled: Optional[bool] = None, + strict_theme_match: Optional[bool] = None, + auto_fill_applied: Optional[bool] = None, + auto_filled_themes: Optional[Iterable[Any]] = None, + display_themes: Optional[Iterable[Any]] = None, + request_timestamp: Optional[float] = None, +) -> tuple[str, bool]: + """Update session with latest random build context and maintain a bounded recent list.""" + sid, sess, had_cookie = _ensure_session(request) rb = dict(sess.get("random_build") or {}) + rb["seed"] = int(seed) if theme is not None: rb["theme"] = theme if constraints is not None: rb["constraints"] = constraints + if strict_theme_match is not None: + rb["strict_theme_match"] = bool(strict_theme_match) + + def _coerce_str_list(values: Iterable[Any]) -> list[str]: + cleaned: list[str] = [] + for item in values: + if item is None: + continue + try: + text = str(item).strip() + except Exception: + continue + if text: + cleaned.append(text) + return cleaned + + requested_copy: dict[str, Any] = {} + if requested_themes is not None and isinstance(requested_themes, dict): + requested_copy = dict(requested_themes) + elif isinstance(rb.get("requested_themes"), dict): + requested_copy = dict(rb.get("requested_themes")) # type: ignore[arg-type] + + if "auto_fill_enabled" in requested_copy: + afe = _sanitize_bool(requested_copy.get("auto_fill_enabled"), 
default=None) + if afe is None: + requested_copy.pop("auto_fill_enabled", None) + else: + requested_copy["auto_fill_enabled"] = bool(afe) + if auto_fill_enabled is not None: + requested_copy["auto_fill_enabled"] = bool(auto_fill_enabled) + + if "strict_theme_match" in requested_copy: + stm = _sanitize_bool(requested_copy.get("strict_theme_match"), default=None) + if stm is None: + requested_copy.pop("strict_theme_match", None) + else: + requested_copy["strict_theme_match"] = bool(stm) + if strict_theme_match is not None: + requested_copy["strict_theme_match"] = bool(strict_theme_match) + + if "auto_fill_secondary_enabled" in requested_copy: + afs = _sanitize_bool(requested_copy.get("auto_fill_secondary_enabled"), default=None) + if afs is None: + requested_copy.pop("auto_fill_secondary_enabled", None) + else: + requested_copy["auto_fill_secondary_enabled"] = bool(afs) + if auto_fill_secondary_enabled is not None: + requested_copy["auto_fill_secondary_enabled"] = bool(auto_fill_secondary_enabled) + + if "auto_fill_tertiary_enabled" in requested_copy: + aft = _sanitize_bool(requested_copy.get("auto_fill_tertiary_enabled"), default=None) + if aft is None: + requested_copy.pop("auto_fill_tertiary_enabled", None) + else: + requested_copy["auto_fill_tertiary_enabled"] = bool(aft) + if auto_fill_tertiary_enabled is not None: + requested_copy["auto_fill_tertiary_enabled"] = bool(auto_fill_tertiary_enabled) + + if requested_copy: + rb["requested_themes"] = requested_copy + + req_primary = requested_copy.get("primary") if requested_copy else None + req_secondary = requested_copy.get("secondary") if requested_copy else None + req_tertiary = requested_copy.get("tertiary") if requested_copy else None + if req_primary: + rb.setdefault("primary_theme", req_primary) + if req_secondary: + rb.setdefault("secondary_theme", req_secondary) + if req_tertiary: + rb.setdefault("tertiary_theme", req_tertiary) + + resolved_info: dict[str, Any] | None = None + if resolved_themes is not None: 
+ if isinstance(resolved_themes, dict): + resolved_info = dict(resolved_themes) + elif isinstance(resolved_themes, list): + resolved_info = {"resolved_list": list(resolved_themes)} + else: + resolved_info = {"resolved_list": [resolved_themes] if resolved_themes else []} + elif isinstance(rb.get("resolved_theme_info"), dict): + resolved_info = dict(rb.get("resolved_theme_info")) # type: ignore[arg-type] + + if resolved_info is None: + resolved_info = {} + + if auto_fill_enabled is not None: + resolved_info["auto_fill_enabled"] = bool(auto_fill_enabled) + if auto_fill_secondary_enabled is not None: + resolved_info["auto_fill_secondary_enabled"] = bool(auto_fill_secondary_enabled) + if auto_fill_tertiary_enabled is not None: + resolved_info["auto_fill_tertiary_enabled"] = bool(auto_fill_tertiary_enabled) + if auto_fill_applied is not None: + resolved_info["auto_fill_applied"] = bool(auto_fill_applied) + if auto_filled_themes is not None: + resolved_info["auto_filled_themes"] = _coerce_str_list(auto_filled_themes) + if display_themes is not None: + resolved_info["display_list"] = _coerce_str_list(display_themes) + + rb["resolved_theme_info"] = resolved_info + + resolved_list = resolved_info.get("resolved_list") + if isinstance(resolved_list, list): + rb["resolved_themes"] = list(resolved_list) + primary_resolved = resolved_info.get("primary") + secondary_resolved = resolved_info.get("secondary") + tertiary_resolved = resolved_info.get("tertiary") + if primary_resolved: + rb["primary_theme"] = primary_resolved + if secondary_resolved: + rb["secondary_theme"] = secondary_resolved + if tertiary_resolved: + rb["tertiary_theme"] = tertiary_resolved + if "combo_fallback" in resolved_info: + rb["combo_fallback"] = bool(resolved_info.get("combo_fallback")) + if "synergy_fallback" in resolved_info: + rb["synergy_fallback"] = bool(resolved_info.get("synergy_fallback")) + if "fallback_reason" in resolved_info and resolved_info.get("fallback_reason") is not None: + 
rb["fallback_reason"] = resolved_info.get("fallback_reason") + if "display_list" in resolved_info and isinstance(resolved_info.get("display_list"), list): + rb["display_themes"] = list(resolved_info.get("display_list") or []) + if "auto_fill_enabled" in resolved_info and resolved_info.get("auto_fill_enabled") is not None: + rb["auto_fill_enabled"] = bool(resolved_info.get("auto_fill_enabled")) + if "auto_fill_secondary_enabled" in resolved_info and resolved_info.get("auto_fill_secondary_enabled") is not None: + rb["auto_fill_secondary_enabled"] = bool(resolved_info.get("auto_fill_secondary_enabled")) + if "auto_fill_tertiary_enabled" in resolved_info and resolved_info.get("auto_fill_tertiary_enabled") is not None: + rb["auto_fill_tertiary_enabled"] = bool(resolved_info.get("auto_fill_tertiary_enabled")) + if "auto_fill_enabled" not in rb: + rb["auto_fill_enabled"] = bool(rb.get("auto_fill_secondary_enabled") or rb.get("auto_fill_tertiary_enabled")) + if "auto_fill_applied" in resolved_info and resolved_info.get("auto_fill_applied") is not None: + rb["auto_fill_applied"] = bool(resolved_info.get("auto_fill_applied")) + if "auto_filled_themes" in resolved_info and resolved_info.get("auto_filled_themes") is not None: + rb["auto_filled_themes"] = list(resolved_info.get("auto_filled_themes") or []) + + if display_themes is not None: + rb["display_themes"] = _coerce_str_list(display_themes) + if auto_fill_applied is not None: + rb["auto_fill_applied"] = bool(auto_fill_applied) + if auto_filled_themes is not None: + rb["auto_filled_themes"] = _coerce_str_list(auto_filled_themes) + recent = list(rb.get("recent_seeds") or []) - # Append and keep last 10 unique (most-recent-first) recent.append(int(seed)) - # Dedupe while preserving order from the right (most recent) - seen = set() + seen: set[int] = set() dedup_rev: list[int] = [] for s in reversed(recent): if s in seen: continue seen.add(s) dedup_rev.append(s) - dedup = list(reversed(dedup_rev)) - rb["recent_seeds"] = 
dedup[-10:] + rb["recent_seeds"] = list(reversed(dedup_rev))[-10:] + + if request_timestamp is not None: + try: + rb["last_random_request_ts"] = float(request_timestamp) + except Exception: + pass + set_session_value(sid, "random_build", rb) return sid, had_cookie + +def _get_random_session_themes(request: Request) -> tuple[dict[str, Any], dict[str, Any]]: + """Retrieve previously requested and resolved theme data without mutating the session state.""" + sid = request.cookies.get("sid") + if not sid: + return {}, {} + try: + sess = get_session(sid) + except Exception: + return {}, {} + rb = sess.get("random_build") or {} + requested = dict(rb.get("requested_themes") or {}) + if "auto_fill_enabled" in requested: + requested["auto_fill_enabled"] = bool(_sanitize_bool(requested.get("auto_fill_enabled"), default=False)) + elif rb.get("auto_fill_enabled") is not None: + requested["auto_fill_enabled"] = bool(rb.get("auto_fill_enabled")) + + if "auto_fill_secondary_enabled" in requested: + requested["auto_fill_secondary_enabled"] = bool(_sanitize_bool(requested.get("auto_fill_secondary_enabled"), default=requested.get("auto_fill_enabled", False))) + elif rb.get("auto_fill_secondary_enabled") is not None: + requested["auto_fill_secondary_enabled"] = bool(rb.get("auto_fill_secondary_enabled")) + + if "auto_fill_tertiary_enabled" in requested: + requested["auto_fill_tertiary_enabled"] = bool(_sanitize_bool(requested.get("auto_fill_tertiary_enabled"), default=requested.get("auto_fill_enabled", False))) + elif rb.get("auto_fill_tertiary_enabled") is not None: + requested["auto_fill_tertiary_enabled"] = bool(rb.get("auto_fill_tertiary_enabled")) + + if "strict_theme_match" in requested: + requested["strict_theme_match"] = bool(_sanitize_bool(requested.get("strict_theme_match"), default=False)) + elif rb.get("strict_theme_match") is not None: + requested["strict_theme_match"] = bool(rb.get("strict_theme_match")) + + resolved: dict[str, Any] = {} + raw_resolved = 
rb.get("resolved_theme_info") + if isinstance(raw_resolved, dict): + resolved = dict(raw_resolved) + else: + legacy_resolved = rb.get("resolved_themes") + if isinstance(legacy_resolved, dict): + resolved = dict(legacy_resolved) + elif isinstance(legacy_resolved, list): + resolved = {"resolved_list": list(legacy_resolved)} + else: + resolved = {} + + if "resolved_list" not in resolved or not isinstance(resolved.get("resolved_list"), list): + candidates = [requested.get("primary"), requested.get("secondary"), requested.get("tertiary")] + resolved["resolved_list"] = [t for t in candidates if t] + if "primary" not in resolved and rb.get("primary_theme"): + resolved["primary"] = rb.get("primary_theme") + if "secondary" not in resolved and rb.get("secondary_theme"): + resolved["secondary"] = rb.get("secondary_theme") + if "tertiary" not in resolved and rb.get("tertiary_theme"): + resolved["tertiary"] = rb.get("tertiary_theme") + if "combo_fallback" not in resolved and rb.get("combo_fallback") is not None: + resolved["combo_fallback"] = bool(rb.get("combo_fallback")) + if "synergy_fallback" not in resolved and rb.get("synergy_fallback") is not None: + resolved["synergy_fallback"] = bool(rb.get("synergy_fallback")) + if "fallback_reason" not in resolved and rb.get("fallback_reason") is not None: + resolved["fallback_reason"] = rb.get("fallback_reason") + if "display_list" not in resolved and isinstance(rb.get("display_themes"), list): + resolved["display_list"] = list(rb.get("display_themes") or []) + if "auto_fill_enabled" in resolved: + resolved["auto_fill_enabled"] = bool(_sanitize_bool(resolved.get("auto_fill_enabled"), default=False)) + elif rb.get("auto_fill_enabled") is not None: + resolved["auto_fill_enabled"] = bool(rb.get("auto_fill_enabled")) + if "auto_fill_secondary_enabled" in resolved: + resolved["auto_fill_secondary_enabled"] = bool(_sanitize_bool(resolved.get("auto_fill_secondary_enabled"), default=resolved.get("auto_fill_enabled", False))) + elif 
rb.get("auto_fill_secondary_enabled") is not None: + resolved["auto_fill_secondary_enabled"] = bool(rb.get("auto_fill_secondary_enabled")) + if "auto_fill_tertiary_enabled" in resolved: + resolved["auto_fill_tertiary_enabled"] = bool(_sanitize_bool(resolved.get("auto_fill_tertiary_enabled"), default=resolved.get("auto_fill_enabled", False))) + elif rb.get("auto_fill_tertiary_enabled") is not None: + resolved["auto_fill_tertiary_enabled"] = bool(rb.get("auto_fill_tertiary_enabled")) + if "auto_fill_applied" in resolved: + resolved["auto_fill_applied"] = bool(_sanitize_bool(resolved.get("auto_fill_applied"), default=False)) + elif rb.get("auto_fill_applied") is not None: + resolved["auto_fill_applied"] = bool(rb.get("auto_fill_applied")) + if "auto_filled_themes" not in resolved and isinstance(rb.get("auto_filled_themes"), list): + resolved["auto_filled_themes"] = list(rb.get("auto_filled_themes") or []) + return requested, resolved + def _toggle_seed_favorite(sid: str, seed: int) -> list[int]: """Toggle a seed in the favorites list and persist. 
Returns updated favorites.""" sess = get_session(sid) @@ -435,6 +832,7 @@ async def status_sys(): "RANDOM_RATE_LIMIT_RANDOM": int(RATE_LIMIT_RANDOM), "RANDOM_RATE_LIMIT_BUILD": int(RATE_LIMIT_BUILD), "RANDOM_RATE_LIMIT_SUGGEST": int(RATE_LIMIT_SUGGEST), + "RANDOM_REROLL_THROTTLE_MS": int(RANDOM_REROLL_THROTTLE_MS), }, } except Exception: @@ -447,10 +845,31 @@ async def status_random_metrics(): return JSONResponse({"ok": False, "error": "telemetry_disabled"}, status_code=403) # Return a shallow copy to avoid mutation from clients out = {k: dict(v) for k, v in _RANDOM_METRICS.items()} - return JSONResponse({"ok": True, "metrics": out}) + usage = { + "modes": dict(_RANDOM_USAGE_METRICS), + "fallbacks": dict(_RANDOM_FALLBACK_METRICS), + "fallback_reasons": dict(_RANDOM_FALLBACK_REASONS), + } + return JSONResponse({"ok": True, "metrics": out, "usage": usage}) except Exception: return JSONResponse({"ok": False, "metrics": {}}, status_code=500) +@app.get("/status/random_theme_stats") +async def status_random_theme_stats(): + if not SHOW_DIAGNOSTICS: + raise HTTPException(status_code=404, detail="Not Found") + try: + from deck_builder.random_entrypoint import get_theme_tag_stats # type: ignore + + stats = get_theme_tag_stats() + return JSONResponse({"ok": True, "stats": stats}) + except HTTPException: + raise + except Exception as exc: # pragma: no cover - defensive log + logging.getLogger("web").warning("Failed to build random theme stats: %s", exc, exc_info=True) + return JSONResponse({"ok": False, "error": "internal_error"}, status_code=500) + + def random_modes_enabled() -> bool: """Dynamic check so tests that set env after import still work. 
@@ -467,6 +886,7 @@ async def api_random_build(request: Request): t0 = time.time() # Optional rate limiting (count this request per-IP) rl = rate_limit_check(request, "build") + _enforce_random_session_throttle(request) body = {} try: body = await request.json() @@ -474,8 +894,15 @@ async def api_random_build(request: Request): body = {} except Exception: body = {} - theme = body.get("theme") - theme = _sanitize_theme(theme) + legacy_theme = _sanitize_theme(body.get("theme")) + primary_theme = _sanitize_theme(body.get("primary_theme")) + secondary_theme = _sanitize_theme(body.get("secondary_theme")) + tertiary_theme = _sanitize_theme(body.get("tertiary_theme")) + auto_fill_enabled, auto_fill_secondary_enabled, auto_fill_tertiary_enabled = _parse_auto_fill_flags(body) + strict_theme_match = bool(_sanitize_bool(body.get("strict_theme_match"), default=False)) + if primary_theme is None: + primary_theme = legacy_theme + theme = primary_theme or legacy_theme constraints = body.get("constraints") seed = body.get("seed") attempts = body.get("attempts", int(RANDOM_MAX_ATTEMPTS)) @@ -487,12 +914,21 @@ async def api_random_build(request: Request): timeout_s = max(0.1, float(RANDOM_TIMEOUT_MS) / 1000.0) # Import on-demand to avoid heavy costs at module import time from deck_builder.random_entrypoint import build_random_deck, RandomConstraintsImpossibleError # type: ignore + from deck_builder.random_entrypoint import RandomThemeNoMatchError # type: ignore + res = build_random_deck( theme=theme, constraints=constraints, seed=seed, attempts=int(attempts), timeout_s=float(timeout_s), + primary_theme=primary_theme, + secondary_theme=secondary_theme, + tertiary_theme=tertiary_theme, + auto_fill_missing=bool(auto_fill_enabled), + auto_fill_secondary=auto_fill_secondary_enabled, + auto_fill_tertiary=auto_fill_tertiary_enabled, + strict_theme_match=strict_theme_match, ) rid = getattr(request.state, "request_id", None) _record_random_event("build", success=True) @@ -511,6 +947,20 @@ 
async def api_random_build(request: Request): "seed": int(res.seed), "commander": res.commander, "theme": res.theme, + "primary_theme": getattr(res, "primary_theme", None), + "secondary_theme": getattr(res, "secondary_theme", None), + "tertiary_theme": getattr(res, "tertiary_theme", None), + "resolved_themes": list(getattr(res, "resolved_themes", []) or []), + "display_themes": list(getattr(res, "display_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + "strict_theme_match": bool(getattr(res, "strict_theme_match", False)), "constraints": res.constraints or {}, "attempts": int(attempts), "timeout_ms": int(timeout_ms), @@ -525,6 +975,14 @@ async def api_random_build(request: Request): except Exception: pass return resp + except RandomThemeNoMatchError as ex: + _record_random_event("build", error=True) + _log_random_event("build", request, "strict_no_match", reason=str(ex)) + raise HTTPException(status_code=422, detail={ + "error": "strict_theme_no_match", + "message": str(ex), + "strict": True, + }) except HTTPException: raise except RandomConstraintsImpossibleError as ex: @@ -553,8 +1011,29 @@ async def api_random_full_build(request: Request): body = {} except Exception: body = {} - theme = body.get("theme") - theme = _sanitize_theme(theme) + cached_requested, _cached_resolved = _get_random_session_themes(request) + legacy_theme = _sanitize_theme(body.get("theme")) + primary_theme = 
_sanitize_theme(body.get("primary_theme"))
+    secondary_theme = _sanitize_theme(body.get("secondary_theme"))
+    tertiary_theme = _sanitize_theme(body.get("tertiary_theme"))
+    cached_enabled = _sanitize_bool(cached_requested.get("auto_fill_enabled"), default=False)
+    cached_secondary = _sanitize_bool(cached_requested.get("auto_fill_secondary_enabled"), default=cached_enabled)
+    cached_tertiary = _sanitize_bool(cached_requested.get("auto_fill_tertiary_enabled"), default=cached_enabled)
+    auto_fill_enabled, auto_fill_secondary_enabled, auto_fill_tertiary_enabled = _parse_auto_fill_flags(
+        body,
+        default_enabled=cached_enabled,
+        default_secondary=cached_secondary,
+        default_tertiary=cached_tertiary,
+    )
+    cached_strict = _sanitize_bool(cached_requested.get("strict_theme_match"), default=False)
+    strict_sanitized = _sanitize_bool(body.get("strict_theme_match"), default=cached_strict)
+    strict_theme_match = bool(strict_sanitized) if strict_sanitized is not None else bool(cached_strict)
+    cached_strict = _sanitize_bool(cached_requested.get("strict_theme_match"), default=False)
+    strict_theme_match_raw = _sanitize_bool(body.get("strict_theme_match"), default=cached_strict)
+    strict_theme_match = bool(strict_theme_match_raw) if strict_theme_match_raw is not None else bool(cached_strict)
+    if primary_theme is None:
+        primary_theme = legacy_theme
+    theme = primary_theme or legacy_theme
     constraints = body.get("constraints")
     seed = body.get("seed")
     attempts = body.get("attempts", int(RANDOM_MAX_ATTEMPTS))
@@ -573,8 +1052,42 @@ async def api_random_full_build(request: Request):
         seed=seed,
         attempts=int(attempts),
         timeout_s=float(timeout_s),
+        primary_theme=primary_theme,
+        secondary_theme=secondary_theme,
+        tertiary_theme=tertiary_theme,
+        auto_fill_missing=bool(auto_fill_enabled),
+        auto_fill_secondary=auto_fill_secondary_enabled,
+        auto_fill_tertiary=auto_fill_tertiary_enabled,
+        strict_theme_match=strict_theme_match,
+    )
+    requested_themes = {
+        "primary": primary_theme,
+        "secondary": 
secondary_theme, + "tertiary": tertiary_theme, + "legacy": legacy_theme, + } + requested_themes["auto_fill_enabled"] = bool(auto_fill_enabled) + requested_themes["auto_fill_secondary_enabled"] = bool(auto_fill_secondary_enabled) + requested_themes["auto_fill_tertiary_enabled"] = bool(auto_fill_tertiary_enabled) + requested_themes["strict_theme_match"] = bool(strict_theme_match) + resolved_theme_info = { + "primary": getattr(res, "primary_theme", None), + "secondary": getattr(res, "secondary_theme", None), + "tertiary": getattr(res, "tertiary_theme", None), + "resolved_list": list(getattr(res, "resolved_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "display_list": list(getattr(res, "display_themes", []) or []), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + } + resolved_theme_info["strict_theme_match"] = bool(getattr(res, "strict_theme_match", False)) + # Create a permalink token reusing the existing format from /build/permalink payload = { "commander": res.commander, @@ -583,6 +1096,21 @@ async def api_random_full_build(request: Request): "seed": int(res.seed), "theme": res.theme, "constraints": res.constraints or {}, + "primary_theme": getattr(res, "primary_theme", None), + "secondary_theme": getattr(res, "secondary_theme", None), + "tertiary_theme": getattr(res, "tertiary_theme", None), + "resolved_themes": list(getattr(res, "resolved_themes", []) or []), + "display_themes": list(getattr(res, "display_themes", []) or []), + "combo_fallback": 
bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + "strict_theme_match": bool(getattr(res, "strict_theme_match", False)), + "requested_themes": requested_themes, }, } try: @@ -593,11 +1121,32 @@ async def api_random_full_build(request: Request): except Exception: permalink = None + usage_mode = _classify_usage_mode("full_build", [primary_theme, secondary_theme, tertiary_theme, legacy_theme], None) + combo_flag = bool(getattr(res, "combo_fallback", False)) + synergy_flag = bool(getattr(res, "synergy_fallback", False)) + _record_random_usage_event(usage_mode, combo_flag, synergy_flag, getattr(res, "fallback_reason", None)) + # Persist to session (so recent seeds includes initial seed) - sid, had_cookie = _update_random_session(request, seed=int(res.seed), theme=res.theme, constraints=res.constraints or {}) + request_timestamp = time.time() + sid, had_cookie = _update_random_session( + request, + seed=int(res.seed), + theme=res.theme, + constraints=res.constraints or {}, + requested_themes=requested_themes, + resolved_themes=resolved_theme_info, + auto_fill_enabled=auto_fill_enabled, + auto_fill_secondary_enabled=auto_fill_secondary_enabled, + auto_fill_tertiary_enabled=auto_fill_tertiary_enabled, + strict_theme_match=strict_theme_match, + auto_fill_applied=bool(getattr(res, "auto_fill_applied", False)), + auto_filled_themes=getattr(res, "auto_filled_themes", None), + display_themes=getattr(res, "display_themes", None), + 
request_timestamp=request_timestamp, + ) rid = getattr(request.state, "request_id", None) _record_random_event("full_build", success=True, fallback=bool(getattr(res, "theme_fallback", False))) - elapsed_ms = int(round((time.time() - t0) * 1000)) + elapsed_ms = int(round((request_timestamp - t0) * 1000)) _log_random_event( "full_build", request, @@ -614,6 +1163,20 @@ async def api_random_full_build(request: Request): "commander": res.commander, "decklist": res.decklist or [], "theme": res.theme, + "primary_theme": getattr(res, "primary_theme", None), + "secondary_theme": getattr(res, "secondary_theme", None), + "tertiary_theme": getattr(res, "tertiary_theme", None), + "resolved_themes": list(getattr(res, "resolved_themes", []) or []), + "display_themes": list(getattr(res, "display_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + "strict_theme_match": bool(getattr(res, "strict_theme_match", False)), "constraints": res.constraints or {}, "permalink": permalink, "attempts": int(attempts), @@ -621,6 +1184,8 @@ async def api_random_full_build(request: Request): "diagnostics": res.diagnostics or {}, "fallback": bool(getattr(res, "theme_fallback", False)), "original_theme": getattr(res, "original_theme", None), + "requested_themes": requested_themes, + "resolved_theme_info": resolved_theme_info, "summary": getattr(res, "summary", None), "csv_path": getattr(res, "csv_path", None), "txt_path": getattr(res, "txt_path", 
None), @@ -657,9 +1222,11 @@ async def api_random_reroll(request: Request): # Gate behind feature flag if not random_modes_enabled(): raise HTTPException(status_code=404, detail="Random Modes disabled") + strict_theme_match = False try: t0 = time.time() rl = rate_limit_check(request, "random") + _enforce_random_session_throttle(request) body = {} try: body = await request.json() @@ -667,8 +1234,33 @@ async def api_random_reroll(request: Request): body = {} except Exception: body = {} - theme = body.get("theme") - theme = _sanitize_theme(theme) + cached_requested, _cached_resolved = _get_random_session_themes(request) + legacy_theme = _sanitize_theme(body.get("theme")) + primary_theme = _sanitize_theme(body.get("primary_theme")) + secondary_theme = _sanitize_theme(body.get("secondary_theme")) + tertiary_theme = _sanitize_theme(body.get("tertiary_theme")) + cached_enabled = _sanitize_bool(cached_requested.get("auto_fill_enabled"), default=False) + cached_secondary = _sanitize_bool(cached_requested.get("auto_fill_secondary_enabled"), default=cached_enabled) + cached_tertiary = _sanitize_bool(cached_requested.get("auto_fill_tertiary_enabled"), default=cached_enabled) + auto_fill_enabled, auto_fill_secondary_enabled, auto_fill_tertiary_enabled = _parse_auto_fill_flags( + body, + default_enabled=cached_enabled, + default_secondary=cached_secondary, + default_tertiary=cached_tertiary, + ) + if primary_theme is None: + primary_theme = legacy_theme + # Fallback to cached session preferences when no themes provided + if primary_theme is None and secondary_theme is None and tertiary_theme is None: + if not primary_theme: + primary_theme = _sanitize_theme(cached_requested.get("primary")) + if not secondary_theme: + secondary_theme = _sanitize_theme(cached_requested.get("secondary")) + if not tertiary_theme: + tertiary_theme = _sanitize_theme(cached_requested.get("tertiary")) + if not legacy_theme: + legacy_theme = _sanitize_theme(cached_requested.get("legacy")) + theme = 
primary_theme or legacy_theme constraints = body.get("constraints") last_seed = body.get("seed") # Simple deterministic reroll policy: increment prior seed when provided; else generate fresh @@ -695,14 +1287,63 @@ async def api_random_reroll(request: Request): seed=new_seed, attempts=int(attempts), timeout_s=float(timeout_s), + primary_theme=primary_theme, + secondary_theme=secondary_theme, + tertiary_theme=tertiary_theme, + auto_fill_missing=bool(auto_fill_enabled), + auto_fill_secondary=auto_fill_secondary_enabled, + auto_fill_tertiary=auto_fill_tertiary_enabled, + strict_theme_match=strict_theme_match, ) + requested_themes = { + "primary": primary_theme, + "secondary": secondary_theme, + "tertiary": tertiary_theme, + "legacy": legacy_theme, + } + requested_themes["auto_fill_enabled"] = bool(auto_fill_enabled) + requested_themes["auto_fill_secondary_enabled"] = bool(auto_fill_secondary_enabled) + requested_themes["auto_fill_tertiary_enabled"] = bool(auto_fill_tertiary_enabled) + requested_themes["strict_theme_match"] = bool(strict_theme_match) + resolved_theme_info = { + "primary": getattr(res, "primary_theme", None), + "secondary": getattr(res, "secondary_theme", None), + "tertiary": getattr(res, "tertiary_theme", None), + "resolved_list": list(getattr(res, "resolved_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "display_list": list(getattr(res, "display_themes", []) or []), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + "strict_theme_match": 
bool(getattr(res, "strict_theme_match", strict_theme_match)), + } + payload = { "commander": res.commander, "random": { "seed": int(res.seed), "theme": res.theme, "constraints": res.constraints or {}, + "primary_theme": getattr(res, "primary_theme", None), + "secondary_theme": getattr(res, "secondary_theme", None), + "tertiary_theme": getattr(res, "tertiary_theme", None), + "resolved_themes": list(getattr(res, "resolved_themes", []) or []), + "display_themes": list(getattr(res, "display_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + "strict_theme_match": bool(getattr(res, "strict_theme_match", strict_theme_match)), + "requested_themes": requested_themes, }, } try: @@ -713,11 +1354,32 @@ async def api_random_reroll(request: Request): except Exception: permalink = None + usage_mode = _classify_usage_mode("reroll", [primary_theme, secondary_theme, tertiary_theme, legacy_theme], None) + combo_flag = bool(getattr(res, "combo_fallback", False)) + synergy_flag = bool(getattr(res, "synergy_fallback", False)) + _record_random_usage_event(usage_mode, combo_flag, synergy_flag, getattr(res, "fallback_reason", None)) + # Persist in session and set sid cookie if we just created it - sid, had_cookie = _update_random_session(request, seed=int(res.seed), theme=res.theme, constraints=res.constraints or {}) + request_timestamp = time.time() + sid, had_cookie = _update_random_session( + request, + seed=int(res.seed), + 
theme=res.theme, + constraints=res.constraints or {}, + requested_themes=requested_themes, + resolved_themes=resolved_theme_info, + auto_fill_enabled=auto_fill_enabled, + auto_fill_secondary_enabled=auto_fill_secondary_enabled, + auto_fill_tertiary_enabled=auto_fill_tertiary_enabled, + strict_theme_match=bool(getattr(res, "strict_theme_match", strict_theme_match)), + auto_fill_applied=bool(getattr(res, "auto_fill_applied", False)), + auto_filled_themes=getattr(res, "auto_filled_themes", None), + display_themes=getattr(res, "display_themes", None), + request_timestamp=request_timestamp, + ) rid = getattr(request.state, "request_id", None) _record_random_event("reroll", success=True, fallback=bool(getattr(res, "theme_fallback", False))) - elapsed_ms = int(round((time.time() - t0) * 1000)) + elapsed_ms = int(round((request_timestamp - t0) * 1000)) _log_random_event( "reroll", request, @@ -736,12 +1398,28 @@ async def api_random_reroll(request: Request): "commander": res.commander, "decklist": res.decklist or [], "theme": res.theme, + "primary_theme": getattr(res, "primary_theme", None), + "secondary_theme": getattr(res, "secondary_theme", None), + "tertiary_theme": getattr(res, "tertiary_theme", None), + "resolved_themes": list(getattr(res, "resolved_themes", []) or []), + "display_themes": list(getattr(res, "display_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + "strict_theme_match": bool(getattr(res, 
"strict_theme_match", strict_theme_match)), "constraints": res.constraints or {}, "permalink": permalink, "attempts": int(attempts), "timeout_ms": int(timeout_ms), "diagnostics": res.diagnostics or {}, "summary": getattr(res, "summary", None), + "requested_themes": requested_themes, + "resolved_theme_info": resolved_theme_info, "request_id": rid, }) if rl: @@ -772,6 +1450,7 @@ async def hx_random_reroll(request: Request): if not RANDOM_UI or not RANDOM_MODES: raise HTTPException(status_code=404, detail="Random UI disabled") rl = rate_limit_check(request, "random") + _enforce_random_session_throttle(request) body: Dict[str, Any] = {} raw_text = "" # Primary: attempt JSON @@ -796,14 +1475,187 @@ async def hx_random_reroll(request: Request): body = flat or {} except Exception: body = {} - last_seed = body.get("seed") - mode = body.get("mode") # "surprise" (default) vs "reroll_same_commander" - locked_commander = body.get("commander") if mode == "reroll_same_commander" else None - theme = body.get("theme") - theme = _sanitize_theme(theme) + def _first_value(val: Any) -> Any: + if isinstance(val, list): + return val[0] if val else None + return val + + def _extract_theme_field(field: str) -> tuple[Optional[str], bool]: + present = field in body + val = body.get(field) + if isinstance(val, list): + for item in val: + sanitized = _sanitize_theme(item) + if sanitized is not None: + return sanitized, True + return None, present + return _sanitize_theme(val), present + + def _extract_resolved_list(val: Any) -> list[str]: + items: list[str] = [] + if isinstance(val, list): + for entry in val: + if isinstance(entry, str): + parts = [seg.strip() for seg in entry.split("||") if seg.strip()] + if parts: + items.extend(parts) + elif isinstance(val, str): + items = [seg.strip() for seg in val.split("||") if seg.strip()] + return items + + last_seed = _first_value(body.get("seed")) + raw_mode = _first_value(body.get("mode")) + mode = "surprise" + if raw_mode is not None: + if 
isinstance(raw_mode, str): + raw_mode_str = raw_mode.strip() + if raw_mode_str.startswith("{") and raw_mode_str.endswith("}"): + try: + parsed_mode = _json.loads(raw_mode_str) + candidate = parsed_mode.get("mode") if isinstance(parsed_mode, dict) else None + if isinstance(candidate, str) and candidate.strip(): + mode = candidate.strip().lower() + else: + mode = raw_mode_str.lower() + except Exception: + mode = raw_mode_str.lower() + else: + mode = raw_mode_str.lower() + else: + mode = str(raw_mode).strip().lower() or "surprise" + if not mode: + mode = "surprise" + raw_commander = _first_value(body.get("commander")) + locked_commander: Optional[str] = None + if isinstance(raw_commander, str): + candidate = raw_commander.strip() + locked_commander = candidate if candidate else None + elif raw_commander is not None: + candidate = str(raw_commander).strip() + locked_commander = candidate if candidate else None + cached_requested, cached_resolved = _get_random_session_themes(request) + cached_enabled = _sanitize_bool(cached_requested.get("auto_fill_enabled"), default=False) + cached_secondary = _sanitize_bool(cached_requested.get("auto_fill_secondary_enabled"), default=cached_enabled) + cached_tertiary = _sanitize_bool(cached_requested.get("auto_fill_tertiary_enabled"), default=cached_enabled) + flag_source = { + "auto_fill_enabled": _first_value(body.get("auto_fill_enabled")), + "auto_fill_secondary_enabled": _first_value(body.get("auto_fill_secondary_enabled")), + "auto_fill_tertiary_enabled": _first_value(body.get("auto_fill_tertiary_enabled")), + } + auto_fill_enabled, auto_fill_secondary_enabled, auto_fill_tertiary_enabled = _parse_auto_fill_flags( + flag_source, + default_enabled=cached_enabled, + default_secondary=cached_secondary, + default_tertiary=cached_tertiary, + ) + cached_strict = _sanitize_bool(cached_requested.get("strict_theme_match"), default=False) + strict_raw = _first_value(body.get("strict_theme_match")) + strict_sanitized = 
_sanitize_bool(strict_raw, default=cached_strict) + strict_theme_match = bool(strict_sanitized) if strict_sanitized is not None else bool(cached_strict) + legacy_theme, legacy_provided = _extract_theme_field("theme") + primary_theme, primary_provided = _extract_theme_field("primary_theme") + secondary_theme, secondary_provided = _extract_theme_field("secondary_theme") + tertiary_theme, tertiary_provided = _extract_theme_field("tertiary_theme") + resolved_list_from_request = _extract_resolved_list(body.get("resolved_themes")) + if primary_theme is None and legacy_theme is not None: + primary_theme = legacy_theme + if not primary_provided and not secondary_provided and not tertiary_provided: + cached_primary = _sanitize_theme(cached_requested.get("primary")) + cached_secondary = _sanitize_theme(cached_requested.get("secondary")) + cached_tertiary = _sanitize_theme(cached_requested.get("tertiary")) + cached_legacy = _sanitize_theme(cached_requested.get("legacy")) + if primary_theme is None and cached_primary: + primary_theme = cached_primary + if secondary_theme is None and cached_secondary: + secondary_theme = cached_secondary + if tertiary_theme is None and cached_tertiary: + tertiary_theme = cached_tertiary + if legacy_theme is None and not legacy_provided and cached_legacy: + legacy_theme = cached_legacy + theme = primary_theme or legacy_theme + is_reroll_same = bool(locked_commander) + if not theme and is_reroll_same: + theme = _sanitize_theme(cached_resolved.get("primary")) or _sanitize_theme(cached_requested.get("primary")) constraints = body.get("constraints") - attempts_override = body.get("attempts") - timeout_ms_override = body.get("timeout_ms") + if isinstance(constraints, list): + constraints = constraints[0] + requested_themes: Optional[Dict[str, Any]] + if is_reroll_same: + requested_themes = dict(cached_requested) if cached_requested else None + if not requested_themes: + candidate_requested = { + "primary": primary_theme, + "secondary": 
secondary_theme, + "tertiary": tertiary_theme, + "legacy": legacy_theme, + } + if any(candidate_requested.values()): + requested_themes = candidate_requested + else: + requested_themes = { + "primary": primary_theme, + "secondary": secondary_theme, + "tertiary": tertiary_theme, + "legacy": legacy_theme, + } + if requested_themes is not None: + requested_themes["auto_fill_enabled"] = bool(auto_fill_enabled) + requested_themes["auto_fill_secondary_enabled"] = bool(auto_fill_secondary_enabled) + requested_themes["auto_fill_tertiary_enabled"] = bool(auto_fill_tertiary_enabled) + requested_themes["strict_theme_match"] = bool(strict_theme_match) + raw_cached_resolved_list = cached_resolved.get("resolved_list") + if isinstance(raw_cached_resolved_list, list): + cached_resolved_list = list(raw_cached_resolved_list) + elif isinstance(raw_cached_resolved_list, str): + cached_resolved_list = [seg.strip() for seg in raw_cached_resolved_list.split("||") if seg.strip()] + else: + cached_resolved_list = [] + cached_display_list = cached_resolved.get("display_list") + if isinstance(cached_display_list, list): + cached_display = list(cached_display_list) + elif isinstance(cached_display_list, str): + cached_display = [seg.strip() for seg in cached_display_list.split("||") if seg.strip()] + else: + cached_display = [] + cached_auto_filled = cached_resolved.get("auto_filled_themes") + if isinstance(cached_auto_filled, list): + cached_auto_filled_list = list(cached_auto_filled) + else: + cached_auto_filled_list = [] + resolved_theme_info: Dict[str, Any] = { + "primary": cached_resolved.get("primary"), + "secondary": cached_resolved.get("secondary"), + "tertiary": cached_resolved.get("tertiary"), + "resolved_list": cached_resolved_list, + "combo_fallback": bool(cached_resolved.get("combo_fallback")), + "synergy_fallback": bool(cached_resolved.get("synergy_fallback")), + "fallback_reason": cached_resolved.get("fallback_reason"), + "display_list": cached_display, + 
"auto_fill_secondary_enabled": bool(_sanitize_bool(cached_resolved.get("auto_fill_secondary_enabled"), default=auto_fill_secondary_enabled)), + "auto_fill_tertiary_enabled": bool(_sanitize_bool(cached_resolved.get("auto_fill_tertiary_enabled"), default=auto_fill_tertiary_enabled)), + "auto_fill_enabled": bool(_sanitize_bool(cached_resolved.get("auto_fill_enabled"), default=auto_fill_enabled)), + "auto_fill_applied": bool(_sanitize_bool(cached_resolved.get("auto_fill_applied"), default=False)), + "auto_filled_themes": cached_auto_filled_list, + "strict_theme_match": bool(_sanitize_bool(cached_resolved.get("strict_theme_match"), default=strict_theme_match)), + } + if not resolved_theme_info["primary"] and primary_theme: + resolved_theme_info["primary"] = primary_theme + if not resolved_theme_info["secondary"] and secondary_theme: + resolved_theme_info["secondary"] = secondary_theme + if not resolved_theme_info["tertiary"] and tertiary_theme: + resolved_theme_info["tertiary"] = tertiary_theme + if not resolved_theme_info["resolved_list"]: + if resolved_list_from_request: + resolved_theme_info["resolved_list"] = resolved_list_from_request + else: + resolved_theme_info["resolved_list"] = [t for t in [primary_theme, secondary_theme, tertiary_theme] if t] + if not resolved_theme_info.get("display_list"): + resolved_theme_info["display_list"] = list(resolved_theme_info.get("resolved_list") or []) + resolved_theme_info["auto_fill_enabled"] = bool(auto_fill_enabled) + resolved_theme_info["auto_fill_secondary_enabled"] = bool(auto_fill_secondary_enabled) + resolved_theme_info["auto_fill_tertiary_enabled"] = bool(auto_fill_tertiary_enabled) + attempts_override = _first_value(body.get("attempts")) + timeout_ms_override = _first_value(body.get("timeout_ms")) try: new_seed = int(last_seed) + 1 if last_seed is not None else None except Exception: @@ -821,7 +1673,7 @@ async def hx_random_reroll(request: Request): except Exception: _timeout_ms = int(RANDOM_TIMEOUT_MS) _timeout_s = 
max(0.1, float(_timeout_ms) / 1000.0) - if locked_commander: + if is_reroll_same: build_t0 = time.time() from headless_runner import run as _run # type: ignore # Suppress builder's internal initial export to control artifact generation (matches full random path logic) @@ -851,7 +1703,7 @@ async def hx_random_reroll(request: Request): compliance = None try: import os as _os - import json as _json + import json as _json_mod # Perform exactly one export sequence now if not csv_path and hasattr(builder, 'export_decklist_csv'): try: @@ -874,7 +1726,7 @@ async def hx_random_reroll(request: Request): if _os.path.isfile(comp_path): try: with open(comp_path, 'r', encoding='utf-8') as _cf: - compliance = _json.load(_cf) + compliance = _json_mod.load(_cf) except Exception: compliance = None else: @@ -894,7 +1746,20 @@ async def hx_random_reroll(request: Request): "txt": txt_path, "random_seed": int(new_seed), "random_theme": theme, + "random_primary_theme": primary_theme, + "random_secondary_theme": secondary_theme, + "random_tertiary_theme": tertiary_theme, + "random_resolved_themes": list(resolved_theme_info.get("resolved_list") or []), + "random_combo_fallback": bool(resolved_theme_info.get("combo_fallback")), + "random_synergy_fallback": bool(resolved_theme_info.get("synergy_fallback")), + "random_fallback_reason": resolved_theme_info.get("fallback_reason"), + "random_auto_fill_enabled": bool(auto_fill_enabled), + "random_auto_fill_secondary_enabled": bool(auto_fill_secondary_enabled), + "random_auto_fill_tertiary_enabled": bool(auto_fill_tertiary_enabled), + "random_auto_fill_applied": bool(resolved_theme_info.get("auto_fill_applied")), + "random_auto_filled_themes": list(resolved_theme_info.get("auto_filled_themes") or []), "random_constraints": constraints or {}, + "random_strict_theme_match": bool(strict_theme_match), "locked_commander": True, } try: @@ -905,17 +1770,36 @@ async def hx_random_reroll(request: Request): meta["name"] = custom_base.strip() try: with 
open(sidecar, 'w', encoding='utf-8') as f: - _json.dump({"meta": meta, "summary": summary}, f, ensure_ascii=False, indent=2) + _json_mod.dump({"meta": meta, "summary": summary}, f, ensure_ascii=False, indent=2) except Exception: pass except Exception: compliance = None + if "auto_fill_applied" not in resolved_theme_info: + resolved_theme_info["auto_fill_applied"] = bool(resolved_theme_info.get("auto_filled_themes")) class _Res: # minimal object with expected attrs pass res = _Res() res.seed = int(new_seed) res.commander = locked_commander res.theme = theme + res.primary_theme = primary_theme + res.secondary_theme = secondary_theme + res.tertiary_theme = tertiary_theme + res.strict_theme_match = bool(strict_theme_match) + if not resolved_theme_info.get("resolved_list"): + resolved_theme_info["resolved_list"] = [t for t in [primary_theme, secondary_theme, tertiary_theme] if t] + res.resolved_themes = list(resolved_theme_info.get("resolved_list") or []) + res.display_themes = list(resolved_theme_info.get("display_list") or res.resolved_themes) + res.auto_fill_enabled = bool(auto_fill_enabled) + res.auto_fill_secondary_enabled = bool(auto_fill_secondary_enabled) + res.auto_fill_tertiary_enabled = bool(auto_fill_tertiary_enabled) + res.auto_fill_applied = bool(resolved_theme_info.get("auto_fill_applied")) + res.auto_filled_themes = list(resolved_theme_info.get("auto_filled_themes") or []) + res.combo_fallback = bool(resolved_theme_info.get("combo_fallback")) + res.synergy_fallback = bool(resolved_theme_info.get("synergy_fallback")) + res.fallback_reason = resolved_theme_info.get("fallback_reason") + res.theme_fallback = bool(res.combo_fallback) or bool(res.synergy_fallback) res.constraints = constraints or {} res.diagnostics = {"locked_commander": True, "attempts": 1, "elapsed_ms": elapsed_ms} res.summary = summary @@ -930,7 +1814,33 @@ async def hx_random_reroll(request: Request): seed=new_seed, attempts=int(_attempts), timeout_s=float(_timeout_s), + 
primary_theme=primary_theme, + secondary_theme=secondary_theme, + tertiary_theme=tertiary_theme, + auto_fill_missing=bool(auto_fill_enabled), + auto_fill_secondary=auto_fill_secondary_enabled, + auto_fill_tertiary=auto_fill_tertiary_enabled, + strict_theme_match=strict_theme_match, ) + resolved_theme_info = { + "primary": getattr(res, "primary_theme", None), + "secondary": getattr(res, "secondary_theme", None), + "tertiary": getattr(res, "tertiary_theme", None), + "resolved_list": list(getattr(res, "resolved_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "display_list": list(getattr(res, "display_themes", []) or []), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + "strict_theme_match": bool(getattr(res, "strict_theme_match", strict_theme_match)), + } + resolved_theme_info["auto_fill_enabled"] = bool(auto_fill_enabled) + resolved_theme_info["auto_fill_secondary_enabled"] = bool(auto_fill_secondary_enabled) + resolved_theme_info["auto_fill_tertiary_enabled"] = bool(auto_fill_tertiary_enabled) except Exception as ex: # Map constraints-impossible to a friendly fragment; other errors to a plain note msg = "" @@ -944,12 +1854,36 @@ async def hx_random_reroll(request: Request): msg = "
Reroll failed. Please try again.
" return HTMLResponse(msg, status_code=200) + strict_theme_result = bool(getattr(res, "strict_theme_match", strict_theme_match)) + resolved_theme_info["strict_theme_match"] = strict_theme_result + + usage_mode = _classify_usage_mode(mode, [primary_theme, secondary_theme, tertiary_theme, legacy_theme], locked_commander) + combo_flag = bool(getattr(res, "combo_fallback", False)) + synergy_flag = bool(getattr(res, "synergy_fallback", False)) + _record_random_usage_event(usage_mode, combo_flag, synergy_flag, getattr(res, "fallback_reason", None)) + # Persist to session - sid, had_cookie = _update_random_session(request, seed=int(res.seed), theme=res.theme, constraints=res.constraints or {}) + request_timestamp = time.time() + sid, had_cookie = _update_random_session( + request, + seed=int(res.seed), + theme=res.theme, + constraints=res.constraints or {}, + requested_themes=requested_themes, + resolved_themes=resolved_theme_info, + auto_fill_enabled=auto_fill_enabled, + auto_fill_secondary_enabled=auto_fill_secondary_enabled, + auto_fill_tertiary_enabled=auto_fill_tertiary_enabled, + strict_theme_match=strict_theme_result, + auto_fill_applied=bool(getattr(res, "auto_fill_applied", False)), + auto_filled_themes=getattr(res, "auto_filled_themes", None), + display_themes=getattr(res, "display_themes", None), + request_timestamp=request_timestamp, + ) # Render minimal fragment via Jinja2 try: - elapsed_ms = int(round((time.time() - t0) * 1000)) + elapsed_ms = int(round((request_timestamp - t0) * 1000)) _log_random_event( "reroll", request, @@ -959,13 +1893,33 @@ async def hx_random_reroll(request: Request): attempts=int(RANDOM_MAX_ATTEMPTS), timeout_ms=int(RANDOM_TIMEOUT_MS), elapsed_ms=elapsed_ms, + fallback=bool(getattr(res, "combo_fallback", False) or getattr(res, "synergy_fallback", False) or getattr(res, "theme_fallback", False)), ) # Build permalink token for fragment copy button try: import base64 as _b64 _raw = _json.dumps({ "commander": res.commander, - "random": 
{"seed": int(res.seed), "theme": res.theme, "constraints": res.constraints or {}}, + "random": { + "seed": int(res.seed), + "theme": res.theme, + "constraints": res.constraints or {}, + "primary_theme": getattr(res, "primary_theme", None), + "secondary_theme": getattr(res, "secondary_theme", None), + "tertiary_theme": getattr(res, "tertiary_theme", None), + "resolved_themes": list(getattr(res, "resolved_themes", []) or []), + "display_themes": list(getattr(res, "display_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), + "strict_theme_match": strict_theme_result, + "requested_themes": requested_themes, + }, }, separators=(",", ":")) _token = _b64.urlsafe_b64encode(_raw.encode("utf-8")).decode("ascii").rstrip("=") _permalink = f"/build/from?state={_token}" @@ -979,12 +1933,28 @@ async def hx_random_reroll(request: Request): "commander": res.commander, "decklist": res.decklist or [], "theme": res.theme, + "primary_theme": getattr(res, "primary_theme", None), + "secondary_theme": getattr(res, "secondary_theme", None), + "tertiary_theme": getattr(res, "tertiary_theme", None), + "resolved_themes": list(getattr(res, "resolved_themes", []) or []), + "display_themes": list(getattr(res, "display_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + 
"requested_themes": requested_themes, + "resolved_theme_info": resolved_theme_info, + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + "auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), "constraints": res.constraints or {}, "diagnostics": res.diagnostics or {}, "permalink": _permalink, "show_diagnostics": SHOW_DIAGNOSTICS, - "fallback": bool(getattr(res, "theme_fallback", False)), + "fallback": bool(getattr(res, "theme_fallback", False) or getattr(res, "combo_fallback", False) or getattr(res, "synergy_fallback", False)), "summary": getattr(res, "summary", None), + "strict_theme_match": strict_theme_result, }, ) if rl: @@ -1009,8 +1979,24 @@ async def hx_random_reroll(request: Request): "commander": res.commander, "decklist": res.decklist or [], "theme": res.theme, + "primary_theme": getattr(res, "primary_theme", None), + "secondary_theme": getattr(res, "secondary_theme", None), + "tertiary_theme": getattr(res, "tertiary_theme", None), + "resolved_themes": list(getattr(res, "resolved_themes", []) or []), + "display_themes": list(getattr(res, "display_themes", []) or []), + "combo_fallback": bool(getattr(res, "combo_fallback", False)), + "synergy_fallback": bool(getattr(res, "synergy_fallback", False)), + "fallback_reason": getattr(res, "fallback_reason", None), + "requested_themes": requested_themes, + "resolved_theme_info": resolved_theme_info, + "auto_fill_enabled": bool(getattr(res, "auto_fill_enabled", False)), + "auto_fill_secondary_enabled": bool(getattr(res, "auto_fill_secondary_enabled", False)), + "auto_fill_tertiary_enabled": bool(getattr(res, "auto_fill_tertiary_enabled", False)), + "auto_fill_applied": bool(getattr(res, "auto_fill_applied", False)), + 
"auto_filled_themes": list(getattr(res, "auto_filled_themes", []) or []), "constraints": res.constraints or {}, "diagnostics": res.diagnostics or {}, + "strict_theme_match": strict_theme_result, } ) if not had_cookie: @@ -1272,7 +2258,16 @@ async def unhandled_exception_handler(request: Request, exc: Exception): async def random_modes_page(request: Request) -> HTMLResponse: if not random_modes_enabled(): raise HTTPException(status_code=404, detail="Random Modes disabled") - return templates.TemplateResponse("random/index.html", {"request": request, "random_ui": bool(RANDOM_UI)}) + cached_requested, _cached_resolved = _get_random_session_themes(request) + strict_pref = bool(_sanitize_bool(cached_requested.get("strict_theme_match"), default=False)) + return templates.TemplateResponse( + "random/index.html", + { + "request": request, + "random_ui": bool(RANDOM_UI), + "strict_theme_match": strict_pref, + }, + ) # Lightweight file download endpoint for exports @app.get("/files") diff --git a/code/web/routes/build.py b/code/web/routes/build.py index 635295a..64e5264 100644 --- a/code/web/routes/build.py +++ b/code/web/routes/build.py @@ -2,6 +2,7 @@ from __future__ import annotations from fastapi import APIRouter, Request, Form, Query from fastapi.responses import HTMLResponse, JSONResponse +from typing import Any from ..app import ALLOW_MUST_HAVES # Import feature flag from ..services.build_utils import ( step5_ctx_from_result, @@ -2859,7 +2860,35 @@ async def build_permalink(request: Request): rb = sess.get("random_build") or {} if rb: # Only include known keys to avoid leaking unrelated session data - inc = {k: rb.get(k) for k in ("seed", "theme", "constraints") if k in rb} + inc: dict[str, Any] = {} + for key in ("seed", "theme", "constraints", "primary_theme", "secondary_theme", "tertiary_theme"): + if rb.get(key) is not None: + inc[key] = rb.get(key) + resolved_list = rb.get("resolved_themes") + if isinstance(resolved_list, list): + inc["resolved_themes"] = 
list(resolved_list) + resolved_info = rb.get("resolved_theme_info") + if isinstance(resolved_info, dict): + inc["resolved_theme_info"] = dict(resolved_info) + if rb.get("combo_fallback") is not None: + inc["combo_fallback"] = bool(rb.get("combo_fallback")) + if rb.get("synergy_fallback") is not None: + inc["synergy_fallback"] = bool(rb.get("synergy_fallback")) + if rb.get("fallback_reason") is not None: + inc["fallback_reason"] = rb.get("fallback_reason") + requested = rb.get("requested_themes") + if isinstance(requested, dict): + inc["requested_themes"] = dict(requested) + if rb.get("auto_fill_enabled") is not None: + inc["auto_fill_enabled"] = bool(rb.get("auto_fill_enabled")) + if rb.get("auto_fill_applied") is not None: + inc["auto_fill_applied"] = bool(rb.get("auto_fill_applied")) + auto_filled = rb.get("auto_filled_themes") + if isinstance(auto_filled, list): + inc["auto_filled_themes"] = list(auto_filled) + display = rb.get("display_themes") + if isinstance(display, list): + inc["display_themes"] = list(display) if inc: payload["random"] = inc except Exception: @@ -2914,9 +2943,43 @@ async def build_from(request: Request, state: str | None = None) -> HTMLResponse try: r = data.get("random") or {} if r: - sess["random_build"] = { - k: r.get(k) for k in ("seed", "theme", "constraints") if k in r - } + rb_payload: dict[str, Any] = {} + for key in ("seed", "theme", "constraints", "primary_theme", "secondary_theme", "tertiary_theme"): + if r.get(key) is not None: + rb_payload[key] = r.get(key) + if isinstance(r.get("resolved_themes"), list): + rb_payload["resolved_themes"] = list(r.get("resolved_themes") or []) + if isinstance(r.get("resolved_theme_info"), dict): + rb_payload["resolved_theme_info"] = dict(r.get("resolved_theme_info")) + if r.get("combo_fallback") is not None: + rb_payload["combo_fallback"] = bool(r.get("combo_fallback")) + if r.get("synergy_fallback") is not None: + rb_payload["synergy_fallback"] = bool(r.get("synergy_fallback")) + if 
r.get("fallback_reason") is not None: + rb_payload["fallback_reason"] = r.get("fallback_reason") + if isinstance(r.get("requested_themes"), dict): + requested_payload = dict(r.get("requested_themes")) + if "auto_fill_enabled" in requested_payload: + requested_payload["auto_fill_enabled"] = bool(requested_payload.get("auto_fill_enabled")) + rb_payload["requested_themes"] = requested_payload + if r.get("auto_fill_enabled") is not None: + rb_payload["auto_fill_enabled"] = bool(r.get("auto_fill_enabled")) + if r.get("auto_fill_applied") is not None: + rb_payload["auto_fill_applied"] = bool(r.get("auto_fill_applied")) + auto_filled = r.get("auto_filled_themes") + if isinstance(auto_filled, list): + rb_payload["auto_filled_themes"] = list(auto_filled) + display = r.get("display_themes") + if isinstance(display, list): + rb_payload["display_themes"] = list(display) + if "seed" in rb_payload: + try: + seed_int = int(rb_payload["seed"]) + rb_payload["seed"] = seed_int + rb_payload.setdefault("recent_seeds", [seed_int]) + except Exception: + rb_payload.setdefault("recent_seeds", []) + sess["random_build"] = rb_payload except Exception: pass diff --git a/code/web/templates/diagnostics/index.html b/code/web/templates/diagnostics/index.html index 03cd3fb..c7a4b7f 100644 --- a/code/web/templates/diagnostics/index.html +++ b/code/web/templates/diagnostics/index.html @@ -7,6 +7,7 @@

System summary

Loading…
+
Loading theme stats…
@@ -76,6 +77,121 @@ try { fetch('/status/sys', { cache: 'no-store' }).then(function(r){ return r.json(); }).then(render).catch(function(){ el.textContent='Unavailable'; }); } catch(_){ el.textContent='Unavailable'; } } load(); + var tokenEl = document.getElementById('themeTokenStats'); + function renderTokens(payload){ + if (!tokenEl) return; + try { + if (!payload || payload.ok !== true) { + tokenEl.textContent = 'Theme stats unavailable'; + return; + } + var stats = payload.stats || {}; + var top = Array.isArray(stats.top_tokens) ? stats.top_tokens.slice(0, 5) : []; + var html = ''; + var commanders = (stats && stats.commanders != null) ? stats.commanders : '0'; + var withTags = (stats && stats.with_tags != null) ? stats.with_tags : '0'; + var uniqueTokens = (stats && stats.unique_tokens != null) ? stats.unique_tokens : '0'; + var assignments = (stats && stats.total_assignments != null) ? stats.total_assignments : '0'; + var avgTokens = (stats && stats.avg_tokens_per_commander != null) ? stats.avg_tokens_per_commander : '0'; + var medianTokens = (stats && stats.median_tokens_per_commander != null) ? stats.median_tokens_per_commander : '0'; + html += '
Commanders indexed: ' + String(commanders) + ' (' + String(withTags) + ' with tags)
'; + html += '
Theme tokens: ' + String(uniqueTokens) + ' unique; ' + String(assignments) + ' assignments
'; + html += '
Tokens per commander: avg ' + String(avgTokens) + ', median ' + String(medianTokens) + '
'; + if (top.length) { + var parts = []; + top.forEach(function(item){ + parts.push(String(item.token) + ' (' + String(item.count) + ')'); + }); + html += '
Top tokens: ' + parts.join(', ') + '
'; + } + var pool = stats.random_pool || {}; + if (pool && typeof pool.size !== 'undefined'){ + var coveragePct = null; + if (pool.coverage_ratio != null){ + var cov = Number(pool.coverage_ratio); + if (!Number.isNaN(cov)){ coveragePct = (cov * 100).toFixed(1); } + } + html += '
Curated random pool: ' + String(pool.size) + ' tokens'; + if (coveragePct !== null){ html += ' (' + coveragePct + '% of catalog tokens)'; } + html += '
'; + var rules = pool.rules || {}; + var threshold = rules.overrepresented_share_threshold; + if (threshold != null){ + var thrPct = Number(threshold); + if (!Number.isNaN(thrPct)){ html += '
Over-represented threshold: ≥ ' + (thrPct * 100).toFixed(1) + '% of commanders
'; } + } + var excludedCounts = pool.excluded_counts || {}; + var reasonKeys = Object.keys(excludedCounts); + if (reasonKeys.length){ + var badges = reasonKeys.map(function(reason){ + return reason + ' (' + excludedCounts[reason] + ')'; + }); + html += '
Exclusions: ' + badges.join(', ') + '
'; + } + var samples = pool.excluded_samples || {}; + var sampleKeys = Object.keys(samples); + if (sampleKeys.length){ + var sampleLines = []; + sampleKeys.slice(0, 3).forEach(function(reason){ + var tokens = samples[reason] || []; + var sampleTokens = (tokens || []).slice(0, 3); + var remainder = Math.max((tokens || []).length - sampleTokens.length, 0); + var tokenLabel = sampleTokens.join(', '); + if (remainder > 0){ tokenLabel += ' +' + remainder; } + sampleLines.push(reason + ': ' + tokenLabel); + }); + html += '
Samples → ' + sampleLines.join(' | ') + '
'; + } + var manualDetail = pool.manual_exclusion_detail || {}; + var manualKeys = Object.keys(manualDetail); + if (manualKeys.length){ + var manualSamples = manualKeys.slice(0, 3).map(function(token){ + var info = manualDetail[token] || {}; + var label = info.display || token; + var cat = info.category ? (' [' + info.category + ']') : ''; + return label + cat; + }); + var manualRemainder = Math.max(manualKeys.length - manualSamples.length, 0); + var manualLine = manualSamples.join(', '); + if (manualRemainder > 0){ manualLine += ' +' + manualRemainder; } + html += '
Manual exclusions: ' + manualLine + '
'; + } + var manualGroups = Array.isArray(rules.manual_exclusions) ? rules.manual_exclusions : []; + if (manualGroups.length){ + var categoryList = manualGroups.map(function(group){ return group.category || 'manual'; }); + html += '
Manual categories: ' + categoryList.join(', ') + '
'; + } + } + var telemetry = stats.index_telemetry || {}; + if (telemetry && typeof telemetry.token_count !== 'undefined'){ + var hitRate = telemetry.hit_rate != null ? Number(telemetry.hit_rate) : null; + var hitPct = (hitRate !== null && !Number.isNaN(hitRate)) ? (hitRate * 100).toFixed(1) : null; + var teleLine = '
Tag index: ' + String(telemetry.token_count || 0) + ' tokens · lookups ' + String(telemetry.lookups || 0); + if (hitPct !== null){ teleLine += ' · hit rate ' + hitPct + '%'; } + if (telemetry.substring_checks){ teleLine += ' · substring checks ' + String(telemetry.substring_checks || 0); } + teleLine += '
'; + html += teleLine; + } + tokenEl.innerHTML = html; + } catch(_){ + tokenEl.textContent = 'Theme stats unavailable'; + } + } + function loadTokenStats(){ + if (!tokenEl) return; + tokenEl.textContent = 'Loading theme stats…'; + fetch('/status/random_theme_stats', { cache: 'no-store' }) + .then(function(resp){ + if (resp.status === 404) { + tokenEl.textContent = 'Diagnostics disabled (stats unavailable)'; + return null; + } + return resp.json(); + }) + .then(function(data){ if (data) renderTokens(data); }) + .catch(function(){ tokenEl.textContent = 'Theme stats unavailable'; }); + } + loadTokenStats(); // Theme status and reset try{ var tEl = document.getElementById('themeSummary'); diff --git a/code/web/templates/partials/random_result.html b/code/web/templates/partials/random_result.html index 575e47c..0cb7898 100644 --- a/code/web/templates/partials/random_result.html +++ b/code/web/templates/partials/random_result.html @@ -1,11 +1,19 @@
Seed: {{ seed }} @@ -14,18 +22,90 @@ {% endif %} {% if show_diagnostics and diagnostics %} - - Att {{ diagnostics.attempts }} - {{ diagnostics.elapsed_ms }}ms - {% if diagnostics.timeout_hit %}Timeout{% endif %} - {% if diagnostics.retries_exhausted %}Retries{% endif %} - {% if fallback or diagnostics.fallback %}Fallback{% endif %} + + + + + + + + + + {% if diagnostics.timeout_hit %} + + + + + {% endif %} + {% if diagnostics.retries_exhausted %} + + + + + {% endif %} + {% if fallback or diagnostics.fallback %} + + + + + {% endif %} {% endif %}
+ {% set display_list = display_themes or resolved_themes or [] %} + {% set resolved_list = display_list %} + {% set has_primary = primary_theme or secondary_theme or tertiary_theme %} + {% if resolved_list or has_primary %} +
+ {% if resolved_list %} + Resolved themes: {{ resolved_list|join(' + ') }} + {% else %} + Resolved themes: Full pool fallback + {% endif %} +
+ {% endif %} + {% if auto_fill_applied and auto_filled_themes %} +
+ Auto-filled: {{ auto_filled_themes|join(', ') }} +
+ {% endif %} + {% if fallback_reason %} + {% if synergy_fallback and (not resolved_list) %} + {% set notice_class = 'danger' %} + {% elif synergy_fallback %} + {% set notice_class = 'warn' %} + {% else %} + {% set notice_class = 'info' %} + {% endif %} + {% if notice_class == 'danger' %} + {% set notice_icon = '⛔' %} + {% elif notice_class == 'warn' %} + {% set notice_icon = '⚠️' %} + {% else %} + {% set notice_icon = 'ℹ️' %} + {% endif %} +
+ + + Heads up: + {{ fallback_reason }}. + You can tweak secondary or tertiary themes for different mixes, or reroll to explore more options. + + +
+ {% endif %} + + + + + + + + +
Random UI is disabled. Set RANDOM_UI=1 to enable.
{% else %} -
- -
- -