feat(random): finalize multi-theme telemetry and polish

- document random theme exclusions, perf guard tooling, and roadmap completion

- tighten random reroll UX: strict theme persistence, throttle handling, export parity, diagnostics updates

- add regression coverage for telemetry counters, multi-theme flows, and locked rerolls; refresh README and notes

Tests: pytest -q (fast random + telemetry suites)
matt 2025-09-26 18:15:52 -07:00
parent 73685f22c8
commit 49f1f8b2eb
28 changed files with 4888 additions and 251 deletions


code/scripts/check_random_theme_perf.py
@@ -0,0 +1,118 @@
"""Opt-in guard that compares multi-theme filter performance to a stored baseline.
Run inside the project virtual environment:
python -m code.scripts.check_random_theme_perf --baseline config/random_theme_perf_baseline.json
The script executes the same profiling loop as `profile_multi_theme_filter` and fails
if the observed mean or p95 timings regress by more than the allowed threshold.
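Pass --update-baseline to overwrite the stored baseline with freshly collected metrics.
The baseline file mirrors the profiler output, for example (illustrative values):
    {"iterations": 400, "seed": null,
     "cascade": {"mean_ms": 1.25, "p95_ms": 2.40, "samples": 400},
     "synergy": {"mean_ms": 1.40, "p95_ms": 2.75, "samples": 400}}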
"""
from __future__ import annotations
import argparse
import json
import sys
from pathlib import Path
from typing import Any, Dict, Tuple
PROJECT_ROOT = Path(__file__).resolve().parents[2]
DEFAULT_BASELINE = PROJECT_ROOT / "config" / "random_theme_perf_baseline.json"
if str(PROJECT_ROOT) not in sys.path:
sys.path.append(str(PROJECT_ROOT))
from code.scripts.profile_multi_theme_filter import run_profile # type: ignore # noqa: E402
def _load_baseline(path: Path) -> Dict[str, Any]:
if not path.exists():
raise FileNotFoundError(f"Baseline file not found: {path}")
data = json.loads(path.read_text(encoding="utf-8"))
return data
def _extract(metric: Dict[str, Any], key: str) -> float:
try:
value = float(metric.get(key, 0.0))
except Exception:
value = 0.0
return value
def _check_section(name: str, actual: Dict[str, Any], baseline: Dict[str, Any], threshold: float) -> Tuple[bool, str]:
a_mean = _extract(actual, "mean_ms")
b_mean = _extract(baseline, "mean_ms")
a_p95 = _extract(actual, "p95_ms")
b_p95 = _extract(baseline, "p95_ms")
allowed_mean = b_mean * (1.0 + threshold)
allowed_p95 = b_p95 * (1.0 + threshold)
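    # A zero (or missing) baseline metric means there is nothing to compare against,
    # so that statistic is treated as passing rather than failing spuriously.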
mean_ok = a_mean <= allowed_mean or b_mean == 0.0
p95_ok = a_p95 <= allowed_p95 or b_p95 == 0.0
status = mean_ok and p95_ok
def _format_row(label: str, actual_val: float, baseline_val: float, allowed_val: float, ok: bool) -> str:
trend = ((actual_val - baseline_val) / baseline_val * 100.0) if baseline_val else 0.0
trend_str = f"{trend:+.1f}%" if baseline_val else "n/a"
limit_str = f"{allowed_val:.3f}ms" if baseline_val else "n/a"
return f" {label:<6} actual={actual_val:.3f}ms baseline={baseline_val:.3f}ms ({trend_str}), limit {limit_str} -> {'OK' if ok else 'FAIL'}"
rows = [f"Section: {name}"]
rows.append(_format_row("mean", a_mean, b_mean, allowed_mean, mean_ok))
rows.append(_format_row("p95", a_p95, b_p95, allowed_p95, p95_ok))
return status, "\n".join(rows)
def main(argv: list[str] | None = None) -> int:
parser = argparse.ArgumentParser(description="Check multi-theme filtering performance against a baseline")
parser.add_argument("--baseline", type=Path, default=DEFAULT_BASELINE, help="Baseline JSON file (default: config/random_theme_perf_baseline.json)")
parser.add_argument("--iterations", type=int, default=400, help="Number of iterations to sample (default: 400)")
parser.add_argument("--seed", type=int, default=None, help="Optional RNG seed for reproducibility")
parser.add_argument("--threshold", type=float, default=0.15, help="Allowed regression threshold as a fraction (default: 0.15 = 15%)")
parser.add_argument("--update-baseline", action="store_true", help="Overwrite the baseline file with the newly collected metrics")
args = parser.parse_args(argv)
baseline_path = args.baseline if args.baseline else DEFAULT_BASELINE
if args.update_baseline and not baseline_path.parent.exists():
baseline_path.parent.mkdir(parents=True, exist_ok=True)
if not args.update_baseline:
baseline = _load_baseline(baseline_path)
else:
baseline = {}
    if args.seed is not None:
        # Seed the RNG (mirroring profile_multi_theme_filter's own CLI) so --seed
        # actually makes the sampled theme combinations reproducible.
        import random
        random.seed(args.seed)
    results = run_profile(args.iterations, args.seed)
cascade_status, cascade_report = _check_section("cascade", results.get("cascade", {}), baseline.get("cascade", {}), args.threshold)
synergy_status, synergy_report = _check_section("synergy", results.get("synergy", {}), baseline.get("synergy", {}), args.threshold)
print("Iterations:", results.get("iterations"))
print("Seed:", results.get("seed"))
print(cascade_report)
print(synergy_report)
overall_ok = cascade_status and synergy_status
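    # With --update-baseline the fresh metrics are persisted and the run exits 0
    # without enforcing the regression check.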
if args.update_baseline:
payload = {
"iterations": results.get("iterations"),
"seed": results.get("seed"),
"cascade": results.get("cascade"),
"synergy": results.get("synergy"),
}
baseline_path.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8")
print(f"Baseline updated → {baseline_path}")
return 0
if not overall_ok:
print(f"FAIL: performance regressions exceeded {args.threshold * 100:.1f}% threshold", file=sys.stderr)
return 1
print("PASS: performance within allowed threshold")
return 0
if __name__ == "__main__": # pragma: no cover
raise SystemExit(main())


code/scripts/profile_multi_theme_filter.py
@@ -0,0 +1,136 @@
"""Profile helper for multi-theme commander filtering.
Run within the project virtual environment:
python code/scripts/profile_multi_theme_filter.py --iterations 500
Outputs aggregate timings for the AND-combo cascade and synergy fallback scenarios.
"""
from __future__ import annotations
import argparse
import json
import statistics
import sys
import time
from pathlib import Path
from typing import Any, Dict, List, Tuple
import pandas as pd
PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
sys.path.append(str(PROJECT_ROOT))
from deck_builder.random_entrypoint import _ensure_theme_tag_cache, _filter_multi, _load_commanders_df # noqa: E402
def _sample_combinations(tags: List[str], iterations: int) -> List[Tuple[str | None, str | None, str | None]]:
import random
combos: List[Tuple[str | None, str | None, str | None]] = []
if not tags:
return combos
for _ in range(iterations):
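        # Every combination gets a primary theme; secondary and tertiary themes are
        # attached with 45% and 25% probability to exercise the multi-theme paths.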
primary = random.choice(tags)
secondary = random.choice(tags) if random.random() < 0.45 else None
tertiary = random.choice(tags) if random.random() < 0.25 else None
combos.append((primary, secondary, tertiary))
return combos
def _collect_tag_pool(df: pd.DataFrame) -> List[str]:
tag_pool: set[str] = set()
for tags in df.get("_ltags", []): # type: ignore[assignment]
if not tags:
continue
for token in tags:
tag_pool.add(token)
return sorted(tag_pool)
def _summarize(values: List[float]) -> Dict[str, float]:
    mean_ms = statistics.mean(values) * 1000 if values else 0.0
if len(values) >= 20:
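        # statistics.quantiles(n=20) yields 19 cut points; index 18 is the 95th percentile.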
p95_ms = statistics.quantiles(values, n=20)[18] * 1000
else:
p95_ms = max(values) * 1000 if values else 0.0
return {
"mean_ms": round(mean_ms, 6),
"p95_ms": round(p95_ms, 6),
"samples": len(values),
}
def run_profile(iterations: int, seed: int | None = None) -> Dict[str, Any]:
if iterations <= 0:
raise ValueError("Iterations must be a positive integer")
df = _load_commanders_df()
df = _ensure_theme_tag_cache(df)
tag_pool = _collect_tag_pool(df)
if not tag_pool:
raise RuntimeError("No theme tags available in dataset; ensure commander catalog is populated")
combos = _sample_combinations(tag_pool, iterations)
if not combos:
raise RuntimeError("Failed to generate theme combinations for profiling")
timings: List[float] = []
synergy_timings: List[float] = []
for primary, secondary, tertiary in combos:
start = time.perf_counter()
_filter_multi(df, primary, secondary, tertiary)
timings.append(time.perf_counter() - start)
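        # Probe the fallback path with a fabricated primary tag that should not match
        # exactly, so _filter_multi has to fall back to synergy-based filtering.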
improbable_primary = f"{primary or 'aggro'}_unlikely_value"
start_synergy = time.perf_counter()
_filter_multi(df, improbable_primary, secondary, tertiary)
synergy_timings.append(time.perf_counter() - start_synergy)
return {
"iterations": iterations,
"seed": seed,
"cascade": _summarize(timings),
"synergy": _summarize(synergy_timings),
}
def main() -> None:
parser = argparse.ArgumentParser(description="Profile multi-theme filtering performance")
parser.add_argument("--iterations", type=int, default=400, help="Number of random theme combinations to evaluate")
parser.add_argument("--seed", type=int, default=None, help="Optional RNG seed for repeatability")
parser.add_argument("--json", type=Path, help="Optional path to write the raw metrics as JSON")
args = parser.parse_args()
if args.seed is not None:
import random
random.seed(args.seed)
results = run_profile(args.iterations, args.seed)
def _print(label: str, stats: Dict[str, float]) -> None:
mean_ms = stats.get("mean_ms", 0.0)
p95_ms = stats.get("p95_ms", 0.0)
samples = stats.get("samples", 0)
print(f"{label}: mean={mean_ms:.4f}ms p95={p95_ms:.4f}ms (n={samples})")
_print("AND-combo cascade", results.get("cascade", {}))
_print("Synergy fallback", results.get("synergy", {}))
if args.json:
payload = {
"iterations": results.get("iterations"),
"seed": results.get("seed"),
"cascade": results.get("cascade"),
"synergy": results.get("synergy"),
}
args.json.parent.mkdir(parents=True, exist_ok=True)
args.json.write_text(json.dumps(payload, indent=2) + "\n", encoding="utf-8")
if __name__ == "__main__":
main()


code/scripts/report_random_theme_pool.py
@@ -0,0 +1,193 @@
"""Summarize the curated random theme pool and exclusion rules.
Usage examples:
python -m code.scripts.report_random_theme_pool --format markdown
python -m code.scripts.report_random_theme_pool --output logs/random_theme_pool.json
The script refreshes the commander catalog, rebuilds the curated random
pool using the same heuristics as Random Mode auto-fill, and prints a
summary (JSON by default).
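The JSON report exposes four top-level keys: "allowed_tokens", "allowed_count",
"metadata" (rules, coverage, exclusion counts and samples), and "excluded_detail"
(a token -> reasons mapping).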
"""
from __future__ import annotations
import argparse
import json
import sys
from pathlib import Path
from typing import Any, Dict, List
PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
sys.path.append(str(PROJECT_ROOT))
from deck_builder.random_entrypoint import ( # type: ignore # noqa: E402
_build_random_theme_pool,
_ensure_theme_tag_cache,
_load_commanders_df,
_OVERREPRESENTED_SHARE_THRESHOLD,
)
def build_report(refresh: bool = False) -> Dict[str, Any]:
df = _load_commanders_df()
if refresh:
        # With --refresh, any failure while rebuilding the tag cache is raised
        # instead of being silently ignored.
df = _ensure_theme_tag_cache(df)
else:
try:
df = _ensure_theme_tag_cache(df)
except Exception:
pass
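    # include_details=True asks the pool builder to also return per-token exclusion
    # reasons; they are popped out of the metadata and reported as their own section.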
allowed, metadata = _build_random_theme_pool(df, include_details=True)
detail = metadata.pop("excluded_detail", {})
report = {
"allowed_tokens": sorted(allowed),
"allowed_count": len(allowed),
"metadata": metadata,
"excluded_detail": detail,
}
return report
def format_markdown(report: Dict[str, Any], *, limit: int = 20) -> str:
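    # Render the report as human-readable markdown; `limit` caps how many sample
    # tokens and detail rows are listed per section.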
lines: List[str] = []
meta = report.get("metadata", {})
rules = meta.get("rules", {})
lines.append("# Curated Random Theme Pool")
lines.append("")
lines.append(f"- Allowed tokens: **{report.get('allowed_count', 0)}**")
total_commander_count = meta.get("total_commander_count")
if total_commander_count is not None:
lines.append(f"- Commander entries analyzed: **{total_commander_count}**")
coverage = meta.get("coverage_ratio")
if coverage is not None:
pct = round(float(coverage) * 100.0, 2)
lines.append(f"- Coverage: **{pct}%** of catalog tokens")
if rules:
thresh = rules.get("overrepresented_share_threshold", _OVERREPRESENTED_SHARE_THRESHOLD)
thresh_pct = round(float(thresh) * 100.0, 2)
lines.append("- Exclusion rules:")
lines.append(" - Minimum commander coverage: 5 unique commanders")
lines.append(f" - Kindred filter keywords: {', '.join(rules.get('kindred_keywords', []))}")
lines.append(f" - Global theme keywords: {', '.join(rules.get('excluded_keywords', []))}")
pattern_str = ", ".join(rules.get("excluded_patterns", []))
if pattern_str:
lines.append(f" - Global theme patterns: {pattern_str}")
lines.append(f" - Over-represented threshold: ≥ {thresh_pct}% of commanders")
manual_src = rules.get("manual_exclusions_source")
manual_groups = rules.get("manual_exclusions") or []
if manual_src or manual_groups:
lines.append(f" - Manual exclusion config: {manual_src or 'config/random_theme_exclusions.yml'}")
if manual_groups:
lines.append(f" - Manual categories: {len(manual_groups)} tracked groups")
counts = meta.get("excluded_counts", {}) or {}
if counts:
lines.append("")
lines.append("## Excluded tokens by reason")
lines.append("Reason | Count")
lines.append("------ | -----")
for reason, count in sorted(counts.items(), key=lambda item: item[0]):
lines.append(f"{reason} | {count}")
samples = meta.get("excluded_samples", {}) or {}
if samples:
lines.append("")
lines.append("## Sample tokens per exclusion reason")
for reason, tokens in sorted(samples.items(), key=lambda item: item[0]):
subset = tokens[:limit]
more = "" if len(tokens) <= limit else f" … (+{len(tokens) - limit})"
lines.append(f"- **{reason}**: {', '.join(subset)}{more}")
detail = report.get("excluded_detail", {}) or {}
if detail:
lines.append("")
lines.append("## Detailed exclusions (first few)")
for token, reasons in list(sorted(detail.items()))[:limit]:
lines.append(f"- {token}: {', '.join(reasons)}")
if len(detail) > limit:
lines.append(f"… (+{len(detail) - limit} more tokens)")
manual_detail = meta.get("manual_exclusion_detail", {}) or {}
if manual_detail:
lines.append("")
lines.append("## Manual exclusions applied")
for token, info in sorted(manual_detail.items(), key=lambda item: item[0]):
display = info.get("display", token)
category = info.get("category")
summary = info.get("summary")
notes = info.get("notes")
descriptors: List[str] = []
if category:
descriptors.append(f"category={category}")
if summary:
descriptors.append(summary)
if notes:
descriptors.append(notes)
            suffix = f": {'; '.join(descriptors)}" if descriptors else ""
lines.append(f"- {display}{suffix}")
if rules.get("manual_exclusions"):
lines.append("")
lines.append("## Manual exclusion categories")
for group in rules["manual_exclusions"]:
if not isinstance(group, dict):
continue
category = group.get("category", "manual")
summary = group.get("summary")
tokens = group.get("tokens", []) or []
notes = group.get("notes")
lines.append(f"- **{category}** — {summary or 'no summary provided'}")
if notes:
lines.append(f" - Notes: {notes}")
if tokens:
token_list = tokens[:limit]
more = "" if len(tokens) <= limit else f" … (+{len(tokens) - limit})"
lines.append(f" - Tokens: {', '.join(token_list)}{more}")
return "\n".join(lines)
def write_output(path: Path, payload: Dict[str, Any]) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with path.open("w", encoding="utf-8") as handle:
json.dump(payload, handle, indent=2, sort_keys=True)
handle.write("\n")
def write_manual_exclusions(path: Path, report: Dict[str, Any]) -> None:
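    # Persist only the manual-exclusion slice of the report: the source config path,
    # the category groups, and the per-token detail.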
meta = report.get("metadata", {}) or {}
rules = meta.get("rules", {}) or {}
detail = meta.get("manual_exclusion_detail", {}) or {}
payload = {
"source": rules.get("manual_exclusions_source"),
"categories": rules.get("manual_exclusions", []),
"tokens": detail,
}
write_output(path, payload)
def main(argv: List[str] | None = None) -> int:
parser = argparse.ArgumentParser(description="Report the curated random theme pool heuristics")
parser.add_argument("--format", choices={"json", "markdown"}, default="json", help="Output format (default: json)")
parser.add_argument("--output", type=Path, help="Optional path to write the structured report (JSON regardless of --format)")
parser.add_argument("--limit", type=int, default=20, help="Max sample tokens per reason when printing markdown (default: 20)")
parser.add_argument("--refresh", action="store_true", help="Bypass caches when rebuilding commander stats")
parser.add_argument("--write-exclusions", type=Path, help="Optional path for writing manual exclusion tokens + metadata (JSON)")
args = parser.parse_args(argv)
report = build_report(refresh=args.refresh)
if args.output:
write_output(args.output, report)
if args.write_exclusions:
write_manual_exclusions(args.write_exclusions, report)
if args.format == "markdown":
print(format_markdown(report, limit=max(1, args.limit)))
else:
print(json.dumps(report, indent=2, sort_keys=True))
return 0
if __name__ == "__main__": # pragma: no cover
raise SystemExit(main())