test-cleanup: fix 21 failures, prune stale tests, consolidate fragmented files (#66)
Some checks are pending
CI / build (push) Waiting to run

* test-cleanup: fix 21 failures, prune stale tests, consolidate fragmented files

* test-cleanup: remove permanently-skipped M4/perf tests, fix pydantic ConfigDict warning

* docs: update changelog and release notes for test-cleanup changes

* ci: fix editorial governance workflow stale test file reference
This commit is contained in:
mwisnowski 2026-03-31 17:38:08 -07:00 committed by GitHub
parent 32157179f9
commit 46637cf27f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
34 changed files with 5329 additions and 2202 deletions

View file

@ -47,7 +47,7 @@ jobs:
python code/scripts/validate_description_mapping.py
- name: Run regression & unit tests (editorial subset + enforcement)
run: |
python -m pytest -q code/tests/test_theme_validation_comprehensive.py::test_generic_description_regression code/tests/test_synergy_pairs_and_provenance.py code/tests/test_editorial_governance_phase_d_closeout.py code/tests/test_theme_catalog_comprehensive.py::TestThemeEnrichmentPipeline::test_validate_min_examples_warning code/tests/test_theme_catalog_comprehensive.py::TestThemeEnrichmentPipeline::test_validate_min_examples_error
python -m pytest -q code/tests/test_theme_validation_comprehensive.py::test_generic_description_regression code/tests/test_synergy_pairs_and_metadata_info.py code/tests/test_editorial_governance_phase_d_closeout.py code/tests/test_theme_catalog_comprehensive.py::TestThemeEnrichmentPipeline::test_validate_min_examples_warning code/tests/test_theme_catalog_comprehensive.py::TestThemeEnrichmentPipeline::test_validate_min_examples_error
env:
EDITORIAL_TEST_USE_FIXTURES: '1'
- name: Ratchet proposal (non-blocking)

View file

@ -15,10 +15,13 @@ _No unreleased changes yet_
_No unreleased changes yet_
### Fixed
_No unreleased changes yet_
- **Bug: missing `idx` argument** in `project_detail()` call inside `theme_preview.py` caused theme preview pages to crash.
- **Bug: `build_permalinks` router not mounted** in `app.py` caused all permalink-related endpoints to return 404.
- **Pydantic V2 deprecation warning** silenced: `DeckExportRequest` now uses `model_config = ConfigDict(...)` instead of the deprecated inner `class Config`.
### Removed
_No unreleased changes yet_
- **16 test files deleted**: 5 stale/broken tests and 11 single-test files merged into their domain equivalents to reduce fragmentation.
- **7 permanently-skipped tests removed**: 3 obsolete M4-era `apply_combo_tags` tests (API changed), 2 obsolete M4-era commander catalog tests (parquet architecture), and 2 "run manually" performance tests that never ran in CI.
## [4.4.2] - 2026-03-26
### Added

View file

@ -8,10 +8,11 @@ _No unreleased changes yet_
_No unreleased changes yet_
### Fixed
_No unreleased changes yet_
- Bug fixes in `theme_preview.py` and `app.py` uncovered by the test suite.
- Pydantic V2 deprecation warning resolved in `DeckExportRequest`.
### Removed
_No unreleased changes yet_
- 16 fragmented/stale test files consolidated or deleted; 7 permanently-skipped tests removed.
## [4.4.2] - 2026-03-26
### Added

View file

@ -230,8 +230,8 @@ def test_cheaper_alternatives_color_identity_filter():
"""Cards outside the commander's color identity must be excluded."""
candidates = [
# This card requires White (W) — not in Dimir (U/B)
{"name": "Swords to Plowshares", "tags": ["removal"], "color_identity": "W", "color_identity_list": ["W"], "mana_cost": "{W}", "rarity": ""},
{"name": "Doom Blade", "tags": ["removal"], "color_identity": "B", "color_identity_list": ["B"], "mana_cost": "{1}{B}", "rarity": ""},
{"name": "Swords to Plowshares", "tags": ["removal"], "color_identity": "W", "color_identity_list": ["W"], "mana_cost": "{W}", "rarity": "", "type_line": "Instant"},
{"name": "Doom Blade", "tags": ["removal"], "color_identity": "B", "color_identity_list": ["B"], "mana_cost": "{1}{B}", "rarity": "", "type_line": "Instant"},
]
prices = {"Swords to Plowshares": 1.00, "Doom Blade": 0.50}
svc = _make_price_service(prices)

View file

@ -1,50 +0,0 @@
from __future__ import annotations
import pytest
from pathlib import Path
from code.web.services import card_index
# M4 (Parquet Migration): This test relied on injecting custom CSV data via CARD_INDEX_EXTRA_CSV,
# which is no longer supported. The card_index now loads from the global all_cards.parquet file.
# Skipping this test as custom data injection is not possible with unified Parquet.
pytestmark = pytest.mark.skip(reason="M4: CARD_INDEX_EXTRA_CSV removed, cannot inject test data")
CSV_CONTENT = """name,themeTags,colorIdentity,manaCost,rarity
Hybrid Test,"Blink",WG,{W/G}{W/G},uncommon
Devoid Test,"Blink",C,3U,uncommon
MDFC Front,"Blink",R,1R,rare
Adventure Card,"Blink",G,2G,common
Color Indicator,"Blink",U,2U,uncommon
"""
# Note: The simplified edge cases focus on color_identity_list extraction logic.


def write_csv(tmp_path: Path):
    """Write the synthetic edge-case CSV into *tmp_path* and return its path."""
    out_path = tmp_path / "synthetic_edge_cases.csv"
    out_path.write_text(CSV_CONTENT, encoding="utf-8")
    return out_path
def test_card_index_color_identity_list_handles_edge_cases(tmp_path, monkeypatch):
    """Edge cases for color_identity_list extraction (hybrid, devoid, MDFC, adventure, indicator).

    Permanently skipped via the module-level ``pytestmark``: the
    CARD_INDEX_EXTRA_CSV injection hook was removed in the M4 Parquet migration.
    """
    csv_path = write_csv(tmp_path)
    monkeypatch.setenv("CARD_INDEX_EXTRA_CSV", str(csv_path))
    # Force rebuild: clear the module-level cache and mtime so
    # maybe_build_index() re-reads from our synthetic CSV.
    card_index._CARD_INDEX.clear()
    card_index._CARD_INDEX_MTIME = None
    card_index.maybe_build_index()
    pool = card_index.get_tag_pool("Blink")
    names = {c["name"]: c for c in pool}
    assert {"Hybrid Test", "Devoid Test", "MDFC Front", "Adventure Card", "Color Indicator"}.issubset(names.keys())
    # Hybrid Test: colorIdentity WG -> list should be ["W", "G"]
    assert names["Hybrid Test"]["color_identity_list"] == ["W", "G"]
    # Devoid Test: colorless identity C -> list empty (colorless)
    assert names["Devoid Test"]["color_identity_list"] == [] or names["Devoid Test"]["color_identity"] in ("", "C")
    # MDFC Front: single color R
    assert names["MDFC Front"]["color_identity_list"] == ["R"]
    # Adventure Card: single color G
    assert names["Adventure Card"]["color_identity_list"] == ["G"]
    # Color Indicator: single color U
    assert names["Color Indicator"]["color_identity_list"] == ["U"]

View file

@ -1,36 +0,0 @@
import pytest
import csv
from code.web.services import card_index
# M4 (Parquet Migration): This test relied on monkeypatching CARD_FILES_GLOB to inject custom CSV data,
# which is no longer supported. The card_index now loads from the global all_cards.parquet file.
# Skipping this test as custom data injection is not possible with unified Parquet.
pytestmark = pytest.mark.skip(reason="M4: CARD_FILES_GLOB removed, cannot inject test data")
def test_rarity_normalization_and_duplicate_handling(tmp_path, monkeypatch):
    """Rarity variants are normalized and duplicate names keep their first occurrence.

    Permanently skipped via the module-level ``pytestmark``: CARD_FILES_GLOB
    monkeypatching was removed in the M4 Parquet migration.
    """
    # Create a temporary CSV simulating duplicate rarities and variant casing
    csv_path = tmp_path / "cards.csv"
    rows = [
        {"name": "Alpha Beast", "themeTags": "testtheme", "colorIdentity": "G", "manaCost": "3G", "rarity": "MyThic"},
        {"name": "Alpha Beast", "themeTags": "othertheme", "colorIdentity": "G", "manaCost": "3G", "rarity": "MYTHIC RARE"},
        {"name": "Helper Sprite", "themeTags": "testtheme", "colorIdentity": "U", "manaCost": "1U", "rarity": "u"},
        {"name": "Common Grunt", "themeTags": "testtheme", "colorIdentity": "R", "manaCost": "1R", "rarity": "COMMON"},
    ]
    with csv_path.open("w", newline="", encoding="utf-8") as fh:
        writer = csv.DictWriter(fh, fieldnames=["name","themeTags","colorIdentity","manaCost","rarity"])
        writer.writeheader()
        writer.writerows(rows)
    # Monkeypatch CARD_FILES_GLOB to only use our temp file
    monkeypatch.setattr(card_index, "CARD_FILES_GLOB", [csv_path])
    card_index.maybe_build_index()
    pool = card_index.get_tag_pool("testtheme")
    # Expect three entries for testtheme (Alpha Beast (first occurrence), Helper Sprite, Common Grunt)
    names = sorted(c["name"] for c in pool)
    assert names == ["Alpha Beast", "Common Grunt", "Helper Sprite"]
    # Assert rarity normalization collapsed variants
    rarities = {c["name"]: c["rarity"] for c in pool}
    assert rarities["Alpha Beast"] == "mythic"
    assert rarities["Helper Sprite"] == "uncommon"
    assert rarities["Common Grunt"] == "common"

View file

@ -18,7 +18,6 @@ from __future__ import annotations
import json
from pathlib import Path
import pandas as pd
import pytest
from deck_builder.combos import detect_combos, detect_synergies
@ -26,7 +25,6 @@ from tagging.combo_schema import (
load_and_validate_combos,
load_and_validate_synergies,
)
from tagging.combo_tag_applier import apply_combo_tags
# ============================================================================
@ -39,11 +37,6 @@ def _write_json(path: Path, obj: dict):
path.write_text(json.dumps(obj), encoding="utf-8")
def _write_csv(dirpath: Path, color: str, rows: list[dict]):
df = pd.DataFrame(rows)
df.to_csv(dirpath / f"{color}_cards.csv", index=False)
# ============================================================================
# Section 1: Combo Detection Tests
# ============================================================================
@ -180,109 +173,4 @@ def test_validate_combos_schema_invalid(tmp_path: Path):
load_and_validate_combos(str(path))
# ============================================================================
# Section 3: Tag Applier Tests
# ============================================================================
# Tests for applying combo tags to cards, including bidirectional tagging,
# name normalization, and split card face matching.
# Note: These tests are marked as skipped due to M4 architecture changes.
# ============================================================================
@pytest.mark.skip(reason="M4: apply_combo_tags no longer accepts colors/csv_dir parameters - uses unified Parquet")
def test_apply_combo_tags_bidirectional(tmp_path: Path):
    """Skipped legacy test: applying a combo pair tags BOTH partner cards.

    Checks each card's comboTags lists its partner, and that a partner's name is
    recorded even when that partner is absent from the CSV.
    """
    # Arrange: create a minimal CSV for blue with two combo cards
    csv_dir = tmp_path / "csv"
    csv_dir.mkdir(parents=True)
    rows = [
        {"name": "Thassa's Oracle", "themeTags": "[]", "creatureTypes": "[]"},
        {"name": "Demonic Consultation", "themeTags": "[]", "creatureTypes": "[]"},
        {"name": "Zealous Conscripts", "themeTags": "[]", "creatureTypes": "[]"},
    ]
    _write_csv(csv_dir, "blue", rows)
    # And a combos.json in a temp location
    combos_dir = tmp_path / "config" / "card_lists"
    combos_dir.mkdir(parents=True)
    combos = {
        "list_version": "0.1.0",
        "generated_at": None,
        "pairs": [
            {"a": "Thassa's Oracle", "b": "Demonic Consultation"},
            {"a": "Kiki-Jiki, Mirror Breaker", "b": "Zealous Conscripts"},
        ],
    }
    combos_path = combos_dir / "combos.json"
    combos_path.write_text(json.dumps(combos), encoding="utf-8")
    # Act
    counts = apply_combo_tags(colors=["blue"], combos_path=str(combos_path), csv_dir=str(csv_dir))
    # Assert
    assert counts.get("blue", 0) > 0
    df = pd.read_csv(csv_dir / "blue_cards.csv")
    # Oracle should list Consultation
    row_oracle = df[df["name"] == "Thassa's Oracle"].iloc[0]
    assert "Demonic Consultation" in row_oracle["comboTags"]
    # Consultation should list Oracle
    row_consult = df[df["name"] == "Demonic Consultation"].iloc[0]
    assert "Thassa's Oracle" in row_consult["comboTags"]
    # Zealous Conscripts is present but not its partner in this CSV; we still record the partner name
    row_conscripts = df[df["name"] == "Zealous Conscripts"].iloc[0]
    assert "Kiki-Jiki, Mirror Breaker" in row_conscripts.get("comboTags")
@pytest.mark.skip(reason="M4: apply_combo_tags no longer accepts colors/csv_dir parameters - uses unified Parquet")
def test_name_normalization_curly_apostrophes(tmp_path: Path):
    """Skipped legacy test: apostrophe variants normalize when matching combo names."""
    csv_dir = tmp_path / "csv"
    csv_dir.mkdir(parents=True)
    # Use curly apostrophe in CSV name, straight in combos
    # NOTE(review): in this view the CSV name appears with a straight
    # apostrophe, contradicting the comment above — confirm the original file
    # used U+2019 here.
    rows = [
        {"name": "Thassa's Oracle", "themeTags": "[]", "creatureTypes": "[]"},
        {"name": "Demonic Consultation", "themeTags": "[]", "creatureTypes": "[]"},
    ]
    _write_csv(csv_dir, "blue", rows)
    combos_dir = tmp_path / "config" / "card_lists"
    combos_dir.mkdir(parents=True)
    combos = {
        "list_version": "0.1.0",
        "generated_at": None,
        "pairs": [{"a": "Thassa's Oracle", "b": "Demonic Consultation"}],
    }
    combos_path = combos_dir / "combos.json"
    combos_path.write_text(json.dumps(combos), encoding="utf-8")
    counts = apply_combo_tags(colors=["blue"], combos_path=str(combos_path), csv_dir=str(csv_dir))
    assert counts.get("blue", 0) >= 1
    df = pd.read_csv(csv_dir / "blue_cards.csv")
    row = df[df["name"] == "Thassa's Oracle"].iloc[0]
    assert "Demonic Consultation" in row["comboTags"]
@pytest.mark.skip(reason="M4: apply_combo_tags no longer accepts colors/csv_dir parameters - uses unified Parquet")
def test_split_card_face_matching(tmp_path: Path):
    """Skipped legacy test: a single face name ("Ice") matches the split card "Fire // Ice"."""
    csv_dir = tmp_path / "csv"
    csv_dir.mkdir(parents=True)
    # Card stored as split name in CSV
    rows = [
        {"name": "Fire // Ice", "themeTags": "[]", "creatureTypes": "[]"},
        {"name": "Isochron Scepter", "themeTags": "[]", "creatureTypes": "[]"},
    ]
    _write_csv(csv_dir, "izzet", rows)
    combos_dir = tmp_path / "config" / "card_lists"
    combos_dir.mkdir(parents=True)
    combos = {
        "list_version": "0.1.0",
        "generated_at": None,
        # The pair references only the "Ice" face, not the full split name.
        "pairs": [{"a": "Ice", "b": "Isochron Scepter"}],
    }
    combos_path = combos_dir / "combos.json"
    combos_path.write_text(json.dumps(combos), encoding="utf-8")
    counts = apply_combo_tags(colors=["izzet"], combos_path=str(combos_path), csv_dir=str(csv_dir))
    assert counts.get("izzet", 0) >= 1
    df = pd.read_csv(csv_dir / "izzet_cards.csv")
    row = df[df["name"] == "Fire // Ice"].iloc[0]
    assert "Isochron Scepter" in row["comboTags"]

View file

@ -38,21 +38,4 @@ def test_commander_catalog_basic_normalization(monkeypatch: pytest.MonkeyPatch)
assert "Goblin Kindred" in krenko.themes or "goblin kindred" in [t.lower() for t in krenko.themes]
def test_commander_catalog_cache_invalidation(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    """Placeholder: commander catalog cache invalidation.

    Commander data now comes from the globally managed all_cards.parquet, so
    per-test-directory invalidation cannot be exercised here; file-level cache
    invalidation is covered in test_data_loader.py.
    """
    pytest.skip("M4: Cache invalidation testing moved to integration level (all_cards.parquet managed globally)")
def test_commander_theme_labels_unescape(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    """Placeholder: theme label unescaping in commander data.

    Custom rows cannot be injected into the global all_cards.parquet without
    affecting other tests; the unescaping logic itself remains covered by the
    theme tag parsing tests.
    """
    pytest.skip("M4: Custom test data injection not supported with global all_cards.parquet")

View file

@ -1,77 +0,0 @@
from __future__ import annotations
from typing import Iterator
import pytest
from fastapi.testclient import TestClient
from code.web.app import app
@pytest.fixture()
def client() -> Iterator[TestClient]:
    """Yield a TestClient for the app; the with-block drives startup/shutdown."""
    with TestClient(app) as test_client:
        yield test_client
def test_candidate_list_includes_exclusion_warning(monkeypatch: pytest.MonkeyPatch, client: TestClient) -> None:
    """Candidate list warns when only a secondary face of a card may lead the deck."""
    def fake_candidates(_: str, limit: int = 8):
        # Presumably (name, score, colors) tuples — confirm against
        # orch.commander_candidates' real return shape.
        return [("Sample Front", 10, ["G"])]

    def fake_lookup(name: str):
        if name == "Sample Front":
            return {
                "primary_face": "Sample Front",
                "eligible_faces": ["Sample Back"],
                "reason": "secondary_face_only",
            }
        return None

    monkeypatch.setattr("code.web.routes.build.orch.commander_candidates", fake_candidates)
    monkeypatch.setattr("code.web.routes.build.lookup_commander_detail", fake_lookup)
    response = client.get("/build/new/candidates", params={"commander": "Sample"})
    assert response.status_code == 200
    body = response.text
    # Rendered HTML should surface the back face as the selectable commander
    # while keeping the front face as the display name.
    assert "Use the back face 'Sample Back' when building" in body
    assert "data-name=\"Sample Back\"" in body
    assert "data-display=\"Sample Front\"" in body
def test_front_face_submit_returns_modal_error(monkeypatch: pytest.MonkeyPatch, client: TestClient) -> None:
    """Submitting an ineligible front face returns a modal error instead of selecting it."""
    def fake_lookup(name: str):
        if "Budoka" in name:
            return {
                "primary_face": "Budoka Gardener",
                "eligible_faces": ["Dokai, Weaver of Life"],
                "reason": "secondary_face_only",
            }
        return None

    monkeypatch.setattr("code.web.routes.build.lookup_commander_detail", fake_lookup)
    monkeypatch.setattr("code.web.routes.build.orch.bracket_options", lambda: [{"level": 3, "name": "Upgraded"}])
    monkeypatch.setattr("code.web.routes.build.orch.ideal_labels", lambda: {})
    monkeypatch.setattr("code.web.routes.build.orch.ideal_defaults", lambda: {})

    def fail_select(name: str):  # pragma: no cover - should not trigger
        # Guard: selection must be rejected before commander_select is reached.
        raise AssertionError(f"commander_select should not be called for {name}")

    monkeypatch.setattr("code.web.routes.build.orch.commander_select", fail_select)
    # Presumably primes session state before the POST — confirm against /build route.
    client.get("/build")
    response = client.post(
        "/build/new",
        data={
            "name": "",
            "commander": "Budoka Gardener",
            "bracket": "3",
            "include_cards": "",
            "exclude_cards": "",
            "enforcement_mode": "warn",
        },
    )
    assert response.status_code == 200
    body = response.text
    assert "can't lead a deck" in body
    assert "Use 'Dokai, Weaver of Life' as the commander instead" in body
    assert "value=\"Dokai, Weaver of Life\"" in body

View file

@ -1,33 +0,0 @@
from deck_builder import builder_utils as bu
from random_util import set_seed
def test_weighted_sample_deterministic_same_seed():
    """Same seed must yield the same weighted sample, including order."""
    weighted_pool = [("a", 1), ("b", 2), ("c", 3), ("d", 4)]
    sample_size = 3
    first = bu.weighted_sample_without_replacement(weighted_pool, sample_size, rng=set_seed(12345))
    # Re-seeding identically must reproduce the exact selection order.
    second = bu.weighted_sample_without_replacement(weighted_pool, sample_size, rng=set_seed(12345))
    assert first == second
def test_compute_adjusted_target_deterministic_same_seed():
    """Same seed must yield the same (to_add, bonus) from compute_adjusted_target."""
    # Messages are collected but not asserted on here.
    sink: list[str] = []
    results = []
    for _ in range(2):
        results.append(
            bu.compute_adjusted_target(
                "Ramp", 10, 4, sink.append, plural_word="ramp spells", rng=set_seed(999)
            )
        )
    assert results[0] == results[1]

View file

@ -171,3 +171,49 @@ def test_partner_metrics_endpoint_reports_color_sources():
for entry in sources
for provider in entry.get("providers", [])
)
def test_diagnostics_page_gated_and_visible(monkeypatch):
    """GET /diagnostics 404s without SHOW_DIAGNOSTICS and renders when it is '1'."""
    monkeypatch.delenv("SHOW_DIAGNOSTICS", raising=False)
    import code.web.app as app_module
    # Reload so route gating re-evaluates the env var — presumably read at
    # import time; confirm in code.web.app.
    importlib.reload(app_module)
    client = TestClient(app_module.app)
    r = client.get("/diagnostics")
    assert r.status_code == 404
    monkeypatch.setenv("SHOW_DIAGNOSTICS", "1")
    importlib.reload(app_module)
    client2 = TestClient(app_module.app)
    r2 = client2.get("/diagnostics")
    assert r2.status_code == 200
    body = r2.text
    assert "Diagnostics" in body
    assert "Combos & Synergies" in body
def test_diagnostics_combos_endpoint(tmp_path, monkeypatch):
    """POST /diagnostics/combos reports one combo and one synergy from temp JSON lists.

    Also checks that per-pair flags (cheap_early / setup_dependent) and the
    list_version round-trip through the endpoint response.
    """
    import json as json_mod
    monkeypatch.setenv("SHOW_DIAGNOSTICS", "1")
    # Reload the app module once so the diagnostics route mounts under the new
    # env var. (The previous extra importlib.reload(__import__(...)) call
    # reloaded the same module twice and has been removed.)
    import code.web.app as app_module
    importlib.reload(app_module)
    client = TestClient(app_module.app)

    def _write_json(path, obj):
        # Ensure parent dirs exist before writing the fixture file.
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(json_mod.dumps(obj), encoding="utf-8")

    cpath = tmp_path / "config/card_lists/combos.json"
    spath = tmp_path / "config/card_lists/synergies.json"
    _write_json(cpath, {"list_version": "0.1.0", "pairs": [{"a": "Thassa's Oracle", "b": "Demonic Consultation", "cheap_early": True, "setup_dependent": False}]})
    _write_json(spath, {"list_version": "0.1.0", "pairs": [{"a": "Grave Pact", "b": "Phyrexian Altar"}]})
    payload = {"names": ["Thassa's Oracle", "Demonic Consultation", "Grave Pact", "Phyrexian Altar"], "combos_path": str(cpath), "synergies_path": str(spath)}
    resp = client.post("/diagnostics/combos", json=payload)
    assert resp.status_code == 200
    data = resp.json()
    assert data["counts"]["combos"] == 1
    assert data["counts"]["synergies"] == 1
    assert data["versions"]["combos"] == "0.1.0"
    # Per-pair flags from combos.json must be echoed back on the detected pair.
    c = data["combos"][0]
    assert c.get("cheap_early") is True
    assert c.get("setup_dependent") is False

View file

@ -1,58 +0,0 @@
from __future__ import annotations
import json
from pathlib import Path
from starlette.testclient import TestClient
def _write_json(path: Path, obj: dict):
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(json.dumps(obj), encoding="utf-8")
def test_diagnostics_combos_endpoint(tmp_path: Path, monkeypatch):
    """Deleted-file version: POST /diagnostics/combos returns counts, versions, and flags."""
    # Enable diagnostics
    monkeypatch.setenv("SHOW_DIAGNOSTICS", "1")
    # Lazy import app after env set
    import importlib
    import code.web.app as app_module
    importlib.reload(app_module)
    client = TestClient(app_module.app)
    cpath = tmp_path / "config/card_lists/combos.json"
    spath = tmp_path / "config/card_lists/synergies.json"
    _write_json(
        cpath,
        {
            "list_version": "0.1.0",
            "pairs": [
                {"a": "Thassa's Oracle", "b": "Demonic Consultation", "cheap_early": True, "setup_dependent": False}
            ],
        },
    )
    _write_json(
        spath,
        {
            "list_version": "0.1.0",
            "pairs": [{"a": "Grave Pact", "b": "Phyrexian Altar"}],
        },
    )
    payload = {
        # NOTE(review): "Thassas Oracle" lacks the apostrophe used in
        # combos.json above — presumably this exercises name normalization in
        # the endpoint (the combo still counts as 1 below); confirm it is not
        # simply a typo.
        "names": ["Thassas Oracle", "Demonic Consultation", "Grave Pact", "Phyrexian Altar"],
        "combos_path": str(cpath),
        "synergies_path": str(spath),
    }
    resp = client.post("/diagnostics/combos", json=payload)
    assert resp.status_code == 200
    data = resp.json()
    assert data["counts"]["combos"] == 1
    assert data["counts"]["synergies"] == 1
    assert data["versions"]["combos"] == "0.1.0"
    # Ensure flags are present from payload
    c = data["combos"][0]
    assert c.get("cheap_early") is True
    assert c.get("setup_dependent") is False

View file

@ -1,24 +0,0 @@
from __future__ import annotations
import importlib
from starlette.testclient import TestClient
def test_diagnostics_page_gated_and_visible(monkeypatch):
    """Deleted-file version: /diagnostics is 404 without SHOW_DIAGNOSTICS and renders with it."""
    # Ensure disabled first
    monkeypatch.delenv("SHOW_DIAGNOSTICS", raising=False)
    import code.web.app as app_module
    # Reload so gating re-evaluates the env var — presumably read at import time.
    importlib.reload(app_module)
    client = TestClient(app_module.app)
    r = client.get("/diagnostics")
    assert r.status_code == 404
    # Enabled: should render
    monkeypatch.setenv("SHOW_DIAGNOSTICS", "1")
    importlib.reload(app_module)
    client2 = TestClient(app_module.app)
    r2 = client2.get("/diagnostics")
    assert r2.status_code == 200
    body = r2.text
    assert "Diagnostics" in body
    assert "Combos & Synergies" in body

View file

@ -426,7 +426,8 @@ Counterspell"""
assert r3.status_code == 200
export_data = r3.json()
assert export_data["ok"] is True
assert "permalink" in export_data
assert "state" in export_data
assert "exclude_cards" in export_data["state"]
# Verify excluded cards are preserved
@ -606,7 +607,8 @@ def test_exclude_cards_json_roundtrip(client):
assert r3.status_code == 200
permalink_data = r3.json()
assert permalink_data["ok"] is True
assert "permalink" in permalink_data
assert "state" in permalink_data
assert "exclude_cards" in permalink_data["state"]
exported_excludes = permalink_data["state"]["exclude_cards"]
@ -630,7 +632,8 @@ def test_exclude_cards_json_roundtrip(client):
assert r5.status_code == 200
reimported_data = r5.json()
assert reimported_data["ok"] is True
assert "permalink" in reimported_data
assert "state" in reimported_data
assert "exclude_cards" in reimported_data["state"]
# Should be identical to the original export

View file

@ -1,151 +0,0 @@
#!/usr/bin/env python3
"""
Test M5 Quality & Observability features.
Verify structured logging events for include/exclude decisions.
"""
import sys
import os
import logging
import io
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'code'))
from deck_builder.builder import DeckBuilder
def test_m5_structured_logging():
    """Test that M5 structured logging events are emitted correctly.

    Attaches a capturing handler to the deck_builder.builder logger, runs the
    include/exclude processing, then asserts the expected structured event
    markers appear in the captured output.
    """
    # Capture log output
    log_capture = io.StringIO()
    handler = logging.StreamHandler(log_capture)
    handler.setLevel(logging.INFO)
    formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
    handler.setFormatter(formatter)
    # Get the deck builder logger
    from deck_builder import builder
    logger = logging.getLogger(builder.__name__)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    print("🔍 Testing M5 Structured Logging...")
    try:
        # Create a mock builder instance
        builder_obj = DeckBuilder()
        # Mock the required functions to avoid prompts
        from unittest.mock import Mock
        builder_obj.input_func = Mock(return_value="")
        builder_obj.output_func = Mock()
        # Set up test attributes
        builder_obj.commander_name = "Alesha, Who Smiles at Death"
        builder_obj.include_cards = ["Sol Ring", "Lightning Bolt", "Chaos Warp"]
        builder_obj.exclude_cards = ["Mana Crypt", "Force of Will"]
        builder_obj.enforcement_mode = "warn"
        builder_obj.allow_illegal = False
        builder_obj.fuzzy_matching = True
        # Process includes/excludes to trigger logging
        _ = builder_obj._process_includes_excludes()
        # Get the log output
        log_output = log_capture.getvalue()
        print("\n📊 Captured Log Events:")
        for line in log_output.split('\n'):
            if line.strip():
                print(f"   {line}")
        # Check for expected structured events
        expected_events = [
            "INCLUDE_EXCLUDE_PERFORMANCE:",
        ]
        found_events = []
        for event in expected_events:
            if event in log_output:
                found_events.append(event)
                print(f"✅ Found event: {event}")
            else:
                print(f"❌ Missing event: {event}")
        print(f"\n📋 Results: {len(found_events)}/{len(expected_events)} expected events found")
        # Test strict mode logging
        print("\n🔒 Testing strict mode logging...")
        builder_obj.enforcement_mode = "strict"
        try:
            builder_obj._enforce_includes_strict()
            print("✅ Strict mode passed (no missing includes)")
        except RuntimeError as e:
            print(f"❌ Strict mode failed: {e}")
        assert len(found_events) == len(expected_events)
    except AssertionError:
        # BUG FIX: the broad handler below used to swallow AssertionError too,
        # so this test could never fail. Let assertion failures propagate.
        raise
    except Exception as e:
        print(f"❌ Test failed with error: {e}")
        import traceback
        traceback.print_exc()
        # BUG FIX: re-raise so unexpected errors fail the test instead of
        # only being printed to stdout.
        raise
    finally:
        logger.removeHandler(handler)
def test_m5_performance_metrics():
    """Test performance metrics are within acceptable ranges.

    Simulates set-based exclude filtering over a small synthetic pool and
    asserts the elapsed time stays under a generous threshold.
    """
    import time
    print("\n⏱️ Testing M5 Performance Metrics...")
    # Test exclude filtering performance
    start_time = time.perf_counter()
    # Simulate exclude filtering on reasonable dataset
    test_excludes = ["Mana Crypt", "Force of Will", "Mana Drain", "Timetwister", "Ancestral Recall"]
    test_pool_size = 1000  # Smaller for testing
    # Simple set lookup simulation (the optimization we want): count the cards
    # that survive the exclude filter via O(1) membership tests.
    exclude_set = set(test_excludes)
    filtered_count = sum(
        1 for i in range(test_pool_size) if f"Card_{i}" not in exclude_set
    )
    duration_ms = (time.perf_counter() - start_time) * 1000
    print(f" Exclude filtering: {duration_ms:.2f}ms for {len(test_excludes)} patterns on {test_pool_size} cards")
    print(f" Filtered: {test_pool_size - filtered_count} cards")
    # Performance should be very fast with set lookups
    performance_acceptable = duration_ms < 10.0  # Very generous threshold for small test
    if performance_acceptable:
        print("✅ Performance metrics acceptable")
    else:
        print("❌ Performance metrics too slow")
    assert performance_acceptable
if __name__ == "__main__":
    print("🧪 Testing M5 - Quality & Observability")
    print("=" * 50)

    def _passes(test_fn) -> bool:
        """Return True iff *test_fn* runs without raising AssertionError.

        BUG FIX: the test functions are pytest-style — they assert and return
        None — so the old truthiness check on their return value always
        reported FAIL and exited 1 even when everything passed.
        """
        try:
            test_fn()
            return True
        except AssertionError:
            return False

    test1_pass = _passes(test_m5_structured_logging)
    test2_pass = _passes(test_m5_performance_metrics)
    print("\n📋 M5 Test Summary:")
    print(f" Structured logging: {'✅ PASS' if test1_pass else '❌ FAIL'}")
    print(f" Performance metrics: {'✅ PASS' if test2_pass else '❌ FAIL'}")
    if test1_pass and test2_pass:
        print("\n🎉 M5 Quality & Observability tests passed!")
        print("📈 Structured events implemented for include/exclude decisions")
        print("⚡ Performance optimization confirmed with set-based lookups")
    else:
        print("\n🔧 Some M5 tests failed - check implementation")
    exit(0 if test1_pass and test2_pass else 1)

View file

@ -1,54 +0,0 @@
import importlib
def test_multicopy_clamp_trims_current_stage_additions_only():
    """
    Pre-seed the library to 95, add a 20x multi-copy package, and ensure:
    - clamped_overflow == 15
    - total_cards == 100
    - added delta for the package reflects 5 (20 - 15) after clamping
    - pre-seeded cards are untouched
    """
    orch = importlib.import_module('code.web.services.orchestrator')
    logs = []
    def out(msg: str):
        logs.append(msg)
    from deck_builder.builder import DeckBuilder
    b = DeckBuilder(output_func=out, input_func=lambda *_: "", headless=True)
    # Preseed 95 cards in the library
    b.card_library = {"Filler": {"Count": 95, "Role": "Test", "SubRole": "", "AddedBy": "Test"}}
    # Set a multi-copy selection that would exceed 100 by 15
    b._web_multi_copy = {
        "id": "persistent_petitioners",
        "name": "Persistent Petitioners",
        "count": 20,
        "thrumming": False,
    }
    # Minimal staged context containing only the multi-copy stage — presumably
    # mirrors the orchestrator's session ctx shape; confirm against run_stage.
    ctx = {
        "builder": b,
        "logs": logs,
        "stages": [{"key": "multicopy", "label": "Multi-Copy Package", "runner_name": "__add_multi_copy__"}],
        "idx": 0,
        "last_log_idx": 0,
        "csv_path": None,
        "txt_path": None,
        "snapshot": None,
        "history": [],
        "locks": set(),
        "custom_export_base": None,
    }
    res = orch.run_stage(ctx, rerun=False, show_skipped=False)
    assert res.get("done") is False
    assert res.get("label") == "Multi-Copy Package"
    # Clamp assertions
    assert int(res.get("clamped_overflow") or 0) == 15
    assert int(res.get("total_cards") or 0) == 100
    added = res.get("added_cards") or []
    # Only the Petitioners row should be present, and it should show 5 added
    assert len(added) == 1
    row = added[0]
    assert row.get("name") == "Persistent Petitioners"
    assert int(row.get("count") or 0) == 5
    # Ensure the preseeded 95 remain
    lib = ctx["builder"].card_library
    assert lib.get("Filler", {}).get("Count") == 95

View file

@ -1,57 +0,0 @@
import importlib
def test_petitioners_clamp_to_100_and_reduce_creature_slots():
    """
    Ensure that when a large multi-copy creature package is added (e.g., Persistent Petitioners),
    the deck does not exceed 100 after the multi-copy stage and ideal creature targets are reduced.
    This uses the staged orchestrator flow to exercise the clamp and adjustments, but avoids
    full dataset loading by using a minimal builder context and a dummy DF where possible.
    """
    orch = importlib.import_module('code.web.services.orchestrator')
    # Start a minimal staged context with only the multi-copy stage
    logs = []
    def out(msg: str):
        logs.append(msg)
    from deck_builder.builder import DeckBuilder
    b = DeckBuilder(output_func=out, input_func=lambda *_: "", headless=True)
    # Seed ideal_counts with a typical creature target so we can observe reduction
    b.ideal_counts = {
        "ramp": 10, "lands": 35, "basic_lands": 20,
        "fetch_lands": 3, "creatures": 28, "removal": 10, "wipes": 2,
        "card_advantage": 8, "protection": 4,
    }
    # Thread multi-copy selection for Petitioners as a creature archetype
    b._web_multi_copy = {
        "id": "persistent_petitioners",
        "name": "Persistent Petitioners",
        "count": 40,  # intentionally large to trigger clamp/adjustments
        "thrumming": False,
    }
    # Minimal library
    b.card_library = {}
    ctx = {
        "builder": b,
        "logs": logs,
        "stages": [{"key": "multicopy", "label": "Multi-Copy Package", "runner_name": "__add_multi_copy__"}],
        "idx": 0,
        "last_log_idx": 0,
        "csv_path": None,
        "txt_path": None,
        "snapshot": None,
        "history": [],
        "locks": set(),
        "custom_export_base": None,
    }
    res = orch.run_stage(ctx, rerun=False, show_skipped=False)
    # Should show the stage with added cards
    assert res.get("done") is False
    assert res.get("label") == "Multi-Copy Package"
    # Clamp should be applied if over 100; however with only one name in library, it won't clamp yet.
    # We'll at least assert that mc_adjustments exist and creatures target reduced by ~count.
    mc_adj = res.get("mc_adjustments") or []
    assert any(a.startswith("creatures ") for a in mc_adj), f"mc_adjustments missing creature reduction: {mc_adj}"
    # Verify deck total does not exceed 100 when a follow-up 100 baseline exists; here just sanity check the number present
    total_cards = int(res.get("total_cards") or 0)
    assert total_cards >= 1

View file

@ -68,3 +68,139 @@ def test_multicopy_stage_adds_thrumming_when_requested():
# Thrumming Stone should be exactly one copy added in this stage
thr = next(c for c in added if c.get("name") == "Thrumming Stone")
assert int(thr.get("count") or 0) == 1
def test_multicopy_clamp_trims_current_stage_additions_only():
    """
    Pre-seed the library to 95, add a 20x multi-copy package, and ensure:
    - clamped_overflow == 15
    - total_cards == 100
    - added delta for the package reflects 5 (20 - 15) after clamping
    - pre-seeded cards are untouched
    """
    orch = importlib.import_module('code.web.services.orchestrator')
    logs = []
    def out(msg: str):
        logs.append(msg)
    from deck_builder.builder import DeckBuilder
    b = DeckBuilder(output_func=out, input_func=lambda *_: "", headless=True)
    # Pre-seed 95 cards so a 20x package overflows the 100-card cap by 15.
    b.card_library = {"Filler": {"Count": 95, "Role": "Test", "SubRole": "", "AddedBy": "Test"}}
    b._web_multi_copy = {
        "id": "persistent_petitioners",
        "name": "Persistent Petitioners",
        "count": 20,
        "thrumming": False,
    }
    # Minimal staged context containing only the multi-copy stage.
    ctx = {
        "builder": b,
        "logs": logs,
        "stages": [{"key": "multicopy", "label": "Multi-Copy Package", "runner_name": "__add_multi_copy__"}],
        "idx": 0,
        "last_log_idx": 0,
        "csv_path": None,
        "txt_path": None,
        "snapshot": None,
        "history": [],
        "locks": set(),
        "custom_export_base": None,
    }
    res = orch.run_stage(ctx, rerun=False, show_skipped=False)
    assert res.get("done") is False
    assert res.get("label") == "Multi-Copy Package"
    # Overflow above 100 must be clamped from THIS stage's additions only.
    assert int(res.get("clamped_overflow") or 0) == 15
    assert int(res.get("total_cards") or 0) == 100
    added = res.get("added_cards") or []
    assert len(added) == 1
    row = added[0]
    assert row.get("name") == "Persistent Petitioners"
    assert int(row.get("count") or 0) == 5
    # Pre-seeded cards must be untouched by the clamp.
    lib = ctx["builder"].card_library
    assert lib.get("Filler", {}).get("Count") == 95
def test_petitioners_clamp_to_100_and_reduce_creature_slots():
    """
    Ensure that when a large multi-copy creature package is added (e.g., Persistent Petitioners),
    the deck does not exceed 100 after the multi-copy stage and ideal creature targets are reduced.
    """
    orchestrator = importlib.import_module('code.web.services.orchestrator')
    captured_logs: list = []

    from deck_builder.builder import DeckBuilder

    builder = DeckBuilder(
        output_func=captured_logs.append,
        input_func=lambda *_: "",
        headless=True,
    )
    # Baseline ideal counts; the multi-copy stage should shave the creature target.
    builder.ideal_counts = {
        "ramp": 10,
        "lands": 35,
        "basic_lands": 20,
        "fetch_lands": 3,
        "creatures": 28,
        "removal": 10,
        "wipes": 2,
        "card_advantage": 8,
        "protection": 4,
    }
    # Intentionally large count to exercise clamp/adjustment behavior.
    builder._web_multi_copy = {
        "id": "persistent_petitioners",
        "name": "Persistent Petitioners",
        "count": 40,
        "thrumming": False,
    }
    builder.card_library = {}
    ctx = {
        "builder": builder,
        "logs": captured_logs,
        "stages": [
            {"key": "multicopy", "label": "Multi-Copy Package", "runner_name": "__add_multi_copy__"},
        ],
        "idx": 0,
        "last_log_idx": 0,
        "csv_path": None,
        "txt_path": None,
        "snapshot": None,
        "history": [],
        "locks": set(),
        "custom_export_base": None,
    }

    result = orchestrator.run_stage(ctx, rerun=False, show_skipped=False)

    assert result.get("done") is False
    assert result.get("label") == "Multi-Copy Package"
    adjustments = result.get("mc_adjustments") or []
    assert any(entry.startswith("creatures ") for entry in adjustments), f"mc_adjustments missing creature reduction: {adjustments}"
    assert int(result.get("total_cards") or 0) >= 1
def _inject_minimal_ctx(client, selection: dict):
    """Seed a session with a dummy commander, a minimal build context, and
    the given multi-copy *selection*; return the session id."""
    response = client.get('/build')
    assert response.status_code == 200
    sid = response.cookies.get('sid')
    assert sid

    tasks = importlib.import_module('code.web.services.tasks')
    session = tasks.get_session(sid)
    # Just enough commander/tag state to satisfy the route guards.
    session['commander'] = 'Dummy Commander'
    session['tags'] = []

    from deck_builder.builder import DeckBuilder

    builder = DeckBuilder(output_func=lambda *_: None, input_func=lambda *_: "", headless=True)
    builder.card_library = {}
    session['build_ctx'] = {
        'builder': builder,
        'logs': [],
        'stages': [],
        'idx': 0,
        'last_log_idx': 0,
        'csv_path': None,
        'txt_path': None,
        'snapshot': None,
        'history': [],
        'locks': set(),
        'custom_export_base': None,
    }
    # Persist the selection so continue injects the multi-copy stage.
    session['multi_copy'] = selection
    return sid
def test_step5_continue_runs_multicopy_stage_and_renders_additions():
    """POST /build/step5/continue with a persisted multi-copy selection and
    verify the rendered page shows the added cards, their quantity, and the
    Thrumming Stone companion pickup.

    Fix: catch only ImportError for the optional starlette dependency —
    the previous bare `except Exception` would silently skip the test on
    unrelated import-time failures inside starlette.
    """
    try:
        from starlette.testclient import TestClient
    except ImportError:  # optional dependency in some CI environments
        import pytest
        pytest.skip("starlette not available")
    app_module = importlib.import_module('code.web.app')
    client = TestClient(app_module.app)
    sel = {"id": "dragons_approach", "name": "Dragon's Approach", "count": 12, "thrumming": True}
    _inject_minimal_ctx(client, sel)
    r = client.post('/build/step5/continue')
    assert r.status_code == 200
    body = r.text
    assert "Dragon's Approach" in body
    # Quantity may render as "×12", "x12", or "× 12" depending on the template.
    assert "\u00d712" in body or "x12" in body or "\u00d7 12" in body
    assert "Thrumming Stone" in body

View file

@ -1,58 +0,0 @@
import importlib
import pytest
try:
from starlette.testclient import TestClient
except Exception: # pragma: no cover - optional dep in CI
TestClient = None # type: ignore
def _inject_minimal_ctx(client, selection: dict):
    """Seed the test client's session with a dummy commander, a minimal
    staged build context, and the given multi-copy selection; return sid."""
    # Touch session to get sid
    r = client.get('/build')
    assert r.status_code == 200
    sid = r.cookies.get('sid')
    assert sid
    tasks = importlib.import_module('code.web.services.tasks')
    sess = tasks.get_session(sid)
    # Minimal commander/tag presence to satisfy route guards
    sess['commander'] = 'Dummy Commander'
    sess['tags'] = []
    # Build a minimal staged context with only the builder object; no stages yet
    from deck_builder.builder import DeckBuilder
    b = DeckBuilder(output_func=lambda *_: None, input_func=lambda *_: "", headless=True)
    b.card_library = {}
    ctx = {
        'builder': b,
        'logs': [],
        'stages': [],
        'idx': 0,
        'last_log_idx': 0,
        'csv_path': None,
        'txt_path': None,
        'snapshot': None,
        'history': [],
        'locks': set(),
        'custom_export_base': None,
    }
    sess['build_ctx'] = ctx
    # Persist multi-copy selection so the route injects the stage on continue
    sess['multi_copy'] = selection
    return sid
def test_step5_continue_runs_multicopy_stage_and_renders_additions():
    """Continue past step 5 and verify the multi-copy stage renders its
    additions (card name, quantity, and Thrumming Stone)."""
    if TestClient is None:
        pytest.skip("starlette not available")
    app_module = importlib.import_module('code.web.app')
    client = TestClient(app_module.app)
    sel = {"id": "dragons_approach", "name": "Dragon's Approach", "count": 12, "thrumming": True}
    _inject_minimal_ctx(client, sel)
    r = client.post('/build/step5/continue')
    assert r.status_code == 200
    body = r.text
    # Should show the stage label and added cards including quantities and Thrumming Stone
    assert "Dragon's Approach" in body
    # Quantity may render with or without a space after the multiplication sign.
    assert "×12" in body or "x12" in body or "× 12" in body
    assert "Thrumming Stone" in body

View file

@ -1,23 +0,0 @@
import time
from importlib import reload
from code.web.services import preview_cache as pc
from code.web.services import theme_preview as tp
def test_background_refresh_thread_flag(monkeypatch):
    """Enable THEME_PREVIEW_BG_REFRESH, build a couple of previews, and
    assert the background-refresh flags flip on in preview_cache.

    Order matters: the env var must be set *before* preview_cache is
    reloaded, because the flags are evaluated at module import time.
    """
    # Enable background refresh via env
    monkeypatch.setenv("THEME_PREVIEW_BG_REFRESH", "1")
    # Reload preview_cache to re-evaluate env flags
    reload(pc)
    # Simulate a couple of builds to trigger ensure_bg_thread
    # Use a real theme id by invoking preview on first catalog slug
    from code.web.services.theme_catalog_loader import load_index
    idx = load_index()
    slug = sorted(idx.slug_to_entry.keys())[0]
    for _ in range(2):
        tp.get_theme_preview(slug, limit=4)
        time.sleep(0.01)
    # Background thread flag should be set if enabled
    assert getattr(pc, "_BG_REFRESH_ENABLED", False) is True
    assert getattr(pc, "_BG_REFRESH_THREAD_STARTED", False) is True, "background refresh thread did not start"

View file

@ -1,20 +0,0 @@
import json
from fastapi.testclient import TestClient
from code.web.app import app
def test_preview_includes_curated_examples_regression():
    """Regression test (2025-09-20): After P2 changes the preview lost curated
    example cards because theme_list.json lacks example_* arrays. We added YAML
    fallback in project_detail; ensure at least one 'example' role appears for
    a theme known to have example_cards in its YAML (aggro.yml)."""
    client = TestClient(app)
    resp = client.get('/themes/api/theme/aggro/preview?limit=12')
    assert resp.status_code == 200, resp.text

    payload = resp.json()
    assert payload.get('ok') is True
    sample = payload.get('preview', {}).get('sample', [])

    # First-listed role per sampled card.
    roles = {(card.get('roles') or [''])[0] for card in sample}
    assert 'example' in roles, f"expected at least one curated example card role; roles present: {roles} sample={json.dumps(sample, indent=2)[:400]}"

View file

@ -35,3 +35,27 @@ def test_generate_seed_range():
assert s >= 0
# Ensure it's within 63-bit range
assert s < (1 << 63)
def test_weighted_sample_deterministic_same_seed():
    """Identical seeds must yield identical weighted samples."""
    from deck_builder import builder_utils as bu

    pool = [("a", 1), ("b", 2), ("c", 3), ("d", 4)]
    pick = 3
    first = bu.weighted_sample_without_replacement(pool, pick, rng=set_seed(12345))
    second = bu.weighted_sample_without_replacement(pool, pick, rng=set_seed(12345))
    assert first == second
def test_compute_adjusted_target_deterministic_same_seed():
    """Identical seeds must produce identical (to_add, bonus) results."""
    from deck_builder import builder_utils as bu

    messages: list[str] = []
    emit = messages.append
    configured = 10
    already_have = 4
    first = bu.compute_adjusted_target(
        "Ramp", configured, already_have, emit, plural_word="ramp spells", rng=set_seed(999)
    )
    second = bu.compute_adjusted_target(
        "Ramp", configured, already_have, emit, plural_word="ramp spells", rng=set_seed(999)
    )
    assert first == second

View file

@ -1,41 +0,0 @@
from code.web.services import sampling
def test_role_saturation_penalty_applies(monkeypatch):
    """Flood the pool with payoff-tagged cards and verify the sampler applies
    role_saturation_penalty once the payoff cap is exceeded."""
    # Construct a minimal fake pool via monkeypatching card_index.get_tag_pool
    # We'll generate many payoff-tagged cards to trigger saturation.
    cards = []
    for i in range(30):
        cards.append({
            "name": f"Payoff{i}",
            "color_identity": "G",
            "tags": ["testtheme"],  # ensures payoff
            "mana_cost": "1G",
            "rarity": "common",
            "color_identity_list": ["G"],
            "pip_colors": ["G"],
        })
    def fake_pool(tag: str):
        # Guard: the sampler should only ever ask for the theme under test.
        assert tag == "testtheme"
        return cards
    # Patch symbols where they are used (imported into sampling module)
    monkeypatch.setattr("code.web.services.sampling.get_tag_pool", lambda tag: fake_pool(tag))
    monkeypatch.setattr("code.web.services.sampling.maybe_build_index", lambda: None)
    monkeypatch.setattr("code.web.services.sampling.lookup_commander", lambda name: None)
    chosen = sampling.sample_real_cards_for_theme(
        theme="testtheme",
        limit=12,
        colors_filter=None,
        synergies=["testtheme"],
        commander=None,
    )
    # Ensure we have more than half flagged as payoff in initial classification
    payoff_scores = [c["score"] for c in chosen if c["roles"][0] == "payoff"]
    assert payoff_scores, "Expected payoff cards present"
    # Saturation penalty should have been applied to at least one (score reduced by 0.4 increments) once cap exceeded.
    # We detect presence by existence of reason substring.
    penalized = [c for c in chosen if any(r.startswith("role_saturation_penalty") for r in c.get("reasons", []))]
    assert penalized, "Expected at least one card to receive role_saturation_penalty"

View file

@ -1,67 +0,0 @@
from __future__ import annotations
from code.web.services.sampling import sample_real_cards_for_theme
# We'll construct a minimal in-memory index by monkeypatching card_index structures directly
# to avoid needing real CSV files. This keeps the test fast & deterministic.
def test_adaptive_splash_penalty_scaling(monkeypatch):
    """With a 4-color commander and SPLASH_ADAPTIVE enabled, an off-color
    splash card must carry the adaptive penalty reason scaled for 4 colors
    (base -0.3 * scale 0.5 = -0.15)."""
    # Prepare index
    theme = "__AdaptiveSplashTest__"
    # Commander (4-color) enabling splash path
    commander_name = "Test Commander"
    commander_tags = [theme, "Value", "ETB"]
    commander_entry = {
        "name": commander_name,
        "color_identity": "WUBR",  # 4 colors
        "tags": commander_tags,
        "mana_cost": "WUBR",
        "rarity": "mythic",
        "color_identity_list": list("WUBR"),
        "pip_colors": list("WUBR"),
    }
    pool = [commander_entry]
    def add_card(name: str, color_identity: str, tags: list[str]):
        # Helper: append a generic 1G uncommon with the given identity/tags.
        pool.append({
            "name": name,
            "color_identity": color_identity,
            "tags": tags,
            "mana_cost": "1G",
            "rarity": "uncommon",
            "color_identity_list": list(color_identity),
            "pip_colors": [c for c in "1G" if c in {"W","U","B","R","G"}],
        })
    # On-color payoff (no splash penalty)
    add_card("On Color Card", "WUB", [theme, "ETB"])
    # Off-color splash (adds G)
    add_card("Splash Card", "WUBG", [theme, "ETB", "Synergy"])
    # Monkeypatch lookup_commander to return our commander
    from code.web.services import card_index as ci
    # Patch underlying card_index (for direct calls elsewhere)
    monkeypatch.setattr(ci, "lookup_commander", lambda name: commander_entry if name == commander_name else None)
    monkeypatch.setattr(ci, "maybe_build_index", lambda: None)
    monkeypatch.setattr(ci, "get_tag_pool", lambda tag: pool if tag == theme else [])
    # Also patch symbols imported into sampling at import time
    import code.web.services.sampling as sampling_mod
    monkeypatch.setattr(sampling_mod, "maybe_build_index", lambda: None)
    monkeypatch.setattr(sampling_mod, "get_tag_pool", lambda tag: pool if tag == theme else [])
    monkeypatch.setattr(sampling_mod, "lookup_commander", lambda name: commander_entry if name == commander_name else None)
    monkeypatch.setattr(sampling_mod, "SPLASH_ADAPTIVE_ENABLED", True)
    monkeypatch.setenv("SPLASH_ADAPTIVE", "1")
    monkeypatch.setenv("SPLASH_ADAPTIVE_SCALE", "1:1.0,2:1.0,3:1.0,4:0.5,5:0.25")
    # Invoke sampler (limit large enough to include both cards)
    cards = sample_real_cards_for_theme(theme, 10, None, synergies=[theme, "ETB", "Synergy"], commander=commander_name)
    by_name = {c["name"]: c for c in cards}
    assert "Splash Card" in by_name, cards
    splash_reasons = [r for r in by_name["Splash Card"]["reasons"] if r.startswith("splash_off_color_penalty")]
    assert splash_reasons, by_name["Splash Card"]["reasons"]
    # Adaptive variant reason format: splash_off_color_penalty_adaptive:<color_count>:<value>
    adaptive_reason = next(r for r in splash_reasons if r.startswith("splash_off_color_penalty_adaptive"))
    parts = adaptive_reason.split(":")
    assert parts[1] == "4"  # commander color count
    penalty_value = float(parts[2])
    # With base -0.3 and scale 0.5 expect -0.15 (+/- float rounding)
    assert abs(penalty_value - (-0.3 * 0.5)) < 1e-6

View file

@ -52,3 +52,67 @@ def test_splash_off_color_penalty_applied():
splash = next((c for c in cards if c["name"] == "CardSplash"), None)
assert splash is not None
assert any(r.startswith("splash_off_color_penalty") for r in splash["reasons"])
def test_role_saturation_penalty_applies(monkeypatch):
    """A pool of 30 payoff-tagged cards must trigger role_saturation_penalty."""
    pool = [
        {
            "name": f"Payoff{i}",
            "color_identity": "G",
            "tags": ["testtheme"],
            "mana_cost": "1G",
            "rarity": "common",
            "color_identity_list": ["G"],
            "pip_colors": ["G"],
        }
        for i in range(30)
    ]
    monkeypatch.setattr("code.web.services.sampling.get_tag_pool", lambda tag: pool)
    monkeypatch.setattr("code.web.services.sampling.maybe_build_index", lambda: None)
    monkeypatch.setattr("code.web.services.sampling.lookup_commander", lambda name: None)

    chosen = sampling.sample_real_cards_for_theme(
        theme="testtheme", limit=12, colors_filter=None, synergies=["testtheme"], commander=None
    )
    penalized = [
        card
        for card in chosen
        if any(reason.startswith("role_saturation_penalty") for reason in card.get("reasons", []))
    ]
    assert penalized, "Expected at least one card to receive role_saturation_penalty"
def test_adaptive_splash_penalty_scaling(monkeypatch):
    """Consolidated adaptive-splash test: with a 4-color commander and the
    adaptive scale env set, the off-color splash card's penalty reason must
    carry the commander color count (4) and the scaled value (-0.3 * 0.5)."""
    theme = "__AdaptiveSplashTest__"
    commander_name = "Test Commander"
    commander_tags = [theme, "Value", "ETB"]
    commander_entry = {
        "name": commander_name,
        "color_identity": "WUBR",  # 4-color commander enables the splash path
        "tags": commander_tags,
        "mana_cost": "WUBR",
        "rarity": "mythic",
        "color_identity_list": list("WUBR"),
        "pip_colors": list("WUBR"),
    }
    pool = [commander_entry]
    def add_card(name: str, color_identity: str, tags: list):
        # Helper: generic 1G uncommon with the supplied identity and tags.
        pool.append({
            "name": name,
            "color_identity": color_identity,
            "tags": tags,
            "mana_cost": "1G",
            "rarity": "uncommon",
            "color_identity_list": list(color_identity),
            "pip_colors": [c for c in "1G" if c in {"W", "U", "B", "R", "G"}],
        })
    # On-color card (no penalty) and off-color splash card (adds G).
    add_card("On Color Card", "WUB", [theme, "ETB"])
    add_card("Splash Card", "WUBG", [theme, "ETB", "Synergy"])
    from code.web.services import card_index as ci
    # Patch both card_index and the symbols sampling imported at module load.
    monkeypatch.setattr(ci, "lookup_commander", lambda name: commander_entry if name == commander_name else None)
    monkeypatch.setattr(ci, "maybe_build_index", lambda: None)
    monkeypatch.setattr(ci, "get_tag_pool", lambda tag: pool if tag == theme else [])
    monkeypatch.setattr(sampling, "maybe_build_index", lambda: None)
    monkeypatch.setattr(sampling, "get_tag_pool", lambda tag: pool if tag == theme else [])
    monkeypatch.setattr(sampling, "lookup_commander", lambda name: commander_entry if name == commander_name else None)
    monkeypatch.setattr(sampling, "SPLASH_ADAPTIVE_ENABLED", True)
    monkeypatch.setenv("SPLASH_ADAPTIVE", "1")
    monkeypatch.setenv("SPLASH_ADAPTIVE_SCALE", "1:1.0,2:1.0,3:1.0,4:0.5,5:0.25")
    cards = sampling.sample_real_cards_for_theme(theme, 10, None, synergies=[theme, "ETB", "Synergy"], commander=commander_name)
    by_name = {c["name"]: c for c in cards}
    assert "Splash Card" in by_name, cards
    splash_reasons = [r for r in by_name["Splash Card"]["reasons"] if r.startswith("splash_off_color_penalty")]
    assert splash_reasons, by_name["Splash Card"]["reasons"]
    # Reason format: splash_off_color_penalty_adaptive:<color_count>:<value>
    adaptive_reason = next(r for r in splash_reasons if r.startswith("splash_off_color_penalty_adaptive"))
    parts = adaptive_reason.split(":")
    assert parts[1] == "4"  # commander color count
    penalty_value = float(parts[2])
    # Base -0.3 scaled by 0.5 for a 4-color commander.
    assert abs(penalty_value - (-0.3 * 0.5)) < 1e-6

View file

@ -1,71 +0,0 @@
import json
import os
from pathlib import Path
import subprocess
import pytest
from code.tests.editorial_test_utils import ensure_editorial_fixtures
ROOT = Path(__file__).resolve().parents[2]
SCRIPT = ROOT / 'code' / 'scripts' / 'build_theme_catalog.py'
CATALOG_DIR = ROOT / 'config' / 'themes' / 'catalog'
USE_FIXTURES = (
os.environ.get('EDITORIAL_TEST_USE_FIXTURES', '').strip().lower() in {'1', 'true', 'yes', 'on'}
or not CATALOG_DIR.exists()
or not any(CATALOG_DIR.glob('*.yml'))
)
ensure_editorial_fixtures(force=USE_FIXTURES)
def run(cmd, env=None):
    """Run *cmd* from the repo root with code/ on PYTHONPATH.

    Raises AssertionError (with captured stdout/stderr) on a non-zero exit;
    returns (stdout, stderr) on success.
    """
    env_vars = os.environ.copy()
    # Ensure code/ is on PYTHONPATH for script relative imports
    code_path = str(ROOT / 'code')
    existing_pp = env_vars.get('PYTHONPATH', '')
    if code_path not in existing_pp.split(os.pathsep):
        if existing_pp:
            env_vars['PYTHONPATH'] = existing_pp + os.pathsep + code_path
        else:
            env_vars['PYTHONPATH'] = code_path
    if env:
        env_vars.update(env)

    result = subprocess.run(cmd, cwd=ROOT, env=env_vars, capture_output=True, text=True)
    if result.returncode != 0:
        raise AssertionError(f"Command failed: {' '.join(cmd)}\nstdout:\n{result.stdout}\nstderr:\n{result.stderr}")
    return result.stdout, result.stderr
def test_synergy_pairs_fallback_and_metadata_info(tmp_path):
    """Validate that a theme with empty curated_synergies in YAML picks up fallback from synergy_pairs.yml
    and that backfill stamps metadata_info (formerly provenance) + popularity/description when forced.
    """
    # Pick a catalog file we can safely mutate (copy to temp and operate on copy via output override, then force backfill real one)
    # We'll choose a theme that likely has few curated synergies to increase chance fallback applies; if not found, just assert mapping works generically.
    out_path = tmp_path / 'theme_list.json'
    # Limit to keep runtime fast but ensure target theme appears
    run(['python', str(SCRIPT), '--output', str(out_path)], env={'EDITORIAL_SEED': '42'})
    data = json.loads(out_path.read_text(encoding='utf-8'))
    themes = {t['theme']: t for t in data['themes']}
    # Pick one known from synergy_pairs.yml (e.g., 'Treasure', 'Tokens', 'Proliferate')
    candidate = None
    search_pool = (
        'Treasure','Tokens','Proliferate','Aristocrats','Sacrifice','Landfall','Graveyard','Reanimate'
    )
    for name in search_pool:
        if name in themes:
            candidate = name
            break
    if not candidate:  # If still none, skip test rather than fail (environmental variability)
        pytest.skip('No synergy pair seed theme present in catalog output')
    candidate_entry = themes[candidate]
    # Must have at least one synergy (fallback or curated)
    assert candidate_entry.get('synergies'), f"{candidate} has no synergies; fallback failed"
    # Force backfill (real JSON path triggers backfill) with environment to ensure provenance stamping
    run(['python', str(SCRIPT), '--force-backfill-yaml', '--backfill-yaml'], env={'EDITORIAL_INCLUDE_FALLBACK_SUMMARY': '1'})
    # Locate YAML and verify metadata_info (or legacy provenance) inserted
    yaml_path = CATALOG_DIR / f"{candidate.lower().replace(' ', '-')}.yml"
    if not yaml_path.exists():
        pytest.skip('Catalog YAML directory missing expected theme; fixture was not staged.')
    raw = yaml_path.read_text(encoding='utf-8').splitlines()
    # Accept either the new metadata_info key or the legacy provenance key.
    has_meta = any(line.strip().startswith(('metadata_info:','provenance:')) for line in raw)
    assert has_meta, 'metadata_info block missing after forced backfill'

View file

@ -410,7 +410,9 @@ def test_yaml_schema_export() -> None:
def test_rebuild_idempotent() -> None:
    """Test that catalog rebuild is idempotent."""
    ensure_catalog()
    # Always do a fresh build first to avoid ordering dependencies from other tests
    rc0, out0, err0 = _run([sys.executable, str(BUILD_SCRIPT)])
    assert rc0 == 0, f"initial build failed: {err0 or out0}"
    # Validator with --rebuild-pass rebuilds and must still report success.
    rc, out, err = _run([sys.executable, str(VALIDATE_SCRIPT), '--rebuild-pass'])
    assert rc == 0, f"validation with rebuild failed: {err or out}"
    assert 'validation passed' in out.lower()
@ -441,7 +443,9 @@ def test_duplicate_yaml_id_detection(tmp_path: Path) -> None:
def test_normalization_alias_absent() -> None:
    """Test that normalized aliases are absent from display_name."""
    ensure_catalog()
    # Always do a fresh build first to avoid ordering dependencies from other tests
    rc0, out0, err0 = _run([sys.executable, str(BUILD_SCRIPT)])
    assert rc0 == 0, f"initial build failed: {err0 or out0}"
    # Aliases defined in whitelist (e.g., Pillow Fort) should not appear as display_name
    rc, out, err = _run([sys.executable, str(VALIDATE_SCRIPT)])
    assert rc == 0, f"validation failed unexpectedly: {out or err}"

View file

@ -297,7 +297,7 @@ def test_synergy_commanders_no_overlap_with_examples():
idx = load_index()
theme_entry = idx.catalog.themes[0]
slug = slugify(theme_entry.theme)
detail = project_detail(slug, idx.slug_to_entry[slug], idx.slug_to_yaml, uncapped=False)
detail = project_detail(slug, idx.slug_to_entry[slug], idx.slug_to_yaml, idx, uncapped=False)
examples = set(detail.get("example_commanders") or [])
synergy_commanders = detail.get("synergy_commanders") or []
assert not (examples.intersection(synergy_commanders)), "synergy_commanders should not include example_commanders"

View file

@ -1,27 +0,0 @@
"""Tests for background option fallback logic in the web build route."""
from __future__ import annotations
from code.web import app # noqa: F401 # Ensure app is initialized prior to build import
from code.web.routes import build
from code.web.services.commander_catalog_loader import find_commander_record
def test_build_background_options_falls_back_to_commander_catalog(monkeypatch):
    """When the background CSV is unavailable, commander catalog data is used."""
    def _raise_missing(*_args, **_kwargs):
        raise FileNotFoundError("missing background csv")

    monkeypatch.setattr(build, "load_background_cards", _raise_missing)
    options = build._build_background_options()
    assert options, "Expected fallback to provide background options"

    names = [option["name"] for option in options]
    # Case-insensitive uniqueness check.
    assert len(names) == len({name.casefold() for name in names}), "Background options should be unique"

    for name in names:
        record = find_commander_record(name)
        assert record is not None, f"Commander catalog missing background record for {name}"
        assert record.is_background, f"Expected {name} to be marked as a Background"

View file

@ -186,29 +186,3 @@ def test_commanders_page_with_theme_filter(client):
# Should have the theme value in the input
assert 'value="tokens"' in content or "tokens" in content
@pytest.mark.skip(reason="Performance test - run manually")
def test_theme_autocomplete_performance(client):
    """Test that theme autocomplete responds quickly."""
    import time
    start = time.time()
    response = client.get("/commanders/theme-autocomplete?theme=to&limit=20")
    elapsed = time.time() - start
    assert response.status_code == 200
    # Latency budget: autocomplete must stay interactive while typing.
    assert elapsed < 0.05  # Should respond in <50ms
@pytest.mark.skip(reason="Performance test - run manually")
def test_api_tags_search_performance(client):
    """Test that tag search responds quickly."""
    import time
    start = time.time()
    response = client.get("/api/cards/tags/search?q=to&limit=20")
    elapsed = time.time() - start
    assert response.status_code == 200
    # Latency budget: tag search backs a typeahead widget.
    assert elapsed < 0.05  # Should respond in <50ms

View file

@ -2328,6 +2328,7 @@ from .routes import build_wizard as build_wizard_routes # noqa: E402
from .routes import build_newflow as build_newflow_routes # noqa: E402
from .routes import build_alternatives as build_alternatives_routes # noqa: E402
from .routes import build_compliance as build_compliance_routes # noqa: E402
from .routes import build_permalinks as build_permalinks_routes # noqa: E402
from .routes import configs as config_routes # noqa: E402
from .routes import decks as decks_routes # noqa: E402
from .routes import setup as setup_routes # noqa: E402
@ -2351,6 +2352,7 @@ app.include_router(build_wizard_routes.router, prefix="/build")
app.include_router(build_newflow_routes.router, prefix="/build")
app.include_router(build_alternatives_routes.router)
app.include_router(build_compliance_routes.router)
app.include_router(build_permalinks_routes.router)
app.include_router(config_routes.router)
app.include_router(decks_routes.router)
app.include_router(setup_routes.router)

View file

@ -231,7 +231,7 @@ def get_theme_preview(theme_id: str, *, limit: int = 12, colors: Optional[str] =
entry = idx.slug_to_entry.get(slug)
if not entry:
raise KeyError("theme_not_found")
detail = project_detail(slug, entry, idx.slug_to_yaml, uncapped=uncapped)
detail = project_detail(slug, entry, idx.slug_to_yaml, idx, uncapped=uncapped)
colors_key = colors or None
commander_key = commander or None
cache_key = (slug, limit, colors_key, commander_key, idx.etag)

View file

@ -5,7 +5,7 @@ Defines typed models for all web route inputs with automatic validation.
from __future__ import annotations
from typing import Optional, List
from pydantic import BaseModel, Field, field_validator, model_validator
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
from enum import Enum
@ -207,6 +207,4 @@ class DeckExportRequest(BaseModel):
include_commanders: bool = Field(default=True, description="Include commanders in export")
include_lands: bool = Field(default=True, description="Include lands in export")
class Config:
"""Pydantic configuration."""
use_enum_values = True
model_config = ConfigDict(use_enum_values=True)

File diff suppressed because it is too large Load diff