Bracket enforcement + inline gating; global pool prune; compliance JSON artifacts; UI combos gating; compose envs consolidated; fix YAML; bump version to 2.2.5

mwisnowski 2025-09-03 18:00:06 -07:00
parent 42c8fc9f9e
commit 4e03997923
32 changed files with 2819 additions and 125 deletions


@@ -380,6 +380,8 @@ class CreatureAdditionMixin:
commander_name = getattr(self, 'commander', None) or getattr(self, 'commander_name', None)
if commander_name and 'name' in creature_df.columns:
creature_df = creature_df[creature_df['name'] != commander_name]
# Apply bracket-based pre-filters (e.g., disallow game changers or tutors when bracket limit == 0)
creature_df = self._apply_bracket_pre_filters(creature_df)
if creature_df.empty:
return None
if '_parsedThemeTags' not in creature_df.columns:
@@ -392,6 +394,66 @@ class CreatureAdditionMixin:
creature_df['_multiMatch'] = creature_df['_normTags'].apply(lambda lst: sum(1 for t in selected_tags_lower if t in lst))
return creature_df
def _apply_bracket_pre_filters(self, df):
"""Preemptively filter disallowed categories for the current bracket for creatures.
Excludes when bracket limit == 0 for a category:
- Game Changers
- Nonland Tutors
Note: Extra Turns and Mass Land Denial generally don't apply to creature cards,
but if present as tags, they'll be respected too.
"""
try:
if df is None or getattr(df, 'empty', False):
return df
limits = getattr(self, 'bracket_limits', {}) or {}
disallow = {
'game_changers': (limits.get('game_changers') is not None and int(limits.get('game_changers')) == 0),
'tutors_nonland': (limits.get('tutors_nonland') is not None and int(limits.get('tutors_nonland')) == 0),
'extra_turns': (limits.get('extra_turns') is not None and int(limits.get('extra_turns')) == 0),
'mass_land_denial': (limits.get('mass_land_denial') is not None and int(limits.get('mass_land_denial')) == 0),
}
if not any(disallow.values()):
return df
def norm_tags(val):
try:
return [str(t).strip().lower() for t in (val or [])]
except Exception:
return []
if '_ltags' not in df.columns:
try:
if 'themeTags' in df.columns:
df = df.copy()
df['_ltags'] = df['themeTags'].apply(bu.normalize_tag_cell)
except Exception:
pass
tag_col = '_ltags' if '_ltags' in df.columns else ('themeTags' if 'themeTags' in df.columns else None)
if not tag_col:
return df
syn = {
'game_changers': { 'bracket:gamechanger', 'gamechanger', 'game-changer', 'game changer' },
'tutors_nonland': { 'bracket:tutornonland', 'tutor', 'tutors', 'nonland tutor', 'non-land tutor' },
'extra_turns': { 'bracket:extraturn', 'extra turn', 'extra turns', 'extraturn' },
'mass_land_denial': { 'bracket:masslanddenial', 'mass land denial', 'mld', 'masslanddenial' },
}
tags_series = df[tag_col].apply(norm_tags)
mask_keep = [True] * len(df)
for cat, dis in disallow.items():
if not dis:
continue
needles = syn.get(cat, set())
drop_idx = tags_series.apply(lambda lst, nd=needles: any(any(n in t for n in nd) for t in lst))
mask_keep = [mk and (not di) for mk, di in zip(mask_keep, drop_idx.tolist())]
try:
import pandas as _pd # type: ignore
mask_keep = _pd.Series(mask_keep, index=df.index)
except Exception:
pass
return df[mask_keep]
except Exception:
return df
def _add_creatures_for_role(self, role: str):
"""Add creatures for a single theme role ('primary'|'secondary'|'tertiary')."""
df = getattr(self, '_combined_cards_df', None)
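For readers skimming the diff, here is a minimal, self-contained sketch of the tag-substring pruning that both the creature and spell versions of _apply_bracket_pre_filters perform. The function, column values, and card names below are illustrative assumptions; the real methods additionally reuse a pre-normalized _ltags column via bu.normalize_tag_cell, handle missing columns, and cover extra turns and mass land denial as well.

import pandas as pd

# Illustrative synonym sets; the committed code carries a larger table.
SYNONYMS = {
    "game_changers": {"bracket:gamechanger", "game changer", "game-changer"},
    "tutors_nonland": {"bracket:tutornonland", "tutor", "nonland tutor"},
}

def apply_bracket_pre_filters(df: pd.DataFrame, limits: dict) -> pd.DataFrame:
    """Drop rows whose theme tags match any category whose bracket limit is 0."""
    disallowed = {cat for cat, lim in limits.items() if lim == 0 and cat in SYNONYMS}
    if not disallowed or "themeTags" not in df.columns:
        return df

    def is_blocked(tags) -> bool:
        norm = [str(t).strip().lower() for t in (tags or [])]
        return any(
            needle in tag
            for cat in disallowed
            for needle in SYNONYMS[cat]
            for tag in norm
        )

    return df[~df["themeTags"].apply(is_blocked)]

# With nonland tutors capped at 0, the tutor is pruned before selection runs.
pool = pd.DataFrame({
    "name": ["Demonic Tutor", "Llanowar Elves"],
    "themeTags": [["Tutor"], ["Ramp"]],
})
print(apply_bracket_pre_filters(pool, {"tutors_nonland": 0})["name"].tolist())
# -> ['Llanowar Elves']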


@@ -2,6 +2,7 @@ from __future__ import annotations
import math
from typing import List, Dict
import os
from .. import builder_utils as bu
from .. import builder_constants as bc
@@ -16,6 +17,99 @@ class SpellAdditionMixin:
(e.g., further per-category sub-mixins) can split this class if complexity grows.
"""
def _apply_bracket_pre_filters(self, df):
"""Preemptively filter disallowed categories for the current bracket.
Excludes when bracket limit == 0 for a category:
- Game Changers
- Extra Turns
- Mass Land Denial (MLD)
- Nonland Tutors
"""
try:
if df is None or getattr(df, 'empty', False):
return df
limits = getattr(self, 'bracket_limits', {}) or {}
# Determine which categories are hard-disallowed
disallow = {
'game_changers': (limits.get('game_changers') is not None and int(limits.get('game_changers')) == 0),
'extra_turns': (limits.get('extra_turns') is not None and int(limits.get('extra_turns')) == 0),
'mass_land_denial': (limits.get('mass_land_denial') is not None and int(limits.get('mass_land_denial')) == 0),
'tutors_nonland': (limits.get('tutors_nonland') is not None and int(limits.get('tutors_nonland')) == 0),
}
if not any(disallow.values()):
return df
# Normalize tags helper
def norm_tags(val):
try:
return [str(t).strip().lower() for t in (val or [])]
except Exception:
return []
# Build predicate masks only if column exists
if '_ltags' not in df.columns:
try:
from .. import builder_utils as _bu
if 'themeTags' in df.columns:
df = df.copy()
df['_ltags'] = df['themeTags'].apply(_bu.normalize_tag_cell)
except Exception:
pass
def has_any(tags, needles):
return any((nd in t) for t in tags for nd in needles)
tag_col = '_ltags' if '_ltags' in df.columns else ('themeTags' if 'themeTags' in df.columns else None)
if not tag_col:
return df
# Define synonyms per category
syn = {
'game_changers': { 'bracket:gamechanger', 'gamechanger', 'game-changer', 'game changer' },
'extra_turns': { 'bracket:extraturn', 'extra turn', 'extra turns', 'extraturn' },
'mass_land_denial': { 'bracket:masslanddenial', 'mass land denial', 'mld', 'masslanddenial' },
'tutors_nonland': { 'bracket:tutornonland', 'tutor', 'tutors', 'nonland tutor', 'non-land tutor' },
}
# Build exclusion mask
mask_keep = [True] * len(df)
tags_series = df[tag_col].apply(norm_tags)
for cat, dis in disallow.items():
if not dis:
continue
needles = syn.get(cat, set())
drop_idx = tags_series.apply(lambda lst, nd=needles: any(any(n in t for n in nd) for t in lst))
# Combine into keep mask
mask_keep = [mk and (not di) for mk, di in zip(mask_keep, drop_idx.tolist())]
try:
import pandas as _pd # type: ignore
mask_keep = _pd.Series(mask_keep, index=df.index)
except Exception:
pass
return df[mask_keep]
except Exception:
return df
def _debug_dump_pool(self, df, label: str) -> None:
"""If DEBUG_SPELL_POOLS_WRITE is set, write the pool to logs/pool_{label}_{timestamp}.csv"""
try:
if str(os.getenv('DEBUG_SPELL_POOLS_WRITE', '')).strip().lower() not in {"1","true","yes","on"}:
return
import os as _os
from datetime import datetime as _dt
_os.makedirs('logs', exist_ok=True)
ts = getattr(self, 'timestamp', _dt.now().strftime('%Y%m%d%H%M%S'))
path = _os.path.join('logs', f"pool_{label}_{ts}.csv")
cols = [c for c in ['name','type','manaValue','manaCost','edhrecRank','themeTags'] if c in df.columns]
try:
if cols:
df[cols].to_csv(path, index=False, encoding='utf-8')
else:
df.to_csv(path, index=False, encoding='utf-8')
except Exception:
df.to_csv(path, index=False)
try:
self.output_func(f"[DEBUG] Wrote pool CSV: {path} ({len(df)})")
except Exception:
pass
except Exception:
pass
# ---------------------------
# Ramp
# ---------------------------
@@ -56,7 +150,16 @@ class SpellAdditionMixin:
commander_name = getattr(self, 'commander', None)
if commander_name:
work = work[work['name'] != commander_name]
work = self._apply_bracket_pre_filters(work)
work = bu.sort_by_priority(work, ['edhrecRank','manaValue'])
self._debug_dump_pool(work, 'ramp_all')
# Debug: print ramp pool details
try:
if str(os.getenv('DEBUG_SPELL_POOLS', '')).strip().lower() in {"1","true","yes","on"}:
names = work['name'].astype(str).head(30).tolist()
self.output_func(f"[DEBUG][Ramp] Total pool (non-lands): {len(work)}; top {len(names)}: {', '.join(names)}")
except Exception:
pass
# Prefer-owned bias: stable reorder to put owned first while preserving prior sort
if getattr(self, 'prefer_owned', False):
owned_set = getattr(self, 'owned_card_names', None)
@@ -97,10 +200,24 @@ class SpellAdditionMixin:
return added_now
rocks_pool = work[work['type'].fillna('').str.contains('Artifact', case=False, na=False)]
try:
if str(os.getenv('DEBUG_SPELL_POOLS', '')).strip().lower() in {"1","true","yes","on"}:
rnames = rocks_pool['name'].astype(str).head(25).tolist()
self.output_func(f"[DEBUG][Ramp] Rocks pool: {len(rocks_pool)}; sample: {', '.join(rnames)}")
except Exception:
pass
self._debug_dump_pool(rocks_pool, 'ramp_rocks')
if rocks_target > 0:
add_from_pool(rocks_pool, rocks_target, added_rocks, 'Rocks')
dorks_pool = work[work['type'].fillna('').str.contains('Creature', case=False, na=False)]
try:
if str(os.getenv('DEBUG_SPELL_POOLS', '')).strip().lower() in {"1","true","yes","on"}:
dnames = dorks_pool['name'].astype(str).head(25).tolist()
self.output_func(f"[DEBUG][Ramp] Dorks pool: {len(dorks_pool)}; sample: {', '.join(dnames)}")
except Exception:
pass
self._debug_dump_pool(dorks_pool, 'ramp_dorks')
if dorks_target > 0:
add_from_pool(dorks_pool, dorks_target, added_dorks, 'Dorks')
@@ -108,6 +225,13 @@ class SpellAdditionMixin:
remaining = target_total - current_total
if remaining > 0:
general_pool = work[~work['name'].isin(added_rocks + added_dorks)]
try:
if str(os.getenv('DEBUG_SPELL_POOLS', '')).strip().lower() in {"1","true","yes","on"}:
gnames = general_pool['name'].astype(str).head(25).tolist()
self.output_func(f"[DEBUG][Ramp] General pool (remaining): {len(general_pool)}; sample: {', '.join(gnames)}")
except Exception:
pass
self._debug_dump_pool(general_pool, 'ramp_general')
add_from_pool(general_pool, remaining, added_general, 'General')
total_added_now = len(added_rocks)+len(added_dorks)+len(added_general)
@@ -148,7 +272,15 @@ class SpellAdditionMixin:
commander_name = getattr(self, 'commander', None)
if commander_name:
pool = pool[pool['name'] != commander_name]
pool = self._apply_bracket_pre_filters(pool)
pool = bu.sort_by_priority(pool, ['edhrecRank','manaValue'])
self._debug_dump_pool(pool, 'removal')
try:
if str(os.getenv('DEBUG_SPELL_POOLS', '')).strip().lower() in {"1","true","yes","on"}:
names = pool['name'].astype(str).head(40).tolist()
self.output_func(f"[DEBUG][Removal] Pool size: {len(pool)}; top {len(names)}: {', '.join(names)}")
except Exception:
pass
if getattr(self, 'prefer_owned', False):
owned_set = getattr(self, 'owned_card_names', None)
if owned_set:
@@ -210,7 +342,15 @@ class SpellAdditionMixin:
commander_name = getattr(self, 'commander', None)
if commander_name:
pool = pool[pool['name'] != commander_name]
pool = self._apply_bracket_pre_filters(pool)
pool = bu.sort_by_priority(pool, ['edhrecRank','manaValue'])
self._debug_dump_pool(pool, 'wipes')
try:
if str(os.getenv('DEBUG_SPELL_POOLS', '')).strip().lower() in {"1","true","yes","on"}:
names = pool['name'].astype(str).head(30).tolist()
self.output_func(f"[DEBUG][Wipes] Pool size: {len(pool)}; sample: {', '.join(names)}")
except Exception:
pass
if getattr(self, 'prefer_owned', False):
owned_set = getattr(self, 'owned_card_names', None)
if owned_set:
@@ -278,6 +418,7 @@ class SpellAdditionMixin:
def is_draw(tags):
return any(('draw' in t) or ('card advantage' in t) for t in tags)
df = df[df['_ltags'].apply(is_draw)]
df = self._apply_bracket_pre_filters(df)
df = df[~df['type'].fillna('').str.contains('Land', case=False, na=False)]
commander_name = getattr(self, 'commander', None)
if commander_name:
@@ -291,6 +432,19 @@ class SpellAdditionMixin:
return bu.sort_by_priority(d, ['edhrecRank','manaValue'])
conditional_df = sortit(conditional_df)
unconditional_df = sortit(unconditional_df)
self._debug_dump_pool(conditional_df, 'card_advantage_conditional')
self._debug_dump_pool(unconditional_df, 'card_advantage_unconditional')
try:
if str(os.getenv('DEBUG_SPELL_POOLS', '')).strip().lower() in {"1","true","yes","on"}:
c_names = conditional_df['name'].astype(str).head(30).tolist()
u_names = unconditional_df['name'].astype(str).head(30).tolist()
self.output_func(f"[DEBUG][CardAdv] Total pool: {len(df)}; conditional: {len(conditional_df)}; unconditional: {len(unconditional_df)}")
if c_names:
self.output_func(f"[DEBUG][CardAdv] Conditional sample: {', '.join(c_names)}")
if u_names:
self.output_func(f"[DEBUG][CardAdv] Unconditional sample: {', '.join(u_names)}")
except Exception:
pass
if getattr(self, 'prefer_owned', False):
owned_set = getattr(self, 'owned_card_names', None)
if owned_set:
@@ -368,7 +522,15 @@ class SpellAdditionMixin:
commander_name = getattr(self, 'commander', None)
if commander_name:
pool = pool[pool['name'] != commander_name]
pool = self._apply_bracket_pre_filters(pool)
pool = bu.sort_by_priority(pool, ['edhrecRank','manaValue'])
self._debug_dump_pool(pool, 'protection')
try:
if str(os.getenv('DEBUG_SPELL_POOLS', '')).strip().lower() in {"1","true","yes","on"}:
names = pool['name'].astype(str).head(30).tolist()
self.output_func(f"[DEBUG][Protection] Pool size: {len(pool)}; sample: {', '.join(names)}")
except Exception:
pass
if getattr(self, 'prefer_owned', False):
owned_set = getattr(self, 'owned_card_names', None)
if owned_set:
@@ -467,6 +629,7 @@ class SpellAdditionMixin:
~df['type'].str.contains('Land', case=False, na=False)
& ~df['type'].str.contains('Creature', case=False, na=False)
].copy()
spells_df = self._apply_bracket_pre_filters(spells_df)
if spells_df.empty:
return
selected_tags_lower = [t.lower() for _r, t in themes_ordered]
@@ -521,6 +684,7 @@ class SpellAdditionMixin:
if owned_set:
subset = bu.prefer_owned_first(subset, {str(n).lower() for n in owned_set})
pool = subset.head(top_n).copy()
pool = self._apply_bracket_pre_filters(pool)
pool = pool[~pool['name'].isin(self.card_library.keys())]
if pool.empty:
continue
@@ -563,6 +727,7 @@ class SpellAdditionMixin:
if total_added < remaining:
need = remaining - total_added
multi_pool = spells_df[~spells_df['name'].isin(self.card_library.keys())].copy()
multi_pool = self._apply_bracket_pre_filters(multi_pool)
if combine_mode == 'AND' and len(selected_tags_lower) > 1:
prioritized = multi_pool[multi_pool['_multiMatch'] >= 2]
if prioritized.empty:
@@ -607,6 +772,7 @@ class SpellAdditionMixin:
if total_added < remaining:
extra_needed = remaining - total_added
leftover = spells_df[~spells_df['name'].isin(self.card_library.keys())].copy()
leftover = self._apply_bracket_pre_filters(leftover)
if not leftover.empty:
if '_normTags' not in leftover.columns:
leftover['_normTags'] = leftover['themeTags'].apply(
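As a quick reference for the debug hooks threaded through this file: the pool summaries and CSV snapshots are opt-in via two environment flags. Below is a small sketch of the truthy check they share; the helper name is illustrative, not part of the committed code.

import os

_TRUTHY = {"1", "true", "yes", "on"}

def _env_on(name: str) -> bool:
    # Mirrors the flag parsing used above: absent or empty values are off.
    return str(os.getenv(name, "")).strip().lower() in _TRUTHY

if _env_on("DEBUG_SPELL_POOLS"):
    print("Verbose pool summaries print during ramp/removal/wipes/draw/protection selection.")
if _env_on("DEBUG_SPELL_POOLS_WRITE"):
    print("Pool snapshots are written to logs/pool_<label>_<timestamp>.csv.")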


@@ -26,6 +26,176 @@ class ReportingMixin:
self.print_card_library(table=True)
"""Phase 6: Reporting, summaries, and export helpers."""
def enforce_and_reexport(self, base_stem: str | None = None, mode: str = "prompt") -> dict:
"""Run bracket enforcement, then re-export CSV/TXT and recompute compliance.
mode: 'prompt' for CLI interactive; 'auto' for headless/web.
Returns the final compliance report dict.
"""
try:
# Lazy import to avoid cycles
from deck_builder.enforcement import enforce_bracket_compliance # type: ignore
except Exception:
self.output_func("Enforcement module unavailable.")
return {}
# Enforce
report = enforce_bracket_compliance(self, mode=mode)
# If enforcement removed cards without enough replacements, top up to 100 using theme filler
try:
total_cards = 0
for _n, _e in getattr(self, 'card_library', {}).items():
try:
total_cards += int(_e.get('Count', 1))
except Exception:
total_cards += 1
if int(total_cards) < 100 and hasattr(self, 'fill_remaining_theme_spells'):
before = int(total_cards)
try:
self.fill_remaining_theme_spells() # type: ignore[attr-defined]
except Exception:
pass
# Recompute after filler
try:
total_cards = 0
for _n, _e in getattr(self, 'card_library', {}).items():
try:
total_cards += int(_e.get('Count', 1))
except Exception:
total_cards += 1
except Exception:
total_cards = before
try:
self.output_func(f"Topped up deck to {total_cards}/100 after enforcement.")
except Exception:
pass
except Exception:
pass
# Print what changed
try:
enf = report.get('enforcement') or {}
removed = list(enf.get('removed') or [])
added = list(enf.get('added') or [])
if removed or added:
self.output_func("\nEnforcement Summary (swaps):")
if removed:
self.output_func("Removed:")
for n in removed:
self.output_func(f" - {n}")
if added:
self.output_func("Added:")
for n in added:
self.output_func(f" + {n}")
except Exception:
pass
# Re-export using same base, if provided
try:
import os as _os
import json as _json
if isinstance(base_stem, str) and base_stem.strip():
# Mirror CSV/TXT export naming
csv_name = base_stem + ".csv"
txt_name = base_stem + ".txt"
# Overwrite exports with updated library
self.export_decklist_csv(directory='deck_files', filename=csv_name, suppress_output=True) # type: ignore[attr-defined]
self.export_decklist_text(directory='deck_files', filename=txt_name, suppress_output=True) # type: ignore[attr-defined]
# Recompute and write compliance next to them
self.compute_and_print_compliance(base_stem=base_stem) # type: ignore[attr-defined]
# Inject enforcement details into the saved compliance JSON for UI transparency
comp_path = _os.path.join('deck_files', f"{base_stem}_compliance.json")
try:
if _os.path.exists(comp_path) and isinstance(report, dict) and report.get('enforcement'):
with open(comp_path, 'r', encoding='utf-8') as _f:
comp_obj = _json.load(_f)
comp_obj['enforcement'] = report.get('enforcement')
with open(comp_path, 'w', encoding='utf-8') as _f:
_json.dump(comp_obj, _f, indent=2)
except Exception:
pass
else:
# Fall back to default export flow
csv_path = self.export_decklist_csv() # type: ignore[attr-defined]
try:
base, _ = _os.path.splitext(csv_path)
base_only = _os.path.basename(base)
except Exception:
base_only = None
self.export_decklist_text(filename=(base_only + '.txt') if base_only else None) # type: ignore[attr-defined]
if base_only:
self.compute_and_print_compliance(base_stem=base_only) # type: ignore[attr-defined]
# Inject enforcement into written JSON as above
try:
comp_path = _os.path.join('deck_files', f"{base_only}_compliance.json")
if _os.path.exists(comp_path) and isinstance(report, dict) and report.get('enforcement'):
with open(comp_path, 'r', encoding='utf-8') as _f:
comp_obj = _json.load(_f)
comp_obj['enforcement'] = report.get('enforcement')
with open(comp_path, 'w', encoding='utf-8') as _f:
_json.dump(comp_obj, _f, indent=2)
except Exception:
pass
except Exception:
pass
return report
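A hedged usage sketch for the method above, assuming builder is a fully built deck-builder instance that mixes in ReportingMixin and base_stem matches an earlier CSV/TXT export name; the wrapper function itself is purely illustrative.

def enforce_and_summarize(builder, base_stem: str) -> dict:
    # Headless enforcement ('auto'), re-export, then print the swaps it reports.
    report = builder.enforce_and_reexport(base_stem=base_stem, mode="auto")
    enforcement = report.get("enforcement") or {}
    for name in enforcement.get("removed", []):
        print(f"removed: {name}")
    for name in enforcement.get("added", []):
        print(f"added:   {name}")
    return report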
def compute_and_print_compliance(self, base_stem: str | None = None) -> dict:
"""Compute bracket compliance, print a compact summary, and optionally write a JSON report.
If base_stem is provided, writes deck_files/{base_stem}_compliance.json.
Returns the compliance report dict.
"""
try:
# Late import to avoid circulars in some environments
from deck_builder.brackets_compliance import evaluate_deck # type: ignore
except Exception:
self.output_func("Bracket compliance module unavailable.")
return {}
try:
bracket_key = str(getattr(self, 'bracket_name', '') or getattr(self, 'bracket_level', 'core')).lower()
commander = getattr(self, 'commander_name', None)
report = evaluate_deck(self.card_library, commander_name=commander, bracket=bracket_key)
except Exception as e:
self.output_func(f"Compliance evaluation failed: {e}")
return {}
# Print concise summary
try:
self.output_func("\nBracket Compliance:")
self.output_func(f" Overall: {report.get('overall', 'PASS')}")
cats = report.get('categories', {}) or {}
order = [
('game_changers', 'Game Changers'),
('mass_land_denial', 'Mass Land Denial'),
('extra_turns', 'Extra Turns'),
('tutors_nonland', 'Nonland Tutors'),
('two_card_combos', 'Two-Card Combos'),
]
for key, label in order:
c = cats.get(key, {}) or {}
cnt = int(c.get('count', 0) or 0)
lim = c.get('limit')
status = str(c.get('status') or 'PASS')
lim_txt = ('Unlimited' if lim is None else str(int(lim)))
self.output_func(f" {label:<16} {cnt} / {lim_txt} [{status}]")
except Exception:
pass
# Optionally write JSON report next to exports
if isinstance(base_stem, str) and base_stem.strip():
try:
import os as _os
_os.makedirs('deck_files', exist_ok=True)
path = _os.path.join('deck_files', f"{base_stem}_compliance.json")
import json as _json
with open(path, 'w', encoding='utf-8') as f:
_json.dump(report, f, indent=2)
self.output_func(f"Compliance report saved to {path}")
except Exception:
pass
return report
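For orientation, an illustrative shape of the deck_files/{base_stem}_compliance.json artifact, inferred only from the keys this mixin reads and writes; the authoritative fields come from brackets_compliance.evaluate_deck, and the values below are made up.

example_compliance = {
    "overall": "PASS",
    "categories": {
        "game_changers":    {"count": 0, "limit": 0,    "status": "PASS"},
        "mass_land_denial": {"count": 0, "limit": 0,    "status": "PASS"},
        "extra_turns":      {"count": 0, "limit": 0,    "status": "PASS"},
        "tutors_nonland":   {"count": 1, "limit": 1,    "status": "PASS"},
        "two_card_combos":  {"count": 0, "limit": None, "status": "PASS"},  # None prints as "Unlimited"
    },
    # Merged into the saved JSON by enforce_and_reexport() when swaps occurred.
    "enforcement": {"removed": ["Removed Card A"], "added": ["Added Card B"]},
}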
def _wrap_cell(self, text: str, width: int = 28) -> str:
"""Wraps a string to a specified width for table display.
Used for pretty-printing card names, roles, and tags in tabular output.