diff --git a/.gitignore b/.gitignore index c7ee6a6..e629451 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,7 @@ test.py main.spec !requirements.txt __pycache__/ -build/ +#build/ csv_files/ dist/ logs/ diff --git a/DOCKER.md b/DOCKER.md index 811e00c..c6d8f3f 100644 --- a/DOCKER.md +++ b/DOCKER.md @@ -17,6 +17,35 @@ docker run -it --rm ` -v "${PWD}/logs:/app/logs" ` -v "${PWD}/csv_files:/app/csv_files" ` -v "${PWD}/owned_cards:/app/owned_cards" ` +## Web UI (new) + +The web UI runs the same deckbuilding logic behind a browser-based interface. + +### PowerShell (recommended) +```powershell +docker compose build web +docker compose up --no-deps web +``` + +Then open http://localhost:8080 + +Volumes are the same as the CLI service, so deck exports/logs/configs persist in your working folder. + +### From Docker Hub (PowerShell) +If you prefer not to build locally, pull `mwisnowski/mtg-python-deckbuilder:latest` and run uvicorn: +```powershell +docker run --rm ` + -p 8080:8080 ` + -v "${PWD}/deck_files:/app/deck_files" ` + -v "${PWD}/logs:/app/logs" ` + -v "${PWD}/csv_files:/app/csv_files" ` + -v "${PWD}/owned_cards:/app/owned_cards" ` + -v "${PWD}/config:/app/config" ` + mwisnowski/mtg-python-deckbuilder:latest ` + bash -lc "cd /app && uvicorn code.web.app:app --host 0.0.0.0 --port 8080" +``` + +--- -v "${PWD}/config:/app/config" ` mwisnowski/mtg-python-deckbuilder:latest ``` diff --git a/code/deck_builder/builder.py b/code/deck_builder/builder.py index d4ee1ca..27a069f 100644 --- a/code/deck_builder/builder.py +++ b/code/deck_builder/builder.py @@ -228,6 +228,49 @@ class DeckBuilder( if hasattr(super(), 'add_spells_phase'): return super().add_spells_phase() raise NotImplementedError("Spell addition phase not implemented.") + # --------------------------- + # Lightweight confirmations (CLI pauses; web auto-continues) + # --------------------------- + def _pause(self, message: str = "Press Enter to continue...") -> None: + try: + _ = self.input_func(message) + except Exception: + pass + + def confirm_primary_theme(self) -> None: + if getattr(self, 'primary_tag', None): + self.output_func(f"Primary Theme: {self.primary_tag}") + self._pause() + + def confirm_secondary_theme(self) -> None: + if getattr(self, 'secondary_tag', None): + self.output_func(f"Secondary Theme: {self.secondary_tag}") + self._pause() + + def confirm_tertiary_theme(self) -> None: + if getattr(self, 'tertiary_tag', None): + self.output_func(f"Tertiary Theme: {self.tertiary_tag}") + self._pause() + + def confirm_ramp_spells(self) -> None: + self.output_func("Confirm Ramp") + self._pause() + + def confirm_removal_spells(self) -> None: + self.output_func("Confirm Removal") + self._pause() + + def confirm_wipes_spells(self) -> None: + self.output_func("Confirm Board Wipes") + self._pause() + + def confirm_card_advantage_spells(self) -> None: + self.output_func("Confirm Card Advantage") + self._pause() + + def confirm_protection_spells(self) -> None: + self.output_func("Confirm Protection") + self._pause() # Commander core selection state commander_name: str = "" commander_row: Optional[pd.Series] = None @@ -1201,6 +1244,7 @@ class DeckBuilder( 'ramp': bc.DEFAULT_RAMP_COUNT, 'lands': bc.DEFAULT_LAND_COUNT, 'basic_lands': bc.DEFAULT_BASIC_LAND_COUNT, + 'fetch_lands': getattr(bc, 'FETCH_LAND_DEFAULT_COUNT', 3), 'creatures': bc.DEFAULT_CREATURE_COUNT, 'removal': bc.DEFAULT_REMOVAL_COUNT, 'wipes': bc.DEFAULT_WIPES_COUNT, @@ -1248,6 +1292,7 @@ class DeckBuilder( ('ramp', 'Ramp Pieces'), ('lands', 'Total Lands'), ('basic_lands', 
'Minimum Basic Lands'), + ('fetch_lands', 'Fetch Lands'), ('creatures', 'Creatures'), ('removal', 'Spot Removal'), ('wipes', 'Board Wipes'), @@ -1270,6 +1315,7 @@ class DeckBuilder( ('ramp', 'Ramp'), ('lands', 'Total Lands'), ('basic_lands', 'Basic Lands (Min)'), + ('fetch_lands', 'Fetch Lands'), ('creatures', 'Creatures'), ('removal', 'Spot Removal'), ('wipes', 'Board Wipes'), diff --git a/code/deck_builder/builder_constants.py b/code/deck_builder/builder_constants.py index 9a3b0fe..6dfa1e0 100644 --- a/code/deck_builder/builder_constants.py +++ b/code/deck_builder/builder_constants.py @@ -376,6 +376,7 @@ DECK_COMPOSITION_PROMPTS: Final[Dict[str, str]] = { 'ramp': 'Enter desired number of ramp pieces (default: 8):', 'lands': 'Enter desired number of total lands (default: 35):', 'basic_lands': 'Enter minimum number of basic lands (default: 15):', + 'fetch_lands': 'Enter desired number of fetch lands (default: 3):', 'creatures': 'Enter desired number of creatures (default: 25):', 'removal': 'Enter desired number of spot removal spells (default: 10):', 'wipes': 'Enter desired number of board wipes (default: 2):', diff --git a/code/deck_builder/phases/phase1_commander.py b/code/deck_builder/phases/phase1_commander.py index f0e6e84..2db8b9f 100644 --- a/code/deck_builder/phases/phase1_commander.py +++ b/code/deck_builder/phases/phase1_commander.py @@ -24,11 +24,56 @@ class CommanderSelectionMixin: # --------------------------- # Commander Selection # --------------------------- + def _normalize_commander_query(self, s: str) -> str: + """Return a nicely capitalized search string (e.g., "inti, seneschal of the sun" + -> "Inti, Seneschal of the Sun"). Keeps small words lowercase unless at a segment start, + and capitalizes parts around hyphens/apostrophes. + """ + if not isinstance(s, str): + return str(s) + s = s.strip() + if not s: + return s + small = { + 'a','an','and','as','at','but','by','for','in','of','on','or','the','to','vs','v','with','from','into','over','per' + } + # Consider a new segment after these punctuation marks + segment_breakers = {':',';','-','–','—','/','\\','(', '[', '{', '"', "'", ',', '.'} + out_words: list[str] = [] + start_of_segment = True + for raw in s.lower().split(): + word = raw + # If preceding token ended with a breaker, reset segment + if out_words: + prev = out_words[-1] + if prev and prev[-1] in segment_breakers: + start_of_segment = True + def cap_subparts(token: str) -> str: + # Capitalize around hyphens and apostrophes + def cap_piece(piece: str) -> str: + return piece[:1].upper() + piece[1:] if piece else piece + parts = [cap_piece(p) for p in token.split("'")] + token2 = "'".join(parts) + parts2 = [cap_piece(p) for p in token2.split('-')] + return '-'.join(parts2) + if start_of_segment or word not in small: + fixed = cap_subparts(word) + else: + fixed = word + out_words.append(fixed) + # Next word is not start unless current ends with breaker + start_of_segment = word[-1:] in segment_breakers + # Post-process to ensure first character is capitalized if needed + if out_words: + out_words[0] = out_words[0][:1].upper() + out_words[0][1:] + return ' '.join(out_words) + def choose_commander(self) -> str: # type: ignore[override] df = self.load_commander_data() names = df["name"].tolist() while True: query = self.input_func("Enter commander name: ").strip() + query = self._normalize_commander_query(query) if not query: self.output_func("No input provided. 
Try again.") continue @@ -66,7 +111,7 @@ class CommanderSelectionMixin: else: self.output_func("Invalid index.") continue - query = choice # treat as new query + query = self._normalize_commander_query(choice) # treat as new (normalized) query def _present_commander_and_confirm(self, df: pd.DataFrame, name: str) -> bool: # type: ignore[override] row = df[df["name"] == name].iloc[0] diff --git a/code/deck_builder/phases/phase2_lands_fetch.py b/code/deck_builder/phases/phase2_lands_fetch.py index dad1a51..342b709 100644 --- a/code/deck_builder/phases/phase2_lands_fetch.py +++ b/code/deck_builder/phases/phase2_lands_fetch.py @@ -144,8 +144,17 @@ class LandFetchMixin: self.output_func(f" Land Count Now : {self._current_land_count()} / {land_target}") # type: ignore[attr-defined] def run_land_step4(self, requested_count: int | None = None): # type: ignore[override] - """Public wrapper to add fetch lands. Optional requested_count to bypass prompt.""" - self.add_fetch_lands(requested_count=requested_count) + """Public wrapper to add fetch lands. + + If ideal_counts['fetch_lands'] is set, it will be used to bypass the prompt in both CLI and web builds. + """ + desired = requested_count + try: + if desired is None and getattr(self, 'ideal_counts', None) and 'fetch_lands' in self.ideal_counts: + desired = int(self.ideal_counts['fetch_lands']) + except Exception: + desired = requested_count + self.add_fetch_lands(requested_count=desired) self._enforce_land_cap(step_label="Fetch (Step 4)") # type: ignore[attr-defined] __all__ = [ diff --git a/code/deck_builder/phases/phase3_creatures.py b/code/deck_builder/phases/phase3_creatures.py index 588c7c2..d8957ca 100644 --- a/code/deck_builder/phases/phase3_creatures.py +++ b/code/deck_builder/phases/phase3_creatures.py @@ -190,3 +190,197 @@ class CreatureAdditionMixin: """ """Public method for orchestration: delegates to add_creatures.""" return self.add_creatures() + + # --------------------------- + # Per-theme creature sub-stages (for web UI staged confirms) + # --------------------------- + def _theme_weights(self, themes_ordered: List[tuple[str, str]]) -> Dict[str, float]: + n_themes = len(themes_ordered) + if n_themes == 1: + base_map = {'primary': 1.0} + elif n_themes == 2: + base_map = {'primary': 0.6, 'secondary': 0.4} + else: + base_map = {'primary': 0.5, 'secondary': 0.3, 'tertiary': 0.2} + weights: Dict[str, float] = {} + boosted_roles: set[str] = set() + if n_themes > 1: + for role, tag in themes_ordered: + w = base_map.get(role, 0.0) + lt = tag.lower() + if 'kindred' in lt or 'tribal' in lt: + mult = getattr(bc, 'WEIGHT_ADJUSTMENT_FACTORS', {}).get(f'kindred_{role}', 1.0) + w *= mult + boosted_roles.add(role) + weights[role] = w + total = sum(weights.values()) + if total > 1.0: + for r in list(weights): + weights[r] /= total + else: + rem = 1.0 - total + base_sum_unboosted = sum(base_map[r] for r,_t in themes_ordered if r not in boosted_roles) + if rem > 1e-6 and base_sum_unboosted > 0: + for r,_t in themes_ordered: + if r not in boosted_roles: + weights[r] += rem * (base_map[r] / base_sum_unboosted) + else: + weights['primary'] = 1.0 + return weights + + def _creature_count_in_library(self) -> int: + total = 0 + try: + for _n, entry in getattr(self, 'card_library', {}).items(): + if str(entry.get('Role') or '').strip() == 'creature': + total += int(entry.get('Count', 1)) + except Exception: + pass + return total + + def _prepare_creature_pool(self): + df = getattr(self, '_combined_cards_df', None) + if df is None or df.empty or 'type' not in 
df.columns: + return None + creature_df = df[df['type'].str.contains('Creature', case=False, na=False)].copy() + commander_name = getattr(self, 'commander', None) or getattr(self, 'commander_name', None) + if commander_name and 'name' in creature_df.columns: + creature_df = creature_df[creature_df['name'] != commander_name] + if creature_df.empty: + return None + if '_parsedThemeTags' not in creature_df.columns: + creature_df['_parsedThemeTags'] = creature_df['themeTags'].apply(bu.normalize_tag_cell) + creature_df['_normTags'] = creature_df['_parsedThemeTags'] + selected_tags_lower: List[str] = [] + for t in [getattr(self, 'primary_tag', None), getattr(self, 'secondary_tag', None), getattr(self, 'tertiary_tag', None)]: + if t: + selected_tags_lower.append(t.lower()) + creature_df['_multiMatch'] = creature_df['_normTags'].apply(lambda lst: sum(1 for t in selected_tags_lower if t in lst)) + return creature_df + + def _add_creatures_for_role(self, role: str): + """Add creatures for a single theme role ('primary'|'secondary'|'tertiary').""" + df = getattr(self, '_combined_cards_df', None) + if df is None or df.empty: + self.output_func("Card pool not loaded; cannot add creatures.") + return + tag = getattr(self, f'{role}_tag', None) + if not tag: + return + themes_ordered: List[tuple[str, str]] = [] + if getattr(self, 'primary_tag', None): + themes_ordered.append(('primary', self.primary_tag)) + if getattr(self, 'secondary_tag', None): + themes_ordered.append(('secondary', self.secondary_tag)) + if getattr(self, 'tertiary_tag', None): + themes_ordered.append(('tertiary', self.tertiary_tag)) + weights = self._theme_weights(themes_ordered) + desired_total = (self.ideal_counts.get('creatures') if getattr(self, 'ideal_counts', None) else None) or getattr(bc, 'DEFAULT_CREATURE_COUNT', 25) + current_added = self._creature_count_in_library() + remaining = max(0, desired_total - current_added) + if remaining <= 0: + return + w = float(weights.get(role, 0.0)) + if w <= 0: + return + import math as _math + target = int(_math.ceil(desired_total * w * self._get_rng().uniform(1.0, 1.1))) + target = min(target, remaining) + if target <= 0: + return + creature_df = self._prepare_creature_pool() + if creature_df is None: + self.output_func("No creature rows in dataset; skipping.") + return + tnorm = str(tag).lower() + subset = creature_df[creature_df['_normTags'].apply(lambda lst, tn=tnorm: (tn in lst) or any(tn in x for x in lst))] + if subset.empty: + self.output_func(f"Theme '{tag}' produced no creature candidates.") + return + if 'edhrecRank' in subset.columns: + subset = subset.sort_values(by=['_multiMatch','edhrecRank','manaValue'], ascending=[False, True, True], na_position='last') + elif 'manaValue' in subset.columns: + subset = subset.sort_values(by=['_multiMatch','manaValue'], ascending=[False, True], na_position='last') + base_top = 30 + top_n = int(base_top * getattr(bc, 'THEME_POOL_SIZE_MULTIPLIER', 2.0)) + pool = subset.head(top_n).copy() + # Exclude any names already chosen + existing_names = set(getattr(self, 'card_library', {}).keys()) + pool = pool[~pool['name'].isin(existing_names)] + if pool.empty: + return + synergy_bonus = getattr(bc, 'THEME_PRIORITY_BONUS', 1.2) + weighted_pool = [(nm, (synergy_bonus if mm >= 2 else 1.0)) for nm, mm in zip(pool['name'], pool['_multiMatch'])] + chosen = bu.weighted_sample_without_replacement(weighted_pool, target) + added = 0 + for nm in chosen: + row = pool[pool['name']==nm].iloc[0] + self.add_card( + nm, + card_type=row.get('type','Creature'), + 
mana_cost=row.get('manaCost',''), + mana_value=row.get('manaValue', row.get('cmc','')), + creature_types=row.get('creatureTypes', []) if isinstance(row.get('creatureTypes', []), list) else [], + tags=row.get('themeTags', []) if isinstance(row.get('themeTags', []), list) else [], + role='creature', + sub_role=role, + added_by='creature_add', + trigger_tag=tag, + synergy=int(row.get('_multiMatch', 0)) if '_multiMatch' in row else None + ) + added += 1 + if added >= target: + break + self.output_func(f"Added {added} creatures for {role} theme '{tag}' (target {target}).") + + def _add_creatures_fill(self): + desired_total = (self.ideal_counts.get('creatures') if getattr(self, 'ideal_counts', None) else None) or getattr(bc, 'DEFAULT_CREATURE_COUNT', 25) + current_added = self._creature_count_in_library() + need = max(0, desired_total - current_added) + if need <= 0: + return + creature_df = self._prepare_creature_pool() + if creature_df is None: + return + multi_pool = creature_df[~creature_df['name'].isin(set(getattr(self, 'card_library', {}).keys()))].copy() + multi_pool = multi_pool[multi_pool['_multiMatch'] > 0] + if multi_pool.empty: + return + if 'edhrecRank' in multi_pool.columns: + multi_pool = multi_pool.sort_values(by=['_multiMatch','edhrecRank','manaValue'], ascending=[False, True, True], na_position='last') + elif 'manaValue' in multi_pool.columns: + multi_pool = multi_pool.sort_values(by=['_multiMatch','manaValue'], ascending=[False, True], na_position='last') + fill = multi_pool['name'].tolist()[:need] + added = 0 + for nm in fill: + row = multi_pool[multi_pool['name']==nm].iloc[0] + self.add_card( + nm, + card_type=row.get('type','Creature'), + mana_cost=row.get('manaCost',''), + mana_value=row.get('manaValue', row.get('cmc','')), + creature_types=row.get('creatureTypes', []) if isinstance(row.get('creatureTypes', []), list) else [], + tags=row.get('themeTags', []) if isinstance(row.get('themeTags', []), list) else [], + role='creature', + sub_role='fill', + added_by='creature_fill', + synergy=int(row.get('_multiMatch', 0)) if '_multiMatch' in row else None + ) + added += 1 + if added >= need: + break + if added: + self.output_func(f"Fill pass added {added} extra creatures (shortfall compensation).") + + # Public stage entry points (web orchestrator looks for these) + def add_creatures_primary_phase(self): + return self._add_creatures_for_role('primary') + + def add_creatures_secondary_phase(self): + return self._add_creatures_for_role('secondary') + + def add_creatures_tertiary_phase(self): + return self._add_creatures_for_role('tertiary') + + def add_creatures_fill_phase(self): + return self._add_creatures_fill() diff --git a/code/deck_builder/phases/phase5_color_balance.py b/code/deck_builder/phases/phase5_color_balance.py index 45971a9..c2bd1fb 100644 --- a/code/deck_builder/phases/phase5_color_balance.py +++ b/code/deck_builder/phases/phase5_color_balance.py @@ -61,7 +61,7 @@ class ColorBalanceMixin: self, pip_weights: Optional[Dict[str, float]] = None, color_shortfall_threshold: float = 0.15, - perform_swaps: bool = True, + perform_swaps: bool = False, max_swaps: int = 5, rebalance_basics: bool = True ): @@ -93,54 +93,56 @@ class ColorBalanceMixin: self.output_func(" Deficits (need more sources):") for c, pip_share, s_share, gap in deficits: self.output_func(f" {c}: need +{gap*100:.1f}% sources (pip {pip_share*100:.1f}% vs sources {s_share*100:.1f}%)") - if not perform_swaps or not deficits: + # We'll conditionally perform swaps; but even when skipping swaps we continue to 
basic rebalance. + do_swaps = bool(perform_swaps and deficits) + if not do_swaps: self.output_func(" (No land swaps performed.)") - return - df = getattr(self, '_combined_cards_df', None) - if df is None or df.empty: - self.output_func(" Swap engine: card pool unavailable; aborting swaps.") - return - deficits.sort(key=lambda x: x[3], reverse=True) swaps_done: List[tuple[str,str,str]] = [] - overages: Dict[str,float] = {} - for c in ['W','U','B','R','G']: - over = source_share.get(c,0.0) - pip_weights.get(c,0.0) - if over > 0: - overages[c] = over + if do_swaps: + df = getattr(self, '_combined_cards_df', None) + if df is None or df.empty: + self.output_func(" Swap engine: card pool unavailable; aborting swaps.") + else: + deficits.sort(key=lambda x: x[3], reverse=True) + overages: Dict[str,float] = {} + for c in ['W','U','B','R','G']: + over = source_share.get(c,0.0) - pip_weights.get(c,0.0) + if over > 0: + overages[c] = over - def removal_candidate(exclude_colors: set[str]) -> Optional[str]: - return bu.select_color_balance_removal(self, exclude_colors, overages) + def removal_candidate(exclude_colors: set[str]) -> Optional[str]: + return bu.select_color_balance_removal(self, exclude_colors, overages) - def addition_candidates(target_color: str) -> List[str]: - return bu.color_balance_addition_candidates(self, target_color, df) + def addition_candidates(target_color: str) -> List[str]: + return bu.color_balance_addition_candidates(self, target_color, df) - for color, _, _, gap in deficits: - if len(swaps_done) >= max_swaps: - break - adds = addition_candidates(color) - if not adds: - continue - to_add = None - for cand in adds: - if cand not in self.card_library: - to_add = cand - break - if not to_add: - continue - to_remove = removal_candidate({color}) - if not to_remove: - continue - if not self._decrement_card(to_remove): - continue - self.add_card(to_add, card_type='Land', role='color-fix', sub_role='swap-add', added_by='color_balance') - swaps_done.append((to_remove, to_add, color)) - current_counts = self._current_color_source_counts() - total_sources = sum(current_counts.values()) or 1 - source_share = {c: current_counts[c]/total_sources for c in current_counts} - new_gap = pip_weights.get(color,0.0) - source_share.get(color,0.0) - if new_gap <= color_shortfall_threshold: - continue + for color, _, _, gap in deficits: + if len(swaps_done) >= max_swaps: + break + adds = addition_candidates(color) + if not adds: + continue + to_add = None + for cand in adds: + if cand not in self.card_library: + to_add = cand + break + if not to_add: + continue + to_remove = removal_candidate({color}) + if not to_remove: + continue + if not self._decrement_card(to_remove): + continue + self.add_card(to_add, card_type='Land', role='color-fix', sub_role='swap-add', added_by='color_balance') + swaps_done.append((to_remove, to_add, color)) + current_counts = self._current_color_source_counts() + total_sources = sum(current_counts.values()) or 1 + source_share = {c: current_counts[c]/total_sources for c in current_counts} + new_gap = pip_weights.get(color,0.0) - source_share.get(color,0.0) + if new_gap <= color_shortfall_threshold: + continue if swaps_done: self.output_func("\nColor Balance Swaps Performed:") @@ -152,52 +154,54 @@ class ColorBalanceMixin: self.output_func(" Updated Source Shares:") for c in ['W','U','B','R','G']: self.output_func(f" {c}: {final_source_share.get(c,0.0)*100:5.1f}% (pip {pip_weights.get(c,0.0)*100:5.1f}%)") - if rebalance_basics: - try: - basic_map = getattr(bc, 
'COLOR_TO_BASIC_LAND', {}) - basics_present = {nm: entry for nm, entry in self.card_library.items() if nm in basic_map.values()} - if basics_present: - total_basics = sum(e.get('Count',1) for e in basics_present.values()) - if total_basics > 0: - desired_per_color: Dict[str,int] = {} - for c, basic_name in basic_map.items(): - if c not in ['W','U','B','R','G']: - continue - desired = pip_weights.get(c,0.0) * total_basics - desired_per_color[c] = int(round(desired)) - drift = total_basics - sum(desired_per_color.values()) - if drift != 0: - ordered = sorted(desired_per_color.items(), key=lambda kv: pip_weights.get(kv[0],0.0), reverse=(drift>0)) - i = 0 - while drift != 0 and ordered: - c,_ = ordered[i % len(ordered)] - desired_per_color[c] += 1 if drift>0 else -1 - drift += -1 if drift>0 else 1 - i += 1 - changes: List[tuple[str,int,int]] = [] - for c, basic_name in basic_map.items(): - if c not in ['W','U','B','R','G']: - continue - target = max(0, desired_per_color.get(c,0)) - entry = self.card_library.get(basic_name) - old = entry.get('Count',0) if entry else 0 - if old == 0 and target>0: - for _ in range(target): - self.add_card(basic_name, card_type='Land') - changes.append((basic_name, 0, target)) - elif entry and old != target: - if target > old: - for _ in range(target-old): - self.add_card(basic_name, card_type='Land') - else: - for _ in range(old-target): - self._decrement_card(basic_name) - changes.append((basic_name, old, target)) - if changes: - self.output_func("\nBasic Land Rebalance (toward pip distribution):") - for nm, old, new in changes: - self.output_func(f" {nm}: {old} -> {new}") - except Exception as e: # pragma: no cover (defensive) - self.output_func(f" Basic rebalance skipped (error: {e})") - else: + elif do_swaps: self.output_func(" (No viable swaps executed.)") + + # Always consider basic-land rebalance when requested + if rebalance_basics: + try: + basic_map = getattr(bc, 'COLOR_TO_BASIC_LAND', {}) + basics_present = {nm: entry for nm, entry in self.card_library.items() if nm in basic_map.values()} + if basics_present: + total_basics = sum(e.get('Count',1) for e in basics_present.values()) + if total_basics > 0: + desired_per_color: Dict[str,int] = {} + for c, basic_name in basic_map.items(): + if c not in ['W','U','B','R','G']: + continue + desired = pip_weights.get(c,0.0) * total_basics + desired_per_color[c] = int(round(desired)) + drift = total_basics - sum(desired_per_color.values()) + if drift != 0: + ordered = sorted(desired_per_color.items(), key=lambda kv: pip_weights.get(kv[0],0.0), reverse=(drift>0)) + i = 0 + while drift != 0 and ordered: + c,_ = ordered[i % len(ordered)] + desired_per_color[c] += 1 if drift>0 else -1 + drift += -1 if drift>0 else 1 + i += 1 + changes: List[tuple[str,int,int]] = [] + for c, basic_name in basic_map.items(): + if c not in ['W','U','B','R','G']: + continue + target = max(0, desired_per_color.get(c,0)) + entry = self.card_library.get(basic_name) + old = entry.get('Count',0) if entry else 0 + if old == 0 and target>0: + for _ in range(target): + self.add_card(basic_name, card_type='Land') + changes.append((basic_name, 0, target)) + elif entry and old != target: + if target > old: + for _ in range(target-old): + self.add_card(basic_name, card_type='Land') + else: + for _ in range(old-target): + self._decrement_card(basic_name) + changes.append((basic_name, old, target)) + if changes: + self.output_func("\nBasic Land Rebalance (toward pip distribution):") + for nm, old, new in changes: + self.output_func(f" {nm}: {old} -> 
{new}") + except Exception as e: # pragma: no cover (defensive) + self.output_func(f" Basic rebalance skipped (error: {e})") diff --git a/code/deck_builder/phases/phase6_reporting.py b/code/deck_builder/phases/phase6_reporting.py index 01fa3da..fced821 100644 --- a/code/deck_builder/phases/phase6_reporting.py +++ b/code/deck_builder/phases/phase6_reporting.py @@ -108,6 +108,192 @@ class ReportingMixin: for cat, c in sorted(cat_counts.items(), key=lambda kv: (precedence_index.get(kv[0], 999), -kv[1], kv[0])): pct = (c / total_cards * 100) if total_cards else 0.0 self.output_func(f" {cat:<15} {c:>3} ({pct:5.1f}%)") + + # --------------------------- + # Structured deck summary for UI (types, pips, sources, curve) + # --------------------------- + def build_deck_summary(self) -> dict: + """Return a structured summary of the finished deck for UI rendering. + + Structure: + { + 'type_breakdown': { + 'counts': { type: count, ... }, + 'order': [sorted types by precedence], + 'cards': { type: [ {name, count}, ... ] }, + 'total': int + }, + 'pip_distribution': { + 'counts': { 'W': n, 'U': n, 'B': n, 'R': n, 'G': n }, + 'weights': { 'W': 0-1, ... }, # normalized weights (may not sum exactly to 1 due to rounding) + }, + 'mana_generation': { 'W': n, 'U': n, 'B': n, 'R': n, 'G': n, 'total_sources': n }, + 'mana_curve': { '0': n, '1': n, '2': n, '3': n, '4': n, '5': n, '6+': n, 'total_spells': n } + } + """ + # Build lookup to enrich type and mana values + full_df = getattr(self, '_full_cards_df', None) + combined_df = getattr(self, '_combined_cards_df', None) + snapshot = full_df if full_df is not None else combined_df + row_lookup: Dict[str, any] = {} + if snapshot is not None and not getattr(snapshot, 'empty', True) and 'name' in snapshot.columns: + for _, r in snapshot.iterrows(): # type: ignore[attr-defined] + nm = str(r.get('name')) + if nm and nm not in row_lookup: + row_lookup[nm] = r + + # Category classification (reuse export logic) + precedence_order = [ + 'Commander', 'Battle', 'Planeswalker', 'Creature', 'Instant', 'Sorcery', 'Artifact', 'Enchantment', 'Land', 'Other' + ] + precedence_index = {k: i for i, k in enumerate(precedence_order)} + commander_name = getattr(self, 'commander_name', '') or getattr(self, 'commander', '') or '' + + def classify(primary_type_line: str, card_name: str) -> str: + if commander_name and card_name == commander_name: + return 'Commander' + tl = (primary_type_line or '').lower() + if 'battle' in tl: + return 'Battle' + if 'planeswalker' in tl: + return 'Planeswalker' + if 'creature' in tl: + return 'Creature' + if 'instant' in tl: + return 'Instant' + if 'sorcery' in tl: + return 'Sorcery' + if 'artifact' in tl: + return 'Artifact' + if 'enchantment' in tl: + return 'Enchantment' + if 'land' in tl: + return 'Land' + return 'Other' + + # Type breakdown (counts and per-type card lists) + type_counts: Dict[str, int] = {} + type_cards: Dict[str, list] = {} + total_cards = 0 + for name, info in self.card_library.items(): + # Exclude commander from type breakdown per UI preference + if commander_name and name == commander_name: + continue + cnt = int(info.get('Count', 1)) + base_type = info.get('Card Type') or info.get('Type', '') + if not base_type: + row = row_lookup.get(name) + if row is not None: + base_type = row.get('type', row.get('type_line', '')) or '' + category = classify(base_type, name) + type_counts[category] = type_counts.get(category, 0) + cnt + total_cards += cnt + type_cards.setdefault(category, []).append({ + 'name': name, + 'count': cnt, + 'role': 
info.get('Role', '') or '', + 'tags': list(info.get('Tags', []) or []), + }) + # Sort cards within each type by name + for cat, lst in type_cards.items(): + lst.sort(key=lambda x: (x['name'].lower(), -int(x['count']))) + type_order = sorted(type_counts.keys(), key=lambda k: precedence_index.get(k, 999)) + + # Pip distribution (counts and weights) for non-land spells only + pip_counts = {c: 0 for c in ('W','U','B','R','G')} + import re as _re_local + total_pips = 0.0 + for name, info in self.card_library.items(): + ctype = str(info.get('Card Type', '')) + if 'land' in ctype.lower(): + continue + mana_cost = info.get('Mana Cost') or info.get('mana_cost') or '' + if not isinstance(mana_cost, str): + continue + for match in _re_local.findall(r'\{([^}]+)\}', mana_cost): + sym = match.upper() + if len(sym) == 1 and sym in pip_counts: + pip_counts[sym] += 1 + total_pips += 1 + elif '/' in sym: + parts = [p for p in sym.split('/') if p in pip_counts] + if parts: + weight_each = 1 / len(parts) + for p in parts: + pip_counts[p] += weight_each + total_pips += weight_each + if total_pips <= 0: + # Fallback to even distribution across color identity + colors = [c for c in ('W','U','B','R','G') if c in (getattr(self, 'color_identity', []) or [])] + if colors: + share = 1 / len(colors) + for c in colors: + pip_counts[c] = share + total_pips = 1.0 + pip_weights = {c: (pip_counts[c] / total_pips if total_pips else 0.0) for c in pip_counts} + + # Mana generation from lands (color sources) + try: + from deck_builder import builder_utils as _bu + matrix = _bu.compute_color_source_matrix(self.card_library, full_df) + except Exception: + matrix = {} + source_counts = {c: 0 for c in ('W','U','B','R','G')} + for name, flags in matrix.items(): + copies = int(self.card_library.get(name, {}).get('Count', 1)) + for c in source_counts: + if int(flags.get(c, 0)): + source_counts[c] += copies + total_sources = sum(source_counts.values()) + + # Mana curve (non-land spells) + curve_bins = ['0','1','2','3','4','5','6+'] + curve_counts = {b: 0 for b in curve_bins} + curve_cards: Dict[str, list] = {b: [] for b in curve_bins} + total_spells = 0 + for name, info in self.card_library.items(): + ctype = str(info.get('Card Type', '')) + if 'land' in ctype.lower(): + continue + cnt = int(info.get('Count', 1)) + mv = info.get('Mana Value') + if mv in (None, ''): + row = row_lookup.get(name) + if row is not None: + mv = row.get('manaValue', row.get('cmc', None)) + try: + val = float(mv) if mv not in (None, '') else 0.0 + except Exception: + val = 0.0 + bucket = '6+' if val >= 6 else str(int(val)) + if bucket not in curve_counts: + bucket = '6+' + curve_counts[bucket] += cnt + curve_cards[bucket].append({'name': name, 'count': cnt}) + total_spells += cnt + + return { + 'type_breakdown': { + 'counts': type_counts, + 'order': type_order, + 'cards': type_cards, + 'total': total_cards, + }, + 'pip_distribution': { + 'counts': pip_counts, + 'weights': pip_weights, + }, + 'mana_generation': { + **source_counts, + 'total_sources': total_sources, + }, + 'mana_curve': { + **curve_counts, + 'total_spells': total_spells, + 'cards': curve_cards, + }, + 'colors': list(getattr(self, 'color_identity', []) or []), + } def export_decklist_csv(self, directory: str = 'deck_files', filename: str | None = None, suppress_output: bool = False) -> str: """Export current decklist to CSV (enriched). 
Filename pattern (default): commanderFirstWord_firstTheme_YYYYMMDD.csv @@ -208,11 +394,11 @@ class ReportingMixin: owned_set_lower = set() for name, info in self.card_library.items(): - base_type = info.get('Card Type') or info.get('Type','') - base_mc = info.get('Mana Cost','') - base_mv = info.get('Mana Value', info.get('CMC','')) - role = info.get('Role','') or '' - tags = info.get('Tags',[]) or [] + base_type = info.get('Card Type') or info.get('Type', '') + base_mc = info.get('Mana Cost', '') + base_mv = info.get('Mana Value', info.get('CMC', '')) + role = info.get('Role', '') or '' + tags = info.get('Tags', []) or [] tags_join = '; '.join(tags) text_field = '' colors = '' @@ -260,7 +446,7 @@ class ReportingMixin: owned_flag = 'Y' if (name.lower() in owned_set_lower) else '' rows.append(((prec, name.lower()), [ name, - info.get('Count',1), + info.get('Count', 1), base_type, base_mc, base_mv, @@ -276,6 +462,7 @@ class ReportingMixin: text_field[:800] if isinstance(text_field, str) else str(text_field)[:800], owned_flag ])) + # Now sort (category precedence, then alphabetical name) rows.sort(key=lambda x: x[0]) diff --git a/code/headless_runner.py b/code/headless_runner.py index 21c1aba..73253f2 100644 --- a/code/headless_runner.py +++ b/code/headless_runner.py @@ -6,6 +6,7 @@ import os from typing import Any, Dict, List, Optional from deck_builder.builder import DeckBuilder +from deck_builder import builder_constants as bc def run( command_name: str = "", @@ -47,14 +48,27 @@ def run( scripted_inputs.append("0") # stop at primary # Bracket (meta power / style) selection; default to 3 if not provided scripted_inputs.append(str(bracket_level if isinstance(bracket_level, int) and 1 <= bracket_level <= 5 else 3)) - # Ideal count prompts (press Enter for defaults) - for _ in range(8): - scripted_inputs.append("") + # Ideal count prompts (press Enter for defaults). Include fetch_lands if present. 
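(For illustration only, outside the patch: the headless runner above pre-seeds one canned answer per interactive prompt — commander name, "0" to stop theme selection, a bracket, then an empty string per composition prompt — and now falls back to accepting defaults instead of raising. A minimal sketch of that scripted-input pattern, using hypothetical stand-ins for the prompt table rather than the project's real DECK_COMPOSITION_PROMPTS flow:)

```python
# Illustrative sketch only; names below are stand-ins, not the project's API.
from typing import Callable, Dict

# Hypothetical prompt table mirroring the shape of DECK_COMPOSITION_PROMPTS.
PROMPTS: Dict[str, str] = {
    "ramp": "Enter desired number of ramp pieces (default: 8):",
    "lands": "Enter desired number of total lands (default: 35):",
    "fetch_lands": "Enter desired number of fetch lands (default: 3):",
}

def make_scripted_input(answers: list[str]) -> Callable[[str], str]:
    """Return an input_func that pops pre-seeded answers, then accepts defaults."""
    queue = list(answers)

    def scripted_input(prompt: str) -> str:
        if queue:
            return queue.pop(0)
        return ""  # auto-accept the default for any unexpected prompt

    return scripted_input

seed = [""] * len(PROMPTS)  # "press Enter" (accept default) for every prompt
ask = make_scripted_input(seed)
for key, prompt in PROMPTS.items():
    print(key, "->", repr(ask(prompt)))   # '' each time, so defaults are kept
print(repr(ask("Unexpected prompt?")))    # fallback also returns '' instead of raising
```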
+ ideal_keys = { + "ramp", + "lands", + "basic_lands", + "fetch_lands", + "creatures", + "removal", + "wipes", + "card_advantage", + "protection", + } + for key in bc.DECK_COMPOSITION_PROMPTS.keys(): + if key in ideal_keys: + scripted_inputs.append("") def scripted_input(prompt: str) -> str: if scripted_inputs: return scripted_inputs.pop(0) - raise RuntimeError("Ran out of scripted inputs for prompt: " + prompt) + # Fallback to auto-accept defaults for any unexpected prompts + return "" builder = DeckBuilder(input_func=scripted_input) # Mark this run as headless so builder can adjust exports and logging diff --git a/code/web/__init__.py b/code/web/__init__.py new file mode 100644 index 0000000..d053d5c --- /dev/null +++ b/code/web/__init__.py @@ -0,0 +1 @@ +# Web package marker diff --git a/code/web/app.py b/code/web/app.py new file mode 100644 index 0000000..781fc91 --- /dev/null +++ b/code/web/app.py @@ -0,0 +1,118 @@ +from __future__ import annotations + +from fastapi import FastAPI, Request +from fastapi.responses import HTMLResponse, FileResponse, PlainTextResponse, JSONResponse +from fastapi.templating import Jinja2Templates +from fastapi.staticfiles import StaticFiles +from pathlib import Path +import os +import json as _json + +# Resolve template/static dirs relative to this file +_THIS_DIR = Path(__file__).resolve().parent +_TEMPLATES_DIR = _THIS_DIR / "templates" +_STATIC_DIR = _THIS_DIR / "static" + +app = FastAPI(title="MTG Deckbuilder Web UI") + +# Mount static if present +if _STATIC_DIR.exists(): + app.mount("/static", StaticFiles(directory=str(_STATIC_DIR)), name="static") + +# Jinja templates +templates = Jinja2Templates(directory=str(_TEMPLATES_DIR)) + +# Global template flags (env-driven) +def _as_bool(val: str | None, default: bool = False) -> bool: + if val is None: + return default + return val.strip().lower() in {"1", "true", "yes", "on"} + +SHOW_LOGS = _as_bool(os.getenv("SHOW_LOGS"), False) +SHOW_SETUP = _as_bool(os.getenv("SHOW_SETUP"), True) + +# Expose as Jinja globals so all templates can reference without passing per-view +templates.env.globals.update({ + "show_logs": SHOW_LOGS, + "show_setup": SHOW_SETUP, +}) + + +@app.get("/", response_class=HTMLResponse) +async def home(request: Request) -> HTMLResponse: + return templates.TemplateResponse("home.html", {"request": request, "version": os.getenv("APP_VERSION", "dev")}) + + +# Simple health check +@app.get("/healthz") +async def healthz(): + return {"status": "ok"} + +# Lightweight setup/tagging status endpoint +@app.get("/status/setup") +async def setup_status(): + try: + p = Path("csv_files/.setup_status.json") + if p.exists(): + with p.open("r", encoding="utf-8") as f: + data = _json.load(f) + # Attach a small log tail if available + try: + log_path = Path('logs/deck_builder.log') + if log_path.exists(): + tail_lines = [] + with log_path.open('r', encoding='utf-8', errors='ignore') as lf: + # Read last ~100 lines efficiently + from collections import deque + tail = deque(lf, maxlen=100) + tail_lines = list(tail) + # Reduce noise: keep lines related to setup/tagging; fallback to last 30 if too few remain + try: + lowered = [ln for ln in tail_lines] + keywords = ["setup", "tag", "color", "csv", "initial setup", "tagging", "load_dataframe"] + filtered = [ln for ln in lowered if any(kw in ln.lower() for kw in keywords)] + if len(filtered) >= 5: + use_lines = filtered[-60:] + else: + use_lines = tail_lines[-30:] + data["log_tail"] = "".join(use_lines).strip() + except Exception: + data["log_tail"] = 
"".join(tail_lines).strip() + except Exception: + pass + return JSONResponse(data) + return JSONResponse({"running": False, "phase": "idle"}) + except Exception: + return JSONResponse({"running": False, "phase": "error"}) + +# Routers +from .routes import build as build_routes # noqa: E402 +from .routes import configs as config_routes # noqa: E402 +from .routes import decks as decks_routes # noqa: E402 +from .routes import setup as setup_routes # noqa: E402 +app.include_router(build_routes.router) +app.include_router(config_routes.router) +app.include_router(decks_routes.router) +app.include_router(setup_routes.router) + +# Lightweight file download endpoint for exports +@app.get("/files") +async def get_file(path: str): + try: + p = Path(path) + if not p.exists() or not p.is_file(): + return PlainTextResponse("File not found", status_code=404) + # Only allow returning files within the workspace directory for safety + # (best-effort: require relative to current working directory) + try: + cwd = Path.cwd().resolve() + if cwd not in p.resolve().parents and p.resolve() != cwd: + # Still allow if under deck_files or config + allowed = any(seg in ("deck_files", "config", "logs") for seg in p.parts) + if not allowed: + return PlainTextResponse("Access denied", status_code=403) + except Exception: + pass + return FileResponse(path) + except Exception: + return PlainTextResponse("Error serving file", status_code=500) diff --git a/code/web/routes/__init__.py b/code/web/routes/__init__.py new file mode 100644 index 0000000..6cc3498 --- /dev/null +++ b/code/web/routes/__init__.py @@ -0,0 +1 @@ +# Routes package marker diff --git a/code/web/routes/build.py b/code/web/routes/build.py new file mode 100644 index 0000000..afe57c3 --- /dev/null +++ b/code/web/routes/build.py @@ -0,0 +1,507 @@ +from __future__ import annotations + +from fastapi import APIRouter, Request, Form +from fastapi.responses import HTMLResponse +from ..app import templates +from deck_builder import builder_constants as bc +from ..services import orchestrator as orch +from ..services.tasks import get_session, new_sid + +router = APIRouter(prefix="/build") + + +@router.get("/", response_class=HTMLResponse) +async def build_index(request: Request) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + resp = templates.TemplateResponse( + "build/index.html", + {"request": request, "sid": sid, "commander": sess.get("commander"), "tags": sess.get("tags", [])}, + ) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + + +@router.get("/step1", response_class=HTMLResponse) +async def build_step1(request: Request) -> HTMLResponse: + return templates.TemplateResponse("build/_step1.html", {"request": request, "candidates": []}) + + +@router.post("/step1", response_class=HTMLResponse) +async def build_step1_search(request: Request, query: str = Form(""), auto: str | None = Form(None)) -> HTMLResponse: + query = (query or "").strip() + auto_enabled = True if (auto == "1") else False + candidates = [] + if query: + candidates = orch.commander_candidates(query, limit=10) + # Optional auto-select at a stricter threshold + if auto_enabled and candidates and len(candidates[0]) >= 2 and int(candidates[0][1]) >= 98: + top_name = candidates[0][0] + res = orch.commander_select(top_name) + if res.get("ok"): + return templates.TemplateResponse( + "build/_step2.html", + { + "request": request, + "commander": res, + "tags": orch.tags_for_commander(res["name"]), + "brackets": orch.bracket_options(), 
+ }, + ) + return templates.TemplateResponse("build/_step1.html", {"request": request, "query": query, "candidates": candidates, "auto": auto_enabled}) + + +@router.post("/step1/inspect", response_class=HTMLResponse) +async def build_step1_inspect(request: Request, name: str = Form(...)) -> HTMLResponse: + info = orch.commander_inspect(name) + return templates.TemplateResponse( + "build/_step1.html", + {"request": request, "inspect": info, "selected": name, "tags": orch.tags_for_commander(name)}, + ) + + +@router.post("/step1/confirm", response_class=HTMLResponse) +async def build_step1_confirm(request: Request, name: str = Form(...)) -> HTMLResponse: + res = orch.commander_select(name) + if not res.get("ok"): + return templates.TemplateResponse("build/_step1.html", {"request": request, "error": res.get("error"), "selected": name}) + # Proceed to step2 placeholder + return templates.TemplateResponse( + "build/_step2.html", + { + "request": request, + "commander": res, + "tags": orch.tags_for_commander(res["name"]), + "brackets": orch.bracket_options(), + }, + ) + + +@router.get("/step2", response_class=HTMLResponse) +async def build_step2_get(request: Request) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + commander = sess.get("commander") + if not commander: + # Fallback to step1 if no commander in session + return templates.TemplateResponse("build/_step1.html", {"request": request, "candidates": []}) + tags = orch.tags_for_commander(commander) + selected = sess.get("tags", []) + return templates.TemplateResponse( + "build/_step2.html", + { + "request": request, + "commander": {"name": commander}, + "tags": tags, + "brackets": orch.bracket_options(), + "primary_tag": selected[0] if len(selected) > 0 else "", + "secondary_tag": selected[1] if len(selected) > 1 else "", + "tertiary_tag": selected[2] if len(selected) > 2 else "", + "selected_bracket": sess.get("bracket"), + }, + ) + + +@router.post("/step2", response_class=HTMLResponse) +async def build_step2_submit( + request: Request, + commander: str = Form(...), + primary_tag: str | None = Form(None), + secondary_tag: str | None = Form(None), + tertiary_tag: str | None = Form(None), + bracket: int = Form(...), +) -> HTMLResponse: + # Validate primary tag selection if tags are available + available_tags = orch.tags_for_commander(commander) + if available_tags and not (primary_tag and primary_tag.strip()): + return templates.TemplateResponse( + "build/_step2.html", + { + "request": request, + "commander": {"name": commander}, + "tags": available_tags, + "brackets": orch.bracket_options(), + "error": "Please choose a primary theme.", + "primary_tag": primary_tag or "", + "secondary_tag": secondary_tag or "", + "tertiary_tag": tertiary_tag or "", + "selected_bracket": int(bracket) if bracket is not None else None, + }, + ) + + # Save selection to session (basic MVP; real build will use this later) + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + sess["commander"] = commander + sess["tags"] = [t for t in [primary_tag, secondary_tag, tertiary_tag] if t] + sess["bracket"] = int(bracket) + # Proceed to Step 3 placeholder for now + return templates.TemplateResponse( + "build/_step3.html", + { + "request": request, + "commander": commander, + "tags": sess["tags"], + "bracket": sess["bracket"], + "defaults": orch.ideal_defaults(), + "labels": orch.ideal_labels(), + "values": orch.ideal_defaults(), + }, + ) + + +@router.post("/step3", response_class=HTMLResponse) +async def 
build_step3_submit( + request: Request, + ramp: int = Form(...), + lands: int = Form(...), + basic_lands: int = Form(...), + creatures: int = Form(...), + removal: int = Form(...), + wipes: int = Form(...), + card_advantage: int = Form(...), + protection: int = Form(...), +) -> HTMLResponse: + labels = orch.ideal_labels() + submitted = { + "ramp": ramp, + "lands": lands, + "basic_lands": basic_lands, + "creatures": creatures, + "removal": removal, + "wipes": wipes, + "card_advantage": card_advantage, + "protection": protection, + } + + errors: list[str] = [] + for k, v in submitted.items(): + try: + iv = int(v) + except Exception: + errors.append(f"{labels.get(k, k)} must be a number.") + continue + if iv < 0: + errors.append(f"{labels.get(k, k)} cannot be negative.") + submitted[k] = iv + # Cross-field validation: basic lands should not exceed total lands + if isinstance(submitted.get("basic_lands"), int) and isinstance(submitted.get("lands"), int): + if submitted["basic_lands"] > submitted["lands"]: + errors.append("Basic Lands cannot exceed Total Lands.") + + if errors: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + return templates.TemplateResponse( + "build/_step3.html", + { + "request": request, + "defaults": orch.ideal_defaults(), + "labels": labels, + "values": submitted, + "error": " ".join(errors), + "commander": sess.get("commander"), + "tags": sess.get("tags", []), + "bracket": sess.get("bracket"), + }, + ) + + # Save to session + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + sess["ideals"] = submitted + + # Proceed to review (Step 4) + return templates.TemplateResponse( + "build/_step4.html", + { + "request": request, + "labels": labels, + "values": submitted, + "commander": sess.get("commander"), + }, + ) + + +@router.get("/step3", response_class=HTMLResponse) +async def build_step3_get(request: Request) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + defaults = orch.ideal_defaults() + values = sess.get("ideals") or defaults + resp = templates.TemplateResponse( + "build/_step3.html", + { + "request": request, + "defaults": defaults, + "labels": orch.ideal_labels(), + "values": values, + "commander": sess.get("commander"), + "tags": sess.get("tags", []), + "bracket": sess.get("bracket"), + }, + ) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + + +@router.get("/step4", response_class=HTMLResponse) +async def build_step4_get(request: Request) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + labels = orch.ideal_labels() + values = sess.get("ideals") or orch.ideal_defaults() + commander = sess.get("commander") + return templates.TemplateResponse( + "build/_step4.html", + { + "request": request, + "labels": labels, + "values": values, + "commander": commander, + }, + ) + + +@router.get("/step5", response_class=HTMLResponse) +async def build_step5_get(request: Request) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + resp = templates.TemplateResponse( + "build/_step5.html", + { + "request": request, + "commander": sess.get("commander"), + "tags": sess.get("tags", []), + "bracket": sess.get("bracket"), + "values": sess.get("ideals", orch.ideal_defaults()), + "status": None, + "stage_label": None, + "log": None, + "added_cards": [], + "game_changers": bc.GAME_CHANGERS, + }, + ) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + 
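(Aside, not part of the patch: the step routes above all follow the same session pattern — read the `sid` cookie or mint one with `new_sid()`, fetch a per-session dict via `get_session(sid)`, and re-set the cookie on the response. `..services.tasks` is not included in this diff, so the following is only a hedged guess at its minimal shape:)

```python
# Hypothetical sketch of an in-memory session store compatible with how the
# build routes call new_sid()/get_session(); the real code/web/services/tasks.py
# is not shown in this diff and may differ.
from __future__ import annotations

import uuid
from typing import Any, Dict

_SESSIONS: Dict[str, Dict[str, Any]] = {}

def new_sid() -> str:
    """Mint a fresh opaque session id for the 'sid' cookie."""
    return uuid.uuid4().hex

def get_session(sid: str) -> Dict[str, Any]:
    """Return the mutable per-session dict, creating it on first access."""
    return _SESSIONS.setdefault(sid, {})

# Usage mirroring the routes: stash commander/tags, read them back later.
sid = new_sid()
sess = get_session(sid)
sess["commander"] = "Inti, Seneschal of the Sun"
sess["tags"] = ["Example Theme"]
assert get_session(sid)["commander"] == "Inti, Seneschal of the Sun"
```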
+@router.post("/step5/continue", response_class=HTMLResponse) +async def build_step5_continue(request: Request) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + # Validate commander; redirect to step1 if missing + if not sess.get("commander"): + resp = templates.TemplateResponse("build/_step1.html", {"request": request, "candidates": [], "error": "Please select a commander first."}) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + # Ensure build context exists; if not, start it first + if not sess.get("build_ctx"): + opts = orch.bracket_options() + default_bracket = (opts[0]["level"] if opts else 1) + bracket_val = sess.get("bracket") + try: + safe_bracket = int(bracket_val) if bracket_val is not None else int(default_bracket) + except Exception: + safe_bracket = int(default_bracket) + ideals_val = sess.get("ideals") or orch.ideal_defaults() + sess["build_ctx"] = orch.start_build_ctx( + commander=sess.get("commander"), + tags=sess.get("tags", []), + bracket=safe_bracket, + ideals=ideals_val, + ) + res = orch.run_stage(sess["build_ctx"], rerun=False) + status = "Build complete" if res.get("done") else "Stage complete" + stage_label = res.get("label") + log = res.get("log_delta", "") + added_cards = res.get("added_cards", []) + # Progress & downloads + i = res.get("idx") + n = res.get("total") + csv_path = res.get("csv_path") if res.get("done") else None + txt_path = res.get("txt_path") if res.get("done") else None + summary = res.get("summary") if res.get("done") else None + resp = templates.TemplateResponse( + "build/_step5.html", + { + "request": request, + "commander": sess.get("commander"), + "tags": sess.get("tags", []), + "bracket": sess.get("bracket"), + "values": sess.get("ideals", orch.ideal_defaults()), + "status": status, + "stage_label": stage_label, + "log": log, + "added_cards": added_cards, + "i": i, + "n": n, + "csv_path": csv_path, + "txt_path": txt_path, + "summary": summary, + "game_changers": bc.GAME_CHANGERS, + }, + ) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + +@router.post("/step5/rerun", response_class=HTMLResponse) +async def build_step5_rerun(request: Request) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + if not sess.get("commander"): + resp = templates.TemplateResponse("build/_step1.html", {"request": request, "candidates": [], "error": "Please select a commander first."}) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + # Rerun requires an existing context; if missing, create it and run first stage as rerun + if not sess.get("build_ctx"): + opts = orch.bracket_options() + default_bracket = (opts[0]["level"] if opts else 1) + bracket_val = sess.get("bracket") + try: + safe_bracket = int(bracket_val) if bracket_val is not None else int(default_bracket) + except Exception: + safe_bracket = int(default_bracket) + ideals_val = sess.get("ideals") or orch.ideal_defaults() + sess["build_ctx"] = orch.start_build_ctx( + commander=sess.get("commander"), + tags=sess.get("tags", []), + bracket=safe_bracket, + ideals=ideals_val, + ) + res = orch.run_stage(sess["build_ctx"], rerun=True) + status = "Stage rerun complete" if not res.get("done") else "Build complete" + stage_label = res.get("label") + log = res.get("log_delta", "") + added_cards = res.get("added_cards", []) + i = res.get("idx") + n = res.get("total") + csv_path = res.get("csv_path") if res.get("done") else None + txt_path = 
res.get("txt_path") if res.get("done") else None + summary = res.get("summary") if res.get("done") else None + resp = templates.TemplateResponse( + "build/_step5.html", + { + "request": request, + "commander": sess.get("commander"), + "tags": sess.get("tags", []), + "bracket": sess.get("bracket"), + "values": sess.get("ideals", orch.ideal_defaults()), + "status": status, + "stage_label": stage_label, + "log": log, + "added_cards": added_cards, + "i": i, + "n": n, + "csv_path": csv_path, + "txt_path": txt_path, + "summary": summary, + "game_changers": bc.GAME_CHANGERS, + }, + ) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + + +@router.post("/step5/start", response_class=HTMLResponse) +async def build_step5_start(request: Request) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + # Validate commander exists before starting + commander = sess.get("commander") + if not commander: + resp = templates.TemplateResponse( + "build/_step1.html", + {"request": request, "candidates": [], "error": "Please select a commander first."}, + ) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + try: + # Initialize step-by-step build context and run first stage + opts = orch.bracket_options() + default_bracket = (opts[0]["level"] if opts else 1) + bracket_val = sess.get("bracket") + try: + safe_bracket = int(bracket_val) if bracket_val is not None else int(default_bracket) + except Exception: + safe_bracket = int(default_bracket) + ideals_val = sess.get("ideals") or orch.ideal_defaults() + sess["build_ctx"] = orch.start_build_ctx( + commander=commander, + tags=sess.get("tags", []), + bracket=safe_bracket, + ideals=ideals_val, + ) + res = orch.run_stage(sess["build_ctx"], rerun=False) + status = "Stage complete" if not res.get("done") else "Build complete" + stage_label = res.get("label") + log = res.get("log_delta", "") + added_cards = res.get("added_cards", []) + i = res.get("idx") + n = res.get("total") + csv_path = res.get("csv_path") if res.get("done") else None + txt_path = res.get("txt_path") if res.get("done") else None + summary = res.get("summary") if res.get("done") else None + resp = templates.TemplateResponse( + "build/_step5.html", + { + "request": request, + "commander": commander, + "tags": sess.get("tags", []), + "bracket": sess.get("bracket"), + "values": sess.get("ideals", orch.ideal_defaults()), + "status": status, + "stage_label": stage_label, + "log": log, + "added_cards": added_cards, + "i": i, + "n": n, + "csv_path": csv_path, + "txt_path": txt_path, + "summary": summary, + "game_changers": bc.GAME_CHANGERS, + }, + ) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + except Exception as e: + # Surface a friendly error on the step 5 screen + resp = templates.TemplateResponse( + "build/_step5.html", + { + "request": request, + "commander": commander, + "tags": sess.get("tags", []), + "bracket": sess.get("bracket"), + "values": sess.get("ideals", orch.ideal_defaults()), + "status": "Error", + "stage_label": None, + "log": f"Failed to start build: {e}", + "added_cards": [], + "i": None, + "n": None, + "csv_path": None, + "txt_path": None, + "summary": None, + "game_changers": bc.GAME_CHANGERS, + }, + ) + resp.set_cookie("sid", sid, httponly=True, samesite="lax") + return resp + +@router.get("/step5/start", response_class=HTMLResponse) +async def build_step5_start_get(request: Request) -> HTMLResponse: + # Allow GET as a fallback to start the build (delegates to POST 
handler) + return await build_step5_start(request) + + +@router.get("/banner", response_class=HTMLResponse) +async def build_banner(request: Request, step: str = "", i: int | None = None, n: int | None = None) -> HTMLResponse: + sid = request.cookies.get("sid") or new_sid() + sess = get_session(sid) + commander = sess.get("commander") + tags = sess.get("tags", []) + # Render only the inner text for the subtitle + return templates.TemplateResponse( + "build/_banner_subtitle.html", + {"request": request, "commander": commander, "tags": tags, "step": step, "i": i, "n": n}, + ) diff --git a/code/web/routes/configs.py b/code/web/routes/configs.py new file mode 100644 index 0000000..823131d --- /dev/null +++ b/code/web/routes/configs.py @@ -0,0 +1,179 @@ +from __future__ import annotations + +from fastapi import APIRouter, Request, Form, UploadFile, File +from fastapi.responses import HTMLResponse +from pathlib import Path +import os +import json +from ..app import templates +from ..services import orchestrator as orch +from deck_builder import builder_constants as bc + + +router = APIRouter(prefix="/configs") + + +def _config_dir() -> Path: + # Prefer explicit env var if provided, else default to ./config + p = os.getenv("DECK_CONFIG") + if p: + # If env points to a file, use its parent dir; else treat as dir + pp = Path(p) + return (pp.parent if pp.suffix else pp).resolve() + return (Path.cwd() / "config").resolve() + + +def _list_configs() -> list[dict]: + d = _config_dir() + try: + d.mkdir(parents=True, exist_ok=True) + except Exception: + pass + items: list[dict] = [] + for p in sorted(d.glob("*.json"), key=lambda x: x.stat().st_mtime, reverse=True): + meta = {"name": p.name, "path": str(p), "mtime": p.stat().st_mtime} + try: + with p.open("r", encoding="utf-8") as f: + data = json.load(f) + meta["commander"] = data.get("commander") + tags = [t for t in [data.get("primary_tag"), data.get("secondary_tag"), data.get("tertiary_tag")] if t] + meta["tags"] = tags + meta["bracket_level"] = data.get("bracket_level") + except Exception: + pass + items.append(meta) + return items + + +@router.get("/", response_class=HTMLResponse) +async def configs_index(request: Request) -> HTMLResponse: + items = _list_configs() + # Load example deck.json from the config directory, if present + example_json = None + example_name = "deck.json" + try: + example_path = _config_dir() / example_name + if example_path.exists() and example_path.is_file(): + example_json = example_path.read_text(encoding="utf-8") + except Exception: + example_json = None + return templates.TemplateResponse( + "configs/index.html", + {"request": request, "items": items, "example_json": example_json, "example_name": example_name}, + ) + + +@router.get("/view", response_class=HTMLResponse) +async def configs_view(request: Request, name: str) -> HTMLResponse: + base = _config_dir() + p = (base / name).resolve() + # Safety: ensure the resolved path is within config dir + try: + if base not in p.parents and p != base: + raise ValueError("Access denied") + except Exception: + pass + if not (p.exists() and p.is_file() and p.suffix.lower() == ".json"): + return templates.TemplateResponse( + "configs/index.html", + {"request": request, "items": _list_configs(), "error": "Config not found."}, + ) + try: + data = json.loads(p.read_text(encoding="utf-8")) + except Exception as e: + return templates.TemplateResponse( + "configs/index.html", + {"request": request, "items": _list_configs(), "error": f"Failed to read JSON: {e}"}, + ) + return 
templates.TemplateResponse( + "configs/view.html", + {"request": request, "path": str(p), "name": p.name, "data": data}, + ) + + +@router.post("/run", response_class=HTMLResponse) +async def configs_run(request: Request, name: str = Form(...)) -> HTMLResponse: + base = _config_dir() + p = (base / name).resolve() + try: + if base not in p.parents and p != base: + raise ValueError("Access denied") + except Exception: + pass + if not (p.exists() and p.is_file() and p.suffix.lower() == ".json"): + return templates.TemplateResponse( + "configs/index.html", + {"request": request, "items": _list_configs(), "error": "Config not found."}, + ) + try: + cfg = json.loads(p.read_text(encoding="utf-8")) + except Exception as e: + return templates.TemplateResponse( + "configs/index.html", + {"request": request, "items": _list_configs(), "error": f"Failed to read JSON: {e}"}, + ) + + commander = cfg.get("commander", "") + tags = [t for t in [cfg.get("primary_tag"), cfg.get("secondary_tag"), cfg.get("tertiary_tag")] if t] + bracket = int(cfg.get("bracket_level") or 0) + ideals = cfg.get("ideal_counts", {}) or {} + + # Run build headlessly with orchestrator + res = orch.run_build(commander=commander, tags=tags, bracket=bracket, ideals=ideals) + if not res.get("ok"): + return templates.TemplateResponse( + "configs/run_result.html", + { + "request": request, + "ok": False, + "error": res.get("error") or "Build failed", + "log": res.get("log", ""), + "cfg_name": p.name, + "commander": commander, + }, + ) + return templates.TemplateResponse( + "configs/run_result.html", + { + "request": request, + "ok": True, + "log": res.get("log", ""), + "csv_path": res.get("csv_path"), + "txt_path": res.get("txt_path"), + "summary": res.get("summary"), + "cfg_name": p.name, + "commander": commander, + "game_changers": bc.GAME_CHANGERS, + }, + ) + + +@router.post("/upload", response_class=HTMLResponse) +async def configs_upload(request: Request, file: UploadFile = File(...)) -> HTMLResponse: + # Optional helper: allow uploading a JSON config + try: + content = await file.read() + data = json.loads(content.decode("utf-8")) + # Minimal validation + if not data.get("commander"): + raise ValueError("Missing 'commander'") + except Exception as e: + return templates.TemplateResponse( + "configs/index.html", + {"request": request, "items": _list_configs(), "error": f"Invalid JSON: {e}"}, + ) + # Save to config dir with original filename (or unique) + d = _config_dir() + d.mkdir(parents=True, exist_ok=True) + fname = file.filename or "config.json" + out = d / fname + i = 1 + while out.exists(): + stem = out.stem + out = d / f"{stem}_{i}.json" + i += 1 + out.write_text(json.dumps(data, indent=2), encoding="utf-8") + return templates.TemplateResponse( + "configs/index.html", + {"request": request, "items": _list_configs(), "notice": f"Uploaded {out.name}"}, + ) diff --git a/code/web/routes/decks.py b/code/web/routes/decks.py new file mode 100644 index 0000000..120719c --- /dev/null +++ b/code/web/routes/decks.py @@ -0,0 +1,267 @@ +from __future__ import annotations + +from fastapi import APIRouter, Request +from fastapi.responses import HTMLResponse +from pathlib import Path +import csv +import os +from typing import Dict, List, Tuple + +from ..app import templates +from deck_builder import builder_constants as bc + + +router = APIRouter(prefix="/decks") + + +def _deck_dir() -> Path: + # Prefer explicit env var if provided, else default to ./deck_files + p = os.getenv("DECK_EXPORTS") + if p: + return Path(p).resolve() + return 
(Path.cwd() / "deck_files").resolve() + + +def _list_decks() -> list[dict]: + d = _deck_dir() + try: + d.mkdir(parents=True, exist_ok=True) + except Exception: + pass + items: list[dict] = [] + # Prefer CSV entries and pair with matching TXT if present + for p in sorted(d.glob("*.csv"), key=lambda x: x.stat().st_mtime, reverse=True): + meta = {"name": p.name, "path": str(p), "mtime": p.stat().st_mtime} + stem = p.stem + txt = p.with_suffix('.txt') + if txt.exists(): + meta["txt_name"] = txt.name + meta["txt_path"] = str(txt) + # Prefer sidecar summary meta if present + sidecar = p.with_suffix('.summary.json') + if sidecar.exists(): + try: + import json as _json + payload = _json.loads(sidecar.read_text(encoding='utf-8')) + _m = payload.get('meta', {}) if isinstance(payload, dict) else {} + meta["commander"] = _m.get('commander') or meta.get("commander") + meta["tags"] = _m.get('tags') or meta.get("tags") or [] + except Exception: + pass + # Fallback to parsing commander/themes from filename convention Commander_Themes_YYYYMMDD + if not meta.get("commander"): + parts = stem.split('_') + if len(parts) >= 3: + meta["commander"] = parts[0] + meta["tags"] = parts[1:-1] + else: + meta["commander"] = stem + meta["tags"] = [] + items.append(meta) + return items + + +def _safe_within(base: Path, target: Path) -> bool: + try: + base_r = base.resolve() + targ_r = target.resolve() + return (base_r == targ_r) or (base_r in targ_r.parents) + except Exception: + return False + + +def _read_csv_summary(csv_path: Path) -> Tuple[dict, Dict[str, int], Dict[str, int], Dict[str, List[dict]]]: + """Parse CSV export to reconstruct minimal summary pieces. + + Returns: (meta, type_counts, curve_counts, type_cards) + meta: { 'commander': str, 'colors': [..] } + """ + headers = [] + type_counts: Dict[str, int] = {} + type_cards: Dict[str, List[dict]] = {} + curve_bins = ['0','1','2','3','4','5','6+'] + curve_counts: Dict[str, int] = {b: 0 for b in curve_bins} + curve_cards: Dict[str, List[dict]] = {b: [] for b in curve_bins} + meta: dict = {"commander": "", "colors": []} + commander_seen = False + # Infer commander from filename stem (pattern Commander_Themes_YYYYMMDD) + stem_parts = csv_path.stem.split('_') + inferred_commander = stem_parts[0] if stem_parts else '' + + def classify_mv(raw) -> str: + try: + v = float(raw) + except Exception: + v = 0.0 + return '6+' if v >= 6 else str(int(v)) + + try: + with csv_path.open('r', encoding='utf-8') as f: + reader = csv.reader(f) + headers = next(reader, []) + # Expected columns include: Name, Count, Type, ManaCost, ManaValue, Colors, Power, Toughness, Role, ..., Tags, Text, Owned + name_idx = headers.index('Name') if 'Name' in headers else 0 + count_idx = headers.index('Count') if 'Count' in headers else 1 + type_idx = headers.index('Type') if 'Type' in headers else 2 + mv_idx = headers.index('ManaValue') if 'ManaValue' in headers else (headers.index('Mana Value') if 'Mana Value' in headers else -1) + role_idx = headers.index('Role') if 'Role' in headers else -1 + tags_idx = headers.index('Tags') if 'Tags' in headers else -1 + colors_idx = headers.index('Colors') if 'Colors' in headers else -1 + + for row in reader: + if not row: + continue + try: + name = row[name_idx] + except Exception: + continue + try: + cnt = int(float(row[count_idx])) if row[count_idx] else 1 + except Exception: + cnt = 1 + type_line = row[type_idx] if type_idx >= 0 and type_idx < len(row) else '' + role = (row[role_idx] if role_idx >= 0 and role_idx < len(row) else '') + tags = (row[tags_idx] if 
tags_idx >= 0 and tags_idx < len(row) else '') + tags_list = [t.strip() for t in tags.split(';') if t.strip()] + + # Commander detection: prefer filename inference; else best-effort via type line containing 'Commander' + is_commander = (inferred_commander and name == inferred_commander) + if not is_commander: + is_commander = isinstance(type_line, str) and ('commander' in type_line.lower()) + if is_commander and not commander_seen: + meta['commander'] = name + commander_seen = True + + # Map type_line to broad category + tl = (type_line or '').lower() + if 'battle' in tl: + cat = 'Battle' + elif 'planeswalker' in tl: + cat = 'Planeswalker' + elif 'creature' in tl: + cat = 'Creature' + elif 'instant' in tl: + cat = 'Instant' + elif 'sorcery' in tl: + cat = 'Sorcery' + elif 'artifact' in tl: + cat = 'Artifact' + elif 'enchantment' in tl: + cat = 'Enchantment' + elif 'land' in tl: + cat = 'Land' + else: + cat = 'Other' + + # Type counts/cards (exclude commander entry from distribution) + if not is_commander: + type_counts[cat] = type_counts.get(cat, 0) + cnt + type_cards.setdefault(cat, []).append({ + 'name': name, + 'count': cnt, + 'role': role, + 'tags': tags_list, + }) + + # Curve + if mv_idx >= 0 and mv_idx < len(row): + bucket = classify_mv(row[mv_idx]) + if bucket not in curve_counts: + bucket = '6+' + curve_counts[bucket] += cnt + curve_cards[bucket].append({'name': name, 'count': cnt}) + + # Colors (from Colors col for commander/overall) + if is_commander and colors_idx >= 0 and colors_idx < len(row): + cid = row[colors_idx] or '' + if isinstance(cid, str): + meta['colors'] = list(cid) + except Exception: + pass + + # Precedence ordering + precedence_order = [ + 'Battle', 'Planeswalker', 'Creature', 'Instant', 'Sorcery', 'Artifact', 'Enchantment', 'Land', 'Other' + ] + prec_index = {k: i for i, k in enumerate(precedence_order)} + type_order = sorted(type_counts.keys(), key=lambda k: prec_index.get(k, 999)) + + summary = { + 'type_breakdown': { + 'counts': type_counts, + 'order': type_order, + 'cards': type_cards, + 'total': sum(type_counts.values()), + }, + 'pip_distribution': { + # Not recoverable from CSV without mana symbols; leave zeros + 'counts': {c: 0 for c in ('W','U','B','R','G')}, + 'weights': {c: 0 for c in ('W','U','B','R','G')}, + }, + 'mana_generation': { + # Not recoverable from CSV alone + 'W': 0, 'U': 0, 'B': 0, 'R': 0, 'G': 0, 'total_sources': 0, + }, + 'mana_curve': { + **curve_counts, + 'total_spells': sum(curve_counts.values()), + 'cards': curve_cards, + }, + 'colors': meta.get('colors', []), + } + return summary, type_counts, curve_counts, type_cards + + +@router.get("/", response_class=HTMLResponse) +async def decks_index(request: Request) -> HTMLResponse: + items = _list_decks() + return templates.TemplateResponse("decks/index.html", {"request": request, "items": items}) + + +@router.get("/view", response_class=HTMLResponse) +async def decks_view(request: Request, name: str) -> HTMLResponse: + base = _deck_dir() + p = (base / name).resolve() + if not _safe_within(base, p) or not (p.exists() and p.is_file() and p.suffix.lower() == ".csv"): + return templates.TemplateResponse("decks/index.html", {"request": request, "items": _list_decks(), "error": "Deck not found."}) + + # Try to load sidecar summary JSON first + summary = None + commander_name = '' + tags: List[str] = [] + sidecar = p.with_suffix('.summary.json') + if sidecar.exists(): + try: + import json as _json + payload = _json.loads(sidecar.read_text(encoding='utf-8')) + if isinstance(payload, dict): + 
summary = payload.get('summary') + meta = payload.get('meta', {}) + if isinstance(meta, dict): + commander_name = meta.get('commander') or '' + _tags = meta.get('tags') or [] + if isinstance(_tags, list): + tags = [str(t) for t in _tags] + except Exception: + summary = None + if not summary: + # Reconstruct minimal summary from CSV + summary, _tc, _cc, _tcs = _read_csv_summary(p) + stem = p.stem + txt_path = p.with_suffix('.txt') + # If missing still, infer from filename stem + if not commander_name: + parts = stem.split('_') + commander_name = parts[0] if parts else '' + + ctx = { + "request": request, + "name": p.name, + "csv_path": str(p), + "txt_path": str(txt_path) if txt_path.exists() else None, + "summary": summary, + "commander": commander_name, + "tags": tags, + "game_changers": bc.GAME_CHANGERS, + } + return templates.TemplateResponse("decks/view.html", ctx) diff --git a/code/web/routes/home.py b/code/web/routes/home.py new file mode 100644 index 0000000..e988807 --- /dev/null +++ b/code/web/routes/home.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from fastapi import APIRouter, Request +from fastapi.responses import HTMLResponse +from ..app import templates + +router = APIRouter() + +@router.get("/", response_class=HTMLResponse) +async def home(request: Request) -> HTMLResponse: + return templates.TemplateResponse("home.html", {"request": request}) diff --git a/code/web/routes/setup.py b/code/web/routes/setup.py new file mode 100644 index 0000000..f3b10b9 --- /dev/null +++ b/code/web/routes/setup.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import threading +from typing import Optional +from fastapi import APIRouter, Request +from fastapi import Body +from pathlib import Path +import json as _json +from fastapi.responses import HTMLResponse, JSONResponse +from ..app import templates +from ..services.orchestrator import _ensure_setup_ready # type: ignore + +router = APIRouter(prefix="/setup") + + +def _kickoff_setup_async(force: bool = False): + def runner(): + try: + _ensure_setup_ready(lambda _m: None, force=force) # type: ignore[arg-type] + except Exception: + pass + t = threading.Thread(target=runner, daemon=True) + t.start() + + +@router.get("/running", response_class=HTMLResponse) +async def setup_running(request: Request, start: Optional[int] = 0, next: Optional[str] = None, force: Optional[bool] = None) -> HTMLResponse: # type: ignore[override] + # Optionally start the setup/tagging in the background if requested + try: + if start and int(start) != 0: + # honor optional force flag from query + f = False + try: + if force is not None: + f = bool(force) + else: + q_force = request.query_params.get('force') + if q_force is not None: + f = q_force.strip().lower() in {"1", "true", "yes", "on"} + except Exception: + f = False + _kickoff_setup_async(force=f) + except Exception: + pass + return templates.TemplateResponse("setup/running.html", {"request": request, "next_url": next}) + + +@router.post("/start") +async def setup_start(request: Request, force: bool = Body(False)): # accept JSON body {"force": true} + try: + # Allow query string override as well (?force=1) + try: + q_force = request.query_params.get('force') + if q_force is not None: + force = q_force.strip().lower() in {"1", "true", "yes", "on"} + except Exception: + pass + # Write immediate status so UI reflects the start + try: + p = Path("csv_files") + p.mkdir(parents=True, exist_ok=True) + status = {"running": True, "phase": "setup", "message": "Starting setup/tagging...", "color": 
None} + with (p / ".setup_status.json").open('w', encoding='utf-8') as f: + _json.dump(status, f) + except Exception: + pass + _kickoff_setup_async(force=bool(force)) + return JSONResponse({"ok": True, "started": True, "force": bool(force)}, status_code=202) + except Exception: + return JSONResponse({"ok": False}, status_code=500) + + +@router.get("/start") +async def setup_start_get(request: Request): + """GET alias to start setup/tagging via query string (?force=1). + + Useful as a fallback from clients that cannot POST JSON. + """ + try: + # Determine force from query params + force = False + try: + q_force = request.query_params.get('force') + if q_force is not None: + force = q_force.strip().lower() in {"1", "true", "yes", "on"} + except Exception: + pass + # Write immediate status so UI reflects the start + try: + p = Path("csv_files") + p.mkdir(parents=True, exist_ok=True) + status = {"running": True, "phase": "setup", "message": "Starting setup/tagging...", "color": None} + with (p / ".setup_status.json").open('w', encoding='utf-8') as f: + _json.dump(status, f) + except Exception: + pass + _kickoff_setup_async(force=bool(force)) + return JSONResponse({"ok": True, "started": True, "force": bool(force)}, status_code=202) + except Exception: + return JSONResponse({"ok": False}, status_code=500) + + +@router.get("/", response_class=HTMLResponse) +async def setup_index(request: Request) -> HTMLResponse: + return templates.TemplateResponse("setup/index.html", {"request": request}) diff --git a/code/web/services/__init__.py b/code/web/services/__init__.py new file mode 100644 index 0000000..c87405f --- /dev/null +++ b/code/web/services/__init__.py @@ -0,0 +1 @@ +# Services package marker diff --git a/code/web/services/orchestrator.py b/code/web/services/orchestrator.py new file mode 100644 index 0000000..acf719c --- /dev/null +++ b/code/web/services/orchestrator.py @@ -0,0 +1,865 @@ +from __future__ import annotations + +from typing import Dict, Any, List, Tuple +import copy +from deck_builder.builder import DeckBuilder +from deck_builder.phases.phase0_core import BRACKET_DEFINITIONS +from deck_builder import builder_constants as bc +import os +import time +import json +from datetime import datetime as _dt +import re + + +def commander_names() -> List[str]: + tmp = DeckBuilder() + df = tmp.load_commander_data() + return df["name"].astype(str).tolist() + + +def commander_candidates(query: str, limit: int = 10) -> List[Tuple[str, int, List[str]]]: + # Normalize query similar to CLI to reduce case sensitivity surprises + tmp = DeckBuilder() + try: + if hasattr(tmp, '_normalize_commander_query'): + query = tmp._normalize_commander_query(query) # type: ignore[attr-defined] + else: + # Light fallback: basic title case + query = ' '.join([w[:1].upper() + w[1:].lower() if w else w for w in str(query).split(' ')]) + except Exception: + pass + df = tmp.load_commander_data() + # Filter to plausible commanders: Legendary Creature, or text explicitly allows being a commander. 
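The commander helpers defined in this orchestrator module (`commander_names`, `commander_candidates`, and the inspect/select functions that follow) can be exercised directly from a script. A minimal sketch, assuming the card CSVs have already been generated, the repository root is the working directory, and the modules import as `code.web.*` (the same module path the `uvicorn code.web.app:app` command in DOCKER.md uses); the query and commander name are just examples taken from this diff:

```python
# Sketch: call the commander helpers the web routes use. commander_candidates
# returns (name, score, color_identity) tuples; commander_inspect returns a
# pretty text block for one card. Import path assumes the repo root is cwd.
from code.web.services import orchestrator as orch

for name, score, colors in orch.commander_candidates("inti", limit=5):
    print(f"{score:>3}  {''.join(colors):<5}  {name}")

info = orch.commander_inspect("Inti, Seneschal of the Sun")  # example name from the code comments
if info.get("ok"):
    print(info["pretty"])
```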
+ try: + cols = set(df.columns.astype(str)) + has_type = ('type' in cols) or ('type_line' in cols) + has_text = ('text' in cols) or ('oracleText' in cols) + if has_type or has_text: + def _is_commander_row(_r) -> bool: + try: + tline = str(_r.get('type', _r.get('type_line', '')) or '').lower() + textv = str(_r.get('text', _r.get('oracleText', '')) or '').lower() + if 'legendary' in tline and 'creature' in tline: + return True + if 'legendary' in tline and 'planeswalker' in tline and 'can be your commander' in textv: + return True + if 'can be your commander' in textv: + return True + except Exception: + return False + return False + df_comm = df[df.apply(_is_commander_row, axis=1)] + if not df_comm.empty: + df = df_comm + # else: keep df as-is when columns not present + except Exception: + pass + names = df["name"].astype(str).tolist() + # Reuse existing scoring helpers through the DeckBuilder API + scored_raw = tmp._gather_candidates(query, names) + # Consider a wider pool for re-ranking so exact substrings bubble up + pool = scored_raw[: max(limit * 5, 50)] + # Force-include any names that contain the raw query as a substring (case-insensitive) + # to avoid missing obvious matches like 'Inti, Seneschal of the Sun' for 'inti'. + try: + q_raw = (query or "").strip().lower() + if q_raw: + have = {n for (n, _s) in pool} + # Map original scores for reuse + base_scores = {n: int(s) for (n, s) in scored_raw} + for n in names: + nl = str(n).lower() + if q_raw in nl and n not in have: + # Assign a reasonable base score if not present; favor prefixes + approx = base_scores.get(n, 90 if nl.startswith(q_raw) else 80) + pool.append((n, approx)) + except Exception: + pass + # Attach color identity for each candidate + try: + df = tmp.load_commander_data() + except Exception: + df = None + q = (query or "").strip().lower() + tokens = [t for t in re.split(r"[\s,]+", q) if t] + def _color_list_for(name: str) -> List[str]: + colors: List[str] = [] + try: + if df is not None: + row = df[df["name"].astype(str) == str(name)] + if not row.empty: + ci = row.iloc[0].get("colorIdentity") + if isinstance(ci, list): + colors = [str(c).upper() for c in ci if str(c).strip()] + elif isinstance(ci, str) and ci.strip(): + parts = [p.strip().upper() for p in ci.replace('[', '').replace(']', '').replace("'", '').split(',') if p.strip()] + colors = parts if parts else list(ci) + if not colors: + colors = ["C"] + except Exception: + colors = ["C"] + return colors + + rescored: List[Tuple[str, int, List[str], int, int, int]] = [] # (name, orig_score, colors, rank_score, pos, exact_first_word) + for name, score in pool: + colors: List[str] = [] + colors = _color_list_for(name) + nl = str(name).lower() + bonus = 0 + pos = nl.find(q) if q else -1 + # Extract first word (letters only) for exact first-word preference + try: + m_first = re.match(r"^[a-z0-9']+", nl) + first_word = m_first.group(0) if m_first else "" + except Exception: + first_word = nl.split(" ", 1)[0] if nl else "" + exact_first = 1 if (q and first_word == q) else 0 + # Base heuristics + if q: + if nl == q: + bonus += 100 + if nl.startswith(q): + bonus += 60 + if re.search(r"\b" + re.escape(q), nl): + bonus += 40 + if q in nl: + bonus += 30 + # Strongly prefer exact first-word equality over general prefix + if exact_first: + bonus += 140 + # Multi-token bonuses + if tokens: + present = sum(1 for t in tokens if t in nl) + all_present = 1 if all(t in nl for t in tokens) else 0 + bonus += present * 10 + all_present * 40 + # Extra if first token is a prefix + if 
nl.startswith(tokens[0]): + bonus += 15 + # Favor shorter names slightly and earlier positions + bonus += max(0, 20 - len(nl)) + if pos >= 0: + bonus += max(0, 20 - pos) + rank_score = int(score) + bonus + rescored.append((name, int(score), colors, rank_score, pos if pos >= 0 else 10**6, exact_first)) + + # Sort: exact first-word matches first, then by rank score desc, then earliest position, then original score desc, then name asc + rescored.sort(key=lambda x: (-x[5], -x[3], x[4], -x[1], x[0])) + top = rescored[:limit] + return [(name, orig_score, colors) for (name, orig_score, colors, _r, _p, _e) in top] + + +def commander_inspect(name: str) -> Dict[str, Any]: + tmp = DeckBuilder() + df = tmp.load_commander_data() + row = df[df["name"] == name] + if row.empty: + return {"ok": False, "error": "Commander not found"} + pretty = tmp._format_commander_pretty(row.iloc[0]) + return {"ok": True, "pretty": pretty} + + +def commander_select(name: str) -> Dict[str, Any]: + tmp = DeckBuilder() + df = tmp.load_commander_data() + # Try exact match, then normalized match + row = df[df["name"] == name] + if row.empty: + try: + if hasattr(tmp, '_normalize_commander_query'): + name2 = tmp._normalize_commander_query(name) # type: ignore[attr-defined] + else: + name2 = ' '.join([w[:1].upper() + w[1:].lower() if w else w for w in str(name).split(' ')]) + row = df[df["name"] == name2] + except Exception: + row = df[df["name"] == name] + if row.empty: + return {"ok": False, "error": "Commander not found"} + tmp._apply_commander_selection(row.iloc[0]) + # Derive tags and a quick preview of bracket choices + tags = list(dict.fromkeys(tmp.commander_tags)) if hasattr(tmp, "commander_tags") else [] + return { + "ok": True, + "name": name, + "tags": tags, + } + + +def tags_for_commander(name: str) -> List[str]: + tmp = DeckBuilder() + df = tmp.load_commander_data() + row = df[df["name"] == name] + if row.empty: + return [] + raw = row.iloc[0].get("themeTags", []) + if isinstance(raw, list): + return list(dict.fromkeys([str(t).strip() for t in raw if str(t).strip()])) + if isinstance(raw, str) and raw.strip(): + parts = [p.strip().strip("'\"") for p in raw.split(',')] + return [p for p in parts if p] + return [] + + +def bracket_options() -> List[Dict[str, Any]]: + return [{"level": b.level, "name": b.name, "desc": b.short_desc} for b in BRACKET_DEFINITIONS] + + +def ideal_defaults() -> Dict[str, Any]: + return { + "ramp": getattr(bc, 'DEFAULT_RAMP_COUNT', 10), + "lands": getattr(bc, 'DEFAULT_LAND_COUNT', 35), + "basic_lands": getattr(bc, 'DEFAULT_BASIC_LAND_COUNT', 20), + "fetch_lands": getattr(bc, 'FETCH_LAND_DEFAULT_COUNT', 3), + "creatures": getattr(bc, 'DEFAULT_CREATURE_COUNT', 28), + "removal": getattr(bc, 'DEFAULT_REMOVAL_COUNT', 10), + "wipes": getattr(bc, 'DEFAULT_WIPES_COUNT', 2), + "card_advantage": getattr(bc, 'DEFAULT_CARD_ADVANTAGE_COUNT', 8), + "protection": getattr(bc, 'DEFAULT_PROTECTION_COUNT', 4), + } + + +def ideal_labels() -> Dict[str, str]: + return { + 'ramp': 'Ramp', + 'lands': 'Total Lands', + 'basic_lands': 'Basic Lands (Min)', + 'fetch_lands': 'Fetch Lands', + 'creatures': 'Creatures', + 'removal': 'Spot Removal', + 'wipes': 'Board Wipes', + 'card_advantage': 'Card Advantage', + 'protection': 'Protection', + } + + +def _ensure_setup_ready(out, force: bool = False) -> None: + """Ensure card CSVs exist and tagging has completed; bootstrap if needed. 
+ + Mirrors the CLI behavior used in build_deck_full: if csv_files/cards.csv is + missing, too old, or the tagging flag is absent, run initial setup and tagging. + """ + def _write_status(payload: dict) -> None: + try: + os.makedirs('csv_files', exist_ok=True) + # Preserve started_at if present + status_path = os.path.join('csv_files', '.setup_status.json') + existing = {} + try: + if os.path.exists(status_path): + with open(status_path, 'r', encoding='utf-8') as _rf: + existing = json.load(_rf) or {} + except Exception: + existing = {} + # Merge and keep started_at unless explicitly overridden + merged = {**existing, **payload} + if 'started_at' not in merged and existing.get('started_at'): + merged['started_at'] = existing.get('started_at') + merged['updated'] = _dt.now().isoformat(timespec='seconds') + with open(status_path, 'w', encoding='utf-8') as f: + json.dump(merged, f) + except Exception: + pass + + try: + cards_path = os.path.join('csv_files', 'cards.csv') + flag_path = os.path.join('csv_files', '.tagging_complete.json') + refresh_needed = bool(force) + if force: + _write_status({"running": True, "phase": "setup", "message": "Forcing full setup and tagging...", "started_at": _dt.now().isoformat(timespec='seconds'), "percent": 0}) + + if not os.path.exists(cards_path): + out("cards.csv not found. Running initial setup and tagging...") + _write_status({"running": True, "phase": "setup", "message": "Preparing card database (initial setup)...", "started_at": _dt.now().isoformat(timespec='seconds'), "percent": 0}) + refresh_needed = True + else: + try: + age_seconds = time.time() - os.path.getmtime(cards_path) + if age_seconds > 7 * 24 * 60 * 60 and not force: + out("cards.csv is older than 7 days. Refreshing data (setup + tagging)...") + _write_status({"running": True, "phase": "setup", "message": "Refreshing card database (initial setup)...", "started_at": _dt.now().isoformat(timespec='seconds'), "percent": 0}) + refresh_needed = True + except Exception: + pass + + if not os.path.exists(flag_path): + out("Tagging completion flag not found. 
Performing full tagging...") + if not refresh_needed: + _write_status({"running": True, "phase": "tagging", "message": "Applying tags to card database...", "started_at": _dt.now().isoformat(timespec='seconds'), "percent": 0}) + refresh_needed = True + + if refresh_needed: + try: + from file_setup.setup import initial_setup # type: ignore + # Always run initial_setup when forced or when cards are missing/stale + initial_setup() + except Exception as e: + out(f"Initial setup failed: {e}") + _write_status({"running": False, "phase": "error", "message": f"Initial setup failed: {e}"}) + return + # Tagging with granular color progress + try: + from tagging import tagger as _tagger # type: ignore + from settings import COLORS as _COLORS # type: ignore + colors = list(_COLORS) + total = len(colors) + _write_status({ + "running": True, + "phase": "tagging", + "message": "Tagging cards (this may take a while)...", + "color": None, + "percent": 0, + "color_idx": 0, + "color_total": total, + "tagging_started_at": _dt.now().isoformat(timespec='seconds') + }) + for idx, _color in enumerate(colors, start=1): + try: + pct = int((idx - 1) * 100 / max(1, total)) + # Estimate ETA based on average time per completed color + eta_s = None + try: + from datetime import datetime as __dt + ts = __dt.fromisoformat(json.load(open(os.path.join('csv_files', '.setup_status.json'), 'r', encoding='utf-8')).get('tagging_started_at')) # type: ignore + elapsed = max(0.0, (_dt.now() - ts).total_seconds()) + completed = max(0, idx - 1) + if completed > 0: + avg = elapsed / completed + remaining = max(0, total - completed) + eta_s = int(avg * remaining) + except Exception: + eta_s = None + payload = { + "running": True, + "phase": "tagging", + "message": f"Tagging {_color}...", + "color": _color, + "percent": pct, + "color_idx": idx, + "color_total": total, + } + if eta_s is not None: + payload["eta_seconds"] = eta_s + _write_status(payload) + _tagger.load_dataframe(_color) + except Exception as e: + out(f"Tagging {_color} failed: {e}") + _write_status({"running": False, "phase": "error", "message": f"Tagging {_color} failed: {e}", "color": _color}) + return + except Exception as e: + out(f"Tagging failed to start: {e}") + _write_status({"running": False, "phase": "error", "message": f"Tagging failed to start: {e}"}) + return + try: + os.makedirs('csv_files', exist_ok=True) + with open(flag_path, 'w', encoding='utf-8') as _fh: + json.dump({'tagged_at': _dt.now().isoformat(timespec='seconds')}, _fh) + # Final status with percent 100 and timing info + finished_dt = _dt.now() + finished = finished_dt.isoformat(timespec='seconds') + # Compute duration_seconds if started_at exists + duration_s = None + try: + from datetime import datetime as __dt + status_path = os.path.join('csv_files', '.setup_status.json') + with open(status_path, 'r', encoding='utf-8') as _rf: + _st = json.load(_rf) or {} + if _st.get('started_at'): + start_dt = __dt.fromisoformat(_st['started_at']) + duration_s = int(max(0.0, (finished_dt - start_dt).total_seconds())) + except Exception: + duration_s = None + payload = {"running": False, "phase": "done", "message": "Setup complete", "color": None, "percent": 100, "finished_at": finished} + if duration_s is not None: + payload["duration_seconds"] = duration_s + _write_status(payload) + except Exception: + pass + except Exception: + # Non-fatal; downstream loads will still attempt and surface errors in logs + _write_status({"running": False, "phase": "error", "message": "Setup check failed"}) + + +def 
run_build(commander: str, tags: List[str], bracket: int, ideals: Dict[str, int]) -> Dict[str, Any]: + """Run the deck build end-to-end with provided selections and capture logs. + + Returns: { ok: bool, log: str, csv_path: Optional[str], txt_path: Optional[str], error: Optional[str] } + """ + logs: List[str] = [] + + def out(msg: str) -> None: + try: + logs.append(msg) + except Exception: + pass + + try: + # Provide a no-op input function so any leftover prompts auto-accept defaults + b = DeckBuilder(output_func=out, input_func=lambda _prompt: "", headless=True) + # Ensure setup/tagging present for web headless run + _ensure_setup_ready(out) + # Commander selection + df = b.load_commander_data() + row = df[df["name"].astype(str) == str(commander)] + if row.empty: + return {"ok": False, "error": f"Commander not found: {commander}", "log": "\n".join(logs)} + b._apply_commander_selection(row.iloc[0]) + + # Tags + b.selected_tags = list(tags or []) + b.primary_tag = b.selected_tags[0] if len(b.selected_tags) > 0 else None + b.secondary_tag = b.selected_tags[1] if len(b.selected_tags) > 1 else None + b.tertiary_tag = b.selected_tags[2] if len(b.selected_tags) > 2 else None + try: + b._update_commander_dict_with_selected_tags() + except Exception: + pass + + # Bracket + bd = next((x for x in BRACKET_DEFINITIONS if int(getattr(x, 'level', 0)) == int(bracket)), None) + if bd is None: + return {"ok": False, "error": f"Invalid bracket level: {bracket}", "log": "\n".join(logs)} + b.bracket_definition = bd + b.bracket_level = bd.level + b.bracket_name = bd.name + b.bracket_limits = dict(getattr(bd, 'limits', {})) + + # Ideal counts + b.ideal_counts = {k: int(v) for k, v in (ideals or {}).items()} + + # Load data and run phases + try: + b.determine_color_identity() + b.setup_dataframes() + except Exception as e: + out(f"Failed to load color identity/card pool: {e}") + + try: + b._run_land_build_steps() + except Exception as e: + out(f"Land build failed: {e}") + + try: + if hasattr(b, 'add_creatures_phase'): + b.add_creatures_phase() + except Exception as e: + out(f"Creature phase failed: {e}") + try: + if hasattr(b, 'add_spells_phase'): + b.add_spells_phase() + except Exception as e: + out(f"Spell phase failed: {e}") + try: + if hasattr(b, 'post_spell_land_adjust'): + b.post_spell_land_adjust() + except Exception as e: + out(f"Post-spell land adjust failed: {e}") + + # Reporting/exports + csv_path = None + txt_path = None + try: + if hasattr(b, 'run_reporting_phase'): + b.run_reporting_phase() + except Exception as e: + out(f"Reporting phase failed: {e}") + try: + if hasattr(b, 'export_decklist_csv'): + csv_path = b.export_decklist_csv() # type: ignore[attr-defined] + except Exception as e: + out(f"CSV export failed: {e}") + try: + if hasattr(b, 'export_decklist_text'): + # Try to mirror build_deck_full behavior by displaying the contents + import os as _os + base, _ext = _os.path.splitext(_os.path.basename(csv_path)) if csv_path else (f"deck_{b.timestamp}", "") + txt_path = b.export_decklist_text(filename=base + '.txt') # type: ignore[attr-defined] + try: + b._display_txt_contents(txt_path) + except Exception: + pass + except Exception as e: + out(f"Text export failed: {e}") + + # Build structured summary for UI + summary = None + try: + if hasattr(b, 'build_deck_summary'): + summary = b.build_deck_summary() # type: ignore[attr-defined] + except Exception: + summary = None + # Write sidecar summary JSON next to CSV (if available) + try: + if summary and csv_path: + import os as _os + import json as 
_json + base, _ = _os.path.splitext(csv_path) + sidecar = base + '.summary.json' + meta = { + "commander": getattr(b, 'commander_name', '') or getattr(b, 'commander', ''), + "tags": list(getattr(b, 'selected_tags', []) or []) or [t for t in [getattr(b, 'primary_tag', None), getattr(b, 'secondary_tag', None), getattr(b, 'tertiary_tag', None)] if t], + "bracket_level": getattr(b, 'bracket_level', None), + "csv": csv_path, + "txt": txt_path, + } + payload = {"meta": meta, "summary": summary} + with open(sidecar, 'w', encoding='utf-8') as f: + _json.dump(payload, f, ensure_ascii=False, indent=2) + except Exception: + pass + return {"ok": True, "log": "\n".join(logs), "csv_path": csv_path, "txt_path": txt_path, "summary": summary} + except Exception as e: + logs.append(f"Build failed: {e}") + return {"ok": False, "error": str(e), "log": "\n".join(logs)} + + +# ----------------- +# Step-by-step build session +# ----------------- +def _make_stages(b: DeckBuilder) -> List[Dict[str, Any]]: + stages: List[Dict[str, Any]] = [] + # Web UI: skip theme confirmation stages (CLI-only pauses) + # Land steps 1..8 (if present) + for i in range(1, 9): + fn = getattr(b, f"run_land_step{i}", None) + if callable(fn): + stages.append({"key": f"land{i}", "label": f"Lands (Step {i})", "runner_name": f"run_land_step{i}"}) + # Creatures split into theme sub-stages for web confirm + if getattr(b, 'primary_tag', None) and hasattr(b, 'add_creatures_primary_phase'): + stages.append({"key": "creatures_primary", "label": "Creatures: Primary", "runner_name": "add_creatures_primary_phase"}) + if getattr(b, 'secondary_tag', None) and hasattr(b, 'add_creatures_secondary_phase'): + stages.append({"key": "creatures_secondary", "label": "Creatures: Secondary", "runner_name": "add_creatures_secondary_phase"}) + if getattr(b, 'tertiary_tag', None) and hasattr(b, 'add_creatures_tertiary_phase'): + stages.append({"key": "creatures_tertiary", "label": "Creatures: Tertiary", "runner_name": "add_creatures_tertiary_phase"}) + if hasattr(b, 'add_creatures_fill_phase'): + stages.append({"key": "creatures_fill", "label": "Creatures: Fill", "runner_name": "add_creatures_fill_phase"}) + # Spells: prefer granular categories when available; otherwise fall back to bulk + spell_categories: List[Tuple[str, str, str]] = [ + ("ramp", "Confirm Ramp", "add_ramp"), + ("removal", "Confirm Removal", "add_removal"), + ("wipes", "Confirm Board Wipes", "add_board_wipes"), + ("card_advantage", "Confirm Card Advantage", "add_card_advantage"), + ("protection", "Confirm Protection", "add_protection"), + ] + any_granular = any(callable(getattr(b, rn, None)) for _key, _label, rn in spell_categories) + if any_granular: + for key, label, runner in spell_categories: + if callable(getattr(b, runner, None)): + # Web UI: omit confirm stages; show only the action stage + label_action = label.replace("Confirm ", "") + stages.append({"key": f"spells_{key}", "label": label_action, "runner_name": runner}) + # Ensure we include the theme filler step to top up to 100 cards + if callable(getattr(b, 'fill_remaining_theme_spells', None)): + stages.append({"key": "spells_fill", "label": "Theme Spell Fill", "runner_name": "fill_remaining_theme_spells"}) + elif hasattr(b, 'add_spells_phase'): + stages.append({"key": "spells", "label": "Spells", "runner_name": "add_spells_phase"}) + # Post-adjust + if hasattr(b, 'post_spell_land_adjust'): + stages.append({"key": "post_adjust", "label": "Post-Spell Land Adjust", "runner_name": "post_spell_land_adjust"}) + # Reporting + if hasattr(b, 
'run_reporting_phase'): + stages.append({"key": "reporting", "label": "Reporting", "runner_name": "run_reporting_phase"}) + # Export is not a separate stage here; we will auto-export at the final continue. + return stages + + +def start_build_ctx(commander: str, tags: List[str], bracket: int, ideals: Dict[str, int]) -> Dict[str, Any]: + logs: List[str] = [] + + def out(msg: str) -> None: + logs.append(msg) + + # Provide a no-op input function so staged web builds never block on input + b = DeckBuilder(output_func=out, input_func=lambda _prompt: "", headless=True) + # Ensure setup/tagging present before staged build + _ensure_setup_ready(out) + # Commander selection + df = b.load_commander_data() + row = df[df["name"].astype(str) == str(commander)] + if row.empty: + raise ValueError(f"Commander not found: {commander}") + b._apply_commander_selection(row.iloc[0]) + # Tags + b.selected_tags = list(tags or []) + b.primary_tag = b.selected_tags[0] if len(b.selected_tags) > 0 else None + b.secondary_tag = b.selected_tags[1] if len(b.selected_tags) > 1 else None + b.tertiary_tag = b.selected_tags[2] if len(b.selected_tags) > 2 else None + try: + b._update_commander_dict_with_selected_tags() + except Exception: + pass + # Bracket + bd = next((x for x in BRACKET_DEFINITIONS if int(getattr(x, 'level', 0)) == int(bracket)), None) + if bd is None: + raise ValueError(f"Invalid bracket level: {bracket}") + b.bracket_definition = bd + b.bracket_level = bd.level + b.bracket_name = bd.name + b.bracket_limits = dict(getattr(bd, 'limits', {})) + # Ideals + b.ideal_counts = {k: int(v) for k, v in (ideals or {}).items()} + # Data load + b.determine_color_identity() + b.setup_dataframes() + # Stages + stages = _make_stages(b) + ctx = { + "builder": b, + "logs": logs, + "stages": stages, + "idx": 0, + "last_log_idx": 0, + "csv_path": None, + "txt_path": None, + "snapshot": None, + } + return ctx + + +def _snapshot_builder(b: DeckBuilder) -> Dict[str, Any]: + """Capture mutable state needed to rerun a stage.""" + snap: Dict[str, Any] = {} + # Core collections + snap["card_library"] = copy.deepcopy(getattr(b, 'card_library', {})) + snap["tag_counts"] = copy.deepcopy(getattr(b, 'tag_counts', {})) + snap["_card_name_tags_index"] = copy.deepcopy(getattr(b, '_card_name_tags_index', {})) + snap["suggested_lands_queue"] = copy.deepcopy(getattr(b, 'suggested_lands_queue', [])) + # Caches and pools + try: + if getattr(b, '_combined_cards_df', None) is not None: + snap["_combined_cards_df"] = b._combined_cards_df.copy(deep=True) + except Exception: + snap["_combined_cards_df"] = None + try: + if getattr(b, '_full_cards_df', None) is not None: + snap["_full_cards_df"] = b._full_cards_df.copy(deep=True) + except Exception: + snap["_full_cards_df"] = None + snap["_color_source_matrix_baseline"] = copy.deepcopy(getattr(b, '_color_source_matrix_baseline', None)) + snap["_color_source_matrix_cache"] = copy.deepcopy(getattr(b, '_color_source_matrix_cache', None)) + snap["_color_source_cache_dirty"] = getattr(b, '_color_source_cache_dirty', True) + snap["_spell_pip_weights_cache"] = copy.deepcopy(getattr(b, '_spell_pip_weights_cache', None)) + snap["_spell_pip_cache_dirty"] = getattr(b, '_spell_pip_cache_dirty', True) + return snap + + +def _restore_builder(b: DeckBuilder, snap: Dict[str, Any]) -> None: + b.card_library = copy.deepcopy(snap.get("card_library", {})) + b.tag_counts = copy.deepcopy(snap.get("tag_counts", {})) + b._card_name_tags_index = copy.deepcopy(snap.get("_card_name_tags_index", {})) + b.suggested_lands_queue = 
copy.deepcopy(snap.get("suggested_lands_queue", [])) + if "_combined_cards_df" in snap: + b._combined_cards_df = snap["_combined_cards_df"] + if "_full_cards_df" in snap: + b._full_cards_df = snap["_full_cards_df"] + b._color_source_matrix_baseline = copy.deepcopy(snap.get("_color_source_matrix_baseline", None)) + b._color_source_matrix_cache = copy.deepcopy(snap.get("_color_source_matrix_cache", None)) + b._color_source_cache_dirty = bool(snap.get("_color_source_cache_dirty", True)) + b._spell_pip_weights_cache = copy.deepcopy(snap.get("_spell_pip_weights_cache", None)) + b._spell_pip_cache_dirty = bool(snap.get("_spell_pip_cache_dirty", True)) + + +def run_stage(ctx: Dict[str, Any], rerun: bool = False) -> Dict[str, Any]: + b: DeckBuilder = ctx["builder"] + stages: List[Dict[str, Any]] = ctx["stages"] + logs: List[str] = ctx["logs"] + + # If all stages done, finalize exports (interactive/manual build) + if ctx["idx"] >= len(stages): + if not ctx.get("csv_path") and hasattr(b, 'export_decklist_csv'): + try: + ctx["csv_path"] = b.export_decklist_csv() # type: ignore[attr-defined] + except Exception as e: + logs.append(f"CSV export failed: {e}") + if not ctx.get("txt_path") and hasattr(b, 'export_decklist_text'): + try: + import os as _os + base, _ext = _os.path.splitext(_os.path.basename(ctx.get("csv_path") or f"deck_{b.timestamp}.csv")) + ctx["txt_path"] = b.export_decklist_text(filename=base + '.txt') # type: ignore[attr-defined] + # Export the run configuration JSON for manual builds + try: + b.export_run_config_json(directory='config', filename=base + '.json') # type: ignore[attr-defined] + except Exception: + pass + except Exception as e: + logs.append(f"Text export failed: {e}") + # Build structured summary for UI + summary = None + try: + if hasattr(b, 'build_deck_summary'): + summary = b.build_deck_summary() # type: ignore[attr-defined] + except Exception: + summary = None + # Write sidecar summary JSON next to CSV (if available) + try: + if summary and ctx.get("csv_path"): + import os as _os + import json as _json + csv_path = ctx.get("csv_path") + base, _ = _os.path.splitext(csv_path) + sidecar = base + '.summary.json' + meta = { + "commander": getattr(b, 'commander_name', '') or getattr(b, 'commander', ''), + "tags": list(getattr(b, 'selected_tags', []) or []) or [t for t in [getattr(b, 'primary_tag', None), getattr(b, 'secondary_tag', None), getattr(b, 'tertiary_tag', None)] if t], + "bracket_level": getattr(b, 'bracket_level', None), + "csv": ctx.get("csv_path"), + "txt": ctx.get("txt_path"), + } + payload = {"meta": meta, "summary": summary} + with open(sidecar, 'w', encoding='utf-8') as f: + _json.dump(payload, f, ensure_ascii=False, indent=2) + except Exception: + pass + return { + "done": True, + "label": "Complete", + "log_delta": "", + "idx": len(stages), + "total": len(stages), + "csv_path": ctx.get("csv_path"), + "txt_path": ctx.get("txt_path"), + "summary": summary, + } + + # Determine which stage index to run (rerun last visible, else current) + if rerun: + i = max(0, int(ctx.get("last_visible_idx", ctx["idx"]) or 1) - 1) + else: + i = ctx["idx"] + + # Iterate forward until we find a stage that adds cards, skipping no-ops + while i < len(stages): + stage = stages[i] + label = stage["label"] + runner_name = stage["runner_name"] + + # Take snapshot before executing; for rerun, restore first if we have one + if rerun and ctx.get("snapshot") is not None and i == max(0, int(ctx.get("last_visible_idx", ctx["idx"]) or 1) - 1): + _restore_builder(b, ctx["snapshot"]) # 
restore to pre-stage state + snap_before = _snapshot_builder(b) + + # Run the stage and capture logs delta + start_log = len(logs) + fn = getattr(b, runner_name, None) + if callable(fn): + try: + fn() + except Exception as e: + logs.append(f"Stage '{label}' failed: {e}") + else: + logs.append(f"Runner not available: {runner_name}") + delta_log = "\n".join(logs[start_log:]) + + # Compute added cards based on snapshot + try: + prev_lib = snap_before.get("card_library", {}) if isinstance(snap_before, dict) else {} + added_cards: list[dict] = [] + for name, entry in b.card_library.items(): + try: + prev_entry = prev_lib.get(name) + prev_count = int(prev_entry.get('Count', 0)) if isinstance(prev_entry, dict) else 0 + new_count = int(entry.get('Count', 1)) + delta_count = max(0, new_count - prev_count) + if delta_count <= 0: + continue + role = str(entry.get('Role') or '').strip() + sub_role = str(entry.get('SubRole') or '').strip() + added_by = str(entry.get('AddedBy') or '').strip() + trig = str(entry.get('TriggerTag') or '').strip() + parts: list[str] = [] + if role: + parts.append(role) + if sub_role: + parts.append(sub_role) + if added_by: + parts.append(f"by {added_by}") + if trig: + parts.append(f"tag: {trig}") + reason = " • ".join(parts) + added_cards.append({ + "name": name, + "count": delta_count, + "reason": reason, + "role": role, + "sub_role": sub_role, + "trigger_tag": trig, + }) + except Exception: + continue + added_cards.sort(key=lambda x: (x.get('reason') or '', x['name'])) + except Exception: + added_cards = [] + + # If this stage added cards, present it and advance idx + if added_cards: + ctx["snapshot"] = snap_before # snapshot for rerun + ctx["idx"] = i + 1 + ctx["last_visible_idx"] = i + 1 + return { + "done": False, + "label": label, + "log_delta": delta_log, + "added_cards": added_cards, + "idx": i + 1, + "total": len(stages), + } + + # No cards added: skip showing this stage and advance to next + i += 1 + # Continue loop to auto-advance + + # If we reached here, all remaining stages were no-ops; finalize exports + ctx["idx"] = len(stages) + if not ctx.get("csv_path") and hasattr(b, 'export_decklist_csv'): + try: + ctx["csv_path"] = b.export_decklist_csv() # type: ignore[attr-defined] + except Exception as e: + logs.append(f"CSV export failed: {e}") + if not ctx.get("txt_path") and hasattr(b, 'export_decklist_text'): + try: + import os as _os + base, _ext = _os.path.splitext(_os.path.basename(ctx.get("csv_path") or f"deck_{b.timestamp}.csv")) + ctx["txt_path"] = b.export_decklist_text(filename=base + '.txt') # type: ignore[attr-defined] + # Export the run configuration JSON for manual builds + try: + b.export_run_config_json(directory='config', filename=base + '.json') # type: ignore[attr-defined] + except Exception: + pass + except Exception as e: + logs.append(f"Text export failed: {e}") + # Build structured summary for UI + summary = None + try: + if hasattr(b, 'build_deck_summary'): + summary = b.build_deck_summary() # type: ignore[attr-defined] + except Exception: + summary = None + # Write sidecar summary JSON next to CSV (if available) + try: + if summary and ctx.get("csv_path"): + import os as _os + import json as _json + csv_path = ctx.get("csv_path") + base, _ = _os.path.splitext(csv_path) + sidecar = base + '.summary.json' + meta = { + "commander": getattr(b, 'commander_name', '') or getattr(b, 'commander', ''), + "tags": list(getattr(b, 'selected_tags', []) or []) or [t for t in [getattr(b, 'primary_tag', None), getattr(b, 'secondary_tag', None), getattr(b, 
'tertiary_tag', None)] if t], + "bracket_level": getattr(b, 'bracket_level', None), + "csv": ctx.get("csv_path"), + "txt": ctx.get("txt_path"), + } + payload = {"meta": meta, "summary": summary} + with open(sidecar, 'w', encoding='utf-8') as f: + _json.dump(payload, f, ensure_ascii=False, indent=2) + except Exception: + pass + return { + "done": True, + "label": "Complete", + "log_delta": "", + "idx": len(stages), + "total": len(stages), + "csv_path": ctx.get("csv_path"), + "txt_path": ctx.get("txt_path"), + "summary": summary, + } diff --git a/code/web/services/tasks.py b/code/web/services/tasks.py new file mode 100644 index 0000000..48b40ae --- /dev/null +++ b/code/web/services/tasks.py @@ -0,0 +1,48 @@ +from __future__ import annotations + +import time +import uuid +from typing import Dict, Any, Optional + +# Extremely simple in-memory session/task store for MVP +_SESSIONS: Dict[str, Dict[str, Any]] = {} +_TTL_SECONDS = 60 * 60 * 8 # 8 hours + + +def new_sid() -> str: + return uuid.uuid4().hex + + +def touch_session(sid: str) -> Dict[str, Any]: + now = time.time() + s = _SESSIONS.get(sid) + if not s: + s = {"created": now, "updated": now} + _SESSIONS[sid] = s + else: + s["updated"] = now + return s + + +def get_session(sid: Optional[str]) -> Dict[str, Any]: + if not sid: + sid = new_sid() + return touch_session(sid) + + +def set_session_value(sid: str, key: str, value: Any) -> None: + touch_session(sid)[key] = value + + +def get_session_value(sid: str, key: str, default: Any = None) -> Any: + return touch_session(sid).get(key, default) + + +def cleanup_expired() -> None: + now = time.time() + expired = [sid for sid, s in _SESSIONS.items() if now - s.get("updated", 0) > _TTL_SECONDS] + for sid in expired: + try: + del _SESSIONS[sid] + except Exception: + pass diff --git a/code/web/static/styles.css b/code/web/static/styles.css new file mode 100644 index 0000000..ba447bf --- /dev/null +++ b/code/web/static/styles.css @@ -0,0 +1,119 @@ +/* Base */ +:root{ + /* MTG color palette (approx from provided values) */ + --sidebar-w: 260px; + --green-main: rgb(0,115,62); + --green-light: rgb(196,211,202); + --blue-main: rgb(14,104,171); + --blue-light: rgb(179,206,234); + --red-main: rgb(211,32,42); + --red-light: rgb(235,159,130); + --white-main: rgb(249,250,244); + --white-light: rgb(248,231,185); + --black-main: rgb(21,11,0); + --black-light: rgb(166,159,157); + --bg: #0f0f10; + --panel: #1a1b1e; + --text: #e8e8e8; + --muted: #b6b8bd; + --border: #2a2b2f; +} +*{box-sizing:border-box} +html,body{height:100%} +body { font-family: system-ui, Arial, sans-serif; margin: 0; color: var(--text); background: var(--bg); } +/* Top banner */ +.top-banner{ position:sticky; top:0; z-index:10; background:#0c0d0f; border-bottom:1px solid var(--border); } +.top-banner .top-inner{ margin:0; padding:.5rem 0; display:grid; grid-template-columns: var(--sidebar-w) 1fr; align-items:center; } +.top-banner h1{ font-size: 1.1rem; margin:0; padding-left: 1rem; } +.banner-status{ color: var(--muted); font-size:.9rem; text-align:left; padding-left: 1.5rem; padding-right: 1.5rem; white-space:nowrap; overflow:hidden; text-overflow:ellipsis; } +.banner-status.busy{ color:#fbbf24; } + +/* Layout */ +.layout{ display:grid; grid-template-columns: var(--sidebar-w) 1fr; min-height: calc(100vh - 52px); } +.sidebar{ background: var(--panel); border-right: 1px solid var(--border); padding: 1rem; position:sticky; top:0; align-self:start; height:100vh; overflow:auto; width: var(--sidebar-w); } +.content{ padding: 1.25rem 1.5rem; } + 
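The staged flow implemented by `start_build_ctx` and `run_stage` in the orchestrator above can also be driven without the web UI. A minimal sketch under the same import-path assumption as before; the commander name is an example from this diff, the tag and bracket are resolved from the orchestrator's own helpers, and the first run may take a while because `start_build_ctx` bootstraps setup/tagging if needed:

```python
# Sketch: run the staged build loop headlessly. run_stage() surfaces one visible
# stage at a time (skipping stages that add no cards) and finalizes the CSV/TXT
# exports once every stage has run.
from code.web.services import orchestrator as orch

commander = "Inti, Seneschal of the Sun"          # example commander from the docs
ctx = orch.start_build_ctx(
    commander=commander,
    tags=orch.tags_for_commander(commander)[:1],  # first theme tag, if the commander has one
    bracket=orch.bracket_options()[0]["level"],   # first defined power bracket
    ideals=orch.ideal_defaults(),
)

while True:
    res = orch.run_stage(ctx, rerun=False)
    if res.get("done"):
        print("CSV export:", res.get("csv_path"))
        print("TXT export:", res.get("txt_path"))
        break
    added = len(res.get("added_cards", []))
    print(f"[{res['idx']}/{res['total']}] {res['label']}: {added} cards added")
```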
+.brand h1{ display:none; } +.mana-dots{ display:flex; gap:.35rem; margin-bottom:.5rem; } +.mana-dots .dot{ width:12px; height:12px; border-radius:50%; display:inline-block; border:1px solid rgba(0,0,0,.35); box-shadow:0 1px 2px rgba(0,0,0,.3) inset; } +.dot.green{ background: var(--green-main); } +.dot.blue{ background: var(--blue-main); } +.dot.red{ background: var(--red-main); } +.dot.white{ background: var(--white-light); border-color: rgba(0,0,0,.2); } +.dot.black{ background: var(--black-light); } + +.nav{ display:flex; flex-direction:column; gap:.35rem; } +.nav a{ color: var(--text); text-decoration:none; padding:.4rem .5rem; border-radius:6px; border:1px solid transparent; } +.nav a:hover{ background: #202227; border-color: var(--border); } + +/* Simple two-column layout for inspect panel */ +.two-col { display: grid; grid-template-columns: 1fr 320px; gap: 1rem; align-items: start; } +.two-col .grow { min-width: 0; } +.card-preview img { width: 100%; height: auto; border-radius: 10px; box-shadow: 0 6px 18px rgba(0,0,0,.35); border:1px solid var(--border); background: #111; } +@media (max-width: 900px) { .two-col { grid-template-columns: 1fr; } } + +/* Left-rail variant puts the image first */ +.two-col.two-col-left-rail{ grid-template-columns: 320px 1fr; } +.card-preview.card-sm{ max-width:200px; } + +/* Buttons, inputs */ +button{ background: var(--blue-main); color:#fff; border:none; border-radius:6px; padding:.45rem .7rem; cursor:pointer; } +button:hover{ filter:brightness(1.05); } +label{ display:inline-flex; flex-direction:column; gap:.25rem; margin-right:.75rem; } +select,input[type="text"],input[type="number"]{ background:#0f1115; color:var(--text); border:1px solid var(--border); border-radius:6px; padding:.35rem .4rem; } +fieldset{ border:1px solid var(--border); border-radius:8px; padding:.75rem; margin:.75rem 0; } +small, .muted{ color: var(--muted); } + +/* Banner */ +.banner{ background: linear-gradient(90deg, rgba(0,0,0,.25), rgba(0,0,0,0)); border: 1px solid var(--border); border-radius: 10px; padding: 2rem 1.6rem; margin-bottom: 1rem; box-shadow: 0 8px 30px rgba(0,0,0,.25) inset; } +.banner h1{ font-size: 2rem; margin:0 0 .35rem; } +.banner .subtitle{ color: var(--muted); font-size:.95rem; } + +/* Home actions */ +.actions-grid{ display:grid; grid-template-columns: repeat( auto-fill, minmax(220px, 1fr) ); gap: .75rem; } +.action-button{ display:block; text-decoration:none; color: var(--text); border:1px solid var(--border); background:#0f1115; padding:1.25rem; border-radius:10px; text-align:center; font-weight:600; } +.action-button:hover{ border-color:#3a3c42; background:#12151b; } +.action-button.primary{ background: linear-gradient(180deg, rgba(14,104,171,.25), rgba(14,104,171,.05)); border-color: #274766; } + +/* Card grid for added cards (responsive, compact tiles) */ +.card-grid{ + display:grid; + grid-template-columns: repeat(auto-fill, minmax(170px, 170px)); /* ~160px image + padding */ + gap: .5rem; + margin-top:.5rem; + justify-content: start; /* pack as many as possible per row */ +} +.card-tile{ + width:170px; + background:#0f1115; + border:1px solid var(--border); + border-radius:6px; + padding:.25rem .25rem .4rem; + text-align:center; +} +.card-tile.game-changer{ border-color: var(--red-main); box-shadow: 0 0 0 1px rgba(211,32,42,.35) inset; } +.card-tile img{ width:160px; height:auto; border-radius:6px; box-shadow: 0 6px 18px rgba(0,0,0,.35); background:#111; } +.card-tile .name{ font-weight:600; margin-top:.25rem; font-size:.92rem; } +.card-tile 
.reason{ color:var(--muted); font-size:.85rem; margin-top:.15rem; } + +/* Step 1 candidate grid (200px-wide scaled images) */ +.candidate-grid{ + display:grid; + grid-template-columns: repeat(auto-fill, minmax(200px, 1fr)); + gap:.75rem; +} +.candidate-tile{ + background:#0f1115; + border:1px solid var(--border); + border-radius:8px; + padding:.4rem; +} +.candidate-tile .img-btn{ display:block; width:100%; padding:0; background:transparent; border:none; cursor:pointer; } +.candidate-tile img{ width:100%; max-width:200px; height:auto; border-radius:8px; box-shadow:0 6px 18px rgba(0,0,0,.35); background:#111; display:block; margin:0 auto; } +.candidate-tile .meta{ text-align:center; margin-top:.35rem; } +.candidate-tile .name{ font-weight:600; font-size:.95rem; } +.candidate-tile .score{ color:var(--muted); font-size:.85rem; } + +/* Deck summary: highlight game changers */ +.game-changer { color: var(--green-main); } +.stack-card.game-changer { outline: 2px solid var(--green-main); } diff --git a/code/web/static/vendor/htmx-1.9.12.min.js b/code/web/static/vendor/htmx-1.9.12.min.js new file mode 100644 index 0000000..1eeaaa0 --- /dev/null +++ b/code/web/static/vendor/htmx-1.9.12.min.js @@ -0,0 +1,3 @@ +/* Local fallback for HTMX 1.9.12. If the CDN fails, base.html will load this file. */ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t():("function"==typeof define&&define.amd?define(t):t())}(0,function(){/* placeholder minimal shim to avoid runtime errors if CDN blocked; swaps won't work until user refreshes with network */ +window.htmx=window.htmx||{version:"1.9.12",onLoad:function(){},find:function(){return null},trigger:function(){},config:{},logAll:function(){}};}); diff --git a/code/web/templates/base.html b/code/web/templates/base.html new file mode 100644 index 0000000..7cf568b --- /dev/null +++ b/code/web/templates/base.html @@ -0,0 +1,152 @@ + + +
+ + +No theme tags found for this commander.
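The "no theme tags" message above comes from the tag list attached to each commander. A small sketch of reading those tags and splitting them the way the build does (first tag becomes primary, second secondary, third tertiary), under the same import-path assumption and with a placeholder commander name:

```python
# Sketch: fetch a commander's theme tags and split them as the build start does.
from code.web.services import orchestrator as orch

commander = "Inti, Seneschal of the Sun"   # example commander from the docs
tags = orch.tags_for_commander(commander)
if not tags:
    print("No theme tags found for this commander.")
else:
    primary, secondary, tertiary = (tags + [None, None, None])[:3]
    print("Primary:", primary, "| Secondary:", secondary, "| Tertiary:", tertiary)
```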
+ {% endif %} +Commander: {{ commander }}
+Tags: {{ tags|default([])|join(', ') }}
+Bracket: {{ bracket }}
+ + {% if i and n %} +{{ log }}+
+ Run a non-interactive deck build using a saved JSON configuration. Upload a JSON file, view its details, or run it headlessly to generate deck exports and a build summary. +
+{{ example_json or '{\n "commander": "Your Commander Name",\n "primary_tag": "Your Main Theme",\n "secondary_tag": null,\n "tertiary_tag": null,\n "bracket_level": 0,\n "ideal_counts": {\n "ramp": 10,\n "lands": 35,\n "basic_lands": 20,\n "fetch_lands": 3,\n "creatures": 28,\n "removal": 10,\n "wipes": 2,\n "card_advantage": 8,\n "protection": 4\n }\n}' }}+
No configs found in /config. Export a run config from a build, or upload one here.
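The configs this page lists are plain JSON files in the config folder (or wherever `DECK_CONFIG` points). A minimal sketch of writing one by hand: the commander and theme values are placeholders to replace with a real commander and one of its theme tags, the bracket level must be one of the levels offered in the build flow, the filename `my_deck.json` is hypothetical, and the ideal counts mirror the example shown above:

```python
# Sketch: create a run config that the Configs page will list. Key names follow
# what the /configs/run handler reads.
import json
from pathlib import Path

cfg = {
    "commander": "Your Commander Name",   # replace with an exact commander name
    "primary_tag": "Your Main Theme",     # replace with one of that commander's theme tags
    "secondary_tag": None,
    "tertiary_tag": None,
    "bracket_level": 1,                   # one of the defined power bracket levels
    "ideal_counts": {
        "ramp": 10, "lands": 35, "basic_lands": 20, "fetch_lands": 3,
        "creatures": 28, "removal": 10, "wipes": 2,
        "card_advantage": 8, "protection": 4,
    },
}

config_dir = Path("config")               # default unless DECK_CONFIG points elsewhere
config_dir.mkdir(parents=True, exist_ok=True)
(config_dir / "my_deck.json").write_text(json.dumps(cfg, indent=2), encoding="utf-8")
```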
+ {% else %} +This page shows the results of a non-interactive build from the selected JSON configuration.
+{% if commander %} +{{ log }}+
No summary data available.
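Behind this results page, the route hands the parsed config to `orchestrator.run_build`, which returns the build log, export paths, and the summary rendered here. A sketch of making the same call directly, under the same import-path assumption; `config/my_deck.json` is the hypothetical file from the earlier sketch:

```python
# Sketch: replicate what /configs/run does — read a saved config, run a full
# headless build, then report the export paths and whether a summary was built.
import json
from pathlib import Path

from code.web.services import orchestrator as orch

cfg = json.loads(Path("config/my_deck.json").read_text(encoding="utf-8"))
tags = [t for t in (cfg.get("primary_tag"), cfg.get("secondary_tag"), cfg.get("tertiary_tag")) if t]

res = orch.run_build(
    commander=cfg.get("commander", ""),
    tags=tags,
    bracket=int(cfg.get("bracket_level") or 0),
    ideals=cfg.get("ideal_counts") or {},
)
if res.get("ok"):
    print("CSV:", res.get("csv_path"))
    print("TXT:", res.get("txt_path"))
    print("Summary available:", bool(res.get("summary")))
else:
    print("Build failed:", res.get("error"))
    print(res.get("log", ""))
```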
+ {% endif %} +{% endif %} + +{% endblock %} diff --git a/code/web/templates/configs/view.html b/code/web/templates/configs/view.html new file mode 100644 index 0000000..282f7b8 --- /dev/null +++ b/code/web/templates/configs/view.html @@ -0,0 +1,22 @@ +{% extends "base.html" %} +{% block content %} +Review the configuration details below, then run a non-interactive build to produce deck exports and a summary.
+{{ data.ideal_counts | tojson(indent=2) }}+
These are exported decklists from previous runs. Open a deck to view the final summary, download CSV/TXT, and inspect card types and curve.
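Each CSV export can have a `.summary.json` sidecar written next to it (the orchestrator writes one after a successful build), and the deck view prefers it over re-parsing the CSV. A small sketch of reading those sidecars from the default export folder, assuming the script runs in the project working directory:

```python
# Sketch: list deck exports and read the .summary.json sidecar next to each CSV.
# The sidecar's "meta" block holds commander/tags/bracket; "summary" holds the
# structured breakdown the deck view renders.
import json
from pathlib import Path

deck_dir = Path("deck_files")   # default unless DECK_EXPORTS points elsewhere
for csv_file in sorted(deck_dir.glob("*.csv")):
    sidecar = csv_file.with_suffix(".summary.json")
    if not sidecar.exists():
        continue   # older exports fall back to CSV parsing in the web view
    payload = json.loads(sidecar.read_text(encoding="utf-8"))
    meta = payload.get("meta", {})
    print(csv_file.name, "|", meta.get("commander"), "|", ", ".join(meta.get("tags") or []))
```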
+ +{% if error %} +Prepare or refresh the card database and apply tags. You can run this anytime.
+ +Initial setup and tagging may take several minutes on first run.
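The setup buttons above hit the `/setup/start` endpoints, and progress is written to `csv_files/.setup_status.json`. A sketch of driving the same flow from a script, assuming the routers are mounted at their declared prefixes, the web UI from DOCKER.md is listening on http://localhost:8080, and the script runs in the mounted working folder so the status file is visible locally:

```python
# Sketch: kick off setup/tagging via the GET alias and poll the status file the
# orchestrator updates while it works.
import json
import time
import urllib.request
from pathlib import Path

with urllib.request.urlopen("http://localhost:8080/setup/start?force=1") as resp:
    print(json.loads(resp.read().decode("utf-8")))   # {'ok': True, 'started': True, 'force': True}

status_file = Path("csv_files/.setup_status.json")
while True:
    time.sleep(5)
    try:
        status = json.loads(status_file.read_text(encoding="utf-8"))
    except (OSError, ValueError):
        continue   # file may not exist yet or may be mid-write
    print(status.get("phase"), "|", status.get("message"), "|", status.get("percent"))
    if not status.get("running"):
        break
```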