Added logic for more themes

Adjusted setup to exclude Un-sets and Heroes of the Realm cards
mwisnowski 2024-12-17 12:02:53 -08:00
parent 94d42d93cf
commit 4a3d9c423e
3 changed files with 441 additions and 118 deletions
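
The set-code exclusion described in the commit message can be sketched roughly as follows. This is a minimal sketch rather than the committed code: it assumes an MTGJSON-style cards.csv in which 'printings' is a comma-separated string of set codes, and it reuses the column name and set list that appear in the diff below.

import pandas as pd

# Set codes to exclude: Heroes of the Realm promos plus the silver-bordered Un-sets,
# as listed in the diff below.
NON_LEGAL_SETS = ['PHTR', 'PH17', 'PH18', 'PH19', 'PH20', 'PH21',
                  'UGL', 'UND', 'UNH', 'UST']

df = pd.read_csv('csv_files/cards.csv', low_memory=False)

# Drop any card whose printings mention an excluded set code. This mirrors the
# plain substring test used in the loop below; splitting 'printings' on commas
# would avoid accidental partial matches, but the behaviour is kept the same here.
mask = df['printings'].fillna('').str.contains('|'.join(NON_LEGAL_SETS))
df = df[~mask]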


@@ -45,10 +45,12 @@ def filter_by_color(df, column_name, value, new_csv_name):
columns_to_keep = ['name', 'faceName','edhrecRank','colorIdentity', 'colors', 'manaCost', 'manaValue', 'type', 'text', 'power', 'toughness', 'keywords']
filtered_df = filtered_df[columns_to_keep]
filtered_df.sort_values(by='name', key=lambda col: col.str.lower(), inplace=True)
filtered_df.to_csv(new_csv_name, index=False)
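
For reference, the visible tail of filter_by_color (column selection, case-insensitive sort, CSV write) suggests a helper shaped roughly like the one below. Only the last four lines come from the hunk above; the masking condition is not shown in this commit, so the str.contains filter here is an assumption, not the repository's actual implementation.

import pandas as pd

def filter_by_color(df: pd.DataFrame, column_name: str, value: str, new_csv_name: str) -> None:
    # Assumed filter step: keep rows whose color identity matches the requested value.
    filtered_df = df[df[column_name].astype(str).str.contains(value)]
    # The remainder mirrors the lines visible above.
    columns_to_keep = ['name', 'faceName', 'edhrecRank', 'colorIdentity', 'colors', 'manaCost',
                       'manaValue', 'type', 'text', 'power', 'toughness', 'keywords']
    filtered_df = filtered_df[columns_to_keep]
    filtered_df.sort_values(by='name', key=lambda col: col.str.lower(), inplace=True)
    filtered_df.to_csv(new_csv_name, index=False)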
-def determine_legendary():
-    print('Generating legendary_cards.csv, containing all Legendary Creatures eligible to be commanders.')
+def determine_commanders():
+    print('Generating commander_cards.csv, containing all cards eligible to be commanders.')
# Filter dataframe
while True:
try:
@@ -66,7 +68,7 @@ def determine_legendary():
# Load cards.csv file into pandas dataframe so it can be further broken down
df = pd.read_csv(f'{csv_directory}/cards.csv', low_memory=False)
-legendary_options = ['Legendary Creature', 'Legendary Artifact Creature', 'Legendary Enchantment Creature']
+legendary_options = ['Legendary Creature', 'Legendary Artifact', 'Legendary Artifact Creature', 'Legendary Enchantment Creature', 'Legendary Planeswalker']
filtered_df = df[df['type'].str.contains('|'.join(legendary_options))]
"""
Save the filtered dataframe to a new csv file, and narrow down/rearrange the columns it
@@ -74,12 +76,27 @@ def determine_legendary():
Additionally attempts to remove as many duplicates as possible (including cards with reversible prints),
as well as taking out Arena-only cards.
"""
+rows_to_drop = []
+non_legal_sets = ['PHTR', 'PH17', 'PH18', 'PH19', 'PH20', 'PH21', 'UGL', 'UND', 'UNH', 'UST']
+for index, row in filtered_df.iterrows():
+    if ('Legendary Artifact' in row['type']
+            or 'Legendary Planeswalker' in row['type']):
+        if 'Legendary Artifact Creature' not in row['type']:
+            # Non-creature legendaries are only eligible if their text says they can be your commander
+            if pd.isna(row['text']) or f"{row['name']} can be your commander" not in row['text']:
+                rows_to_drop.append(index)
+    for illegal_set in non_legal_sets:
+        if illegal_set in row['printings']:
+            rows_to_drop.append(index)
+filtered_df = filtered_df.drop(rows_to_drop)
filtered_df.sort_values('name')
filtered_df = filtered_df.loc[filtered_df['layout'] != 'reversible_card']
filtered_df = filtered_df[filtered_df['availability'].str.contains('paper')]
filtered_df = filtered_df.loc[filtered_df['promoTypes'] != 'playtest']
filtered_df = filtered_df.loc[filtered_df['securityStamp'] != 'heart']
filtered_df = filtered_df.loc[filtered_df['securityStamp'] != 'acorn']
card_types = ['Plane —', 'Conspiracy', 'Vanguard', 'Scheme', 'Phenomena', 'Stickers', 'Attraction']
for card_type in card_types:
@@ -89,8 +106,8 @@ def determine_legendary():
columns_to_keep = ['name', 'faceName','edhrecRank','colorIdentity', 'colors', 'manaCost', 'manaValue', 'type', 'keywords', 'text', 'power', 'toughness']
filtered_df = filtered_df[columns_to_keep]
filtered_df.sort_values(by='name', key=lambda col: col.str.lower(), inplace=True)
-filtered_df.to_csv(f'{csv_directory}/legendary_cards.csv', index=False)
-print('legendary_cards.csv file generated.')
+filtered_df.to_csv(f'{csv_directory}/commander_cards.csv', index=False)
+print('commander_cards.csv file generated.')
def initial_setup():
print('Checking for cards.csv file.\n')
@@ -127,7 +144,7 @@ def initial_setup():
filter_by_color(df, 'colorIdentity', color_abrv[i], f'{csv_directory}/{colors[i]}_cards.csv')
# Once by-color lists have been made, Determine legendary creatures
-determine_legendary()
+determine_commanders()
# Once Legendary creatures are determined, generate staple lists
# generate_staple_lists()
@@ -144,11 +161,19 @@ def regenerate_csvs_all():
outputfile.write(r.content)
# Load cards.csv file into pandas dataframe so it can be further broken down
-df = pd.read_csv('csv_files/cards.csv', low_memory=False)
+df = pd.read_csv('csv_files/cards.csv', low_memory=False)  # , converters={'printings': pd.eval}
# Set frames that have nothing for color identity to be 'Colorless' instead
df['colorIdentity'] = df['colorIdentity'].fillna('Colorless')
+rows_to_drop = []
+non_legal_sets = ['PHTR', 'PH17', 'PH18', 'PH19', 'PH20', 'PH21', 'UGL', 'UND', 'UNH', 'UST']
+for index, row in df.iterrows():
+    for illegal_set in non_legal_sets:
+        if illegal_set in row['printings']:
+            rows_to_drop.append(index)
+df = df.drop(rows_to_drop)
# Color identity sorted cards
print('Regenerating color identity sorted files.\n')
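
The hunk above writes r.content to disk and then re-reads it with pandas, which implies a download step along these lines. The MTGJSON URL below is an assumption and may differ from the one the script actually uses.

import requests

# Assumed source: MTGJSON's CSV export of the cards table.
CARDS_CSV_URL = 'https://mtgjson.com/api/v5/csv/cards.csv'

r = requests.get(CARDS_CSV_URL, timeout=120)
r.raise_for_status()
with open('csv_files/cards.csv', 'wb') as outputfile:
    outputfile.write(r.content)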
@@ -159,7 +184,7 @@ def regenerate_csvs_all():
print(f'A new {colors[i]}_cards.csv file has been made.\n')
# Once files are regenerated, create a new legendary list
-determine_legendary()
+determine_commanders()
def regenerate_csv_by_color(color):
"""
@@ -188,7 +213,7 @@ def regenerate_csv_by_color(color):
print(f'A new {color}_cards.csv file has been made.\n')
# Once files are regenerated, create a new legendary list
-determine_legendary()
+determine_commanders()
def generate_staple_lists():
for color in colors:
@@ -257,5 +282,6 @@ def setup():
break
break
#setup()
-#regenerate_csv_by_color('white')
#regenerate_csvs_all()
+#regenerate_csv_by_color('white')
+#determine_commanders()
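
Taken together, the eligibility rule determine_commanders applies to non-creature legendaries can be expressed as a small predicate. The sketch below is illustrative only: it reuses the column names from the diff above, but the helper name and the row-wise apply are not part of the repository.

import pandas as pd

def is_commander_eligible(row: pd.Series) -> bool:
    text = row['text'] if pd.notna(row['text']) else ''
    # Legendary creatures (including artifact and enchantment creatures) qualify outright.
    if 'Creature' in row['type']:
        return True
    # Other legendary artifacts and planeswalkers must say so in their rules text.
    return f"{row['name']} can be your commander" in text

df = pd.read_csv('csv_files/cards.csv', low_memory=False)
legends = df[df['type'].str.contains('Legendary', na=False)]
commanders = legends[legends.apply(is_commander_eligible, axis=1)]
commanders.to_csv('csv_files/commander_cards.csv', index=False)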