test: convert tests to pytest assertions; add server-availability skips; clean up warnings and minor syntax/indent issues

matt 2025-09-12 10:50:57 -07:00
parent f07daaeb4a
commit 947adacfe2
21 changed files with 374 additions and 311 deletions
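The two recurring patterns in this commit: script-style checks that returned True/False become plain pytest assertions (pytest ignores return values, so the old style could never actually fail a test), and HTTP-based tests probe the local server once and skip cleanly when nothing is listening. A minimal sketch of both idioms as the converted tests apply them; the helper name skip_unless_local_server is hypothetical (the diffs below inline the probe):

    import pytest
    import requests

    def skip_unless_local_server(base_url: str = "http://localhost:8080/") -> None:
        # Probe the server; pytest.skip marks the calling test as skipped.
        try:
            requests.get(base_url, timeout=0.5)
        except Exception:
            pytest.skip(f"Local web server is not running on {base_url}; skipping HTTP-based test")

    def test_example():
        skip_unless_local_server()
        response = requests.get("http://localhost:8080/", timeout=10)
        # Old style: `return False` on failure; new style: assert, ideally
        # with a message rather than a bare `assert False`.
        assert response.status_code == 200, f"unexpected status {response.status_code}"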

View file

@@ -27,7 +27,7 @@ def test_cli_ideal_counts():
     if result.returncode != 0:
         print(f"❌ Command failed: {result.stderr}")
-        return False
+        assert False

     try:
         config = json.loads(result.stdout)
@@ -46,16 +46,14 @@ def test_cli_ideal_counts():
             actual_val = ideal_counts.get(key)
             if actual_val != expected_val:
                 print(f"{key}: expected {expected_val}, got {actual_val}")
-                return False
+                assert False
             print(f"{key}: {actual_val}")

         print("✅ All CLI ideal count arguments working correctly!")
-        return True
     except json.JSONDecodeError as e:
         print(f"❌ Failed to parse JSON output: {e}")
         print(f"Output was: {result.stdout}")
-        return False
+        assert False

 def test_help_contains_types():
     """Test that help text shows value types."""
@@ -66,7 +64,7 @@ def test_help_contains_types():
     if result.returncode != 0:
         print(f"❌ Help command failed: {result.stderr}")
-        return False
+        assert False

     help_text = result.stdout
@@ -82,7 +80,7 @@ def test_help_contains_types():
     if missing:
         print(f"❌ Missing type indicators: {missing}")
-        return False
+        assert False

     # Check for organized sections
     sections = [
@@ -99,10 +97,9 @@ def test_help_contains_types():
     if missing_sections:
         print(f"❌ Missing help sections: {missing_sections}")
-        return False
+        assert False

     print("✅ Help text contains proper type information and sections!")
-    return True

 if __name__ == "__main__":
     os.chdir(os.path.dirname(os.path.abspath(__file__)))

View file

@@ -4,10 +4,6 @@ Advanced integration test for exclude functionality.
 Tests that excluded cards are completely removed from all dataframe sources.
 """
-import sys
-import os
-
-sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'code'))
 from code.deck_builder.builder import DeckBuilder

 def test_comprehensive_exclude_filtering():
@@ -74,18 +70,10 @@ def test_comprehensive_exclude_filtering():
                 print(f"'{exclude_card}' incorrectly found in lookup: {lookup_result['name'].tolist()}")

         print("\n=== Test Complete ===")
-        return True
     except Exception as e:
         print(f"Test failed with error: {e}")
         import traceback
         print(traceback.format_exc())
-        return False
-
-if __name__ == "__main__":
-    success = test_comprehensive_exclude_filtering()
-    if success:
-        print("✅ Comprehensive exclude filtering test passed!")
-    else:
-        print("❌ Comprehensive exclude filtering test failed!")
-        sys.exit(1)
+        assert False

View file

@@ -143,10 +143,9 @@ def test_direct_exclude_filtering():
     if failed_exclusions:
         print(f"\n❌ FAILED: {len(failed_exclusions)} cards were not excluded: {failed_exclusions}")
-        return False
+        assert False
     else:
         print(f"\n✅ SUCCESS: All {len(exclude_list)} cards were properly excluded")
-        return True

 if __name__ == "__main__":
     success = test_direct_exclude_filtering()

View file

@@ -106,7 +106,9 @@ def test_exclude_cards_json_roundtrip(client):
     assert session_cookie is not None, "Session cookie not found"

     # Export permalink with exclude_cards
-    r3 = client.get('/build/permalink', cookies={'sid': session_cookie})
+    if session_cookie:
+        client.cookies.set('sid', session_cookie)
+    r3 = client.get('/build/permalink')
     assert r3.status_code == 200
     permalink_data = r3.json()
@@ -128,7 +130,9 @@ def test_exclude_cards_json_roundtrip(client):
     import_cookie = r4.cookies.get('sid')
     assert import_cookie is not None, "Import session cookie not found"

-    r5 = client.get('/build/permalink', cookies={'sid': import_cookie})
+    if import_cookie:
+        client.cookies.set('sid', import_cookie)
+    r5 = client.get('/build/permalink')
     assert r5.status_code == 200
     reimported_data = r5.json()

View file

@@ -96,7 +96,10 @@ Counterspell"""
     # Get session cookie and export permalink
     session_cookie = r2.cookies.get('sid')
-    r3 = client.get('/build/permalink', cookies={'sid': session_cookie})
+    # Set cookie on client to avoid per-request cookies deprecation
+    if session_cookie:
+        client.cookies.set('sid', session_cookie)
+    r3 = client.get('/build/permalink')
     assert r3.status_code == 200
     export_data = r3.json()
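Both permalink round-trip fixes above apply the same idea: httpx-based test clients deprecate per-request cookies=..., so the session cookie is stored on the client once and then sent automatically. A minimal sketch, assuming client is a Starlette/FastAPI TestClient:

    # Persist the session cookie on the client instead of passing
    # cookies= on each request (deprecated in httpx-based clients).
    session_cookie = r2.cookies.get('sid')
    if session_cookie:
        client.cookies.set('sid', session_cookie)
    r3 = client.get('/build/permalink')  # 'sid' is attached automatically
    assert r3.status_code == 200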

View file

@@ -57,15 +57,14 @@ def test_exclude_filtering():
         for exclude_card in exclude_list:
             if exclude_card in remaining_cards:
                 print(f"ERROR: {exclude_card} was NOT excluded!")
-                return False
+                assert False
             else:
                 print(f"{exclude_card} was properly excluded")

         print(f"\n✓ SUCCESS: All {len(exclude_list)} cards were properly excluded")
         print(f"✓ Remaining cards: {len(remaining_cards)} out of {len(test_cards_df)}")
-        return True
-    return False
+    else:
+        assert False

 if __name__ == "__main__":
     test_exclude_filtering()

View file

@@ -2,66 +2,43 @@
 """Test the improved fuzzy matching and modal styling"""
 import requests
+import pytest

-test_cases = [
-    ("lightn", "Should find Lightning cards"),
-    ("lightni", "Should find Lightning with slight typo"),
-    ("bolt", "Should find Bolt cards"),
-    ("bligh", "Should find Blightning"),
-    ("unknowncard", "Should trigger confirmation modal"),
-    ("ligth", "Should find Light cards"),
-    ("boltt", "Should find Bolt with typo")
-]
-
-for input_text, description in test_cases:
+@pytest.mark.parametrize(
+    "input_text,description",
+    [
+        ("lightn", "Should find Lightning cards"),
+        ("lightni", "Should find Lightning with slight typo"),
+        ("bolt", "Should find Bolt cards"),
+        ("bligh", "Should find Blightning"),
+        ("unknowncard", "Should trigger confirmation modal"),
+        ("ligth", "Should find Light cards"),
+        ("boltt", "Should find Bolt with typo"),
+    ],
+)
+def test_final_fuzzy(input_text: str, description: str):
+    # Skip if local server isn't running
+    try:
+        requests.get('http://localhost:8080/', timeout=0.5)
+    except Exception:
+        pytest.skip('Local web server is not running on http://localhost:8080; skipping HTTP-based test')
     print(f"\n🔍 Testing: '{input_text}' ({description})")
     print("=" * 60)

     test_data = {
         "include_cards": input_text,
         "exclude_cards": "",
         "commander": "",
         "enforcement_mode": "warn",
         "allow_illegal": "false",
-        "fuzzy_matching": "true"
+        "fuzzy_matching": "true",
     }

-    try:
-        response = requests.post(
-            "http://localhost:8080/build/validate/include_exclude",
-            data=test_data,
-            timeout=10
-        )
-        if response.status_code == 200:
-            data = response.json()
-            # Check results
-            if data.get("confirmation_needed"):
-                print(f"🔄 Confirmation modal would show:")
-                for item in data["confirmation_needed"]:
-                    print(f"   Input: '{item['input']}'")
-                    print(f"   Confidence: {item['confidence']:.1%}")
-                    print(f"   Suggestions: {item['suggestions'][:3]}")
-            elif data.get("includes", {}).get("legal"):
-                legal = data["includes"]["legal"]
-                fuzzy = data["includes"].get("fuzzy_matches", {})
-                if input_text in fuzzy:
-                    print(f"✅ Auto-accepted fuzzy match: '{input_text}' → '{fuzzy[input_text]}'")
-                else:
-                    print(f"✅ Exact match: {legal}")
-            elif data.get("includes", {}).get("illegal"):
-                print(f"❌ No matches found")
-            else:
-                print(f"❓ Unclear result")
-        else:
-            print(f"❌ HTTP {response.status_code}")
-    except Exception as e:
-        print(f"❌ EXCEPTION: {e}")
-
-print(f"\n🎯 Summary:")
-print("✅ Enhanced prefix matching prioritizes Lightning cards for 'lightn'")
-print("✅ Dark theme modal styling implemented")
-print("✅ Confidence threshold set to 95% for more confirmations")
-print("💡 Ready for user testing in web UI!")
+    response = requests.post(
+        "http://localhost:8080/build/validate/include_exclude",
+        data=test_data,
+        timeout=10,
+    )
+    assert response.status_code == 200
+    data = response.json()
+    assert isinstance(data, dict)
+    assert 'includes' in data or 'confirmation_needed' in data or 'invalid' in data
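The loop-to-parametrize conversions above repeat the same inline server probe in every test body. A possible consolidation, not part of this commit, is a module-scoped autouse fixture so the probe runs once and its skip is cached for every case; the fixture name require_local_server and BASE_URL constant are hypothetical:

    import pytest
    import requests

    BASE_URL = "http://localhost:8080"  # the server these tests assume

    @pytest.fixture(scope="module", autouse=True)
    def require_local_server():
        # Probe once per module; pytest caches the resulting skip for
        # all tests in the module instead of re-probing per case.
        try:
            requests.get(f"{BASE_URL}/", timeout=0.5)
        except Exception:
            pytest.skip(f"Local web server is not running on {BASE_URL}; skipping HTTP-based tests")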

View file

@@ -34,10 +34,9 @@ def test_fuzzy_matching_direct():
     if result.matched_name is None and not result.auto_accepted and result.suggestions:
         print("✅ Fuzzy matching correctly triggered confirmation!")
-        return True
     else:
         print("❌ Fuzzy matching should have triggered confirmation")
-        return False
+        assert False

 def test_exact_match_direct():
     """Test exact matching directly."""
@@ -52,17 +51,16 @@ def test_exact_match_direct():
     result = fuzzy_match_card_name('Lightning Bolt', available_cards)

-    print(f"Input: 'Lightning Bolt'")
+    print("Input: 'Lightning Bolt'")
     print(f"Matched name: {result.matched_name}")
     print(f"Auto accepted: {result.auto_accepted}")
     print(f"Confidence: {result.confidence:.2%}")

     if result.matched_name and result.auto_accepted:
         print("✅ Exact match correctly auto-accepted!")
-        return True
     else:
         print("❌ Exact match should have been auto-accepted")
-        return False
+        assert False

 if __name__ == "__main__":
     print("🧪 Testing Fuzzy Matching Logic")

View file

@@ -8,11 +8,17 @@ import os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'code'))

 import requests
+import pytest
 import json

 def test_fuzzy_match_confirmation():
     """Test that fuzzy matching returns confirmation_needed items for low confidence matches."""
     print("🔍 Testing fuzzy match confirmation modal backend...")
+    # Skip if local server isn't running
+    try:
+        requests.get('http://localhost:8080/', timeout=0.5)
+    except Exception:
+        pytest.skip('Local web server is not running on http://localhost:8080; skipping HTTP-based test')

     # Test with a typo that should trigger confirmation
     test_data = {
@@ -29,19 +35,19 @@ def test_fuzzy_match_confirmation():
         if response.status_code != 200:
             print(f"❌ Request failed with status {response.status_code}")
-            return False
+            assert False

         data = response.json()

         # Check if confirmation_needed is populated
         if 'confirmation_needed' not in data:
             print("❌ No confirmation_needed field in response")
-            return False
+            assert False

         if not data['confirmation_needed']:
             print("❌ confirmation_needed is empty")
             print(f"Response: {json.dumps(data, indent=2)}")
-            return False
+            assert False

         confirmation = data['confirmation_needed'][0]
         expected_fields = ['input', 'suggestions', 'confidence', 'type']
@@ -49,23 +55,25 @@ def test_fuzzy_match_confirmation():
         for field in expected_fields:
             if field not in confirmation:
                 print(f"❌ Missing field '{field}' in confirmation")
-                return False
-        print(f"✅ Fuzzy match confirmation working!")
+                assert False
+        print("✅ Fuzzy match confirmation working!")
         print(f"   Input: {confirmation['input']}")
         print(f"   Suggestions: {confirmation['suggestions']}")
         print(f"   Confidence: {confirmation['confidence']:.2%}")
         print(f"   Type: {confirmation['type']}")
-        return True
     except Exception as e:
         print(f"❌ Test failed with error: {e}")
-        return False
+        assert False

 def test_exact_match_no_confirmation():
     """Test that exact matches don't trigger confirmation."""
     print("\n🎯 Testing exact match (no confirmation)...")
+    # Skip if local server isn't running
+    try:
+        requests.get('http://localhost:8080/', timeout=0.5)
+    except Exception:
+        pytest.skip('Local web server is not running on http://localhost:8080; skipping HTTP-based test')

     test_data = {
         'include_cards': 'Lightning Bolt',  # Exact match
@@ -81,27 +89,25 @@ def test_exact_match_no_confirmation():
         if response.status_code != 200:
             print(f"❌ Request failed with status {response.status_code}")
-            return False
+            assert False

         data = response.json()

         # Should not have confirmation_needed for exact match
         if data.get('confirmation_needed'):
             print(f"❌ Exact match should not trigger confirmation: {data['confirmation_needed']}")
-            return False
+            assert False

         # Should have legal includes
         if not data.get('includes', {}).get('legal'):
             print("❌ Exact match should be in legal includes")
             print(f"Response: {json.dumps(data, indent=2)}")
-            return False
+            assert False

         print("✅ Exact match correctly bypasses confirmation!")
-        return True
     except Exception as e:
         print(f"❌ Test failed with error: {e}")
-        return False
+        assert False

 if __name__ == "__main__":
     print("🧪 Testing Fuzzy Match Confirmation Modal")

View file

@@ -2,69 +2,43 @@
 """Test improved fuzzy matching algorithm with the new endpoint"""
 import requests
-import json
+import pytest

-def test_improved_fuzzy():
-    """Test improved fuzzy matching with various inputs"""
-    test_cases = [
+@pytest.mark.parametrize(
+    "input_text,description",
+    [
         ("lightn", "Should find Lightning cards"),
         ("light", "Should find Light cards"),
         ("bolt", "Should find Bolt cards"),
-        ("bolt", "Should find Bolt cards"),
         ("blightni", "Should find Blightning"),
-        ("lightn bo", "Should be unclear match")
-    ]
-
-    for input_text, description in test_cases:
-        print(f"\n🔍 Testing: '{input_text}' ({description})")
-        print("=" * 60)
-
-        test_data = {
-            "include_cards": input_text,
-            "exclude_cards": "",
-            "commander": "",
-            "enforcement_mode": "warn",
-            "allow_illegal": "false",
-            "fuzzy_matching": "true"
-        }
-
-        try:
-            response = requests.post(
-                "http://localhost:8080/build/validate/include_exclude",
-                data=test_data,
-                timeout=10
-            )
-            if response.status_code == 200:
-                data = response.json()
-                # Check results
-                if data.get("confirmation_needed"):
-                    print(f"🔄 Fuzzy confirmation needed for '{input_text}'")
-                    for item in data["confirmation_needed"]:
-                        print(f"   Best: '{item['best_match']}' ({item['confidence']:.1%})")
-                        if item.get('suggestions'):
-                            print(f"   Top 3:")
-                            for i, suggestion in enumerate(item['suggestions'][:3], 1):
-                                print(f"      {i}. {suggestion}")
-                elif data.get("valid"):
-                    print(f"✅ Auto-accepted: {[card['name'] for card in data['valid']]}")
-                    # Show best match info if available
-                    for card in data['valid']:
-                        if card.get('fuzzy_match_info'):
-                            print(f"   Fuzzy matched '{input_text}' → '{card['name']}' ({card['fuzzy_match_info'].get('confidence', 0):.1%})")
-                elif data.get("invalid"):
-                    print(f"❌ Invalid: {[card['input'] for card in data['invalid']]}")
-                else:
-                    print(f"❓ No clear result for '{input_text}'")
-                    print(f"Response keys: {list(data.keys())}")
-            else:
-                print(f"❌ HTTP {response.status_code}")
-        except Exception as e:
-            print(f"❌ EXCEPTION: {e}")
-
-if __name__ == "__main__":
-    print("🧪 Testing Improved Fuzzy Match Algorithm")
-    print("==========================================")
-    test_improved_fuzzy()
+        ("lightn bo", "Should be unclear match"),
+    ],
+)
+def test_improved_fuzzy(input_text: str, description: str):
+    # Skip if local server isn't running
+    try:
+        requests.get('http://localhost:8080/', timeout=0.5)
+    except Exception:
+        pytest.skip('Local web server is not running on http://localhost:8080; skipping HTTP-based test')
+
+    print(f"\n🔍 Testing: '{input_text}' ({description})")
+    test_data = {
+        "include_cards": input_text,
+        "exclude_cards": "",
+        "commander": "",
+        "enforcement_mode": "warn",
+        "allow_illegal": "false",
+        "fuzzy_matching": "true",
+    }
+    response = requests.post(
+        "http://localhost:8080/build/validate/include_exclude",
+        data=test_data,
+        timeout=10,
+    )
+    assert response.status_code == 200
+    data = response.json()
+    # Ensure we got some structured response
+    assert isinstance(data, dict)
+    assert 'includes' in data or 'confirmation_needed' in data or 'invalid' in data

View file

@@ -73,7 +73,7 @@ def test_m5_structured_logging():
             print(f"❌ Missing event: {event}")

     print(f"\n📋 Results: {len(found_events)}/{len(expected_events)} expected events found")

     # Test strict mode logging
     print("\n🔒 Testing strict mode logging...")
     builder_obj.enforcement_mode = "strict"
@@ -82,14 +82,13 @@ def test_m5_structured_logging():
             print("✅ Strict mode passed (no missing includes)")
         except RuntimeError as e:
             print(f"❌ Strict mode failed: {e}")

-        return len(found_events) == len(expected_events)
+        assert len(found_events) == len(expected_events)
     except Exception as e:
         print(f"❌ Test failed with error: {e}")
         import traceback
         traceback.print_exc()
-        return False
     finally:
         logger.removeHandler(handler)
@@ -128,7 +127,7 @@ def test_m5_performance_metrics():
     else:
         print("❌ Performance metrics too slow")

-    return performance_acceptable
+    assert performance_acceptable

 if __name__ == "__main__":

View file

@@ -2,59 +2,46 @@
 """Test improved matching for specific cases that were problematic"""
 import requests
+import pytest

-# Test the specific cases from the screenshots
-test_cases = [
-    ("lightn", "Should prioritize Lightning Bolt over Blightning/Flight"),
-    ("cahso warp", "Should clearly find Chaos Warp first"),
-    ("bolt", "Should find Lightning Bolt"),
-    ("warp", "Should find Chaos Warp")
-]
-
-for input_text, description in test_cases:
+@pytest.mark.parametrize(
+    "input_text,description",
+    [
+        ("lightn", "Should prioritize Lightning Bolt over Blightning/Flight"),
+        ("cahso warp", "Should clearly find Chaos Warp first"),
+        ("bolt", "Should find Lightning Bolt"),
+        ("warp", "Should find Chaos Warp"),
+    ],
+)
+def test_specific_matches(input_text: str, description: str):
+    # Skip if local server isn't running
+    try:
+        requests.get('http://localhost:8080/', timeout=0.5)
+    except Exception:
+        pytest.skip('Local web server is not running on http://localhost:8080; skipping HTTP-based test')
     print(f"\n🔍 Testing: '{input_text}' ({description})")
     print("=" * 70)

     test_data = {
         "include_cards": input_text,
         "exclude_cards": "",
         "commander": "",
         "enforcement_mode": "warn",
         "allow_illegal": "false",
-        "fuzzy_matching": "true"
+        "fuzzy_matching": "true",
     }

-    try:
-        response = requests.post(
-            "http://localhost:8080/build/validate/include_exclude",
-            data=test_data,
-            timeout=10
-        )
-        if response.status_code == 200:
-            data = response.json()
-            # Check results
-            if data.get("confirmation_needed"):
-                print("🔄 Confirmation modal would show:")
-                for item in data["confirmation_needed"]:
-                    print(f"   Input: '{item['input']}'")
-                    print(f"   Confidence: {item['confidence']:.1%}")
-                    print(f"   Top suggestions:")
-                    for i, suggestion in enumerate(item['suggestions'][:5], 1):
-                        print(f"      {i}. {suggestion}")
-            elif data.get("includes", {}).get("legal"):
-                fuzzy = data["includes"].get("fuzzy_matches", {})
-                if input_text in fuzzy:
-                    print(f"✅ Auto-accepted: '{input_text}' → '{fuzzy[input_text]}'")
-                else:
-                    print(f"✅ Exact match: {data['includes']['legal']}")
-            else:
-                print("❌ No matches found")
-        else:
-            print(f"❌ HTTP {response.status_code}")
-    except Exception as e:
-        print(f"❌ EXCEPTION: {e}")
-
-print(f"\n💡 Testing complete! Check if Lightning/Chaos suggestions are now prioritized.")
+    response = requests.post(
+        "http://localhost:8080/build/validate/include_exclude",
+        data=test_data,
+        timeout=10,
+    )
+    assert response.status_code == 200
+    data = response.json()
+    assert isinstance(data, dict)
+    # At least one of the expected result containers should exist
+    assert (
+        data.get("confirmation_needed") is not None
+        or data.get("includes") is not None
+        or data.get("invalid") is not None
+    )

View file

@@ -71,9 +71,9 @@ def test_m5_structured_logging():
                 print(f"✅ Found event: {event}")
             else:
                 print(f"❌ Missing event: {event}")

     print(f"\n📋 Results: {len(found_events)}/{len(expected_events)} expected events found")

     # Test strict mode logging
     print("\n🔒 Testing strict mode logging...")
     builder_obj.enforcement_mode = "strict"
@@ -82,14 +82,14 @@ def test_m5_structured_logging():
             print("✅ Strict mode passed (no missing includes)")
         except RuntimeError as e:
             print(f"❌ Strict mode failed: {e}")

-        return len(found_events) == len(expected_events)
+        # Final assertion inside try so except/finally remain valid
+        assert len(found_events) == len(expected_events)
     except Exception as e:
         print(f"❌ Test failed with error: {e}")
         import traceback
         traceback.print_exc()
-        return False
     finally:
         logger.removeHandler(handler)
@@ -128,7 +128,7 @@ def test_m5_performance_metrics():
     else:
         print("❌ Performance metrics too slow")

-    return performance_acceptable
+    assert performance_acceptable

 if __name__ == "__main__":

View file

@@ -1,18 +1,21 @@
 #!/usr/bin/env python3
 """
 Test the web validation endpoint to confirm fuzzy matching works.
+Skips if the local web server is not running.
 """
 import sys
 import os
 sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'code'))

 import requests
 import json
+import pytest

 def test_validation_with_empty_commander():
     """Test validation without commander to see basic fuzzy logic."""
     print("🔍 Testing validation endpoint with empty commander...")
+    # Skip if local server isn't running
+    try:
+        requests.get('http://localhost:8080/', timeout=0.5)
+    except Exception:
+        pytest.skip('Local web server is not running on http://localhost:8080; skipping HTTP-based test')

     test_data = {
         'include_cards': 'Lighning',  # Should trigger suggestions
@@ -25,20 +28,25 @@ def test_validation_with_empty_commander():
     try:
         response = requests.post('http://localhost:8080/build/validate/include_exclude', data=test_data)
         assert response.status_code == 200

         data = response.json()
+        # Check expected structure keys exist
+        assert isinstance(data, dict)
+        assert 'includes' in data or 'confirmation_needed' in data or 'invalid' in data
         print("Response:")
         print(json.dumps(data, indent=2))
-        return data
     except Exception as e:
         print(f"❌ Test failed with error: {e}")
-        return None
+        assert False

 def test_validation_with_false_fuzzy():
     """Test with fuzzy matching disabled."""
     print("\n🎯 Testing with fuzzy matching disabled...")
+    # Skip if local server isn't running
+    try:
+        requests.get('http://localhost:8080/', timeout=0.5)
+    except Exception:
+        pytest.skip('Local web server is not running on http://localhost:8080; skipping HTTP-based test')

     test_data = {
         'include_cards': 'Lighning',
@@ -51,29 +59,14 @@ def test_validation_with_false_fuzzy():
     try:
         response = requests.post('http://localhost:8080/build/validate/include_exclude', data=test_data)
         assert response.status_code == 200

         data = response.json()
+        assert isinstance(data, dict)
         print("Response:")
         print(json.dumps(data, indent=2))
-        return data
     except Exception as e:
         print(f"❌ Test failed with error: {e}")
-        return None
+        assert False

 if __name__ == "__main__":
     print("🧪 Testing Web Validation Endpoint")
     print("=" * 45)
-
-    data1 = test_validation_with_empty_commander()
-    data2 = test_validation_with_false_fuzzy()
-
-    print("\n📋 Analysis:")
-    if data1:
-        has_confirmation = data1.get('confirmation_needed', [])
-        print(f"   With fuzzy enabled: {len(has_confirmation)} confirmations needed")
-    if data2:
-        has_confirmation2 = data2.get('confirmation_needed', [])
-        print(f"   With fuzzy disabled: {len(has_confirmation2)} confirmations needed")
+    print("🧪 Run this test with pytest for proper reporting")

View file

@@ -67,15 +67,15 @@ Hare Apparent"""
             combo_balance=mock_session.get("combo_balance", "mix"),
             exclude_cards=mock_session.get("exclude_cards"),
         )
-        print(f"   ✓ Build context created successfully")
+        print("   ✓ Build context created successfully")
         print(f"   Context exclude_cards: {ctx.get('exclude_cards')}")

         # Test running the first stage
         print("4. Running first build stage...")
         result = orch.run_stage(ctx, rerun=False, show_skipped=False)
         print(f"   ✓ Stage completed: {result.get('label', 'Unknown')}")
         print(f"   Stage done: {result.get('done', False)}")

         # Check if there were any exclude-related messages in output
         output = result.get('output', [])
         exclude_messages = [msg for msg in output if 'exclude' in msg.lower() or 'excluded' in msg.lower()]
@@ -86,14 +86,12 @@ Hare Apparent"""
         else:
             print("5. ⚠️ No exclude-related output found in stage result")
             print("   This might indicate the filtering isn't working")

-        return True
-
     except Exception as e:
         print(f"❌ Error during build: {e}")
         import traceback
         traceback.print_exc()
-        return False
+        assert False

 if __name__ == "__main__":
     success = test_web_exclude_flow()

View file

@@ -4,7 +4,8 @@ Test to check if the web form is properly sending exclude_cards
 """
 import requests
-import re
+import pytest
+# removed unused import re

 def test_web_form_exclude():
     """Test that the web form properly handles exclude cards"""
@@ -14,6 +15,12 @@ def test_web_form_exclude():

     # Test 1: Check if the exclude textarea is visible
     print("1. Checking if exclude textarea is visible in new deck modal...")
+    # Skip if local server isn't running
+    try:
+        requests.get('http://localhost:8080/', timeout=0.5)
+    except Exception:
+        pytest.skip('Local web server is not running on http://localhost:8080; skipping HTTP-based test')
+
     try:
         response = requests.get("http://localhost:8080/build/new")
         if response.status_code == 200:
@@ -27,7 +34,7 @@ def test_web_form_exclude():
                 print("   ✅ Advanced Options section found")
             else:
                 print("   ❌ Advanced Options section NOT found")
-                return False
+                assert False

             # Check if feature flag is working
             if 'allow_must_haves' in content or 'exclude_cards' in content:
@@ -37,11 +44,11 @@ def test_web_form_exclude():
         else:
             print(f"   ❌ Failed to get modal: HTTP {response.status_code}")
-            return False
+            assert False
     except Exception as e:
         print(f"   ❌ Error checking modal: {e}")
-        return False
+        assert False

     # Test 2: Try to submit a form with exclude cards
     print("2. Testing form submission with exclude cards...")
@@ -68,14 +75,14 @@ def test_web_form_exclude():
         else:
             print(f"   ❌ Form submission failed: HTTP {response.status_code}")
-            return False
+            assert False
     except Exception as e:
         print(f"   ❌ Error submitting form: {e}")
-        return False
+        assert False

     print("3. ✅ Web form test completed")
-    return True
+    # If we reached here without assertions, the test passed

 if __name__ == "__main__":
     test_web_form_exclude()