| name | testing-workflow |
| description | Testing workflow patterns and quality standards. Activate when working with tests, test files, test directories, code quality tools, coverage reports, or testing tasks. Includes zero-warnings policy, targeted testing during development, mocking patterns, and best practices across languages. |
| location | user |
Testing workflow patterns and quality standards for various frameworks and languages.
Treat all warnings as errors. No exceptions.
| Status | Output | Action |
|---|---|---|
| ✅ PASS | All tests passed, no warnings | Proceed |
| ❌ FAIL | Tests passed with DeprecationWarning | Fix immediately |
| ❌ FAIL | Any warning present | Block commit |
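A minimal sketch of enforcing this policy at the test level, assuming nothing beyond pytest itself: escalate every warning to an error so a stray DeprecationWarning fails the run (project-wide, the same effect comes from `filterwarnings = ["error"]` under `[tool.pytest.ini_options]`). The `legacy_helper` function is hypothetical.

```python
import warnings

import pytest

# Apply to every test in this module: any warning is raised as an error.
pytestmark = pytest.mark.filterwarnings("error")

def legacy_helper():
    # Hypothetical function standing in for code that still emits a deprecation.
    warnings.warn("use new_helper() instead", DeprecationWarning)
    return 42

def test_legacy_helper_warns():
    # With warnings escalated, the deprecation surfaces as an exception and must
    # be handled explicitly; silent deprecations can no longer slip through.
    with pytest.raises(DeprecationWarning):
        legacy_helper()
```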
Pre-Commit Requirements:
Never commit with failing tests, warnings of any kind, or skipped quality checks.
✅ DO test:
❌ DON'T test:
During Development: run targeted tests for the code you are changing.
Before Commit: run the full test suite plus quality checks (`make check`).
```
tests/
├── __init__.py / conftest.py   # Shared fixtures and setup
├── unit/                       # Fast, isolated tests
│   └── test_*.py
├── integration/                # Component interaction tests
│   └── test_*.py
└── e2e/                        # End-to-end tests
    └── test_*.py
```
Naming conventions:
- Test files: `test_*.py` or `*_test.py`
- Test classes: `Test*` (e.g., `TestUserService`)
- Test functions: `test_*` (e.g., `test_create_user_success`)
- Fixtures: descriptive names (e.g., `user_service`, `mock_database`)

CRITICAL: Never name non-test classes with a "Test" prefix - the framework will try to collect them as tests (see the sketch below).
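A short sketch of the collection pitfall, using a hypothetical helper class: pytest collects any class whose name starts with `Test`, so either rename the helper or opt it out explicitly.

```python
# ❌ Collected by pytest because of the "Test" prefix; classes with an
# __init__ cannot be collected cleanly and trigger collection warnings.
class TestDataBuilder:
    def __init__(self, seed):
        self.seed = seed

# ✅ Prefer a name without the prefix (or mark the class explicitly).
class DataBuilder:
    __test__ = False  # extra safety: tells pytest never to collect this class

    def __init__(self, seed):
        self.seed = seed
```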
Test these:
Skip these:
All tests follow the Arrange-Act-Assert (AAA) pattern for clarity:
This structure makes tests easier to read, maintain, and debug:
```python
# ✅ GOOD - Clear AAA structure
def test_user_registration():
    # Arrange
    user_data = {"email": "test@example.com", "password": "secure"}

    # Act
    result = register_user(user_data)

    # Assert
    assert result.success
    assert result.user.email == "test@example.com"
```
```python
# ❌ BAD - Testing implementation details
def test_internal_method():
    obj = MyClass()
    assert obj._internal_state == expected  # Don't test private state
```
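A hedged counterpart with a toy class (not from any real codebase): assert on observable behavior through the public API instead of reaching into private attributes.

```python
# ✅ GOOD - Testing observable behavior via the public API
class Counter:
    def __init__(self):
        self._count = 0  # private detail: free to change without breaking tests

    def increment(self):
        self._count += 1

    def value(self):
        return self._count

def test_counter_increments():
    counter = Counter()
    counter.increment()
    assert counter.value() == 1  # public behavior, not _count
```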
```python
# conftest.py
@pytest.fixture
def db_connection():
    conn = create_test_database()
    yield conn
    conn.close()

@pytest.fixture
def sample_user():
    return User(email="test@example.com", name="Test User")

# test_file.py
def test_save_user(db_connection, sample_user):
    save_user(db_connection, sample_user)
    assert user_exists(db_connection, sample_user.email)
```
Fixture scopes: `function` (default), `class`, `module`, `session`.
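A small sketch of scope in practice (names are illustrative): a session-scoped fixture is built once for the whole run, with teardown deferred until the end.

```python
import pytest

@pytest.fixture(scope="session")
def shared_cache():
    cache = {}      # stand-in for an expensive resource (DB connection, API client)
    yield cache
    cache.clear()   # teardown runs once, after the entire session

def test_cache_roundtrip(shared_cache):
    shared_cache["key"] = "value"
    assert shared_cache["key"] == "value"
```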
```python
@pytest.mark.parametrize("input,expected", [
    ("hello", "HELLO"),
    ("world", "WORLD"),
    ("", ""),
    (None, None),
])
def test_uppercase_conversion(input, expected):
    assert to_uppercase(input) == expected
```
Tests should verify that exceptions are raised with correct messages for invalid inputs:
```python
# ✅ GOOD - Testing exception
def test_create_user_invalid_email(user_service):
    """Test user creation fails with invalid email."""
    user_data = {
        "username": "testuser",
        "email": "invalid-email",  # Invalid format
        "age": 25
    }

    # Expect exception when invalid email is provided
    with pytest.raises(ValidationError) as exc_info:
        user_service.create_user(user_data)

    assert "email" in str(exc_info.value)
```
Mock external dependencies to keep tests fast, deterministic, and independent of the network and third-party services:
```python
from unittest.mock import Mock, patch

# Mock external service
@patch('module.requests.get')
def test_api_call(mock_get):
    mock_get.return_value.json.return_value = {"status": "ok"}

    result = fetch_data()

    # Verify mock was called and behavior is correct
    mock_get.assert_called_once()
    assert result["status"] == "ok"

# Dependency injection for testability
class UserService:
    def __init__(self, db_connection):
        self.db = db_connection

    def get_user(self, user_id):
        # Look up the user through the injected connection
        return self.db.query(user_id)

def test_get_user():
    mock_db = Mock()
    mock_db.query.return_value = {"id": 1, "name": "Test"}

    service = UserService(mock_db)

    assert service.get_user(1)["name"] == "Test"
```
Integration tests verify that multiple components work together correctly. They typically exercise real infrastructure (such as a test database) with setup and teardown handled by shared fixtures:
```python
@pytest.fixture(scope="module")
def test_database():
    """Provide test database for integration tests."""
    db = create_test_database()
    run_migrations(db)
    yield db
    cleanup_database(db)

def test_user_operations(test_database):
    """Test user repository with real database."""
    user = create_user(test_database, email="test@example.com")
    assert user.id is not None

    retrieved = get_user(test_database, user.id)
    assert retrieved.email == "test@example.com"
```
```python
# 1. Write failing test
def test_calculate_discount():
    result = calculate_discount(price=100, discount_percent=10)
    assert result == 90

# 2. Implement
def calculate_discount(price, discount_percent):
    if not 0 <= discount_percent <= 100:
        raise ValueError("Discount must be between 0 and 100")
    return price - (price * discount_percent / 100)
```
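The cycle then repeats: the validation branch added in step 2 gets its own failing-then-passing test (a sketch, assuming pytest is imported):

```python
# 3. Next red/green cycle - pin down the error path introduced above
def test_calculate_discount_rejects_out_of_range():
    with pytest.raises(ValueError, match="between 0 and 100"):
        calculate_discount(price=100, discount_percent=150)
```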
```toml
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
    "-v",
    "--strict-markers",
    "--tb=short",
    "--cov=app",
    "--cov-report=term-missing",
]
markers = [
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
    "integration: marks tests as integration tests",
]
```
Tests can be categorized with markers to allow selective execution:
Common marker categories:
- `slow` - Tests that take longer to run (deselect with `-m "not slow"`)
- `integration` - Integration tests that use external services
- `unit` - Unit tests (fast, isolated)
- `e2e` - End-to-end tests
- `performance` - Performance/benchmark tests

```python
@pytest.mark.slow
def test_expensive_operation():
    result = process_large_dataset()
    assert result.success

@pytest.mark.integration
def test_database_integration():
    result = query_database()
    assert result is not None
```
```bash
# Run only fast tests
pytest -m "not slow"

# Run only integration tests
pytest -m integration
```
Performance tests verify that operations complete within acceptable time limits:
```python
import time

def test_performance_within_limit(data_processor):
    """Test processing completes within time limit."""
    large_dataset = generate_test_data(10000)

    start = time.time()
    result = data_processor.process(large_dataset)
    duration = time.time() - start

    assert duration < 1.0  # Should complete in under 1 second
    assert len(result) == 10000
```
Run the full quality suite (`make check`) before committing.
Always: run targeted tests while developing, run the full suite plus quality checks before commit, and treat every warning as an error.
Never: commit with failing tests or warnings, or assert on private/internal state.
```bash
# Development - Targeted
pytest tests/unit/test_file.py -v            # Specific file
pytest -k "test_name" -v                     # Pattern match
pytest tests/unit/test_file.py::test_func    # Exact test
pytest -v --tb=short                         # Cleaner errors

# Debugging
pytest -l      # Show locals
pytest --pdb   # Debug on failure
pytest -x      # Stop on first failure
pytest --lf    # Rerun last failed

# Coverage
pytest --cov=app --cov-report=html           # HTML report
pytest --cov=app --cov-report=term-missing   # Terminal report

# Verification
make check                         # Full suite + quality
uv run pytest                      # All tests
uv run black --check app/ tests/   # Format check
uv run isort --check app/ tests/   # Import order
uv run flake8 app/ tests/          # Linting
uv run mypy app/ tests/            # Type check
```
TL;DR: Zero-warnings policy. Follow the test pyramid. Arrange-Act-Assert pattern. Mock external dependencies. Test behavior, not implementation. >80% coverage on critical paths. Run targeted tests during development, full suite before commit.