pytest Complete Guide — Fixtures, Parametrize, Mock, and Coverage

Master pytest for Python testing: write fixtures, parametrize test cases, mock external dependencies, measure coverage, and structure tests for large projects.

Introduction

pytest is the de facto standard for Python testing. It is simpler than unittest, more powerful than doctest, and has a rich ecosystem of plugins. Its killer feature is the fixture system — a dependency injection mechanism for test setup that scales from simple values to complex database-backed environments.

pip install pytest pytest-cov pytest-mock pytest-asyncio httpx

Basic Test Structure

pytest discovers tests in files matching test_*.py or *_test.py, in functions and methods prefixed with test_.

# test_calculator.py

def add(a: float, b: float) -> float:
    """Return the sum of *a* and *b*."""
    total = a + b
    return total

def divide(a: float, b: float) -> float:
    """Return a / b.

    Raises:
        ValueError: if *b* is zero.
    """
    if not b:
        raise ValueError("Cannot divide by zero")
    return a / b

# Tests
def test_add():
    """Integer cases plus a float case compared via pytest.approx."""
    # Bug fix: this snippet used pytest.approx without importing pytest
    # (the only import was inside test_divide_by_zero below). In a real
    # test module, put `import pytest` at the top of the file.
    import pytest

    assert add(1, 2) == 3
    assert add(-1, 1) == 0
    # 0.1 + 0.2 != 0.3 exactly in binary floating point, hence approx.
    assert add(0.1, 0.2) == pytest.approx(0.3)

def test_divide():
    """Happy path: exact division of two integers."""
    result = divide(10, 2)
    assert result == 5.0

def test_divide_by_zero():
    """A zero divisor must raise ValueError with the documented message."""
    import pytest

    with pytest.raises(ValueError) as excinfo:
        divide(10, 0)
    assert "Cannot divide by zero" in str(excinfo.value)

Fixtures — Reusable Test Setup

Fixtures are pytest's most powerful feature. They are functions decorated with @pytest.fixture that provide values or objects to tests:

import pytest
from myapp.models import User
from myapp.database import Database

@pytest.fixture
def db():
    """Yield a throwaway in-memory database, closed after each test."""
    handle = Database(":memory:")
    handle.create_tables()
    yield handle      # the test body runs here
    handle.close()    # teardown: everything after the yield

@pytest.fixture
def user(db):
    """A persisted test user; requesting it pulls in the db fixture too."""
    email, name = "[email protected]", "Test User"
    return db.create_user(email=email, name=name)

def test_user_creation(user):
    """The user fixture yields a record with an id and the seeded email."""
    assert user.id is not None
    assert user.email == "[email protected]"

def test_user_in_db(user, db):
    """The fixture-created user is retrievable from the database by id."""
    fetched = db.get_user(user.id)
    assert fetched.name == "Test User"

Fixture Scopes

@pytest.fixture(scope="function")  # default: one per test
def fresh_user():
    """Rebuilt for every test function — the default when scope is omitted."""
    ...

@pytest.fixture(scope="class")     # one per test class
def class_user():
    """Created once per test class and shared by its test methods."""
    ...

@pytest.fixture(scope="module")    # one per test module/file
def module_db():
    """Created once per module; every test in the file shares it."""
    ...

@pytest.fixture(scope="session")   # one for entire test run
def session_db():
    """Spin Postgres up once per run, migrate, share, then tear down."""
    database = start_postgres_container()
    run_migrations(database)
    yield database
    stop_postgres_container()

conftest.py — Shared Fixtures

Put fixtures in conftest.py to make them available to all tests in the same directory and subdirectories — no imports needed:

# conftest.py
import pytest
from fastapi.testclient import TestClient
from myapp.main import app
from myapp.database import get_db, create_test_db

@pytest.fixture(scope="session")
def test_db():
    """One shared test database for the whole run, dropped at the end."""
    database = create_test_db()
    yield database
    database.drop_all()

@pytest.fixture
def client(test_db):
    """HTTP test client with the app's DB dependency swapped for test_db."""
    app.dependency_overrides[get_db] = lambda: test_db
    with TestClient(app) as http:
        yield http
    # Undo the override so later tests see the real dependency graph.
    app.dependency_overrides.clear()

@pytest.mark.parametrize — Data-Driven Tests

Parametrize runs the same test function with multiple input sets, eliminating test duplication:

import pytest

@pytest.mark.parametrize("email,is_valid", [
    ("[email protected]", True),
    ("[email protected]", True),
    ("not-an-email", False),
    ("@nodomain.com", False),
    ("", False),
])
def test_email_validation(email, is_valid):
    """Each (email, expected) pair above becomes one generated test case."""
    from myapp.validators import is_valid_email

    verdict = is_valid_email(email)
    assert verdict == is_valid

# Parametrize with multiple parameters
@pytest.mark.parametrize("a,b,expected", [
    (1, 2, 3),
    (-1, 1, 0),
    (100, 200, 300),
    (0, 0, 0),
])
def test_add(a, b, expected):
    """Same test body, four argument sets — four reported test cases."""
    total = add(a, b)
    assert total == expected

# Named test IDs for readability
@pytest.mark.parametrize("status_code,message", [
    pytest.param(200, "OK", id="success"),
    pytest.param(404, "Not Found", id="not-found"),
    pytest.param(500, "Server Error", id="server-error"),
])
def test_status_messages(status_code, message):
    """The id= given to pytest.param names each case in the test report."""
    assert message == get_status_message(status_code)

Mocking with pytest-mock and unittest.mock

Mocking replaces real implementations with controlled fakes during testing:

import pytest
from unittest.mock import patch, MagicMock, AsyncMock

# Using pytest-mock (mocker fixture)
def test_send_email(mocker):
    """send_welcome_email constructs the (patched) SMTP client exactly once."""
    smtp_cls = mocker.patch("myapp.email.smtplib.SMTP")
    # The value produced by `with SMTP(...) as session:` in the code under test.
    session = smtp_cls.return_value.__enter__.return_value
    session.sendmail.return_value = {}

    from myapp.email import send_welcome_email
    send_welcome_email("[email protected]")

    smtp_cls.assert_called_once()

# Patch external HTTP calls
def test_fetch_user_data(mocker):
    """fetch_user returns the parsed JSON body of a (patched) requests.get."""
    fake_response = mocker.MagicMock(status_code=200)
    fake_response.json.return_value = {"id": 1, "name": "Alice"}
    mocker.patch("requests.get", return_value=fake_response)

    from myapp.api import fetch_user

    assert fetch_user(1)["name"] == "Alice"

# Mock with side_effect for raising exceptions
def test_retry_on_failure(mocker):
    """call_with_retry keeps calling until the third attempt succeeds."""
    # side_effect as a list: each call consumes the next entry; exceptions
    # in the list are raised, plain values are returned.
    attempts = [
        ConnectionError("first failure"),
        ConnectionError("second failure"),
        {"status": "ok"},   # succeeds on third try
    ]
    stub = mocker.patch("myapp.service.external_api_call", side_effect=attempts)

    from myapp.service import call_with_retry

    assert call_with_retry() == {"status": "ok"}
    assert stub.call_count == 3

Mocking Async Code

import pytest
import pytest_asyncio

@pytest.mark.asyncio
async def test_async_fetch(mocker):
    """fetch_data returns the 'data' payload of an awaited httpx GET."""
    # Bug fix: AsyncMock was used here without being imported — this
    # snippet only imports pytest and pytest_asyncio above.
    from unittest.mock import AsyncMock

    mock_get = mocker.patch("httpx.AsyncClient.get", new_callable=AsyncMock)
    # Awaiting the AsyncMock yields its return_value; .json() on that is sync.
    mock_get.return_value.json.return_value = {"data": [1, 2, 3]}

    from myapp.async_api import fetch_data
    result = await fetch_data()
    assert result == [1, 2, 3]

Coverage — Measuring Test Completeness

# Run tests with coverage
pytest --cov=myapp --cov-report=html --cov-report=term-missing

# Fail if coverage drops below threshold
pytest --cov=myapp --cov-fail-under=80

# Coverage for specific files
pytest --cov=myapp/models.py tests/test_models.py
# setup.cfg (pyproject.toml uses [tool.pytest.ini_options] instead — see below)
[tool:pytest]
addopts = --cov=myapp --cov-report=html --cov-fail-under=80

[coverage:run]
source = myapp
omit =
    myapp/migrations/*
    myapp/config.py

[coverage:report]
exclude_lines =
    pragma: no cover
    if TYPE_CHECKING:
    raise NotImplementedError

Markers — Categorizing Tests

import pytest

@pytest.mark.slow
def test_large_data_processing():
    """Tagged `slow`; excluded by running pytest -m "not slow"."""
    ...

@pytest.mark.integration
def test_database_write():
    """Tagged `integration`; selected with pytest -m integration."""
    ...

@pytest.mark.skip(reason="Blocked by issue #123")
def test_pending_feature():
    """Never runs; reported as skipped with the given reason."""
    ...

@pytest.mark.xfail(reason="Known bug, tracked in issue #456")
def test_known_failure():
    """Runs but is expected to fail (xfail); passing reports as XPASS."""
    ...
# Run only fast tests
pytest -m "not slow"

# Run only integration tests
pytest -m integration

# Skip integration tests in CI
pytest -m "not integration"

pytest.ini / pyproject.toml Configuration

# pyproject.toml
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = [
    "-v",
    "--tb=short",
    "--strict-markers",
]
markers = [
    'slow: marks tests as slow (deselect with -m "not slow")',
    "integration: marks integration tests",
    "unit: marks unit tests",
]

DevKits Tools for Python Development

Speed up your Python development workflow with the DevKits suite of Python tools.

Summary

pytest is one of Python's most important tools. The key concepts to master:

  • Fixtures — dependency injection for test setup and teardown
  • conftest.py — shared fixtures without imports
  • parametrize — data-driven tests without code duplication
  • mocker — replace real implementations with controlled fakes
  • coverage — measure what code your tests actually execute
  • markers — categorize tests and run subsets efficiently