What Is pytest?
pytest is the most widely used testing framework in Python. It offers more concise syntax than the built-in unittest, a powerful fixture system, and a rich plugin ecosystem. Tests can be written as plain functions without any classes, making the barrier to entry very low.
This article covers basic usage, fixtures, parametrization, mocking, and practical tips.
Installation and Basic Usage
pip install pytest
pytest automatically discovers and runs files and functions that start with test_. Simply use assert statements for verification.
# test_calculator.py
def add(a: int, b: int) -> int:
    """Return the sum of a and b."""
    return a + b


def subtract(a: int, b: int) -> int:
    """Return a minus b."""
    return a - b


# pytest collects any function whose name begins with test_
def test_add():
    assert add(2, 3) == 5    # simple positive case
    assert add(-1, 1) == 0   # mixed signs cancel out
    assert add(0, 0) == 0    # zero is the identity element


def test_subtract():
    assert subtract(5, 3) == 2
    assert subtract(1, 5) == -4  # result may be negative


def test_add_type_error():
    """Adding a str to an int must raise TypeError."""
    import pytest

    with pytest.raises(TypeError):
        add("string", 3)  # str + int is undefined
# Run
pytest test_calculator.py -v
# Example output:
# test_calculator.py::test_add PASSED
# test_calculator.py::test_subtract PASSED
# test_calculator.py::test_add_type_error PASSED
# ============ 3 passed in 0.02s ============
| pytest Command Option | Description |
|---|---|
| -v | Verbose output (shows each test name) |
| -x | Stop on first failure |
| -k "keyword" | Filter tests by keyword |
| --tb=short | Short traceback |
| -s | Show print output |
Fixtures
Fixtures separate setup and teardown work needed for tests into reusable functions. Define them with the @pytest.fixture decorator and inject them as test function parameters.
# test_user_service.py
import pytest


class User:
    """Value object holding a user's name and email address."""

    def __init__(self, name: str, email: str):
        self.name = name
        self.email = email


class UserService:
    """In-memory user registry keyed by email address."""

    def __init__(self):
        self.users: dict[str, User] = {}

    def add_user(self, user: User) -> None:
        """Register a user; duplicate email addresses are rejected."""
        if user.email in self.users:
            raise ValueError("Email already exists")
        self.users[user.email] = user

    def get_user(self, email: str) -> User:
        """Look up a user by email; unknown addresses raise KeyError."""
        if email not in self.users:
            raise KeyError("User not found")
        return self.users[email]


# Fixture: each test receives its own freshly built service
@pytest.fixture
def user_service():
    """Provide a UserService pre-populated with one default user."""
    svc = UserService()
    svc.add_user(User("John Doe", "john@example.com"))  # seed data
    return svc


# pytest matches the parameter name to the fixture above and injects it
def test_get_existing_user(user_service):
    assert user_service.get_user("john@example.com").name == "John Doe"


def test_add_duplicate_user(user_service):
    clone = User("Other Name", "john@example.com")
    with pytest.raises(ValueError, match="Email already exists"):
        user_service.add_user(clone)


def test_get_nonexistent_user(user_service):
    with pytest.raises(KeyError, match="User not found"):
        user_service.get_user("nobody@example.com")
Each time a test function runs, the user_service fixture is freshly created, preventing state contamination between tests.
Fixture Scope and yield
The scope parameter controls a fixture’s lifecycle. Using yield also lets you define teardown logic.
import pytest
import tempfile
import os


@pytest.fixture(scope="function")  # default scope: rebuilt for every test function
def temp_file():
    """Yield the path of a fresh temporary file; remove it afterwards."""
    fd, path = tempfile.mkstemp(suffix=".txt")
    os.close(fd)  # only the path is needed, not the open descriptor
    print(f"\n[SETUP] Temporary file created: {path}")
    yield path  # the yielded value is what the test receives
    # Everything below the yield is the teardown phase
    if os.path.exists(path):
        os.remove(path)
    print(f"[TEARDOWN] Temporary file deleted: {path}")


def test_write_to_temp_file(temp_file):
    with open(temp_file, "w") as out:
        out.write("test data")
    with open(temp_file, "r") as src:
        assert src.read() == "test data"
| Scope | When It Runs |
|---|---|
| function | For each test function (default) |
| class | For each test class |
| module | For each module (file) |
| session | Once for the entire test session |
Parametrize
To run the same test logic with various input values, use @pytest.mark.parametrize.
import pytest


def is_palindrome(text: str) -> bool:
    """Report whether text reads the same backwards, ignoring case and spaces."""
    normalized = text.lower().replace(" ", "")
    return normalized == normalized[::-1]


@pytest.mark.parametrize("text, expected", [
    ("racecar", True),                      # classic palindrome
    ("hello", False),
    ("A man a plan a canal Panama", True),  # case and spaces are ignored
    ("level", True),
    ("python", False),
    ("", True),                             # empty string counts as a palindrome
])
def test_is_palindrome(text, expected):
    # One independent test case is generated per tuple above
    assert is_palindrome(text) == expected
# Execution result:
# test_palindrome.py::test_is_palindrome[racecar-True] PASSED
# test_palindrome.py::test_is_palindrome[hello-False] PASSED
# test_palindrome.py::test_is_palindrome[A man a plan...-True] PASSED
# test_palindrome.py::test_is_palindrome[level-True] PASSED
# test_palindrome.py::test_is_palindrome[python-False] PASSED
# test_palindrome.py::test_is_palindrome[-True] PASSED
# ============ 6 passed in 0.01s ============
Using parametrization makes it easy to add test cases, and each case runs independently, so you can immediately see which input caused a failure.
Mock — Isolating External Dependencies
Use unittest.mock or pytest-mock to replace external dependencies like APIs and databases with fake objects.
# weather_service.py
import requests


def get_temperature(city: str) -> float:
    """Return the current temperature for *city* from the external weather API."""
    resp = requests.get(
        f"https://api.weather.example.com/{city}"
    )
    resp.raise_for_status()  # turn HTTP error statuses into exceptions
    payload = resp.json()
    return payload["temperature"]
# test_weather_service.py
# Fix: the original snippet used pytest, requests, and get_temperature
# without importing them, so it failed with NameError when run.
import pytest
import requests  # referenced via requests.exceptions in the error test
from unittest.mock import patch, MagicMock

from weather_service import get_temperature  # function under test


def test_get_temperature_success():
    """Tests that temperature is correctly returned on a successful response"""
    # Replace requests.get with a fake object
    mock_response = MagicMock()
    mock_response.json.return_value = {"temperature": 22.5}
    mock_response.raise_for_status.return_value = None

    # Patch the name where it is *looked up*: weather_service.requests.get
    with patch("weather_service.requests.get",
               return_value=mock_response) as mock_get:
        temp = get_temperature("Seoul")
        assert temp == 22.5
        # Verify that the API was called with the correct URL
        mock_get.assert_called_once_with(
            "https://api.weather.example.com/Seoul"
        )


def test_get_temperature_api_error():
    """Tests that an exception is raised on API error"""
    mock_response = MagicMock()
    mock_response.raise_for_status.side_effect = (
        requests.exceptions.HTTPError("500 Server Error")
    )
    with patch("weather_service.requests.get",
               return_value=mock_response):
        with pytest.raises(requests.exceptions.HTTPError):
            get_temperature("Seoul")
With mocks, you can run tests without a network and simulate various response scenarios (success, failure, timeout).
conftest.py — Shared Fixtures
Fixtures commonly used across multiple test files should be defined in conftest.py. pytest discovers it automatically.
# conftest.py (placed in the tests/ directory)
import pytest


@pytest.fixture
def sample_users():
    """Return the user records shared by several test modules."""
    return [
        {"name": "John Doe", "email": "john@test.com", "age": 30},
        {"name": "Jane Smith", "email": "jane@test.com", "age": 25},
        {"name": "Bob Wilson", "email": "bob@test.com", "age": 35},
    ]


@pytest.fixture(autouse=True)  # applied to every test without being requested
def reset_environment(monkeypatch):
    """Pin the environment variables each test starts from."""
    monkeypatch.setenv("APP_ENV", "test")
    monkeypatch.setenv("DEBUG", "true")
Setting autouse=True applies the fixture automatically to all tests in the directory.
Practical Tips
- Test naming: Use the `test_target_scenario_expected_result` format to quickly identify the cause of failures
- AAA pattern: Structure tests in 3 phases — Arrange, Act, Assert
- One assertion per test: Verify only one behavior per test function
- Test isolation: Do not share state between tests. Use fixtures to create fresh state each time
- Boundary testing: Always test edge cases like empty values, None, maximum values, and negative numbers
- Coverage measurement: Check coverage with `pytest --cov=src --cov-report=html`
- Separate slow tests: Mark them with `@pytest.mark.slow` and run them separately in CI