Pytest - Only run tests that match a substring expression


Pytest tip:

You can filter and run only tests that contain or do not contain some substring in their name.

Examples:

# run all tests that contain login in their name
$ pytest -k login

# run all tests that do not contain login in their name
$ pytest -k 'not login'

Pytest - clean up resources at the end of a test session


Python clean test tip:

Clean up resources needed for tests after the pytest session is finished — e.g., drop the test database, remove files added to the file system.

Example:

import csv
import os
import pathlib

import pytest


def list_users_from_csv(file_path):
    """Read users from a headerless CSV file.

    :param file_path: ``pathlib.Path`` to a CSV file whose rows are
        ``first_name,last_name`` pairs (no header row).
    :return: list of dicts with ``first_name`` and ``last_name`` keys.
    """
    # ``with`` guarantees the file handle is closed; the original left the
    # handle returned by ``file_path.open()`` open until garbage collection.
    with file_path.open() as csv_file:
        reader = csv.DictReader(
            csv_file,
            skipinitialspace=True,
            fieldnames=["first_name", "last_name"],
        )
        # each row is already a mapping; dict() makes a plain-dict copy
        # (the original rebuilt it with an equivalent dict comprehension)
        return [dict(row) for row in reader]


@pytest.fixture
def users_csv_path():
    """Create a throwaway users CSV for a test and remove it afterwards."""
    # setup: write the file the test will read
    csv_file = pathlib.Path("users.csv")
    csv_file.write_text("Jan,Giacomelli")
    yield csv_file
    # teardown: runs after the test finishes, even when it fails
    csv_file.unlink()


def test_all_users_are_listed(users_csv_path):
    # GIVEN a CSV with one user, WHEN listed, THEN that user is returned
    expected = [{"first_name": "Jan", "last_name": "Giacomelli"}]
    assert list_users_from_csv(users_csv_path) == expected

Pytest - Parameterizing Tests


Python clean test tip:

Use pytest parametrize when you need multiple cases to prove a single behavior.

Example:

import difflib
import pytest


def names_are_almost_equal(first, second):
    """Return True when the two names are more than 70% similar."""
    similarity = difflib.SequenceMatcher(None, first, second).ratio()
    return similarity > 0.7


@pytest.mark.parametrize(
    ("first", "second"),
    [
        ("John", "Johny"),
        ("Many", "Mary"),
    ],
)
def test_names_are_almost_equal(first, second):
    # every pair proves the same single behavior: near-identical names match
    assert names_are_almost_equal(first, second)


@pytest.mark.parametrize(
    ("first", "second"),
    [
        ("John", "Joe"),
        ("Daisy", "Serena"),
    ],
)
def test_names_are_not_almost_equal(first, second):
    # every pair proves the same single behavior: dissimilar names don't match
    assert not names_are_almost_equal(first, second)

Tests should fail for exactly one reason - aim for a single assert per test


Python clean test tip:

Aim for a single assert per test. Tests will be more readable and it's easier to locate a defect when a test is failing.

Example:

import pytest


class User:
    """A user identified by a non-empty username."""

    def __init__(self, username):
        # Reject empty usernames up front. ValueError is more precise than a
        # bare Exception and stays backward compatible: callers catching
        # Exception (e.g. pytest.raises(Exception)) still catch it.
        if not username:
            raise ValueError("Username must not be empty.")
        self._username = username

    @property
    def username(self):
        # read-only access to the validated username
        return self._username


# BAD
def test_user():
    # Two unrelated behaviors are checked in one test: if the first assert
    # fails, the exception case below is never exercised, and a failure
    # report for "test_user" doesn't say which behavior broke.
    username = "johndoe"
    assert User(username).username == username

    username = ""
    with pytest.raises(Exception):
        User(username)


# GOOD
def test_user_with_valid_username_can_be_initialized():
    # one behavior: a valid username is accepted and exposed unchanged
    user = User("johndoe")
    assert user.username == "johndoe"


def test_user_with_empty_username_cannot_be_initialized():
    # one behavior: an empty username is rejected at construction time
    with pytest.raises(Exception):
        User("")

It's fine to deviate from this, to include multiple asserts per test as long as you're testing the same concept.

Testing Naming Conventions - GIVEN-WHEN-THEN


Python clean test tip:

Tests should have descriptive names to reveal their intention. For example, you could follow GIVEN-WHEN-THEN or SHOULD-WHEN naming conventions:

import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel

app = FastAPI()


class LoginRequest(BaseModel):
    # request body schema for POST /login
    username: str
    password: str


@app.post("/login")
def login(data: LoginRequest):
    # demo endpoint: always returns a hard-coded token; no real auth here
    return {"access_token": "1234"}


@pytest.fixture()
def client():
    # a fresh test client per test keeps tests independent of each other
    test_client = TestClient(app)
    yield test_client


# BAD
def test_login(client):
    # the name "test_login" reveals nothing about the behavior under test
    response = client.post("/login", json={"username": "johndoe", "password": "correct_password"})
    assert response.status_code == 200
    assert response.json()["access_token"] == "1234"


# GOOD
def test_valid_username_and_password_combination_can_be_exchanged_for_access_token(client):
    # descriptive name: the intent is clear before reading the body
    credentials = {"username": "johndoe", "password": "correct_password"}
    response = client.post("/login", json=credentials)
    assert response.status_code == 200
    assert response.json()["access_token"] == "1234"


def test_given_valid_username_and_password_combination_when_user_calls_login_then_access_token_is_returned(client):
    # GIVEN valid credentials / WHEN login is called / THEN a token comes back
    login_response = client.post("/login", json={"username": "johndoe", "password": "correct_password"})
    assert login_response.status_code == 200
    assert login_response.json()["access_token"] == "1234"


def test_access_token_should_be_returned_when_valid_username_and_password_combination_is_provided(client):
    # SHOULD-WHEN naming: expected outcome first, condition second
    body = {"username": "johndoe", "password": "correct_password"}
    response = client.post("/login", json=body)
    assert response.status_code == 200
    assert response.json()["access_token"] == "1234"

Mock AWS Services


Pytest tip:

Use moto to mock AWS services such as S3 and DynamoDB:

https://docs.getmoto.org/

👇

import boto3
import pytest
from moto import mock_dynamodb2


@pytest.fixture
def dynamodb_table():
    """Yield an empty mocked DynamoDB table; delete it on teardown.

    Fixes vs. the original:
    - ``bot03`` -> ``boto3`` (typo).
    - ``KeyType`` must be the literal ``"RANGE"``, not ``"Range"``.
    - ``GlobalSecondarylndexes`` (lowercase L) -> ``GlobalSecondaryIndexes``.
    - GSI key attribute names now match ``AttributeDefinitions`` (the
      original declared GSIPK/GSISK but keyed the index on GS1PK/GS1SK).
    - The fixture now ``yield``s the table to the test; the original
      deleted the table immediately after creating it.
    - ``create_table`` requires a billing mode (or provisioned throughput).

    NOTE(review): ``mock_dynamodb2`` is from moto 2.x/3.x; newer moto
    releases renamed it (``mock_dynamodb`` / ``mock_aws``) — confirm the
    pinned moto version.
    """
    with mock_dynamodb2():
        dynamodb = boto3.resource("dynamodb")

        table = dynamodb.create_table(
            TableName="test",
            KeySchema=[
                {"AttributeName": "PK", "KeyType": "HASH"},
                {"AttributeName": "SK", "KeyType": "RANGE"},
            ],
            AttributeDefinitions=[
                {"AttributeName": "PK", "AttributeType": "S"},
                {"AttributeName": "SK", "AttributeType": "S"},
                {"AttributeName": "GSI1PK", "AttributeType": "S"},
                {"AttributeName": "GSI1SK", "AttributeType": "S"},
            ],
            GlobalSecondaryIndexes=[
                {
                    "IndexName": "GSI1",
                    "KeySchema": [
                        {"AttributeName": "GSI1PK", "KeyType": "HASH"},
                        {"AttributeName": "GSI1SK", "KeyType": "RANGE"},
                    ],
                    "Projection": {"ProjectionType": "ALL"},
                },
            ],
            BillingMode="PAY_PER_REQUEST",
        )

        # hand the table to the test; teardown resumes after the test ends
        yield table

        table.delete()

Skip tests based on a condition in pytest


Pytest tip:

You can skip a test based on a given condition.

For example, you can skip a test when running on Windows

import sys

import pytest


# skip on Windows: the reason string had a typo ("Window" -> "Windows")
@pytest.mark.skipif(sys.platform == "win32", reason="Not running on Windows")
def test_windows():
    # placeholder body; the point of the example is the skipif marker
    assert True

Testing code examples in docstrings with pytest


Pytest tip:

You can test code examples inside your docstrings like so:

$ pytest --doctest-modules yourmodule.py

👇

from typing import List


def daily_average(temperatures: List[float]) -> float:
    """
    Get average daily temperature

    Calculate average temperature from multiple measurements

    >>> daily_average([10.0, 12.0, 14.0])
    12.0

    :param temperatures: list of temperatures
    :return: average temperature
    :raises ZeroDivisionError: if ``temperatures`` is empty
    """
    # reST field markers must not contain a space (": param" -> ":param"),
    # otherwise Sphinx/doc tools won't parse them
    return sum(temperatures) / len(temperatures)


# python -m pytest --doctest-modules temperature.py