8 changes: 6 additions & 2 deletions .github/workflows/main.yml
@@ -55,6 +55,10 @@ jobs:

   benchmarks:
     runs-on: ubuntu-22.04
+    strategy:
+      matrix:
+        shard: [1,2]

     steps:
       - uses: actions/checkout@v4

@@ -74,11 +78,11 @@
         run: |
           python -VV
           uv venv
-          uv pip install pytest==7.4.4 pyyaml==6.0.1 pytest-codspeed==3.2.0 Django==5.1.1 /home/runner/work/grimp/grimp
+          uv pip install pytest==7.4.4 pyyaml==6.0.1 pytest-codspeed==3.2.0 pytest-test-groups==1.2.0 Django==5.1.1 /home/runner/work/grimp/grimp

       - name: Run benchmarks
         uses: CodSpeedHQ/action@v3
         with:
           token: ${{ secrets.CODSPEED_TOKEN }}
           run: |
-            uv run pytest tests/benchmarking/ --codspeed
+            uv run pytest tests/benchmarking/ --codspeed --test-group=${{ matrix.shard }} --test-group-by filename --test-group-count=2
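The matrix above runs the benchmarks job twice, and pytest-test-groups makes each shard collect a different subset of the benchmark files (grouped by filename). As a rough illustration of the idea - the plugin's real assignment logic may differ - a round-robin split over sorted filenames looks like this:

# Rough sketch (not pytest-test-groups' actual code) of filename-based
# sharding: each matrix job keeps only the test files assigned to its group,
# so the two CI jobs benchmark disjoint halves of the suite.
from pathlib import Path


def split_into_groups(test_files: list[str], group_count: int) -> dict[int, list[str]]:
    """Deal sorted test files round-robin into 1-based groups."""
    groups: dict[int, list[str]] = {group: [] for group in range(1, group_count + 1)}
    for index, filename in enumerate(sorted(test_files)):
        groups[index % group_count + 1].append(filename)
    return groups


if __name__ == "__main__":
    files = [str(p) for p in Path("tests/benchmarking").glob("test_*.py")]
    for shard, shard_files in split_into_groups(files, group_count=2).items():
        print(f"--test-group={shard} would run: {shard_files}")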
70 changes: 70 additions & 0 deletions tests/benchmarking/test_building_graph.py
@@ -0,0 +1,70 @@
+import pytest
+import json
+import importlib
+from pathlib import Path
+from grimp.adaptors.graph import ImportGraph
+import grimp
+
+
+@pytest.fixture(scope="module")
+def large_graph():
+    raw_json = (Path(__file__).parent / "large_graph.json").read_text()
+    graph_dict = json.loads(raw_json)
+    graph = ImportGraph()
+
+    for importer, importeds in graph_dict.items():
+        graph.add_module(importer)
+        for imported in importeds:
+            graph.add_import(
+                importer=importer,
+                imported=imported,
+                line_number=1,
+                line_contents=f"import {imported}",
+            )
+
+    return graph
+
+
+def test_build_django_uncached(benchmark):
+    """
+    Benchmarks building a graph of a real package - in this case Django.
+
+    In this benchmark, the cache is turned off.
+    """
+    benchmark(grimp.build_graph, "django", cache_dir=None)
+
+
+def test_build_django_from_cache_no_misses(benchmark):
+    """
+    Benchmarks building a graph of a real package - in this case Django.
+
+    This benchmark fully utilizes the cache.
+    """
+    # Populate the cache first, before beginning the benchmark.
+    grimp.build_graph("django")
+
+    benchmark(grimp.build_graph, "django")
+
+
+@pytest.mark.parametrize(
+    "number_of_misses",
+    (
+        2,  # Fewer than the likely number of CPUs.
+        15,  # A bit more than the likely number of CPUs.
+    ),
+)
+def test_build_django_from_cache_a_few_misses(benchmark, number_of_misses):
+    """
+    Benchmarks building a graph of a real package - in this case Django.
+
+    This benchmark utilizes the cache except for a few modules, which we add.
+    """
+    # Populate the cache first, before beginning the benchmark.
+    grimp.build_graph("django")
+    # Add modules which won't be in the cache.
+    django_path = Path(importlib.util.find_spec("django").origin).parent
+    for i in range(number_of_misses):
+        new_module = django_path / f"new_module_{i}.py"
+        new_module.write_text("from django.db import models")
+
+    benchmark(grimp.build_graph, "django")
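For reference, the large_graph fixture above expects large_graph.json to contain a single JSON object mapping each importer module to a list of the modules it imports. A minimal sketch of a compatible file, using made-up module names rather than the real fixture data:

# Writes a tiny file in the shape the large_graph fixture reads; the module
# names here are illustrative, not taken from the real large_graph.json.
import json
from pathlib import Path

graph_dict = {
    "mypackage.application": ["mypackage.domain", "mypackage.data"],
    "mypackage.domain": ["mypackage.data"],
    "mypackage.data": [],
}
Path("large_graph.json").write_text(json.dumps(graph_dict, indent=2))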
@@ -1,17 +1,11 @@
 import pytest
 import json
-import importlib
 from pathlib import Path
 from grimp.adaptors.graph import ImportGraph
 from grimp import PackageDependency, Route
-import grimp
 from copy import deepcopy
-
-
-def _run_benchmark(benchmark, fn, *args, **kwargs):
-    return benchmark(fn, *args, **kwargs)


 @pytest.fixture(scope="module")
 def large_graph():
     raw_json = (Path(__file__).parent / "large_graph.json").read_text()
@@ -291,51 +285,6 @@ def large_graph():
 }


-def test_build_django_uncached(benchmark):
-    """
-    Benchmarks building a graph of a real package - in this case Django.
-
-    In this benchmark, the cache is turned off.
-    """
-    _run_benchmark(benchmark, grimp.build_graph, "django", cache_dir=None)
-
-
-def test_build_django_from_cache_no_misses(benchmark):
-    """
-    Benchmarks building a graph of a real package - in this case Django.
-
-    This benchmark fully utilizes the cache.
-    """
-    # Populate the cache first, before beginning the benchmark.
-    grimp.build_graph("django")
-
-    _run_benchmark(benchmark, grimp.build_graph, "django")
-
-
-@pytest.mark.parametrize(
-    "number_of_misses",
-    (
-        2,  # Fewer than the likely number of CPUs.
-        15,  # A bit more than the likely number of CPUs.
-    ),
-)
-def test_build_django_from_cache_a_few_misses(benchmark, number_of_misses):
-    """
-    Benchmarks building a graph of a real package - in this case Django.
-
-    This benchmark utilizes the cache except for a few modules, which we add.
-    """
-    # Populate the cache first, before beginning the benchmark.
-    grimp.build_graph("django")
-    # Add modules which won't be in the cache.
-    django_path = Path(importlib.util.find_spec("django").origin).parent
-    for i in range(number_of_misses):
-        new_module = django_path / f"new_module_{i}.py"
-        new_module.write_text("from django.db import models")
-
-    _run_benchmark(benchmark, grimp.build_graph, "django")
-
-
 class TestFindIllegalDependenciesForLayers:
     @staticmethod
     def _remove_package_dependencies(graph, package_dependencies):
@@ -352,8 +301,7 @@ def _remove_package_dependencies(graph, package_dependencies):
         return graph

     def test_top_level_large_graph_violated(self, large_graph, benchmark):
-        result = _run_benchmark(
-            benchmark,
+        result = benchmark(
             large_graph.find_illegal_dependencies_for_layers,
             layers=TOP_LEVEL_LAYERS,
             containers=("mypackage",),
@@ -364,59 +312,47 @@ def test_top_level_large_graph_kept(self, large_graph, benchmark):
         large_graph = self._remove_package_dependencies(
             large_graph, TOP_LEVEL_PACKAGE_DEPENDENCIES
         )
-        result = _run_benchmark(
-            benchmark,
+        result = benchmark(
             large_graph.find_illegal_dependencies_for_layers,
             layers=TOP_LEVEL_LAYERS,
             containers=("mypackage",),
         )
         assert result == set()

     def test_deep_layers_large_graph_violated(self, large_graph, benchmark):
-        result = _run_benchmark(
-            benchmark, large_graph.find_illegal_dependencies_for_layers, layers=DEEP_LAYERS
-        )
+        result = benchmark(large_graph.find_illegal_dependencies_for_layers, layers=DEEP_LAYERS)
         assert result == DEEP_LAYER_PACKAGE_DEPENDENCIES

     def test_deep_layers_large_graph_kept(self, large_graph, benchmark):
         large_graph = self._remove_package_dependencies(
             large_graph, DEEP_LAYER_PACKAGE_DEPENDENCIES
         )
-        result = _run_benchmark(
-            benchmark, large_graph.find_illegal_dependencies_for_layers, layers=DEEP_LAYERS
-        )
+        result = benchmark(large_graph.find_illegal_dependencies_for_layers, layers=DEEP_LAYERS)
         assert result == set()


 def test_find_descendants(large_graph, benchmark):
-    result = _run_benchmark(benchmark, large_graph.find_descendants, "mypackage")
+    result = benchmark(large_graph.find_descendants, "mypackage")
     assert len(result) == 28222


 def test_find_downstream_modules(large_graph, benchmark):
-    result = _run_benchmark(
-        benchmark, large_graph.find_downstream_modules, DEEP_LAYERS[0], as_package=True
-    )
+    result = benchmark(large_graph.find_downstream_modules, DEEP_LAYERS[0], as_package=True)
     assert len(result) == 80


 def test_find_upstream_modules(large_graph, benchmark):
-    result = _run_benchmark(
-        benchmark, large_graph.find_upstream_modules, DEEP_LAYERS[0], as_package=True
-    )
+    result = benchmark(large_graph.find_upstream_modules, DEEP_LAYERS[0], as_package=True)
     assert len(result) == 2159


 class TestFindShortestChain:
     def test_chain_found(self, large_graph, benchmark):
-        result = _run_benchmark(
-            benchmark, large_graph.find_shortest_chain, DEEP_LAYERS[0], DEEP_LAYERS[1]
-        )
+        result = benchmark(large_graph.find_shortest_chain, DEEP_LAYERS[0], DEEP_LAYERS[1])
         assert result is not None

     def test_no_chain(self, large_graph, benchmark):
-        result = _run_benchmark(
-            benchmark,
+        result = benchmark(
             large_graph.find_shortest_chain,
             DEEP_LAYERS[0],
             "mypackage.data.vendors.4053192739.6373932949",
@@ -426,8 +362,7 @@ def test_no_chain(self, large_graph, benchmark):

 class TestFindShortestChains:
     def test_chains_found(self, large_graph, benchmark):
-        result = _run_benchmark(
-            benchmark,
+        result = benchmark(
             large_graph.find_shortest_chains,
             DEEP_LAYERS[0],
             DEEP_LAYERS[1],
@@ -436,8 +371,7 @@ def test_chains_found(self, large_graph, benchmark):
         assert len(result) > 0

     def test_no_chains(self, large_graph, benchmark):
-        result = _run_benchmark(
-            benchmark,
+        result = benchmark(
             large_graph.find_shortest_chains,
             DEEP_LAYERS[0],
             "mypackage.data.vendors.4053192739.6373932949",
@@ -447,7 +381,7 @@ def test_no_chains(self, large_graph, benchmark):


 def test_copy_graph(large_graph, benchmark):
-    _run_benchmark(benchmark, lambda: deepcopy(large_graph))
+    benchmark(lambda: deepcopy(large_graph))


 def test_modules_property_first_access(large_graph, benchmark):
@@ -459,7 +393,7 @@ def f():
         # Accessing the modules property is what we're benchmarking.
         _ = large_graph.modules

-    _run_benchmark(benchmark, f)
+    benchmark(f)


 def test_modules_property_many_accesses(large_graph, benchmark):
@@ -472,34 +406,34 @@ def f():
         for i in range(1000):
             _ = large_graph.modules

-    _run_benchmark(benchmark, f)
+    benchmark(f)


 def test_get_import_details(benchmark):
     graph = ImportGraph()
     iterations = 100
     for i in range(iterations):
         graph.add_import(
-            importer=f"blue_{i}", imported=f"green_{i}", line_contents="...", line_number=i
+            importer=f"blue_{i}",
+            imported=f"green_{i}",
+            line_contents="...",
+            line_number=i,
         )

     def f():
         for i in range(iterations):
             graph.get_import_details(importer=f"blue_{i}", imported=f"green_{i}")

-    _run_benchmark(benchmark, f)
+    benchmark(f)


 def test_find_matching_modules(benchmark, large_graph):
-    matching_modules = _run_benchmark(
-        benchmark, lambda: large_graph.find_matching_modules("mypackage.domain.**")
-    )
+    matching_modules = benchmark(lambda: large_graph.find_matching_modules("mypackage.domain.**"))
     assert len(matching_modules) == 2519


 def test_find_matching_direct_imports(benchmark, large_graph):
-    matching_imports = _run_benchmark(
-        benchmark,
+    matching_imports = benchmark(
         lambda: large_graph.find_matching_direct_imports(
             "mypackage.domain.** -> mypackage.data.**"
         ),
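The _run_benchmark helper removed above was a plain pass-through: the benchmark fixture (provided here by pytest-codspeed, matching pytest-benchmark's calling convention) is itself callable with a function plus its arguments, and returns the function's result. A minimal sketch of the direct-call pattern, using sorted() as a stand-in workload rather than one of the suite's real benchmarks:

# Sketch of the benchmark fixture's callable interface: benchmark(fn, *args)
# times fn and passes its return value through, so assertions still work.
def test_sorting(benchmark):
    result = benchmark(sorted, [3, 1, 2])
    assert result == [1, 2, 3]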
3 changes: 2 additions & 1 deletion tox.ini
@@ -77,9 +77,10 @@ deps =
     pytest==7.4.4
     pyyaml==6.0.1
     pytest-codspeed==3.2.0
+    pytest-test-groups==1.2.0
     Django==5.1.1
 commands =
-    pytest --codspeed {posargs}
+    pytest --codspeed tests/benchmarking {posargs}


[testenv:docs]
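Because the tox command forwards {posargs}, the CI sharding flags can also be passed through locally after --. A sketch of running both shards in sequence - assuming the benchmarks testenv is named "benchmark", which this diff does not show:

# Runs each benchmark shard via tox; the env name "benchmark" is an
# assumption, so check tox.ini for the real environment name.
import subprocess

for shard in (1, 2):
    subprocess.run(
        [
            "tox", "-e", "benchmark", "--",
            f"--test-group={shard}",
            "--test-group-by", "filename",
            "--test-group-count=2",
        ],
        check=True,
    )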