diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ecdd925860e..45a667f8aac 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -15,20 +15,15 @@ repos:
# Ruff version.
rev: 'v0.6.9'
hooks:
+ - id: ruff-format
- id: ruff
args: ["--fix", "--show-fixes"]
- # https://github.com/python/black#version-control-integration
- - repo: https://github.com/psf/black-pre-commit-mirror
- rev: 24.8.0
- hooks:
- - id: black-jupyter
- repo: https://github.com/keewis/blackdoc
rev: v0.3.9
hooks:
- id: blackdoc
exclude: "generate_aggregations.py"
additional_dependencies: ["black==24.8.0"]
- - id: blackdoc-autoupdate-black
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.11.2
hooks:
diff --git a/CORE_TEAM_GUIDE.md b/CORE_TEAM_GUIDE.md
index 9eb91f4e586..a093788fc81 100644
--- a/CORE_TEAM_GUIDE.md
+++ b/CORE_TEAM_GUIDE.md
@@ -271,8 +271,7 @@ resources such as:
[NumPy documentation guide](https://numpy.org/devdocs/dev/howto-docs.html#documentation-style)
for docstring conventions.
- [`pre-commit`](https://pre-commit.com) hooks for autoformatting.
-- [`black`](https://github.com/psf/black) autoformatting.
-- [`flake8`](https://github.com/PyCQA/flake8) linting.
+- [`ruff`](https://github.com/astral-sh/ruff) autoformatting and linting.
- [python-xarray](https://stackoverflow.com/questions/tagged/python-xarray) on Stack Overflow.
- [@xarray_dev](https://twitter.com/xarray_dev) on Twitter.
- [xarray-dev](https://discord.gg/bsSGdwBn) discord community (normally only used for remote synchronous chat during sprints).
diff --git a/ci/min_deps_check.py b/ci/min_deps_check.py
index d6c845615d4..8e09bb1eb90 100755
--- a/ci/min_deps_check.py
+++ b/ci/min_deps_check.py
@@ -3,6 +3,7 @@
publication date. Compare it against requirements/min-all-deps.yml to verify the
policy on obsolete dependencies is being followed. Print a pretty report :)
"""
+
from __future__ import annotations
import itertools
@@ -16,7 +17,6 @@
CHANNELS = ["conda-forge", "defaults"]
IGNORE_DEPS = {
- "black",
"coveralls",
"flake8",
"hypothesis",
diff --git a/ci/requirements/all-but-dask.yml b/ci/requirements/all-but-dask.yml
index 2f47643cc87..1b6db04671f 100644
--- a/ci/requirements/all-but-dask.yml
+++ b/ci/requirements/all-but-dask.yml
@@ -3,7 +3,6 @@ channels:
- conda-forge
- nodefaults
dependencies:
- - black
- aiobotocore
- array-api-strict
- boto3
diff --git a/doc/contributing.rst b/doc/contributing.rst
index c3dc484f4c1..5f943e82558 100644
--- a/doc/contributing.rst
+++ b/doc/contributing.rst
@@ -549,11 +549,7 @@ Code Formatting
xarray uses several tools to ensure a consistent code format throughout the project:
-- `Black `_ for standardized
- code formatting,
-- `blackdoc `_ for
- standardized code formatting in documentation,
-- `ruff `_ for code quality checks and standardized order in imports
+- `ruff `_ for formatting, code quality checks, and standardized import ordering,
- `absolufy-imports `_ for absolute instead of relative imports from different files,
- `mypy `_ for static type checking on `type hints
`_.
@@ -1069,7 +1065,7 @@ PR checklist
- Test the code using `Pytest `_. Running all tests (type ``pytest`` in the root directory) takes a while, so feel free to only run the tests you think are needed based on your PR (example: ``pytest xarray/tests/test_dataarray.py``). CI will catch any failing tests.
- By default, the upstream dev CI is disabled on pull request and push events. You can override this behavior per commit by adding a ``[test-upstream]`` tag to the first line of the commit message. For documentation-only commits, you can skip the CI per commit by adding a ``[skip-ci]`` tag to the first line of the commit message.
-- **Properly format your code** and verify that it passes the formatting guidelines set by `Black `_ and `Flake8 `_. See `"Code formatting" `_. You can use `pre-commit `_ to run these automatically on each commit.
+- **Properly format your code** and verify that it passes the formatting guidelines set by `ruff `_. See `"Code formatting" `_. You can use `pre-commit `_ to run these automatically on each commit.
- Run ``pre-commit run --all-files`` in the root directory. This may modify some files. Confirm and commit any formatting changes.
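The two local commands this checklist leans on are ``pre-commit run --all-files`` and a targeted ``pytest`` run. A minimal sketch of driving both from Python, assuming ``pre-commit`` and ``pytest`` are installed in the active environment::

    import subprocess

    # Formatting/linting hooks over the whole repository, then one test module;
    # both command lines are taken verbatim from the checklist above.
    subprocess.run(["pre-commit", "run", "--all-files"], check=True)
    subprocess.run(["pytest", "xarray/tests/test_dataarray.py"], check=True)
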
diff --git a/pyproject.toml b/pyproject.toml
index c23d12ffba1..8dad98444ac 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -233,7 +233,7 @@ extend-exclude = [
[tool.ruff.lint]
# E402: module level import not at top of file
-# E501: line too long - let black worry about that
+# E501: line too long - let the formatter worry about that
# E731: do not assign a lambda expression, use a def
extend-safe-fixes = [
"TID252", # absolute imports
diff --git a/xarray/backends/common.py b/xarray/backends/common.py
index dd169cdbc7e..8e7ef9dabd9 100644
--- a/xarray/backends/common.py
+++ b/xarray/backends/common.py
@@ -206,13 +206,10 @@ def load(self):
For example::
class SuffixAppendingDataStore(AbstractDataStore):
-
def load(self):
variables, attributes = AbstractDataStore.load(self)
- variables = {'%s_suffix' % k: v
- for k, v in variables.items()}
- attributes = {'%s_suffix' % k: v
- for k, v in attributes.items()}
+ variables = {"%s_suffix" % k: v for k, v in variables.items()}
+ attributes = {"%s_suffix" % k: v for k, v in attributes.items()}
return variables, attributes
This function will be called anytime variables or attributes
diff --git a/xarray/backends/file_manager.py b/xarray/backends/file_manager.py
index 9caaf013494..77c6859650f 100644
--- a/xarray/backends/file_manager.py
+++ b/xarray/backends/file_manager.py
@@ -65,7 +65,7 @@ class CachingFileManager(FileManager):
Example usage::
- manager = FileManager(open, 'example.txt', mode='w')
+ manager = FileManager(open, "example.txt", mode="w")
f = manager.acquire()
f.write(...)
manager.close() # ensures file is closed
diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py
index 29a9d3d18f5..a81611c9e5c 100644
--- a/xarray/backends/h5netcdf_.py
+++ b/xarray/backends/h5netcdf_.py
@@ -474,7 +474,6 @@ def open_datatree(
driver_kwds=None,
**kwargs,
) -> DataTree:
-
from xarray.core.datatree import DataTree
groups_dict = self.open_groups_as_dict(
@@ -520,7 +519,6 @@ def open_groups_as_dict(
driver_kwds=None,
**kwargs,
) -> dict[str, Dataset]:
-
from xarray.backends.common import _iter_nc_groups
from xarray.core.treenode import NodePath
from xarray.core.utils import close_on_error
diff --git a/xarray/backends/netCDF4_.py b/xarray/backends/netCDF4_.py
index 4b6c5e16334..bff9de8bc69 100644
--- a/xarray/backends/netCDF4_.py
+++ b/xarray/backends/netCDF4_.py
@@ -710,7 +710,6 @@ def open_datatree(
autoclose=False,
**kwargs,
) -> DataTree:
-
from xarray.core.datatree import DataTree
groups_dict = self.open_groups_as_dict(
diff --git a/xarray/backends/plugins.py b/xarray/backends/plugins.py
index a7389f63c6e..dc12982d103 100644
--- a/xarray/backends/plugins.py
+++ b/xarray/backends/plugins.py
@@ -80,7 +80,7 @@ def backends_dict_from_pkg(
def set_missing_parameters(
- backend_entrypoints: dict[str, type[BackendEntrypoint]]
+ backend_entrypoints: dict[str, type[BackendEntrypoint]],
) -> None:
for _, backend in backend_entrypoints.items():
if backend.open_dataset_parameters is None:
@@ -89,7 +89,7 @@ def set_missing_parameters(
def sort_backends(
- backend_entrypoints: dict[str, type[BackendEntrypoint]]
+ backend_entrypoints: dict[str, type[BackendEntrypoint]],
) -> dict[str, type[BackendEntrypoint]]:
ordered_backends_entrypoints = {}
for be_name in STANDARD_BACKENDS_ORDER:
diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py
index 107214f0476..a2d6213f22f 100644
--- a/xarray/backends/zarr.py
+++ b/xarray/backends/zarr.py
@@ -496,7 +496,6 @@ def open_store(
zarr_version=None,
write_empty: bool | None = None,
):
-
zarr_group, consolidate_on_close, close_store_on_close = _get_open_params(
store=store,
mode=mode,
@@ -542,7 +541,6 @@ def open_group(
zarr_version=None,
write_empty: bool | None = None,
):
-
zarr_group, consolidate_on_close, close_store_on_close = _get_open_params(
store=store,
mode=mode,
@@ -1338,7 +1336,6 @@ def open_groups_as_dict(
zarr_version=None,
**kwargs,
) -> dict[str, Dataset]:
-
from xarray.core.treenode import NodePath
filename_or_obj = _normalize_path(filename_or_obj)
@@ -1385,7 +1382,6 @@ def open_groups_as_dict(
def _iter_zarr_groups(root: ZarrGroup, parent: str = "/") -> Iterable[str]:
-
parent_nodepath = NodePath(parent)
yield str(parent_nodepath)
for path, group in root.groups():
diff --git a/xarray/convert.py b/xarray/convert.py
index b8d81ccf9f0..14df7cadb9b 100644
--- a/xarray/convert.py
+++ b/xarray/convert.py
@@ -1,5 +1,4 @@
-"""Functions for converting to and from xarray objects
-"""
+"""Functions for converting to and from xarray objects"""
from collections import Counter
diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py
index dde38e86695..3735bf1099c 100644
--- a/xarray/core/dataarray.py
+++ b/xarray/core/dataarray.py
@@ -1582,7 +1582,7 @@ def sel(
Do not try to assign values when using any of the indexing methods
``isel`` or ``sel``::
- da = xr.DataArray([0, 1, 2, 3], dims=['x'])
+ da = xr.DataArray([0, 1, 2, 3], dims=["x"])
# DO NOT do this
da.isel(x=[0, 1, 2])[1] = -1
diff --git a/xarray/core/datatree.py b/xarray/core/datatree.py
index 1c1b5042caa..971b4fc8e88 100644
--- a/xarray/core/datatree.py
+++ b/xarray/core/datatree.py
@@ -801,7 +801,6 @@ def _replace_node(
data: Dataset | Default = _default,
children: dict[str, DataTree] | Default = _default,
) -> None:
-
ds = self.to_dataset(inherit=False) if data is _default else data
if children is _default:
diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py
index 5ef3b9924a0..d2252e90508 100644
--- a/xarray/core/formatting.py
+++ b/xarray/core/formatting.py
@@ -1,5 +1,4 @@
-"""String formatting routines for __repr__.
-"""
+"""String formatting routines for __repr__."""
from __future__ import annotations
diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py
index 92f0572d37a..b09d7cf852c 100644
--- a/xarray/core/groupby.py
+++ b/xarray/core/groupby.py
@@ -235,7 +235,9 @@ def to_array(self) -> DataArray:
T_Group = Union["T_DataArray", _DummyGroup]
-def _ensure_1d(group: T_Group, obj: T_DataWithCoords) -> tuple[
+def _ensure_1d(
+ group: T_Group, obj: T_DataWithCoords
+) -> tuple[
T_Group,
T_DataWithCoords,
Hashable | None,
@@ -462,7 +464,10 @@ def factorize(self) -> EncodedGroups:
)
# NaNs; as well as values outside the bins are coded by -1
# Restore these after the raveling
- mask = functools.reduce(np.logical_or, [(code == -1) for code in broadcasted_codes]) # type: ignore[arg-type]
+ mask = functools.reduce(
+ np.logical_or, # type: ignore[arg-type]
+ [(code == -1) for code in broadcasted_codes],
+ )
_flatcodes[mask] = -1
midx = pd.MultiIndex.from_product(
@@ -1288,7 +1293,6 @@ def _concat_shortcut(self, applied, dim, positions=None):
return self._obj._replace_maybe_drop_dims(reordered)
def _restore_dim_order(self, stacked: DataArray) -> DataArray:
-
def lookup_order(dimension):
for grouper in self.groupers:
if dimension == grouper.name and grouper.group.ndim == 1:
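The reformatted ``functools.reduce`` call above keeps its behaviour: every per-variable code array contributes its ``-1`` (missing or out-of-bins) positions, and the results are OR-ed into one mask. A standalone sketch of that idiom with made-up arrays::

    import functools

    import numpy as np

    codes = [np.array([0, -1, 2]), np.array([1, 1, -1])]
    # An element is masked if *any* code array marks it with -1.
    mask = functools.reduce(np.logical_or, [(code == -1) for code in codes])
    print(mask)  # [False  True  True]
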
diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py
index 5abc2129e3e..4a735959298 100644
--- a/xarray/core/indexes.py
+++ b/xarray/core/indexes.py
@@ -1768,7 +1768,7 @@ def indexes_equal(
def indexes_all_equal(
- elements: Sequence[tuple[Index, dict[Hashable, Variable]]]
+ elements: Sequence[tuple[Index, dict[Hashable, Variable]]],
) -> bool:
"""Check if indexes are all equal.
diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py
index 12be026e539..84728007b42 100644
--- a/xarray/core/parallel.py
+++ b/xarray/core/parallel.py
@@ -372,7 +372,8 @@ def _wrapper(
# ChainMap wants MutableMapping, but xindexes is Mapping
merged_indexes = collections.ChainMap(
- expected["indexes"], merged_coordinates.xindexes # type: ignore[arg-type]
+ expected["indexes"],
+ merged_coordinates.xindexes, # type: ignore[arg-type]
)
expected_index = merged_indexes.get(name, None)
if expected_index is not None and not index.equals(expected_index):
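The ``collections.ChainMap`` being re-wrapped above searches its mappings left to right, so entries in ``expected["indexes"]`` shadow those in ``merged_coordinates.xindexes``. A minimal illustration with placeholder dictionaries::

    import collections

    expected_indexes = {"x": "expected-index"}
    merged_indexes = {"x": "merged-index", "y": "other-index"}
    chained = collections.ChainMap(expected_indexes, merged_indexes)
    # Lookups return the value from the first mapping that contains the key.
    print(chained.get("x"), chained.get("y"))  # expected-index other-index
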
diff --git a/xarray/core/resample.py b/xarray/core/resample.py
index 677de48f0b6..9dd91d86a47 100644
--- a/xarray/core/resample.py
+++ b/xarray/core/resample.py
@@ -188,7 +188,9 @@ def _interpolate(self, kind="linear", **kwargs) -> T_Xarray:
# https://github.com/python/mypy/issues/9031
-class DataArrayResample(Resample["DataArray"], DataArrayGroupByBase, DataArrayResampleAggregations): # type: ignore[misc]
+class DataArrayResample( # type: ignore[misc]
+ Resample["DataArray"], DataArrayGroupByBase, DataArrayResampleAggregations
+):
"""DataArrayGroupBy object specialized to time resampling operations over a
specified dimension
"""
@@ -329,7 +331,9 @@ def asfreq(self) -> DataArray:
# https://github.com/python/mypy/issues/9031
-class DatasetResample(Resample["Dataset"], DatasetGroupByBase, DatasetResampleAggregations): # type: ignore[misc]
+class DatasetResample( # type: ignore[misc]
+ Resample["Dataset"], DatasetGroupByBase, DatasetResampleAggregations
+):
"""DatasetGroupBy object specialized to resampling a specified dimension"""
def map(
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index e0679d67891..13053faff58 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -839,7 +839,6 @@ def _getitem_with_mask(self, key, fill_value=dtypes.NA):
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
-
if is_duck_dask_array(self._data):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
diff --git a/xarray/namedarray/_array_api.py b/xarray/namedarray/_array_api.py
index acbfc8af4f1..9cd064b110e 100644
--- a/xarray/namedarray/_array_api.py
+++ b/xarray/namedarray/_array_api.py
@@ -79,7 +79,8 @@ def astype(
def imag(
- x: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], / # type: ignore[type-var]
+ x: NamedArray[_ShapeType, np.dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var]
+ /,
) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]:
"""
Returns the imaginary component of a complex number for each element x_i of the
@@ -111,7 +112,8 @@ def imag(
def real(
- x: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], / # type: ignore[type-var]
+ x: NamedArray[_ShapeType, np.dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var]
+ /,
) -> NamedArray[_ShapeType, np.dtype[_ScalarType]]:
"""
Returns the real component of a complex number for each element x_i of the
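In both signatures above the bare ``/`` marks the preceding parameter as positional-only; the reformat only moves it (and the ``type: ignore`` comment) onto separate lines. A toy reminder of the semantics, unrelated to the xarray functions::

    def f(x, /):
        # ``x`` is positional-only and cannot be passed as a keyword.
        return x

    f(3.0)        # fine
    # f(x=3.0)    # would raise TypeError: positional-only argument passed as keyword
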
diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py
index 6d8deb5a66a..1d61c5afc74 100644
--- a/xarray/plot/utils.py
+++ b/xarray/plot/utils.py
@@ -986,7 +986,7 @@ def legend_elements(
This is useful for obtaining a legend for a `~.Axes.scatter` plot;
e.g.::
- scatter = plt.scatter([1, 2, 3], [4, 5, 6], c=[7, 2, 3])
+ scatter = plt.scatter([1, 2, 3], [4, 5, 6], c=[7, 2, 3])
plt.legend(*scatter.legend_elements())
creates three legend elements, one for each color with the numerical
diff --git a/xarray/tests/test_cftime_offsets.py b/xarray/tests/test_cftime_offsets.py
index 11e56e2adad..f6f97108c1d 100644
--- a/xarray/tests/test_cftime_offsets.py
+++ b/xarray/tests/test_cftime_offsets.py
@@ -1673,7 +1673,6 @@ def test_new_to_legacy_freq_anchored(year_alias, n):
),
)
def test_legacy_to_new_freq_pd_freq_passthrough(freq, expected):
-
result = _legacy_to_new_freq(freq)
assert result == expected
@@ -1699,7 +1698,6 @@ def test_legacy_to_new_freq_pd_freq_passthrough(freq, expected):
),
)
def test_new_to_legacy_freq_pd_freq_passthrough(freq, expected):
-
result = _new_to_legacy_freq(freq)
assert result == expected
@@ -1786,7 +1784,6 @@ def test_date_range_no_freq(start, end, periods):
)
@pytest.mark.parametrize("has_year_zero", [False, True])
def test_offset_addition_preserves_has_year_zero(offset, has_year_zero):
-
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="this date/calendar/year zero")
datetime = cftime.DatetimeGregorian(-1, 12, 31, has_year_zero=has_year_zero)
diff --git a/xarray/tests/test_coordinates.py b/xarray/tests/test_coordinates.py
index b167332d38b..91cb9754f9a 100644
--- a/xarray/tests/test_coordinates.py
+++ b/xarray/tests/test_coordinates.py
@@ -64,7 +64,10 @@ def test_init_index_error(self) -> None:
Coordinates(indexes={"x": idx})
with pytest.raises(TypeError, match=".* is not an `xarray.indexes.Index`"):
- Coordinates(coords={"x": ("x", [1, 2, 3])}, indexes={"x": "not_an_xarray_index"}) # type: ignore[dict-item]
+ Coordinates(
+ coords={"x": ("x", [1, 2, 3])},
+ indexes={"x": "not_an_xarray_index"}, # type: ignore[dict-item]
+ )
def test_init_dim_sizes_conflict(self) -> None:
with pytest.raises(ValueError):
diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py
index eafc11b630c..c89cfa85622 100644
--- a/xarray/tests/test_dataset.py
+++ b/xarray/tests/test_dataset.py
@@ -297,9 +297,7 @@ def test_repr(self) -> None:
var2 (dim1, dim2) float64 576B 1.162 -1.097 -2.123 ... 1.267 0.3328
var3 (dim3, dim1) float64 640B 0.5565 -0.2121 0.4563 ... -0.2452 -0.3616
Attributes:
- foo: bar""".format(
- data["dim3"].dtype
- )
+ foo: bar""".format(data["dim3"].dtype)
)
actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
print(actual)
diff --git a/xarray/tests/test_datatree.py b/xarray/tests/test_datatree.py
index 686b45968a0..2c020a021e3 100644
--- a/xarray/tests/test_datatree.py
+++ b/xarray/tests/test_datatree.py
@@ -1031,7 +1031,6 @@ def test_operation_with_attrs_but_no_data(self) -> None:
class TestRepr:
-
def test_repr_four_nodes(self) -> None:
dt = DataTree.from_dict(
{
@@ -1581,7 +1580,6 @@ def f(x, tree, y):
class TestEqualsAndIdentical:
-
def test_minimal_variations(self):
tree = DataTree.from_dict(
{
@@ -1729,7 +1727,6 @@ def test_filter(self) -> None:
class TestIndexing:
-
def test_isel_siblings(self) -> None:
tree = DataTree.from_dict(
{
@@ -1835,7 +1832,6 @@ def test_sel(self) -> None:
class TestAggregations:
-
def test_reduce_method(self) -> None:
ds = xr.Dataset({"a": ("x", [False, True, False])})
dt = DataTree.from_dict({"/": ds, "/results": ds})
@@ -2061,7 +2057,6 @@ def test_dont_broadcast_single_node_tree(self) -> None:
class TestUFuncs:
-
@pytest.mark.xfail(reason="__array_ufunc__ not implemented yet")
def test_tree(self, create_test_datatree):
dt = create_test_datatree()
@@ -2074,7 +2069,6 @@ class TestDocInsertion:
"""Tests map_over_datasets docstring injection."""
def test_standard_doc(self):
-
dataset_doc = dedent(
"""\
Manually trigger loading and/or computation of this dataset's data
diff --git a/xarray/tests/test_distributed.py b/xarray/tests/test_distributed.py
index ca37fbd3d99..9d68d1899d8 100644
--- a/xarray/tests/test_distributed.py
+++ b/xarray/tests/test_distributed.py
@@ -1,4 +1,4 @@
-""" isort:skip_file """
+"""isort:skip_file"""
from __future__ import annotations
diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
index 688f41a7f92..bcce737d15a 100644
--- a/xarray/tests/test_formatting.py
+++ b/xarray/tests/test_formatting.py
@@ -1034,7 +1034,6 @@ def test_display_nbytes() -> None:
def test_array_repr_dtypes():
-
# These dtypes are expected to be represented similarly
# on Ubuntu, macOS and Windows environments of the CI.
# Unsigned integer could be used as easy replacements
diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py
index 92c21cc32fb..698849cb7fe 100644
--- a/xarray/tests/test_indexing.py
+++ b/xarray/tests/test_indexing.py
@@ -350,7 +350,6 @@ def test_lazily_indexed_array(self) -> None:
([0, 3, 5], arr[:2]),
]
for i, j in indexers:
-
expected_b = v[i][j]
actual = v_lazy[i][j]
assert expected_b.shape == actual.shape
@@ -416,7 +415,6 @@ def check_indexing(v_eager, v_lazy, indexers):
check_indexing(v_eager, v_lazy, indexers)
def test_lazily_indexed_array_vindex_setitem(self) -> None:
-
lazy = indexing.LazilyIndexedArray(np.random.rand(10, 20, 30))
# vectorized indexing
diff --git a/xarray/tests/test_interp.py b/xarray/tests/test_interp.py
index d02e12dd695..6722e8d9404 100644
--- a/xarray/tests/test_interp.py
+++ b/xarray/tests/test_interp.py
@@ -180,7 +180,6 @@ def func(obj, dim, new_x, method):
da[dim], obj.data, axis=obj.get_axis_num(dim), **scipy_kwargs
)(new_x).reshape(shape)
else:
-
return scipy.interpolate.interp1d(
da[dim],
obj.data,
diff --git a/xarray/tests/test_namedarray.py b/xarray/tests/test_namedarray.py
index 39ca08fa06c..7bd2c3bec06 100644
--- a/xarray/tests/test_namedarray.py
+++ b/xarray/tests/test_namedarray.py
@@ -57,7 +57,6 @@ class CustomArray(
def __array__(
self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
) -> np.ndarray[Any, np.dtype[np.generic]]:
-
if Version(np.__version__) >= Version("2.0.0"):
return np.asarray(self.array, dtype=dtype, copy=copy)
else:
diff --git a/xarray/tests/test_treenode.py b/xarray/tests/test_treenode.py
index 6a50d8ec8e5..befb5c68e72 100644
--- a/xarray/tests/test_treenode.py
+++ b/xarray/tests/test_treenode.py
@@ -304,7 +304,6 @@ def create_test_tree() -> tuple[NamedNode, NamedNode]:
class TestZipSubtrees:
-
def test_one_tree(self) -> None:
root, _ = create_test_tree()
expected = [
@@ -351,7 +350,6 @@ def test_different_structure(self) -> None:
class TestAncestry:
-
def test_parents(self) -> None:
_, leaf_f = create_test_tree()
expected = ["e", "b", "a"]