Repo review bugbear #9505

Merged: 14 commits, Sep 19, 2024
Changes from all commits
8 changes: 4 additions & 4 deletions asv_bench/benchmarks/__init__.py
@@ -18,15 +18,15 @@ def decorator(func):
def requires_dask():
try:
import dask # noqa: F401
except ImportError:
raise NotImplementedError()
except ImportError as err:
raise NotImplementedError() from err


def requires_sparse():
try:
import sparse # noqa: F401
except ImportError:
raise NotImplementedError()
except ImportError as err:
raise NotImplementedError() from err


def randn(shape, frac_nan=None, chunks=None, seed=0):
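Most hunks in this PR apply bugbear's B904: a raise inside an except block should chain the original exception with "from". A minimal sketch of the difference, using a made-up module name (some_optional_dep is hypothetical, standing in for dask/sparse above):

```python
def requires_optional():
    try:
        import some_optional_dep  # hypothetical optional dependency
    except ImportError as err:
        # Chaining with "from err" marks the ImportError as the direct
        # cause, so tracebacks read "The above exception was the direct
        # cause of ..." instead of the misleading "During handling of the
        # above exception, another exception occurred".
        raise NotImplementedError("optional dependency missing") from err
```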
6 changes: 3 additions & 3 deletions asv_bench/benchmarks/accessors.py
@@ -16,10 +16,10 @@ def setup(self, calendar):
self.da = xr.DataArray(data, dims="time", coords={"time": time})

def time_dayofyear(self, calendar):
self.da.time.dt.dayofyear
_ = self.da.time.dt.dayofyear

def time_year(self, calendar):
self.da.time.dt.year
_ = self.da.time.dt.year

def time_floor(self, calendar):
self.da.time.dt.floor("D")
_ = self.da.time.dt.floor("D")
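The "_ =" assignments address bugbear's B018, which flags a bare attribute access as a useless expression; these ASV benchmarks perform the access purely to time it. A sketch of the pattern (the class and data below are illustrative, not part of this PR):

```python
import numpy as np
import xarray as xr

class TimeAttributeAccess:
    def setup(self):
        self.da = xr.DataArray(np.arange(1000), dims="x")

    def time_dtype(self):
        # A bare `self.da.dtype` would trip B018 ("useless expression");
        # binding the result to `_` keeps the timed access and silences it.
        _ = self.da.dtype
```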
4 changes: 2 additions & 2 deletions asv_bench/benchmarks/dataset_io.py
@@ -606,8 +606,8 @@ def setup(self):

try:
import distributed
except ImportError:
raise NotImplementedError()
except ImportError as err:
raise NotImplementedError() from err

self.client = distributed.Client()
self.write = create_delayed_write()
4 changes: 2 additions & 2 deletions ci/min_deps_check.py
@@ -68,8 +68,8 @@ def parse_requirements(fname) -> Iterator[tuple[str, int, int, int | None]]:

try:
version_tup = tuple(int(x) for x in version.split("."))
except ValueError:
raise ValueError("non-numerical version: " + row)
except ValueError as err:
raise ValueError("non-numerical version: " + row) from err

if len(version_tup) == 2:
yield (pkg, *version_tup, None) # type: ignore[misc]
2 changes: 1 addition & 1 deletion doc/user-guide/hierarchical-data.rst
@@ -200,7 +200,7 @@ and even the distinguishing feature of the common ancestor of any two species (t

.. ipython:: python

[node.name for node in primates.ancestors]
[node.name for node in reversed(primates.parents)]
primates.root.name
primates.find_common_ancestor(dinosaurs).name

3 changes: 2 additions & 1 deletion pyproject.toml
@@ -246,13 +246,14 @@ extend-exclude = [
extend-safe-fixes = [
"TID252", # absolute imports
]
extend-ignore = [
ignore = [
"E402",
"E501",
"E731",
"UP007",
]
extend-select = [
"B", # flake8-bugbear
"F", # Pyflakes
"E", # Pycodestyle
"W",
16 changes: 8 additions & 8 deletions xarray/backends/api.py
@@ -99,11 +99,11 @@ def _get_default_engine_remote_uri() -> Literal["netcdf4", "pydap"]:
import pydap # noqa: F401

engine = "pydap"
except ImportError:
except ImportError as err:
raise ValueError(
"netCDF4 or pydap is required for accessing "
"remote datasets via OPeNDAP"
)
) from err
return engine


@@ -112,8 +112,8 @@ def _get_default_engine_gz() -> Literal["scipy"]:
import scipy # noqa: F401

engine: Final = "scipy"
except ImportError: # pragma: no cover
raise ValueError("scipy is required for accessing .gz files")
except ImportError as err: # pragma: no cover
raise ValueError("scipy is required for accessing .gz files") from err
return engine


@@ -128,11 +128,11 @@ def _get_default_engine_netcdf() -> Literal["netcdf4", "scipy"]:
import scipy.io.netcdf # noqa: F401

engine = "scipy"
except ImportError:
except ImportError as err:
raise ValueError(
"cannot read or write netCDF files without "
"netCDF4-python or scipy installed"
)
) from err
return engine


@@ -1374,8 +1374,8 @@ def to_netcdf(

try:
store_open = WRITEABLE_STORES[engine]
except KeyError:
raise ValueError(f"unrecognized engine for to_netcdf: {engine!r}")
except KeyError as err:
raise ValueError(f"unrecognized engine for to_netcdf: {engine!r}") from err

if format is not None:
format = format.upper() # type: ignore[assignment]
6 changes: 3 additions & 3 deletions xarray/backends/netCDF4_.py
@@ -111,7 +111,7 @@ def _getitem(self, key):
with self.datastore.lock:
original_array = self.get_array(needs_lock=False)
array = getitem(original_array, key)
except IndexError:
except IndexError as err:
# Catch IndexError in netCDF4 and return a more informative
# error message. This is most often called when an unsorted
# indexer is used before the data is loaded from disk.
@@ -120,7 +120,7 @@ def _getitem(self, key):
"is not valid on netCDF4.Variable object. Try loading "
"your data into memory first by calling .load()."
)
raise IndexError(msg)
raise IndexError(msg) from err
return array


@@ -192,7 +192,7 @@ def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):
ds = create_group(ds, key)
else:
# wrap error to provide slightly more helpful message
raise OSError(f"group not found: {key}", e)
raise OSError(f"group not found: {key}", e) from e
return ds


13 changes: 10 additions & 3 deletions xarray/backends/plugins.py
@@ -40,6 +40,7 @@ def remove_duplicates(entrypoints: EntryPoints) -> list[EntryPoint]:
f"\n {all_module_names}.\n "
f"The entrypoint {selected_module_name} will be used.",
RuntimeWarning,
stacklevel=2,
)
return unique_entrypoints

@@ -72,7 +73,9 @@ def backends_dict_from_pkg(
backend = entrypoint.load()
backend_entrypoints[name] = backend
except Exception as ex:
warnings.warn(f"Engine {name!r} loading failed:\n{ex}", RuntimeWarning)
warnings.warn(
f"Engine {name!r} loading failed:\n{ex}", RuntimeWarning, stacklevel=2
)
return backend_entrypoints

Review thread on this hunk:

Contributor: It would be better to replace all of these with utils.emit_user_level_warning.

Collaborator: I also think it's fine to do this in another PR, though I defer to @dcherian. We could also add a regex lint in pre-commit so that we never use warnings.warn directly, only utils.emit_user_level_warning. Here's an example of this from another repo: https://github.com/PRQL/prql/blob/main/.pre-commit-config.yaml#L64-L71

Contributor: Doing it in another PR is fine by me!


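For context on the stacklevel=2 additions (and the emit_user_level_warning suggestion in the thread above): stacklevel selects which stack frame a warning is attributed to, so 2 points at the caller of the warning function rather than at the warnings.warn line itself. A minimal sketch using only the standard library:

```python
import warnings

def deprecated_helper():
    # stacklevel=1 (the default) would attribute the warning to this line;
    # stacklevel=2 attributes it to the caller, where the fix belongs.
    warnings.warn("use new_helper() instead", FutureWarning, stacklevel=2)

def user_code():
    deprecated_helper()  # the emitted warning points at this line

user_code()
```

A helper such as utils.emit_user_level_warning can compute an appropriate stacklevel automatically instead of hard-coding 2 at each call site.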
@@ -146,7 +149,9 @@ def guess_engine(
except PermissionError:
raise
except Exception:
warnings.warn(f"{engine!r} fails while guessing", RuntimeWarning)
warnings.warn(
f"{engine!r} fails while guessing", RuntimeWarning, stacklevel=2
)

compatible_engines = []
for engine, (_, backend_cls) in BACKEND_ENTRYPOINTS.items():
@@ -155,7 +160,9 @@
if backend.guess_can_open(store_spec):
compatible_engines.append(engine)
except Exception:
warnings.warn(f"{engine!r} fails while guessing", RuntimeWarning)
warnings.warn(
f"{engine!r} fails while guessing", RuntimeWarning, stacklevel=2
)

installed_engines = [k for k in engines if k != "store"]
if not compatible_engines:
6 changes: 4 additions & 2 deletions xarray/backends/scipy_.py
@@ -114,7 +114,9 @@ def _open_scipy_netcdf(filename, mode, mmap, version):
# TODO: gzipped loading only works with NetCDF3 files.
errmsg = e.args[0]
if "is not a valid NetCDF 3 file" in errmsg:
raise ValueError("gzipped file loading only supports NetCDF 3 files.")
raise ValueError(
"gzipped file loading only supports NetCDF 3 files."
) from e
else:
raise

@@ -134,7 +136,7 @@ def _open_scipy_netcdf(filename, mode, mmap, version):
$ pip install netcdf4
"""
errmsg += msg
raise TypeError(errmsg)
raise TypeError(errmsg) from e
else:
raise

8 changes: 5 additions & 3 deletions xarray/backends/zarr.py
@@ -577,7 +577,7 @@ def get_attrs(self):
def get_dimensions(self):
try_nczarr = self._mode == "r"
dimensions = {}
for k, v in self.zarr_group.arrays():
for _k, v in self.zarr_group.arrays():
dim_names, _ = _get_zarr_dims_and_attrs(v, DIMENSION_KEY, try_nczarr)
for d, s in zip(dim_names, v.shape, strict=True):
if d in dimensions and dimensions[d] != s:
@@ -1374,8 +1374,10 @@ def _get_open_params(
RuntimeWarning,
stacklevel=stacklevel,
)
except zarr.errors.GroupNotFoundError:
raise FileNotFoundError(f"No such file or directory: '{store}'")
except zarr.errors.GroupNotFoundError as err:
raise FileNotFoundError(
f"No such file or directory: '{store}'"
) from err
elif consolidated:
# TODO: an option to pass the metadata_key keyword
zarr_group = zarr.open_consolidated(store, **open_kwargs)
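The k to _k rename in get_dimensions addresses bugbear's B007 (unused loop control variable); the leading underscore marks the name as intentionally unused. A trivial sketch:

```python
pairs = {"a": 1, "b": 2}
total = 0
for _key, value in pairs.items():  # B007 flags `key` if it is never used
    total += value
print(total)  # 3
```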
12 changes: 6 additions & 6 deletions xarray/coding/cftimeindex.py
@@ -439,8 +439,8 @@ def _get_string_slice(self, key):
parsed, resolution = _parse_iso8601_with_reso(self.date_type, key)
try:
loc = self._partial_date_slice(resolution, parsed)
except KeyError:
raise KeyError(key)
except KeyError as err:
raise KeyError(key) from err
return loc

def _get_nearest_indexer(self, target, limit, tolerance):
@@ -593,21 +593,21 @@ def __sub__(self, other):
if _contains_cftime_datetimes(np.array(other)):
try:
return pd.TimedeltaIndex(np.array(self) - np.array(other))
except OUT_OF_BOUNDS_TIMEDELTA_ERRORS:
except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err:
raise ValueError(
"The time difference exceeds the range of values "
"that can be expressed at the nanosecond resolution."
)
) from err
return NotImplemented

def __rsub__(self, other):
try:
return pd.TimedeltaIndex(other - np.array(self))
except OUT_OF_BOUNDS_TIMEDELTA_ERRORS:
except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err:
raise ValueError(
"The time difference exceeds the range of values "
"that can be expressed at the nanosecond resolution."
)
) from err

def to_datetimeindex(self, unsafe=False):
"""If possible, convert this index to a pandas.DatetimeIndex.
14 changes: 7 additions & 7 deletions xarray/coding/times.py
@@ -169,7 +169,7 @@ def _ensure_padded_year(ref_date: str) -> str:
"To remove this message, remove the ambiguity by padding your reference "
"date strings with zeros."
)
warnings.warn(warning_msg, SerializationWarning)
warnings.warn(warning_msg, SerializationWarning, stacklevel=2)

return ref_date_padded

@@ -216,7 +216,7 @@ def _decode_cf_datetime_dtype(

try:
result = decode_cf_datetime(example_value, units, calendar, use_cftime)
except Exception:
except Exception as err:
calendar_msg = (
"the default calendar" if calendar is None else f"calendar {calendar!r}"
)
@@ -225,7 +225,7 @@
"opening your dataset with decode_times=False or installing cftime "
"if it is not installed."
)
raise ValueError(msg)
raise ValueError(msg) from err
else:
dtype = getattr(result, "dtype", np.dtype("object"))

@@ -269,10 +269,10 @@ def _decode_datetime_with_pandas(
# TODO: the strict enforcement of nanosecond precision Timestamps can be
# relaxed when addressing GitHub issue #7493.
ref_date = nanosecond_precision_timestamp(ref_date_str)
except ValueError:
except ValueError as err:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using cftime
raise OutOfBoundsDatetime
raise OutOfBoundsDatetime from err

with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered", RuntimeWarning)
@@ -497,7 +497,7 @@ def cftime_to_nptime(times, raise_on_invalid: bool = True) -> np.ndarray:
raise ValueError(
f"Cannot convert date {t} to a date in the "
f"standard calendar. Reason: {e}."
)
) from e
else:
dt = "NaT"
new[i] = np.datetime64(dt)
@@ -530,7 +530,7 @@ def convert_times(times, date_type, raise_on_invalid: bool = True) -> np.ndarray
raise ValueError(
f"Cannot convert date {t} to a date in the "
f"{date_type(2000, 1, 1).calendar} calendar. Reason: {e}."
)
) from e
else:
dt = np.nan

1 change: 1 addition & 0 deletions xarray/core/accessor_dt.py
@@ -454,6 +454,7 @@ def weekofyear(self) -> DataArray:
"dt.weekofyear and dt.week have been deprecated. Please use "
"dt.isocalendar().week instead.",
FutureWarning,
stacklevel=2,
)

weekofyear = self.isocalendar().week
2 changes: 1 addition & 1 deletion xarray/core/combine.py
@@ -308,7 +308,7 @@ def _combine_1d(
"xarray.combine_by_coords, or do it manually "
"with xarray.concat, xarray.merge and "
"xarray.align"
)
) from err
else:
raise
else:
11 changes: 7 additions & 4 deletions xarray/core/common.py
@@ -241,8 +241,10 @@ def _get_axis_num(self: Any, dim: Hashable) -> int:
_raise_if_any_duplicate_dimensions(self.dims)
try:
return self.dims.index(dim)
except ValueError:
raise ValueError(f"{dim!r} not found in array dimensions {self.dims!r}")
except ValueError as err:
raise ValueError(
f"{dim!r} not found in array dimensions {self.dims!r}"
) from err

@property
def sizes(self: Any) -> Mapping[Hashable, int]:
@@ -881,7 +883,8 @@ def rolling_exp(
warnings.warn(
"Passing ``keep_attrs`` to ``rolling_exp`` has no effect. Pass"
" ``keep_attrs`` directly to the applied function, e.g."
" ``rolling_exp(...).mean(keep_attrs=False)``."
" ``rolling_exp(...).mean(keep_attrs=False)``.",
stacklevel=2,
)

window = either_dict_or_kwargs(window, window_kwargs, "rolling_exp")
@@ -1514,7 +1517,7 @@ def full_like(
fill_value: Any,
dtype: DTypeMaybeMapping | None = None,
*,
chunks: T_Chunks = {},
chunks: T_Chunks = {}, # noqa: B006
chunked_array_type: str | None = None,
from_array_kwargs: dict[str, Any] | None = None,
) -> Dataset | DataArray: ...
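The "# noqa: B006" is a deliberate opt-out: B006 forbids mutable default arguments because the default object is created once at definition time and shared across calls. That is harmless in a typing overload stub like this one, whose body is never executed, but the general pitfall B006 guards against looks like this (function names are illustrative):

```python
def append_bad(item, bucket=[]):  # B006: one list shared by every call
    bucket.append(item)
    return bucket

def append_good(item, bucket=None):
    # The usual fix: default to None and allocate a fresh list per call.
    if bucket is None:
        bucket = []
    bucket.append(item)
    return bucket

print(append_bad(1), append_bad(2))    # [1, 2] [1, 2]  (shared state)
print(append_good(1), append_good(2))  # [1] [2]
```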
4 changes: 2 additions & 2 deletions xarray/core/concat.py
@@ -252,8 +252,8 @@ def concat(

try:
first_obj, objs = utils.peek_at(objs)
except StopIteration:
raise ValueError("must supply at least one object to concatenate")
except StopIteration as err:
raise ValueError("must supply at least one object to concatenate") from err

if compat not in _VALID_COMPAT:
raise ValueError(
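This hunk converts the StopIteration raised when peeking at an empty iterable into a user-facing ValueError, chained per B904. A sketch of a peek_at-style helper and the conversion (an illustration, not xarray's actual utils.peek_at):

```python
from itertools import chain

def peek_at(iterable):
    # Return the first element plus an iterator that still yields it.
    gen = iter(iterable)
    peek = next(gen)  # raises StopIteration if the iterable is empty
    return peek, chain([peek], gen)

def concat(objs):
    try:
        first_obj, objs = peek_at(objs)
    except StopIteration as err:
        raise ValueError("must supply at least one object to concatenate") from err
    return first_obj

concat([])  # ValueError, with the StopIteration shown as its direct cause
```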