Fix B904: raise from within except
Armavica committed Sep 17, 2024
1 parent 1414c66 commit 767a935
Showing 25 changed files with 94 additions and 80 deletions.
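
For context: B904 is a flake8-bugbear rule (also enforced by Ruff) that flags raising a new exception inside an `except` block without explicit chaining. Binding the caught exception (`except ImportError as err:`) and re-raising with `raise ... from err` records it as `__cause__`, so the traceback reads "The above exception was the direct cause of the following exception" instead of the implicit "During handling of the above exception, another exception occurred". A minimal sketch of the pattern applied throughout this commit (the imported module name is a placeholder):

    # Minimal sketch of the B904 fix; "some_dependency" is illustrative.
    try:
        import some_dependency  # noqa: F401
    except ImportError as err:
        # "from err" chains the original ImportError as __cause__, making
        # the causal link explicit in the resulting traceback.
        raise NotImplementedError("some_dependency is required") from err

(`raise ... from None` is the other form accepted by B904, for when the original exception should be suppressed entirely; this commit uses `from err`/`from e` throughout to preserve the cause.)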
8 changes: 4 additions & 4 deletions asv_bench/benchmarks/__init__.py
@@ -18,15 +18,15 @@ def decorator(func):
 def requires_dask():
     try:
         import dask  # noqa: F401
-    except ImportError:
-        raise NotImplementedError()
+    except ImportError as err:
+        raise NotImplementedError() from err


 def requires_sparse():
     try:
         import sparse  # noqa: F401
-    except ImportError:
-        raise NotImplementedError()
+    except ImportError as err:
+        raise NotImplementedError() from err


 def randn(shape, frac_nan=None, chunks=None, seed=0):
4 changes: 2 additions & 2 deletions asv_bench/benchmarks/dataset_io.py
@@ -606,8 +606,8 @@ def setup(self):

         try:
             import distributed
-        except ImportError:
-            raise NotImplementedError()
+        except ImportError as err:
+            raise NotImplementedError() from err

         self.client = distributed.Client()
         self.write = create_delayed_write()
4 changes: 2 additions & 2 deletions ci/min_deps_check.py
@@ -68,8 +68,8 @@ def parse_requirements(fname) -> Iterator[tuple[str, int, int, int | None]]:

     try:
         version_tup = tuple(int(x) for x in version.split("."))
-    except ValueError:
-        raise ValueError("non-numerical version: " + row)
+    except ValueError as err:
+        raise ValueError("non-numerical version: " + row) from err

     if len(version_tup) == 2:
         yield (pkg, *version_tup, None)  # type: ignore[misc]
16 changes: 8 additions & 8 deletions xarray/backends/api.py
@@ -99,11 +99,11 @@ def _get_default_engine_remote_uri() -> Literal["netcdf4", "pydap"]:
             import pydap  # noqa: F401

             engine = "pydap"
-        except ImportError:
+        except ImportError as err:
             raise ValueError(
                 "netCDF4 or pydap is required for accessing "
                 "remote datasets via OPeNDAP"
-            )
+            ) from err
     return engine


@@ -112,8 +112,8 @@ def _get_default_engine_gz() -> Literal["scipy"]:
         import scipy  # noqa: F401

         engine: Final = "scipy"
-    except ImportError:  # pragma: no cover
-        raise ValueError("scipy is required for accessing .gz files")
+    except ImportError as err:  # pragma: no cover
+        raise ValueError("scipy is required for accessing .gz files") from err
     return engine


@@ -128,11 +128,11 @@ def _get_default_engine_netcdf() -> Literal["netcdf4", "scipy"]:
         import scipy.io.netcdf  # noqa: F401

         engine = "scipy"
-    except ImportError:
+    except ImportError as err:
         raise ValueError(
             "cannot read or write netCDF files without "
             "netCDF4-python or scipy installed"
-        )
+        ) from err
     return engine


@@ -1374,8 +1374,8 @@ def to_netcdf(

     try:
         store_open = WRITEABLE_STORES[engine]
-    except KeyError:
-        raise ValueError(f"unrecognized engine for to_netcdf: {engine!r}")
+    except KeyError as err:
+        raise ValueError(f"unrecognized engine for to_netcdf: {engine!r}") from err

     if format is not None:
         format = format.upper()  # type: ignore[assignment]
6 changes: 3 additions & 3 deletions xarray/backends/netCDF4_.py
@@ -111,7 +111,7 @@ def _getitem(self, key):
             with self.datastore.lock:
                 original_array = self.get_array(needs_lock=False)
                 array = getitem(original_array, key)
-        except IndexError:
+        except IndexError as err:
             # Catch IndexError in netCDF4 and return a more informative
             # error message. This is most often called when an unsorted
             # indexer is used before the data is loaded from disk.
@@ -120,7 +120,7 @@ def _getitem(self, key):
                 "is not valid on netCDF4.Variable object. Try loading "
                 "your data into memory first by calling .load()."
             )
-            raise IndexError(msg)
+            raise IndexError(msg) from err
         return array


@@ -192,7 +192,7 @@ def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):
                     ds = create_group(ds, key)
                 else:
                     # wrap error to provide slightly more helpful message
-                    raise OSError(f"group not found: {key}", e)
+                    raise OSError(f"group not found: {key}", e) from e
     return ds
6 changes: 4 additions & 2 deletions xarray/backends/scipy_.py
@@ -114,7 +114,9 @@ def _open_scipy_netcdf(filename, mode, mmap, version):
             # TODO: gzipped loading only works with NetCDF3 files.
             errmsg = e.args[0]
             if "is not a valid NetCDF 3 file" in errmsg:
-                raise ValueError("gzipped file loading only supports NetCDF 3 files.")
+                raise ValueError(
+                    "gzipped file loading only supports NetCDF 3 files."
+                ) from e
             else:
                 raise

@@ -134,7 +136,7 @@ def _open_scipy_netcdf(filename, mode, mmap, version):
            $ pip install netcdf4
            """
            errmsg += msg
-            raise TypeError(errmsg)
+            raise TypeError(errmsg) from e
        else:
            raise
6 changes: 4 additions & 2 deletions xarray/backends/zarr.py
@@ -1374,8 +1374,10 @@ def _get_open_params(
                     RuntimeWarning,
                     stacklevel=stacklevel,
                 )
-            except zarr.errors.GroupNotFoundError:
-                raise FileNotFoundError(f"No such file or directory: '{store}'")
+            except zarr.errors.GroupNotFoundError as err:
+                raise FileNotFoundError(
+                    f"No such file or directory: '{store}'"
+                ) from err
     elif consolidated:
         # TODO: an option to pass the metadata_key keyword
         zarr_group = zarr.open_consolidated(store, **open_kwargs)
12 changes: 6 additions & 6 deletions xarray/coding/cftimeindex.py
@@ -439,8 +439,8 @@ def _get_string_slice(self, key):
         parsed, resolution = _parse_iso8601_with_reso(self.date_type, key)
         try:
             loc = self._partial_date_slice(resolution, parsed)
-        except KeyError:
-            raise KeyError(key)
+        except KeyError as err:
+            raise KeyError(key) from err
         return loc

     def _get_nearest_indexer(self, target, limit, tolerance):
@@ -593,21 +593,21 @@ def __sub__(self, other):
         if _contains_cftime_datetimes(np.array(other)):
             try:
                 return pd.TimedeltaIndex(np.array(self) - np.array(other))
-            except OUT_OF_BOUNDS_TIMEDELTA_ERRORS:
+            except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err:
                 raise ValueError(
                     "The time difference exceeds the range of values "
                     "that can be expressed at the nanosecond resolution."
-                )
+                ) from err
         return NotImplemented

     def __rsub__(self, other):
         try:
             return pd.TimedeltaIndex(other - np.array(self))
-        except OUT_OF_BOUNDS_TIMEDELTA_ERRORS:
+        except OUT_OF_BOUNDS_TIMEDELTA_ERRORS as err:
             raise ValueError(
                 "The time difference exceeds the range of values "
                 "that can be expressed at the nanosecond resolution."
-            )
+            ) from err

     def to_datetimeindex(self, unsafe=False):
         """If possible, convert this index to a pandas.DatetimeIndex.
12 changes: 6 additions & 6 deletions xarray/coding/times.py
@@ -216,7 +216,7 @@ def _decode_cf_datetime_dtype(

     try:
         result = decode_cf_datetime(example_value, units, calendar, use_cftime)
-    except Exception:
+    except Exception as err:
         calendar_msg = (
             "the default calendar" if calendar is None else f"calendar {calendar!r}"
         )
@@ -225,7 +225,7 @@ def _decode_cf_datetime_dtype(
             "opening your dataset with decode_times=False or installing cftime "
             "if it is not installed."
         )
-        raise ValueError(msg)
+        raise ValueError(msg) from err
     else:
         dtype = getattr(result, "dtype", np.dtype("object"))

@@ -260,10 +260,10 @@ def _decode_datetime_with_pandas(
         # TODO: the strict enforcement of nanosecond precision Timestamps can be
         # relaxed when addressing GitHub issue #7493.
         ref_date = nanosecond_precision_timestamp(ref_date_str)
-    except ValueError:
+    except ValueError as err:
         # ValueError is raised by pd.Timestamp for non-ISO timestamp
         # strings, in which case we fall back to using cftime
-        raise OutOfBoundsDatetime
+        raise OutOfBoundsDatetime from err

     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", "invalid value encountered", RuntimeWarning)
@@ -488,7 +488,7 @@ def cftime_to_nptime(times, raise_on_invalid: bool = True) -> np.ndarray:
                 raise ValueError(
                     f"Cannot convert date {t} to a date in the "
                     f"standard calendar. Reason: {e}."
-                )
+                ) from e
             else:
                 dt = "NaT"
         new[i] = np.datetime64(dt)
@@ -521,7 +521,7 @@ def convert_times(times, date_type, raise_on_invalid: bool = True) -> np.ndarray:
                 raise ValueError(
                     f"Cannot convert date {t} to a date in the "
                     f"{date_type(2000, 1, 1).calendar} calendar. Reason: {e}."
-                )
+                ) from e
             else:
                 dt = np.nan

2 changes: 1 addition & 1 deletion xarray/core/combine.py
@@ -308,7 +308,7 @@ def _combine_1d(
                     "xarray.combine_by_coords, or do it manually "
                     "with xarray.concat, xarray.merge and "
                     "xarray.align"
-                )
+                ) from err
             else:
                 raise
     else:
6 changes: 4 additions & 2 deletions xarray/core/common.py
@@ -241,8 +241,10 @@ def _get_axis_num(self: Any, dim: Hashable) -> int:
         _raise_if_any_duplicate_dimensions(self.dims)
         try:
             return self.dims.index(dim)
-        except ValueError:
-            raise ValueError(f"{dim!r} not found in array dimensions {self.dims!r}")
+        except ValueError as err:
+            raise ValueError(
+                f"{dim!r} not found in array dimensions {self.dims!r}"
+            ) from err

     @property
     def sizes(self: Any) -> Mapping[Hashable, int]:
4 changes: 2 additions & 2 deletions xarray/core/concat.py
@@ -252,8 +252,8 @@ def concat(

     try:
         first_obj, objs = utils.peek_at(objs)
-    except StopIteration:
-        raise ValueError("must supply at least one object to concatenate")
+    except StopIteration as err:
+        raise ValueError("must supply at least one object to concatenate") from err

     if compat not in _VALID_COMPAT:
         raise ValueError(
10 changes: 5 additions & 5 deletions xarray/core/dataarray.py
@@ -3860,11 +3860,11 @@ def to_pandas(self) -> Self | pd.Series | pd.DataFrame:
         constructors = {0: lambda x: x, 1: pd.Series, 2: pd.DataFrame}
         try:
             constructor = constructors[self.ndim]
-        except KeyError:
+        except KeyError as err:
             raise ValueError(
                 f"Cannot convert arrays with {self.ndim} dimensions into "
                 "pandas objects. Requires 2 or fewer dimensions."
-            )
+            ) from err
         indexes = [self.get_index(dim) for dim in self.dims]
         return constructor(self.values, *indexes)  # type: ignore[operator]

@@ -4468,11 +4468,11 @@ def from_dict(cls, d: Mapping[str, Any]) -> Self:
                 raise ValueError(
                     "cannot convert dict when coords are missing the key "
                     f"'{str(e.args[0])}'"
-                )
+                ) from e
         try:
             data = d["data"]
-        except KeyError:
-            raise ValueError("cannot convert dict without the key 'data''")
+        except KeyError as err:
+            raise ValueError("cannot convert dict without the key 'data''") from err
         else:
             obj = cls(data, coords, d.get("dims"), d.get("name"), d.get("attrs"))
14 changes: 9 additions & 5 deletions xarray/core/dataset.py
@@ -359,12 +359,12 @@ def _get_func_args(func, param_names):
     """
     try:
         func_args = inspect.signature(func).parameters
-    except ValueError:
+    except ValueError as err:
         func_args = {}
         if not param_names:
             raise ValueError(
                 "Unable to inspect `func` signature, and `param_names` was not provided."
-            )
+            ) from err
     if param_names:
         params = param_names
     else:
@@ -6213,8 +6213,10 @@ def drop_sel(
             labels_for_dim = np.asarray(labels_for_dim)
             try:
                 index = self.get_index(dim)
-            except KeyError:
-                raise ValueError(f"dimension {dim!r} does not have coordinate labels")
+            except KeyError as err:
+                raise ValueError(
+                    f"dimension {dim!r} does not have coordinate labels"
+                ) from err
             new_index = index.drop(labels_for_dim, errors=errors)
             ds = ds.loc[{dim: new_index}]
         return ds
@@ -7747,7 +7749,9 @@ def from_dict(cls, d: Mapping[Any, Any]) -> Self:
                 for k, v in variables
             }
         except KeyError as e:
-            raise ValueError(f"cannot convert dict without the key '{str(e.args[0])}'")
+            raise ValueError(
+                f"cannot convert dict without the key '{str(e.args[0])}'"
+            ) from e
         obj = cls(variable_dict)

         # what if coords aren't dims?
8 changes: 4 additions & 4 deletions xarray/core/duck_array_ops.py
@@ -451,10 +451,10 @@ def f(values, axis=None, skipna=None, **kwargs):
             try:  # dask/dask#3133 dask sometimes needs dtype argument
                 # if func does not accept dtype, then raises TypeError
                 return func(values, axis=axis, dtype=values.dtype, **kwargs)
-            except (AttributeError, TypeError):
+            except (AttributeError, TypeError) as err:
                 raise NotImplementedError(
                     f"{name} is not yet implemented on dask arrays"
-                )
+                ) from err

     f.__name__ = name
     return f
@@ -592,10 +592,10 @@ def timedelta_to_numeric(value, datetime_unit="ns", dtype=float):
     elif isinstance(value, str):
         try:
             a = pd.to_timedelta(value)
-        except ValueError:
+        except ValueError as err:
             raise ValueError(
                 f"Could not convert {value!r} to timedelta64 using pandas.to_timedelta"
-            )
+            ) from err
         return py_timedelta_to_float(a, datetime_unit)
     else:
         raise TypeError(
4 changes: 2 additions & 2 deletions xarray/core/extensions.py
@@ -37,11 +37,11 @@ def __get__(self, obj, cls):

         try:
             accessor_obj = self._accessor(obj)
-        except AttributeError:
+        except AttributeError as err:
             # __getattr__ on data object will swallow any AttributeErrors
             # raised when initializing the accessor, so we need to raise as
             # something else (GH933):
-            raise RuntimeError(f"error initializing {self._name!r} accessor.")
+            raise RuntimeError(f"error initializing {self._name!r} accessor.") from err

         cache[self._name] = accessor_obj
         return accessor_obj
4 changes: 2 additions & 2 deletions xarray/core/groupby.py
@@ -390,8 +390,8 @@ def _resolve_group(
     if isinstance(group, DataArray):
         try:
             align(obj, group, join="exact", copy=False)
-        except ValueError:
-            raise ValueError(error_msg)
+        except ValueError as err:
+            raise ValueError(error_msg) from err

         newgroup = group.copy(deep=False)
         newgroup.name = group.name or "group"