
Commit

drop py38
jmoralez committed Nov 21, 2024
1 parent f341661 commit 804ed19
Showing 6 changed files with 23 additions and 18 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/ci.yaml
@@ -18,7 +18,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
    env:
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID_NIXTLA_TMP }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY_NIXTLA_TMP }}
@@ -44,7 +44,7 @@ jobs:
      fail-fast: false
      matrix:
        os: [macos-13, macos-14, windows-latest]
-        python-version: ["3.8", "3.9", "3.10", "3.11"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]
    steps:
      - name: Clone repo
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
1 change: 1 addition & 0 deletions README.md
@@ -57,6 +57,7 @@ for best practices.**
- [m4](https://www.kaggle.com/code/lemuz90/m4-competition)
- [m4-cv](https://www.kaggle.com/code/lemuz90/m4-competition-cv)
- [favorita](https://www.kaggle.com/code/lemuz90/mlforecast-favorita)
+- [VN1](https://colab.research.google.com/drive/1UdhCAk49k6HgMezG-U_1ETnAB5pYvZk9)

## Why?

14 changes: 8 additions & 6 deletions mlforecast/target_transforms.py
@@ -125,14 +125,14 @@ def inverse_transform(self, ga: GroupedArray) -> GroupedArray:
    def inverse_transform_fitted(self, ga: GroupedArray) -> GroupedArray:
        if self.fitted_[0].size < ga.data.size:
            raise ValueError("fitted differences are smaller than provided target.")
+        transformed = ga.data
        for d, fitted in zip(reversed(self.differences), reversed(self.fitted_)):
            fitted_ga = CoreGroupedArray(fitted, self.fitted_indptr_)
            adds = fitted_ga._lag(d)
            if adds.size > ga.data.size:
                adds = CoreGroupedArray(adds, self.fitted_indptr_)._tails(ga.indptr)
-            transformed = ga.data + adds
-            ga = GroupedArray(transformed, ga.indptr)
-        return ga
+            transformed = transformed + adds
+        return GroupedArray(transformed, ga.indptr)

    def take(self, idxs: np.ndarray) -> "Differences":
        out = Differences(self.differences)
@@ -161,9 +161,11 @@ def stack(scalers: Sequence["Differences"]) -> "Differences": # type: ignore[ov
        diffs = first_scaler.differences
        out = Differences(diffs)
        out.fitted_ = []
-        for i in range(len(scalers[0].fitted_)):
-            out.fitted_.append(np.hstack([sc.fitted_[i] for sc in scalers]))
-        if first_scaler.fitted_indptr_ is not None:
+        if first_scaler.fitted_indptr_ is None:
+            out.fitted_indptr_ = None
+        else:
+            for i in range(len(scalers[0].fitted_)):
+                out.fitted_.append(np.hstack([sc.fitted_[i] for sc in scalers]))
            sizes = np.hstack([np.diff(sc.fitted_indptr_) for sc in scalers])
            out.fitted_indptr_ = np.append(0, sizes.cumsum())
        out.scalers_ = [
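Note on the inverse_transform_fitted change above: Differences undoes chained differencing by walking the stored pre-difference series in reverse order and adding their lag-d values back onto one accumulating array. Below is a minimal standalone sketch of that idea in plain NumPy (one toy series and hand-rolled lags, not the library's GroupedArray/CoreGroupedArray machinery; every name here is illustrative only).

import numpy as np

# Toy series and the differences applied during fit: first order 1, then order 2.
y = np.arange(10, dtype=np.float64) ** 2
differences = [1, 2]

# Forward pass: apply each difference, keeping the series seen *before* it
# (the analogue of the transform's stored fitted values).
fitted = []
x = y.copy()
for d in differences:
    fitted.append(x.copy())
    x[d:] = x[d:] - x[:-d]
    x[:d] = np.nan  # the first d values have no lag-d counterpart

# Inverse pass: add the lag-d values of each stored series back,
# last difference first, accumulating into a single array.
restored = x.copy()
for d, prev in zip(reversed(differences), reversed(fitted)):
    restored[d:] = restored[d:] + prev[:-d]  # prev[:-d] aligns prev's lag-d values with restored[d:]

# Outside the warm-up region the original series is recovered.
np.testing.assert_allclose(restored[sum(differences):], y[sum(differences):])

In the rewritten method the result likewise accumulates in transformed and is wrapped in a GroupedArray only once, at the return.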
14 changes: 8 additions & 6 deletions nbs/target_transforms.ipynb
@@ -201,14 +201,14 @@
" def inverse_transform_fitted(self, ga: GroupedArray) -> GroupedArray:\n",
" if self.fitted_[0].size < ga.data.size:\n",
" raise ValueError(\"fitted differences are smaller than provided target.\")\n",
" transformed = ga.data\n",
" for d, fitted in zip(reversed(self.differences), reversed(self.fitted_)):\n",
" fitted_ga = CoreGroupedArray(fitted, self.fitted_indptr_)\n",
" adds = fitted_ga._lag(d)\n",
" if adds.size > ga.data.size:\n",
" adds = CoreGroupedArray(adds, self.fitted_indptr_)._tails(ga.indptr)\n",
" transformed = ga.data + adds\n",
" ga = GroupedArray(transformed, ga.indptr)\n",
" return ga\n",
" transformed = transformed + adds\n",
" return GroupedArray(transformed, ga.indptr)\n",
"\n",
" def take(self, idxs: np.ndarray) -> \"Differences\":\n",
" out = Differences(self.differences)\n",
@@ -237,9 +237,11 @@
" diffs = first_scaler.differences\n",
" out = Differences(diffs)\n",
" out.fitted_ = []\n",
" for i in range(len(scalers[0].fitted_)):\n",
" out.fitted_.append(np.hstack([sc.fitted_[i] for sc in scalers]))\n",
" if first_scaler.fitted_indptr_ is not None:\n",
" if first_scaler.fitted_indptr_ is None:\n",
" out.fitted_indptr_ = None\n",
" else:\n",
" for i in range(len(scalers[0].fitted_)):\n",
" out.fitted_.append(np.hstack([sc.fitted_[i] for sc in scalers]))\n",
" sizes = np.hstack([np.diff(sc.fitted_indptr_) for sc in scalers])\n",
" out.fitted_indptr_ = np.append(0, sizes.cumsum())\n",
" out.scalers_ = [\n",
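On the stack change (the same hunk appears in the module and in this notebook): the fitted arrays are now concatenated and the grouped index pointer rebuilt only when the scalers actually stored fitted values; when fitted_indptr_ is None, the stacked scaler keeps None. The pointer rebuild itself is just per-group sizes, concatenated and cumulatively summed. A tiny illustration with made-up group boundaries:

import numpy as np

# Hypothetical group boundaries from two scalers being stacked.
indptr_a = np.array([0, 3, 7])      # scaler A: groups of sizes 3 and 4
indptr_b = np.array([0, 2, 2, 6])   # scaler B: groups of sizes 2, 0 and 4

# Same computation as in the diff: per-group sizes, then a fresh index pointer.
sizes = np.hstack([np.diff(indptr_a), np.diff(indptr_b)])
stacked_indptr = np.append(0, sizes.cumsum())
print(stacked_indptr)  # [ 0  3  7  9  9 13]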
6 changes: 3 additions & 3 deletions settings.ini
@@ -9,13 +9,13 @@ author_email = [email protected]
copyright = Nixtla
branch = main
version = 0.15.0
-min_python = 3.8
+min_python = 3.9
audience = Developers
language = English
custom_sidebar = True
license = apache2
-status = 3
-requirements = cloudpickle coreforecast>=0.0.11 fsspec numba optuna packaging pandas scikit-learn utilsforecast>=0.2.3 window-ops
+status = 4
+requirements = cloudpickle coreforecast>=0.0.15 fsspec numba optuna packaging pandas scikit-learn utilsforecast>=0.2.3 window-ops
dask_requirements = fugue dask[complete] lightgbm xgboost
ray_requirements = fugue[ray] lightgbm_ray numpy<2 pandas<2.2 ray<2.8 setuptools<70 xgboost<2 xgboost_ray
spark_requirements = fugue pyspark>=3.3 lightgbm xgboost
2 changes: 1 addition & 1 deletion setup.py
@@ -22,7 +22,7 @@
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
    '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
-py_versions = '3.8 3.9 3.10 3.11'.split()
+py_versions = '3.9 3.10 3.11 3.12'.split()

requirements = cfg['requirements'].split()
dask_requirements = cfg['dask_requirements'].split()
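The settings.ini and setup.py edits travel together: in nbdev-style projects, setup.py reads values such as min_python, status and the supported Python versions from settings.ini and turns them into PyPI trove classifiers. The sketch below is a hypothetical illustration of that mapping, reusing the statuses list shown in the diff; the status indexing and variable names are assumptions, not the repository's exact code.

# Hypothetical illustration of how settings.ini values typically become classifiers.
statuses = ['1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
            '4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive']
py_versions = '3.9 3.10 3.11 3.12'.split()  # supported versions after this commit
min_python = '3.9'  # settings.ini: min_python = 3.9
status = 4          # settings.ini: status = 4

classifiers = [
    'Development Status :: ' + statuses[status - 1],  # assumed 1-based status indexing
    *('Programming Language :: Python :: ' + v
      for v in py_versions[py_versions.index(min_python):]),
]
print(classifiers)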
