diff --git a/docs/_images/model_layout.png b/docs/_images/model_layout.png
index 0c9857b..90b7c8d 100644
Binary files a/docs/_images/model_layout.png and b/docs/_images/model_layout.png differ
diff --git a/src/ewatercycle_HBV/forcing.py b/src/ewatercycle_HBV/forcing.py
index 8982855..c29df23 100644
--- a/src/ewatercycle_HBV/forcing.py
+++ b/src/ewatercycle_HBV/forcing.py
@@ -4,7 +4,7 @@
 from datetime import datetime
 from pathlib import Path
 from typing import Optional
-import random
+import secrets
 import string
 
 import pandas as pd
@@ -24,7 +24,7 @@
 REQUIRED_PARAMS = ["pr", "evspsblpot", "tas"]
 
 class HBVForcing(DefaultForcing):
-    """Container for HBV forcing data.
+    """Class for HBV forcing data, mainly focused on using the CAMELS dataset.
 
     Args:
         camels_file: .txt file that contains CAMELS forcing from https://hess.copernicus.org/articles/21/5293/2017/
@@ -136,7 +136,16 @@ def from_test_txt(self) -> xr.Dataset:
         return ds
 
     def from_camels_txt(self) -> xr.Dataset:
-        """Load forcing data from a txt file into an xarray dataset.
+        """Load forcing data from a txt file into a xarray dataset.
+
+        Note:
+            This is only tested with the daymet files.
+            The other two sources (NLDAS/maurer) can pose some issues; for more details,
+            see this repo ``_ .
+            Instead, use the
+            `eWaterCycle CAMELS functionality `_
+            which utilises
+            `OpenDAP `_ .
 
         Requirements:
             Must be in the same format as the CAMELS dataset:
@@ -269,7 +278,7 @@ def crop_ds(self, ds: xr.Dataset, name: str):
         time = str(datetime.now())[:-10].replace(":", "_")
         letters = string.ascii_lowercase + string.ascii_uppercase
-        unique_identifier = ''.join((random.choice(letters)) for _ in range(5))
+        unique_identifier = ''.join((secrets.choice(letters)) for _ in range(5))
         ds_name = f"HBV_forcing_{name}_{time}_{unique_identifier}.nc"
         out_dir = self.directory / ds_name
         if not out_dir.exists():
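
The last hunk swaps random.choice for secrets.choice when building the suffix of the forcing-file name. The snippet below is a minimal standalone sketch of that identifier and filename generation, using only the Python standard library; the literal "example" stands in for the method's `name` argument and is not part of the patched code.

import secrets
import string
from datetime import datetime

# Pool of upper- and lower-case ASCII letters, as in the patched code.
letters = string.ascii_lowercase + string.ascii_uppercase

# secrets.choice draws from the OS-provided CSPRNG, which is better suited
# than random.choice for identifiers that must be hard to predict or collide.
unique_identifier = ''.join(secrets.choice(letters) for _ in range(5))

# Timestamp trimmed to minutes, with ":" replaced so it is filesystem-safe.
time = str(datetime.now())[:-10].replace(":", "_")

# "example" is a placeholder for the real `name` argument.
ds_name = f"HBV_forcing_example_{time}_{unique_identifier}.nc"
print(ds_name)  # e.g. HBV_forcing_example_2024-05-01 12_34_abcDE.nc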