Commit

Merge branch 'main' into accessor
bdestombe committed Oct 16, 2023
2 parents 74a0a8b + f7f3760 commit c7d01ba
Showing 17 changed files with 2,654 additions and 2,435 deletions.
pyproject.toml: 12 changes (5 additions, 7 deletions)
@@ -24,12 +24,10 @@ readme = "README.rst"
 license = "BSD-3-Clause"
 requires-python = ">=3.9, <3.12"
 authors = [
-    {name = "Bas des Tombe", email = "[email protected]"},
-    {name = "Bart Schilperoort", email = "[email protected]"},
+    {name = "Bas des Tombe, Bart Schilperoort"},
 ]
 maintainers = [
-    {name = "Bas des Tombe", email = "[email protected]"},
-    {name = "Bart Schilperoort", email = "[email protected]"},
+    {name = "Bas des Tombe, Bart Schilperoort"},
 ]
 keywords = [
     "DTS",
@@ -56,15 +54,15 @@ dependencies = [
     "dask",
     "pandas",
     "xarray[parallel]", # numbagg (llvmlite) is a pain to install with pip
-    "bottleneck", # optional, speed up Xarray
-    "flox", # optional, speed up Xarray
+    "bottleneck", # speeds up Xarray
+    "flox", # speeds up Xarray
     "pyyaml>=6.0.1",
     "xmltodict",
     "scipy",
     "statsmodels",
     "matplotlib",
     "netCDF4>=1.6.4",
-    "nc-time-axis>=1.4.1" # optional plot dependency of xarray
+    "nc-time-axis>=1.4.1" # plot dependency of xarray
 ]
 dynamic = ["version"]
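
The dependency comments above now simply note that bottleneck, flox, and nc-time-axis support or speed up xarray instead of calling them optional; all three remain regular install requirements. A quick stdlib-only check that they resolve in an environment (illustrative only, not part of the package):

    import importlib.util

    # Verify the xarray helper packages listed in pyproject.toml are importable.
    for module in ("bottleneck", "flox", "nc_time_axis"):
        found = importlib.util.find_spec(module) is not None
        print(f"{module}: {'available' if found else 'missing'}")
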
src/dtscalibration/__init__.py: 8 changes (4 additions, 4 deletions)
@@ -3,10 +3,10 @@
 from dtscalibration.datastore_utils import merge_double_ended
 from dtscalibration.datastore_utils import shift_double_ended
 from dtscalibration.datastore_utils import suggest_cable_shift_double_ended
-from dtscalibration.io import read_apsensing_files
-from dtscalibration.io import read_sensornet_files
-from dtscalibration.io import read_sensortran_files
-from dtscalibration.io import read_silixa_files
+from dtscalibration.io.apsensing import read_apsensing_files
+from dtscalibration.io.sensornet import read_sensornet_files
+from dtscalibration.io.sensortran import read_sensortran_files
+from dtscalibration.io.silixa import read_silixa_files
 from dtscalibration.plot import plot_accuracy
 from dtscalibration.plot import plot_location_residuals_double_ended
 from dtscalibration.plot import plot_residuals_reference_sections
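
The reader functions moved into io submodules, but they are still re-exported at the package root, so both import styles shown in this hunk resolve to the same function. For example:

    # Public import path, unchanged for downstream code:
    from dtscalibration import read_silixa_files

    # New internal location, as imported by __init__.py above:
    from dtscalibration.io.silixa import read_silixa_files
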
src/dtscalibration/averaging_utils.py: 7 changes (1 addition, 6 deletions)
@@ -7,12 +7,7 @@ def inverse_variance_weighted_mean(
     tmpw_store="tmpw",
     tmpw_var_store="tmpw_var",
 ):
-    """
-    Average two temperature datasets with the inverse of the variance as
-    weights. The two
-    temperature datasets `tmp1` and `tmp2` with their variances
-    `tmp1_var` and `tmp2_var`,
-    respectively. Are averaged and stored in the DataStore.
+    """Compute inverse variance weighted average, and add result in-place.
 
     Parameters
     ----------
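
For context, the inverse-variance weighting referred to in the new one-line docstring reduces to the textbook formula. A minimal numpy sketch of that formula (standalone illustration, not the library implementation; names mirror the docstring):

    import numpy as np

    def inverse_variance_mean(tmp1, tmp1_var, tmp2, tmp2_var):
        # Weights are the reciprocals of the variances.
        w1 = 1.0 / np.asarray(tmp1_var)
        w2 = 1.0 / np.asarray(tmp2_var)
        tmpw = (w1 * np.asarray(tmp1) + w2 * np.asarray(tmp2)) / (w1 + w2)
        tmpw_var = 1.0 / (w1 + w2)  # variance of the weighted mean
        return tmpw, tmpw_var
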
src/dtscalibration/calibrate_utils.py: 20 changes (9 additions, 11 deletions)
@@ -242,7 +242,7 @@ def calibration_single_ended_solver( # noqa: MC0001
     """
     # get ix_sec argsort so the sections are in order of increasing x
-    ix_sec = ds.dts.ufunc_per_section(sections=sections, x_indices=True, calc_per="all")
+    ix_sec = ds.ufunc_per_section(x_indices=True, calc_per="all")
     ds_sec = ds.isel(x=ix_sec)
 
     x_sec = ds_sec["x"].values
@@ -261,8 +261,8 @@ def calibration_single_ended_solver( # noqa: MC0001
     p0_est_alpha = np.asarray([485.0] + no * [0.0] + nt * [1.4] + nta * nt * [0.0])
 
     # X \gamma # Eq.34
-    cal_ref = ds.dts.ufunc_per_section(
-        sections=sections, label="st", ref_temp_broadcasted=True, calc_per="all"
+    cal_ref = ds.ufunc_per_section(
+        label="st", ref_temp_broadcasted=True, calc_per="all"
     )
     # cal_ref = cal_ref # sort by increasing x
     data_gamma = 1 / (cal_ref.T.ravel() + 273.15) # gamma
@@ -1195,7 +1195,7 @@ def calibration_double_ended_solver( # noqa: MC0001
     -------
 
     """
-    ix_sec = ds.dts.ufunc_per_section(sections=sections, x_indices=True, calc_per="all")
+    ix_sec = ds.ufunc_per_section(x_indices=True, calc_per="all")
     ds_sec = ds.isel(x=ix_sec)
     ix_alpha_is_zero = ix_sec[0] # per definition of E
 
@@ -1224,7 +1224,7 @@ def calibration_double_ended_solver( # noqa: MC0001
         Zero_d,
         Z_TA_fw,
         Z_TA_bw,
-    ) = construct_submatrices(sections, nt, nx_sec, ds, trans_att, x_sec)
+    ) = construct_submatrices(nt, nx_sec, ds, ds.trans_att.values, x_sec)
 
     # y # Eq.41--45
     y_F = np.log(ds_sec.st / ds_sec.ast).values.ravel()
@@ -1859,9 +1857,7 @@ def construct_submatrices(sections, nt, nx, ds, trans_att, x_sec):
 
     # Z \gamma # Eq.47
     cal_ref = np.array(
-        ds.dts.ufunc_per_section(
-            sections=sections, label="st", ref_temp_broadcasted=True, calc_per="all"
-        )
+        ds.ufunc_per_section(label="st", ref_temp_broadcasted=True, calc_per="all")
     )
     data_gamma = 1 / (cal_ref.ravel() + 273.15) # gamma
     coord_gamma_row = np.arange(nt * nx, dtype=int)
@@ -2242,10 +2240,10 @@ def calc_df_db_double_est(ds, sections, ix_alpha_is_zero, gamma_est):
     Ibwx0 = np.log(
         ds.rst.isel(x=ix_alpha_is_zero) / ds.rast.isel(x=ix_alpha_is_zero)
     ).values
-    ref_temps_refs = ds.dts.ufunc_per_section(
-        sections=sections, label="st", ref_temp_broadcasted=True, calc_per="all"
+    ref_temps_refs = ds.ufunc_per_section(
+        label="st", ref_temp_broadcasted=True, calc_per="all"
     )
-    ix_sec = ds.dts.ufunc_per_section(sections=sections, x_indices=True, calc_per="all")
+    ix_sec = ds.ufunc_per_section(x_indices=True, calc_per="all")
     ref_temps_x0 = (
         ref_temps_refs[ix_sec == ix_alpha_is_zero].flatten().compute() + 273.15
     )
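
For reference, the `data_gamma` entries assembled in these hunks (Eq. 34 and Eq. 47) are just inverse absolute temperatures of the reference sections. A small standalone sketch of that step, with made-up reference values:

    import numpy as np

    # Hypothetical reference-section temperatures in degrees Celsius.
    cal_ref = np.array([[20.0, 20.1, 19.9],
                        [4.0, 4.2, 4.1]])

    # Same operation as in the hunks above: 1 / T[K] of each reference value.
    data_gamma = 1.0 / (cal_ref.T.ravel() + 273.15)
    print(data_gamma.round(6))
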
src/dtscalibration/datastore.py: 16 changes (11 additions, 5 deletions)
@@ -20,7 +20,7 @@
 from dtscalibration.datastore_utils import get_params_from_pval_double_ended
 from dtscalibration.datastore_utils import get_params_from_pval_single_ended
 from dtscalibration.datastore_utils import ufunc_per_section_helper
-from dtscalibration.io_utils import _dim_attrs
+from dtscalibration.io.utils import _dim_attrs
 from dtscalibration.variance_helpers import variance_stokes_constant_helper
 from dtscalibration.variance_helpers import variance_stokes_exponential_helper
 from dtscalibration.variance_helpers import variance_stokes_linear_helper
@@ -79,7 +79,13 @@ class DataStore(xr.Dataset):
     """
 
     def __init__(self, *args, autofill_dim_attrs=True, **kwargs):
-        super().__init__(*args, **kwargs)
+        with warnings.catch_warnings():
+            # Filter out nanosecond precision warning: no good way to avoid ATM.
+            warnings.filterwarnings(
+                "ignore",
+                message="Converting non-nanosecond precision timedelta values to nanosecond precision.",
+            )
+            super().__init__(*args, **kwargs)
 
         # check order of the dimensions of the data_vars
         # first 'x' (if in initiated DataStore), then 'time', then the rest
@@ -217,7 +223,7 @@ def sections(self, value):
            "Not possible anymore. Instead, pass the sections as an argument to \n"
            "ds.dts.calibrate_single_ended() or ds.dts.calibrate_double_ended()."
        )
-        raise NotImplementedError(msg)
+        raise DeprecationWarning(msg)
 
     def check_reference_section_values(self):
         """
@@ -2215,8 +2221,8 @@ def average_single_ended(
         ----------
         p_val : array-like, optional
             Define `p_val`, `p_var`, `p_cov` if you used an external function
-            for calibration. Has size 2 + `nt`. First value is :math:`\gamma`,
-            second is :math:`\Delta \\alpha`, others are :math:`C` for each
+            for calibration. Has size 2 + `nt`. First value is :math:`\\gamma`,
+            second is :math:`\\Delta \\alpha`, others are :math:`C` for each
             timestep.
             If set to False, no uncertainty in the parameters is propagated
             into the confidence intervals. Similar to the spec sheets of the DTS
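
The new `DataStore.__init__` wraps the parent constructor in a scoped filter so only the nanosecond-precision message is hidden. The pattern in isolation, with a dummy warning standing in for xarray's (illustrative sketch only):

    import warnings

    def noisy():
        warnings.warn("Converting non-nanosecond precision timedelta values to nanosecond precision.")

    with warnings.catch_warnings():
        # Suppress only this message; unrelated warnings still surface.
        warnings.filterwarnings(
            "ignore",
            message="Converting non-nanosecond precision timedelta values",
        )
        noisy()  # silenced here

    noisy()  # the saved filters are restored on exit, so this one is shown
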
src/dtscalibration/datastore_utils.py: 10 changes (8 additions, 2 deletions)
@@ -1,3 +1,4 @@
+import warnings
 from typing import TYPE_CHECKING
 from typing import Optional
 from typing import Union
@@ -1108,8 +1109,13 @@ def suggest_cable_shift_double_ended(
     rast = ds["rast"].data[:nx2]
     x2 = ds["x"].data[i_shift:]
 
-    i_f = np.log(st / ast)
-    i_b = np.log(rst / rast)
+    with warnings.catch_warnings():
+        # Suppress log(x/0) warnings. The data will result in NaN values.
+        warnings.filterwarnings(
+            "ignore", message="invalid value encountered in log"
+        )
+        i_f = np.log(st / ast)
+        i_b = np.log(rst / rast)
 
     att = (i_b - i_f) / 2 # variable E in article
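
The suppressed message comes from numpy: taking the log of a negative Stokes ratio emits `RuntimeWarning: invalid value encountered in log` and yields NaN, which the comment above notes is expected and handled downstream. A small standalone illustration (values made up):

    import warnings

    import numpy as np

    st = np.array([100.0, -5.0, 50.0])   # one negative sample, e.g. after noise
    ast = np.array([80.0, 40.0, 25.0])

    with warnings.catch_warnings():
        # Same filter as in the hunk above.
        warnings.filterwarnings("ignore", message="invalid value encountered in log")
        i_f = np.log(st / ast)

    print(i_f)  # [0.2231... nan 0.6931...]; the negative ratio became NaN silently
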