Standardize parameter names #178

Merged (16 commits, Jul 28, 2023)
Changes from all commits
4 changes: 4 additions & 0 deletions CHANGELOG.rst
@@ -12,6 +12,10 @@ Bug fixes

* Single-ended measurements with `fix_alpha` failed due to a bug introduced in v2.0.0 ([#173](https://github.com/dtscalibration/python-dts-calibration/pull/173)).

Introduced limitations

* Standardized parameter names: the freedom to choose parameter names and dimension names has been reduced in favor of simpler code (see the sketch below).

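To make the standardization concrete, the notebook diffs below reduce the double-ended calibration call to roughly the following shape. This is a sketch only: the reader call, the `sections` definition and the constant variance values are illustrative assumptions, while the dropped `store_tmpw`/`method`/`solver` arguments, the fixed `tmpw` output name and the fixed `time` dimension name are what this PR changes.

```python
import matplotlib.pyplot as plt
from dtscalibration import read_silixa_files  # reader and paths assumed for illustration

ds = read_silixa_files(directory="data/double_ended", file_ext="*.xml")
sections = {"probe1Temperature": [slice(7.5, 17.0)]}  # placeholder reference section
st_var = ast_var = rst_var = rast_var = 1.0  # placeholder noise variances

ds.calibration_double_ended(  # method name assumed; keyword names follow the notebook diffs
    sections=sections,
    st_var=st_var,
    ast_var=ast_var,
    rst_var=rst_var,
    rast_var=rast_var,
    # store_tmpw="tmpw", method="wls", solver="sparse"  # no longer accepted after this PR
)

# The calibrated temperature is stored under the fixed name "tmpw",
# and the time dimension is now always called "time".
ds.isel(time=0).tmpw.plot(label="calibrated")
plt.show()
```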
Removed

* Removed `ds.resample_datastore()` in favor of xarray's built-in `resample()`; see example notebook 2 and the sketch below.
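For the removed `ds.resample_datastore()`, the replacement is xarray's own resampling, as referenced in example notebook 2. A minimal sketch, reusing the dataset from the sketch above (the 5-minute averaging window is arbitrary):

```python
# xarray's built-in resample replaces the removed ds.resample_datastore() wrapper.
ds_5min = ds.resample(time="5min").mean()  # average all variables onto a 5-minute grid
```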
2 changes: 0 additions & 2 deletions docs/notebooks/08Calibrate_double_ended.ipynb
@@ -241,8 +241,6 @@
" ast_var=ast_var,\n",
" rst_var=rst_var,\n",
" rast_var=rast_var,\n",
" mc_sample_size=10000, # Optional\n",
" mc_conf_ints=[2.5, 97.5], # Optional\n",
")"
]
},
16 changes: 5 additions & 11 deletions docs/notebooks/14Lossy_splices.ipynb
@@ -122,7 +122,7 @@
"ds.isel(time=0).ast.plot(label=\"ast\")\n",
"ds.isel(time=0).rst.plot(label=\"rst\")\n",
"ds.isel(time=0).rast.plot(label=\"rast\")\n",
"plt.legend()"
"plt.legend();"
]
},
{
@@ -157,12 +157,9 @@
" ast_var=ast_var,\n",
" rst_var=rst_var,\n",
" rast_var=rast_var,\n",
" store_tmpw=\"tmpw\",\n",
" method=\"wls\",\n",
" solver=\"sparse\",\n",
")\n",
"\n",
"ds_a.isel(time=0).tmpw.plot(label=\"calibrated\")"
"ds_a.isel(time=0).tmpw.plot(label=\"calibrated\");"
]
},
{
@@ -198,14 +195,11 @@
" rst_var=rst_var,\n",
" rast_var=rast_var,\n",
" trans_att=[50.0],\n",
" store_tmpw=\"tmpw\",\n",
" method=\"wls\",\n",
" solver=\"sparse\",\n",
")\n",
"\n",
"ds_a.isel(time=0).tmpw.plot(label=\"no trans. att.\")\n",
"ds.isel(time=0).tmpw.plot(label=\"with trans. att.\")\n",
"plt.legend()"
"plt.legend();"
]
},
{
@@ -218,7 +212,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -232,7 +226,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.11"
"version": "3.10.10"
}
},
"nbformat": 4,
12 changes: 3 additions & 9 deletions docs/notebooks/15Matching_sections.ipynb
@@ -132,10 +132,7 @@
" st_var=st_var,\n",
" ast_var=ast_var,\n",
" rst_var=rst_var,\n",
" rast_var=rast_var,\n",
" store_tmpw=\"tmpw\",\n",
" method=\"wls\",\n",
" solver=\"sparse\",\n",
" rast_var=rast_var\n",
")\n",
"\n",
"ds_a.isel(time=0).tmpw.plot(label=\"calibrated\")"
@@ -183,9 +180,6 @@
" rast_var=rast_var,\n",
" trans_att=[50.0],\n",
" matching_sections=matching_sections,\n",
" store_tmpw=\"tmpw\",\n",
" method=\"wls\",\n",
" solver=\"sparse\",\n",
")\n",
"\n",
"ds_a.isel(time=0).tmpw.plot(label=\"normal calibration\")\n",
@@ -203,7 +197,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -217,7 +211,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.11"
"version": "3.10.10"
}
},
"nbformat": 4,
7 changes: 2 additions & 5 deletions docs/notebooks/16Averaging_temperatures.ipynb
@@ -104,9 +104,6 @@
" ast_var=ast_var,\n",
" rst_var=rst_var,\n",
" rast_var=rast_var,\n",
" store_tmpw=\"tmpw\",\n",
" method=\"wls\",\n",
" solver=\"sparse\",\n",
")"
]
},
@@ -386,7 +383,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -400,7 +397,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.11"
"version": "3.10.10"
}
},
"nbformat": 4,
71 changes: 30 additions & 41 deletions src/dtscalibration/calibrate_utils.py
@@ -390,12 +390,12 @@ def calibration_double_ended_solver( # noqa: MC0001
# Calculate E as initial estimate for the E calibration.
# Does not require ta to be passed on
E_all_guess, E_all_var_guess = calc_alpha_double(
-'guess',
-ds,
-st_var,
-ast_var,
-rst_var,
-rast_var,
+mode='guess',
+ds=ds,
+st_var=st_var,
+ast_var=ast_var,
+rst_var=rst_var,
+rast_var=rast_var,
ix_alpha_is_zero=ix_alpha_is_zero)
df_est, db_est = calc_df_db_double_est(ds, ix_alpha_is_zero, 485.)

@@ -633,22 +633,17 @@ def calibration_double_ended_solver( # noqa: MC0001
# put E outside of reference section in solution
# concatenating makes a copy of the data instead of using a pointer
ds_sub = ds[['st', 'ast', 'rst', 'rast', 'trans_att']]
-time_dim = ds_sub.get_time_dim()
-ds_sub['df'] = ((time_dim,), p_sol[1:1 + nt])
-ds_sub['df_var'] = ((time_dim,), p_var[1:1 + nt])
-ds_sub['db'] = ((time_dim,), p_sol[1 + nt:1 + 2 * nt])
-ds_sub['db_var'] = ((time_dim,), p_var[1 + nt:1 + 2 * nt])
+ds_sub['df'] = (('time',), p_sol[1:1 + nt])
+ds_sub['df_var'] = (('time',), p_var[1:1 + nt])
+ds_sub['db'] = (('time',), p_sol[1 + nt:1 + 2 * nt])
+ds_sub['db_var'] = (('time',), p_var[1 + nt:1 + 2 * nt])
E_all_exact, E_all_var_exact = calc_alpha_double(
-'exact',
-ds_sub,
-st_var,
-ast_var,
-rst_var,
-rast_var,
-'df',
-'db',
-'df_var',
-'db_var',
+mode='exact',
+ds=ds_sub,
+st_var=st_var,
+ast_var=ast_var,
+rst_var=rst_var,
+rast_var=rast_var,
ix_alpha_is_zero=ix_alpha_is_zero,
talpha_fw=talpha_fw,
talpha_bw=talpha_bw,
@@ -1223,10 +1218,6 @@ def calc_alpha_double(
ast_var=None,
rst_var=None,
rast_var=None,
-D_F_label=None,
-D_B_label=None,
-D_F_var_label=None,
-D_B_var_label=None,
ix_alpha_is_zero=-1,
talpha_fw=None,
talpha_bw=None,
@@ -1238,8 +1229,6 @@
assert ix_alpha_is_zero >= 0, 'Define ix_alpha_is_zero' + \
str(ix_alpha_is_zero)

-time_dim = ds.get_time_dim()
-
if st_var is not None:
if callable(st_var):
st_var_val = st_var(ds.st)
@@ -1271,26 +1260,26 @@
A = (i_bw - i_fw) / 2

elif mode == 'exact':
-D_F = ds[D_F_label]
-D_B = ds[D_B_label]
-D_F_var = ds[D_F_var_label]
-D_B_var = ds[D_B_var_label]
+D_F = ds["df"]
+D_B = ds["db"]
+D_F_var = ds["df_var"]
+D_B_var = ds["db_var"]

if ds.trans_att.size > 0:
# Can be improved by including covariances. That reduces the
# uncert.

-ta_arr_fw = np.zeros((ds.x.size, ds[time_dim].size))
-ta_arr_fw_var = np.zeros((ds.x.size, ds[time_dim].size))
+ta_arr_fw = np.zeros((ds.x.size, ds['time'].size))
+ta_arr_fw_var = np.zeros((ds.x.size, ds['time'].size))
for tai, taxi, tai_var in zip(
talpha_fw.T, ds.trans_att.values, talpha_fw_var.T):
ta_arr_fw[ds.x.values >= taxi] = \
ta_arr_fw[ds.x.values >= taxi] + tai
ta_arr_fw_var[ds.x.values >= taxi] = \
ta_arr_fw_var[ds.x.values >= taxi] + tai_var

-ta_arr_bw = np.zeros((ds.x.size, ds[time_dim].size))
-ta_arr_bw_var = np.zeros((ds.x.size, ds[time_dim].size))
+ta_arr_bw = np.zeros((ds.x.size, ds['time'].size))
+ta_arr_bw_var = np.zeros((ds.x.size, ds['time'].size))
for tai, taxi, tai_var in zip(
talpha_bw.T, ds.trans_att.values, talpha_bw_var.T):
ta_arr_bw[ds.x.values < taxi] = \
@@ -1308,8 +1297,8 @@
A_var = (i_var_fw + i_var_bw + D_B_var + D_F_var) / 2
A = (i_bw - i_fw) / 2 + (D_B - D_F) / 2

-E_var = 1 / (1 / A_var).sum(dim=time_dim)
-E = (A / A_var).sum(dim=time_dim) * E_var
+E_var = 1 / (1 / A_var).sum(dim='time')
+E = (A / A_var).sum(dim='time') * E_var

else:
i_fw = np.log(ds.st / ds.ast)
@@ -1318,12 +1307,12 @@
if mode == 'guess':
A = (i_bw - i_fw) / 2
elif mode == 'exact':
-D_F = ds[D_F_label]
-D_B = ds[D_B_label]
+D_F = ds["df"]
+D_B = ds["db"]
A = (i_bw - i_fw) / 2 + (D_B - D_F) / 2

-E_var = A.var(dim=time_dim)
-E = A.mean(dim=time_dim)
+E_var = A.var(dim='time')
+E = A.mean(dim='time')

# E is defined zero at the first index of the reference sections
if mode == 'guess':