diff --git a/.gitignore b/.gitignore
index 51e576d..032f199 100644
--- a/.gitignore
+++ b/.gitignore
@@ -137,3 +137,6 @@ tests/test_zarrs/*
 tests/test_tiffs/*
 tests/*.png
 .vscode/settings.json
+
+/docs_build/
+/jupyter_execute/
\ No newline at end of file
diff --git a/docs/.buildinfo b/docs/.buildinfo
index bbbb4df..3b50e19 100644
--- a/docs/.buildinfo
+++ b/docs/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 5704830082c07750dd4f65112d73bff2
+config: ca6db5e3c63f8be4f91f3b82db3b81cf
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/.doctrees/autoapi/index.doctree b/docs/.doctrees/autoapi/index.doctree
index 2d6b41a..40cf11c 100644
Binary files a/docs/.doctrees/autoapi/index.doctree and b/docs/.doctrees/autoapi/index.doctree differ
diff --git a/docs/.doctrees/autoapi/zarrdataset/_augs/index.doctree b/docs/.doctrees/autoapi/zarrdataset/_augs/index.doctree
index b4743d0..e0d18b3 100644
Binary files a/docs/.doctrees/autoapi/zarrdataset/_augs/index.doctree and b/docs/.doctrees/autoapi/zarrdataset/_augs/index.doctree differ
diff --git a/docs/.doctrees/autoapi/zarrdataset/_imageloaders/index.doctree b/docs/.doctrees/autoapi/zarrdataset/_imageloaders/index.doctree
index 3341527..7f15496 100644
Binary files a/docs/.doctrees/autoapi/zarrdataset/_imageloaders/index.doctree and b/docs/.doctrees/autoapi/zarrdataset/_imageloaders/index.doctree differ
diff --git a/docs/.doctrees/autoapi/zarrdataset/_maskfuncs/index.doctree b/docs/.doctrees/autoapi/zarrdataset/_maskfuncs/index.doctree
index ec7c7e8..ea8c389 100644
Binary files a/docs/.doctrees/autoapi/zarrdataset/_maskfuncs/index.doctree and b/docs/.doctrees/autoapi/zarrdataset/_maskfuncs/index.doctree differ
diff --git a/docs/.doctrees/autoapi/zarrdataset/_samplers/index.doctree b/docs/.doctrees/autoapi/zarrdataset/_samplers/index.doctree
index c3e7bf6..7de19da 100644
Binary files a/docs/.doctrees/autoapi/zarrdataset/_samplers/index.doctree and b/docs/.doctrees/autoapi/zarrdataset/_samplers/index.doctree differ
diff --git a/docs/.doctrees/autoapi/zarrdataset/_utils/index.doctree b/docs/.doctrees/autoapi/zarrdataset/_utils/index.doctree
index 1e3f887..ac3441f 100644
Binary files a/docs/.doctrees/autoapi/zarrdataset/_utils/index.doctree and b/docs/.doctrees/autoapi/zarrdataset/_utils/index.doctree differ
diff --git a/docs/.doctrees/autoapi/zarrdataset/_zarrdataset/index.doctree b/docs/.doctrees/autoapi/zarrdataset/_zarrdataset/index.doctree
index a2c340f..323fc6e 100644
Binary files a/docs/.doctrees/autoapi/zarrdataset/_zarrdataset/index.doctree and b/docs/.doctrees/autoapi/zarrdataset/_zarrdataset/index.doctree differ
diff --git a/docs/.doctrees/autoapi/zarrdataset/index.doctree b/docs/.doctrees/autoapi/zarrdataset/index.doctree
index e5b79f1..854b5e6 100644
Binary files a/docs/.doctrees/autoapi/zarrdataset/index.doctree and b/docs/.doctrees/autoapi/zarrdataset/index.doctree differ
diff --git a/docs/.doctrees/environment.pickle b/docs/.doctrees/environment.pickle
index 5985e92..f615a5d 100644
Binary files a/docs/.doctrees/environment.pickle and b/docs/.doctrees/environment.pickle differ
diff --git a/docs/.doctrees/examples/advanced_example_pytorch.doctree b/docs/.doctrees/examples/advanced_example_pytorch.doctree
index d353d18..3080b14 100644
Binary files a/docs/.doctrees/examples/advanced_example_pytorch.doctree and b/docs/.doctrees/examples/advanced_example_pytorch.doctree differ
diff --git a/docs/.doctrees/examples/advanced_example_pytorch_inference.doctree b/docs/.doctrees/examples/advanced_example_pytorch_inference.doctree
index 2639151..d4f022d 100644
Binary files a/docs/.doctrees/examples/advanced_example_pytorch_inference.doctree and b/docs/.doctrees/examples/advanced_example_pytorch_inference.doctree differ
diff --git a/docs/.doctrees/examples/advanced_masked_datasets_example.doctree b/docs/.doctrees/examples/advanced_masked_datasets_example.doctree
index d0914b8..3902b71 100644
Binary files a/docs/.doctrees/examples/advanced_masked_datasets_example.doctree and b/docs/.doctrees/examples/advanced_masked_datasets_example.doctree differ
diff --git a/docs/.doctrees/examples/basic_example.doctree b/docs/.doctrees/examples/basic_example.doctree
index f81aafb..a5cb257 100644
Binary files a/docs/.doctrees/examples/basic_example.doctree and b/docs/.doctrees/examples/basic_example.doctree differ
diff --git a/docs/.doctrees/examples/basic_example_pytorch.doctree b/docs/.doctrees/examples/basic_example_pytorch.doctree
index be0b4f1..a4d724a 100644
Binary files a/docs/.doctrees/examples/basic_example_pytorch.doctree and b/docs/.doctrees/examples/basic_example_pytorch.doctree differ
diff --git a/docs/.doctrees/examples/basic_example_tensorflow.doctree b/docs/.doctrees/examples/basic_example_tensorflow.doctree
index 90119fe..bf0968c 100644
Binary files a/docs/.doctrees/examples/basic_example_tensorflow.doctree and b/docs/.doctrees/examples/basic_example_tensorflow.doctree differ
diff --git a/docs/.doctrees/examples/basic_labeled_datasets_example.doctree b/docs/.doctrees/examples/basic_labeled_datasets_example.doctree
index cbecba3..b6ac2be 100644
Binary files a/docs/.doctrees/examples/basic_labeled_datasets_example.doctree and b/docs/.doctrees/examples/basic_labeled_datasets_example.doctree differ
diff --git a/docs/.doctrees/examples/basic_masked_datasets_example.doctree b/docs/.doctrees/examples/basic_masked_datasets_example.doctree
index e1a253b..5208a64 100644
Binary files a/docs/.doctrees/examples/basic_masked_datasets_example.doctree and b/docs/.doctrees/examples/basic_masked_datasets_example.doctree differ
diff --git a/docs/.doctrees/index.doctree b/docs/.doctrees/index.doctree
index 094f62d..82f0d6c 100644
Binary files a/docs/.doctrees/index.doctree and b/docs/.doctrees/index.doctree differ
diff --git a/docs/.doctrees/license_link.doctree b/docs/.doctrees/license_link.doctree
index 9e06c3e..35bc67a 100644
Binary files a/docs/.doctrees/license_link.doctree and b/docs/.doctrees/license_link.doctree differ
diff --git a/docs/.doctrees/readme_link.doctree b/docs/.doctrees/readme_link.doctree
index 71619c8..46d0eb3 100644
Binary files a/docs/.doctrees/readme_link.doctree and b/docs/.doctrees/readme_link.doctree differ
diff --git a/docs/_images/0493ea7949336c8bafa6c2a4a4fb0b6f684533792c8a8aee276b84450c0c67ea.png b/docs/_images/0493ea7949336c8bafa6c2a4a4fb0b6f684533792c8a8aee276b84450c0c67ea.png
new file mode 100644
index 0000000..3c233d4
Binary files /dev/null and b/docs/_images/0493ea7949336c8bafa6c2a4a4fb0b6f684533792c8a8aee276b84450c0c67ea.png differ
diff --git a/docs/_images/14f77fdbefbffee0f204c62d7ca15a5007c9e1ccf295938f7dfbce40dff911db.png b/docs/_images/14f77fdbefbffee0f204c62d7ca15a5007c9e1ccf295938f7dfbce40dff911db.png
new file mode 100644
index 0000000..3b2f48b
Binary files /dev/null and b/docs/_images/14f77fdbefbffee0f204c62d7ca15a5007c9e1ccf295938f7dfbce40dff911db.png differ
diff --git a/docs/_images/1b9d3ae60591d9fa3d3765b22339938d8e2e0f747098c8f8890f0dfd31350263.png b/docs/_images/1b9d3ae60591d9fa3d3765b22339938d8e2e0f747098c8f8890f0dfd31350263.png
new file mode 100644
index 0000000..763289d
Binary files /dev/null and b/docs/_images/1b9d3ae60591d9fa3d3765b22339938d8e2e0f747098c8f8890f0dfd31350263.png differ
diff --git a/docs/_images/2aa6d8e78d812bedbf9cb39408d1225ee70f51ceba3d55b2907fc903dfca5cac.png b/docs/_images/2aa6d8e78d812bedbf9cb39408d1225ee70f51ceba3d55b2907fc903dfca5cac.png
new file mode 100644
index 0000000..0017d97
Binary files /dev/null and b/docs/_images/2aa6d8e78d812bedbf9cb39408d1225ee70f51ceba3d55b2907fc903dfca5cac.png differ
diff --git a/docs/_images/35caf5d6a16a402da32ca7028c903bf35d4a356a2cc6d94421b48e1413335fa9.png b/docs/_images/35caf5d6a16a402da32ca7028c903bf35d4a356a2cc6d94421b48e1413335fa9.png
new file mode 100644
index 0000000..83a494e
Binary files /dev/null and b/docs/_images/35caf5d6a16a402da32ca7028c903bf35d4a356a2cc6d94421b48e1413335fa9.png differ
diff --git a/docs/_images/4e0e23bdb35ff463a282b0662c51ebe482fdeb0fb21e017109fefe9fe682fff5.png b/docs/_images/4e0e23bdb35ff463a282b0662c51ebe482fdeb0fb21e017109fefe9fe682fff5.png
new file mode 100644
index 0000000..8367357
Binary files /dev/null and b/docs/_images/4e0e23bdb35ff463a282b0662c51ebe482fdeb0fb21e017109fefe9fe682fff5.png differ
diff --git a/docs/_images/51bb3e68b17996eba92cdf91265cc913643519e5f5dde09c75ea57aded64d6e8.png b/docs/_images/51bb3e68b17996eba92cdf91265cc913643519e5f5dde09c75ea57aded64d6e8.png
new file mode 100644
index 0000000..d5ef4bc
Binary files /dev/null and b/docs/_images/51bb3e68b17996eba92cdf91265cc913643519e5f5dde09c75ea57aded64d6e8.png differ
diff --git a/docs/_images/5e5194f3aacd660f1d602d50e46aff1b64a2b0f19f6be64ea92299b4973e7d97.png b/docs/_images/5e5194f3aacd660f1d602d50e46aff1b64a2b0f19f6be64ea92299b4973e7d97.png
new file mode 100644
index 0000000..1787a9d
Binary files /dev/null and b/docs/_images/5e5194f3aacd660f1d602d50e46aff1b64a2b0f19f6be64ea92299b4973e7d97.png differ
diff --git a/docs/_images/6e34f8ee7b278bfd8a09e6905ef8262eb47d5a1d1fb29441f644e0aa49eb63dd.png b/docs/_images/6e34f8ee7b278bfd8a09e6905ef8262eb47d5a1d1fb29441f644e0aa49eb63dd.png
new file mode 100644
index 0000000..81e4961
Binary files /dev/null and b/docs/_images/6e34f8ee7b278bfd8a09e6905ef8262eb47d5a1d1fb29441f644e0aa49eb63dd.png differ
diff --git a/docs/_images/754e8e0d0d095c71d66538d669e039318c29e89bca3e925b23b587a091be1603.png b/docs/_images/754e8e0d0d095c71d66538d669e039318c29e89bca3e925b23b587a091be1603.png
new file mode 100644
index 0000000..d2acd40
Binary files /dev/null and b/docs/_images/754e8e0d0d095c71d66538d669e039318c29e89bca3e925b23b587a091be1603.png differ
diff --git a/docs/_images/845f208486df8f8430ae79295cf9999af568961227d3a7f74143891b3feaf295.png b/docs/_images/845f208486df8f8430ae79295cf9999af568961227d3a7f74143891b3feaf295.png
new file mode 100644
index 0000000..aa7b9aa
Binary files /dev/null and b/docs/_images/845f208486df8f8430ae79295cf9999af568961227d3a7f74143891b3feaf295.png differ
diff --git a/docs/_images/8c2a029504368753fc523a63f10d74fc682d6cf97a23a2d3d607c1a82ed61aba.png b/docs/_images/8c2a029504368753fc523a63f10d74fc682d6cf97a23a2d3d607c1a82ed61aba.png
new file mode 100644
index 0000000..cb03a09
Binary files /dev/null and b/docs/_images/8c2a029504368753fc523a63f10d74fc682d6cf97a23a2d3d607c1a82ed61aba.png differ
diff --git a/docs/_images/9d6d81e4243d6ff0402a7e6324a0f516f03ecf71cdee64ec322aa28e8e1ef266.png b/docs/_images/9d6d81e4243d6ff0402a7e6324a0f516f03ecf71cdee64ec322aa28e8e1ef266.png
new file mode 100644
index 0000000..6011248
Binary files /dev/null and b/docs/_images/9d6d81e4243d6ff0402a7e6324a0f516f03ecf71cdee64ec322aa28e8e1ef266.png differ
diff --git a/docs/_images/a29b08481e2598a61febed5711e2d9285586fdc5a6112febb2b8b4f32587e173.png b/docs/_images/a29b08481e2598a61febed5711e2d9285586fdc5a6112febb2b8b4f32587e173.png
new file mode 100644
index 0000000..542d942
Binary files /dev/null and b/docs/_images/a29b08481e2598a61febed5711e2d9285586fdc5a6112febb2b8b4f32587e173.png differ
diff --git a/docs/_images/a89460b27e574359592e522b7085cd793e44415667461cfb486dd68e3e154352.png b/docs/_images/a89460b27e574359592e522b7085cd793e44415667461cfb486dd68e3e154352.png
new file mode 100644
index 0000000..89895bc
Binary files /dev/null and b/docs/_images/a89460b27e574359592e522b7085cd793e44415667461cfb486dd68e3e154352.png differ
diff --git a/docs/_images/db7d97bf2f0692371951419e0014fc7f39aecdce0e091723176657940810f457.png b/docs/_images/db7d97bf2f0692371951419e0014fc7f39aecdce0e091723176657940810f457.png
new file mode 100644
index 0000000..36cacb7
Binary files /dev/null and b/docs/_images/db7d97bf2f0692371951419e0014fc7f39aecdce0e091723176657940810f457.png differ
diff --git a/docs/_images/e16ff8424e5b53a5108a7f2c87a24cb50ae3850bb800f3a40b6d89a3c87b4be8.png b/docs/_images/e16ff8424e5b53a5108a7f2c87a24cb50ae3850bb800f3a40b6d89a3c87b4be8.png
new file mode 100644
index 0000000..86b4d2c
Binary files /dev/null and b/docs/_images/e16ff8424e5b53a5108a7f2c87a24cb50ae3850bb800f3a40b6d89a3c87b4be8.png differ
diff --git a/docs/_images/ee452c4ccae35fc77d15bea56dce7c6a644de67d7e546c281e6b1cc08f9c7b2e.png b/docs/_images/ee452c4ccae35fc77d15bea56dce7c6a644de67d7e546c281e6b1cc08f9c7b2e.png
new file mode 100644
index 0000000..e6c616b
Binary files /dev/null and b/docs/_images/ee452c4ccae35fc77d15bea56dce7c6a644de67d7e546c281e6b1cc08f9c7b2e.png differ
diff --git a/docs/_images/efb0e0c981a2a37c61e11c54650023abd22cd8217f80b9af551ca2c50a0ee414.png b/docs/_images/efb0e0c981a2a37c61e11c54650023abd22cd8217f80b9af551ca2c50a0ee414.png
new file mode 100644
index 0000000..2ae3513
Binary files /dev/null and b/docs/_images/efb0e0c981a2a37c61e11c54650023abd22cd8217f80b9af551ca2c50a0ee414.png differ
diff --git a/docs/_images/f99c6c24479005965377053093c7c4b95248d8010913dabe394641feeb087266.png b/docs/_images/f99c6c24479005965377053093c7c4b95248d8010913dabe394641feeb087266.png
new file mode 100644
index 0000000..f4d2746
Binary files /dev/null and b/docs/_images/f99c6c24479005965377053093c7c4b95248d8010913dabe394641feeb087266.png differ
diff --git a/docs/_images/fad1bda0fb99150f350bad3eb2d11531e570bf1fb182fda50859b693662a8898.png b/docs/_images/fad1bda0fb99150f350bad3eb2d11531e570bf1fb182fda50859b693662a8898.png
new file mode 100644
index 0000000..c30fde5
Binary files /dev/null and b/docs/_images/fad1bda0fb99150f350bad3eb2d11531e570bf1fb182fda50859b693662a8898.png differ
diff --git a/docs/_modules/index.html b/docs/_modules/index.html
index 746e046..a8a111c 100644
--- a/docs/_modules/index.html
+++ b/docs/_modules/index.html
@@ -6,7 +6,7 @@
 Overview: module code — ZarrDataset documentation
-
+
diff --git a/docs/_modules/zarrdataset/_samplers.html b/docs/_modules/zarrdataset/_samplers.html
index b09fb3e..2bce3b9 100644
--- a/docs/_modules/zarrdataset/_samplers.html
+++ b/docs/_modules/zarrdataset/_samplers.html
@@ -6,7 +6,7 @@
 zarrdataset._samplers — ZarrDataset documentation
-
+
@@ -242,26 +242,32 @@ Source code for zarrdataset._samplers
 
 
[docs]
-    def _compute_reference_indices(self, reference_coordinates: np.ndarray
+    def _compute_reference_indices(self, reference_coordinates: np.ndarray,
+                                   reference_axes_sizes: np.ndarray
                                    ) -> Tuple[List[np.ndarray], List[Tuple[int]]]:
         reference_per_axis = list(map(
-            lambda coords: np.append(np.full((1, ), fill_value=-float("inf")),
-                                     np.unique(coords)),
-            reference_coordinates.T
+            lambda coords, axis_size: np.concatenate((
+                np.full((1, ), fill_value=-float("inf")),
+                np.unique(coords),
+                np.full((1, ), fill_value=np.max(coords) + axis_size))),
+            reference_coordinates.T,
+            reference_axes_sizes
         ))
 
         reference_idx = map(
             lambda coord_axis, ref_axis:
-            np.argmax(ref_axis[None, ...]
-                      * (coord_axis[..., None] >= ref_axis[None, ...]),
-                      axis=-1),
+            np.max(np.arange(ref_axis.size)
+                   * (coord_axis.reshape(-1, 1) >= ref_axis[None, ...]),
+                   axis=1),
             reference_coordinates.T,
             reference_per_axis
         )
         reference_idx = np.stack(tuple(reference_idx), axis=-1)
+        reference_idx = reference_idx.reshape(reference_coordinates.T.shape)
+
         reference_idx = [
-            tuple(tls_coord - 1)
+            tuple(tls_coord)
             for tls_coord in reference_idx.reshape(-1, len(reference_per_axis))
         ]
@@ -275,13 +281,14 @@ Source code for zarrdataset._samplers
                                                                   np.ndarray]:
         tls_idx = map(
             lambda coord_axis, ref_axis:
-            np.argmax(ref_axis[None, None, ...]
-                      * (coord_axis[..., None] >= ref_axis[None, None, ...]),
-                      axis=-1),
+            np.max(np.arange(ref_axis.size)
+                   * (coord_axis.reshape(-1, 1) >= ref_axis[None, ...]),
+                   axis=1),
             np.moveaxis(corners_coordinates, -1, 0),
             reference_per_axis
         )
         tls_idx = np.stack(tuple(tls_idx), axis=-1)
+        tls_idx = tls_idx.reshape(corners_coordinates.shape)
 
         tls_coordinates = map(
             lambda tls_coord, ref_axis: ref_axis[tls_coord],
@@ -295,7 +302,7 @@ Source code for zarrdataset._samplers
         dist2cut = np.fabs(corners_coordinates - corners_cut[None])
         coverage = np.prod(dist2cut, axis=-1)
 
-        return coverage, tls_idx - 1
+        return coverage, tls_idx
@@ -303,6 +310,7 @@ Source code for zarrdataset._samplers
     def _compute_grid(self, chunk_tlbr: dict, mask: ImageBase,
                       patch_size: dict,
                       image_size: dict,
+                      min_area: float,
                       allow_incomplete_patches: bool = False):
         mask_scale = np.array([mask.scale.get(ax, 1)
                                for ax in self.spatial_axes],
@@ -329,7 +337,7 @@ Source code for zarrdataset._samplers
         ]
 
         if min(image_blocks) == 0:
-           return []
+            return []
 
         image_scale = np.array([patch_size.get(ax, 1)
                                 for ax in self.spatial_axes],
@@ -360,6 +368,7 @@ Source code for zarrdataset._samplers
              for ax in self.spatial_axes],
             dtype=np.float32
         )
+
         chunk_br_coordinates = np.array(
             [chunk_tlbr[ax].stop
              if chunk_tlbr[ax].stop is not None
@@ -377,12 +386,16 @@ Source code for zarrdataset._samplers
         )
         mask_coordinates = mask_coordinates[in_chunk]
 
+        # Translate the mask coordinates to the origin for comparison with
+        # image coordinates.
+        mask_coordinates -= chunk_tl_coordinates
+
         if all(map(operator.ge, image_scale, mask_scale)):
             mask_corners = self._compute_corners(mask_coordinates, mask_scale)
 
             (reference_per_axis,
              reference_idx) =\
-                self._compute_reference_indices(image_coordinates)
+                self._compute_reference_indices(image_coordinates, image_scale)
 
             (coverage,
              corners_idx) = self._compute_overlap(mask_corners,
@@ -390,19 +403,22 @@ Source code for zarrdataset._samplers
 
             covered_indices = [
                 reference_idx.index(tuple(idx))
+                if tuple(idx) in reference_idx else len(reference_idx)
                 for idx in corners_idx.reshape(-1, len(self.spatial_axes))
             ]
 
             patches_coverage = np.bincount(covered_indices,
                                            weights=coverage.flatten(),
-                                           minlength=np.prod(image_blocks))
+                                           minlength=len(reference_idx) + 1)
+            patches_coverage = patches_coverage[:-1]
 
         else:
             image_corners = self._compute_corners(image_coordinates,
                                                   image_scale)
 
             (reference_per_axis,
-             reference_idx) = self._compute_reference_indices(mask_coordinates)
+             reference_idx) = self._compute_reference_indices(mask_coordinates,
+                                                              mask_scale)
 
             (coverage,
              corners_idx) = self._compute_overlap(image_corners,
@@ -415,10 +431,6 @@ Source code for zarrdataset._samplers
 
             patches_coverage = np.sum(covered_indices * coverage, axis=0)
 
-        min_area = self._min_area
-        if min_area < 1:
-            min_area *= np.prod(list(patch_size.values()))
-
         minimum_covered_tls = image_coordinates[patches_coverage > min_area]
         minimum_covered_tls = minimum_covered_tls.astype(np.int64)
 
@@ -511,6 +523,7 @@ Source code for zarrdataset._samplers
             mask,
             self._max_chunk_size,
             image_size,
+            min_area=1,
             allow_incomplete_patches=True
         )
 
@@ -546,11 +559,16 @@ Source code for zarrdataset._samplers
             for ax in self.spatial_axes
         }
 
+        min_area = self._min_area
+        if min_area < 1:
+            min_area *= np.prod(list(patch_size.values()))
+
         valid_mask_toplefts = self._compute_valid_toplefts(
             chunk_tlbr,
             mask,
             stride,
             image_size=image_size,
+            min_area=min_area,
             allow_incomplete_patches=self._allow_incomplete_patches
         )
 
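The `_compute_reference_indices` and `_compute_overlap` hunks above both rely on the same binning trick: per axis, build a sorted list of bin edges made of a `-inf` sentinel, the unique reference top-left positions, and one extra upper bound placed one axis-size past the last top-left, then assign each coordinate the largest edge index whose edge it reaches. The sketch below reproduces that idea in isolation for a single axis; `bin_coordinates`, `reference_tls`, and `axis_size` are illustrative names for this note, not part of the zarrdataset API, and the values are made up.

import numpy as np


def bin_coordinates(coords: np.ndarray, reference_tls: np.ndarray,
                    axis_size: int) -> np.ndarray:
    # Bin edges along one axis: a -inf sentinel, the unique reference
    # top-left positions, and an upper bound one patch past the last one.
    edges = np.concatenate((
        np.full((1,), fill_value=-np.inf),
        np.unique(reference_tls),
        np.full((1,), fill_value=reference_tls.max() + axis_size),
    ))
    # For each coordinate, keep the largest edge index it reaches; every
    # coordinate clears the -inf sentinel, so the index is always >= 0.
    return np.max(np.arange(edges.size)
                  * (coords.reshape(-1, 1) >= edges[None, :]),
                  axis=1)


# Patches of extent 64 whose top-lefts sit at 0, 64 and 128 give the
# edges [-inf, 0, 64, 128, 192]:
print(bin_coordinates(np.array([10, 64, 200]), np.array([0, 64, 128]), 64))
# -> [1 2 4]: 10 lands in the first cell, 64 in the second, and 200 in the
#    extra bin past the last reference patch.

The extra upper edge gives out-of-range coordinates a dedicated overflow bin; in the hunk above that overflow is the `len(reference_idx)` index used when building `covered_indices`, counted via `minlength=len(reference_idx) + 1` and then discarded with `patches_coverage[:-1]`.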
diff --git a/docs/_sources/autoapi/zarrdataset/_augs/index.rst.txt b/docs/_sources/autoapi/zarrdataset/_augs/index.rst.txt
index ba70494..394ccaa 100644
--- a/docs/_sources/autoapi/zarrdataset/_augs/index.rst.txt
+++ b/docs/_sources/autoapi/zarrdataset/_augs/index.rst.txt
@@ -1,32 +1,34 @@
-:py:mod:`zarrdataset._augs`
-===========================
+zarrdataset._augs
+=================
 
 .. py:module:: zarrdataset._augs
 
 
-Module Contents
----------------
-
 Classes
-~~~~~~~
+-------
 
 .. autoapisummary::
 
    zarrdataset._augs.ToDtype
 
 
-
+Module Contents
+---------------
 
 .. py:class:: ToDtype(dtype: numpy.dtype)
 
-
    Bases: :py:obj:`object`
 
+
    Converted a numpy NDArray to the specified data type.
 
    :param dtype: The data type to cast the input array.
    :type dtype: numpy.dtype
 
+
+   .. py:attribute:: _dtype
+
+
    .. py:method:: __call__(image: numpy.ndarray) -> numpy.ndarray
 
       Casts the type of `image` to the data type specified with `dtype`.
@@ -38,6 +40,7 @@ Classes
       :rtype: numpy.ndarray
 
 
+
    .. py:method:: __repr__() -> str
 
       Return repr(self).
diff --git a/docs/_sources/autoapi/zarrdataset/_imageloaders/index.rst.txt b/docs/_sources/autoapi/zarrdataset/_imageloaders/index.rst.txt
index 71bc8e8..5c9e4e8 100644
--- a/docs/_sources/autoapi/zarrdataset/_imageloaders/index.rst.txt
+++ b/docs/_sources/autoapi/zarrdataset/_imageloaders/index.rst.txt
@@ -1,14 +1,19 @@
-:py:mod:`zarrdataset._imageloaders`
-===================================
+zarrdataset._imageloaders
+=========================
 
 .. py:module:: zarrdataset._imageloaders
 
 
-Module Contents
----------------
+Attributes
+----------
+
+.. autoapisummary::
+
+   zarrdataset._imageloaders.TIFFFILE_SUPPORT
+
 
 Classes
-~~~~~~~
+-------
 
 .. autoapisummary::
 
@@ -17,28 +22,20 @@ Classes
    zarrdataset._imageloaders.ImageCollection
 
 
-
 Functions
-~~~~~~~~~
+---------
 
 .. autoapisummary::
 
    zarrdataset._imageloaders.image2array
 
 
-
-Attributes
-~~~~~~~~~~
-
-.. autoapisummary::
-
-   zarrdataset._imageloaders.TIFFFILE_SUPPORT
-
+Module Contents
+---------------
 
 .. py:data:: TIFFFILE_SUPPORT
    :value: True
 
-   
 
 .. py:function:: image2array(arr_src: Union[str, zarr.Group, zarr.Array, numpy.ndarray], data_group: Union[str, int, None] = None, zarr_store: Union[zarr.storage.Store, None] = None)
 
@@ -62,88 +59,91 @@ Attributes
 
 .. py:class:: ImageBase(shape: Iterable[int], chunk_size: Union[Iterable[int], None] = None, source_axes: str = '', mode: str = '')
 
-
    Bases: :py:obj:`object`
 
-   .. py:property:: shape
-      :type: Iterable[int]
-
-
-   .. py:property:: chunk_size
-      :type: Iterable[int]
-
-
-   .. py:property:: scale
-      :type: dict
-
 
    .. py:attribute:: arr
+      :value: None
+
 
-      
 
    .. py:attribute:: spatial_axes
       :value: 'ZYX'
 
-      
+
 
    .. py:attribute:: source_axes
+      :value: None
+
 
-      
 
    .. py:attribute:: axes
+      :value: None
+
 
-      
 
    .. py:attribute:: mode
       :value: ''
 
-      
+
 
    .. py:attribute:: permute_order
+      :value: None
+
 
-      
 
    .. py:attribute:: _store
+      :value: None
+
 
-      
 
    .. py:attribute:: _new_axes
       :value: ''
 
-      
+
 
    .. py:attribute:: _drop_axes
       :value: ''
 
-      
+
 
    .. py:attribute:: _scale
+      :value: None
+
 
-      
 
    .. py:attribute:: _shape
+      :value: None
+
 
-      
 
    .. py:attribute:: _spatial_reference_shape
+      :value: None
+
 
-      
 
    .. py:attribute:: _spatial_reference_axes
+      :value: None
+
 
-      
 
    .. py:attribute:: _chunk_size
+      :value: None
+
 
-      
 
    .. py:attribute:: _cached_coords
+      :value: None
+
 
-      
 
    .. py:attribute:: _image_func
+      :value: None
+
+
+
+   .. py:attribute:: roi
 
-      
 
    .. py:method:: _iscached(coords)
 
@@ -169,11 +169,26 @@ Attributes
 
 
 
-.. py:class:: ImageLoader(filename: str, source_axes: str, data_group: Union[str, None] = None, axes: Union[str, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, spatial_axes: str = 'ZYX', mode: str = '')
+   .. py:property:: shape
+      :type: Iterable[int]
+
+
+
+   .. py:property:: chunk_size
+      :type: Iterable[int]
+
+
+
+   .. py:property:: scale
+      :type: dict
+
 
 
+.. py:class:: ImageLoader(filename: str, source_axes: str, data_group: Union[str, None] = None, axes: Union[str, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, spatial_axes: str = 'ZYX', mode: str = '')
+
    Bases: :py:obj:`ImageBase`
 
+
    Image lazy loader class.
 
    Opens the zarr file, or any image that can be open by TiffFile or PIL, as a
@@ -198,15 +213,36 @@ Attributes
    :param mode:
    :type mode: str
 
-   .. py:method:: __del__()
 
+   .. py:attribute:: mode
 
 
-.. py:class:: ImageCollection(collection_args: dict, spatial_axes: str = 'ZYX')
+   .. py:attribute:: spatial_axes
+
+
+   .. py:attribute:: roi_slices
 
 
+   .. py:attribute:: roi
+
+
+   .. py:attribute:: source_axes
+
+
+   .. py:attribute:: axes
+
+
+   .. py:attribute:: _image_func
+
+
+   .. py:method:: __del__()
+
+
+.. py:class:: ImageCollection(collection_args: dict, spatial_axes: str = 'ZYX')
+
    Bases: :py:obj:`object`
 
+
    A class to contain a collection of inputs from different modalities.
 
    This is used to match images with their respective labels and masks.
@@ -218,6 +254,16 @@ Attributes
                         modalities using as reference these axes from the `images` collection.
    :type spatial_axes: str
 
+
+   .. py:attribute:: reference_mode
+
+
+   .. py:attribute:: spatial_axes
+
+
+   .. py:attribute:: collection
+
+
    .. py:method:: _generate_mask()
 
 
@@ -227,7 +273,7 @@ Attributes
       collection shape on the `spatial_axes` only.
 
 
-   .. py:method:: __getitem__(index)
 
+   .. py:method:: __getitem__(index)
 
 
diff --git a/docs/_sources/autoapi/zarrdataset/_maskfuncs/index.rst.txt b/docs/_sources/autoapi/zarrdataset/_maskfuncs/index.rst.txt
index c9c83d2..a867917 100644
--- a/docs/_sources/autoapi/zarrdataset/_maskfuncs/index.rst.txt
+++ b/docs/_sources/autoapi/zarrdataset/_maskfuncs/index.rst.txt
@@ -1,14 +1,11 @@
-:py:mod:`zarrdataset._maskfuncs`
-================================
+zarrdataset._maskfuncs
+======================
 
 .. py:module:: zarrdataset._maskfuncs
 
 
-Module Contents
----------------
-
 Classes
-~~~~~~~
+-------
 
 .. autoapisummary::
 
@@ -16,29 +13,34 @@ Classes
    zarrdataset._maskfuncs.WSITissueMaskGenerator
 
 
-
+Module Contents
+---------------
 
 .. py:class:: MaskGenerator(axes)
 
-
    Bases: :py:obj:`object`
 
+
    Base class to define transformation functions on ImageBase objects.
 
 
+
+   .. py:attribute:: axes
+
+
    .. py:method:: _compute_transform(image: numpy.ndarray) -> numpy.ndarray
       :abstractmethod:
 
 
-   .. py:method:: __call__(image: numpy.ndarray) -> numpy.ndarray
 
+   .. py:method:: __call__(image: numpy.ndarray) -> numpy.ndarray
 
 
 .. py:class:: WSITissueMaskGenerator(mask_scale: float = 1 / 16, min_size: int = 16, area_threshold: int = 128, thresh: Union[float, None] = None, axes: str = 'YX')
 
-
    Bases: :py:obj:`MaskGenerator`
 
+
    Mask generator for tissue objects in Whole Slide Images (WSI).
 
    This will compute a mask of scale `mask_scale` from the input image where
@@ -55,7 +57,19 @@ Classes
    :param axes:
    :type axes: str
 
-   .. py:method:: _compute_transform(image: numpy.ndarray) -> numpy.ndarray
 
+   .. py:attribute:: _mask_scale
+
+
+   .. py:attribute:: _min_size
+
+
+   .. py:attribute:: _area_threshold_2
+
+
+   .. py:attribute:: _thresh
+
+
+   .. py:method:: _compute_transform(image: numpy.ndarray) -> numpy.ndarray
 
 
diff --git a/docs/_sources/autoapi/zarrdataset/_samplers/index.rst.txt b/docs/_sources/autoapi/zarrdataset/_samplers/index.rst.txt
index 8f0e905..fd6030d 100644
--- a/docs/_sources/autoapi/zarrdataset/_samplers/index.rst.txt
+++ b/docs/_sources/autoapi/zarrdataset/_samplers/index.rst.txt
@@ -1,14 +1,11 @@
-:py:mod:`zarrdataset._samplers`
-===============================
+zarrdataset._samplers
+=====================
 
 .. py:module:: zarrdataset._samplers
 
 
-Module Contents
----------------
-
 Classes
-~~~~~~~
+-------
 
 .. autoapisummary::
 
@@ -16,13 +13,14 @@ Classes
    zarrdataset._samplers.BlueNoisePatchSampler
 
 
-
+Module Contents
+---------------
 
 .. py:class:: PatchSampler(patch_size: Union[int, Iterable[int], dict], stride: Union[int, Iterable[int], dict, None] = None, pad: Union[int, Iterable[int], dict, None] = None, min_area: Union[int, float] = 1, spatial_axes: str = 'ZYX', allow_incomplete_patches: bool = False)
 
-
    Bases: :py:obj:`object`
 
+
    Patch sampling algorithm to retrieve image patches/windows from images.
 
    If the image used for extracting patches has a mask associated to it, only
@@ -58,16 +56,38 @@ Classes
                                     than the specified patch size.
    :type allow_incomplete_patches: bool
 
+
+   .. py:attribute:: _max_chunk_size
+
+
+   .. py:attribute:: spatial_axes
+
+
+   .. py:attribute:: _patch_size
+
+
+   .. py:attribute:: _stride
+
+
+   .. py:attribute:: _pad
+
+
+   .. py:attribute:: _min_area
+
+
+   .. py:attribute:: _allow_incomplete_patches
+
+
    .. py:method:: _compute_corners(coordinates: numpy.ndarray, scale: numpy.ndarray) -> numpy.ndarray
 
 
-   .. py:method:: _compute_reference_indices(reference_coordinates: numpy.ndarray) -> Tuple[List[numpy.ndarray], List[Tuple[int]]]
+   .. py:method:: _compute_reference_indices(reference_coordinates: numpy.ndarray, reference_axes_sizes: numpy.ndarray) -> Tuple[List[numpy.ndarray], List[Tuple[int]]]
 
 
    .. py:method:: _compute_overlap(corners_coordinates: numpy.ndarray, reference_per_axis: numpy.ndarray) -> Tuple[numpy.ndarray, numpy.ndarray]
 
 
-   .. py:method:: _compute_grid(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, image_size: dict, allow_incomplete_patches: bool = False)
+   .. py:method:: _compute_grid(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, image_size: dict, min_area: float, allow_incomplete_patches: bool = False)
 
 
    .. py:method:: _compute_valid_toplefts(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, **kwargs)
@@ -93,6 +113,7 @@ Classes
       :rtype: list of tuples of slices
 
 
+
    .. py:method:: compute_patches(image_collection: zarrdataset._imageloaders.ImageCollection, chunk_tlbr: dict) -> Iterable[dict]
 
 
@@ -105,9 +126,9 @@ Classes
 
 .. py:class:: BlueNoisePatchSampler(patch_size: Union[int, Iterable[int], dict], resample_positions=False, allow_overlap=False, **kwargs)
 
-
    Bases: :py:obj:`PatchSampler`
 
+
    Patch sampler that retrieves patches from coordinates sampled using the
    Bridson sampling algorithm, also known as Blue-noise sampling algorithm.
 
@@ -119,6 +140,18 @@ Classes
    :param allow_overlap: Whether overlapping of patches is allowed or not.
    :type allow_overlap: bool
 
+
+   .. py:attribute:: _base_chunk_tls
+      :value: None
+
+
+
+   .. py:attribute:: _resample_positions
+
+
+   .. py:attribute:: _allow_overlap
+
+
    .. py:method:: compute_sampling_positions(force=False) -> None
 
       Compute the sampling positions using blue-noise sampling.
@@ -127,7 +160,7 @@ Classes
       :type force: bool
 
 
-   .. py:method:: _compute_valid_toplefts(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, **kwargs)
 
+   .. py:method:: _compute_valid_toplefts(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, **kwargs)
 
 
diff --git a/docs/_sources/autoapi/zarrdataset/_utils/index.rst.txt b/docs/_sources/autoapi/zarrdataset/_utils/index.rst.txt
index 8bf4f01..3fc1ed7 100644
--- a/docs/_sources/autoapi/zarrdataset/_utils/index.rst.txt
+++ b/docs/_sources/autoapi/zarrdataset/_utils/index.rst.txt
@@ -1,15 +1,11 @@
-:py:mod:`zarrdataset._utils`
-============================
+zarrdataset._utils
+==================
 
 .. py:module:: zarrdataset._utils
 
 
-Module Contents
----------------
-
-
 Functions
-~~~~~~~~~
+---------
 
 .. autoapisummary::
 
@@ -23,6 +19,8 @@ Functions
    zarrdataset._utils.translate2roi
 
 
+Module Contents
+---------------
 
 .. py:function:: parse_rois(rois_str: Iterable[str]) -> List[slice]
 
@@ -210,4 +208,3 @@ Functions
 
 .. py:function:: translate2roi(index: dict, roi: tuple, source_axes: str, axes: str)
 
-
diff --git a/docs/_sources/autoapi/zarrdataset/_zarrdataset/index.rst.txt b/docs/_sources/autoapi/zarrdataset/_zarrdataset/index.rst.txt
index f7b3792..8db4955 100644
--- a/docs/_sources/autoapi/zarrdataset/_zarrdataset/index.rst.txt
+++ b/docs/_sources/autoapi/zarrdataset/_zarrdataset/index.rst.txt
@@ -1,14 +1,20 @@
-:py:mod:`zarrdataset._zarrdataset`
-==================================
+zarrdataset._zarrdataset
+========================
 
 .. py:module:: zarrdataset._zarrdataset
 
 
-Module Contents
----------------
+Attributes
+----------
+
+.. autoapisummary::
+
+   zarrdataset._zarrdataset.TQDM_SUPPORT
+   zarrdataset._zarrdataset.PYTORCH_SUPPORT
+
 
 Classes
-~~~~~~~
+-------
 
 .. autoapisummary::
 
@@ -20,9 +26,8 @@ Classes
    zarrdataset._zarrdataset.ZarrDataset
 
 
-
 Functions
-~~~~~~~~~
+---------
 
 .. autoapisummary::
 
@@ -30,25 +35,16 @@ Functions
    zarrdataset._zarrdataset.chained_zarrdataset_worker_init_fn
 
 
-
-Attributes
-~~~~~~~~~~
-
-.. autoapisummary::
-
-   zarrdataset._zarrdataset.TQDM_SUPPORT
-   zarrdataset._zarrdataset.PYTORCH_SUPPORT
-
+Module Contents
+---------------
 
 .. py:data:: TQDM_SUPPORT
    :value: True
 
-   
 
 .. py:data:: PYTORCH_SUPPORT
    :value: True
 
-   
 
 .. py:function:: zarrdataset_worker_init_fn(worker_id)
 
@@ -64,23 +60,34 @@ Attributes
 
 .. py:class:: ImageSample(im_id: int, chk_id: int, shuffle: bool = False)
 
-
    .. py:attribute:: _current_patch_idx
       :value: 0
 
-      
+
 
    .. py:attribute:: _ordering
+      :value: None
+
 
-      
 
    .. py:attribute:: _rng_seed
+      :value: None
+
 
-      
 
    .. py:attribute:: num_patches
+      :value: None
+
+
+
+   .. py:attribute:: im_id
+
+
+   .. py:attribute:: chk_id
+
+
+   .. py:attribute:: _shuffle
 
-      
 
    .. py:method:: free_sampler()
 
@@ -88,12 +95,11 @@ Attributes
    .. py:method:: next_patch()
 
 
-
 .. py:class:: DatasetSpecs(modality: str, filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, transform: Union[Callable, None] = None, add_to_output: bool = True)
 
-
    Bases: :py:obj:`dict`
 
+
    Data specification guidelines to add image modalities to a ZarrDataset.
 
    :param modality: Specifies the use of this dataset (input image data, labels, masks).
@@ -136,9 +142,9 @@ Attributes
 
 .. py:class:: ImagesDatasetSpecs(filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, transform: Union[Callable, None] = None, modality: str = 'images')
 
-
    Bases: :py:obj:`DatasetSpecs`
 
+
    Specification to add `image` data to a ZarrDataset.
 
    :param filenames:              Iterable[str],
@@ -177,9 +183,9 @@ Attributes
 
 .. py:class:: LabelsDatasetSpecs(filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, transform: Union[Callable, None] = None, input_label_transform: Union[Callable, None] = None, input_mode: str = 'images', modality: str = 'labels')
 
-
    Bases: :py:obj:`DatasetSpecs`
 
+
    Specification to add `labels` to a ZarrDataset.
 
    :param filenames:              Iterable[str],
@@ -221,9 +227,9 @@ Attributes
 
 .. py:class:: MasksDatasetSpecs(filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, modality: str = 'masks')
 
-
    Bases: :py:obj:`DatasetSpecs`
 
+
    Specification to add `masks` to a ZarrDataset.
 
    :param filenames:              Iterable[str],
@@ -259,9 +265,9 @@ Attributes
 
 .. py:class:: ZarrDataset(dataset_specs: Union[Iterable[dict], None] = None, patch_sampler: Union[zarrdataset._samplers.PatchSampler, None] = None, shuffle: bool = False, progress_bar: bool = False, return_positions: bool = False, return_worker_id: bool = False, draw_same_chunk: bool = False)
 
-
    Bases: :py:obj:`torch.utils.data.IterableDataset`
 
+
    A Zarr-based dataset class capable of handling large volumes of image
    data stored in OME-NGFF Zarr format. This class can match the coordinates
    of the different image modalities to those in the `images` mode, so labels
@@ -291,6 +297,75 @@ Attributes
                            whithin the input image.
    :type draw_same_chunk: bool
 
+
+   .. py:attribute:: _worker_sel
+
+
+   .. py:attribute:: _worker_id
+      :value: 0
+
+
+
+   .. py:attribute:: _num_workers
+      :value: 1
+
+
+
+   .. py:attribute:: _shuffle
+
+
+   .. py:attribute:: _progress_bar
+
+
+   .. py:attribute:: _return_positions
+
+
+   .. py:attribute:: _return_worker_id
+
+
+   .. py:attribute:: _draw_same_chunk
+
+
+   .. py:attribute:: _patch_sampler
+
+
+   .. py:attribute:: _transforms
+
+
+   .. py:attribute:: _output_order
+      :value: []
+
+
+
+   .. py:attribute:: _collections
+
+
+   .. py:attribute:: _zarr_store
+
+
+   .. py:attribute:: _image_loader_func
+
+
+   .. py:attribute:: _arr_lists
+      :value: []
+
+
+
+   .. py:attribute:: _toplefts
+      :value: []
+
+
+
+   .. py:attribute:: _ref_mod
+      :value: None
+
+
+
+   .. py:attribute:: _initialized
+      :value: False
+
+
+
    .. py:method:: _initialize(force=False)
 
 
@@ -315,6 +390,7 @@ Attributes
       :type transform: Callable
 
 
+
    .. py:method:: add_modality(modality: str, filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, transforms: Union[collections.OrderedDict, None] = None, add_to_output: bool = True)
 
       Add a new modality to the dataset.
@@ -363,6 +439,7 @@ Attributes
       :type add_to_output: bool
 
 
+
    .. py:method:: __repr__() -> str
 
       ZarrDataset string representation.
diff --git a/docs/_sources/autoapi/zarrdataset/index.rst.txt b/docs/_sources/autoapi/zarrdataset/index.rst.txt
index e659b00..aadc96d 100644
--- a/docs/_sources/autoapi/zarrdataset/index.rst.txt
+++ b/docs/_sources/autoapi/zarrdataset/index.rst.txt
@@ -1,28 +1,25 @@
-:py:mod:`zarrdataset`
-=====================
+zarrdataset
+===========
 
 .. py:module:: zarrdataset
 
 
 Submodules
 ----------
+
 .. toctree::
-   :titlesonly:
    :maxdepth: 1
 
-   _augs/index.rst
-   _imageloaders/index.rst
-   _maskfuncs/index.rst
-   _samplers/index.rst
-   _utils/index.rst
-   _zarrdataset/index.rst
-
+   /autoapi/zarrdataset/_augs/index
+   /autoapi/zarrdataset/_imageloaders/index
+   /autoapi/zarrdataset/_maskfuncs/index
+   /autoapi/zarrdataset/_samplers/index
+   /autoapi/zarrdataset/_utils/index
+   /autoapi/zarrdataset/_zarrdataset/index
 
-Package Contents
-----------------
 
 Classes
-~~~~~~~
+-------
 
 .. autoapisummary::
 
@@ -41,9 +38,8 @@ Classes
    zarrdataset.ZarrDataset
 
 
-
 Functions
-~~~~~~~~~
+---------
 
 .. autoapisummary::
 
@@ -60,12 +56,14 @@ Functions
    zarrdataset.chained_zarrdataset_worker_init_fn
 
 
+Package Contents
+----------------
 
 .. py:class:: PatchSampler(patch_size: Union[int, Iterable[int], dict], stride: Union[int, Iterable[int], dict, None] = None, pad: Union[int, Iterable[int], dict, None] = None, min_area: Union[int, float] = 1, spatial_axes: str = 'ZYX', allow_incomplete_patches: bool = False)
 
-
    Bases: :py:obj:`object`
 
+
    Patch sampling algorithm to retrieve image patches/windows from images.
 
    If the image used for extracting patches has a mask associated to it, only
@@ -101,16 +99,38 @@ Functions
                                     than the specified patch size.
    :type allow_incomplete_patches: bool
 
+
+   .. py:attribute:: _max_chunk_size
+
+
+   .. py:attribute:: spatial_axes
+
+
+   .. py:attribute:: _patch_size
+
+
+   .. py:attribute:: _stride
+
+
+   .. py:attribute:: _pad
+
+
+   .. py:attribute:: _min_area
+
+
+   .. py:attribute:: _allow_incomplete_patches
+
+
    .. py:method:: _compute_corners(coordinates: numpy.ndarray, scale: numpy.ndarray) -> numpy.ndarray
 
 
-   .. py:method:: _compute_reference_indices(reference_coordinates: numpy.ndarray) -> Tuple[List[numpy.ndarray], List[Tuple[int]]]
+   .. py:method:: _compute_reference_indices(reference_coordinates: numpy.ndarray, reference_axes_sizes: numpy.ndarray) -> Tuple[List[numpy.ndarray], List[Tuple[int]]]
 
 
    .. py:method:: _compute_overlap(corners_coordinates: numpy.ndarray, reference_per_axis: numpy.ndarray) -> Tuple[numpy.ndarray, numpy.ndarray]
 
 
-   .. py:method:: _compute_grid(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, image_size: dict, allow_incomplete_patches: bool = False)
+   .. py:method:: _compute_grid(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, image_size: dict, min_area: float, allow_incomplete_patches: bool = False)
 
 
    .. py:method:: _compute_valid_toplefts(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, **kwargs)
@@ -136,6 +156,7 @@ Functions
       :rtype: list of tuples of slices
 
 
+
    .. py:method:: compute_patches(image_collection: zarrdataset._imageloaders.ImageCollection, chunk_tlbr: dict) -> Iterable[dict]
 
 
@@ -148,9 +169,9 @@ Functions
 
 .. py:class:: BlueNoisePatchSampler(patch_size: Union[int, Iterable[int], dict], resample_positions=False, allow_overlap=False, **kwargs)
 
-
    Bases: :py:obj:`PatchSampler`
 
+
    Patch sampler that retrieves patches from coordinates sampled using the
    Bridson sampling algorithm, also known as Blue-noise sampling algorithm.
 
@@ -162,6 +183,18 @@ Functions
    :param allow_overlap: Whether overlapping of patches is allowed or not.
    :type allow_overlap: bool
 
+
+   .. py:attribute:: _base_chunk_tls
+      :value: None
+
+
+
+   .. py:attribute:: _resample_positions
+
+
+   .. py:attribute:: _allow_overlap
+
+
    .. py:method:: compute_sampling_positions(force=False) -> None
 
       Compute the sampling positions using blue-noise sampling.
@@ -170,31 +203,35 @@ Functions
       :type force: bool
 
 
-   .. py:method:: _compute_valid_toplefts(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, **kwargs)
 
+   .. py:method:: _compute_valid_toplefts(chunk_tlbr: dict, mask: zarrdataset._imageloaders.ImageBase, patch_size: dict, **kwargs)
 
 
 .. py:class:: MaskGenerator(axes)
 
-
    Bases: :py:obj:`object`
 
+
    Base class to define transformation functions on ImageBase objects.
 
 
+
+   .. py:attribute:: axes
+
+
    .. py:method:: _compute_transform(image: numpy.ndarray) -> numpy.ndarray
       :abstractmethod:
 
 
-   .. py:method:: __call__(image: numpy.ndarray) -> numpy.ndarray
 
+   .. py:method:: __call__(image: numpy.ndarray) -> numpy.ndarray
 
 
 .. py:class:: WSITissueMaskGenerator(mask_scale: float = 1 / 16, min_size: int = 16, area_threshold: int = 128, thresh: Union[float, None] = None, axes: str = 'YX')
 
-
    Bases: :py:obj:`MaskGenerator`
 
+
    Mask generator for tissue objects in Whole Slide Images (WSI).
 
    This will compute a mask of scale `mask_scale` from the input image where
@@ -211,8 +248,20 @@ Functions
    :param axes:
    :type axes: str
 
-   .. py:method:: _compute_transform(image: numpy.ndarray) -> numpy.ndarray
 
+   .. py:attribute:: _mask_scale
+
+
+   .. py:attribute:: _min_size
+
+
+   .. py:attribute:: _area_threshold_2
+
+
+   .. py:attribute:: _thresh
+
+
+   .. py:method:: _compute_transform(image: numpy.ndarray) -> numpy.ndarray
 
 
 .. py:function:: image2array(arr_src: Union[str, zarr.Group, zarr.Array, numpy.ndarray], data_group: Union[str, int, None] = None, zarr_store: Union[zarr.storage.Store, None] = None)
@@ -237,88 +286,91 @@ Functions
 
 .. py:class:: ImageBase(shape: Iterable[int], chunk_size: Union[Iterable[int], None] = None, source_axes: str = '', mode: str = '')
 
-
    Bases: :py:obj:`object`
 
-   .. py:property:: shape
-      :type: Iterable[int]
-
-
-   .. py:property:: chunk_size
-      :type: Iterable[int]
-
-
-   .. py:property:: scale
-      :type: dict
-
 
    .. py:attribute:: arr
+      :value: None
+
 
-      
 
    .. py:attribute:: spatial_axes
       :value: 'ZYX'
 
-      
+
 
    .. py:attribute:: source_axes
+      :value: None
+
 
-      
 
    .. py:attribute:: axes
+      :value: None
+
 
-      
 
    .. py:attribute:: mode
       :value: ''
 
-      
+
 
    .. py:attribute:: permute_order
+      :value: None
+
 
-      
 
    .. py:attribute:: _store
+      :value: None
+
 
-      
 
    .. py:attribute:: _new_axes
       :value: ''
 
-      
+
 
    .. py:attribute:: _drop_axes
       :value: ''
 
-      
+
 
    .. py:attribute:: _scale
+      :value: None
+
 
-      
 
    .. py:attribute:: _shape
+      :value: None
+
 
-      
 
    .. py:attribute:: _spatial_reference_shape
+      :value: None
+
 
-      
 
    .. py:attribute:: _spatial_reference_axes
+      :value: None
+
 
-      
 
    .. py:attribute:: _chunk_size
+      :value: None
+
 
-      
 
    .. py:attribute:: _cached_coords
+      :value: None
+
 
-      
 
    .. py:attribute:: _image_func
+      :value: None
+
+
+
+   .. py:attribute:: roi
 
-      
 
    .. py:method:: _iscached(coords)
 
@@ -344,11 +396,26 @@ Functions
 
 
 
-.. py:class:: ImageLoader(filename: str, source_axes: str, data_group: Union[str, None] = None, axes: Union[str, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, spatial_axes: str = 'ZYX', mode: str = '')
+   .. py:property:: shape
+      :type: Iterable[int]
+
+
+
+   .. py:property:: chunk_size
+      :type: Iterable[int]
+
+
+
+   .. py:property:: scale
+      :type: dict
 
 
+
+.. py:class:: ImageLoader(filename: str, source_axes: str, data_group: Union[str, None] = None, axes: Union[str, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, spatial_axes: str = 'ZYX', mode: str = '')
+
    Bases: :py:obj:`ImageBase`
 
+
    Image lazy loader class.
 
    Opens the zarr file, or any image that can be open by TiffFile or PIL, as a
@@ -373,15 +440,36 @@ Functions
    :param mode:
    :type mode: str
 
-   .. py:method:: __del__()
 
+   .. py:attribute:: mode
 
 
-.. py:class:: ImageCollection(collection_args: dict, spatial_axes: str = 'ZYX')
+   .. py:attribute:: spatial_axes
+
+
+   .. py:attribute:: roi_slices
+
+
+   .. py:attribute:: roi
+
+
+   .. py:attribute:: source_axes
+
+
+   .. py:attribute:: axes
+
+
+   .. py:attribute:: _image_func
+
+
+   .. py:method:: __del__()
+
 
+.. py:class:: ImageCollection(collection_args: dict, spatial_axes: str = 'ZYX')
 
    Bases: :py:obj:`object`
 
+
    A class to contain a collection of inputs from different modalities.
 
    This is used to match images with their respective labels and masks.
@@ -393,6 +481,16 @@ Functions
                         modalities using as reference these axes from the `images` collection.
    :type spatial_axes: str
 
+
+   .. py:attribute:: reference_mode
+
+
+   .. py:attribute:: spatial_axes
+
+
+   .. py:attribute:: collection
+
+
    .. py:method:: _generate_mask()
 
 
@@ -402,8 +500,8 @@ Functions
       collection shape on the `spatial_axes` only.
 
 
-   .. py:method:: __getitem__(index)
 
+   .. py:method:: __getitem__(index)
 
 
 .. py:function:: parse_rois(rois_str: Iterable[str]) -> List[slice]
@@ -592,17 +690,20 @@ Functions
 
 .. py:function:: translate2roi(index: dict, roi: tuple, source_axes: str, axes: str)
 
-
 .. py:class:: ToDtype(dtype: numpy.dtype)
 
-
    Bases: :py:obj:`object`
 
+
    Converted a numpy NDArray to the specified data type.
 
    :param dtype: The data type to cast the input array.
    :type dtype: numpy.dtype
 
+
+   .. py:attribute:: _dtype
+
+
    .. py:method:: __call__(image: numpy.ndarray) -> numpy.ndarray
 
       Casts the type of `image` to the data type specified with `dtype`.
@@ -614,6 +715,7 @@ Functions
       :rtype: numpy.ndarray
 
 
+
    .. py:method:: __repr__() -> str
 
       Return repr(self).
@@ -622,9 +724,9 @@ Functions
 
 .. py:class:: DatasetSpecs(modality: str, filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, transform: Union[Callable, None] = None, add_to_output: bool = True)
 
-
    Bases: :py:obj:`dict`
 
+
    Data specification guidelines to add image modalities to a ZarrDataset.
 
    :param modality: Specifies the use of this dataset (input image data, labels, masks).
@@ -667,9 +769,9 @@ Functions
 
 .. py:class:: ImagesDatasetSpecs(filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, transform: Union[Callable, None] = None, modality: str = 'images')
 
-
    Bases: :py:obj:`DatasetSpecs`
 
+
    Specification to add `image` data to a ZarrDataset.
 
    :param filenames:              Iterable[str],
@@ -708,9 +810,9 @@ Functions
 
 .. py:class:: LabelsDatasetSpecs(filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, transform: Union[Callable, None] = None, input_label_transform: Union[Callable, None] = None, input_mode: str = 'images', modality: str = 'labels')
 
-
    Bases: :py:obj:`DatasetSpecs`
 
+
    Specification to add `labels` to a ZarrDataset.
 
    :param filenames:              Iterable[str],
@@ -752,9 +854,9 @@ Functions
 
 .. py:class:: MasksDatasetSpecs(filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, modality: str = 'masks')
 
-
    Bases: :py:obj:`DatasetSpecs`
 
+
    Specification to add `masks` to a ZarrDataset.
 
    :param filenames:              Iterable[str],
@@ -790,9 +892,9 @@ Functions
 
 .. py:class:: ZarrDataset(dataset_specs: Union[Iterable[dict], None] = None, patch_sampler: Union[zarrdataset._samplers.PatchSampler, None] = None, shuffle: bool = False, progress_bar: bool = False, return_positions: bool = False, return_worker_id: bool = False, draw_same_chunk: bool = False)
 
-
    Bases: :py:obj:`torch.utils.data.IterableDataset`
 
+
    A Zarr-based dataset class capable of handling large volumes of image
    data stored in OME-NGFF Zarr format. This class can match the coordinates
    of the different image modalities to those in the `images` mode, so labels
@@ -822,6 +924,75 @@ Functions
                            whithin the input image.
    :type draw_same_chunk: bool
 
+
+   .. py:attribute:: _worker_sel
+
+
+   .. py:attribute:: _worker_id
+      :value: 0
+
+
+
+   .. py:attribute:: _num_workers
+      :value: 1
+
+
+
+   .. py:attribute:: _shuffle
+
+
+   .. py:attribute:: _progress_bar
+
+
+   .. py:attribute:: _return_positions
+
+
+   .. py:attribute:: _return_worker_id
+
+
+   .. py:attribute:: _draw_same_chunk
+
+
+   .. py:attribute:: _patch_sampler
+
+
+   .. py:attribute:: _transforms
+
+
+   .. py:attribute:: _output_order
+      :value: []
+
+
+
+   .. py:attribute:: _collections
+
+
+   .. py:attribute:: _zarr_store
+
+
+   .. py:attribute:: _image_loader_func
+
+
+   .. py:attribute:: _arr_lists
+      :value: []
+
+
+
+   .. py:attribute:: _toplefts
+      :value: []
+
+
+
+   .. py:attribute:: _ref_mod
+      :value: None
+
+
+
+   .. py:attribute:: _initialized
+      :value: False
+
+
+
    .. py:method:: _initialize(force=False)
 
 
@@ -846,6 +1017,7 @@ Functions
       :type transform: Callable
 
 
+
    .. py:method:: add_modality(modality: str, filenames: Union[str, Iterable[str], zarr.Group, Iterable[zarr.Group], zarr.Array, Iterable[zarr.Array], numpy.ndarray, Iterable[numpy.ndarray]], source_axes: str, axes: Union[str, None] = None, data_group: Union[str, int, None] = None, roi: Union[str, slice, Iterable[slice], None] = None, image_loader_func: Union[Callable, None] = None, zarr_store: Union[zarr.storage.Store, None] = None, transforms: Union[collections.OrderedDict, None] = None, add_to_output: bool = True)
 
       Add a new modality to the dataset.
@@ -894,6 +1066,7 @@ Functions
       :type add_to_output: bool
 
 
+
    .. py:method:: __repr__() -> str
 
       ZarrDataset string representation.
diff --git a/docs/_sources/examples/advanced_example_pytorch.ipynb.txt b/docs/_sources/examples/advanced_example_pytorch.ipynb.txt
index 3a40e68..8c0d4ec 100644
--- a/docs/_sources/examples/advanced_example_pytorch.ipynb.txt
+++ b/docs/_sources/examples/advanced_example_pytorch.ipynb.txt
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "1aa48a0d",
+   "id": "34e29748",
    "metadata": {},
    "source": [
     "# Integration of ZarrDataset with PyTorch's DataLoader (Advanced)"
@@ -11,7 +11,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "ec43ebb4",
+   "id": "a0873b31",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -24,7 +24,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "e71a076c",
+   "id": "fa485c1c",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -45,7 +45,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "55f9b176",
+   "id": "fab3089e",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -60,7 +60,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "574ae208",
+   "id": "4b6aa9b3",
    "metadata": {},
    "source": [
     "### Extracting patches of size 128x128x32 voxels from a three-dimensional image"
@@ -68,7 +68,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "ee0be0b2",
+   "id": "c42a65b0",
    "metadata": {},
    "source": [
     "Sample the image randomly"
@@ -77,7 +77,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "02f26d14",
+   "id": "365fd8df",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -87,7 +87,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "481f8f9f",
+   "id": "cf5c1089",
    "metadata": {},
    "source": [
     "Transform the input data from uint16 to float16 with a torchvision pre-processing pipeline"
@@ -96,7 +96,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "56e6b88d",
+   "id": "e06ef22f",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -109,7 +109,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "5b9f6109",
+   "id": "486b006b",
    "metadata": {},
    "source": [
     "Pass the pre-processing function to ZarrDataset to be used when generating the samples.\n",
@@ -120,7 +120,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "680bf059",
+   "id": "681ab078",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -145,7 +145,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "4b53c720",
+   "id": "0a98874c",
    "metadata": {},
    "source": [
     "### Create a ChainDataset from a set of ZarrDatasets that can be put together a single large dataset"
@@ -154,7 +154,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "7ef15a14",
+   "id": "164e9457",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -163,7 +163,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "25ba71aa",
+   "id": "a85d5f69",
    "metadata": {},
    "source": [
     "Make sure the chained_zarrdataset_worker_init_fn function is passed to the DataLoader, so the workers can initialize the dataset correctly"
@@ -172,7 +172,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "81621581",
+   "id": "da6637fc",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -186,7 +186,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "7cda9c14",
+   "id": "84d085c6",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -211,7 +211,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "1ca50c91",
+   "id": "2f706067",
    "metadata": {},
    "source": [
     "### Generate a grid with the sampled patches using `torchvision` utilities"
@@ -220,7 +220,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "946dce3c",
+   "id": "bc2f7363",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -230,7 +230,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "1e074de3",
+   "id": "50d7bc83",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -243,7 +243,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "a0060837",
+   "id": "2cfab3b7",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -254,7 +254,7 @@
  ],
  "metadata": {
   "execution": {
-   "timeout": 120
+   "timeout": 600
   },
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
diff --git a/docs/_sources/examples/advanced_example_pytorch_inference.ipynb.txt b/docs/_sources/examples/advanced_example_pytorch_inference.ipynb.txt
index 8419abd..1a6491c 100644
--- a/docs/_sources/examples/advanced_example_pytorch_inference.ipynb.txt
+++ b/docs/_sources/examples/advanced_example_pytorch_inference.ipynb.txt
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "c3a313f2",
+   "id": "e996e157",
    "metadata": {},
    "source": [
     "# Integration of ZarrDataset with PyTorch's DataLoader for inference (Advanced)\n",
@@ -189,7 +189,7 @@
  ],
  "metadata": {
   "execution": {
-   "timeout": 120
+   "timeout": 600
   },
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
diff --git a/docs/_sources/examples/advanced_masked_datasets_example.ipynb.txt b/docs/_sources/examples/advanced_masked_datasets_example.ipynb.txt
index f8dff7c..a8314e4 100644
--- a/docs/_sources/examples/advanced_masked_datasets_example.ipynb.txt
+++ b/docs/_sources/examples/advanced_masked_datasets_example.ipynb.txt
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "a83a8fc0",
+   "id": "6193287e",
    "metadata": {},
    "source": [
     "# Custom masks for sampling specific regions from images with ZarrDataset"
@@ -11,7 +11,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "d5160c71",
+   "id": "0f4a6713",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -22,7 +22,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "98fc686e",
+   "id": "02ec8f70",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -38,7 +38,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "dcbdace1",
+   "id": "69c4aeb4",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -53,7 +53,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "3c5fd889",
+   "id": "43c67512",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -64,7 +64,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "fa0eae79",
+   "id": "fc96ed90",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -76,7 +76,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "b2e7273e",
+   "id": "fe865601",
    "metadata": {},
    "source": [
     "Define a mask from where patches can be extracted"
@@ -85,7 +85,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "0b978ca7",
+   "id": "46e58845",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -95,7 +95,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "e47927e4",
+   "id": "aaca8473",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -113,7 +113,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "e27eed9a",
+   "id": "430549af",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -124,7 +124,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "206a734d",
+   "id": "ca708846",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -135,7 +135,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "fab0a294",
+   "id": "ceb754da",
    "metadata": {},
    "source": [
     "## Extract patches of size 512x512 pixels from a Whole Slide Image (WSI)"
@@ -143,7 +143,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "e06d8dee",
+   "id": "c1f558c8",
    "metadata": {},
    "source": [
     "Sample the image uniformly in a squared grid pattern"
@@ -152,7 +152,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "202b996d",
+   "id": "874ad5dd",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -162,7 +162,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "f66a6de9",
+   "id": "b1133e72",
    "metadata": {},
    "source": [
     "Use the ZarrDataset class to enable extraction of samples from masked regions.\n",
@@ -173,7 +173,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "2d1234bc",
+   "id": "3aeaafe0",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -199,7 +199,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "6ae7ab46",
+   "id": "c9276695",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -209,7 +209,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "5782c1cc",
+   "id": "95973947",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -221,7 +221,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "35bddd76",
+   "id": "bf88917a",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -232,7 +232,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "d9a03a11",
+   "id": "bfeb0a04",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -243,7 +243,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "4f280634",
+   "id": "448656be",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -264,7 +264,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "814d15ce",
+   "id": "89cefb01",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -275,7 +275,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "fea4b572",
+   "id": "d20af5bc",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -285,7 +285,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "c73b54cb",
+   "id": "c07aa2b2",
    "metadata": {},
    "source": [
     "## Use a function to generate the masks for each image in the dataset\n",
@@ -296,7 +296,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "3237e808",
+   "id": "f03aabc0",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -306,7 +306,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "733f20b4",
+   "id": "d9ef53eb",
    "metadata": {},
    "source": [
     "Apply WSITissueMaskGenerator transform to each image in the dataset to define each sampling mask"
@@ -315,7 +315,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "05b1211d",
+   "id": "9e6867a5",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -327,7 +327,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "7b0063bb",
+   "id": "14b9edae",
    "metadata": {},
    "source": [
     "Because the input image (zarr group \"1\") is large, computing the mask directly on that could require high computational resources.\n",
@@ -340,7 +340,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "543efcd5",
+   "id": "bbb7544e",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -369,7 +369,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "a943a4df",
+   "id": "d50fd9a3",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -388,7 +388,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "953f9a59",
+   "id": "059f72a6",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -398,6 +398,9 @@
   }
  ],
  "metadata": {
+  "execution": {
+   "timeout": 600
+  },
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
    "language": "python",
diff --git a/docs/_sources/examples/basic_example.ipynb.txt b/docs/_sources/examples/basic_example.ipynb.txt
index f5b1216..593b0a3 100644
--- a/docs/_sources/examples/basic_example.ipynb.txt
+++ b/docs/_sources/examples/basic_example.ipynb.txt
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "6c6d9851",
+   "id": "b813b87c",
    "metadata": {},
    "source": [
     "# Basic ZarrDataset usage example"
@@ -10,7 +10,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "8376f274",
+   "id": "ecd3e915",
    "metadata": {},
    "source": [
     "Import the \"zarrdataset\" package"
@@ -19,7 +19,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "af50b787",
+   "id": "55f81521",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -29,7 +29,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "4ff26660",
+   "id": "66b43467",
    "metadata": {},
    "source": [
     "Load data stored on S3 storage"
@@ -38,7 +38,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "e581a8e1",
+   "id": "057d034d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -54,7 +54,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "012cedfd",
+   "id": "a32081be",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -68,7 +68,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "bd9ff801",
+   "id": "d888d310",
    "metadata": {},
    "source": [
     "Inspect the image to sample"
@@ -77,7 +77,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "db24272a",
+   "id": "b99506cb",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -87,7 +87,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "64a921d3",
+   "id": "8ff83a85",
    "metadata": {},
    "source": [
     "Display a downsampled version of the image"
@@ -96,7 +96,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "0f82949f",
+   "id": "81eecf24",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -108,7 +108,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "da1bc0b4",
+   "id": "c56e97f9",
    "metadata": {},
    "source": [
     "## Retrieving whole images"
@@ -116,7 +116,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "6204bc81",
+   "id": "21ff2662",
    "metadata": {},
    "source": [
     "Create a ZarrDataset to handle the image dataset instead of opening all the dataset images by separate and hold them in memory until they are not used anymore."
@@ -125,7 +125,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "3f34bdec",
+   "id": "1be221cb",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -134,7 +134,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "05c8c96c",
+   "id": "c2698879",
    "metadata": {},
    "source": [
     "Start by retrieving whole images, from a subsampled (pyramid) group (e.g. group 6) within the zarr image file, instead the full resolution image at group \"0\".\n",
@@ -144,7 +144,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "c9d4d99d",
+   "id": "aa6ed3a5",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -158,7 +158,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "b23f4722",
+   "id": "a54a6c84",
    "metadata": {},
    "source": [
     "The ZarrDataset class can be used as a Python's generator, and can be accessed by `iter` and subsequently `next` operations."
@@ -167,7 +167,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "3029c961",
+   "id": "e3f45216",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -178,7 +178,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "b78d3e20",
+   "id": "2bc9b2cc",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -189,7 +189,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "fd4c4cdd",
+   "id": "2f58ad14",
    "metadata": {},
    "source": [
     "Compare the shape of the retreived sample with the shape of the original image in group \"6\""
@@ -198,7 +198,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "7476bc30",
+   "id": "65ee053d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -207,7 +207,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "f971142f",
+   "id": "30d9074b",
    "metadata": {},
    "source": [
     "## Extracting patches of size 512x512 pixels from a Whole Slide Image (WSI)"
@@ -215,7 +215,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "a66ce716",
+   "id": "7b7b40c1",
    "metadata": {},
    "source": [
     "The PatchSampler class can be used along with ZarrDataset to retrieve patches from WSIs without having to tiling them in a pre-process step."
@@ -224,7 +224,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "a9e18baa",
+   "id": "577f4551",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -236,7 +236,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "e1512580",
+   "id": "40265520",
    "metadata": {},
    "source": [
     "Create a new dataset using the ZarrDataset class, and pass the PatchSampler as `patch_sampler` argument.\n",
@@ -246,7 +246,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "e8f37a83",
+   "id": "18becce5",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -264,7 +264,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "b8ed15ba",
+   "id": "a25a19f4",
    "metadata": {},
    "source": [
     "Create a generator from the dataset object and extract some patches"
@@ -273,7 +273,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "ec92a6d0",
+   "id": "107f41f0",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -292,7 +292,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "90c3f44c",
+   "id": "89d742d9",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -302,7 +302,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "90c4e20d",
+   "id": "76b65598",
    "metadata": {},
    "source": [
     "## Using ZarrDataset in a for loop"
@@ -310,7 +310,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "6b0f80af",
+   "id": "12ee7813",
    "metadata": {},
    "source": [
     "ZarrDatasets can be used as generators, for example in for loops"
@@ -319,7 +319,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "4b206e9f",
+   "id": "004dcda0",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -337,7 +337,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "ae7de5e4",
+   "id": "8f2d8eac",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -347,7 +347,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "09a43573",
+   "id": "8eebaf4a",
    "metadata": {},
    "source": [
     "## Create a ZarrDataset with all the dataset specifications.\n",
@@ -361,7 +361,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "d43d8aef",
+   "id": "7b42b3e1",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -374,7 +374,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "7de2269a",
+   "id": "ecd0869e",
    "metadata": {},
    "source": [
     "Also, try sampling patches from random locations by setting `shuffle=True`."
@@ -383,7 +383,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "73c38ab1",
+   "id": "e00b1347",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -395,7 +395,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "18f32aae",
+   "id": "b1c82bc0",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -413,7 +413,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "27f55821",
+   "id": "278e0453",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -423,6 +423,9 @@
   }
  ],
  "metadata": {
+  "execution": {
+   "timeout": 600
+  },
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
    "language": "python",
diff --git a/docs/_sources/examples/basic_example_pytorch.ipynb.txt b/docs/_sources/examples/basic_example_pytorch.ipynb.txt
index 9636915..f73cb02 100644
--- a/docs/_sources/examples/basic_example_pytorch.ipynb.txt
+++ b/docs/_sources/examples/basic_example_pytorch.ipynb.txt
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "3ad26210",
+   "id": "13fcd9c9",
    "metadata": {},
    "source": [
     "# Integration of ZarrDataset with PyTorch's DataLoader"
@@ -11,7 +11,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "a7b04731",
+   "id": "0a27ba15",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -24,7 +24,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "eee83d04",
+   "id": "33b4ef7a",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -42,7 +42,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "cf34d8de",
+   "id": "5a2d0543",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -57,7 +57,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "243b6498",
+   "id": "179c38c5",
    "metadata": {},
    "source": [
     "## Extracting patches of size 1024x1024 pixels from a Whole Slide Image (WSI)"
@@ -65,7 +65,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "db645362",
+   "id": "65ef4459",
    "metadata": {},
    "source": [
     "Sample the image randomly"
@@ -74,7 +74,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "8534c374",
+   "id": "9604a906",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -84,7 +84,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "d68f44c4",
+   "id": "270e5e74",
    "metadata": {},
    "source": [
     "Create a dataset from the list of filenames. All those files should be stored within their respective group \"0\".\n",
@@ -95,7 +95,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "b17dfe1a",
+   "id": "dccf0e2b",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -113,7 +113,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "1e041ddd",
+   "id": "af770aaf",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -122,7 +122,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "b4013a3c",
+   "id": "86cdc3ef",
    "metadata": {},
    "source": [
     "Add a pre-processing step before creating the image batches, where the input arrays are casted from int16 to float32."
@@ -131,7 +131,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "f7756350",
+   "id": "bc222884",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -147,7 +147,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "5781785c",
+   "id": "fbba1ca4",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -156,7 +156,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "1c94c244",
+   "id": "398848d6",
    "metadata": {},
    "source": [
     "## Create a DataLoader from the dataset object"
@@ -164,7 +164,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "cc086d26",
+   "id": "1f1379a4",
    "metadata": {},
    "source": [
     "ZarrDataset is compatible with DataLoader from PyTorch since it is inherited from the IterableDataset class of the torch.utils.data module."
@@ -173,7 +173,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "f12b54a3",
+   "id": "1322f0d6",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -183,7 +183,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "21b153c6",
+   "id": "98187912",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -204,7 +204,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "c6ff3d68",
+   "id": "36650cd2",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -216,7 +216,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "b17012b7",
+   "id": "c6e0bcc9",
    "metadata": {},
    "source": [
     "## Multithread data loading with Torch's DataLoader"
@@ -224,7 +224,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "8ae16730",
+   "id": "e3e53816",
    "metadata": {},
    "source": [
     "This example will use multiple workers to load patches of size 256x256 from the same image"
@@ -233,7 +233,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "04ec333c",
+   "id": "01a7ec2b",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -243,7 +243,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "c20e17cb",
+   "id": "1f2b4694",
    "metadata": {},
    "source": [
     "Create a dataset from the list of filenames. All those files should be stored within their respective group \"0\".\n",
@@ -254,7 +254,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "a39da971",
+   "id": "4d59b9ae",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -271,7 +271,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "9a9076df",
+   "id": "81cf1be3",
    "metadata": {},
    "source": [
     "ZarrDataset performs some special operations for enabling multithread data loading without replicating the full dataset on each worker.\n",
@@ -282,7 +282,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "9d6a81ca",
+   "id": "0a371fdf",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -292,7 +292,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "923b22e4",
+   "id": "5c62ab89",
    "metadata": {},
    "source": [
     "Now the data can be safely loaded using multiple workers."
@@ -301,7 +301,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "a23aeb06",
+   "id": "07954b39",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -322,7 +322,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "689897a0",
+   "id": "a3a14273",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -332,6 +332,9 @@
   }
  ],
  "metadata": {
+  "execution": {
+   "timeout": 600
+  },
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
    "language": "python",
diff --git a/docs/_sources/examples/basic_example_tensorflow.ipynb.txt b/docs/_sources/examples/basic_example_tensorflow.ipynb.txt
index 99253f2..d49412d 100644
--- a/docs/_sources/examples/basic_example_tensorflow.ipynb.txt
+++ b/docs/_sources/examples/basic_example_tensorflow.ipynb.txt
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "e7e1967a",
+   "id": "6d9fb03a",
    "metadata": {},
    "source": [
     "# Integration of ZarrDataset with Tensorflow Datasets"
@@ -11,7 +11,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "d44bae3d",
+   "id": "6468a31a",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -22,7 +22,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "65703edd",
+   "id": "b25034b5",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -40,7 +40,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "243b7f8c",
+   "id": "8fb5811b",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -54,7 +54,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "fad1438a",
+   "id": "3ebb3cca",
    "metadata": {},
    "source": [
     "## Extracting patches of size 1024x1024 pixels from a Whole Slide Image (WSI)"
@@ -62,7 +62,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "398fc76e",
+   "id": "5064cd2e",
    "metadata": {},
    "source": [
     "Sample the image randomly using a [Blue Noise](https://blog.demofox.org/2017/10/20/generating-blue-noise-sample-points-with-mitchells-best-candidate-algorithm/) sampling."
@@ -71,7 +71,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "0fe02956",
+   "id": "16e6278d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -81,7 +81,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "d2dc6d35",
+   "id": "38bdc472",
    "metadata": {},
    "source": [
     "Create a dataset from the list of filenames. All those files should be stored within their respective group \"0\".\n",
@@ -92,7 +92,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "edb67cbb",
+   "id": "e9b1137d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -115,7 +115,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "63eb3f4c",
+   "id": "cdcb2ca5",
    "metadata": {},
    "source": [
     "## Create a Tensoflow Dataset from the ZarrDataset object"
@@ -123,7 +123,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "c458f388",
+   "id": "0f554555",
    "metadata": {},
    "source": [
     "When PyTorch is not present in the system, ZarrDataset will still work as a python generator.\n",
@@ -134,7 +134,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "fdcc77de",
+   "id": "3173e008",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -150,7 +150,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "ca484baf",
+   "id": "7ce458ae",
    "metadata": {},
    "source": [
     "This data loader can be used within Tensorflow training pipelines."
@@ -159,7 +159,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "d2dfbc4d",
+   "id": "e9efb890",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -179,7 +179,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "ef732fd0",
+   "id": "633c34c1",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -189,7 +189,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "a7884e0a",
+   "id": "572112e9",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -201,6 +201,9 @@
   }
  ],
  "metadata": {
+  "execution": {
+   "timeout": 600
+  },
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
    "language": "python",
diff --git a/docs/_sources/examples/basic_labeled_datasets_example.ipynb.txt b/docs/_sources/examples/basic_labeled_datasets_example.ipynb.txt
index defa7c8..76ce6cb 100644
--- a/docs/_sources/examples/basic_labeled_datasets_example.ipynb.txt
+++ b/docs/_sources/examples/basic_labeled_datasets_example.ipynb.txt
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "1c5e5395",
+   "id": "38952883",
    "metadata": {},
    "source": [
     "# Labeled dataset loading with ZarrDataset"
@@ -10,7 +10,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "0622008c",
+   "id": "45b86cee",
    "metadata": {},
    "source": [
     "Import the \"zarrdataset\" package"
@@ -19,7 +19,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "81a91dce",
+   "id": "390d9705",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -29,7 +29,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "c0f21062",
+   "id": "b31da04e",
    "metadata": {},
    "source": [
     "Load data stored on S3 storage"
@@ -38,7 +38,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "91558840",
+   "id": "fcdf2a77",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -54,7 +54,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "cf3d2692",
+   "id": "26c76379",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -68,7 +68,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "0458e379",
+   "id": "f4666dd4",
    "metadata": {},
    "source": [
     "## Extract pair of patches of size 512x512 pixels and their respective label from a labeled Whole Slide Image (WSI)"
@@ -76,7 +76,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "d5f6d6ad",
+   "id": "20213f75",
    "metadata": {},
    "source": [
     "LabeledZarrDataset can retrieve the associated label to each patch extracted as a pair of input and target samples."
@@ -85,7 +85,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "8cb5e3aa",
+   "id": "168d7af3",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -95,7 +95,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "59c5be6a",
+   "id": "71cf7ced",
    "metadata": {},
    "source": [
     "### Weakly labeled exmaple"
@@ -103,7 +103,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "372ff118",
+   "id": "d6f1b0c1",
    "metadata": {},
    "source": [
     "Weakly labeled means that there is a few labels (or only one) associated to the whole image.\n",
@@ -114,7 +114,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "4178b9e6",
+   "id": "6f8977d6",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -139,7 +139,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "560c0605",
+   "id": "b34fc47d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -153,7 +153,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "07b6679b",
+   "id": "70799f79",
    "metadata": {},
    "source": [
     "### Densely labeled example"
@@ -161,7 +161,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "184cbf3b",
+   "id": "a4bfa8b2",
    "metadata": {},
    "source": [
     "Densely labeled images contain more spatial information about the image.\n",
@@ -174,7 +174,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "9b8c545f",
+   "id": "72814c5b",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -195,7 +195,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "7b6f7eb1",
+   "id": "564d61f4",
    "metadata": {},
    "source": [
     "The label image can be something like the following"
@@ -204,7 +204,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "4d6f37a8",
+   "id": "aeff76cb",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -218,7 +218,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "fe53231f",
+   "id": "786c16a3",
    "metadata": {},
    "source": [
     "In this case, the labels are passed as a list of Numpy NDArrays, but these could be also stored in Zarr, either locally or in a remote S3 bucket."
@@ -227,7 +227,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "75f388f1",
+   "id": "818d4bd1",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -251,7 +251,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "eabe1913",
+   "id": "a28ea4c1",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -276,6 +276,9 @@
   }
  ],
  "metadata": {
+  "execution": {
+   "timeout": 600
+  },
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
    "language": "python",
diff --git a/docs/_sources/examples/basic_masked_datasets_example.ipynb.txt b/docs/_sources/examples/basic_masked_datasets_example.ipynb.txt
index 2424ecc..3d15539 100644
--- a/docs/_sources/examples/basic_masked_datasets_example.ipynb.txt
+++ b/docs/_sources/examples/basic_masked_datasets_example.ipynb.txt
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "markdown",
-   "id": "934e89f4",
+   "id": "96bfc62e",
    "metadata": {},
    "source": [
     "# Loading patches/windows from masked regions of images with ZarrDataset"
@@ -10,7 +10,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "69858d35",
+   "id": "bf1a1211",
    "metadata": {},
    "source": [
     "Import the \"zarrdataset\" package"
@@ -19,7 +19,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "8e660935",
+   "id": "99af62f5",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -29,7 +29,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "f7731925",
+   "id": "0db1dc86",
    "metadata": {},
    "source": [
     "Load data stored on S3 storage"
@@ -38,7 +38,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "fd2458f1",
+   "id": "87f76520",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -54,7 +54,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "c87346a4",
+   "id": "3eb2893d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -69,7 +69,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "256c8816",
+   "id": "93d60e69",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -80,7 +80,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "54526509",
+   "id": "56995806",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -91,7 +91,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "5fc716e1",
+   "id": "ed0ec99c",
    "metadata": {},
    "source": [
     "## Define a mask from where patches will be extracted"
@@ -100,22 +100,20 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "c6da3ae5",
+   "id": "cf6f24d0",
    "metadata": {},
    "outputs": [],
    "source": [
     "mask = np.array([\n",
-    "    [0, 0, 0, 0, 0],\n",
-    "    [0, 0, 1, 0, 0],\n",
-    "    [0, 1, 0, 0, 0],\n",
-    "    [0, 0, 0, 0, 0],\n",
-    "    [0, 0, 0, 0, 1],\n",
+    "    [0, 0, 0, 0],\n",
+    "    [0, 0, 1, 0],\n",
+    "    [0, 1, 0, 0],\n",
     "], dtype=bool)"
    ]
   },
   {
    "cell_type": "markdown",
-   "id": "c4471754",
+   "id": "a4d9c652",
    "metadata": {},
    "source": [
     "ZarrDataset will match the size of the mask t the size of the image that is being sampled.\n",
@@ -126,7 +124,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "4368407e",
+   "id": "04f1abea",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -157,7 +155,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "c2a65f6f",
+   "id": "4f545269",
    "metadata": {},
    "source": [
     "## Extract patches of size 512x512 pixels from masked regiosn of a Whole Slide Image (WSI)"
@@ -165,7 +163,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "baa49bb8",
+   "id": "fa72eceb",
    "metadata": {},
    "source": [
     "Sample the image uniformly in a squared grid pattern using a `PatchSampler`"
@@ -174,7 +172,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "3cb2bad2",
+   "id": "1bd5a11f",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -184,7 +182,7 @@
   },
   {
    "cell_type": "markdown",
-   "id": "03cd1ed3",
+   "id": "37f87569",
    "metadata": {},
    "source": [
     "Use the ZarrDataset class to enable extraction of samples from masked regions by specifying two modalities: images, and masks.\n",
@@ -195,7 +193,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "7f0310ec",
+   "id": "3674fdb6",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -221,7 +219,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "7ed2df7b",
+   "id": "c85d3f9d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -231,7 +229,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "61894af6",
+   "id": "917d95bb",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -242,7 +240,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "c673a768",
+   "id": "4b555dda",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -253,7 +251,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "f229ea2b",
+   "id": "2d57d73c",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -271,7 +269,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "af72b34f",
+   "id": "99157b0d",
    "metadata": {},
    "outputs": [],
    "source": [
@@ -281,6 +279,9 @@
   }
  ],
  "metadata": {
+  "execution": {
+   "timeout": 600
+  },
   "kernelspec": {
    "display_name": "Python 3 (ipykernel)",
    "language": "python",
diff --git a/docs/_static/searchtools.js b/docs/_static/searchtools.js
index 92da3f8..b08d58c 100644
--- a/docs/_static/searchtools.js
+++ b/docs/_static/searchtools.js
@@ -178,7 +178,7 @@ const Search = {
 
   htmlToText: (htmlString, anchor) => {
     const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
-    for (const removalQuery of [".headerlinks", "script", "style"]) {
+    for (const removalQuery of [".headerlink", "script", "style"]) {
       htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
     }
     if (anchor) {
@@ -328,13 +328,14 @@ const Search = {
     for (const [title, foundTitles] of Object.entries(allTitles)) {
       if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
         for (const [file, id] of foundTitles) {
-          let score = Math.round(100 * queryLower.length / title.length)
+          const score = Math.round(Scorer.title * queryLower.length / title.length);
+          const boost = titles[file] === title ? 1 : 0;  // add a boost for document titles
           normalResults.push([
             docNames[file],
             titles[file] !== title ? `${titles[file]} > ${title}` : title,
             id !== null ? "#" + id : "",
             null,
-            score,
+            score + boost,
             filenames[file],
           ]);
         }
diff --git a/docs/autoapi/index.html b/docs/autoapi/index.html
index 41edf7a..d584cef 100644
--- a/docs/autoapi/index.html
+++ b/docs/autoapi/index.html
@@ -7,7 +7,7 @@
   API Reference — ZarrDataset  documentation
       
       
-      
+      
       
 
   
@@ -67,7 +67,7 @@
 
 
@@ -101,13 +101,13 @@ 

API Reference[1].