diff --git a/dev/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip b/dev/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip index a65dec3a..cf1b771b 100644 Binary files a/dev/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip and b/dev/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip differ diff --git a/dev/_downloads/0f2bf063e08b7d05b80e0004fcbbb6f9/benchmark_lazy_eager_loading.ipynb b/dev/_downloads/0f2bf063e08b7d05b80e0004fcbbb6f9/benchmark_lazy_eager_loading.ipynb index d39c280f..ad2d9054 100644 --- a/dev/_downloads/0f2bf063e08b7d05b80e0004fcbbb6f9/benchmark_lazy_eager_loading.ipynb +++ b/dev/_downloads/0f2bf063e08b7d05b80e0004fcbbb6f9/benchmark_lazy_eager_loading.ipynb @@ -51,7 +51,7 @@ }, "outputs": [], "source": [ - "def load_example_data(preload, window_len_s, n_recordings=10):\n \"\"\"Create windowed dataset from subjects of the TUH Abnormal dataset.\n\n Parameters\n ----------\n preload: bool\n If True, use eager loading, otherwise use lazy loading.\n window_len_s: int\n Window length in seconds.\n n_recordings: list of int\n Number of recordings to load.\n\n Returns\n -------\n windows_ds: BaseConcatDataset\n Windowed data.\n\n .. warning::\n The recordings from the TUH Abnormal corpus do not all share the same\n sampling rate. The following assumes that the files have already been\n resampled to a common sampling rate.\n \"\"\"\n\n recording_ids = list(range(n_recordings))\n\n ds = TUHAbnormal(\n TUH_PATH,\n recording_ids=recording_ids,\n target_name=\"pathological\",\n preload=preload,\n )\n\n fs = ds.datasets[0].raw.info[\"sfreq\"]\n window_len_samples = int(fs * window_len_s)\n window_stride_samples = int(fs * 4)\n # window_stride_samples = int(fs * window_len_s)\n windows_ds = create_fixed_length_windows(\n ds,\n start_offset_samples=0,\n stop_offset_samples=None,\n window_size_samples=window_len_samples,\n window_stride_samples=window_stride_samples,\n drop_last_window=True,\n preload=preload,\n )\n\n # Drop bad epochs\n # XXX: This could be parallelized.\n # XXX: Also, this could be implemented in the Dataset object itself.\n for ds in windows_ds.datasets:\n ds.windows.drop_bad()\n assert ds.windows.preload == preload\n\n return windows_ds\n\n\ndef create_example_model(\n n_channels, n_classes, window_len_samples, kind=\"shallow\", cuda=False\n):\n \"\"\"Create model, loss and optimizer.\n\n Parameters\n ----------\n n_channels : int\n Number of channels in the input\n n_times : int\n Window length in the input\n n_classes : int\n Number of classes in the output\n kind : str\n 'shallow' or 'deep'\n cuda : bool\n If True, move the model to a CUDA device.\n\n Returns\n -------\n model : torch.nn.Module\n Model to train.\n loss :\n Loss function\n optimizer :\n Optimizer\n \"\"\"\n if kind == \"shallow\":\n model = ShallowFBCSPNet(\n n_channels,\n n_classes,\n input_window_samples=window_len_samples,\n n_filters_time=40,\n filter_time_length=25,\n n_filters_spat=40,\n pool_time_length=75,\n pool_time_stride=15,\n final_conv_length=\"auto\",\n split_first_layer=True,\n batch_norm=True,\n batch_norm_alpha=0.1,\n drop_prob=0.5,\n )\n elif kind == \"deep\":\n model = Deep4Net(\n n_channels,\n n_classes,\n input_window_samples=window_len_samples,\n final_conv_length=\"auto\",\n n_filters_time=25,\n n_filters_spat=25,\n filter_time_length=10,\n pool_time_length=3,\n pool_time_stride=3,\n n_filters_2=50,\n filter_length_2=10,\n n_filters_3=100,\n filter_length_3=10,\n n_filters_4=200,\n filter_length_4=10,\n 
first_pool_mode=\"max\",\n        later_pool_mode=\"max\",\n        drop_prob=0.5,\n        double_time_convs=False,\n        split_first_layer=True,\n        batch_norm=True,\n        batch_norm_alpha=0.1,\n        stride_before_pool=False,\n        )\n    else:\n        raise ValueError\n\n    if cuda:\n        model.cuda()\n\n    optimizer = optim.Adam(model.parameters())\n    loss = nn.CrossEntropyLoss()\n\n    return model, loss, optimizer\n\n\ndef run_training(model, dataloader, loss, optimizer, n_epochs=1, cuda=False):\n    \"\"\"Run training loop.\n\n    Parameters\n    ----------\n    model : torch.nn.Module\n        Model to train.\n    dataloader : torch.utils.data.Dataloader\n        Data loader which will serve examples to the model during training.\n    loss :\n        Loss function.\n    optimizer :\n        Optimizer.\n    n_epochs : int\n        Number of epochs to train the model for.\n    cuda : bool\n        If True, move X and y to CUDA device.\n\n    Returns\n    -------\n    model : torch.nn.Module\n        Trained model.\n    \"\"\"\n    for i in range(n_epochs):\n        loss_vals = list()\n        for X, y, _ in dataloader:\n            model.train()\n            model.zero_grad()\n\n            y = y.long()\n            if cuda:\n                X, y = X.cuda(), y.cuda()\n\n            loss_val = loss(model(X), y)\n            loss_vals.append(loss_val.item())\n\n            loss_val.backward()\n            optimizer.step()\n\n        print(f\"Epoch {i + 1} - mean training loss: {np.mean(loss_vals)}\")\n\n    return model" + "def load_example_data(preload, window_len_s, n_recordings=10):\n    \"\"\"Create windowed dataset from subjects of the TUH Abnormal dataset.\n\n    Parameters\n    ----------\n    preload: bool\n        If True, use eager loading, otherwise use lazy loading.\n    window_len_s: int\n        Window length in seconds.\n    n_recordings: list of int\n        Number of recordings to load.\n\n    Returns\n    -------\n    windows_ds: BaseConcatDataset\n        Windowed data.\n\n    .. warning::\n        The recordings from the TUH Abnormal corpus do not all share the same\n        sampling rate. The following assumes that the files have already been\n        resampled to a common sampling rate.\n    \"\"\"\n\n    recording_ids = list(range(n_recordings))\n\n    ds = TUHAbnormal(\n        TUH_PATH,\n        recording_ids=recording_ids,\n        target_name=\"pathological\",\n        preload=preload,\n    )\n\n    fs = ds.datasets[0].raw.info[\"sfreq\"]\n    window_len_samples = int(fs * window_len_s)\n    window_stride_samples = int(fs * 4)\n    # window_stride_samples = int(fs * window_len_s)\n    windows_ds = create_fixed_length_windows(\n        ds,\n        start_offset_samples=0,\n        stop_offset_samples=None,\n        window_size_samples=window_len_samples,\n        window_stride_samples=window_stride_samples,\n        drop_last_window=True,\n        preload=preload,\n    )\n\n    # Drop bad epochs\n    # XXX: This could be parallelized.\n    # XXX: Also, this could be implemented in the Dataset object itself.\n    # We don't support drop_bad since the last version of braindecode,\n    # to optimize the dataset speed. 
If you know how to fix this, please open a PR.\n    # for ds in windows_ds.datasets:\n    #     ds.raw.drop_bad()\n    #     assert ds.raw.preload == preload\n\n    return windows_ds\n\n\ndef create_example_model(\n    n_channels, n_classes, window_len_samples, kind=\"shallow\", cuda=False\n):\n    \"\"\"Create model, loss and optimizer.\n\n    Parameters\n    ----------\n    n_channels : int\n        Number of channels in the input\n    n_times : int\n        Window length in the input\n    n_classes : int\n        Number of classes in the output\n    kind : str\n        'shallow' or 'deep'\n    cuda : bool\n        If True, move the model to a CUDA device.\n\n    Returns\n    -------\n    model : torch.nn.Module\n        Model to train.\n    loss :\n        Loss function\n    optimizer :\n        Optimizer\n    \"\"\"\n    if kind == \"shallow\":\n        model = ShallowFBCSPNet(\n            n_channels,\n            n_classes,\n            input_window_samples=window_len_samples,\n            n_filters_time=40,\n            filter_time_length=25,\n            n_filters_spat=40,\n            pool_time_length=75,\n            pool_time_stride=15,\n            final_conv_length=\"auto\",\n            split_first_layer=True,\n            batch_norm=True,\n            batch_norm_alpha=0.1,\n            drop_prob=0.5,\n        )\n    elif kind == \"deep\":\n        model = Deep4Net(\n            n_channels,\n            n_classes,\n            input_window_samples=window_len_samples,\n            final_conv_length=\"auto\",\n            n_filters_time=25,\n            n_filters_spat=25,\n            filter_time_length=10,\n            pool_time_length=3,\n            pool_time_stride=3,\n            n_filters_2=50,\n            filter_length_2=10,\n            n_filters_3=100,\n            filter_length_3=10,\n            n_filters_4=200,\n            filter_length_4=10,\n            first_pool_mode=\"max\",\n            later_pool_mode=\"max\",\n            drop_prob=0.5,\n            split_first_layer=True,\n            batch_norm=True,\n            batch_norm_alpha=0.1,\n            stride_before_pool=False,\n        )\n    else:\n        raise ValueError\n\n    if cuda:\n        model.cuda()\n\n    optimizer = optim.Adam(model.parameters())\n    loss = nn.CrossEntropyLoss()\n\n    return model, loss, optimizer\n\n\ndef run_training(model, dataloader, loss, optimizer, n_epochs=1, cuda=False):\n    \"\"\"Run training loop.\n\n    Parameters\n    ----------\n    model : torch.nn.Module\n        Model to train.\n    dataloader : torch.utils.data.Dataloader\n        Data loader which will serve examples to the model during training.\n    loss :\n        Loss function.\n    optimizer :\n        Optimizer.\n    n_epochs : int\n        Number of epochs to train the model for.\n    cuda : bool\n        If True, move X and y to CUDA device.\n\n    Returns\n    -------\n    model : torch.nn.Module\n        Trained model.\n    \"\"\"\n    for i in range(n_epochs):\n        loss_vals = list()\n        for X, y, _ in dataloader:\n            model.train()\n            model.zero_grad()\n\n            y = y.long()\n            if cuda:\n                X, y = X.cuda(), y.cuda()\n\n            loss_val = loss(model(X), y)\n            loss_vals.append(loss_val.item())\n\n            loss_val.backward()\n            optimizer.step()\n\n        print(f\"Epoch {i + 1} - mean training loss: {np.mean(loss_vals)}\")\n\n    return model" ] }, { @@ -105,7 +105,7 @@ }, "outputs": [], "source": [ - "all_results = list()\nfor (\n    i,\n    preload,\n    n_recordings,\n    win_len_s,\n    n_epochs,\n    batch_size,\n    model_kind,\n    num_workers,\n    pin_memory,\n    cuda,\n) in product(\n    range(N_REPETITIONS),\n    PRELOAD,\n    N_RECORDINGS,\n    WINDOW_LEN_S,\n    N_EPOCHS,\n    BATCH_SIZE,\n    MODEL,\n    NUM_WORKERS,\n    PIN_MEMORY,\n    CUDA,\n):\n    results = {\n        \"repetition\": i,\n        \"preload\": preload,\n        \"n_recordings\": n_recordings,\n        \"win_len_s\": win_len_s,\n        \"n_epochs\": n_epochs,\n        \"batch_size\": batch_size,\n        \"model_kind\": model_kind,\n        \"num_workers\": num_workers,\n        \"pin_memory\": pin_memory,\n        \"cuda\": cuda,\n    }\n    print(f\"\\nRepetition {i + 1}/{N_REPETITIONS}:\\n{results}\")\n\n    # Load the dataset\n    data_loading_start = time.time()\n    dataset = load_example_data(preload, win_len_s, n_recordings=n_recordings)\n 
data_loading_end = time.time()\n\n # Create the data loader\n training_setup_start = time.time()\n dataloader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n pin_memory=pin_memory,\n num_workers=num_workers,\n worker_init_fn=None,\n )\n\n # Instantiate model and optimizer\n n_channels = len(dataset.datasets[0].windows.ch_names)\n n_times = len(dataset.datasets[0].windows.times)\n n_classes = 2\n model, loss, optimizer = create_example_model(\n n_channels, n_classes, n_times, kind=model_kind, cuda=cuda\n )\n training_setup_end = time.time()\n\n # Start training loop\n model_training_start = time.time()\n trained_model = run_training(\n model, dataloader, loss, optimizer, n_epochs=n_epochs, cuda=cuda\n )\n model_training_end = time.time()\n\n del dataset, model, loss, optimizer, trained_model\n\n # Record timing results\n results[\"data_preparation\"] = data_loading_end - data_loading_start\n results[\"training_setup\"] = training_setup_end - training_setup_start\n results[\"model_training\"] = model_training_end - model_training_start\n all_results.append(results)" + "all_results = list()\nfor (\n i,\n preload,\n n_recordings,\n win_len_s,\n n_epochs,\n batch_size,\n model_kind,\n num_workers,\n pin_memory,\n cuda,\n) in product(\n range(N_REPETITIONS),\n PRELOAD,\n N_RECORDINGS,\n WINDOW_LEN_S,\n N_EPOCHS,\n BATCH_SIZE,\n MODEL,\n NUM_WORKERS,\n PIN_MEMORY,\n CUDA,\n):\n results = {\n \"repetition\": i,\n \"preload\": preload,\n \"n_recordings\": n_recordings,\n \"win_len_s\": win_len_s,\n \"n_epochs\": n_epochs,\n \"batch_size\": batch_size,\n \"model_kind\": model_kind,\n \"num_workers\": num_workers,\n \"pin_memory\": pin_memory,\n \"cuda\": cuda,\n }\n print(f\"\\nRepetition {i + 1}/{N_REPETITIONS}:\\n{results}\")\n\n # Load the dataset\n data_loading_start = time.time()\n dataset = load_example_data(preload, win_len_s, n_recordings=n_recordings)\n data_loading_end = time.time()\n\n # Create the data loader\n training_setup_start = time.time()\n dataloader = DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n pin_memory=pin_memory,\n num_workers=num_workers,\n worker_init_fn=None,\n )\n # Instantiate model and optimizer\n n_channels = dataset[0][0].shape[0]\n n_times = dataset[0][0].shape[1]\n n_classes = 2\n model, loss, optimizer = create_example_model(\n n_channels, n_classes, n_times, kind=model_kind, cuda=cuda\n )\n training_setup_end = time.time()\n\n # Start training loop\n model_training_start = time.time()\n trained_model = run_training(\n model, dataloader, loss, optimizer, n_epochs=n_epochs, cuda=cuda\n )\n model_training_end = time.time()\n\n del dataset, model, loss, optimizer, trained_model\n\n # Record timing results\n results[\"data_preparation\"] = data_loading_end - data_loading_start\n results[\"training_setup\"] = training_setup_end - training_setup_start\n results[\"model_training\"] = model_training_end - model_training_start\n all_results.append(results)" ] }, { diff --git a/dev/_downloads/2b3354037141fa0033174f982871d566/plot_relative_positioning.zip b/dev/_downloads/2b3354037141fa0033174f982871d566/plot_relative_positioning.zip index ddb53305..8117e1ba 100644 Binary files a/dev/_downloads/2b3354037141fa0033174f982871d566/plot_relative_positioning.zip and b/dev/_downloads/2b3354037141fa0033174f982871d566/plot_relative_positioning.zip differ diff --git a/dev/_downloads/36393305d195c3207520572a3b677a05/plot_basic_training_epochs.zip b/dev/_downloads/36393305d195c3207520572a3b677a05/plot_basic_training_epochs.zip index 
0362ac3f..bf343d7d 100644 Binary files a/dev/_downloads/36393305d195c3207520572a3b677a05/plot_basic_training_epochs.zip and b/dev/_downloads/36393305d195c3207520572a3b677a05/plot_basic_training_epochs.zip differ diff --git a/dev/_downloads/3d616473194ad5c360edcbb3e46c7a3a/plot_custom_dataset_example.zip b/dev/_downloads/3d616473194ad5c360edcbb3e46c7a3a/plot_custom_dataset_example.zip index 142f96bc..9928910d 100644 Binary files a/dev/_downloads/3d616473194ad5c360edcbb3e46c7a3a/plot_custom_dataset_example.zip and b/dev/_downloads/3d616473194ad5c360edcbb3e46c7a3a/plot_custom_dataset_example.zip differ diff --git a/dev/_downloads/497b39027218ebaf8ffa20c07a0eff8d/plot_split_dataset.zip b/dev/_downloads/497b39027218ebaf8ffa20c07a0eff8d/plot_split_dataset.zip index cb3c8666..43b36998 100644 Binary files a/dev/_downloads/497b39027218ebaf8ffa20c07a0eff8d/plot_split_dataset.zip and b/dev/_downloads/497b39027218ebaf8ffa20c07a0eff8d/plot_split_dataset.zip differ diff --git a/dev/_downloads/4a2751c5805e7a17be8ee78ee1f2d520/plot_sleep_staging_chambon2018.zip b/dev/_downloads/4a2751c5805e7a17be8ee78ee1f2d520/plot_sleep_staging_chambon2018.zip index 9afaee3f..2bd5750a 100644 Binary files a/dev/_downloads/4a2751c5805e7a17be8ee78ee1f2d520/plot_sleep_staging_chambon2018.zip and b/dev/_downloads/4a2751c5805e7a17be8ee78ee1f2d520/plot_sleep_staging_chambon2018.zip differ diff --git a/dev/_downloads/52f5b6b37eb9b07c97027b1eb0fadcd4/plot_sleep_staging_usleep.zip b/dev/_downloads/52f5b6b37eb9b07c97027b1eb0fadcd4/plot_sleep_staging_usleep.zip index a0105f7b..43355b59 100644 Binary files a/dev/_downloads/52f5b6b37eb9b07c97027b1eb0fadcd4/plot_sleep_staging_usleep.zip and b/dev/_downloads/52f5b6b37eb9b07c97027b1eb0fadcd4/plot_sleep_staging_usleep.zip differ diff --git a/dev/_downloads/568f264ca0ecc135d137fd575c39cf8e/plot_regression.zip b/dev/_downloads/568f264ca0ecc135d137fd575c39cf8e/plot_regression.zip index 7e8d87f3..50140458 100644 Binary files a/dev/_downloads/568f264ca0ecc135d137fd575c39cf8e/plot_regression.zip and b/dev/_downloads/568f264ca0ecc135d137fd575c39cf8e/plot_regression.zip differ diff --git a/dev/_downloads/5e85dd7b06a544f298bd1837eddfcbd5/plot_train_in_pure_pytorch_and_pytorch_lightning.zip b/dev/_downloads/5e85dd7b06a544f298bd1837eddfcbd5/plot_train_in_pure_pytorch_and_pytorch_lightning.zip index 6ca8b048..dba9b393 100644 Binary files a/dev/_downloads/5e85dd7b06a544f298bd1837eddfcbd5/plot_train_in_pure_pytorch_and_pytorch_lightning.zip and b/dev/_downloads/5e85dd7b06a544f298bd1837eddfcbd5/plot_train_in_pure_pytorch_and_pytorch_lightning.zip differ diff --git a/dev/_downloads/67cf91b203bc10a20a2f8703a04849f8/plot_data_augmentation_search.zip b/dev/_downloads/67cf91b203bc10a20a2f8703a04849f8/plot_data_augmentation_search.zip index 94a91259..b67ba4ef 100644 Binary files a/dev/_downloads/67cf91b203bc10a20a2f8703a04849f8/plot_data_augmentation_search.zip and b/dev/_downloads/67cf91b203bc10a20a2f8703a04849f8/plot_data_augmentation_search.zip differ diff --git a/dev/_downloads/6b30e1d8632bb747aae46f30c6993983/plot_bcic_iv_4_ecog_cropped.zip b/dev/_downloads/6b30e1d8632bb747aae46f30c6993983/plot_bcic_iv_4_ecog_cropped.zip index 8428c7ee..e0fa7fce 100644 Binary files a/dev/_downloads/6b30e1d8632bb747aae46f30c6993983/plot_bcic_iv_4_ecog_cropped.zip and b/dev/_downloads/6b30e1d8632bb747aae46f30c6993983/plot_bcic_iv_4_ecog_cropped.zip differ diff --git a/dev/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip b/dev/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip index 
8744b68b..aa7969c1 100644 Binary files a/dev/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip and b/dev/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip differ diff --git a/dev/_downloads/701321eb74424104536be933ed2cbf9e/plot_moabb_dataset_example.zip b/dev/_downloads/701321eb74424104536be933ed2cbf9e/plot_moabb_dataset_example.zip index 94327858..fc713055 100644 Binary files a/dev/_downloads/701321eb74424104536be933ed2cbf9e/plot_moabb_dataset_example.zip and b/dev/_downloads/701321eb74424104536be933ed2cbf9e/plot_moabb_dataset_example.zip differ diff --git a/dev/_downloads/7ec8b4809f7c475073a9b5d8ebb962dc/plot_benchmark_preprocessing.zip b/dev/_downloads/7ec8b4809f7c475073a9b5d8ebb962dc/plot_benchmark_preprocessing.zip index 260a63a1..b83a85a1 100644 Binary files a/dev/_downloads/7ec8b4809f7c475073a9b5d8ebb962dc/plot_benchmark_preprocessing.zip and b/dev/_downloads/7ec8b4809f7c475073a9b5d8ebb962dc/plot_benchmark_preprocessing.zip differ diff --git a/dev/_downloads/975f746daaf2fbe1dfde86e12f8d5031/plot_how_train_test_and_tune.zip b/dev/_downloads/975f746daaf2fbe1dfde86e12f8d5031/plot_how_train_test_and_tune.zip index 73dc347c..8e848197 100644 Binary files a/dev/_downloads/975f746daaf2fbe1dfde86e12f8d5031/plot_how_train_test_and_tune.zip and b/dev/_downloads/975f746daaf2fbe1dfde86e12f8d5031/plot_how_train_test_and_tune.zip differ diff --git a/dev/_downloads/9f417c6133a57e14b27122d013590718/plot_mne_dataset_example.zip b/dev/_downloads/9f417c6133a57e14b27122d013590718/plot_mne_dataset_example.zip index f2b0ab1f..fbc5b2a1 100644 Binary files a/dev/_downloads/9f417c6133a57e14b27122d013590718/plot_mne_dataset_example.zip and b/dev/_downloads/9f417c6133a57e14b27122d013590718/plot_mne_dataset_example.zip differ diff --git a/dev/_downloads/add1a85fee267a1849d27de66d7d8a6b/plot_sleep_staging_eldele2021.zip b/dev/_downloads/add1a85fee267a1849d27de66d7d8a6b/plot_sleep_staging_eldele2021.zip index ce946766..8bdba97b 100644 Binary files a/dev/_downloads/add1a85fee267a1849d27de66d7d8a6b/plot_sleep_staging_eldele2021.zip and b/dev/_downloads/add1a85fee267a1849d27de66d7d8a6b/plot_sleep_staging_eldele2021.zip differ diff --git a/dev/_downloads/b5c94eb60270f5ff2f26d4d743e7c69d/plot_tuh_eeg_corpus.zip b/dev/_downloads/b5c94eb60270f5ff2f26d4d743e7c69d/plot_tuh_eeg_corpus.zip index 414446ea..badc6798 100644 Binary files a/dev/_downloads/b5c94eb60270f5ff2f26d4d743e7c69d/plot_tuh_eeg_corpus.zip and b/dev/_downloads/b5c94eb60270f5ff2f26d4d743e7c69d/plot_tuh_eeg_corpus.zip differ diff --git a/dev/_downloads/bee463a2563e9377210dd6396f8af549/benchmark_lazy_eager_loading.py b/dev/_downloads/bee463a2563e9377210dd6396f8af549/benchmark_lazy_eager_loading.py index 55d5eabc..cf2c7a15 100644 --- a/dev/_downloads/bee463a2563e9377210dd6396f8af549/benchmark_lazy_eager_loading.py +++ b/dev/_downloads/bee463a2563e9377210dd6396f8af549/benchmark_lazy_eager_loading.py @@ -118,9 +118,11 @@ def load_example_data(preload, window_len_s, n_recordings=10): # Drop bad epochs # XXX: This could be parallelized. # XXX: Also, this could be implemented in the Dataset object itself. - for ds in windows_ds.datasets: - ds.windows.drop_bad() - assert ds.windows.preload == preload + # We don't support drop_bad since the last version of braindecode, + # to optimize the dataset speed. If you know how to fix this, please open a PR. 
+ # for ds in windows_ds.datasets: + # ds.raw.drop_bad() + # assert ds.raw.preload == preload return windows_ds @@ -188,7 +190,6 @@ def create_example_model( first_pool_mode="max", later_pool_mode="max", drop_prob=0.5, - double_time_convs=False, split_first_layer=True, batch_norm=True, batch_norm_alpha=0.1, @@ -335,10 +336,9 @@ def run_training(model, dataloader, loss, optimizer, n_epochs=1, cuda=False): num_workers=num_workers, worker_init_fn=None, ) - # Instantiate model and optimizer - n_channels = len(dataset.datasets[0].windows.ch_names) - n_times = len(dataset.datasets[0].windows.times) + n_channels = dataset[0][0].shape[0] + n_times = dataset[0][0].shape[1] n_classes = 2 model, loss, optimizer = create_example_model( n_channels, n_classes, n_times, kind=model_kind, cuda=cuda diff --git a/dev/_downloads/c3317f5b839dfe85c3fa27a60420173e/plot_bcic_iv_2a_moabb_cropped.zip b/dev/_downloads/c3317f5b839dfe85c3fa27a60420173e/plot_bcic_iv_2a_moabb_cropped.zip index 1b45d595..c847d6d3 100644 Binary files a/dev/_downloads/c3317f5b839dfe85c3fa27a60420173e/plot_bcic_iv_2a_moabb_cropped.zip and b/dev/_downloads/c3317f5b839dfe85c3fa27a60420173e/plot_bcic_iv_2a_moabb_cropped.zip differ diff --git a/dev/_downloads/c58c35b5588d025b3e00c7e383793f93/benchmark_lazy_eager_loading.zip b/dev/_downloads/c58c35b5588d025b3e00c7e383793f93/benchmark_lazy_eager_loading.zip index 5f183571..fdb751a0 100644 Binary files a/dev/_downloads/c58c35b5588d025b3e00c7e383793f93/benchmark_lazy_eager_loading.zip and b/dev/_downloads/c58c35b5588d025b3e00c7e383793f93/benchmark_lazy_eager_loading.zip differ diff --git a/dev/_downloads/cec1d60200f666e0fada2d86280a2abe/plot_tuh_discrete_multitarget.zip b/dev/_downloads/cec1d60200f666e0fada2d86280a2abe/plot_tuh_discrete_multitarget.zip index 022cd7d9..a85c79fe 100644 Binary files a/dev/_downloads/cec1d60200f666e0fada2d86280a2abe/plot_tuh_discrete_multitarget.zip and b/dev/_downloads/cec1d60200f666e0fada2d86280a2abe/plot_tuh_discrete_multitarget.zip differ diff --git a/dev/_downloads/d323b5e4b5903c1466d0e7674a0bfab6/plot_bcic_iv_2a_moabb_trial.zip b/dev/_downloads/d323b5e4b5903c1466d0e7674a0bfab6/plot_bcic_iv_2a_moabb_trial.zip index 586aa7f2..9c81b70e 100644 Binary files a/dev/_downloads/d323b5e4b5903c1466d0e7674a0bfab6/plot_bcic_iv_2a_moabb_trial.zip and b/dev/_downloads/d323b5e4b5903c1466d0e7674a0bfab6/plot_bcic_iv_2a_moabb_trial.zip differ diff --git a/dev/_downloads/e824ae71d652d80edc1e0be50f3fcab4/plot_data_augmentation.zip b/dev/_downloads/e824ae71d652d80edc1e0be50f3fcab4/plot_data_augmentation.zip index 56cd32b2..24d0c50f 100644 Binary files a/dev/_downloads/e824ae71d652d80edc1e0be50f3fcab4/plot_data_augmentation.zip and b/dev/_downloads/e824ae71d652d80edc1e0be50f3fcab4/plot_data_augmentation.zip differ diff --git a/dev/_downloads/e920a22508db3ee26e946faad811f57f/plot_hyperparameter_tuning_with_scikit-learn.zip b/dev/_downloads/e920a22508db3ee26e946faad811f57f/plot_hyperparameter_tuning_with_scikit-learn.zip index c3d06961..1a551957 100644 Binary files a/dev/_downloads/e920a22508db3ee26e946faad811f57f/plot_hyperparameter_tuning_with_scikit-learn.zip and b/dev/_downloads/e920a22508db3ee26e946faad811f57f/plot_hyperparameter_tuning_with_scikit-learn.zip differ diff --git a/dev/_downloads/f3c7c35609f54876319e72ff42b71bb9/plot_load_save_datasets.zip b/dev/_downloads/f3c7c35609f54876319e72ff42b71bb9/plot_load_save_datasets.zip index ac9bd8d4..41ae8ded 100644 Binary files a/dev/_downloads/f3c7c35609f54876319e72ff42b71bb9/plot_load_save_datasets.zip and 
b/dev/_downloads/f3c7c35609f54876319e72ff42b71bb9/plot_load_save_datasets.zip differ diff --git a/dev/_downloads/f56b29a9031a522f5bcdaa29212f6171/plot_bcic_iv_4_ecog_trial.zip b/dev/_downloads/f56b29a9031a522f5bcdaa29212f6171/plot_bcic_iv_4_ecog_trial.zip index 1e506bb0..b60bfae4 100644 Binary files a/dev/_downloads/f56b29a9031a522f5bcdaa29212f6171/plot_bcic_iv_4_ecog_trial.zip and b/dev/_downloads/f56b29a9031a522f5bcdaa29212f6171/plot_bcic_iv_4_ecog_trial.zip differ diff --git a/dev/_images/sphx_glr_plot_benchmark_preprocessing_001.png b/dev/_images/sphx_glr_plot_benchmark_preprocessing_001.png index 67f5f6aa..7c6be2b5 100644 Binary files a/dev/_images/sphx_glr_plot_benchmark_preprocessing_001.png and b/dev/_images/sphx_glr_plot_benchmark_preprocessing_001.png differ diff --git a/dev/_images/sphx_glr_plot_benchmark_preprocessing_thumb.png b/dev/_images/sphx_glr_plot_benchmark_preprocessing_thumb.png index c68c4526..cc95e33c 100644 Binary files a/dev/_images/sphx_glr_plot_benchmark_preprocessing_thumb.png and b/dev/_images/sphx_glr_plot_benchmark_preprocessing_thumb.png differ diff --git a/dev/auto_examples/advanced_training/plot_bcic_iv_4_ecog_cropped.html b/dev/auto_examples/advanced_training/plot_bcic_iv_4_ecog_cropped.html index f09d2e25..2cd96e1f 100644 --- a/dev/auto_examples/advanced_training/plot_bcic_iv_4_ecog_cropped.html +++ b/dev/auto_examples/advanced_training/plot_bcic_iv_4_ecog_cropped.html @@ -605,7 +605,7 @@

Preprocessing
preprocess(test_set, [Preprocessor("crop", tmin=0, tmax=24)], n_jobs=-1) -
<braindecode.datasets.base.BaseConcatDataset object at 0x7fec64a86c20>
+
<braindecode.datasets.base.BaseConcatDataset object at 0x7f0046bfd540>
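For context, the crop call shown above uses braindecode's standard preprocessing API. A minimal, self-contained sketch of that call, assuming test_set is an existing BaseConcatDataset (the sketch itself is not part of the diff):

# Hypothetical usage sketch: crop each recording in test_set to its
# first 24 seconds, running across all available CPU cores.
from braindecode.preprocessing import Preprocessor, preprocess

preprocess(test_set, [Preprocessor("crop", tmin=0, tmax=24)], n_jobs=-1)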
 

In the time series targets setup, target variables are stored in the mne.Raw object as channels.
@@ -842,14 +842,14 @@
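Before the training log below, a minimal hypothetical sketch of that targets-as-channels storage pattern, with invented data and channel names (not taken from the example):

# Hypothetical sketch: store a continuous target as an extra misc channel.
import numpy as np
import mne

sfreq = 100.0
eeg = np.random.randn(2, 1000) * 1e-5                      # two EEG channels
target = np.sin(np.linspace(0, 8 * np.pi, 1000))[None, :]  # continuous target
info = mne.create_info(
    ch_names=["C3", "C4", "target"],
    sfreq=sfreq,
    ch_types=["eeg", "eeg", "misc"],  # the target rides along as a misc channel
)
raw = mne.io.RawArray(np.vstack([eeg, target]), info)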

Training

  epoch    r2_train    r2_valid    train_loss    valid_loss      lr     dur
 -------  ----------  ----------  ------------  ------------  ------  ------
-      1    -23.7826     -4.6087        1.8225       11.7419  0.0006  0.5332
-      2     -1.1990     -0.1716        1.5134        2.6475  0.0006  0.4536
-      3     -0.3654     -0.4985        1.2625        3.4645  0.0005  0.4748
-      4     -0.4383     -0.2731        1.2058        2.9438  0.0004  0.4467
-      5     -0.5982     -0.1512        1.1027        2.6529  0.0002  0.4663
-      6     -0.6090     -0.1255        1.1121        2.5886  0.0001  0.4476
-      7     -0.4455     -0.1445        0.9618        2.6339  0.0000  0.4487
-      8     -0.2790     -0.1755        1.0927        2.7063  0.0000  0.4650
+      1    -23.7826     -4.6087        1.8225       11.7419  0.0006  0.5790
+      2     -1.1990     -0.1716        1.5134        2.6475  0.0006  0.4891
+      3     -0.3654     -0.4985        1.2625        3.4645  0.0005  0.4842
+      4     -0.4383     -0.2731        1.2058        2.9438  0.0004  0.4907
+      5     -0.5982     -0.1512        1.1027        2.6529  0.0002  0.4817
+      6     -0.6090     -0.1255        1.1121        2.5886  0.0001  0.4866
+      7     -0.4455     -0.1445        0.9618        2.6339  0.0000  0.5015
+      8     -0.2790     -0.1755        1.0927        2.7063  0.0000  0.4822
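As an aside, per-epoch metric columns like r2_train and r2_valid above are typically produced with skorch EpochScoring callbacks; a hedged sketch, not taken from the example's actual source:

# Hypothetical sketch: R2 scoring on train and validation splits each epoch.
from skorch.callbacks import EpochScoring

train_r2 = EpochScoring("r2", on_train=True, name="r2_train", lower_is_better=False)
valid_r2 = EpochScoring("r2", on_train=False, name="r2_valid", lower_is_better=False)
# e.g. passed to the estimator via callbacks=[train_r2, valid_r2]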
 

Obtaining predictions and targets for the test, train, and validation dataset
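A short sketch of this step, assuming a fitted skorch-style estimator named regressor and window datasets named train_set, valid_set, and test_set (all names illustrative, not from the diff):

# Hypothetical sketch: collect predictions and targets per split.
import numpy as np

splits = {"train": train_set, "valid": valid_set, "test": test_set}
preds = {name: regressor.predict(ds) for name, ds in splits.items()}
# braindecode windows datasets yield (X, y, ind) triples when iterated
targets = {name: np.stack([y for _, y, _ in ds]) for name, ds in splits.items()}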

@@ -970,8 +970,8 @@

Plot Results
plt.tight_layout()

-plot bcic iv 4 ecog cropped

Total running time of the script: (2 minutes 24.832 seconds)

-

Estimated memory usage: 1534 MB

+plot bcic iv 4 ecog cropped

Total running time of the script: (2 minutes 50.890 seconds)

+

Estimated memory usage: 1285 MB

  epoch    train_accuracy    train_loss    valid_acc    valid_accuracy    valid_loss      lr     dur
 -------  ----------------  ------------  -----------  ----------------  ------------  ------  ------
-      1            0.2639        1.4655       0.2639            0.2639        1.5266  0.0006  1.7875
-      2            0.3299        1.3119       0.3194            0.3194        1.3948  0.0005  1.6114
-      3            0.4757        1.1941       0.2986            0.2986        1.3259  0.0002  1.6068
-      4            0.5625        1.1671       0.3333            0.3333        1.3025  0.0000  1.6193
+      1            0.2639        1.4655       0.2639            0.2639        1.5266  0.0006  1.7687
+      2            0.3299        1.3119       0.3194            0.3194        1.3948  0.0005  1.5863
+      3            0.4757        1.1941       0.2986            0.2986        1.3259  0.0002  1.6029
+      4            0.5625        1.1671       0.3333            0.3333        1.3025  0.0000  1.5946
 
 <class 'braindecode.classifier.EEGClassifier'>[initialized](
   module_=============================================================================================================================================
@@ -850,8 +850,8 @@ 

Setting the data augmentation -

Total running time of the script: (0 minutes 17.953 seconds)

-

Estimated memory usage: 1131 MB

+

Total running time of the script: (0 minutes 17.360 seconds)

+

Estimated memory usage: 1151 MB

-
/home/runner/work/braindecode/braindecode/braindecode/preprocessing/preprocess.py:244: UserWarning: Applying preprocessors [<braindecode.preprocessing.preprocess.Preprocessor object at 0x7fec4d708460>] to the mne.io.Raw of an EEGWindowsDataset.
+
/home/runner/work/braindecode/braindecode/braindecode/preprocessing/preprocess.py:244: UserWarning: Applying preprocessors [<braindecode.preprocessing.preprocess.Preprocessor object at 0x7f0046be39d0>] to the mne.io.Raw of an EEGWindowsDataset.
   warn(
-/home/runner/work/braindecode/braindecode/braindecode/preprocessing/preprocess.py:244: UserWarning: Applying preprocessors [<braindecode.preprocessing.preprocess.Preprocessor object at 0x7fec4d708460>] to the mne.io.Raw of an EEGWindowsDataset.
+/home/runner/work/braindecode/braindecode/braindecode/preprocessing/preprocess.py:244: UserWarning: Applying preprocessors [<braindecode.preprocessing.preprocess.Preprocessor object at 0x7f0046be39d0>] to the mne.io.Raw of an EEGWindowsDataset.
   warn(
-/home/runner/work/braindecode/braindecode/braindecode/preprocessing/preprocess.py:244: UserWarning: Applying preprocessors [<braindecode.preprocessing.preprocess.Preprocessor object at 0x7fec4d708460>] to the mne.io.Raw of an EEGWindowsDataset.
+/home/runner/work/braindecode/braindecode/braindecode/preprocessing/preprocess.py:244: UserWarning: Applying preprocessors [<braindecode.preprocessing.preprocess.Preprocessor object at 0x7f0046be39d0>] to the mne.io.Raw of an EEGWindowsDataset.
   warn(
 
-<braindecode.datasets.base.BaseConcatDataset object at 0x7fec4cefcd60>
+<braindecode.datasets.base.BaseConcatDataset object at 0x7f010a98cf40>
 
@@ -892,31 +892,31 @@

Training
  epoch    train_acc    train_loss    valid_acc    valid_loss    cp     dur
 -------  -----------  ------------  -----------  ------------  ----  ------
-      1       0.5234        0.7013       0.6680        0.6320     +  1.0945
-      2       0.5938        0.7149       0.4880        0.8358        0.8365
-      3       0.4922        1.0040       0.6440        0.6172     +  0.8199
-      4       0.5234        0.7031       0.6120        0.5990     +  0.8083
-      5       0.5391        0.6751       0.5920        0.6213        0.7976
-      6       0.6719        0.6227       0.5920        0.6263        0.8068
-      7       0.6562        0.6309       0.6240        0.6117        0.8123
-      8       0.6641        0.6272       0.6480        0.5950     +  0.8098
-      9       0.6328        0.6238       0.6680        0.5797     +  0.7859
-     10       0.6406        0.6177       0.6800        0.5746     +  0.7901
-     11       0.6250        0.6323       0.7040        0.5787        0.8057
-     12       0.6094        0.6281       0.6760        0.5772        0.8059
-     13       0.6328        0.6422       0.6880        0.5790        0.8006
-     14       0.6406        0.5920       0.6840        0.5765        0.7881
-     15       0.6562        0.6170       0.6920        0.5730     +  0.8095
-     16       0.7578        0.5608       0.6960        0.5676     +  0.8120
-     17       0.6875        0.5936       0.7120        0.5612     +  0.8039
-     18       0.7734        0.5472       0.7080        0.5500     +  0.7965
-     19       0.7656        0.5245       0.7120        0.5400     +  0.8064
-     20       0.6641        0.5641       0.7160        0.5333     +  0.8121
-     21       0.7422        0.5307       0.7200        0.5272     +  0.8072
-     22       0.7109        0.5499       0.7360        0.5211     +  0.7939
-     23       0.6250        0.6259       0.7400        0.5164     +  0.8113
-     24       0.7031        0.5712       0.7400        0.5120     +  0.8142
-     25       0.7109        0.5030       0.7280        0.5120        0.8289
+      1       0.5234        0.7013       0.6680        0.6320     +  1.0490
+      2       0.5938        0.7149       0.4880        0.8358        0.8341
+      3       0.4922        1.0040       0.6440        0.6172     +  0.8135
+      4       0.5234        0.7031       0.6120        0.5990     +  0.8035
+      5       0.5391        0.6751       0.5920        0.6213        0.8175
+      6       0.6719        0.6227       0.5920        0.6263        0.8164
+      7       0.6562        0.6309       0.6240        0.6117        0.7963
+      8       0.6641        0.6272       0.6480        0.5950     +  0.8067
+      9       0.6328        0.6238       0.6680        0.5797     +  0.7959
+     10       0.6406        0.6177       0.6800        0.5746     +  0.8043
+     11       0.6250        0.6323       0.7040        0.5787        0.8013
+     12       0.6094        0.6281       0.6760        0.5772        0.7926
+     13       0.6328        0.6422       0.6880        0.5790        0.7865
+     14       0.6406        0.5920       0.6840        0.5765        0.7960
+     15       0.6562        0.6170       0.6920        0.5730     +  0.8022
+     16       0.7578        0.5608       0.6960        0.5676     +  0.7963
+     17       0.6875        0.5936       0.7120        0.5612     +  0.7890
+     18       0.7734        0.5472       0.7080        0.5500     +  0.7976
+     19       0.7656        0.5245       0.7120        0.5400     +  0.8039
+     20       0.6641        0.5641       0.7160        0.5333     +  0.7955
+     21       0.7422        0.5307       0.7200        0.5272     +  0.8018
+     22       0.7109        0.5499       0.7360        0.5211     +  0.7879
+     23       0.6250        0.6259       0.7400        0.5164     +  0.8059
+     24       0.7031        0.5712       0.7400        0.5120     +  0.8064
+     25       0.7109        0.5030       0.7280        0.5120        0.8020
 /home/runner/.local/lib/python3.10/site-packages/skorch/net.py:2626: FutureWarning: You are using `torch.load` with `weights_only=False` (the current default value), which uses the default pickle module implicitly. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling (See https://github.com/pytorch/pytorch/blob/main/SECURITY.md#untrusted-models for more details). In a future release, the default value for `weights_only` will be flipped to `True`. This limits the functions that could be executed during unpickling. Arbitrary objects will no longer be allowed to be loaded via this mode unless they are explicitly allowlisted by the user via `torch.serialization.add_safe_globals`. We recommend you start setting `weights_only=True` for any use case where you don't have full control of the loaded file. Please open an issue on GitHub for any issues related to this experimental feature.
   return torch.load(f_name, map_location=map_location)
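Two asides on the log above, both hedged sketches rather than the example's actual code: the cp column is typically produced by a skorch Checkpoint callback, and the FutureWarning recommends loading saved parameters with weights_only=True.

# Hypothetical sketch: checkpoint on best validation loss, then load the
# saved state dict the way the FutureWarning above suggests.
import torch
from skorch.callbacks import Checkpoint

cp = Checkpoint(monitor="valid_loss_best", f_params="params.pt")
# e.g. passed to the classifier via callbacks=[("cp", cp)]; after training:
state_dict = torch.load("params.pt", map_location="cpu", weights_only=True)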
 
@@ -1102,7 +1102,7 @@

Using the learned representations
ax.legend()

-plot relative positioning
<matplotlib.legend.Legend object at 0x7fec4f294cd0>
+plot relative positioning
<matplotlib.legend.Legend object at 0x7f0045f46650>
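The figure referenced above scatter-plots a two-dimensional projection of the learned window embeddings, colored by sleep stage. A hypothetical sketch of such a plot, assuming arrays embeddings of shape (n_windows, dim) and stages of shape (n_windows,):

# Hypothetical sketch: project embeddings to 2D and plot one cloud per stage.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

proj = PCA(n_components=2).fit_transform(embeddings)
fig, ax = plt.subplots()
for stage in np.unique(stages):
    mask = stages == stage
    ax.scatter(proj[mask, 0], proj[mask, 1], s=5, label=stage)
ax.legend()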
 

We see that there is sleep stage-related structure in the embedding. A
@@ -1159,8 +1159,8 @@

References -

Total running time of the script: (2 minutes 1.031 seconds)

-

Estimated memory usage: 754 MB

+

Total running time of the script: (2 minutes 3.369 seconds)

+

Estimated memory usage: 778 MB