This repository has been archived by the owner on May 6, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.py
126 lines (100 loc) · 3.52 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import itertools
import os
import logging
import traceback
import train
import features
from msaf import config
import eval
from tqdm import tqdm
"""
Automated Experimentation and Evaluation Script

This script automates the process of training and evaluating a model under various settings.
It conducts experiments by testing every combination of hyperparameters produced by
``create_settings()``, training the model, and evaluating its performance using the
SALAMI dataset for boundary detection.

Usage:
    python script_name.py

Dependencies:
- PyTorch
- PyTorch Lightning
- NumPy
- Pandas
- tqdm

Module Dependencies:
- train (Contains model training functionality)
- features (Contains feature extraction functionality)
- eval (Contains evaluation functionality)
"""
# Configure root logging once at import time so both this script and the
# imported train/eval modules emit INFO-level messages.
logging.basicConfig(level=logging.INFO)
def create_settings():
    """Build the list of hyperparameter configurations to sweep over.

    Returns:
        list[dict]: one dict per combination of batch size, window size,
        feature type, and clip duration, each merged with the shared base
        settings (file list, dataset name, sample rate, epochs, eval window).
    """
    base = {
        "FILE_LIST_PATH": "./datasets/MSD/MSD_audio_limit=all.csv",
        "DATASET_NAME": "Million Song Dataset",
        "SAMPLE_RATE": config.sample_rate,
        "MAX_EPOCHS": 100,
        "EVAL_WINDOW": 0.5,
    }
    # Window sizes are expressed in samples: 4..7 seconds times the sample rate.
    grid = itertools.product(
        sorted([8]),
        sorted(seconds * base["SAMPLE_RATE"] for seconds in range(4, 8)),
        sorted(["pcp", "mfcc", "embeddiogram"]),
        sorted([3.0, 7.0, 15.0]),
    )
    return [
        {
            **base,
            "BATCH_SIZE": batch_size,
            "WINDOW_SIZE": window_size,
            "FEATURE": feature,
            "CLIP_DURATION": clip_duration,
        }
        for batch_size, window_size, feature, clip_duration in grid
    ]
def main():
    """Run the full train -> evaluate sweep over every generated setting.

    For each configuration from create_settings(): configure the train
    module's globals, train the model, point the eval/features modules at
    the best checkpoint, clear MSAF's cached feature file, and evaluate
    boundary detection on the SALAMI dataset. A failure in one setting is
    logged and the sweep continues with the next one.
    """
    # MSAF caches extracted features here; it must be removed between runs
    # so stale features from the previous setting are not reused.
    tmp_features_path = "/home/jupyter/oriol/Master-Thesis/.features_msaf_tmp.json"
    settings = create_settings()
    for setting in tqdm(settings):
        logging.info(f"Processing settings: {setting}")
        try:
            # --- Set up training parameters ---
            train.FILE_LIST_PATH = setting["FILE_LIST_PATH"]
            train.DATASET_NAME = setting["DATASET_NAME"]
            train.BATCH_SIZE = setting["BATCH_SIZE"]
            train.CLIP_DURATION = setting["CLIP_DURATION"]
            train.SAMPLE_RATE = setting["SAMPLE_RATE"]
            train.MAX_EPOCHS = setting["MAX_EPOCHS"]
            # --- Train the model ---
            _, best_model_path = train.main()
            # --- Set up evaluation parameters ---
            eval.FEATURE = setting["FEATURE"]
            eval.EVAL_WINDOW = setting["EVAL_WINDOW"]
            features.CKPT_PATH = best_model_path
            features.WINDOW_SIZE = setting["WINDOW_SIZE"]
            # Check if the cached feature file exists before deleting it.
            if os.path.exists(tmp_features_path):
                os.remove(tmp_features_path)
            else:
                logging.info("The temporary JSON file does not exist")
            # --- Evaluate the model for this setting ---
            # (boundary detection; SALAMI dataset)
            eval.main()
        except Exception as e:
            # Deliberate broad catch at the sweep boundary: one failing
            # configuration must not abort the remaining experiments.
            # logging.exception appends the full traceback automatically.
            logging.exception(
                f"An error occurred while processing the settings {setting}. Error: {str(e)}. Continuing to the next iteration of the loop."
            )
# Entry point: run the full experiment sweep only when executed as a script.
if __name__ == "__main__":
    main()