From de025a6aaea4e2881518a459457b8f5f283e9e86 Mon Sep 17 00:00:00 2001 From: kprokofi Date: Tue, 26 Nov 2024 01:36:57 +0900 Subject: [PATCH] fix converter and early stopping + fix warmup epochs --- src/otx/core/model/base.py | 2 +- src/otx/core/schedulers/warmup_schedulers.py | 11 ++++++----- .../classification/h_label_cls/deit_tiny.yaml | 14 +++++++++----- .../h_label_cls/efficientnet_b0.yaml | 14 +++++++++----- .../h_label_cls/efficientnet_v2.yaml | 12 ++++++++++++ .../h_label_cls/mobilenet_v3_large.yaml | 2 +- .../h_label_cls/tv_efficientnet_b3.yaml | 10 +++++++--- .../h_label_cls/tv_efficientnet_v2_l.yaml | 10 +++++++--- .../h_label_cls/tv_mobilenet_v3_small.yaml | 10 +++++++--- .../classification/multi_class_cls/deit_tiny.yaml | 14 +++++++++----- .../classification/multi_class_cls/dino_v2.yaml | 14 +++++++++----- .../multi_class_cls/efficientnet_b0.yaml | 14 +++++++++----- .../multi_class_cls/efficientnet_v2.yaml | 14 +++++++++----- .../multi_class_cls/mobilenet_v3_large.yaml | 2 +- .../multi_class_cls/tv_efficientnet_b3.yaml | 10 +++++++--- .../multi_class_cls/tv_efficientnet_v2_l.yaml | 10 +++++++--- .../multi_class_cls/tv_mobilenet_v3_small.yaml | 10 +++++++--- .../classification/multi_label_cls/deit_tiny.yaml | 14 +++++++++----- .../multi_label_cls/efficientnet_b0.yaml | 14 +++++++++----- .../multi_label_cls/efficientnet_v2.yaml | 14 +++++++++----- .../multi_label_cls/mobilenet_v3_large.yaml | 2 +- .../multi_label_cls/tv_efficientnet_b3.yaml | 10 +++++++--- .../multi_label_cls/tv_efficientnet_v2_l.yaml | 10 +++++++--- .../multi_label_cls/tv_mobilenet_v3_small.yaml | 10 +++++++--- .../maskrcnn_efficientnetb2b.yaml | 2 +- .../maskrcnn_efficientnetb2b_tile.yaml | 2 +- .../recipe/instance_segmentation/maskrcnn_r50.yaml | 2 +- .../instance_segmentation/maskrcnn_r50_tile.yaml | 2 +- .../instance_segmentation/maskrcnn_r50_tv.yaml | 2 +- .../maskrcnn_r50_tv_tile.yaml | 2 +- .../instance_segmentation/maskrcnn_swint.yaml | 2 +- .../maskrcnn_efficientnetb2b.yaml | 2 +- .../maskrcnn_efficientnetb2b_tile.yaml | 2 +- src/otx/recipe/rotated_detection/maskrcnn_r50.yaml | 2 +- .../rotated_detection/maskrcnn_r50_tile.yaml | 2 +- src/otx/recipe/semantic_segmentation/dino_v2.yaml | 12 ++++++++---- .../recipe/semantic_segmentation/litehrnet_18.yaml | 2 +- .../recipe/semantic_segmentation/litehrnet_s.yaml | 2 +- .../recipe/semantic_segmentation/litehrnet_x.yaml | 2 +- src/otx/recipe/visual_prompting/sam_tiny_vit.yaml | 10 +++++++--- src/otx/recipe/visual_prompting/sam_vit_b.yaml | 10 +++++++--- src/otx/tools/converter.py | 4 ++-- 42 files changed, 204 insertions(+), 107 deletions(-) diff --git a/src/otx/core/model/base.py b/src/otx/core/model/base.py index ac2331ff885..3c70d34d3e0 100644 --- a/src/otx/core/model/base.py +++ b/src/otx/core/model/base.py @@ -744,7 +744,7 @@ def lr_scheduler_step(self, scheduler: LRSchedulerTypeUnion, metric: Tensor) -> return super().lr_scheduler_step(scheduler=scheduler, metric=metric) if len(warmup_schedulers) != 1: - msg = "No more than two warmup schedulers coexist." + msg = "No more than one warmup scheduler can coexist." raise RuntimeError(msg) warmup_scheduler = next(iter(warmup_schedulers)) diff --git a/src/otx/core/schedulers/warmup_schedulers.py b/src/otx/core/schedulers/warmup_schedulers.py index 6de763bb52b..c4ed150271d 100644 --- a/src/otx/core/schedulers/warmup_schedulers.py +++ b/src/otx/core/schedulers/warmup_schedulers.py @@ -19,8 +19,9 @@ class LinearWarmupScheduler(LambdaLR): """Linear Warmup scheduler.
Args: - num_warmup_steps: Learning rate will linearly increased during the period same as this number. - warmup_interval: If "epoch", count the number of steps for the warmup period. + optimizer (Optimizer): Optimizer to which the scheduler is applied. + num_warmup_steps (int): The learning rate is linearly increased over a warmup period of this length. + interval (Literal["step", "epoch"]): If "epoch", count the number of epochs for the warmup period. Otherwise, the iteration step will be the warmup period. """ @@ -28,7 +29,7 @@ def __init__( self, optimizer: Optimizer, num_warmup_steps: int = 1000, - interval: Literal["step", "epoch"] = "step", + interval: Literal["step", "epoch"] = "epoch", ): if not num_warmup_steps > 0: msg = f"num_warmup_steps should be > 0, got {num_warmup_steps}" @@ -55,7 +56,7 @@ class LinearWarmupSchedulerCallable: main_scheduler_callable: Callable to create a LR scheduler that will be mainly used. num_warmup_steps: Learning rate will linearly increased during the period same as this number. If it is less than equal to zero, do not create `LinearWarmupScheduler`. - warmup_interval: If "epoch", count the number of steps for the warmup period. + warmup_interval: If "epoch", count the number of epochs for the warmup period. Otherwise, the iteration step will be the warmup period. monitor: If given, override the main scheduler's `monitor` attribute. """ @@ -64,7 +65,7 @@ def __init__( self, main_scheduler_callable: LRSchedulerCallable, num_warmup_steps: int = 0, - warmup_interval: Literal["step", "epoch"] = "step", + warmup_interval: Literal["step", "epoch"] = "epoch", monitor: str | None = None, ): self.main_scheduler_callable = SchedulerCallableSupportHPO.from_callable(main_scheduler_callable) diff --git a/src/otx/recipe/classification/h_label_cls/deit_tiny.yaml b/src/otx/recipe/classification/h_label_cls/deit_tiny.yaml index b36f48e14c9..f60419c5d60 100644 --- a/src/otx/recipe/classification/h_label_cls/deit_tiny.yaml +++ b/src/otx/recipe/classification/h_label_cls/deit_tiny.yaml @@ -10,12 +10,16 @@ model: weight_decay: 0.05 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: H_LABEL_CLS diff --git a/src/otx/recipe/classification/h_label_cls/efficientnet_b0.yaml b/src/otx/recipe/classification/h_label_cls/efficientnet_b0.yaml index d0ea7daec7b..ccd2744b60a 100644 --- a/src/otx/recipe/classification/h_label_cls/efficientnet_b0.yaml +++ b/src/otx/recipe/classification/h_label_cls/efficientnet_b0.yaml @@ -11,12 +11,16 @@ model: weight_decay: 0.0001 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: H_LABEL_CLS diff --git a/src/otx/recipe/classification/h_label_cls/efficientnet_v2.yaml b/src/otx/recipe/classification/h_label_cls/efficientnet_v2.yaml index fc3f6abeab8..6bfe6b5ca63 100644 --- a/src/otx/recipe/classification/h_label_cls/efficientnet_v2.yaml +++
b/src/otx/recipe/classification/h_label_cls/efficientnet_v2.yaml @@ -10,6 +10,18 @@ model: momentum: 0.9 weight_decay: 0.0001 + scheduler: + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable + init_args: + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy + engine: task: H_LABEL_CLS device: auto diff --git a/src/otx/recipe/classification/h_label_cls/mobilenet_v3_large.yaml b/src/otx/recipe/classification/h_label_cls/mobilenet_v3_large.yaml index 211bc8fa883..e19f1cc0d3e 100644 --- a/src/otx/recipe/classification/h_label_cls/mobilenet_v3_large.yaml +++ b/src/otx/recipe/classification/h_label_cls/mobilenet_v3_large.yaml @@ -19,7 +19,7 @@ model: init_args: mode: max factor: 0.5 - patience: 1 + patience: 5 monitor: val/accuracy engine: diff --git a/src/otx/recipe/classification/h_label_cls/tv_efficientnet_b3.yaml b/src/otx/recipe/classification/h_label_cls/tv_efficientnet_b3.yaml index 2078c98b43b..f5e291de48e 100644 --- a/src/otx/recipe/classification/h_label_cls/tv_efficientnet_b3.yaml +++ b/src/otx/recipe/classification/h_label_cls/tv_efficientnet_b3.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: H_LABEL_CLS diff --git a/src/otx/recipe/classification/h_label_cls/tv_efficientnet_v2_l.yaml b/src/otx/recipe/classification/h_label_cls/tv_efficientnet_v2_l.yaml index 0f2d7b60a6a..faf1903172f 100644 --- a/src/otx/recipe/classification/h_label_cls/tv_efficientnet_v2_l.yaml +++ b/src/otx/recipe/classification/h_label_cls/tv_efficientnet_v2_l.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: H_LABEL_CLS diff --git a/src/otx/recipe/classification/h_label_cls/tv_mobilenet_v3_small.yaml b/src/otx/recipe/classification/h_label_cls/tv_mobilenet_v3_small.yaml index faab071ff5d..a9beb29962d 100644 --- a/src/otx/recipe/classification/h_label_cls/tv_mobilenet_v3_small.yaml +++ b/src/otx/recipe/classification/h_label_cls/tv_mobilenet_v3_small.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: H_LABEL_CLS diff --git a/src/otx/recipe/classification/multi_class_cls/deit_tiny.yaml b/src/otx/recipe/classification/multi_class_cls/deit_tiny.yaml index f5446d3cca6..d46242990ac 100644 --- a/src/otx/recipe/classification/multi_class_cls/deit_tiny.yaml +++ b/src/otx/recipe/classification/multi_class_cls/deit_tiny.yaml @@ -12,12 +12,16 @@ model: weight_decay: 0.05 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable 
init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: MULTI_CLASS_CLS diff --git a/src/otx/recipe/classification/multi_class_cls/dino_v2.yaml b/src/otx/recipe/classification/multi_class_cls/dino_v2.yaml index 300091fab8c..62f869a1e33 100644 --- a/src/otx/recipe/classification/multi_class_cls/dino_v2.yaml +++ b/src/otx/recipe/classification/multi_class_cls/dino_v2.yaml @@ -11,12 +11,16 @@ model: weight_decay: 0.05 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: MULTI_CLASS_CLS diff --git a/src/otx/recipe/classification/multi_class_cls/efficientnet_b0.yaml b/src/otx/recipe/classification/multi_class_cls/efficientnet_b0.yaml index 872d28789ef..1bc9c7b9750 100644 --- a/src/otx/recipe/classification/multi_class_cls/efficientnet_b0.yaml +++ b/src/otx/recipe/classification/multi_class_cls/efficientnet_b0.yaml @@ -12,12 +12,16 @@ model: weight_decay: 0.0001 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: MULTI_CLASS_CLS diff --git a/src/otx/recipe/classification/multi_class_cls/efficientnet_v2.yaml b/src/otx/recipe/classification/multi_class_cls/efficientnet_v2.yaml index 0cb77ef8852..7799e9bc275 100644 --- a/src/otx/recipe/classification/multi_class_cls/efficientnet_v2.yaml +++ b/src/otx/recipe/classification/multi_class_cls/efficientnet_v2.yaml @@ -12,12 +12,16 @@ model: weight_decay: 0.0001 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: MULTI_CLASS_CLS diff --git a/src/otx/recipe/classification/multi_class_cls/mobilenet_v3_large.yaml b/src/otx/recipe/classification/multi_class_cls/mobilenet_v3_large.yaml index c4c6946fd6e..43c65b609d3 100644 --- a/src/otx/recipe/classification/multi_class_cls/mobilenet_v3_large.yaml +++ b/src/otx/recipe/classification/multi_class_cls/mobilenet_v3_large.yaml @@ -20,7 +20,7 @@ model: init_args: mode: max factor: 0.5 - patience: 1 + patience: 5 monitor: val/accuracy engine: diff --git a/src/otx/recipe/classification/multi_class_cls/tv_efficientnet_b3.yaml b/src/otx/recipe/classification/multi_class_cls/tv_efficientnet_b3.yaml index f06b3b36e32..e765397e44e 100644 --- a/src/otx/recipe/classification/multi_class_cls/tv_efficientnet_b3.yaml +++ b/src/otx/recipe/classification/multi_class_cls/tv_efficientnet_b3.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: 
torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: MULTI_CLASS_CLS diff --git a/src/otx/recipe/classification/multi_class_cls/tv_efficientnet_v2_l.yaml b/src/otx/recipe/classification/multi_class_cls/tv_efficientnet_v2_l.yaml index c72714e9433..331ef3dbb00 100644 --- a/src/otx/recipe/classification/multi_class_cls/tv_efficientnet_v2_l.yaml +++ b/src/otx/recipe/classification/multi_class_cls/tv_efficientnet_v2_l.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: MULTI_CLASS_CLS diff --git a/src/otx/recipe/classification/multi_class_cls/tv_mobilenet_v3_small.yaml b/src/otx/recipe/classification/multi_class_cls/tv_mobilenet_v3_small.yaml index 4c6975c241a..2c7919b50c8 100644 --- a/src/otx/recipe/classification/multi_class_cls/tv_mobilenet_v3_small.yaml +++ b/src/otx/recipe/classification/multi_class_cls/tv_mobilenet_v3_small.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: MULTI_CLASS_CLS diff --git a/src/otx/recipe/classification/multi_label_cls/deit_tiny.yaml b/src/otx/recipe/classification/multi_label_cls/deit_tiny.yaml index afb14dd046f..105c60d92a3 100644 --- a/src/otx/recipe/classification/multi_label_cls/deit_tiny.yaml +++ b/src/otx/recipe/classification/multi_label_cls/deit_tiny.yaml @@ -11,12 +11,16 @@ model: weight_decay: 0.05 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: MULTI_LABEL_CLS diff --git a/src/otx/recipe/classification/multi_label_cls/efficientnet_b0.yaml b/src/otx/recipe/classification/multi_label_cls/efficientnet_b0.yaml index d2b11411a51..31acbec5140 100644 --- a/src/otx/recipe/classification/multi_label_cls/efficientnet_b0.yaml +++ b/src/otx/recipe/classification/multi_label_cls/efficientnet_b0.yaml @@ -12,12 +12,16 @@ model: weight_decay: 0.0005 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: MULTI_LABEL_CLS diff --git a/src/otx/recipe/classification/multi_label_cls/efficientnet_v2.yaml b/src/otx/recipe/classification/multi_label_cls/efficientnet_v2.yaml index 
87177eb1e17..27687960571 100644 --- a/src/otx/recipe/classification/multi_label_cls/efficientnet_v2.yaml +++ b/src/otx/recipe/classification/multi_label_cls/efficientnet_v2.yaml @@ -12,12 +12,16 @@ model: weight_decay: 0.0005 scheduler: - class_path: lightning.pytorch.cli.ReduceLROnPlateau + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - mode: max - factor: 0.5 - patience: 1 - monitor: val/accuracy + num_warmup_steps: 0 + main_scheduler_callable: + class_path: lightning.pytorch.cli.ReduceLROnPlateau + init_args: + mode: max + factor: 0.5 + patience: 5 + monitor: val/accuracy engine: task: MULTI_LABEL_CLS diff --git a/src/otx/recipe/classification/multi_label_cls/mobilenet_v3_large.yaml b/src/otx/recipe/classification/multi_label_cls/mobilenet_v3_large.yaml index 02021708453..7fc8207d80a 100644 --- a/src/otx/recipe/classification/multi_label_cls/mobilenet_v3_large.yaml +++ b/src/otx/recipe/classification/multi_label_cls/mobilenet_v3_large.yaml @@ -20,7 +20,7 @@ model: init_args: mode: max factor: 0.5 - patience: 1 + patience: 5 monitor: val/accuracy engine: diff --git a/src/otx/recipe/classification/multi_label_cls/tv_efficientnet_b3.yaml b/src/otx/recipe/classification/multi_label_cls/tv_efficientnet_b3.yaml index 9579f8e5e57..7551cf124a5 100644 --- a/src/otx/recipe/classification/multi_label_cls/tv_efficientnet_b3.yaml +++ b/src/otx/recipe/classification/multi_label_cls/tv_efficientnet_b3.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: MULTI_LABEL_CLS diff --git a/src/otx/recipe/classification/multi_label_cls/tv_efficientnet_v2_l.yaml b/src/otx/recipe/classification/multi_label_cls/tv_efficientnet_v2_l.yaml index 3003b26eb48..7d618b11074 100644 --- a/src/otx/recipe/classification/multi_label_cls/tv_efficientnet_v2_l.yaml +++ b/src/otx/recipe/classification/multi_label_cls/tv_efficientnet_v2_l.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: MULTI_LABEL_CLS diff --git a/src/otx/recipe/classification/multi_label_cls/tv_mobilenet_v3_small.yaml b/src/otx/recipe/classification/multi_label_cls/tv_mobilenet_v3_small.yaml index 492e835ef62..4bcc6086518 100644 --- a/src/otx/recipe/classification/multi_label_cls/tv_mobilenet_v3_small.yaml +++ b/src/otx/recipe/classification/multi_label_cls/tv_mobilenet_v3_small.yaml @@ -12,10 +12,14 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.CosineAnnealingLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - T_max: 100000 - eta_min: 0 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.CosineAnnealingLR + init_args: + T_max: 100000 + eta_min: 0 engine: task: MULTI_LABEL_CLS diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b.yaml index 7abeee66a13..8101303f006 100644 --- 
a/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b_tile.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b_tile.yaml index 527911af487..b8755d19f55 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b_tile.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_efficientnetb2b_tile.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_r50.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_r50.yaml index 62b904771ed..da3d2f64483 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_r50.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_r50.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_r50_tile.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_r50_tile.yaml index 34603c8a1d8..bbb97ac2adb 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_r50_tile.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_r50_tile.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_r50_tv.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_r50_tv.yaml index 0f826d80f59..fba206ad766 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_r50_tv.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_r50_tv.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_r50_tv_tile.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_r50_tv_tile.yaml index 499410a0742..6eef1411d9e 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_r50_tv_tile.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_r50_tv_tile.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/instance_segmentation/maskrcnn_swint.yaml b/src/otx/recipe/instance_segmentation/maskrcnn_swint.yaml index 39720ba66a3..d2cdf9479b8 100644 --- a/src/otx/recipe/instance_segmentation/maskrcnn_swint.yaml +++ b/src/otx/recipe/instance_segmentation/maskrcnn_swint.yaml @@ -12,7 +12,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable 
init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b.yaml b/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b.yaml index d071a72ae30..5ce568d0f2b 100644 --- a/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b.yaml +++ b/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b_tile.yaml b/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b_tile.yaml index 358d538bfd9..adcb23d8025 100644 --- a/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b_tile.yaml +++ b/src/otx/recipe/rotated_detection/maskrcnn_efficientnetb2b_tile.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/rotated_detection/maskrcnn_r50.yaml b/src/otx/recipe/rotated_detection/maskrcnn_r50.yaml index 03e03d5dbf1..d67ff1e6f82 100644 --- a/src/otx/recipe/rotated_detection/maskrcnn_r50.yaml +++ b/src/otx/recipe/rotated_detection/maskrcnn_r50.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/rotated_detection/maskrcnn_r50_tile.yaml b/src/otx/recipe/rotated_detection/maskrcnn_r50_tile.yaml index 26b5e746613..050510cddc9 100644 --- a/src/otx/recipe/rotated_detection/maskrcnn_r50_tile.yaml +++ b/src/otx/recipe/rotated_detection/maskrcnn_r50_tile.yaml @@ -13,7 +13,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/semantic_segmentation/dino_v2.yaml b/src/otx/recipe/semantic_segmentation/dino_v2.yaml index 33c4e98d578..b103805567d 100644 --- a/src/otx/recipe/semantic_segmentation/dino_v2.yaml +++ b/src/otx/recipe/semantic_segmentation/dino_v2.yaml @@ -17,11 +17,15 @@ model: weight_decay: 0.0001 scheduler: - class_path: torch.optim.lr_scheduler.PolynomialLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - total_iters: 150 - power: 0.9 - last_epoch: -1 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.PolynomialLR + init_args: + total_iters: 150 + power: 0.9 + last_epoch: -1 engine: task: SEMANTIC_SEGMENTATION diff --git a/src/otx/recipe/semantic_segmentation/litehrnet_18.yaml b/src/otx/recipe/semantic_segmentation/litehrnet_18.yaml index e7a20d7e369..f61d24d3eb3 100644 --- a/src/otx/recipe/semantic_segmentation/litehrnet_18.yaml +++ b/src/otx/recipe/semantic_segmentation/litehrnet_18.yaml @@ -16,7 +16,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau 
init_args: diff --git a/src/otx/recipe/semantic_segmentation/litehrnet_s.yaml b/src/otx/recipe/semantic_segmentation/litehrnet_s.yaml index d353ffdfc4c..7f24dab1fe6 100644 --- a/src/otx/recipe/semantic_segmentation/litehrnet_s.yaml +++ b/src/otx/recipe/semantic_segmentation/litehrnet_s.yaml @@ -16,7 +16,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/semantic_segmentation/litehrnet_x.yaml b/src/otx/recipe/semantic_segmentation/litehrnet_x.yaml index 85bb55d55ca..e3daa967ba0 100644 --- a/src/otx/recipe/semantic_segmentation/litehrnet_x.yaml +++ b/src/otx/recipe/semantic_segmentation/litehrnet_x.yaml @@ -16,7 +16,7 @@ model: scheduler: class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - num_warmup_steps: 100 + num_warmup_steps: 10 main_scheduler_callable: class_path: lightning.pytorch.cli.ReduceLROnPlateau init_args: diff --git a/src/otx/recipe/visual_prompting/sam_tiny_vit.yaml b/src/otx/recipe/visual_prompting/sam_tiny_vit.yaml index 377d80b3722..7906c79edf7 100644 --- a/src/otx/recipe/visual_prompting/sam_tiny_vit.yaml +++ b/src/otx/recipe/visual_prompting/sam_tiny_vit.yaml @@ -18,10 +18,14 @@ model: lr: 0.00001 scheduler: - class_path: torch.optim.lr_scheduler.ConstantLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - factor: 1 - total_iters: -1 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.ConstantLR + init_args: + factor: 1 + total_iters: -1 engine: task: VISUAL_PROMPTING diff --git a/src/otx/recipe/visual_prompting/sam_vit_b.yaml b/src/otx/recipe/visual_prompting/sam_vit_b.yaml index bc3bf89351a..cea919bbc8f 100644 --- a/src/otx/recipe/visual_prompting/sam_vit_b.yaml +++ b/src/otx/recipe/visual_prompting/sam_vit_b.yaml @@ -18,10 +18,14 @@ model: lr: 0.00001 scheduler: - class_path: torch.optim.lr_scheduler.ConstantLR + class_path: otx.core.schedulers.LinearWarmupSchedulerCallable init_args: - factor: 1 - total_iters: -1 + num_warmup_steps: 0 + main_scheduler_callable: + class_path: torch.optim.lr_scheduler.ConstantLR + init_args: + factor: 1 + total_iters: -1 engine: task: VISUAL_PROMPTING diff --git a/src/otx/tools/converter.py b/src/otx/tools/converter.py index 8797910f74e..33899eb4b17 100644 --- a/src/otx/tools/converter.py +++ b/src/otx/tools/converter.py @@ -308,13 +308,13 @@ def update_num_workers(param_value: int) -> None: config["data"]["test_subset"]["num_workers"] = param_value def update_enable_early_stopping(param_value: bool) -> None: - idx = ConfigConverter._get_callback_idx(config["callbacks"], "lightning.pytorch.callbacks.EarlyStopping") + idx = ConfigConverter._get_callback_idx(config["callbacks"], "otx.algo.callbacks.adaptive_early_stopping.EarlyStoppingWithWarmup") if not param_value and idx > -1: config["callbacks"].pop(idx) def update_early_stop_patience(param_value: int) -> None: for callback in config["callbacks"]: - if callback["class_path"] == "lightning.pytorch.callbacks.EarlyStopping": + if callback["class_path"] == "otx.algo.callbacks.adaptive_early_stopping.EarlyStoppingWithWarmup": callback["init_args"]["patience"] = param_value break
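
For illustration only (not applied by this patch): a minimal, self-contained Python sketch of how the converter changes above are expected to behave, with early-stopping parameters looked up under the otx.algo.callbacks.adaptive_early_stopping.EarlyStoppingWithWarmup class path instead of lightning.pytorch.callbacks.EarlyStopping. The config dict and helper names below are assumptions made up for the example; only the callback class path string is taken from the diff.

# Hypothetical sketch (not OTX code) of the post-patch converter behavior.
EARLY_STOPPING_CLS = "otx.algo.callbacks.adaptive_early_stopping.EarlyStoppingWithWarmup"

# Made-up stand-in for the callbacks section of a converted config.
config = {
    "callbacks": [
        {"class_path": EARLY_STOPPING_CLS, "init_args": {"patience": 10}},
        {"class_path": "lightning.pytorch.callbacks.LearningRateMonitor", "init_args": {}},
    ],
}

def get_callback_idx(callbacks, class_path):
    # Index of the callback with the given class path, or -1 if it is absent.
    for i, callback in enumerate(callbacks):
        if callback["class_path"] == class_path:
            return i
    return -1

def update_enable_early_stopping(param_value):
    # After the patch, the EarlyStoppingWithWarmup entry (not the plain
    # Lightning EarlyStopping) is removed when early stopping is disabled.
    idx = get_callback_idx(config["callbacks"], EARLY_STOPPING_CLS)
    if not param_value and idx > -1:
        config["callbacks"].pop(idx)

def update_early_stop_patience(param_value):
    # Patience is likewise written into the EarlyStoppingWithWarmup entry.
    for callback in config["callbacks"]:
        if callback["class_path"] == EARLY_STOPPING_CLS:
            callback["init_args"]["patience"] = param_value
            break

update_early_stop_patience(5)
update_enable_early_stopping(True)
print(config["callbacks"][0]["init_args"])  # -> {'patience': 5}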