fix converter and early stopping + fix warmup epochs
kprokofi committed Nov 25, 2024
1 parent ec610a9 commit de025a6
Showing 42 changed files with 204 additions and 107 deletions.

src/otx/core/model/base.py (2 changes: 1 addition & 1 deletion)

@@ -744,7 +744,7 @@ def lr_scheduler_step(self, scheduler: LRSchedulerTypeUnion, metric: Tensor) ->
             return super().lr_scheduler_step(scheduler=scheduler, metric=metric)

         if len(warmup_schedulers) != 1:
-            msg = "No more than two warmup schedulers coexist."
+            msg = "Exactly one warmup scheduler must be configured."
             raise RuntimeError(msg)

         warmup_scheduler = next(iter(warmup_schedulers))

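The check above requires exactly one warmup scheduler among the configured schedulers. For illustration, a trimmed-down, self-contained sketch of that invariant; the helper name and the use of LambdaLR as a stand-in warmup type are assumptions, not part of the commit:

    from torch.optim.lr_scheduler import LambdaLR

    def pick_warmup_scheduler(schedulers: list, warmup_type: type = LambdaLR):
        """Return the single warmup scheduler, mirroring the check above."""
        warmup_schedulers = [s for s in schedulers if isinstance(s, warmup_type)]
        if len(warmup_schedulers) != 1:
            msg = "Exactly one warmup scheduler must be configured."
            raise RuntimeError(msg)
        return next(iter(warmup_schedulers))
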
src/otx/core/schedulers/warmup_schedulers.py (11 changes: 6 additions & 5 deletions)

@@ -19,16 +19,17 @@ class LinearWarmupScheduler(LambdaLR):
     """Linear Warmup scheduler.

     Args:
-        num_warmup_steps: Learning rate will linearly increased during the period same as this number.
-        warmup_interval: If "epoch", count the number of steps for the warmup period.
+        optimizer (Optimizer): Optimizer to apply the scheduler to.
+        num_warmup_steps (int): Learning rate will be linearly increased during this number of steps.
+        interval (Literal["step", "epoch"]): If "epoch", count the number of epochs for the warmup period.
             Otherwise, the iteration step will be the warmup period.
     """

     def __init__(
         self,
         optimizer: Optimizer,
         num_warmup_steps: int = 1000,
-        interval: Literal["step", "epoch"] = "step",
+        interval: Literal["step", "epoch"] = "epoch",
     ):
         if not num_warmup_steps > 0:
             msg = f"num_warmup_steps should be > 0, got {num_warmup_steps}"

@@ -55,7 +56,7 @@ class LinearWarmupSchedulerCallable:
         main_scheduler_callable: Callable to create a LR scheduler that will be mainly used.
         num_warmup_steps: Learning rate will be linearly increased during this number of steps.
             If it is less than or equal to zero, do not create `LinearWarmupScheduler`.
-        warmup_interval: If "epoch", count the number of steps for the warmup period.
+        warmup_interval: If "epoch", count the number of epochs for the warmup period.
             Otherwise, the iteration step will be the warmup period.
         monitor: If given, override the main scheduler's `monitor` attribute.
     """

@@ -64,7 +65,7 @@ def __init__(
         self,
         main_scheduler_callable: LRSchedulerCallable,
         num_warmup_steps: int = 0,
-        warmup_interval: Literal["step", "epoch"] = "step",
+        warmup_interval: Literal["step", "epoch"] = "epoch",
         monitor: str | None = None,
     ):
         self.main_scheduler_callable = SchedulerCallableSupportHPO.from_callable(main_scheduler_callable)

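Based on the signature shown in this hunk, a minimal usage sketch of the new default; the toy model, optimizer, and loop are placeholders, not part of the commit:

    import torch

    from otx.core.schedulers.warmup_schedulers import LinearWarmupScheduler

    model = torch.nn.Linear(10, 2)  # toy model, for illustration only
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

    # With this commit the default interval is "epoch", so the warmup
    # period is counted in epochs rather than optimizer steps.
    scheduler = LinearWarmupScheduler(optimizer, num_warmup_steps=3, interval="epoch")

    for epoch in range(5):
        optimizer.step()
        scheduler.step()  # the LR ramps up linearly over the first 3 epochs
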
src/otx/recipe/classification/h_label_cls/deit_tiny.yaml (14 changes: 9 additions & 5 deletions)

@@ -10,12 +10,16 @@ model:
       weight_decay: 0.05

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: H_LABEL_CLS

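The remaining recipe changes follow this same pattern: the bare scheduler is wrapped in a LinearWarmupSchedulerCallable with warmup disabled (num_warmup_steps: 0), and ReduceLROnPlateau gets patience: 5 instead of 1. A recipe that wanted warmup enabled would simply raise the count; a hypothetical example (values are illustrative, not from this commit):

    scheduler:
      class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
      init_args:
        num_warmup_steps: 10  # hypothetical: 10-epoch warmup under the new default interval
        main_scheduler_callable:
          class_path: lightning.pytorch.cli.ReduceLROnPlateau
          init_args:
            mode: max
            factor: 0.5
            patience: 5
            monitor: val/accuracy
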
src/otx/recipe/classification/h_label_cls/efficientnet_b0.yaml (14 changes: 9 additions & 5 deletions)

@@ -11,12 +11,16 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: H_LABEL_CLS

src/otx/recipe/classification/h_label_cls/efficientnet_v2.yaml (12 changes: 12 additions & 0 deletions)

@@ -10,6 +10,18 @@ model:
       momentum: 0.9
       weight_decay: 0.0001

+  scheduler:
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
+    init_args:
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy
+
 engine:
   task: H_LABEL_CLS
   device: auto

(file name not shown)

@@ -19,7 +19,7 @@ model:
     init_args:
       mode: max
       factor: 0.5
-      patience: 1
+      patience: 5
       monitor: val/accuracy

 engine:

(file name not shown)

@@ -12,10 +12,14 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      T_max: 100000
-      eta_min: 0
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+        init_args:
+          T_max: 100000
+          eta_min: 0

 engine:
   task: H_LABEL_CLS

(file name not shown)

@@ -12,10 +12,14 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      T_max: 100000
-      eta_min: 0
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+        init_args:
+          T_max: 100000
+          eta_min: 0

 engine:
   task: H_LABEL_CLS

(file name not shown)

@@ -12,10 +12,14 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      T_max: 100000
-      eta_min: 0
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+        init_args:
+          T_max: 100000
+          eta_min: 0

 engine:
   task: H_LABEL_CLS

src/otx/recipe/classification/multi_class_cls/deit_tiny.yaml (14 changes: 9 additions & 5 deletions)

@@ -12,12 +12,16 @@ model:
       weight_decay: 0.05

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: MULTI_CLASS_CLS

src/otx/recipe/classification/multi_class_cls/dino_v2.yaml (14 changes: 9 additions & 5 deletions)

@@ -11,12 +11,16 @@ model:
       weight_decay: 0.05

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: MULTI_CLASS_CLS

(file name not shown)

@@ -12,12 +12,16 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: MULTI_CLASS_CLS

(file name not shown)

@@ -12,12 +12,16 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: MULTI_CLASS_CLS

(file name not shown)

@@ -20,7 +20,7 @@ model:
     init_args:
       mode: max
       factor: 0.5
-      patience: 1
+      patience: 5
       monitor: val/accuracy

 engine:

(file name not shown)

@@ -12,10 +12,14 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      T_max: 100000
-      eta_min: 0
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+        init_args:
+          T_max: 100000
+          eta_min: 0

 engine:
   task: MULTI_CLASS_CLS

(file name not shown)

@@ -12,10 +12,14 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      T_max: 100000
-      eta_min: 0
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+        init_args:
+          T_max: 100000
+          eta_min: 0

 engine:
   task: MULTI_CLASS_CLS

(file name not shown)

@@ -12,10 +12,14 @@ model:
       weight_decay: 0.0001

   scheduler:
-    class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      T_max: 100000
-      eta_min: 0
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: torch.optim.lr_scheduler.CosineAnnealingLR
+        init_args:
+          T_max: 100000
+          eta_min: 0

 engine:
   task: MULTI_CLASS_CLS

src/otx/recipe/classification/multi_label_cls/deit_tiny.yaml (14 changes: 9 additions & 5 deletions)

@@ -11,12 +11,16 @@ model:
       weight_decay: 0.05

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: MULTI_LABEL_CLS

(file name not shown)

@@ -12,12 +12,16 @@ model:
       weight_decay: 0.0005

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: MULTI_LABEL_CLS

(file name not shown)

@@ -12,12 +12,16 @@ model:
       weight_decay: 0.0005

   scheduler:
-    class_path: lightning.pytorch.cli.ReduceLROnPlateau
+    class_path: otx.core.schedulers.LinearWarmupSchedulerCallable
     init_args:
-      mode: max
-      factor: 0.5
-      patience: 1
-      monitor: val/accuracy
+      num_warmup_steps: 0
+      main_scheduler_callable:
+        class_path: lightning.pytorch.cli.ReduceLROnPlateau
+        init_args:
+          mode: max
+          factor: 0.5
+          patience: 5
+          monitor: val/accuracy

 engine:
   task: MULTI_LABEL_CLS

(file name not shown)

@@ -20,7 +20,7 @@ model:
     init_args:
       mode: max
       factor: 0.5
-      patience: 1
+      patience: 5
      monitor: val/accuracy

 engine:

[Diffs for the remaining changed files were not loaded.]