llama3.2_1b_conversion.toml
# torchtitan Config.toml
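# In upstream torchtitan a config like this is passed to the launch script via
# CONFIG_FILE, e.g. `CONFIG_FILE=./train_configs/llama3.2_1b_conversion.toml ./run_llama_train.sh`
# (script name/path is assumed from upstream and may differ in this fork).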

[job]
dump_folder = "/nfs/h100/raid/chem/checkpoints"
description = "Llama 3.2 training"
use_for_integration_test = false

[profiling]
enable_profiling = false
save_traces_folder = "profile_trace"
profile_freq = 10
enable_memory_snapshot = false
save_memory_snapshot_folder = "memory_snapshot"
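# In upstream torchtitan, trace/snapshot folders are resolved relative to job.dump_folder.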

[metrics]
log_freq = 1
enable_color_printing = true
enable_aim = false
save_aim_folder = "aim"
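# Aim experiment tracking appears to be a fork-specific addition; it is disabled here,
# otherwise runs would be written under save_aim_folder.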

[model]
name = "llama3"
flavor = "1B"
norm_type = "rmsnorm" # layernorm / np_layernorm / rmsnorm / compiled_rmsnorm / fused_rmsnorm
tokenizer_path = "torchtitan/tokenizers/Llama-3.2-chem-1B-v1"
# tokenizer_path = "meta-llama/Llama-3.2-1B"
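# Uses the fork's chemistry-adapted tokenizer; the commented-out path is the stock
# Llama-3.2-1B tokenizer.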

[optimizer]
name = "AdamW"
lr = 1.0e-4

[training]
batch_size = 1
gradient_accumulation_steps = 3
seq_len = 2048
warmup_steps = 500 # lr scheduler warm up, normally 20% of the train steps
max_norm = 1.0 # grad norm clipping
steps = 10
data_parallel_degree = -1
tensor_parallel_degree = 1
compile = false
# dataset = "c4" # supported datasets: c4_test (2K), c4 (177M)
# dataset = "chemlactica_train_mini" # supported datasets: c4_test (2K), c4 (177M), chemlactica_train_mini (4K)
dataset = "chemlactica_train"
data_processing_style = "chemlactica_style"
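# Effective tokens per optimizer step (per data-parallel rank, assuming the fork's
# gradient accumulation multiplies the micro-batch): 1 * 3 * 2048 = 6144.
# steps = 10 with warmup_steps = 500 suggests a short conversion/debug run rather
# than a full training run.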

[experimental]
pipeline_parallel_degree = 1
enable_async_tensor_parallel = false

[checkpoint]
enable_checkpoint = true
# load_folder = "meta-llama/Llama-3.2-1B"
# save_folder = "meta-llama/Llama-3.2-1B"
load_folder = "yerevann/Llama-3.2-1B/e625b9a4b9784da4a63fa1a8"
load_at_step = 40000
save_folder = "hf/yerevann/Llama-3.2-1B/e625b9a4b9784da4a63fa1a8"
interval_type = "steps"
interval = 1000
model_weights_only = false
export_dtype = "float32"
async_mode = "async_with_pinned_mem" # ["disabled", "async", "async_with_pinned_mem"]
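# Resumes from step 40000 of the referenced run and checkpoints every 1000 steps;
# the "hf/" prefix on save_folder presumably marks output destined for HuggingFace
# export (fork-specific convention).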

[model_download_export]
# to_titan = true
# weights_source = "huggingface"
to_hf = true
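# This section appears fork-specific: the commented-out keys would import
# HuggingFace weights into torchtitan format, while to_hf = true exports
# checkpoints back to HuggingFace format.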

[activation_checkpoint]
mode = 'none' # ['none', 'selective', 'full']
selective_ac_option = '2' # an integer N applies ac to every Nth layer; 'op' applies ac based on the ops policy

[float8]
enable_float8_linear = false
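# float8 training in torchtitan relies on torchao; left disabled here.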