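# Nanotron config for continued pretraining (CPT) of Llama-3.1-8B on a
# DCLM + MegaMath mixture. The checkpoints block resumes from step 30000 of
# a previous run and snapshots every 1250 steps, carrying over both the
# optimizer state and the LR scheduler.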
checkpoints:
  checkpoint_interval: 1250
  checkpoints_path: /mbz/users/fan.zhou/backup/storage/finemath/pt/llama_3_1_8B_nanotron_cpt/megamath/mixture_bs4M_constant_lr1e-5_seq8k_200B_dp16_tp8
  checkpoints_path_is_shared_file_system: false
  load_lr_scheduler: true
  load_optimizer: true
  resume_checkpoint_path: /mbz/users/fan.zhou/backup/storage/finemath/pt/llama_3_1_8B_nanotron_cpt/megamath/mixture_bs4M_constant_lr1e-5_seq8k_200B_dp16_tp8/30000
  save_final_state: true
  save_initial_state: false
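# Single training stage mixing six pre-tokenized sources. dataset_weights map
# positionally onto dataset_folder and sum to 1.0: DCLM 10%,
# MegaMath-Web-Pro++ 72.5%, with the remaining 17.5% split across the
# MegaMath code and synthetic subsets.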
data_stages:
- data:
    dataset:
      dataset_folder:
      - /mbz/users/fan.zhou/backup/storage/finemath/tokenized_data/llama3/dclm
      - /mbz/users/fan.zhou/backup/storage/finemath/tokenized_data/llama3/megamath_web_pro++
      - /mbz/users/fan.zhou/backup/storage/finemath/tokenized_data/llama3/megamath/megamath_code
      - /mbz/users/fan.zhou/backup/storage/finemath/tokenized_data/llama3/megamath/megamath_synth_qa
      - /mbz/users/fan.zhou/backup/storage/finemath/tokenized_data/llama3/megamath/megamath_synth_code
      - /mbz/users/fan.zhou/backup/storage/finemath/tokenized_data/llama3/megamath/megamath_synth_code_block
      dataset_weights:
      - 0.1
      - 0.725
      - 0.0125
      - 0.05
      - 0.0125
      - 0.1
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
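# Run metadata. Note the run name says bs8M/dp16 while the tokens and
# parallelism settings below work out to a ~4.2M-token batch with dp=32
# (the checkpoints path above says bs4M); the run name may simply be stale.
# The id field looks like an experiment-tracker (e.g. W&B) run id.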
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: MATH-BASE-CPT
  run: llama_3_1_8B_megamath_mixture_bs8M_constant_lr1e-5_seq8k_200B_dp16_tp8
  seed: 42
  id: xrcvmbh8
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
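# Standard Llama-3.1-8B architecture in bf16, initialized from a
# nanotron-format checkpoint. rope_scaling is the llama3 long-context scheme
# Llama 3.1 ships with (frequency scaling over the native 8192-position
# window up to 131072), though training here uses 8192-token sequences anyway.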
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    path: /mbz/users/fan.zhou/backup/storage/ckpts/Llama-3.1-8B-nanotron/
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 128000
    eos_token_id: 128001
    hidden_act: silu
    hidden_size: 4096
    initializer_range: 0.02
    intermediate_size: 14336
    is_llama_config: true
    max_position_embeddings: 131072
    num_attention_heads: 32
    num_hidden_layers: 32
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_interleaved: false
    rope_scaling:
      factor: 8.0
      high_freq_factor: 4.0
      low_freq_factor: 1.0
      original_max_position_embeddings: 8192
      rope_type: llama3
    rope_theta: 500000.0
    tie_word_embeddings: false
    use_cache: true
    vocab_size: 128256
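# AdamW with fp32 gradient accumulation and gradient clipping at 1.0. The LR
# schedule is effectively constant: decay style is "linear", but min_decay_lr
# equals the peak learning_rate of 1e-5, matching "constant_lr1e-5" in the
# run name. zero_stage: 0 means no optimizer-state sharding.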
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 1.0e-05
    lr_decay_starting_step: null
    lr_decay_steps: 50000
    lr_decay_style: linear
    lr_warmup_steps: 0
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 0
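# Parallel layout: dp=32, tp=8, pp=1, so 32 x 8 x 1 = 256 ranks (typically
# one GPU each). Tensor parallelism runs in REDUCE_SCATTER mode with async
# linear-layer communication to overlap comms with compute.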
parallelism:
  dp: 32
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  recompute_layer: false
  tp: 8
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
  tp_recompute_allgather: true
profiler: null
s3_upload: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: /mbz/users/fan.zhou/backup/storage/ckpts/Llama-3.1-8B-nanotron/
  tokenizer_revision: null
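# Batch math (assuming nanotron's usual global-batch definition of
# micro_batch_size x batch_accumulation_per_replica x dp):
# 4 x 4 x 32 = 512 sequences/step x 8192 tokens = ~4.19M tokens/step,
# so 50000 steps cover ~210B tokens, in line with the "200B" tag in the
# run name.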
tokens:
  batch_accumulation_per_replica: 4
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 4
  sequence_length: 8192
  train_steps: 50000
  val_check_interval: -1