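# Nanotron training config for the SmolLM3 ablations blog runs: the 14-layer
# multi-head-attention (MHA) baseline. A checkpoint is saved every 1000 steps
# and mirrored to S3 (see s3_upload below).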
checkpoints:
  checkpoint_interval: 1000
  checkpoints_path: /scratch/loubna/checkpoints
  checkpoints_path_is_shared_file_system: false
  load_lr_scheduler: true
  load_optimizer: true
  resume_checkpoint_path: null
  save_final_state: true
  save_initial_state: false

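# One data stage ("stable") mixing three tokenized datasets; the weights below
# sum to 1.0: 0.7 FineWeb-Edu (web), 0.1 FineMath 3+ (math), 0.2 Stack-Edu
# Python (code).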
data_stages:
- data:
    dataset:
      dataset_read_path:
      - /fsx/loubna/projects_v2/data/blog/fineweb-edu
      - /fsx/loubna/projects_v2/data/blog/finemath-3plus
      - /fsx/loubna/projects_v2/data/blog/stack-edu-python
      dataset_folder:
      - s3://smollm3/datasets/llama_tokenized-global-chunks/fineweb-edu/fineweb-edu/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/finemath/
      - s3://smollm3/datasets/llama_tokenized-individual-chunks/stack-edu-Python/
      dataset_weights:
      - 0.7
      - 0.1
      - 0.2
      pad_samples_to_global_batch_size: false
      return_positions: true
      token_size_in_bytes: 4
      tokenizer_name: HuggingFaceTB/SmolLM3-3B
      use_old_brrr_dataloader: false
      vocab_size: 128256
    num_loading_workers: 0
    seed: 6
  name: stable
  start_training_step: 1

general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: smollm3-blog
  run: baseline-mha-14layers
  seed: 6
  step: null

logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info

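# MHA baseline: num_key_value_heads == num_attention_heads (32), i.e. no
# KV-head sharing (a GQA ablation would lower num_key_value_heads).
# Head dim = hidden_size / num_attention_heads = 2048 / 32 = 64.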
model:
  ddp_bucket_cap_mb: 50
  dtype: bfloat16
  init_method:
    std: 0.02
  make_vocab_size_divisible_by: 1
  model_config:
    _attn_implementation: flash_attention_2
    _fused_rms_norm: true
    _fused_rotary_emb: true
    _use_doc_masking: false
    _use_qkv_packed: true
    attention_bias: false
    bos_token_id: 128000
    eos_token_id: 128001
    flex_attention_mask: null
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 8192
    is_qwen2_config: true
    max_position_embeddings: 4096
    moe_config: null
    num_attention_heads: 32
    num_hidden_layers: 14
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_interleaved: false
    rope_scaling: null
    rope_theta: 50000.0
    sliding_window_size: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 128256
    no_rope_layer: null

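# AdamW with linear warmup to 5.0e-4 over the first 2000 steps, then cosine
# decay over 28000 steps (steps 2000 -> 30000) down to min_decay_lr = 5.0e-5,
# so the schedule spans exactly train_steps = 30000.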
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0005
    lr_decay_starting_step: 2000
    lr_decay_steps: 28000
    lr_decay_style: cosine
    lr_warmup_steps: 2000
    lr_warmup_style: linear
    min_decay_lr: 5.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.1
  weight_decay_exclude_named_params: null
  zero_stage: 0

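# Pure data parallelism: world size = dp * tp * pp = 8 * 1 * 1 = 8 GPUs
# (one node), with ZeRO disabled (zero_stage: 0 above).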
parallelism:
  context_parallel_size: 1
  dp: 8
  expert_parallel_size: 1
  moe_layer_recompute: false
  pp: 1
  pp_engine: 1f1b
  recompute_layer: false
  tp: 1
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
  tp_recompute_allgather: true

profiler: null

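# Each saved checkpoint is pushed to S3 with s5cmd and then deleted locally
# (remove_after_upload: true), keeping the /scratch checkpoints_path small.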
s3_upload:
  remove_after_upload: true
  s5cmd_concurrency: 5
  s5cmd_numworkers: 16
  s5cmd_path: /fsx/loubna/.venv-2-6-cu124/bin/s5cmd
  upload_s3_path: s3://smollm3/blogpost-ablations/baseline-mha-14layers

tokenizer:
  tokenizer_max_length: 4096
  tokenizer_name_or_path: HuggingFaceTB/SmolLM3-3B
  tokenizer_revision: null

metrics_logging:
  log_level: 1
  log_detail_interval: 100

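# Global batch size = dp * micro_batch_size * batch_accumulation_per_replica
# = 8 * 3 * 16 = 384 sequences of 4096 tokens, i.e. ~1.57M tokens per step;
# train_steps = 30000 therefore corresponds to roughly 47B training tokens.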
tokens:
  batch_accumulation_per_replica: 16
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 3
  sequence_length: 4096
  train_steps: 30000
  val_check_interval: 100

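# Evaluation: every eval_interval = 1000 steps (matching checkpoint_interval),
# checkpoints appear to be evaluated with lighteval as Slurm jobs using the
# settings below, with results uploaded to S3 and Weights & Biases.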
lighteval:
  slurm_script_dir: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/eval_results/launch-config"
  logs_path: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/eval_results/logs"
  local_checkpoint_dir: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/evals-ckpt"
  nanotron_path: "/fsx/loubna/projects_v2/smollm3/nanotron"
  output_dir: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/eval_results/results"
  s3_save_path: "s3://smollm3/blogpost-evals"
  upload_to_wandb: true
  wandb_project: smollm3-blog-evals
  wandb_entity: huggingface
  parallelism:
    dp: 8
    pp: 1
    tp: 1
    tp_linear_async_communication: true
  batch_size: 1
  eval_config_override: "/fsx/loubna/projects_v2/smollm3/nanotron/ablations/eval_configs/ablations_blog.yaml"
  eval_interval: 1000
  eval_interval_file: null
  slurm:
    gpus_per_node: 8
    partition: "hopper-prod"
    hf_cache: "/fsx/loubna/.cache/huggingface"
    cpus_per_task: 88
    qos: "normal"
    time: "01:59:00"
    reservation: null