{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05188067444876784,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00025940337224383917,
      "grad_norm": 13.49365234375,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.5584,
      "step": 10
    },
    {
      "epoch": 0.0005188067444876783,
      "grad_norm": 2.6006040573120117,
      "learning_rate": 3.2000000000000005e-05,
      "loss": 0.7111,
      "step": 20
    },
    {
      "epoch": 0.0007782101167315176,
      "grad_norm": 4.435553073883057,
      "learning_rate": 4.8e-05,
      "loss": 0.3076,
      "step": 30
    },
    {
      "epoch": 0.0010376134889753567,
      "grad_norm": 1.1146177053451538,
      "learning_rate": 6.400000000000001e-05,
      "loss": 0.1826,
      "step": 40
    },
    {
      "epoch": 0.0012970168612191958,
      "grad_norm": 0.8707008957862854,
      "learning_rate": 8e-05,
      "loss": 0.1341,
      "step": 50
    },
    {
      "epoch": 0.0015564202334630351,
      "grad_norm": 1.7411352396011353,
      "learning_rate": 9.6e-05,
      "loss": 0.1144,
      "step": 60
    },
    {
      "epoch": 0.0018158236057068742,
      "grad_norm": 0.9691908955574036,
      "learning_rate": 0.00011200000000000001,
      "loss": 0.0831,
      "step": 70
    },
    {
      "epoch": 0.0020752269779507134,
      "grad_norm": 1.0398532152175903,
      "learning_rate": 0.00012800000000000002,
      "loss": 0.0801,
      "step": 80
    },
    {
      "epoch": 0.0023346303501945525,
      "grad_norm": 0.5252112746238708,
      "learning_rate": 0.000144,
      "loss": 0.081,
      "step": 90
    },
    {
      "epoch": 0.0025940337224383916,
      "grad_norm": 0.6552278995513916,
      "learning_rate": 0.00016,
      "loss": 0.0821,
      "step": 100
    },
    {
      "epoch": 0.0028534370946822307,
      "grad_norm": 0.6302046775817871,
      "learning_rate": 0.00017600000000000002,
      "loss": 0.0749,
      "step": 110
    },
    {
      "epoch": 0.0031128404669260703,
      "grad_norm": 0.7439587712287903,
      "learning_rate": 0.000192,
      "loss": 0.0647,
      "step": 120
    },
    {
      "epoch": 0.0033722438391699094,
      "grad_norm": 6.8584771156311035,
      "learning_rate": 0.0001999978128380225,
      "loss": 0.0726,
      "step": 130
    },
    {
      "epoch": 0.0036316472114137485,
      "grad_norm": 2.014613389968872,
      "learning_rate": 0.0001999803161162393,
      "loss": 0.0936,
      "step": 140
    },
    {
      "epoch": 0.0038910505836575876,
      "grad_norm": 1.3758050203323364,
      "learning_rate": 0.00019994532573409262,
      "loss": 0.1089,
      "step": 150
    },
    {
      "epoch": 0.004150453955901427,
      "grad_norm": 1.3027769327163696,
      "learning_rate": 0.00019989284781388617,
      "loss": 0.0772,
      "step": 160
    },
    {
      "epoch": 0.004409857328145266,
      "grad_norm": 1.021938443183899,
      "learning_rate": 0.00019982289153773646,
      "loss": 0.0695,
      "step": 170
    },
    {
      "epoch": 0.004669260700389105,
      "grad_norm": 0.8069638609886169,
      "learning_rate": 0.00019973546914596623,
      "loss": 0.0624,
      "step": 180
    },
    {
      "epoch": 0.004928664072632944,
      "grad_norm": 0.5198134779930115,
      "learning_rate": 0.00019963059593496268,
      "loss": 0.0481,
      "step": 190
    },
    {
      "epoch": 0.005188067444876783,
      "grad_norm": 0.39965012669563293,
      "learning_rate": 0.00019950829025450114,
      "loss": 0.0493,
      "step": 200
    },
    {
      "epoch": 0.005447470817120622,
      "grad_norm": 0.8152652978897095,
      "learning_rate": 0.0001993685735045343,
      "loss": 0.0477,
      "step": 210
    },
    {
      "epoch": 0.005706874189364461,
      "grad_norm": 0.7083427309989929,
      "learning_rate": 0.0001992114701314478,
      "loss": 0.0501,
      "step": 220
    },
    {
      "epoch": 0.0059662775616083005,
      "grad_norm": 0.690096914768219,
      "learning_rate": 0.000199037007623783,
      "loss": 0.0509,
      "step": 230
    },
    {
      "epoch": 0.0062256809338521405,
      "grad_norm": 0.871699869632721,
      "learning_rate": 0.00019884521650742715,
      "loss": 0.0464,
      "step": 240
    },
    {
      "epoch": 0.00648508430609598,
      "grad_norm": 0.6445639133453369,
      "learning_rate": 0.00019863613034027224,
      "loss": 0.0462,
      "step": 250
    },
    {
      "epoch": 0.006744487678339819,
      "grad_norm": 0.4920782148838043,
      "learning_rate": 0.0001984097857063434,
      "loss": 0.0425,
      "step": 260
    },
    {
      "epoch": 0.007003891050583658,
      "grad_norm": 0.8624788522720337,
      "learning_rate": 0.0001981662222093976,
      "loss": 0.0471,
      "step": 270
    },
    {
      "epoch": 0.007263294422827497,
      "grad_norm": 0.5704030990600586,
      "learning_rate": 0.00019790548246599447,
      "loss": 0.0571,
      "step": 280
    },
    {
      "epoch": 0.007522697795071336,
      "grad_norm": 0.569247841835022,
      "learning_rate": 0.00019762761209803927,
      "loss": 0.0531,
      "step": 290
    },
    {
      "epoch": 0.007782101167315175,
      "grad_norm": 0.7414583563804626,
      "learning_rate": 0.0001973326597248006,
      "loss": 0.0455,
      "step": 300
    },
    {
      "epoch": 0.008041504539559013,
      "grad_norm": 0.42905348539352417,
      "learning_rate": 0.00019702067695440332,
      "loss": 0.0542,
      "step": 310
    },
    {
      "epoch": 0.008300907911802853,
      "grad_norm": 0.6132651567459106,
      "learning_rate": 0.00019669171837479873,
      "loss": 0.0439,
      "step": 320
    },
    {
      "epoch": 0.008560311284046693,
      "grad_norm": 0.44733384251594543,
      "learning_rate": 0.00019634584154421317,
      "loss": 0.0429,
      "step": 330
    },
    {
      "epoch": 0.008819714656290532,
      "grad_norm": 0.516659677028656,
      "learning_rate": 0.00019598310698107702,
      "loss": 0.0407,
      "step": 340
    },
    {
      "epoch": 0.009079118028534372,
      "grad_norm": 0.2340136468410492,
      "learning_rate": 0.00019560357815343577,
      "loss": 0.036,
      "step": 350
    },
    {
      "epoch": 0.00933852140077821,
      "grad_norm": 0.6629723906517029,
      "learning_rate": 0.00019520732146784491,
      "loss": 0.041,
      "step": 360
    },
    {
      "epoch": 0.00959792477302205,
      "grad_norm": 0.3777199387550354,
      "learning_rate": 0.0001947944062577507,
      "loss": 0.0414,
      "step": 370
    },
    {
      "epoch": 0.009857328145265888,
      "grad_norm": 0.44298338890075684,
      "learning_rate": 0.00019436490477135878,
      "loss": 0.0371,
      "step": 380
    },
    {
      "epoch": 0.010116731517509728,
      "grad_norm": 0.4530090093612671,
      "learning_rate": 0.00019391889215899299,
      "loss": 0.037,
      "step": 390
    },
    {
      "epoch": 0.010376134889753566,
      "grad_norm": 0.36142367124557495,
      "learning_rate": 0.0001934564464599461,
      "loss": 0.0387,
      "step": 400
    },
    {
      "epoch": 0.010635538261997406,
      "grad_norm": 0.36559927463531494,
      "learning_rate": 0.00019297764858882514,
      "loss": 0.0386,
      "step": 410
    },
    {
      "epoch": 0.010894941634241245,
      "grad_norm": 0.47660204768180847,
      "learning_rate": 0.00019248258232139388,
      "loss": 0.0316,
      "step": 420
    },
    {
      "epoch": 0.011154345006485085,
      "grad_norm": 0.2614981234073639,
      "learning_rate": 0.00019197133427991436,
      "loss": 0.0417,
      "step": 430
    },
    {
      "epoch": 0.011413748378728923,
      "grad_norm": 0.44811561703681946,
      "learning_rate": 0.00019144399391799043,
      "loss": 0.036,
      "step": 440
    },
    {
      "epoch": 0.011673151750972763,
      "grad_norm": 0.4213508367538452,
      "learning_rate": 0.00019090065350491626,
      "loss": 0.0399,
      "step": 450
    },
    {
      "epoch": 0.011932555123216601,
      "grad_norm": 0.29146406054496765,
      "learning_rate": 0.0001903414081095315,
      "loss": 0.0376,
      "step": 460
    },
    {
      "epoch": 0.012191958495460441,
      "grad_norm": 0.2714293897151947,
      "learning_rate": 0.00018976635558358722,
      "loss": 0.0362,
      "step": 470
    },
    {
      "epoch": 0.012451361867704281,
      "grad_norm": 0.3767367899417877,
      "learning_rate": 0.00018917559654462474,
      "loss": 0.0364,
      "step": 480
    },
    {
      "epoch": 0.01271076523994812,
      "grad_norm": 0.36578992009162903,
      "learning_rate": 0.00018856923435837022,
      "loss": 0.0327,
      "step": 490
    },
    {
      "epoch": 0.01297016861219196,
      "grad_norm": 0.32976096868515015,
      "learning_rate": 0.0001879473751206489,
      "loss": 0.04,
      "step": 500
    },
    {
      "epoch": 0.013229571984435798,
      "grad_norm": 0.4037308990955353,
      "learning_rate": 0.00018731012763882133,
      "loss": 0.0365,
      "step": 510
    },
    {
      "epoch": 0.013488975356679637,
      "grad_norm": 0.3599701225757599,
      "learning_rate": 0.00018665760341274505,
      "loss": 0.0337,
      "step": 520
    },
    {
      "epoch": 0.013748378728923476,
      "grad_norm": 0.6167535185813904,
      "learning_rate": 0.00018598991661526572,
      "loss": 0.0384,
      "step": 530
    },
    {
      "epoch": 0.014007782101167316,
      "grad_norm": 0.44012153148651123,
      "learning_rate": 0.00018530718407223974,
      "loss": 0.0291,
      "step": 540
    },
    {
      "epoch": 0.014267185473411154,
      "grad_norm": 0.5593993663787842,
      "learning_rate": 0.00018460952524209355,
      "loss": 0.0328,
      "step": 550
    },
    {
      "epoch": 0.014526588845654994,
      "grad_norm": 0.3970232307910919,
      "learning_rate": 0.00018389706219492147,
      "loss": 0.0282,
      "step": 560
    },
    {
      "epoch": 0.014785992217898832,
      "grad_norm": 0.2354394942522049,
      "learning_rate": 0.00018316991959112716,
      "loss": 0.0317,
      "step": 570
    },
    {
      "epoch": 0.015045395590142672,
      "grad_norm": 0.35121485590934753,
      "learning_rate": 0.00018242822465961176,
      "loss": 0.0328,
      "step": 580
    },
    {
      "epoch": 0.01530479896238651,
      "grad_norm": 0.4026910066604614,
      "learning_rate": 0.00018167210717551224,
      "loss": 0.0346,
      "step": 590
    },
    {
      "epoch": 0.01556420233463035,
      "grad_norm": 0.36058488488197327,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.0356,
      "step": 600
    },
    {
      "epoch": 0.01582360570687419,
      "grad_norm": 0.43329063057899475,
      "learning_rate": 0.00018011713624460608,
      "loss": 0.039,
      "step": 610
    },
    {
      "epoch": 0.016083009079118027,
      "grad_norm": 0.5267623662948608,
      "learning_rate": 0.00017931855487268782,
      "loss": 0.034,
      "step": 620
    },
    {
      "epoch": 0.01634241245136187,
      "grad_norm": 0.4230927526950836,
      "learning_rate": 0.0001785060950503568,
      "loss": 0.031,
      "step": 630
    },
    {
      "epoch": 0.016601815823605707,
      "grad_norm": 0.3721458613872528,
      "learning_rate": 0.00017767989893455698,
      "loss": 0.0306,
      "step": 640
    },
    {
      "epoch": 0.016861219195849545,
      "grad_norm": 0.4158923029899597,
      "learning_rate": 0.00017684011108568592,
      "loss": 0.0343,
      "step": 650
    },
    {
      "epoch": 0.017120622568093387,
      "grad_norm": 0.42805808782577515,
      "learning_rate": 0.00017598687844230088,
      "loss": 0.0348,
      "step": 660
    },
    {
      "epoch": 0.017380025940337225,
      "grad_norm": 0.2532889246940613,
      "learning_rate": 0.00017512035029540885,
      "loss": 0.0348,
      "step": 670
    },
    {
      "epoch": 0.017639429312581063,
      "grad_norm": 0.33485907316207886,
      "learning_rate": 0.000174240678262345,
      "loss": 0.0382,
      "step": 680
    },
    {
      "epoch": 0.0178988326848249,
      "grad_norm": 0.33811262249946594,
      "learning_rate": 0.000173348016260244,
      "loss": 0.0321,
      "step": 690
    },
    {
      "epoch": 0.018158236057068743,
      "grad_norm": 0.5319349765777588,
      "learning_rate": 0.00017244252047910892,
      "loss": 0.0369,
      "step": 700
    },
    {
      "epoch": 0.01841763942931258,
      "grad_norm": 0.1967012882232666,
      "learning_rate": 0.00017152434935448256,
      "loss": 0.0392,
      "step": 710
    },
    {
      "epoch": 0.01867704280155642,
      "grad_norm": 0.491212397813797,
      "learning_rate": 0.0001705936635397259,
      "loss": 0.034,
      "step": 720
    },
    {
      "epoch": 0.018936446173800258,
      "grad_norm": 0.3200204074382782,
      "learning_rate": 0.00016965062587790823,
      "loss": 0.0349,
      "step": 730
    },
    {
      "epoch": 0.0191958495460441,
      "grad_norm": 0.29090484976768494,
      "learning_rate": 0.00016869540137331445,
      "loss": 0.0302,
      "step": 740
    },
    {
      "epoch": 0.019455252918287938,
      "grad_norm": 0.4633236825466156,
      "learning_rate": 0.00016772815716257412,
      "loss": 0.0305,
      "step": 750
    },
    {
      "epoch": 0.019714656290531776,
      "grad_norm": 0.6030534505844116,
      "learning_rate": 0.00016674906248541726,
      "loss": 0.0331,
      "step": 760
    },
    {
      "epoch": 0.019974059662775615,
      "grad_norm": 0.28692400455474854,
      "learning_rate": 0.00016575828865506245,
      "loss": 0.0319,
      "step": 770
    },
    {
      "epoch": 0.020233463035019456,
      "grad_norm": 0.3799605667591095,
      "learning_rate": 0.0001647560090282419,
      "loss": 0.0316,
      "step": 780
    },
    {
      "epoch": 0.020492866407263294,
      "grad_norm": 0.529679536819458,
      "learning_rate": 0.000163742398974869,
      "loss": 0.0317,
      "step": 790
    },
    {
      "epoch": 0.020752269779507133,
      "grad_norm": 0.328056275844574,
      "learning_rate": 0.0001627176358473537,
      "loss": 0.0332,
      "step": 800
    },
    {
      "epoch": 0.021011673151750974,
      "grad_norm": 0.3303374648094177,
      "learning_rate": 0.0001616818989495711,
      "loss": 0.0323,
      "step": 810
    },
    {
      "epoch": 0.021271076523994813,
      "grad_norm": 0.23507635295391083,
      "learning_rate": 0.00016063536950548826,
      "loss": 0.0338,
      "step": 820
    },
    {
      "epoch": 0.02153047989623865,
      "grad_norm": 0.3870477080345154,
      "learning_rate": 0.0001595782306274553,
      "loss": 0.0326,
      "step": 830
    },
    {
      "epoch": 0.02178988326848249,
      "grad_norm": 0.37890198826789856,
      "learning_rate": 0.00015851066728416618,
      "loss": 0.0309,
      "step": 840
    },
    {
      "epoch": 0.02204928664072633,
      "grad_norm": 0.48460447788238525,
      "learning_rate": 0.00015743286626829437,
      "loss": 0.0351,
      "step": 850
    },
    {
      "epoch": 0.02230869001297017,
      "grad_norm": 0.38216733932495117,
      "learning_rate": 0.00015634501616380967,
      "loss": 0.0331,
      "step": 860
    },
    {
      "epoch": 0.022568093385214007,
      "grad_norm": 0.31668373942375183,
      "learning_rate": 0.00015524730731298134,
      "loss": 0.0306,
      "step": 870
    },
    {
      "epoch": 0.022827496757457846,
      "grad_norm": 0.5256275534629822,
      "learning_rate": 0.0001541399317830738,
      "loss": 0.0307,
      "step": 880
    },
    {
      "epoch": 0.023086900129701687,
      "grad_norm": 0.2295175939798355,
      "learning_rate": 0.0001530230833327405,
      "loss": 0.0297,
      "step": 890
    },
    {
      "epoch": 0.023346303501945526,
      "grad_norm": 0.35704338550567627,
      "learning_rate": 0.00015189695737812152,
      "loss": 0.0358,
      "step": 900
    },
    {
      "epoch": 0.023605706874189364,
      "grad_norm": 0.31458932161331177,
      "learning_rate": 0.0001507617509586517,
      "loss": 0.0278,
      "step": 910
    },
    {
      "epoch": 0.023865110246433202,
      "grad_norm": 0.47554951906204224,
      "learning_rate": 0.00014961766270258422,
      "loss": 0.0332,
      "step": 920
    },
    {
      "epoch": 0.024124513618677044,
      "grad_norm": 0.29959654808044434,
      "learning_rate": 0.00014846489279223652,
      "loss": 0.0268,
      "step": 930
    },
    {
      "epoch": 0.024383916990920882,
      "grad_norm": 0.31856444478034973,
      "learning_rate": 0.0001473036429289641,
      "loss": 0.028,
      "step": 940
    },
    {
      "epoch": 0.02464332036316472,
      "grad_norm": 0.23568253219127655,
      "learning_rate": 0.0001461341162978688,
      "loss": 0.0339,
      "step": 950
    },
    {
      "epoch": 0.024902723735408562,
      "grad_norm": 0.4032520055770874,
      "learning_rate": 0.00014495651753224705,
      "loss": 0.0291,
      "step": 960
    },
    {
      "epoch": 0.0251621271076524,
      "grad_norm": 0.36155375838279724,
      "learning_rate": 0.00014377105267778518,
      "loss": 0.0261,
      "step": 970
    },
    {
      "epoch": 0.02542153047989624,
      "grad_norm": 0.2985226511955261,
      "learning_rate": 0.00014257792915650728,
      "loss": 0.03,
      "step": 980
    },
    {
      "epoch": 0.025680933852140077,
      "grad_norm": 0.3420771658420563,
      "learning_rate": 0.00014137735573048233,
      "loss": 0.0318,
      "step": 990
    },
    {
      "epoch": 0.02594033722438392,
      "grad_norm": 0.19557547569274902,
      "learning_rate": 0.00014016954246529696,
      "loss": 0.0287,
      "step": 1000
    },
    {
      "epoch": 0.026199740596627757,
      "grad_norm": 0.2249382734298706,
      "learning_rate": 0.00013895470069330004,
      "loss": 0.0258,
      "step": 1010
    },
    {
      "epoch": 0.026459143968871595,
      "grad_norm": 0.5154671669006348,
      "learning_rate": 0.00013773304297662559,
      "loss": 0.0339,
      "step": 1020
    },
    {
      "epoch": 0.026718547341115433,
      "grad_norm": 0.3469725251197815,
      "learning_rate": 0.00013650478307000057,
      "loss": 0.032,
      "step": 1030
    },
    {
      "epoch": 0.026977950713359275,
      "grad_norm": 0.34428727626800537,
      "learning_rate": 0.00013527013588334415,
      "loss": 0.0317,
      "step": 1040
    },
    {
      "epoch": 0.027237354085603113,
      "grad_norm": 0.2862760126590729,
      "learning_rate": 0.00013402931744416433,
      "loss": 0.03,
      "step": 1050
    },
    {
      "epoch": 0.02749675745784695,
      "grad_norm": 0.5235270261764526,
      "learning_rate": 0.00013278254485975976,
      "loss": 0.0368,
      "step": 1060
    },
    {
      "epoch": 0.02775616083009079,
      "grad_norm": 0.2167377918958664,
      "learning_rate": 0.00013153003627923218,
      "loss": 0.0306,
      "step": 1070
    },
    {
      "epoch": 0.02801556420233463,
      "grad_norm": 0.29817643761634827,
      "learning_rate": 0.00013027201085531634,
      "loss": 0.0301,
      "step": 1080
    },
    {
      "epoch": 0.02827496757457847,
      "grad_norm": 0.29515424370765686,
      "learning_rate": 0.00012900868870603503,
      "loss": 0.0249,
      "step": 1090
    },
    {
      "epoch": 0.028534370946822308,
      "grad_norm": 0.3012550473213196,
      "learning_rate": 0.00012774029087618446,
      "loss": 0.0264,
      "step": 1100
    },
    {
      "epoch": 0.028793774319066146,
      "grad_norm": 0.31644681096076965,
      "learning_rate": 0.00012646703929865817,
      "loss": 0.0316,
      "step": 1110
    },
    {
      "epoch": 0.029053177691309988,
      "grad_norm": 0.37952420115470886,
      "learning_rate": 0.00012518915675561483,
      "loss": 0.0296,
      "step": 1120
    },
    {
      "epoch": 0.029312581063553826,
      "grad_norm": 0.2594399154186249,
      "learning_rate": 0.00012390686683949798,
      "loss": 0.0321,
      "step": 1130
    },
    {
      "epoch": 0.029571984435797664,
      "grad_norm": 0.34381750226020813,
      "learning_rate": 0.00012262039391391404,
      "loss": 0.0305,
      "step": 1140
    },
    {
      "epoch": 0.029831387808041506,
      "grad_norm": 0.2528727948665619,
      "learning_rate": 0.0001213299630743747,
      "loss": 0.0303,
      "step": 1150
    },
    {
      "epoch": 0.030090791180285344,
      "grad_norm": 0.32746660709381104,
      "learning_rate": 0.00012003580010891213,
      "loss": 0.0309,
      "step": 1160
    },
    {
      "epoch": 0.030350194552529183,
      "grad_norm": 0.46516871452331543,
      "learning_rate": 0.00011873813145857249,
      "loss": 0.0288,
      "step": 1170
    },
    {
      "epoch": 0.03060959792477302,
      "grad_norm": 0.21779906749725342,
      "learning_rate": 0.00011743718417779517,
      "loss": 0.0268,
      "step": 1180
    },
    {
      "epoch": 0.030869001297016863,
      "grad_norm": 0.2870500087738037,
      "learning_rate": 0.00011613318589468511,
      "loss": 0.0284,
      "step": 1190
    },
    {
      "epoch": 0.0311284046692607,
      "grad_norm": 0.223075732588768,
      "learning_rate": 0.0001148263647711842,
      "loss": 0.0283,
      "step": 1200
    },
    {
      "epoch": 0.03138780804150454,
      "grad_norm": 0.3081727921962738,
      "learning_rate": 0.0001135169494631497,
      "loss": 0.0319,
      "step": 1210
    },
    {
      "epoch": 0.03164721141374838,
      "grad_norm": 0.48067420721054077,
      "learning_rate": 0.00011220516908034601,
      "loss": 0.0286,
      "step": 1220
    },
    {
      "epoch": 0.031906614785992216,
      "grad_norm": 0.34588056802749634,
      "learning_rate": 0.00011089125314635726,
      "loss": 0.0287,
      "step": 1230
    },
    {
      "epoch": 0.032166018158236054,
      "grad_norm": 0.3428352475166321,
      "learning_rate": 0.00010957543155842702,
      "loss": 0.025,
      "step": 1240
    },
    {
      "epoch": 0.0324254215304799,
      "grad_norm": 0.4223664700984955,
      "learning_rate": 0.00010825793454723325,
      "loss": 0.0304,
      "step": 1250
    },
    {
      "epoch": 0.03268482490272374,
      "grad_norm": 0.21455545723438263,
      "learning_rate": 0.00010693899263660441,
      "loss": 0.0265,
      "step": 1260
    },
    {
      "epoch": 0.032944228274967575,
      "grad_norm": 0.2936132252216339,
      "learning_rate": 0.00010561883660318455,
      "loss": 0.0298,
      "step": 1270
    },
    {
      "epoch": 0.033203631647211414,
      "grad_norm": 0.3222610056400299,
      "learning_rate": 0.00010429769743605407,
      "loss": 0.0333,
      "step": 1280
    },
    {
      "epoch": 0.03346303501945525,
      "grad_norm": 0.42949262261390686,
      "learning_rate": 0.00010297580629631325,
      "loss": 0.0336,
      "step": 1290
    },
    {
      "epoch": 0.03372243839169909,
      "grad_norm": 0.3103349208831787,
      "learning_rate": 0.00010165339447663587,
      "loss": 0.0327,
      "step": 1300
    },
    {
      "epoch": 0.03398184176394293,
      "grad_norm": 0.3449970483779907,
      "learning_rate": 0.00010033069336079952,
      "loss": 0.0293,
      "step": 1310
    },
    {
      "epoch": 0.034241245136186774,
      "grad_norm": 0.3841015100479126,
      "learning_rate": 9.900793438320037e-05,
      "loss": 0.0305,
      "step": 1320
    },
    {
      "epoch": 0.03450064850843061,
      "grad_norm": 0.3848475515842438,
      "learning_rate": 9.768534898835862e-05,
      "loss": 0.026,
      "step": 1330
    },
    {
      "epoch": 0.03476005188067445,
      "grad_norm": 0.43429145216941833,
      "learning_rate": 9.636316859042259e-05,
      "loss": 0.0294,
      "step": 1340
    },
    {
      "epoch": 0.03501945525291829,
      "grad_norm": 0.2181561291217804,
      "learning_rate": 9.504162453267777e-05,
      "loss": 0.0275,
      "step": 1350
    },
    {
      "epoch": 0.03527885862516213,
      "grad_norm": 0.4230787754058838,
      "learning_rate": 9.372094804706867e-05,
      "loss": 0.0273,
      "step": 1360
    },
    {
      "epoch": 0.035538261997405965,
      "grad_norm": 0.273333340883255,
      "learning_rate": 9.24013702137397e-05,
      "loss": 0.0246,
      "step": 1370
    },
    {
      "epoch": 0.0357976653696498,
      "grad_norm": 0.21888947486877441,
      "learning_rate": 9.108312192060298e-05,
      "loss": 0.0223,
      "step": 1380
    },
    {
      "epoch": 0.03605706874189364,
      "grad_norm": 0.22308865189552307,
      "learning_rate": 8.97664338229395e-05,
      "loss": 0.0249,
      "step": 1390
    },
    {
      "epoch": 0.03631647211413749,
      "grad_norm": 0.2270708829164505,
      "learning_rate": 8.845153630304139e-05,
      "loss": 0.0272,
      "step": 1400
    },
    {
      "epoch": 0.036575875486381325,
      "grad_norm": 0.3046322166919708,
      "learning_rate": 8.713865942990141e-05,
      "loss": 0.0225,
      "step": 1410
    },
    {
      "epoch": 0.03683527885862516,
      "grad_norm": 0.31552374362945557,
      "learning_rate": 8.582803291895758e-05,
      "loss": 0.0302,
      "step": 1420
    },
    {
      "epoch": 0.037094682230869,
      "grad_norm": 0.46200478076934814,
      "learning_rate": 8.451988609189987e-05,
      "loss": 0.0272,
      "step": 1430
    },
    {
      "epoch": 0.03735408560311284,
      "grad_norm": 0.22868406772613525,
      "learning_rate": 8.321444783654524e-05,
      "loss": 0.0267,
      "step": 1440
    },
    {
      "epoch": 0.03761348897535668,
      "grad_norm": 0.23030489683151245,
      "learning_rate": 8.191194656678904e-05,
      "loss": 0.0287,
      "step": 1450
    },
    {
      "epoch": 0.037872892347600516,
      "grad_norm": 0.19943729043006897,
      "learning_rate": 8.061261018263919e-05,
      "loss": 0.0249,
      "step": 1460
    },
    {
      "epoch": 0.03813229571984436,
      "grad_norm": 0.3236585557460785,
      "learning_rate": 7.931666603034033e-05,
      "loss": 0.0261,
      "step": 1470
    },
    {
      "epoch": 0.0383916990920882,
      "grad_norm": 0.27057945728302,
      "learning_rate": 7.80243408625947e-05,
      "loss": 0.0313,
      "step": 1480
    },
    {
      "epoch": 0.03865110246433204,
      "grad_norm": 0.40263959765434265,
      "learning_rate": 7.673586079888698e-05,
      "loss": 0.0255,
      "step": 1490
    },
    {
      "epoch": 0.038910505836575876,
      "grad_norm": 0.36453911662101746,
      "learning_rate": 7.54514512859201e-05,
      "loss": 0.0306,
      "step": 1500
    },
    {
      "epoch": 0.039169909208819714,
      "grad_norm": 0.2801981270313263,
      "learning_rate": 7.417133705816837e-05,
      "loss": 0.0272,
      "step": 1510
    },
    {
      "epoch": 0.03942931258106355,
      "grad_norm": 0.2771183252334595,
      "learning_rate": 7.289574209855559e-05,
      "loss": 0.0252,
      "step": 1520
    },
    {
      "epoch": 0.03968871595330739,
      "grad_norm": 0.1658083200454712,
      "learning_rate": 7.16248895992645e-05,
      "loss": 0.0278,
      "step": 1530
    },
    {
      "epoch": 0.03994811932555123,
      "grad_norm": 0.19812481105327606,
      "learning_rate": 7.035900192268464e-05,
      "loss": 0.028,
      "step": 1540
    },
    {
      "epoch": 0.040207522697795074,
      "grad_norm": 0.2751558721065521,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.0293,
      "step": 1550
    },
    {
      "epoch": 0.04046692607003891,
      "grad_norm": 0.24725764989852905,
      "learning_rate": 6.784300610496048e-05,
      "loss": 0.0245,
      "step": 1560
    },
    {
      "epoch": 0.04072632944228275,
      "grad_norm": 0.6854105591773987,
      "learning_rate": 6.65933381902329e-05,
      "loss": 0.0278,
      "step": 1570
    },
    {
      "epoch": 0.04098573281452659,
      "grad_norm": 0.19634288549423218,
      "learning_rate": 6.534951547402322e-05,
      "loss": 0.0251,
      "step": 1580
    },
    {
      "epoch": 0.04124513618677043,
      "grad_norm": 0.21054495871067047,
      "learning_rate": 6.411175558929152e-05,
      "loss": 0.0263,
      "step": 1590
    },
    {
      "epoch": 0.041504539559014265,
      "grad_norm": 0.2615554630756378,
      "learning_rate": 6.28802751081779e-05,
      "loss": 0.0268,
      "step": 1600
    },
    {
      "epoch": 0.041763942931258104,
      "grad_norm": 0.23431521654129028,
      "learning_rate": 6.165528950410884e-05,
      "loss": 0.0272,
      "step": 1610
    },
    {
      "epoch": 0.04202334630350195,
      "grad_norm": 0.14299079775810242,
      "learning_rate": 6.0437013114095195e-05,
      "loss": 0.0206,
      "step": 1620
    },
    {
      "epoch": 0.04228274967574579,
      "grad_norm": 0.32186359167099,
      "learning_rate": 5.922565910122967e-05,
      "loss": 0.0257,
      "step": 1630
    },
    {
      "epoch": 0.042542153047989625,
      "grad_norm": 0.3568614423274994,
      "learning_rate": 5.8021439417389444e-05,
      "loss": 0.0263,
      "step": 1640
    },
    {
      "epoch": 0.042801556420233464,
      "grad_norm": 0.2789517045021057,
      "learning_rate": 5.6824564766150726e-05,
      "loss": 0.0273,
      "step": 1650
    },
    {
      "epoch": 0.0430609597924773,
      "grad_norm": 0.23182226717472076,
      "learning_rate": 5.563524456592163e-05,
      "loss": 0.0216,
      "step": 1660
    },
    {
      "epoch": 0.04332036316472114,
      "grad_norm": 0.20413507521152496,
      "learning_rate": 5.4453686913300074e-05,
      "loss": 0.0281,
      "step": 1670
    },
    {
      "epoch": 0.04357976653696498,
      "grad_norm": 0.21678327023983002,
      "learning_rate": 5.328009854666303e-05,
      "loss": 0.0258,
      "step": 1680
    },
    {
      "epoch": 0.04383916990920882,
      "grad_norm": 0.25063127279281616,
      "learning_rate": 5.2114684809993044e-05,
      "loss": 0.0265,
      "step": 1690
    },
    {
      "epoch": 0.04409857328145266,
      "grad_norm": 0.17453815042972565,
      "learning_rate": 5.095764961694922e-05,
      "loss": 0.0256,
      "step": 1700
    },
    {
      "epoch": 0.0443579766536965,
      "grad_norm": 0.22323985397815704,
      "learning_rate": 4.980919541518796e-05,
      "loss": 0.025,
      "step": 1710
    },
    {
      "epoch": 0.04461738002594034,
      "grad_norm": 0.2994442880153656,
      "learning_rate": 4.866952315094088e-05,
      "loss": 0.0266,
      "step": 1720
    },
    {
      "epoch": 0.04487678339818418,
      "grad_norm": 0.27734553813934326,
      "learning_rate": 4.753883223385467e-05,
      "loss": 0.0247,
      "step": 1730
    },
    {
      "epoch": 0.045136186770428015,
      "grad_norm": 0.18703316152095795,
      "learning_rate": 4.6417320502100316e-05,
      "loss": 0.0232,
      "step": 1740
    },
    {
      "epoch": 0.04539559014267185,
      "grad_norm": 0.271628201007843,
      "learning_rate": 4.530518418775733e-05,
      "loss": 0.0275,
      "step": 1750
    },
    {
      "epoch": 0.04565499351491569,
      "grad_norm": 0.26739704608917236,
      "learning_rate": 4.4202617882478405e-05,
      "loss": 0.0273,
      "step": 1760
    },
    {
      "epoch": 0.045914396887159536,
      "grad_norm": 0.2327081710100174,
      "learning_rate": 4.310981450344189e-05,
      "loss": 0.0244,
      "step": 1770
    },
    {
      "epoch": 0.046173800259403375,
      "grad_norm": 0.23359940946102142,
      "learning_rate": 4.2026965259596666e-05,
      "loss": 0.0266,
      "step": 1780
    },
    {
      "epoch": 0.04643320363164721,
      "grad_norm": 0.1780378818511963,
      "learning_rate": 4.0954259618206295e-05,
      "loss": 0.0275,
      "step": 1790
    },
    {
      "epoch": 0.04669260700389105,
      "grad_norm": 0.14094606041908264,
      "learning_rate": 3.9891885271697496e-05,
      "loss": 0.0224,
      "step": 1800
    },
    {
      "epoch": 0.04695201037613489,
      "grad_norm": 0.21397313475608826,
      "learning_rate": 3.884002810481958e-05,
      "loss": 0.0262,
      "step": 1810
    },
    {
      "epoch": 0.04721141374837873,
      "grad_norm": 0.18339146673679352,
      "learning_rate": 3.779887216211995e-05,
      "loss": 0.0227,
      "step": 1820
    },
    {
      "epoch": 0.047470817120622566,
      "grad_norm": 0.3625165522098541,
      "learning_rate": 3.676859961574162e-05,
      "loss": 0.0261,
      "step": 1830
    },
    {
      "epoch": 0.047730220492866404,
      "grad_norm": 0.24323268234729767,
      "learning_rate": 3.574939073354838e-05,
      "loss": 0.0263,
      "step": 1840
    },
    {
      "epoch": 0.04798962386511025,
      "grad_norm": 0.16706861555576324,
      "learning_rate": 3.4741423847583134e-05,
      "loss": 0.0236,
      "step": 1850
    },
    {
      "epoch": 0.04824902723735409,
      "grad_norm": 0.25477734208106995,
      "learning_rate": 3.3744875322865034e-05,
      "loss": 0.0265,
      "step": 1860
    },
    {
      "epoch": 0.048508430609597926,
      "grad_norm": 0.23444534838199615,
      "learning_rate": 3.275991952653054e-05,
      "loss": 0.0255,
      "step": 1870
    },
    {
      "epoch": 0.048767833981841764,
      "grad_norm": 0.13936041295528412,
      "learning_rate": 3.178672879732435e-05,
      "loss": 0.0247,
      "step": 1880
    },
    {
      "epoch": 0.0490272373540856,
      "grad_norm": 0.27730414271354675,
      "learning_rate": 3.0825473415445074e-05,
      "loss": 0.0211,
      "step": 1890
    },
    {
      "epoch": 0.04928664072632944,
      "grad_norm": 0.44268760085105896,
      "learning_rate": 2.9876321572751144e-05,
      "loss": 0.0293,
      "step": 1900
    },
    {
      "epoch": 0.04954604409857328,
      "grad_norm": 0.2908082902431488,
      "learning_rate": 2.8939439343332086e-05,
      "loss": 0.0283,
      "step": 1910
    },
    {
      "epoch": 0.049805447470817124,
      "grad_norm": 0.20642030239105225,
      "learning_rate": 2.8014990654450325e-05,
      "loss": 0.0237,
      "step": 1920
    },
    {
      "epoch": 0.05006485084306096,
      "grad_norm": 0.23467542231082916,
      "learning_rate": 2.7103137257858868e-05,
      "loss": 0.0257,
      "step": 1930
    },
    {
      "epoch": 0.0503242542153048,
      "grad_norm": 0.30216318368911743,
      "learning_rate": 2.6204038701499056e-05,
      "loss": 0.023,
      "step": 1940
    },
    {
      "epoch": 0.05058365758754864,
      "grad_norm": 0.2494220733642578,
      "learning_rate": 2.5317852301584643e-05,
      "loss": 0.0248,
      "step": 1950
    },
    {
      "epoch": 0.05084306095979248,
      "grad_norm": 0.3801492154598236,
      "learning_rate": 2.4444733115075823e-05,
      "loss": 0.0208,
      "step": 1960
    },
    {
      "epoch": 0.051102464332036315,
      "grad_norm": 0.18340712785720825,
      "learning_rate": 2.3584833912548888e-05,
      "loss": 0.0265,
      "step": 1970
    },
    {
      "epoch": 0.051361867704280154,
      "grad_norm": 0.20898978412151337,
      "learning_rate": 2.2738305151465645e-05,
      "loss": 0.0242,
      "step": 1980
    },
    {
      "epoch": 0.05162127107652399,
      "grad_norm": 0.2393597811460495,
      "learning_rate": 2.190529494984782e-05,
      "loss": 0.0223,
      "step": 1990
    },
    {
      "epoch": 0.05188067444876784,
      "grad_norm": 0.16330353915691376,
      "learning_rate": 2.1085949060360654e-05,
      "loss": 0.0207,
      "step": 2000
    }
  ],
  "logging_steps": 10,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.62919912685568e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}