import os
import sys
with open(sys.argv[0]) as f:
code = f.read() # read the code of this file ASAP, for logging
import uuid
import glob
import time
import contextlib
from dataclasses import dataclass
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
import torch._inductor.config as config
from torch.nn.parallel import DistributedDataParallel as DDP
# Use of FlexAttention contributed by @KoszarskyB
from torch.nn.attention.flex_attention import flex_attention, create_block_mask
flex_attention = torch.compile(flex_attention, dynamic=False)
create_block_mask = torch.compile(create_block_mask, dynamic=False)
# -----------------------------------------------------------------------------
# Muon optimizer
def zeropower_via_svd(G, steps=None):
U, S, V = G.svd()
return U @ V.T
@torch.compile
def zeropower_via_newtonschulz5(G, steps=10, eps=1e-7):
"""
Newton-Schulz iteration to compute the zeroth power / orthogonalization of G. We opt to use a
quintic iteration whose coefficients are selected to maximize the slope at zero. For the purpose
of minimizing steps, it turns out to be empirically effective to keep increasing the slope at
zero even beyond the point where the iteration no longer converges all the way to one everywhere
on the interval. This iteration therefore does not produce UV^T but rather something like US'V^T
where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
performance at all relative to UV^T, where USV^T = G is the SVD.
"""
assert len(G.shape) == 2
a, b, c = (3.4445, -4.7750, 2.0315)
X = G.bfloat16()
X /= (X.norm() + eps) # ensure top singular value <= 1
if G.size(0) > G.size(1):
X = X.T
for _ in range(steps):
A = X @ X.T
B = b * A + c * A @ A # adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng
X = a * X + B @ X
if G.size(0) > G.size(1):
X = X.T
return X
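# A minimal sanity check for the iteration above (illustrative only; this helper is a
# sketch and is never called in this script): after zeropower_via_newtonschulz5, the
# singular values of a random matrix should land roughly in the (0.5, 1.5) band that
# the docstring describes, rather than being driven exactly to 1.
def _check_newtonschulz_orthogonalization(rows=256, cols=128):
    G = torch.randn(rows, cols, device='cuda')
    X = zeropower_via_newtonschulz5(G, steps=5)
    S = torch.linalg.svdvals(X.float())
    return S.min().item(), S.max().item()  # expected to be roughly 0.5 and 1.5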
zeropower_backends = dict(svd=zeropower_via_svd, newtonschulz5=zeropower_via_newtonschulz5)
class Muon(torch.optim.Optimizer):
"""
Muon - MomentUm Orthogonalized by Newton-schulz
Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has
the advantage that it can be stably run in bfloat16 on the GPU.
Some warnings:
- This optimizer assumes that all parameters passed in are 2D.
- It should not be used for the embedding layer, the final fully connected layer, or any {0,1}-D
parameters; those should all be optimized by a standard method (e.g., AdamW).
- To use it with 4D convolutional filters, it works well to just flatten their last 3 dimensions.
- We believe it is unlikely to work well for training with small batch size.
- We believe it may not work well for finetuning pretrained models, but we haven't tested this.
- We have not yet tried this optimizer for training scenarios larger than NanoGPT (124M).
Arguments:
lr: The learning rate used by the internal SGD.
momentum: The momentum used by the internal SGD.
nesterov: Whether to use Nesterov-style momentum in the internal SGD. (recommended)
backend: The chosen backend for the orthogonalization step. (recommended: 'newtonschulz5')
backend_steps: The number of iteration steps to use in the backend, if it is iterative.
"""
def __init__(self, params, lr=0.02, momentum=0.95, nesterov=True,
backend='newtonschulz5', backend_steps=5):
defaults = dict(lr=lr, momentum=momentum, nesterov=nesterov, backend=backend, backend_steps=backend_steps)
super().__init__(params, defaults)
def step(self):
for group in self.param_groups:
lr = group['lr']
momentum = group['momentum']
zeropower_backend = zeropower_backends[group['backend']]
# generate weight updates in distributed fashion
total_params = sum(p.numel() for p in group['params'])
updates_flat = torch.zeros(total_params, device='cuda', dtype=torch.bfloat16)
curr_idx = 0
for i, p in enumerate(group['params']):
# luckily this will perfectly distribute a transformer with multiple of 4 layers to 8 GPUs
if i % int(os.environ['WORLD_SIZE']) == int(os.environ['RANK']):
g = p.grad
assert g is not None
state = self.state[p]
if 'momentum_buffer' not in state:
state['momentum_buffer'] = torch.zeros_like(g)
buf = state['momentum_buffer']
buf.mul_(momentum).add_(g)
g = g.add(buf, alpha=momentum) if group['nesterov'] else buf
g = zeropower_backend(g, steps=group['backend_steps'])
g *= max(1, g.size(0)/g.size(1))**0.5
updates_flat[curr_idx:curr_idx+p.numel()] = g.flatten()
curr_idx += p.numel()
# sync updates across devices. we are not memory-constrained so can do this simple deserialization
dist.all_reduce(updates_flat, op=dist.ReduceOp.SUM)
# deserialize and apply updates
curr_idx = 0
for p in group['params']:
g = updates_flat[curr_idx:curr_idx+p.numel()].view_as(p.data).type_as(p.data)
p.data.add_(g, alpha=-lr)
curr_idx += p.numel()
# -----------------------------------------------------------------------------
# PyTorch nn.Module definitions for the GPT-2 model
def norm(x):
return F.rms_norm(x, (x.size(-1),))
class CastedLinear(nn.Linear):
def __init__(self, in_features, out_features):
super().__init__(in_features, out_features, bias=False)
def forward(self, x):
return F.linear(x, self.weight.to(x.dtype))
class Rotary(torch.nn.Module):
def __init__(self, dim, base=10000):
super().__init__()
self.register_buffer('inv_freq', (1 / base) ** (torch.arange(0, dim, 2) / dim))
self.seq_len_cached = None
self.cos_cached = None
self.sin_cached = None
def forward(self, x):
seq_len = x.shape[1]
if seq_len != self.seq_len_cached:
t = torch.arange(seq_len, device=x.device)
freqs = torch.outer(t, self.inv_freq)
self.seq_len_cached = seq_len
self.cos_cached = freqs.cos()
self.sin_cached = freqs.sin()
cos, sin = self.cos_cached[None, :, None, :], self.sin_cached[None, :, None, :]
# apply_rotary_emb(x, cos, sin)
x1, x2 = x.chunk(2, dim=3)
y1 = x1 * cos + x2 * sin
y2 = x1 * (-sin) + x2 * cos
return torch.cat((y1, y2), 3).type_as(x)
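# A small property check for the rotary embedding above (illustrative sketch, never
# called in this script): the per-pair rotation mixes channels but preserves each
# vector's norm; the dummy input follows the (batch, seq, heads, head_dim) layout
# used by the attention module below.
def _check_rotary_preserves_norm(seq_len=8, head_dim=16):
    rot = Rotary(head_dim)
    x = torch.randn(1, seq_len, 1, head_dim)
    y = rot(x)
    return torch.allclose(x.norm(dim=-1), y.norm(dim=-1), atol=1e-5)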
class CausalSelfAttention(nn.Module):
def __init__(self, dim, n_head):
super().__init__()
assert dim % n_head == 0
self.n_head = n_head
self.c_q = CastedLinear(dim, dim)
self.c_k = CastedLinear(dim, dim)
self.c_v = CastedLinear(dim, dim)
# value residual lambda
self.lamb = nn.Parameter(torch.tensor(0.5)) # @Grad62304977
# rotary embeddings
self.rotary = Rotary(dim // n_head) # dim // n_head = head_dim
# output projection
self.c_proj = CastedLinear(dim, dim)
self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977
def forward(self, x, vi, block_mask):
B, T = x.size(0), x.size(1) # batch size, sequence length
assert B == 1, "Must use batch size = 1 for FlexAttention"
q = self.c_q(x).view(B, T, self.n_head, -1)
k = self.c_k(x).view(B, T, self.n_head, -1)
v = self.c_v(x).view(B, T, self.n_head, -1)
v = (1 - self.lamb) * v + self.lamb * vi.view_as(v) # @Grad62304977
q, k = norm(q), norm(k) # QK norm suggested by @Grad62304977
q, k = self.rotary(q), self.rotary(k)
y = flex_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2), block_mask=block_mask)
y = y.transpose(1, 2).contiguous().view_as(x) # re-assemble all head outputs side by side
y = self.c_proj(y)
return y
class MLP(nn.Module):
def __init__(self, dim):
super().__init__()
self.c_fc = CastedLinear(dim, 4 * dim)
self.c_proj = CastedLinear(4 * dim, dim)
self.c_proj.weight.data.zero_() # zero init suggested by @Grad62304977
def forward(self, x):
x = self.c_fc(x)
x = F.relu(x).square() # https://arxiv.org/abs/2109.08668v2; ~1-2% better than GELU; suggested by @SKYLINEZ007 and @Grad62304977
x = self.c_proj(x)
return x
class Block(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = CausalSelfAttention(config.n_embd, config.n_head)
self.mlp = MLP(config.n_embd)
self.lambdas = nn.Parameter(torch.tensor([1., 0.]))
def forward(self, x, vi, x0, block_mask):
x = self.lambdas[0] * x + self.lambdas[1] * x0
x = x + self.attn(norm(x), vi, block_mask)
x = x + self.mlp(norm(x))
return x
# -----------------------------------------------------------------------------
# The main GPT-2 model
@dataclass
class GPTConfig:
vocab_size : int = 50304
n_layer : int = 12
n_head : int = 6 # head dim 128 suggested by @Grad62304977
n_embd : int = 768
class GPT(nn.Module):
def __init__(self, config):
super().__init__()
# U-net design by @brendanh0gan
self.num_encoder_layers = config.n_layer // 2 # Half of the layers for encoder
self.num_decoder_layers = config.n_layer - self.num_encoder_layers # Remaining for decoder
# Add learnable skip connection weights for decoder layers
self.skip_weights = nn.Parameter(torch.ones(self.num_decoder_layers))
self.transformer = nn.ModuleDict(dict(
wte = nn.Embedding(config.vocab_size, config.n_embd),
# token value embeddings by @KoszarskyB - inspired by @Grad62304977's value residual learning
vte = nn.Embedding(config.vocab_size, config.n_embd*12),
h = nn.ModuleList([Block(config) for _ in range(config.n_layer)]),
))
self.lm_head = CastedLinear(config.n_embd, config.vocab_size)
self.lm_head.weight.data.zero_() # @Grad62304977
def forward(self, idx, target, attn_blocksize):
docs = (idx == 50256).cumsum(0)
def document_causal_mask(b, h, q_idx, kv_idx):
causal_mask = q_idx >= kv_idx
document_mask = docs[q_idx] == docs[kv_idx]
window_mask = q_idx - kv_idx < attn_blocksize
return causal_mask & document_mask & window_mask
S = len(idx)
block_mask = create_block_mask(document_causal_mask, None, None, S, S, device="cuda", _compile=True)
# forward the GPT model itself
x = self.transformer.wte(idx[None]) # token embeddings of shape (b, t, n_embd)
x = norm(x) # @Grad62304977
x0 = x
vi = self.transformer.vte(idx[None]).chunk(12, dim=-1)
# Store outputs for U-Net skip connections
skip_connections = []
# Encoder pass - process only the first half of the blocks
for i in range(self.num_encoder_layers):
x = self.transformer.h[i](x, vi[i], x0, block_mask)
skip_connections.append(x)
# Decoder pass - process the remaining blocks with weighted skip connections
for i in range(self.num_decoder_layers):
x = x + self.skip_weights[i] * skip_connections.pop()
x = self.transformer.h[self.num_encoder_layers + i](x, vi[self.num_encoder_layers+i], x0, block_mask)
x = norm(x)
logits = self.lm_head(x)
logits = 30 * torch.tanh(logits / 30) # @Grad62304977
logits = logits.float()
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target.view(-1))
return loss
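# Toy illustration of the masking rule used in GPT.forward above (a standalone sketch
# with made-up tokens, never called in this script): a key position is visible to a
# query position only if it is causal, lies in the same document (documents are
# delimited by token 50256), and falls within the attn_blocksize sliding window.
def _toy_document_causal_mask(attn_blocksize=2):
    idx = torch.tensor([5, 7, 50256, 9, 11])  # two short documents split by the delimiter
    docs = (idx == 50256).cumsum(0)
    q_idx = torch.arange(len(idx))[:, None]
    kv_idx = torch.arange(len(idx))[None, :]
    return (q_idx >= kv_idx) & (docs[q_idx] == docs[kv_idx]) & (q_idx - kv_idx < attn_blocksize)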
# -----------------------------------------------------------------------------
# Our own simple Distributed Data Loader
def _peek_data_shard(filename):
# only reads the header, returns header data
with open(filename, "rb") as f:
# first read the header, which is 256 int32 integers (4 bytes each)
header = np.frombuffer(f.read(256*4), dtype=np.int32)
if header[0] != 20240520:
print("ERROR: magic number mismatch in the data .bin file!")
print("---> HINT: Are you passing in a correct file with --input_bin?")
print("---> HINT: Dataset encoding changed recently, re-run data prepro or refer again to README")
print("---> HINT: For example re-run: `python dev/data/tinyshakespeare.py`, then re-try")
exit(1)
assert header[1] == 1, "unsupported version"
ntok = header[2] # number of tokens (claimed)
return ntok # for now just return the number of tokens
def _load_data_shard(filename):
with open(filename, "rb") as f:
# first read the header, which is 256 int32 integers (4 bytes each)
header = np.frombuffer(f.read(256*4), dtype=np.int32)
assert header[0] == 20240520, "magic number mismatch in the data .bin file"
assert header[1] == 1, "unsupported version"
ntok = header[2] # number of tokens (claimed)
# the rest of it are tokens, stored as uint16
tokens = np.frombuffer(f.read(), dtype=np.uint16)
assert len(tokens) == ntok, "number of tokens read does not match header?"
return tokens
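# For reference, a shard compatible with the two readers above could be produced like
# this (a minimal sketch with a hypothetical helper name; the real shards come from the
# data preprocessing scripts): a 256-entry int32 header whose first three fields are the
# magic number, the version, and the token count (the readers ignore the rest, left as
# zeros here), followed by the tokens stored as uint16.
def _write_data_shard(filename, tokens):
    header = np.zeros(256, dtype=np.int32)
    header[0] = 20240520     # magic number checked by the readers above
    header[1] = 1            # version
    header[2] = len(tokens)  # number of tokens that follow
    with open(filename, "wb") as f:
        f.write(header.tobytes())
        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())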
class DistributedDataLoader:
def __init__(self, filename_pattern, T, process_rank, num_processes):
self.process_rank = process_rank
self.num_processes = num_processes
self.T = T
# glob files that match the pattern
self.files = sorted(glob.glob(filename_pattern))
assert len(self.files) > 0, f"did not find any files that match the pattern {filename_pattern}"
# load and validate all data shards, count number of tokens in total
ntok_total = 0
for fname in self.files:
shard_ntok = _peek_data_shard(fname)
assert shard_ntok >= num_processes * T + 1
ntok_total += int(shard_ntok)
self.ntok_total = ntok_total
self.reset()
def reset(self):
self.current_shard = -1
self.advance()
def advance(self): # advance to next data shard
self.current_shard = (self.current_shard + 1) % len(self.files)
self.current_position = self.process_rank * self.T
self.tokens = _load_data_shard(self.files[self.current_shard])
def next_batch(self):
batch_size = self.T * self.num_processes
buf = self.tokens[self.current_position:self.current_position+self.T+1]
buf = torch.tensor(buf.astype(np.int32), dtype=torch.long)
x = buf[:-1] # inputs
y = buf[1:] # targets
# advance current position and load next shard if necessary
self.current_position += batch_size
if self.current_position + batch_size >= len(self.tokens):
self.advance()
return x.cuda(), y.cuda()
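# Sharding note (descriptive aside matching the logic above): within each shard, rank r
# starts at position r*T and reads T+1 tokens (inputs plus one-step-shifted targets);
# all ranks then advance by num_processes*T together, so the processes sweep the shard
# in lockstep, each consuming its own T-token slice per step.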
# -----------------------------------------------------------------------------
# int main
@dataclass
class Hyperparameters:
# data hyperparams
input_bin : str = 'data/fineweb10B/fineweb_train_*.bin' # input .bin to train on
input_val_bin : str = 'data/fineweb10B/fineweb_val_*.bin' # input .bin to eval validation loss on
# optimization hyperparams
batch_size : int = 8 # batch size, in sequences, across all devices
sequence_length : int = 64*1024 # sequence length, in tokens
num_iterations : int = 1530 # number of iterations to run
warmup_iters : int = 0
cooldown_iters : int = 600 # number of iterations of linear warmup/cooldown for triangular or trapezoidal schedule
weight_decay : float = 0
# evaluation and logging hyperparams
    val_loss_every : int = 125 # how often (in steps) to evaluate the val loss; 0 means only at the end
val_tokens : int = 10485760 # how many tokens of validation data? it's important to keep this fixed for consistent comparisons
    save_every : int = 0 # how often (in steps) to save a checkpoint; 0 means only at the end
args = Hyperparameters()
# set up DDP (distributed data parallel). torchrun sets these env variables
assert torch.cuda.is_available()
dist.init_process_group(backend='nccl')
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
ddp_world_size = int(os.environ['WORLD_SIZE'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
print(f"using device: {device}")
master_process = (ddp_rank == 0) # this process will do logging, checkpointing etc.
# begin logging
logfile = None
if master_process:
run_id = str(uuid.uuid4())
logdir = 'logs/%s/' % run_id
os.makedirs(logdir, exist_ok=True)
logfile = 'logs/%s.txt' % run_id
# create the log file
with open(logfile, "w") as f:
# begin the log by printing this file (the Python code)
f.write(code)
f.write('='*100 + '\n')
def print0(s, logonly=False):
if master_process:
with open(logfile, "a") as f:
if not logonly:
print(s)
f.write(s+'\n')
# log information about the hardware/software environment this is running on
# and print the full `nvidia-smi` to file
print0(f"Running pytorch {torch.version.__version__} compiled for CUDA {torch.version.cuda}\nnvidia-smi:")
import subprocess
result = subprocess.run(['nvidia-smi'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
print0(f'{result.stdout}', logonly=True)
print0('='*100, logonly=True)
# convenience variables
T = args.sequence_length
# calculate the number of steps to take in the val loop.
assert args.val_tokens % (T * ddp_world_size) == 0
val_steps = args.val_tokens // (T * ddp_world_size)
# calculate the steps of gradient accumulation required to attain the desired global batch size.
assert args.batch_size % (ddp_world_size) == 0
train_accumulation_steps = args.batch_size // ddp_world_size
# load tokens
train_loader = DistributedDataLoader(args.input_bin, T, ddp_rank, ddp_world_size)
val_loader = DistributedDataLoader(args.input_val_bin, T, ddp_rank, ddp_world_size)
print0(f"Training DataLoader: total number of tokens: {train_loader.ntok_total} across {len(train_loader.files)} files")
print0(f"Validation DataLoader: total number of tokens: {val_loader.ntok_total} across {len(val_loader.files)} files")
print0('='*100, logonly=True)
x, y = train_loader.next_batch()
# there are only 50257 unique GPT-2 tokens; we extend to nearest multiple of 128 for efficiency. suggested to me by @Grad62304977.
# this originates from Karpathy's experiments.
num_vocab = 50304
model = GPT(GPTConfig(vocab_size=num_vocab, n_layer=12, n_head=6, n_embd=768))
model = model.cuda().bfloat16()
for m in model.modules():
if isinstance(m, CastedLinear):
m.float()
if hasattr(config, "coordinate_descent_tuning"):
config.coordinate_descent_tuning = True # suggested by @Chillee
model = torch.compile(model)
# here we wrap model into DDP container
model = DDP(model, device_ids=[ddp_local_rank])
raw_model = model.module # always contains the "raw" unwrapped model
# init the optimizer(s)
optimizer1 = torch.optim.Adam([raw_model.transformer.wte.weight, raw_model.transformer.vte.weight], lr=0.6, betas=(0.8, 0.95), fused=True)
optimizer2 = torch.optim.Adam([raw_model.lm_head.weight], lr=0.008, betas=(0.8, 0.95), fused=True)
params = list(raw_model.transformer.h.parameters())
matrix_params = [p for p in params if p.ndim == 2]
scalar_params = [p for p in params if p.ndim < 2] + [raw_model.skip_weights]
optimizer3 = Muon(matrix_params, lr=0.05, momentum=0.95)
optimizer4 = torch.optim.Adam(scalar_params, lr=0.04, betas=(0.8, 0.95), fused=True) # note that this learning rate is neither sensitive nor tuned
optimizers = [optimizer1, optimizer2, optimizer3, optimizer4]
# learning rate decay scheduler (linear warmup and cooldown)
def get_lr(it):
assert it <= args.num_iterations
# 1) linear warmup for warmup_iters steps
if it < args.warmup_iters:
return (it+1) / args.warmup_iters
# 2) constant lr for a while
elif it < args.num_iterations - args.cooldown_iters:
return 1.0
# 3) linear cooldown
else:
decay_ratio = (args.num_iterations - it) / args.cooldown_iters
return decay_ratio
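# Worked example with this run's settings (warmup_iters=0, num_iterations=1530,
# cooldown_iters=600): the multiplier stays at 1.0 through step 930 and then decays
# linearly, e.g. get_lr(1230) = (1530 - 1230) / 600 = 0.5 and get_lr(1530) = 0.0 --
# the flat-then-decay case of the warmup/cooldown schedule described in Hyperparameters.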
schedulers = [torch.optim.lr_scheduler.LambdaLR(opt, get_lr) for opt in optimizers]
# Start training loop
training_time_ms = 0
# start the clock
torch.cuda.synchronize()
t0 = time.time()
# begin training
for step in range(args.num_iterations + 1):
last_step = (step == args.num_iterations)
    # This effectively ignores the timing of the first 10 steps, which are slower for weird reasons.
    # Alternatively, and slightly more correctly in terms of benchmarking, we could do 10
    # steps with dummy data first, and then re-initialize the model and reset the loader.
if step == 10:
training_time_ms = 0
t0 = time.time()
timed_steps = float('nan') if step <= 11 else (step - 10) + 1 # <= 11 to avoid bug in val
    # Set the attention blocksize for the current step, in chunks of 64: it ramps linearly from 64 up to 1792 tokens over the course of training. By @fernbear.bsky.social
attn_blocksize = torch.tensor(64*((step/args.num_iterations * (1792 - 64) + 64)//64), dtype=torch.int, device='cuda')
# once in a while evaluate the validation dataset
if (last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0)):
# stop the clock
torch.cuda.synchronize()
training_time_ms += 1000 * (time.time() - t0)
# run validation batches
model.eval()
val_loader.reset()
val_loss = 0.0
for _ in range(val_steps):
with torch.no_grad():
x_val, y_val = val_loader.next_batch()
val_loss += model(x_val, y_val, attn_blocksize=attn_blocksize)
dist.all_reduce(val_loss, op=dist.ReduceOp.AVG)
val_loss /= val_steps
# log val loss to console and to logfile
print0(f'step:{step}/{args.num_iterations} val_loss:{val_loss:.4f} train_time:{training_time_ms:.0f}ms step_avg:{training_time_ms/(timed_steps-1):.2f}ms')
# start the clock again
torch.cuda.synchronize()
t0 = time.time()
if master_process and (last_step or (args.save_every > 0 and step % args.save_every == 0)):
# stop the clock
torch.cuda.synchronize()
training_time_ms += 1000 * (time.time() - t0)
# save the state of the training process
log = dict(step=step, code=code, model=raw_model.state_dict(), optimizers=[opt.state_dict() for opt in optimizers])
torch.save(log, 'logs/%s/state_step%06d.pt' % (run_id, step))
# start the clock again
torch.cuda.synchronize()
t0 = time.time()
    # bit confusing: we want to make sure to eval on the 0th iteration
    # but also after the very last iteration. so we loop for step <= num_iterations
    # instead of just < num_iterations (one extra iteration due to <=), only to do
    # the validation/checkpointing one last time, and then we break right here as we're done.
if last_step:
break
# --------------- TRAINING SECTION BEGIN -----------------
model.train()
for i in range(1, train_accumulation_steps+1):
ctx = model.no_sync() if i < train_accumulation_steps else contextlib.nullcontext()
with ctx: # there's no need to sync gradients every accumulation step
# forward pass
loss = model(x, y, attn_blocksize=attn_blocksize)
# advance the dataset for the next batch
x, y = train_loader.next_batch()
# backward pass
loss.backward()
train_loss = loss.detach()
for p in model.parameters():
p.grad /= train_accumulation_steps
    # momentum warmup for Muon: ramp momentum from 0.85 to 0.95 over the first 300 steps
frac = min(step/300, 1)
optimizer3.param_groups[0]['momentum'] = (1 - frac) * 0.85 + frac * 0.95
# step the optimizers and schedulers
for opt, sched in zip(optimizers, schedulers):
opt.step()
sched.step()
# null the gradients
model.zero_grad(set_to_none=True)
# --------------- TRAINING SECTION END -------------------
# everything that follows now is just diagnostics, prints, logging, etc.
#dist.all_reduce(train_loss, op=dist.ReduceOp.AVG) # all-reducing the training loss would be more correct in terms of logging, but slower
approx_time = training_time_ms + 1000 * (time.time() - t0)
print0(f"step:{step+1}/{args.num_iterations} train_loss:{train_loss.item():.4f} train_time:{approx_time:.0f}ms step_avg:{approx_time/timed_steps:.2f}ms")
if master_process:
print(f"peak memory consumption: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB")
# -------------------------------------------------------------------------
# clean up nice
dist.destroy_process_group()
====================================================================================================
Running pytorch 2.6.0.dev20241203+cu124 compiled for CUDA 12.4
nvidia-smi:
Thu Dec 5 01:03:59 2024
+---------------------------------------------------------------------------------------+
| NVIDIA-SMI 535.183.06 Driver Version: 535.183.06 CUDA Version: 12.2 |
|-----------------------------------------+----------------------+----------------------+
| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|=========================================+======================+======================|
| 0 NVIDIA H100 80GB HBM3 On | 00000000:19:00.0 Off | 0 |
| N/A 38C P0 75W / 700W | 3MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 1 NVIDIA H100 80GB HBM3 On | 00000000:3B:00.0 Off | 0 |
| N/A 29C P0 73W / 700W | 3MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 2 NVIDIA H100 80GB HBM3 On | 00000000:4C:00.0 Off | 0 |
| N/A 31C P0 117W / 700W | 41MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 3 NVIDIA H100 80GB HBM3 On | 00000000:5D:00.0 Off | 0 |
| N/A 37C P0 74W / 700W | 3MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 4 NVIDIA H100 80GB HBM3 On | 00000000:9B:00.0 Off | 0 |
| N/A 39C P0 123W / 700W | 529MiB / 81559MiB | 1% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 5 NVIDIA H100 80GB HBM3 On | 00000000:BB:00.0 Off | 0 |
| N/A 29C P0 110W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 6 NVIDIA H100 80GB HBM3 On | 00000000:CB:00.0 Off | 0 |
| N/A 38C P0 108W / 700W | 22MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
| 7 NVIDIA H100 80GB HBM3 On | 00000000:DB:00.0 Off | 0 |
| N/A 30C P0 118W / 700W | 529MiB / 81559MiB | 0% Default |
| | | Disabled |
+-----------------------------------------+----------------------+----------------------+
+---------------------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=======================================================================================|
+---------------------------------------------------------------------------------------+
====================================================================================================
Training DataLoader: total number of tokens: 1100000000 across 11 files
Validation DataLoader: total number of tokens: 100000000 across 1 files
====================================================================================================
step:0/1530 val_loss:10.8258 train_time:0ms step_avg:nanms
step:1/1530 train_loss:10.8258 train_time:32179ms step_avg:nanms
step:2/1530 train_loss:10.0826 train_time:32290ms step_avg:nanms
step:3/1530 train_loss:8.3313 train_time:32450ms step_avg:nanms
step:4/1530 train_loss:7.6546 train_time:32611ms step_avg:nanms
step:5/1530 train_loss:7.6265 train_time:32770ms step_avg:nanms
step:6/1530 train_loss:7.0802 train_time:32932ms step_avg:nanms
step:7/1530 train_loss:7.2240 train_time:33092ms step_avg:nanms
step:8/1530 train_loss:6.7802 train_time:33253ms step_avg:nanms
step:9/1530 train_loss:6.6586 train_time:33413ms step_avg:nanms
step:10/1530 train_loss:6.5721 train_time:33574ms step_avg:nanms
step:11/1530 train_loss:6.4864 train_time:115ms step_avg:nanms
step:12/1530 train_loss:6.3776 train_time:274ms step_avg:nanms
step:13/1530 train_loss:6.2403 train_time:435ms step_avg:145.03ms
step:14/1530 train_loss:6.1898 train_time:595ms step_avg:148.87ms
step:15/1530 train_loss:6.1448 train_time:756ms step_avg:151.14ms
step:16/1530 train_loss:6.1296 train_time:917ms step_avg:152.76ms
step:17/1530 train_loss:6.1676 train_time:1076ms step_avg:153.71ms
step:18/1530 train_loss:5.9947 train_time:1236ms step_avg:154.56ms
step:19/1530 train_loss:5.9798 train_time:1397ms step_avg:155.17ms
step:20/1530 train_loss:5.6551 train_time:1556ms step_avg:155.60ms
step:21/1530 train_loss:5.9447 train_time:1717ms step_avg:156.10ms
step:22/1530 train_loss:6.1764 train_time:1877ms step_avg:156.40ms
step:23/1530 train_loss:5.8562 train_time:2037ms step_avg:156.72ms
step:24/1530 train_loss:6.0149 train_time:2197ms step_avg:156.95ms
step:25/1530 train_loss:5.6802 train_time:2357ms step_avg:157.14ms
step:26/1530 train_loss:5.6142 train_time:2517ms step_avg:157.32ms
step:27/1530 train_loss:5.7736 train_time:2677ms step_avg:157.47ms
step:28/1530 train_loss:5.4407 train_time:2838ms step_avg:157.64ms
step:29/1530 train_loss:5.6784 train_time:2998ms step_avg:157.78ms
step:30/1530 train_loss:5.4764 train_time:3158ms step_avg:157.88ms
step:31/1530 train_loss:5.4368 train_time:3319ms step_avg:158.03ms
step:32/1530 train_loss:5.2963 train_time:3478ms step_avg:158.08ms
step:33/1530 train_loss:5.5781 train_time:3638ms step_avg:158.18ms
step:34/1530 train_loss:5.5047 train_time:3798ms step_avg:158.24ms
step:35/1530 train_loss:5.6052 train_time:3957ms step_avg:158.27ms
step:36/1530 train_loss:5.5442 train_time:4118ms step_avg:158.38ms
step:37/1530 train_loss:5.4581 train_time:4277ms step_avg:158.41ms
step:38/1530 train_loss:5.3046 train_time:4437ms step_avg:158.47ms
step:39/1530 train_loss:5.3198 train_time:4598ms step_avg:158.55ms
step:40/1530 train_loss:5.2723 train_time:4757ms step_avg:158.56ms
step:41/1530 train_loss:5.2407 train_time:4917ms step_avg:158.60ms
step:42/1530 train_loss:5.1802 train_time:5077ms step_avg:158.65ms
step:43/1530 train_loss:5.2727 train_time:5237ms step_avg:158.68ms
step:44/1530 train_loss:5.2516 train_time:5396ms step_avg:158.72ms
step:45/1530 train_loss:5.3865 train_time:5557ms step_avg:158.77ms
step:46/1530 train_loss:5.1813 train_time:5717ms step_avg:158.81ms
step:47/1530 train_loss:5.0656 train_time:5877ms step_avg:158.84ms
step:48/1530 train_loss:5.2120 train_time:6037ms step_avg:158.87ms
step:49/1530 train_loss:5.1369 train_time:6197ms step_avg:158.89ms
step:50/1530 train_loss:5.2571 train_time:6356ms step_avg:158.91ms
step:51/1530 train_loss:5.1472 train_time:6516ms step_avg:158.93ms
step:52/1530 train_loss:5.0305 train_time:6676ms step_avg:158.96ms
step:53/1530 train_loss:5.1830 train_time:6836ms step_avg:158.98ms
step:54/1530 train_loss:5.0296 train_time:6996ms step_avg:159.00ms
step:55/1530 train_loss:5.4225 train_time:7156ms step_avg:159.02ms
step:56/1530 train_loss:5.0384 train_time:7317ms step_avg:159.06ms
step:57/1530 train_loss:4.8932 train_time:7476ms step_avg:159.07ms
step:58/1530 train_loss:5.0717 train_time:7637ms step_avg:159.09ms
step:59/1530 train_loss:5.0525 train_time:7796ms step_avg:159.11ms
step:60/1530 train_loss:5.1381 train_time:7956ms step_avg:159.13ms
step:61/1530 train_loss:4.8429 train_time:8116ms step_avg:159.14ms
step:62/1530 train_loss:4.9827 train_time:8276ms step_avg:159.15ms
step:63/1530 train_loss:4.9820 train_time:8436ms step_avg:159.18ms
step:64/1530 train_loss:4.9757 train_time:8596ms step_avg:159.19ms
step:65/1530 train_loss:4.8004 train_time:8756ms step_avg:159.20ms
step:66/1530 train_loss:4.9526 train_time:8917ms step_avg:159.23ms
step:67/1530 train_loss:4.8287 train_time:9076ms step_avg:159.24ms
step:68/1530 train_loss:5.0988 train_time:9236ms step_avg:159.25ms
step:69/1530 train_loss:4.7238 train_time:9396ms step_avg:159.25ms
step:70/1530 train_loss:4.8568 train_time:9556ms step_avg:159.27ms
step:71/1530 train_loss:4.9589 train_time:9717ms step_avg:159.29ms
step:72/1530 train_loss:4.8896 train_time:9876ms step_avg:159.29ms
step:73/1530 train_loss:4.7753 train_time:10036ms step_avg:159.31ms
step:74/1530 train_loss:4.9091 train_time:10197ms step_avg:159.33ms
step:75/1530 train_loss:4.8581 train_time:10356ms step_avg:159.33ms
step:76/1530 train_loss:4.8086 train_time:10517ms step_avg:159.35ms
step:77/1530 train_loss:4.9186 train_time:10677ms step_avg:159.36ms
step:78/1530 train_loss:5.1131 train_time:10836ms step_avg:159.36ms
step:79/1530 train_loss:4.8061 train_time:10997ms step_avg:159.38ms
step:80/1530 train_loss:4.8533 train_time:11157ms step_avg:159.38ms
step:81/1530 train_loss:4.6510 train_time:11318ms step_avg:159.41ms
step:82/1530 train_loss:4.8381 train_time:11477ms step_avg:159.41ms
step:83/1530 train_loss:4.7706 train_time:11637ms step_avg:159.41ms
step:84/1530 train_loss:4.7591 train_time:11797ms step_avg:159.41ms
step:85/1530 train_loss:4.6267 train_time:11957ms step_avg:159.43ms
step:86/1530 train_loss:4.8375 train_time:12117ms step_avg:159.43ms
step:87/1530 train_loss:4.7435 train_time:12277ms step_avg:159.44ms
step:88/1530 train_loss:4.7501 train_time:12437ms step_avg:159.44ms
step:89/1530 train_loss:4.6992 train_time:12597ms step_avg:159.45ms
step:90/1530 train_loss:4.6467 train_time:12756ms step_avg:159.45ms
step:91/1530 train_loss:4.6325 train_time:12917ms step_avg:159.47ms
step:92/1530 train_loss:4.7817 train_time:13076ms step_avg:159.46ms
step:93/1530 train_loss:4.5869 train_time:13236ms step_avg:159.47ms
step:94/1530 train_loss:4.6444 train_time:13397ms step_avg:159.49ms
step:95/1530 train_loss:4.6774 train_time:13556ms step_avg:159.49ms
step:96/1530 train_loss:4.5901 train_time:13717ms step_avg:159.50ms
step:97/1530 train_loss:4.6305 train_time:13876ms step_avg:159.49ms
step:98/1530 train_loss:4.5838 train_time:14036ms step_avg:159.50ms
step:99/1530 train_loss:4.6661 train_time:14197ms step_avg:159.52ms
step:100/1530 train_loss:4.6778 train_time:14357ms step_avg:159.52ms
step:101/1530 train_loss:4.5212 train_time:14517ms step_avg:159.53ms
step:102/1530 train_loss:4.6935 train_time:14677ms step_avg:159.53ms
step:103/1530 train_loss:4.5630 train_time:14837ms step_avg:159.53ms
step:104/1530 train_loss:4.5435 train_time:14997ms step_avg:159.54ms
step:105/1530 train_loss:4.5379 train_time:15157ms step_avg:159.54ms
step:106/1530 train_loss:4.6056 train_time:15318ms step_avg:159.56ms
step:107/1530 train_loss:4.5034 train_time:15477ms step_avg:159.56ms
step:108/1530 train_loss:4.3566 train_time:15637ms step_avg:159.56ms
step:109/1530 train_loss:4.4802 train_time:15797ms step_avg:159.57ms
step:110/1530 train_loss:4.4779 train_time:15957ms step_avg:159.57ms
step:111/1530 train_loss:4.4234 train_time:16117ms step_avg:159.58ms
step:112/1530 train_loss:4.5780 train_time:16277ms step_avg:159.58ms
step:113/1530 train_loss:4.4858 train_time:16437ms step_avg:159.58ms
step:114/1530 train_loss:4.3581 train_time:16597ms step_avg:159.58ms
step:115/1530 train_loss:4.5015 train_time:16759ms step_avg:159.61ms
step:116/1530 train_loss:4.4685 train_time:16923ms step_avg:159.65ms
step:117/1530 train_loss:4.3697 train_time:17087ms step_avg:159.69ms
step:118/1530 train_loss:4.6020 train_time:17252ms step_avg:159.74ms
step:119/1530 train_loss:4.4501 train_time:17416ms step_avg:159.78ms
step:120/1530 train_loss:4.3183 train_time:17578ms step_avg:159.80ms
step:121/1530 train_loss:4.2895 train_time:17742ms step_avg:159.84ms
step:122/1530 train_loss:4.4446 train_time:17906ms step_avg:159.87ms
step:123/1530 train_loss:4.2733 train_time:18071ms step_avg:159.92ms
step:124/1530 train_loss:4.5801 train_time:18234ms step_avg:159.95ms
step:125/1530 train_loss:4.4448 train_time:18397ms step_avg:159.98ms
step:125/1530 val_loss:4.3946 train_time:18445ms step_avg:160.39ms
step:126/1530 train_loss:4.4061 train_time:18564ms step_avg:160.03ms
step:127/1530 train_loss:4.4320 train_time:18729ms step_avg:160.08ms
step:128/1530 train_loss:4.3886 train_time:18892ms step_avg:160.10ms
step:129/1530 train_loss:4.7095 train_time:19056ms step_avg:160.13ms
step:130/1530 train_loss:4.3557 train_time:19219ms step_avg:160.16ms
step:131/1530 train_loss:4.3792 train_time:19383ms step_avg:160.19ms
step:132/1530 train_loss:4.3441 train_time:19546ms step_avg:160.22ms
step:133/1530 train_loss:4.4422 train_time:19710ms step_avg:160.24ms
step:134/1530 train_loss:4.2604 train_time:19874ms step_avg:160.27ms
step:135/1530 train_loss:4.4543 train_time:20037ms step_avg:160.29ms
step:136/1530 train_loss:4.2160 train_time:20200ms step_avg:160.32ms
step:137/1530 train_loss:4.3722 train_time:20365ms step_avg:160.35ms
step:138/1530 train_loss:4.2805 train_time:20529ms step_avg:160.38ms
step:139/1530 train_loss:4.3714 train_time:20692ms step_avg:160.40ms
step:140/1530 train_loss:4.4764 train_time:20856ms step_avg:160.43ms
step:141/1530 train_loss:4.3094 train_time:21021ms step_avg:160.46ms
step:142/1530 train_loss:4.3034 train_time:21185ms step_avg:160.49ms
step:143/1530 train_loss:4.2528 train_time:21348ms step_avg:160.51ms
step:144/1530 train_loss:4.3462 train_time:21511ms step_avg:160.53ms
step:145/1530 train_loss:4.2989 train_time:21674ms step_avg:160.55ms
step:146/1530 train_loss:4.1646 train_time:21838ms step_avg:160.57ms
step:147/1530 train_loss:4.3244 train_time:22003ms step_avg:160.61ms
step:148/1530 train_loss:4.3550 train_time:22168ms step_avg:160.63ms
step:149/1530 train_loss:4.3017 train_time:22331ms step_avg:160.65ms
step:150/1530 train_loss:4.4436 train_time:22495ms step_avg:160.68ms
step:151/1530 train_loss:4.2742 train_time:22657ms step_avg:160.69ms
step:152/1530 train_loss:4.2800 train_time:22822ms step_avg:160.72ms
step:153/1530 train_loss:4.3669 train_time:22986ms step_avg:160.74ms
step:154/1530 train_loss:4.3653 train_time:23148ms step_avg:160.75ms
step:155/1530 train_loss:4.2735 train_time:23312ms step_avg:160.77ms
step:156/1530 train_loss:4.3458 train_time:23477ms step_avg:160.80ms
step:157/1530 train_loss:4.4000 train_time:23641ms step_avg:160.82ms
step:158/1530 train_loss:4.2463 train_time:23805ms step_avg:160.84ms
step:159/1530 train_loss:4.3054 train_time:23968ms step_avg:160.86ms
step:160/1530 train_loss:4.1273 train_time:24130ms step_avg:160.87ms
step:161/1530 train_loss:4.3501 train_time:24295ms step_avg:160.90ms
step:162/1530 train_loss:4.3601 train_time:24460ms step_avg:160.92ms
step:163/1530 train_loss:4.3408 train_time:24623ms step_avg:160.93ms
step:164/1530 train_loss:4.1896 train_time:24787ms step_avg:160.95ms
step:165/1530 train_loss:4.2903 train_time:24950ms step_avg:160.96ms
step:166/1530 train_loss:4.3444 train_time:25113ms step_avg:160.98ms
step:167/1530 train_loss:4.2000 train_time:25276ms step_avg:161.00ms
step:168/1530 train_loss:4.2932 train_time:25440ms step_avg:161.01ms
step:169/1530 train_loss:4.1610 train_time:25604ms step_avg:161.03ms
step:170/1530 train_loss:4.0266 train_time:25769ms step_avg:161.06ms
step:171/1530 train_loss:4.1986 train_time:25932ms step_avg:161.07ms
step:172/1530 train_loss:4.2174 train_time:26095ms step_avg:161.08ms
step:173/1530 train_loss:4.2636 train_time:26257ms step_avg:161.09ms
step:174/1530 train_loss:4.4201 train_time:26420ms step_avg:161.10ms
step:175/1530 train_loss:4.2384 train_time:26583ms step_avg:161.11ms
step:176/1530 train_loss:4.0942 train_time:26746ms step_avg:161.12ms
step:177/1530 train_loss:4.0694 train_time:26908ms step_avg:161.13ms
step:178/1530 train_loss:4.1788 train_time:27070ms step_avg:161.13ms
step:179/1530 train_loss:4.1223 train_time:27232ms step_avg:161.14ms
step:180/1530 train_loss:4.1057 train_time:27396ms step_avg:161.15ms
step:181/1530 train_loss:4.2815 train_time:27557ms step_avg:161.15ms
step:182/1530 train_loss:4.1435 train_time:27721ms step_avg:161.17ms
step:183/1530 train_loss:4.1321 train_time:27884ms step_avg:161.18ms
step:184/1530 train_loss:4.1218 train_time:28046ms step_avg:161.18ms
step:185/1530 train_loss:4.2037 train_time:28209ms step_avg:161.19ms
step:186/1530 train_loss:4.1668 train_time:28372ms step_avg:161.20ms
step:187/1530 train_loss:4.2267 train_time:28533ms step_avg:161.21ms
step:188/1530 train_loss:4.1675 train_time:28837ms step_avg:162.00ms
step:189/1530 train_loss:4.1085 train_time:29168ms step_avg:162.95ms
step:190/1530 train_loss:4.2052 train_time:29328ms step_avg:162.93ms
step:191/1530 train_loss:4.0803 train_time:29492ms step_avg:162.94ms
step:192/1530 train_loss:4.0254 train_time:29654ms step_avg:162.93ms
step:193/1530 train_loss:4.2475 train_time:29817ms step_avg:162.93ms
step:194/1530 train_loss:4.1707 train_time:29981ms step_avg:162.94ms
step:195/1530 train_loss:4.3478 train_time:30143ms step_avg:162.94ms
step:196/1530 train_loss:4.1735 train_time:30307ms step_avg:162.94ms
step:197/1530 train_loss:4.0386 train_time:30470ms step_avg:162.94ms
step:198/1530 train_loss:4.1749 train_time:30630ms step_avg:162.92ms
step:199/1530 train_loss:4.0267 train_time:30793ms step_avg:162.93ms
step:200/1530 train_loss:4.1064 train_time:30956ms step_avg:162.93ms
step:201/1530 train_loss:4.0144 train_time:31118ms step_avg:162.92ms
step:202/1530 train_loss:4.2531 train_time:31282ms step_avg:162.93ms
step:203/1530 train_loss:4.0671 train_time:31443ms step_avg:162.92ms
step:204/1530 train_loss:4.1888 train_time:31606ms step_avg:162.92ms
step:205/1530 train_loss:4.2371 train_time:31769ms step_avg:162.92ms
step:206/1530 train_loss:3.9445 train_time:31931ms step_avg:162.91ms
step:207/1530 train_loss:4.0806 train_time:32094ms step_avg:162.91ms
step:208/1530 train_loss:4.0954 train_time:32255ms step_avg:162.91ms
step:209/1530 train_loss:4.2392 train_time:32418ms step_avg:162.91ms
step:210/1530 train_loss:4.1709 train_time:32583ms step_avg:162.91ms
step:211/1530 train_loss:4.0590 train_time:32744ms step_avg:162.91ms
step:212/1530 train_loss:4.1159 train_time:32908ms step_avg:162.91ms
step:213/1530 train_loss:4.0592 train_time:33070ms step_avg:162.91ms
step:214/1530 train_loss:4.1167 train_time:33231ms step_avg:162.90ms
step:215/1530 train_loss:3.9607 train_time:33396ms step_avg:162.91ms
step:216/1530 train_loss:3.9982 train_time:33560ms step_avg:162.91ms
step:217/1530 train_loss:4.0081 train_time:33724ms step_avg:162.92ms
step:218/1530 train_loss:4.0835 train_time:33887ms step_avg:162.92ms
step:219/1530 train_loss:4.0763 train_time:34049ms step_avg:162.91ms
step:220/1530 train_loss:4.0751 train_time:34211ms step_avg:162.91ms
step:221/1530 train_loss:4.0856 train_time:34374ms step_avg:162.91ms
step:222/1530 train_loss:3.9873 train_time:34536ms step_avg:162.90ms
step:223/1530 train_loss:3.9925 train_time:34697ms step_avg:162.90ms
step:224/1530 train_loss:4.2970 train_time:34861ms step_avg:162.90ms
step:225/1530 train_loss:3.9209 train_time:35025ms step_avg:162.91ms
step:226/1530 train_loss:3.9831 train_time:35188ms step_avg:162.91ms
step:227/1530 train_loss:3.9741 train_time:35350ms step_avg:162.90ms
step:228/1530 train_loss:4.1422 train_time:35514ms step_avg:162.91ms
step:229/1530 train_loss:3.9203 train_time:35681ms step_avg:162.93ms
step:230/1530 train_loss:4.0411 train_time:35847ms step_avg:162.94ms
step:231/1530 train_loss:3.9046 train_time:36013ms step_avg:162.95ms
step:232/1530 train_loss:3.9698 train_time:36179ms step_avg:162.97ms
step:233/1530 train_loss:4.0840 train_time:36345ms step_avg:162.98ms
step:234/1530 train_loss:4.0249 train_time:36511ms step_avg:163.00ms
step:235/1530 train_loss:3.9049 train_time:36678ms step_avg:163.01ms
step:236/1530 train_loss:4.0767 train_time:36844ms step_avg:163.03ms
step:237/1530 train_loss:4.0809 train_time:37010ms step_avg:163.04ms
step:238/1530 train_loss:3.9424 train_time:37177ms step_avg:163.05ms
step:239/1530 train_loss:4.0773 train_time:37343ms step_avg:163.07ms
step:240/1530 train_loss:4.1107 train_time:37509ms step_avg:163.08ms
step:241/1530 train_loss:3.9648 train_time:37673ms step_avg:163.09ms
step:242/1530 train_loss:4.1442 train_time:37840ms step_avg:163.11ms
step:243/1530 train_loss:4.0027 train_time:38007ms step_avg:163.12ms
step:244/1530 train_loss:4.0736 train_time:38172ms step_avg:163.13ms
step:245/1530 train_loss:4.1375 train_time:38337ms step_avg:163.14ms
step:246/1530 train_loss:4.0541 train_time:38504ms step_avg:163.15ms
step:247/1530 train_loss:4.0045 train_time:38669ms step_avg:163.16ms
step:248/1530 train_loss:4.1003 train_time:38836ms step_avg:163.17ms
step:249/1530 train_loss:3.9194 train_time:39001ms step_avg:163.18ms
step:250/1530 train_loss:3.9709 train_time:39167ms step_avg:163.19ms
step:250/1530 val_loss:4.0051 train_time:39214ms step_avg:163.39ms
step:251/1530 train_loss:4.0722 train_time:39334ms step_avg:163.21ms
step:252/1530 train_loss:4.1518 train_time:39501ms step_avg:163.23ms
step:253/1530 train_loss:3.9307 train_time:39668ms step_avg:163.24ms
step:254/1530 train_loss:3.8710 train_time:39832ms step_avg:163.25ms
step:255/1530 train_loss:4.0689 train_time:39998ms step_avg:163.26ms
step:256/1530 train_loss:3.9874 train_time:40165ms step_avg:163.27ms
step:257/1530 train_loss:3.9866 train_time:40330ms step_avg:163.28ms
step:258/1530 train_loss:3.9861 train_time:40495ms step_avg:163.29ms
step:259/1530 train_loss:4.0311 train_time:40664ms step_avg:163.31ms
step:260/1530 train_loss:4.0593 train_time:40830ms step_avg:163.32ms
step:261/1530 train_loss:4.0189 train_time:40995ms step_avg:163.33ms
step:262/1530 train_loss:3.9848 train_time:41161ms step_avg:163.34ms
step:263/1530 train_loss:3.8943 train_time:41327ms step_avg:163.35ms
step:264/1530 train_loss:3.9879 train_time:41493ms step_avg:163.36ms
step:265/1530 train_loss:3.8595 train_time:41660ms step_avg:163.37ms
step:266/1530 train_loss:3.9155 train_time:41826ms step_avg:163.38ms
step:267/1530 train_loss:3.9247 train_time:41991ms step_avg:163.39ms
step:268/1530 train_loss:3.9652 train_time:42157ms step_avg:163.40ms
step:269/1530 train_loss:3.8483 train_time:42323ms step_avg:163.41ms
step:270/1530 train_loss:4.0853 train_time:42489ms step_avg:163.42ms
step:271/1530 train_loss:3.9661 train_time:42656ms step_avg:163.43ms
step:272/1530 train_loss:3.9236 train_time:42822ms step_avg:163.44ms
step:273/1530 train_loss:3.9387 train_time:42987ms step_avg:163.45ms
step:274/1530 train_loss:4.0384 train_time:43154ms step_avg:163.46ms
step:275/1530 train_loss:4.0619 train_time:43321ms step_avg:163.47ms
step:276/1530 train_loss:4.2205 train_time:43486ms step_avg:163.48ms
step:277/1530 train_loss:4.0360 train_time:43652ms step_avg:163.49ms
step:278/1530 train_loss:4.0870 train_time:43817ms step_avg:163.49ms
step:279/1530 train_loss:4.0012 train_time:43984ms step_avg:163.51ms
step:280/1530 train_loss:4.2103 train_time:44151ms step_avg:163.52ms
step:281/1530 train_loss:3.9764 train_time:44318ms step_avg:163.53ms
step:282/1530 train_loss:3.9436 train_time:44485ms step_avg:163.55ms
step:283/1530 train_loss:3.9106 train_time:44650ms step_avg:163.55ms
step:284/1530 train_loss:4.0426 train_time:44816ms step_avg:163.56ms
step:285/1530 train_loss:4.0638 train_time:44982ms step_avg:163.57ms
step:286/1530 train_loss:4.0872 train_time:45147ms step_avg:163.57ms
step:287/1530 train_loss:3.9027 train_time:45311ms step_avg:163.58ms
step:288/1530 train_loss:4.0083 train_time:45476ms step_avg:163.58ms
step:289/1530 train_loss:3.8766 train_time:45643ms step_avg:163.60ms
step:290/1530 train_loss:3.8570 train_time:45807ms step_avg:163.60ms
step:291/1530 train_loss:3.9067 train_time:45972ms step_avg:163.60ms
step:292/1530 train_loss:3.8623 train_time:46138ms step_avg:163.61ms
step:293/1530 train_loss:3.8938 train_time:46303ms step_avg:163.61ms
step:294/1530 train_loss:3.9335 train_time:46467ms step_avg:163.62ms
step:295/1530 train_loss:3.8315 train_time:46631ms step_avg:163.62ms
step:296/1530 train_loss:3.8577 train_time:46797ms step_avg:163.63ms
step:297/1530 train_loss:3.8600 train_time:46962ms step_avg:163.63ms
step:298/1530 train_loss:3.9687 train_time:47128ms step_avg:163.64ms
step:299/1530 train_loss:3.8193 train_time:47292ms step_avg:163.64ms
step:300/1530 train_loss:3.9658 train_time:47458ms step_avg:163.65ms
step:301/1530 train_loss:3.9584 train_time:47624ms step_avg:163.66ms
step:302/1530 train_loss:3.9273 train_time:47790ms step_avg:163.66ms
step:303/1530 train_loss:3.9720 train_time:47956ms step_avg:163.67ms
step:304/1530 train_loss:3.9654 train_time:48121ms step_avg:163.68ms
step:305/1530 train_loss:4.4428 train_time:48286ms step_avg:163.68ms
step:306/1530 train_loss:3.9282 train_time:48451ms step_avg:163.68ms
step:307/1530 train_loss:3.8370 train_time:48615ms step_avg:163.69ms
step:308/1530 train_loss:3.9746 train_time:48780ms step_avg:163.69ms
step:309/1530 train_loss:3.8687 train_time:48947ms step_avg:163.70ms
step:310/1530 train_loss:4.0778 train_time:49111ms step_avg:163.70ms
step:311/1530 train_loss:3.9167 train_time:49274ms step_avg:163.70ms
step:312/1530 train_loss:3.8634 train_time:49442ms step_avg:163.71ms
step:313/1530 train_loss:3.9372 train_time:49606ms step_avg:163.72ms
step:314/1530 train_loss:4.0623 train_time:49771ms step_avg:163.72ms
step:315/1530 train_loss:3.9340 train_time:49936ms step_avg:163.72ms
step:316/1530 train_loss:3.7912 train_time:50102ms step_avg:163.73ms
step:317/1530 train_loss:3.8662 train_time:50268ms step_avg:163.74ms
step:318/1530 train_loss:3.9192 train_time:50432ms step_avg:163.74ms
step:319/1530 train_loss:3.8794 train_time:50598ms step_avg:163.75ms
step:320/1530 train_loss:4.0084 train_time:50764ms step_avg:163.75ms
step:321/1530 train_loss:3.9562 train_time:50929ms step_avg:163.76ms
step:322/1530 train_loss:3.9296 train_time:51093ms step_avg:163.76ms
step:323/1530 train_loss:4.0014 train_time:51260ms step_avg:163.77ms
step:324/1530 train_loss:3.9437 train_time:51425ms step_avg:163.77ms
step:325/1530 train_loss:4.0098 train_time:51590ms step_avg:163.78ms
step:326/1530 train_loss:3.8921 train_time:51755ms step_avg:163.78ms
step:327/1530 train_loss:4.3914 train_time:51921ms step_avg:163.79ms
step:328/1530 train_loss:4.0680 train_time:52086ms step_avg:163.79ms
step:329/1530 train_loss:3.7921 train_time:52251ms step_avg:163.80ms
step:330/1530 train_loss:3.7403 train_time:52416ms step_avg:163.80ms
step:331/1530 train_loss:3.9781 train_time:52582ms step_avg:163.81ms
step:332/1530 train_loss:3.9102 train_time:52748ms step_avg:163.81ms
step:333/1530 train_loss:3.8818 train_time:52912ms step_avg:163.81ms
step:334/1530 train_loss:3.8362 train_time:53076ms step_avg:163.82ms
step:335/1530 train_loss:4.0137 train_time:53242ms step_avg:163.82ms
step:336/1530 train_loss:3.9673 train_time:53407ms step_avg:163.83ms
step:337/1530 train_loss:4.4185 train_time:53572ms step_avg:163.83ms
step:338/1530 train_loss:3.9313 train_time:53738ms step_avg:163.84ms
step:339/1530 train_loss:3.8616 train_time:53903ms step_avg:163.84ms
step:340/1530 train_loss:3.9301 train_time:54068ms step_avg:163.84ms
step:341/1530 train_loss:3.8532 train_time:54234ms step_avg:163.85ms
step:342/1530 train_loss:3.8064 train_time:54402ms step_avg:163.86ms
step:343/1530 train_loss:3.8328 train_time:54570ms step_avg:163.87ms
step:344/1530 train_loss:3.9910 train_time:54736ms step_avg:163.88ms
step:345/1530 train_loss:3.8146 train_time:54906ms step_avg:163.90ms
step:346/1530 train_loss:3.7668 train_time:55073ms step_avg:163.91ms
step:347/1530 train_loss:3.7958 train_time:55243ms step_avg:163.93ms
step:348/1530 train_loss:3.8541 train_time:55410ms step_avg:163.93ms
step:349/1530 train_loss:3.8286 train_time:55577ms step_avg:163.94ms
step:350/1530 train_loss:3.5678 train_time:55747ms step_avg:163.96ms
step:351/1530 train_loss:3.8236 train_time:55916ms step_avg:163.98ms
step:352/1530 train_loss:4.1838 train_time:56085ms step_avg:163.99ms
step:353/1530 train_loss:3.6578 train_time:56253ms step_avg:164.00ms
step:354/1530 train_loss:3.9213 train_time:56419ms step_avg:164.01ms
step:355/1530 train_loss:3.7776 train_time:56588ms step_avg:164.02ms
step:356/1530 train_loss:3.8790 train_time:56755ms step_avg:164.03ms
step:357/1530 train_loss:3.7498 train_time:56925ms step_avg:164.05ms
step:358/1530 train_loss:3.8575 train_time:57092ms step_avg:164.06ms
step:359/1530 train_loss:3.7679 train_time:57262ms step_avg:164.07ms
step:360/1530 train_loss:3.4209 train_time:57431ms step_avg:164.09ms
step:361/1530 train_loss:4.0101 train_time:57599ms step_avg:164.10ms
step:362/1530 train_loss:3.9090 train_time:57768ms step_avg:164.11ms
step:363/1530 train_loss:3.8351 train_time:57934ms step_avg:164.12ms
step:364/1530 train_loss:3.7441 train_time:58102ms step_avg:164.13ms
step:365/1530 train_loss:3.9086 train_time:58271ms step_avg:164.14ms
step:366/1530 train_loss:3.8589 train_time:58440ms step_avg:164.16ms
step:367/1530 train_loss:3.8599 train_time:58606ms step_avg:164.16ms
step:368/1530 train_loss:3.8434 train_time:58773ms step_avg:164.17ms
step:369/1530 train_loss:3.7422 train_time:58942ms step_avg:164.18ms
step:370/1530 train_loss:3.8742 train_time:59109ms step_avg:164.19ms
step:371/1530 train_loss:3.7217 train_time:59276ms step_avg:164.20ms
step:372/1530 train_loss:3.6877 train_time:59445ms step_avg:164.21ms
step:373/1530 train_loss:3.9124 train_time:59612ms step_avg:164.22ms
step:374/1530 train_loss:3.8296 train_time:59780ms step_avg:164.23ms
step:375/1530 train_loss:3.7946 train_time:59949ms step_avg:164.24ms
step:375/1530 val_loss:3.8189 train_time:59997ms step_avg:164.38ms