@@ -35,6 +35,7 @@ class TrainTests(unittest.TestCase):
         r"dataset_path=gs://maxtext-dataset",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
     ],
     "synthetic": [  # tests base config with synthtic dataset
@@ -45,6 +46,7 @@ class TrainTests(unittest.TestCase):
         r"dataset_path=gs://maxtext-dataset",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         "dataset_type=synthetic",
         rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
     ],
@@ -56,6 +58,7 @@ class TrainTests(unittest.TestCase):
         r"dataset_path=gs://maxtext-dataset",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         "per_device_batch_size=0.25",
         "ici_tensor_parallelism=4",
         rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
@@ -68,6 +71,7 @@ class TrainTests(unittest.TestCase):
         r"dataset_path=gs://maxtext-dataset",
         "steps=2",
         "ici_tensor_transpose_parallelism=4",
+        "enable_goodput_recording=false",
         rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
     ],
     "int8": [  # tests base config with int8
@@ -79,6 +83,7 @@ class TrainTests(unittest.TestCase):
         "quantization=int8",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
     ],
     "fp8": [  # tests base config with fp8
@@ -90,6 +95,7 @@ class TrainTests(unittest.TestCase):
         "quantization=fp8",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
     ],
     "nanoo_fp8": [  # tests base config with nanoo_fp8
@@ -101,6 +107,7 @@ class TrainTests(unittest.TestCase):
         "quantization=nanoo_fp8",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
     ],
     "dropout": [  # tests base config with dropout
@@ -111,6 +118,7 @@ class TrainTests(unittest.TestCase):
         r"dataset_path=gs://maxtext-dataset",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         "max_target_length=128",
         "per_device_batch_size=1",
         "dropout_rate=0.02",
@@ -123,6 +131,7 @@ class TrainTests(unittest.TestCase):
         "run_name=runner_test",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         "dataset_type=hf",
         "hf_path=parquet",
         r"hf_train_files=gs://maxtext-dataset/hf/c4/c4-train-00000-of-01637.parquet",
@@ -217,6 +226,7 @@ def test_gpu_cudnn_flash_te(self):
         r"dataset_path=gs://maxtext-dataset",
         "steps=2",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         "attention=cudnn_flash_te",
         "packing=False",
         rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
@@ -235,6 +245,7 @@ def test_gpu_context_parallelism(self):
         r"dataset_path=gs://maxtext-dataset",
         "steps=10",
         "enable_checkpointing=False",
+        "enable_goodput_recording=false",
         "attention=cudnn_flash_te",
         "ici_fsdp_parallelism=2",
         "ici_context_parallelism=2",
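For reference, below is a minimal sketch of how one of the config lists above would be exercised once the new override is in place. The `train_main` entry point, the `PKG_DIR` import location, the base config path, and the leading `None` placeholder are assumptions inferred from the surrounding test file, not part of this diff; setting `enable_goodput_recording=false` presumably just keeps these short runner tests from exercising the goodput-recording path.

# Hypothetical smoke test mirroring the "synthetic" config above (a sketch under
# the assumptions stated in the note, not this PR's code): each config list is an
# argv-style set of key=value overrides handed to the training entry point.
import os
import unittest

from MaxText.globals import PKG_DIR            # assumed location of PKG_DIR
from MaxText.train import main as train_main   # assumed entry point used by TrainTests


class GoodputFlagSketch(unittest.TestCase):

    def test_synthetic_with_goodput_recording_disabled(self):
        argv = [
            None,  # assumed placeholder for the program name (argv[0])
            "configs/base.yml",  # assumed path to the base config
            "run_name=runner_test",
            r"dataset_path=gs://maxtext-dataset",
            "steps=2",
            "enable_checkpointing=False",
            "enable_goodput_recording=false",  # the override added in this change
            "dataset_type=synthetic",
            rf"tokenizer_path={os.path.join(os.path.dirname(PKG_DIR), 'assets', 'tokenizer.llama2')}",
        ]
        train_main(argv)


if __name__ == "__main__":
    unittest.main()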