@@ -146,7 +146,7 @@ def _get_model(platform: str,
     xgb.Booster model file.
     """
     model_path = _get_model_path(platform, model, variant)
-    logger.info('Loading model from: %s', model_path)
+    logger.debug('Loading model from: %s', model_path)
     xgb_model = xgb.Booster()
     xgb_model.load_model(model_path)
     return xgb_model
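For reference, `_get_model` is a thin wrapper around the standard XGBoost load sequence. A minimal standalone sketch, where the path is a hypothetical stand-in for whatever `_get_model_path` resolves:

```python
import xgboost as xgb

# Hypothetical example path, not the repo's actual model location.
model_path = 'models/onprem/xgboost.json'
xgb_model = xgb.Booster()
xgb_model.load_model(model_path)
```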
@@ -290,7 +290,7 @@ def _predict(
         if any(input_df['fraction_supported'] != 1.0)
         else 'raw'
     )
-    logger.info('Predicting dataset (%s): %s', filter_str, dataset)
+    logger.debug('Predicting dataset (%s): %s', filter_str, dataset)
     features, feature_cols, label_col = extract_model_features(input_df, {'default': split_fn})
     # note: dataset name is already stored in the 'appName' field
     try:
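The `filter_str` expression only labels the prediction mode for the log message. A small self-contained sketch of the same pattern, assuming a pandas DataFrame with a `fraction_supported` column as in the hunk:

```python
import pandas as pd

# 'filtered' when any app has partially supported operators, else 'raw'
input_df = pd.DataFrame({'fraction_supported': [1.0, 0.8]})
filter_str = (
    'filtered'
    if any(input_df['fraction_supported'] != 1.0)
    else 'raw'
)
print(filter_str)  # -> 'filtered'
```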
@@ -345,7 +345,7 @@ def _read_dataset_scores(
             nan_df['model'] + '/' + nan_df['platform'] + '/' + nan_df['dataset']
         )
         keys = list(nan_df['key'].unique())
-        logger.warning('Dropped rows w/ NaN values from: %s: %s', eval_dir, keys)
+        logger.debug('Dropped rows w/ NaN values from: %s: %s', eval_dir, keys)

     return df

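Both `_read_dataset_scores` and `_read_platform_scores` use the same reporting idiom before dropping rows: build a `model/platform/dataset` key per NaN row and log the unique keys. A sketch, with an illustrative `score` column standing in for the real metric columns:

```python
import pandas as pd

df = pd.DataFrame({
    'model': ['xgboost', 'xgboost'],
    'platform': ['onprem', 'dataproc'],
    'dataset': ['nds', 'tpcds'],
    'score': [0.95, float('nan')],  # illustrative metric column
})
nan_df = df[df['score'].isna()].copy()
nan_df['key'] = nan_df['model'] + '/' + nan_df['platform'] + '/' + nan_df['dataset']
keys = list(nan_df['key'].unique())  # e.g. ['xgboost/dataproc/tpcds']
df = df.dropna(subset=['score'])
```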
@@ -395,7 +395,7 @@ def _read_platform_scores(
             nan_df['model'] + '/' + nan_df['platform'] + '/' + nan_df['dataset']
         )
         keys = list(nan_df['key'].unique())
-        logger.warning('Dropped rows w/ NaN values from: %s: %s', eval_dir, keys)
+        logger.debug('Dropped rows w/ NaN values from: %s: %s', eval_dir, keys)

     # compute accuracy by platform
     scores = {}
@@ -507,7 +507,7 @@ def train(
     for ds_name, ds_meta in datasets.items():
         if 'split_function' in ds_meta:
             plugin_path = ds_meta['split_function']
-            logger.info('Using split function for %s dataset from plugin: %s', ds_name, plugin_path)
+            logger.debug('Using split function for %s dataset from plugin: %s', ds_name, plugin_path)
             plugin = load_plugin(plugin_path)
             split_functions[ds_name] = plugin.split_function

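Here the dataset metadata can point at a plugin file providing a custom train/test split. One plausible shape for such a plugin and its loader, shown purely as an assumption for illustration (the project's `load_plugin` may differ):

```python
import importlib.util
import pandas as pd

# Hypothetical loader: import a module object from a file path.
def load_plugin(plugin_path: str):
    spec = importlib.util.spec_from_file_location('plugin', plugin_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# Hypothetical plugin module contents (e.g. my_split_plugin.py): it must
# expose a `split_function` that tags each row with a split assignment.
def split_function(df: pd.DataFrame) -> pd.DataFrame:
    df = df.copy()
    cutoff = int(len(df) * 0.8)
    # the 'split' column name is illustrative
    df['split'] = ['train'] * cutoff + ['test'] * (len(df) - cutoff)
    return df
```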
@@ -613,7 +613,7 @@ def predict(
         'platform': platform,
     }

-    logger.info('Loading dataset: %s', dataset_name)
+    logger.debug('Loading dataset: %s', dataset_name)
     profile_df = load_profiles(
         datasets=datasets,
         node_level_supp=node_level_supp,
@@ -655,7 +655,7 @@ def predict(
         if node_level_supp is not None and any(profile_df['fraction_supported'] != 1.0)
         else 'raw'
     )
-    logger.info('Predicting dataset (%s): %s', filter_str, dataset_name)
+    logger.debug('Predicting dataset (%s): %s', filter_str, dataset_name)

     try:
         features_list = []
@@ -684,17 +684,17 @@ def predict(
     if output_info:
         # save features for troubleshooting
         output_file = output_info['features']['path']
-        logger.info('Writing features to: %s', output_file)
+        logger.debug('Writing features to: %s', output_file)
         features.to_csv(output_file, index=False)

         feature_importance, shapley_values = compute_shapley_values(xgb_model, features)

         output_file = output_info['featureImportance']['path']
-        logger.info('Writing shapley feature importances to: %s', output_file)
+        logger.debug('Writing shapley feature importances to: %s', output_file)
         feature_importance.to_csv(output_file)

         output_file = output_info['shapValues']['path']
-        logger.info('Writing shapley values to: %s', output_file)
+        logger.debug('Writing shapley values to: %s', output_file)
         shapley_values.to_csv(output_file, index=False)

     # compute per-app speedups
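`compute_shapley_values` pairs naturally with XGBoost's built-in SHAP support. A plausible implementation sketch (not necessarily the project's actual helper), using `pred_contribs=True`, whose output includes a trailing bias column:

```python
import pandas as pd
import xgboost as xgb

def compute_shapley_values(xgb_model: xgb.Booster, features: pd.DataFrame):
    feature_cols = xgb_model.feature_names
    dmat = xgb.DMatrix(features[feature_cols])
    contribs = xgb_model.predict(dmat, pred_contribs=True)
    # drop the bias term in the last column
    shap_values = pd.DataFrame(contribs[:, :-1], columns=feature_cols)
    feature_importance = shap_values.abs().mean().sort_values(ascending=False)
    return feature_importance, shap_values
```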
@@ -853,10 +853,10 @@ def evaluate(
         plugin = load_plugin(plugin_path)
         split_fn = plugin.split_function

-    logger.info('Loading qualification tool CSV files.')
+    logger.debug('Loading qualification tool CSV files.')
     node_level_supp, qual_tool_output, _ = _get_qual_data(qual_dir)

-    logger.info('Loading profiler tool CSV files.')
+    logger.debug('Loading profiler tool CSV files.')
     profile_df = load_profiles(datasets, profile_dir)  # w/ GPU rows
     filtered_profile_df = load_profiles(
         datasets, profile_dir, node_level_supp, qual_tool_filter, qual_tool_output
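Net effect of the change: these per-run progress messages are demoted from INFO (or WARNING) to DEBUG, so default output gets quieter, while the lazy `%s` formatting keeps the demoted calls essentially free when debug logging is off. To see the messages again during troubleshooting, raise the logger's level; a sketch where the logger name is a placeholder for whichever logger the qualx modules actually use:

```python
import logging

logging.basicConfig(level=logging.INFO)
# Opt back in to the verbose messages for this subsystem only.
logging.getLogger('qualx').setLevel(logging.DEBUG)
```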