@@ -8,7 +8,6 @@
 from matplotlib import cm
 from matplotlib.colors import Colormap
 import numpy as np
-from scipy.stats import rankdata
 from sklearn.metrics import mean_absolute_error, mean_squared_error
 from yaml import safe_load
 
@@ -145,7 +144,7 @@ def calc_metric_scores(
     for row in metrics_scores:
         for key, value in row.items():
             # Value may be ``None`` if missing for a benchmark
-            if key not in {"MLIP", "Score", "Rank", "id"} and value is not None:
+            if key not in {"MLIP", "Score", "id"} and value is not None:
                 if cleaned_thresholds is None or key not in cleaned_thresholds:
                     row[key] = value
                     continue
@@ -194,7 +193,7 @@ def calc_table_scores(
         weights_list = []
         for key, value in metrics_row.items():
             # Value may be ``None`` if missing for a benchmark
-            if key not in {"MLIP", "Score", "Rank", "id"} and value is not None:
+            if key not in {"MLIP", "Score", "id"} and value is not None:
                 scores_list.append(scores_row[key])
                 weights_list.append(weights.get(key, 1.0))
 
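How the collected scores_list and weights_list combine into the "Score" column is outside this hunk; as a point of reference, a weighted mean is the natural aggregation and can be sketched as below (the helper name and the use of numpy here are assumptions, not the module's actual code):

import numpy as np

def weighted_score(scores: list[float], weights: list[float]) -> float | None:
    # Hypothetical aggregation: weight-averaged per-metric scores
    if not scores:
        return None
    return float(np.average(scores, weights=weights))

# Two metrics, the second weighted half as much:
print(weighted_score([0.9, 0.5], [1.0, 0.5]))  # 0.7666...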
@@ -210,36 +209,6 @@ def calc_table_scores(
     return metrics_data
 
 
-def calc_ranks(metrics_data: list[dict]) -> list[dict]:
-    """
-    Calculate rank for each model and add to table data.
-
-    Parameters
-    ----------
-    metrics_data
-        Rows of data containing model name, metric values, and Score.
-        The "Score" column is used to calculate the rank, with the highest score
-        ranked 1.
-
-    Returns
-    -------
-    list[dict]
-        Rows of data with rank for each model added.
-    """
-    # If a score is None, set to NaN for ranking purposes, but do not rank
-    ranked_scores = rankdata(
-        [x["Score"] if x.get("Score") is not None else np.nan for x in metrics_data],
-        nan_policy="omit",
-        method="max",
-    )
-    for i, row in enumerate(metrics_data):
-        if np.isnan(ranked_scores[i]):
-            row["Rank"] = None
-        else:
-            row["Rank"] = len(ranked_scores) - int(ranked_scores[i]) + 1
-    return metrics_data
-
-
 def get_table_style(
     data: list[TableRow],
     *,
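For reference, the removed calc_ranks assigned rank 1 to the highest "Score" by inverting scipy's ascending ranks. A self-contained rerun of that recipe on toy data (variable names here are illustrative):

from scipy.stats import rankdata

scores = [0.9, 0.5, 0.7]
ranked = rankdata(scores, method="max")  # ascending: array([3, 1, 2])
# Invert so the highest score gets rank 1
ranks = [len(ranked) - int(r) + 1 for r in ranked]
print(ranks)  # [1, 3, 2]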
@@ -339,10 +308,7 @@ def rgba_from_val(val: float, vmin: float, vmax: float, cmap: Colormap) -> str:
 
         # Use thresholds
         if normalized:
-            if col != "Rank":
-                min_value, max_value = 1, 0
-            else:
-                min_value, max_value = 1, len(numeric_values)
+            min_value, max_value = 1, 0
         else:
             min_value = min(numeric_values)
             max_value = max(numeric_values)
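With normalized scores the bounds are deliberately inverted (min_value=1, max_value=0) so that a score of 1 lands at the favourable end of the colormap. The body of rgba_from_val is not shown in this diff; a minimal sketch consistent with its signature, assuming manual normalization (matplotlib's Normalize rejects vmin > vmax), might be:

from matplotlib import cm
from matplotlib.colors import Colormap

def rgba_from_val(val: float, vmin: float, vmax: float, cmap: Colormap) -> str:
    # Manual normalization: vmin > vmax simply inverts the scale
    frac = (val - vmin) / (vmax - vmin)
    frac = min(max(frac, 0.0), 1.0)  # clamp to the colormap's domain
    r, g, b, a = cmap(frac)
    return f"rgba({round(r * 255)}, {round(g * 255)}, {round(b * 255)}, {a})"

# A normalized score of 1 maps to the start of the colormap
print(rgba_from_val(1.0, 1, 0, cm.viridis))  # rgba(68, 1, 84, 1.0)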
@@ -370,13 +336,13 @@ def rgba_from_val(val: float, vmin: float, vmax: float, cmap: Colormap) -> str:
     return style_data_conditional
 
 
-def update_score_rank_style(
+def update_score_style(
     data: list[MetricRow],
     weights: dict[str, float] | None = None,
     thresholds: Thresholds | None = None,
 ) -> tuple[list[MetricRow], list[TableRow]]:
     """
-    Update table scores, ranks, and table styles.
+    Update table scores and table styles.
 
     Parameters
     ----------
@@ -395,7 +361,6 @@ def update_score_rank_style(
     """
     weights = clean_weights(weights)
     data = calc_table_scores(data, weights, thresholds)
-    data = calc_ranks(data)
    scored_data = calc_metric_scores(data, thresholds)
    style = get_table_style(data, scored_data=scored_data)
    return data, style
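After this change the public entry point computes scores and styles only. A hypothetical call site (row contents, weights, and column names are illustrative, not from the source):

rows = [
    {"MLIP": "model-a", "id": 1, "Energy MAE": 0.02},
    {"MLIP": "model-b", "id": 2, "Energy MAE": 0.05},
]
# "Score" is added by calc_table_scores; no "Rank" column is produced
data, style = update_score_style(rows, weights={"Energy MAE": 2.0})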