# source: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/blob/main/src/utils_display.py
import math
from dataclasses import dataclass

import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from transformers import AutoConfig
# These classes hold the user-facing column names, so that renaming a column
# only requires a change here rather than throughout the code.
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False


def fields(raw_class):
    return [
        v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"
    ]
class AutoEvalColumn:  # Auto evals column
    model_type_symbol = ColumnContent("type", "str", True)
    model = ColumnContent("model", "markdown", True)
    complete_score = ColumnContent("complete", "number", True)
    instruct_score = ColumnContent("instruct", "number", True)
    elo_mle = ColumnContent("elo_mle", "number", True)
    dummy = ColumnContent("model", "str", True)
    size = ColumnContent("size", "number", True)
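
# Illustrative example (not in the original file): fields() collects the
# ColumnContent entries defined on a column class, skipping dunder attributes.
#   [c.name for c in fields(AutoEvalColumn)]
#   # -> ['type', 'model', 'complete', 'instruct', 'elo_mle', 'model', 'size']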
def model_hyperlink(link, model_name):
    return f'<a target="_blank" href="{link}" style="color: var(--link-text-color); text-decoration: underline;text-decoration-style: dotted;">{model_name}</a>'


def make_clickable_names(df):
    df["model"] = df.apply(
        lambda row: model_hyperlink(row["link"], row["model"]), axis=1
    )
    return df
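
# Illustrative usage (not in the original file): make_clickable_names expects a
# DataFrame with "model" and "link" columns; the sample values below are made up.
#   df = pd.DataFrame({"model": ["org/model"], "link": ["https://huggingface.co/org/model"]})
#   df = make_clickable_names(df)  # the "model" column now holds an HTML anchor tag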
def plot_elo_mle(df):
    fig = px.scatter(
        df,
        x="model",
        y="rating",
        error_y="error_y",
        error_y_minus="error_y_minus",
        # title="Bootstrap of Elo MLE Estimates (BigCodeBench-Complete)"
    )
    fig.update_layout(
        xaxis_title="Model",
        yaxis_title="Rating",
        autosize=True,
        # width=1300,
        # height=900,
    )
    return fig
def plot_solve_rate(df, task, rows=30, cols=38):
    keys = df["task_id"]
    values = np.array(df["solve_rate"])
    n = len(values)

    # If no grid shape is given, pick a near-square layout that fits every task.
    if rows is None or cols is None:
        cols = int(math.sqrt(n))
        rows = cols if cols * cols >= n else cols + 1
        while rows * cols < n:
            cols += 1

    # Pad the data with NaN / empty strings to fill the grid, then reshape to 2D.
    values = np.pad(values, (0, rows * cols - n), 'constant', constant_values=np.nan).reshape((rows, cols))
    keys = np.pad(keys, (0, rows * cols - n), 'constant', constant_values='').reshape((rows, cols))

    # Build per-cell hover text showing the task id and its solve rate.
    hover_text = np.empty_like(values, dtype=object)
    for i in range(rows):
        for j in range(cols):
            if not np.isnan(values[i, j]):
                hover_text[i, j] = f"{keys[i, j]}<br>Solve Rate: {values[i, j]:.2f}"
            else:
                hover_text[i, j] = "NaN"

    fig = go.Figure(data=go.Heatmap(
        z=values,
        text=hover_text,
        hoverinfo='text',
        colorscale='teal',
        zmin=0,
        zmax=100,
    ))
    fig.update_layout(
        title=f'BigCodeBench-{task}',
        xaxis_nticks=cols,
        yaxis_nticks=rows,
        xaxis=dict(showticklabels=False),
        yaxis=dict(showticklabels=False),
        autosize=True,
        # width=760,
        # height=600,
    )
    return fig
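
# Illustrative usage (not in the original file): plot_solve_rate expects a
# DataFrame with "task_id" and "solve_rate" (percentage, 0-100) columns; the
# values below are invented.
#   df = pd.DataFrame({"task_id": ["task/0", "task/1"], "solve_rate": [87.5, 12.5]})
#   fig = plot_solve_rate(df, task="Complete")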
def styled_error(error):
    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"


def styled_warning(warn):
    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"


def styled_message(message):
    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>"


def has_no_nan_values(df, columns):
    return df[columns].notna().all(axis=1)


def has_nan_values(df, columns):
    return df[columns].isna().any(axis=1)
def is_model_on_hub(model_name: str, revision: str) -> tuple[bool, str | None]:
    try:
        AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=False)
        return True, None
    except ValueError:
        return (
            False,
            "needs to be launched with `trust_remote_code=True`. For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.",
        )
    except Exception as e:
        print(f"Could not get the model config from the hub: {e}")
        return False, "was not found on the hub!"
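

# Minimal smoke-test sketch (not part of the original module). The model names,
# ratings, and error bars below are invented, and is_model_on_hub needs network
# access to reach the Hugging Face Hub.
if __name__ == "__main__":
    import pandas as pd

    elo_df = pd.DataFrame(
        {
            "model": ["model-a", "model-b"],
            "rating": [1050.0, 980.0],
            "error_y": [15.0, 12.0],
            "error_y_minus": [14.0, 11.0],
        }
    )
    # Render the Elo scatter plot to a standalone HTML file for inspection.
    plot_elo_mle(elo_df).write_html("elo_mle_demo.html")

    on_hub, message = is_model_on_hub("gpt2", revision="main")
    print(on_hub, message)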