[email protected]
commited on
Commit
Β·
c4a1d6e
1
Parent(s):
3104cdf
update
app.py CHANGED
@@ -21,6 +21,7 @@ from src.display.utils import (
     EVAL_COLS,
     EVAL_TYPES,
     AutoEvalColumn,
+    AutoEvalColumnAsset,
     ModelType,
     fields,
     WeightType,
@@ -96,31 +97,31 @@ def init_perf_plot(df):
     )
     return fig
 
-def init_leaderboard(dataframe):
+def init_leaderboard(dataframe, auto_eval_col_class):
     if dataframe is None or dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
     return Leaderboard(
         value=dataframe,
-        datatype=[c.type for c in fields(AutoEvalColumn)],
+        datatype=[c.type for c in fields(auto_eval_col_class)],
         select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
+            default_selection=[c.name for c in fields(auto_eval_col_class) if c.displayed_by_default],
+            cant_deselect=[c.name for c in fields(auto_eval_col_class) if c.never_hidden],
             label="Select Columns to Display:",
         ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
-        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
+        search_columns=[auto_eval_col_class.model.name, auto_eval_col_class.license.name],
+        hide_columns=[c.name for c in fields(auto_eval_col_class) if c.hidden],
         filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+            ColumnFilter(auto_eval_col_class.model_type.name, type="checkboxgroup", label="Model types"),
+            ColumnFilter(auto_eval_col_class.precision.name, type="checkboxgroup", label="Precision"),
             ColumnFilter(
-                AutoEvalColumn.params.name,
+                auto_eval_col_class.params.name,
                 type="slider",
                 min=0.01,
                 max=150,
                 label="Select the number of parameters (B)",
             ),
             ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=False
+                auto_eval_col_class.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=False
             ),
         ],
         bool_checkboxgroup_label="Hide models",
@@ -135,9 +136,12 @@ with demo:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
-            leaderboard = init_leaderboard(LEADERBOARD_DF)
+            leaderboard = init_leaderboard(LEADERBOARD_DF, AutoEvalColumn)
+
+        with gr.TabItem("🏅 Asset Benchmark", elem_id="llm-benchmark-asset-tab-table", id=1):
+            leaderboard = init_leaderboard(ASSET_LEADERBOARD_DF, AutoEvalColumnAsset)
 
-        with gr.TabItem("📈 Performance Plot", elem_id="llm-benchmark-tab-table", id=1):
+        with gr.TabItem("📈 Performance Plot", elem_id="llm-benchmark-tab-table", id=2):
             print(LEADERBOARD_DF.columns)
             # gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
             with gr.Row():
@@ -147,10 +151,10 @@ with demo:
                 show_label=False,
             )
 
-        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
+        with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=3):
            gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=4):
             with gr.Column():
                 with gr.Row():
                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
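The refactor makes init_leaderboard duck-typed in its second argument: it only reads fields() over column descriptors exposing name, type, displayed_by_default, never_hidden, and hidden, plus the class attributes model, license, model_type, precision, params, and still_on_hub. Below is a minimal sketch of a column class satisfying that contract, assuming the dataclass-descriptor pattern of the Hugging Face demo-leaderboard template; the concrete column labels and the local fields() helper are hypothetical stand-ins, not taken from src.display.utils.

from dataclasses import dataclass


@dataclass
class ColumnContent:
    """Descriptor for one leaderboard column."""
    name: str                          # header shown in the table
    type: str                          # gradio dtype, e.g. "str", "number", "markdown"
    displayed_by_default: bool = True  # part of the default column selection
    never_hidden: bool = False         # cannot be deselected in SelectColumns
    hidden: bool = False               # excluded from the rendered table


class AutoEvalColumnAsset:
    # Hypothetical columns: init_leaderboard only requires that these six
    # attributes exist and that fields() can enumerate their descriptors.
    model = ColumnContent("Model", "markdown", never_hidden=True)
    license = ColumnContent("License", "str", displayed_by_default=False)
    model_type = ColumnContent("Type", "str", displayed_by_default=False)
    precision = ColumnContent("Precision", "str", displayed_by_default=False)
    params = ColumnContent("#Params (B)", "number", displayed_by_default=False)
    still_on_hub = ColumnContent("Available on the hub", "bool", hidden=True)


def fields(col_class):
    """Stand-in for src.display.utils.fields: list a class's column descriptors."""
    return [v for v in vars(col_class).values() if isinstance(v, ColumnContent)]


# The expressions init_leaderboard builds from the class:
datatypes = [c.type for c in fields(AutoEvalColumnAsset)]            # datatype=
search = [AutoEvalColumnAsset.model.name, AutoEvalColumnAsset.license.name]
hidden = [c.name for c in fields(AutoEvalColumnAsset) if c.hidden]   # hide_columns=

Under that contract, each additional leaderboard costs one call per tab, as the diff does with init_leaderboard(ASSET_LEADERBOARD_DF, AutoEvalColumnAsset).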