Commit · df7d7c4
Parent(s): cdf41e7
fix
app.py CHANGED
@@ -76,7 +76,6 @@ llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)
 def get_benchmark_df(benchmark="Succeeded-1xA100-80GB"):
     if llm_perf_dataset_repo:
         llm_perf_dataset_repo.git_pull()
-
     # load data
     benchmark_df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}.csv")
     clusters_df = pd.read_csv("./llm-perf-dataset/Clustered-Open-LLM-Leaderboard.csv")
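For context, a minimal usage sketch of the function touched by this commit. It only mirrors the default argument visible in the hunk above; the actual call site inside app.py is not part of this diff, so treat the snippet as an assumption:

# Hypothetical usage sketch (not part of the commit): load the report for the
# default benchmark shown above and inspect the first rows.
df = get_benchmark_df(benchmark="Succeeded-1xA100-80GB")
print(df.head())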
@@ -99,22 +98,11 @@ def get_benchmark_df(benchmark="Succeeded-1xA100-80GB"):
     merged_df["quantization"] = merged_df["backend.quantization_strategy"].apply(
         lambda x: "BnB.4bit" if x == "bnb" else ("GPTQ.4bit" if x == "gptq" else "None")
     )
-
-    # # distance to 100% score
-    # score_distance = 100 - merged_df["best_score"]
-    # # distance to 0s latency
-    # latency_distance = merged_df["generate.latency(s)"]
-    # # distance to 0MB memory
-    # memory_distance = merged_df["forward.peak_memory(MB)"]
-    # # add perf distance
-    # merged_df["perf_distance"] = (
-    #     score_distance**2 + latency_distance**2 + memory_distance**2
-    # ) ** 0.5
-
     # sort
     merged_df.sort_values(by=SORTING_COLUMN, ascending=SORTING_ASCENDING, inplace=True)
     # drop duplicates
     merged_df.drop_duplicates(subset=NO_DUPLICATES_COLUMNS, inplace=True)
+    merged_df = merged_df[[*ALL_COLUMNS_DATATYPES, "generate.latency(s)"]]
     return merged_df
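The block deleted in the second hunk was already commented out: it computed a Euclidean "perf distance" from each model to the ideal point of 100% score, 0 s latency and 0 MB peak memory. For reference, a standalone sketch of that dropped metric, using the column names from the deleted comments and assuming merged_df already contains them:

import pandas as pd

def add_perf_distance(merged_df: pd.DataFrame) -> pd.DataFrame:
    # Euclidean distance to the ideal point (100% score, 0 s latency, 0 MB memory),
    # as in the commented-out code removed by this commit.
    score_distance = 100 - merged_df["best_score"]
    latency_distance = merged_df["generate.latency(s)"]
    memory_distance = merged_df["forward.peak_memory(MB)"]
    merged_df["perf_distance"] = (
        score_distance**2 + latency_distance**2 + memory_distance**2
    ) ** 0.5
    return merged_df

The single added line restricts the returned frame to a fixed column set. Assuming ALL_COLUMNS_DATATYPES is a list of column names defined elsewhere in app.py (its real contents are not shown in this diff), the selection behaves like this sketch with placeholder names:

import pandas as pd

# Placeholder stand-in for the constant defined elsewhere in app.py.
ALL_COLUMNS_DATATYPES = ["model", "quantization", "best_score"]

# A dummy frame with a few extra columns, for illustration only.
merged_df = pd.DataFrame(
    columns=[*ALL_COLUMNS_DATATYPES, "generate.latency(s)", "backend.name"]
)
# Keep only the configured display columns plus the generation latency column.
merged_df = merged_df[[*ALL_COLUMNS_DATATYPES, "generate.latency(s)"]]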