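# llm-perf-leaderboard: Gradio app that loads inference benchmark reports
# from the llm-perf-dataset repo and displays them as sortable leaderboards.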
import os

import gradio as gr
import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler

from src.assets.text_content import TITLE, INTRODUCTION_TEXT, CITATION_BUTTON_LABEL, CITATION_BUTTON_TEXT
from src.assets.css_html_js import custom_css, get_window_url_params
from src.utils import restart_space, load_dataset_repo, make_clickable_model

LLM_PERF_LEADERBOARD_REPO = "optimum/llm-perf-leaderboard"
LLM_PERF_DATASET_REPO = "optimum/llm-perf-dataset"
OPTIMUM_TOKEN = os.environ.get("OPTIMUM_TOKEN", None)
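
# Map raw report columns to the headers displayed in the tables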
COLUMNS_MAPPING = {
    "model": "Model 🤗",
    "backend.name": "Backend 🏭",
    "backend.torch_dtype": "Load Datatype 📥",
    "generate.latency(s)": "Latency (s) ⬇️",
    "generate.throughput(tokens/s)": "Throughput (tokens/s) ⬆️",
}
COLUMNS_DATATYPES = ["markdown", "str", "str", "number", "number"]
SORTING_COLUMN = ["Throughput (tokens/s) ⬆️"]
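
# Local clone of the dataset repo (assumed cloned/loaded by load_dataset_repo);
# it is git-pulled before each read in get_benchmark_df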
llm_perf_dataset_repo = load_dataset_repo(LLM_PERF_DATASET_REPO, OPTIMUM_TOKEN)


def get_benchmark_df(benchmark):
    # pull the latest benchmark reports before reading
    if llm_perf_dataset_repo:
        llm_perf_dataset_repo.git_pull()

    # load the benchmark's inference report
    df = pd.read_csv(f"./llm-perf-dataset/reports/{benchmark}/inference_report.csv")
    # turn model names into clickable Hub links
    df["model"] = df["model"].apply(make_clickable_model)
    # keep only the displayed columns, renamed to their display names
    df = df[list(COLUMNS_MAPPING.keys())]
    df.rename(columns=COLUMNS_MAPPING, inplace=True)
    # sort by descending throughput
    df.sort_values(by=SORTING_COLUMN, ascending=False, inplace=True)

    return df
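
# e.g. get_benchmark_df(benchmark="1xA100-80GB") reads
# ./llm-perf-dataset/reports/1xA100-80GB/inference_report.csv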


# Define demo interface
demo = gr.Blocks(css=custom_css)

with demo:
    gr.HTML(TITLE)
    gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")

    with gr.Tabs(elem_classes="tab-buttons") as tabs:
        with gr.TabItem("🖥️ A100-80GB Benchmark 🏋️", elem_id="A100-benchmark", id=0):
            SINGLE_A100_TEXT = """<h3>Single-GPU (1xA100):</h3>
            <ul>
                <li>Singleton Batch (1)</li>
                <li>Thousand Tokens (1000)</li>
            </ul>
            """
            gr.HTML(SINGLE_A100_TEXT)

            single_A100_df = get_benchmark_df(benchmark="1xA100-80GB")
            leaderboard_table_lite = gr.components.Dataframe(
                value=single_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="1xA100-table",
            )

            MULTI_A100_TEXT = """<h3>Multi-GPU (4xA100):</h3>
            <ul>
                <li>Singleton Batch (1)</li>
                <li>Thousand Tokens (1000)</li>
            </ul>
            """
            gr.HTML(MULTI_A100_TEXT)

            multi_A100_df = get_benchmark_df(benchmark="4xA100-80GB")
            leaderboard_table_full = gr.components.Dataframe(
                value=multi_A100_df,
                datatype=COLUMNS_DATATYPES,
                headers=list(COLUMNS_MAPPING.values()),
                elem_id="4xA100-table",
            )

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
            ).style(show_copy_button=True)

# Restart the space every hour so the leaderboard picks up fresh benchmark data
scheduler = BackgroundScheduler()
scheduler.add_job(
    restart_space,
    "interval",
    seconds=3600,
    args=[LLM_PERF_LEADERBOARD_REPO, OPTIMUM_TOKEN],
)
scheduler.start()

# Launch demo
demo.queue(concurrency_count=40).launch()