import gradio as gr
import pandas as pd
import plotly.express as px

# Columns surfaced in the hover tooltip of the FlashAttentionV2 plots.
# Order matters: the hovertemplates below index customdata positionally.
FLASHATTENTIONV2_DATA = [
    # model identity and quality
    "Model 🤗",
    "DType 📥",
    "Backend 🏭",
    "Params (B)",
    "Architecture 🏛️",
    "Open LLM Score (%)",
    # deployment configuration
    "DType 📥",
    "Backend 🏭",
    "Optimization 🛠️",
    "Quantization 🗜️",
    "Optimization 🛠️ FlashAttentionV2",
    # raw latency and throughput measurements
    "Prefill (s)",
    "Prefill (s) FlashAttentionV2",
    "Decode (tokens/s)",
    "Decode (tokens/s) FlashAttentionV2",
    "End-to-End (tokens/s)",
    "End-to-End (tokens/s) FlashAttentionV2",
    # derived speedups
    "Prefill Speedup (%)",
    "Decode Speedup (%)",
]


def get_fa2_df(llm_perf_df):
    copy_df = llm_perf_df.copy()

    # Pair each non-optimized float16 run with its FlashAttentionV2 counterpart.
    original_df = copy_df[(copy_df["Optimization 🛠️"] == "None") & (copy_df["DType 📥"] == "float16")]
    fa2_df = copy_df[(copy_df["Optimization 🛠️"] == "FlashAttentionV2") & (copy_df["DType 📥"] == "float16")]

    fa2_df = pd.merge(
        original_df,
        fa2_df,
        on=["Model 🤗", "Quantization 🗜️"],
        suffixes=["", " FlashAttentionV2"],
    )

    # Prefill is a latency (lower is better), so the baseline goes in the
    # numerator; decode is a throughput (higher is better), so the ratio is
    # inverted below.
    fa2_df["Prefill Speedup (%)"] = (
        (fa2_df["Prefill (s)"] / fa2_df["Prefill (s) FlashAttentionV2"]) * 100
    ).round(2) - 100
    fa2_df["Decode Speedup (%)"] = (
        (fa2_df["Decode (tokens/s) FlashAttentionV2"] / fa2_df["Decode (tokens/s)"]) * 100
    ).round(2) - 100
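
    # Worked example with hypothetical numbers: a baseline prefill of 2.0 s
    # against 1.6 s with FlashAttentionV2 gives (2.0 / 1.6) * 100 - 100 = 25.0,
    # i.e. +25%; a decode rate of 30 tokens/s against a 24 tokens/s baseline
    # gives (30 / 24) * 100 - 100 = 25.0 as well.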

    # Filter out extreme outliers (speedups above 1000%).
    fa2_df = fa2_df[fa2_df["Prefill Speedup (%)"] < 1000]
    fa2_df = fa2_df[fa2_df["Decode Speedup (%)"] < 1000]

    return fa2_df


def get_fa2_decode_fig(llm_perf_df):
    fa2_df = get_fa2_df(llm_perf_df)

    # One box per architecture (with all points shown), colored by quantization scheme.
    decode_fig = px.box(
        fa2_df,
        x="Architecture 🏛️",
        y="Decode Speedup (%)",
        color_discrete_sequence=px.colors.qualitative.Light24,
        custom_data=FLASHATTENTIONV2_DATA,
        color="Quantization 🗜️",
        points="all",
    )

    # Build the hover tooltip from the customdata columns, in declaration order.
    decode_fig.update_traces(
        hovertemplate="<br>".join(
            [f"<b>{column}:</b> %{{customdata[{i}]}}" for i, column in enumerate(FLASHATTENTIONV2_DATA)]
        )
    )
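    # For reference, the generated template starts like:
    # "<b>Model 🤗:</b> %{customdata[0]}<br><b>DType 📥:</b> %{customdata[1]}<br>..."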

    decode_fig.update_layout(
        title={
            "text": "Decode Speedup per Architecture, Compared To Non-Optimized Model",
            "y": 0.95,
            "x": 0.5,
            "xanchor": "center",
            "yanchor": "top",
        },
        xaxis_title="LLM Architecture",
        yaxis_title="Decode Speedup (%)",
        legend_title="Quantization Scheme",
        width=1200,
        height=600,
    )

    return decode_fig


def get_fa2_prefill_fig(llm_perf_df):
    fa2_df = get_fa2_df(llm_perf_df)

    # Same layout as the decode figure, but plotting the prefill speedup.
    prefill_fig = px.box(
        fa2_df,
        x="Architecture 🏛️",
        y="Prefill Speedup (%)",
        color_discrete_sequence=px.colors.qualitative.Light24,
        custom_data=FLASHATTENTIONV2_DATA,
        color="Quantization 🗜️",
        points="all",
    )

    prefill_fig.update_traces(
        hovertemplate="<br>".join(
            [f"<b>{column}:</b> %{{customdata[{i}]}}" for i, column in enumerate(FLASHATTENTIONV2_DATA)]
        )
    )

    prefill_fig.update_layout(
        title={
            "text": "Prefill Speedup per Architecture, Compared To Non-Optimized Model",
            "y": 0.95,
            "x": 0.5,
            "xanchor": "center",
            "yanchor": "top",
        },
        xaxis_title="LLM Architecture",
        yaxis_title="Prefill Speedup (%)",
        legend_title="Quantization Scheme",
        width=1200,
        height=600,
    )

    return prefill_fig


def create_fa2_plots(llm_perf_df):
    # Must be called inside a gr.Blocks context so the components are rendered.
    gr.HTML("👆 Hover over the points 👆 for additional information.", elem_id="text")

    prefill_fig = get_fa2_prefill_fig(llm_perf_df)
    decode_fig = get_fa2_decode_fig(llm_perf_df)

    prefill_plot = gr.components.Plot(value=prefill_fig, elem_id="plot", show_label=False)
    decode_plot = gr.components.Plot(value=decode_fig, elem_id="plot", show_label=False)

    return prefill_plot, decode_plot
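

# A minimal usage sketch, assuming a local CSV export of the llm-perf benchmark
# data that contains the columns listed in FLASHATTENTIONV2_DATA; the file name
# "llm_perf.csv" is hypothetical.
if __name__ == "__main__":
    llm_perf_df = pd.read_csv("llm_perf.csv")

    with gr.Blocks() as demo:
        create_fa2_plots(llm_perf_df)

    demo.launch()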