TITLE = """<h1 align="center" id="space-title">🤗 Open LLM-Perf Leaderboard 🏋️</h1>"""


INTRODUCTION_TEXT = f"""
The 🤗 Open LLM-Perf Leaderboard 🏋️ aims to benchmark the performance (latency & throughput) of Large Language Models (LLMs) across different hardware, backends, and optimizations using [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark) and [Optimum](https://github.com/huggingface/optimum) flavors.

Anyone from the community can request a model or a hardware+backend+optimization configuration for automated benchmarking:

- Model requests should be made in the [🤗 Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard); models are added to the 🤗 Open LLM-Perf Leaderboard 🏋️ automatically once they are publicly available, since we only benchmark models that already have an evaluation score.

- Hardware+Backend+Optimization requests should be made in the 🤗 Open LLM-Perf Leaderboard 🏋️ [community discussions](https://huggingface.co/spaces/optimum/llm-perf-leaderboard/discussions), where their relevance and feasibility can be discussed openly.
"""


SINGLE_A100_TEXT = """<h3>Single-GPU Benchmark (1xA100):</h3>
<ul>
<li>Singleton Batch (batch size of 1)</li>
<li>Thousand Tokens (1000 generated tokens)</li>
</ul>
"""


CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results."

CITATION_BUTTON_TEXT = r"""@misc{open-llm-perf-leaderboard,
  author = {Ilyas Moutawwakil},
  title = {Open LLM-Perf Leaderboard},
  year = {2023},
  publisher = {Hugging Face},
  howpublished = "\url{https://huggingface.co/spaces/optimum/llm-perf-leaderboard}",
}
@software{optimum-benchmark,
  author = {Ilyas Moutawwakil},
  publisher = {Hugging Face},
  title = {Optimum-Benchmark: A framework for benchmarking the performance of Transformers models with different hardware, backends and optimizations.},
  url = {https://github.com/huggingface/optimum-benchmark},
}
"""
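
# Below is a minimal sketch, assuming these constants are consumed by the
# Space's Gradio app (e.g. an app.py); the layout, labels, and component
# choices are illustrative, not the leaderboard's actual implementation.
if __name__ == "__main__":
    import gradio as gr  # imported here so the constants module has no hard dependency

    with gr.Blocks() as demo:
        gr.HTML(TITLE)                  # leaderboard title banner
        gr.Markdown(INTRODUCTION_TEXT)  # introduction and request instructions
        gr.HTML(SINGLE_A100_TEXT)       # benchmark settings summary
        # results tables / plots would normally be rendered here
        with gr.Accordion("Citation", open=False):
            gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                lines=12,
                elem_id="citation-block",
            )

    demo.launch()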