bgamazay committed
Commit 7bb4986 · verified · 1 Parent(s): 079d204

Update app.py

Files changed (1)
  app.py +21 -144
app.py CHANGED
@@ -8,40 +8,16 @@ import gradio as gr
 from huggingface_hub import HfApi, snapshot_download, ModelInfo, list_models
 from enum import Enum
 
-
 OWNER = "AIEnergyScore"
 COMPUTE_SPACE = f"{OWNER}/launch-computation-example"
-
-
 TOKEN = os.environ.get("DEBUG")
 API = HfApi(token=TOKEN)
 
-
-
-task_mappings = {'automatic speech recognition':'automatic-speech-recognition', 'Object Detection': 'object-detection', 'Text Classification': 'text-classification',
-                 'Image to Text':'image-to-text', 'Question Answering':'question-answering', 'Text Generation': 'text-generation',
-                 'Image Classification':'image-classification', 'Sentence Similarity': 'sentence-similarity',
-                 'Image Generation':'image-generation', 'Summarization':'summarization'}
-@dataclass
-class ModelDetails:
-    name: str
-    display_name: str = ""
-    symbol: str = "" # emoji
-
-def start_compute_space():
-    API.restart_space(COMPUTE_SPACE)
-    gr.Info(f"Okay! {COMPUTE_SPACE} should be running now!")
-
-
-def get_model_size(model_info: ModelInfo):
-    """Gets the model size from the configuration, or the model name if the configuration does not contain the information."""
-    try:
-        model_size = round(model_info.safetensors["total"] / 1e9, 3)
-    except (AttributeError, TypeError):
-        return 0 # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
-    return model_size
-
-def add_docker_eval(zip_file):
+def add_docker_eval_with_agreement(zip_file, agreement):
+    if not agreement:
+        gr.Warning("You must agree to the terms before submitting your energy score data.")
+        return
+
     new_fid_list = zip_file.split("/")
     new_fid = new_fid_list[-1]
     if new_fid.endswith('.zip'):
@@ -52,130 +28,31 @@ def add_docker_eval(zip_file)
             repo_type="dataset",
             commit_message="Adding logs via submission Space.",
             token=TOKEN
-        )
+        )
         gr.Info('Uploaded logs to dataset! We will validate their validity and add them to the next version of the leaderboard.')
     else:
         gr.Info('You can only upload .zip files here!')
 
-
-def add_new_eval(repo_id: str, task: str):
-    model_owner = repo_id.split("/")[0]
-    model_name = repo_id.split("/")[1]
-    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
-    requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
-    requests_dset = requests.to_pandas()
-    model_list = requests_dset[requests_dset['status'] == 'COMPLETED']['model'].tolist()
-    task_models = list(API.list_models(filter=task_mappings[task]))
-    task_model_names = [m.id for m in task_models]
-    if repo_id in model_list:
-        gr.Info('This model has already been run!')
-    elif repo_id not in task_model_names:
-        gr.Info("This model isn't compatible with the chosen task! Pick a different model-task combination")
-    else:
-        # Is the model info correctly filled?
-        try:
-            model_info = API.model_info(repo_id=repo_id)
-            model_size = get_model_size(model_info=model_info)
-            likes = model_info.likes
-        except Exception:
-            gr.Info("Could not find information for model %s" % (model_name))
-            model_size = None
-            likes = None
-
-        gr.Info("Adding request")
-
-        request_dict = {
-            "model": repo_id,
-            "status": "PENDING",
-            "submitted_time": pd.to_datetime(current_time),
-            "task": task_mappings[task],
-            "likes": likes,
-            "params": model_size,
-            "leaderboard_version": "v0",}
-            #"license": license,
-            #"private": False,
-            #}
-
-        print("Writing out request file to dataset")
-        df_request_dict = pd.DataFrame([request_dict])
-        print(df_request_dict)
-        df_final = pd.concat([requests_dset, df_request_dict], ignore_index=True)
-        updated_dset = Dataset.from_pandas(df_final)
-        updated_dset.push_to_hub("AIEnergyScore/requests_debug", split="test", token=TOKEN)
-
-        gr.Info("Starting compute space at %s " % COMPUTE_SPACE)
-        return start_compute_space()
-
-
-def print_existing_models():
-    requests = load_dataset("AIEnergyScore/requests_debug", split="test", token=TOKEN)
-    requests_dset = requests.to_pandas()
-    model_df = requests_dset[['model', 'status']]
-    model_df = model_df[model_df['status'] == 'COMPLETED']
-    return model_df
-
-def highlight_cols(x):
-    df = x.copy()
-    df[df['status'] == 'COMPLETED'] = 'color: green'
-    df[df['status'] == 'PENDING'] = 'color: orange'
-    df[df['status'] == 'FAILED'] = 'color: red'
-    return df
-
-# Applying the style function
-existing_models = print_existing_models()
-formatted_df = existing_models.style.apply(highlight_cols, axis=None)
-
-def get_leaderboard_models():
-    path = r'leaderboard_v0_data/energy'
-    filenames = glob.glob(path + "/*.csv")
-    data = []
-    for filename in filenames:
-        data.append(pd.read_csv(filename))
-    leaderboard_data = pd.concat(data, ignore_index=True)
-    return leaderboard_data[['model','task']]
-
-
 with gr.Blocks() as demo:
     gr.Markdown("# AI Energy Score | Submission Portal")
-    gr.Markdown("### The goal of the AI Energy Score project is to develop an energy-based rating system for AI model deployment that will guide members of the community in choosing models for different tasks based on energy efficiency.", elem_classes="markdown-text")
-    gr.Markdown("### If you want us to evaluate a model hosted on the 🤗 Hub, enter the model ID and choose the corresponding task from the dropdown list below, then click **Run Analysis** to launch the benchmarking process.")
-    gr.Markdown("### If you've used the [Docker file](https://github.com/huggingface/AIEnergyScore/) to run your own evaluation, please submit the resulting log files at the bottom of the page.")
-    gr.Markdown("### The [Project Leaderboard](https://huggingface.co/spaces/AIEnergyScore/Leaderboard) will be updated twice a year.")
-    with gr.Row():
-        with gr.Column():
-            task = gr.Dropdown(
-                choices=list(task_mappings.keys()),
-                label="Choose a benchmark task",
-                value='Text Generation',
-                multiselect=False,
-                interactive=True,
-            )
-        with gr.Column():
-            model_name_textbox = gr.Textbox(label="Model name (user_name/model_name)")
-
-    with gr.Row():
-        with gr.Column():
-            submit_button = gr.Button("Submit for Analysis")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                fn=add_new_eval,
-                inputs=[
-                    model_name_textbox,
-                    task,
-                ],
-                outputs=submission_result,
-            )
+    gr.Markdown("### The goal of the AI Energy Score project is to develop an energy-based rating system for AI model deployment that will guide members of the community in choosing models for different tasks based on energy efficiency.")
+
     with gr.Row():
         with gr.Column():
             with gr.Accordion("Submit log files from a Docker run:", open=False):
                 gr.Markdown("If you've already benchmarked your model using the [Docker file](https://github.com/huggingface/EnergyStarAI/) provided, please upload the **entire run log directory** (in .zip format) below:")
+
+                agreement_checkbox = gr.Checkbox(label="I agree to the following terms:")
+                agreement_text = gr.Markdown("""
+                By checking the box below and submitting your energy score data, you confirm and agree to the following:
+                1. **Public Data Sharing**: You consent to the public sharing of the energy performance data derived from your submission. No additional information related to this model including proprietary configurations will be disclosed.
+                2. **Data Integrity**: You validate that the log files submitted are accurate, unaltered, and generated directly from testing your model as per the specified benchmarking procedures.
+                3. **Model Representation**: You verify that the model tested and submitted is representative of the production-level version of the model, including its level of quantization and any other relevant characteristics impacting energy efficiency and performance.
+                """)
+
                 file_output = gr.File(visible=False)
                 u = gr.UploadButton("Upload a zip file with logs", file_count="single")
-    u.upload(add_docker_eval, u, file_output)
-    with gr.Row():
-        with gr.Column():
-            with gr.Accordion("Models that are in the latest leaderboard version:", open=False, visible=False):
-                gr.Dataframe(get_leaderboard_models())
-            with gr.Accordion("Models that have been benchmarked recently:", open=False, visible=False):
-                gr.Dataframe(formatted_df)
-demo.launch()
+
+    u.upload(add_docker_eval_with_agreement, [u, agreement_checkbox], file_output)
+
+demo.launch()