Update app.py
app.py CHANGED
@@ -144,7 +144,8 @@ def submit_results(task: str, results_json):
         return gr.Warning(f"Error submitting results: {str(e)}")
 
 # Create the demo interface
-with gr.Blocks(css="""
+with gr.Blocks(
+    css="""
     .button-link {
         display: inline-block;
         padding: 0.5rem 1.5rem;
@@ -165,7 +166,8 @@ with gr.Blocks(css="""
         box-shadow: 0 3px 6px rgba(0,0,0,0.16), 0 3px 6px rgba(0,0,0,0.23);
         text-decoration: none;
     }
-""") as demo:
+    """
+).queue(default_concurrency_limit=20) as demo:  # Allow up to 20 concurrent requests by default
 
     gr.Image("./logo.png", show_label=False, container=False)
 
@@ -309,42 +311,58 @@ The goal of the Frugal AI Challenge is to encourage both academic and industry a
         webbrowser.open_new_tab(FORM_URL)
         return gr.Info("Opening submission form in new tab...")
 
-    # Set up event handlers
+    # Set up event handlers with specific queue configurations
     text_evaluate_btn.click(
         lambda url, route: evaluate_model(route.strip("/"), url),
         inputs=[text_space_url, text_route],
-        outputs=[text_accuracy, text_emissions, text_energy, text_results_json]
+        outputs=[text_accuracy, text_emissions, text_energy, text_results_json],
+        concurrency_limit=5,  # Allow 5 concurrent model evaluations
+        concurrency_id="eval_queue"  # Share evaluation queue across tasks
     )
 
     text_submit_btn.click(
         lambda results: submit_results("text", results),
         inputs=[text_results_json],
-        outputs=None
+        outputs=None,
+        concurrency_limit=10,  # Allow 10 concurrent submissions
+        concurrency_id="submit_queue"  # Share submission queue across tasks
     )
 
     image_evaluate_btn.click(
         lambda url, route: evaluate_model(route.strip("/"), url),
         inputs=[image_space_url, image_route],
-        outputs=[image_accuracy, image_emissions, image_energy, image_results_json]
+        outputs=[image_accuracy, image_emissions, image_energy, image_results_json],
+        concurrency_limit=5,  # Share same limit with text evaluation
+        concurrency_id="eval_queue"
    )
 
     image_submit_btn.click(
         lambda results: submit_results("image", results),
         inputs=[image_results_json],
-        outputs=None
+        outputs=None,
+        concurrency_limit=10,
+        concurrency_id="submit_queue"
    )
 
     audio_evaluate_btn.click(
         lambda url, route: evaluate_model(route.strip("/"), url),
         inputs=[audio_space_url, audio_route],
-        outputs=[audio_accuracy, audio_emissions, audio_energy, audio_results_json]
+        outputs=[audio_accuracy, audio_emissions, audio_energy, audio_results_json],
+        concurrency_limit=5,
+        concurrency_id="eval_queue"
    )
 
     audio_submit_btn.click(
         lambda results: submit_results("audio", results),
         inputs=[audio_results_json],
-        outputs=None
+        outputs=None,
+        concurrency_limit=10,
+        concurrency_id="submit_queue"
    )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        share=False
+    )