Update app.py
app.py CHANGED
@@ -81,7 +81,7 @@ methods = list(set(df_results['Unlearned_Methods']))
all_columns = ['Unlearned_Methods','Source', 'Diffusion_Models','Pre-ASR', 'Post-ASR','FID']
show_columns = ['Unlearned_Methods','Source', 'Diffusion_Models','Pre-ASR', 'Post-ASR','FID']
TYPES = ['str', 'markdown', 'str', 'number', 'number', 'number']
-
+files = ['church','garbage','parachute','tench', 'vangogh', 'nidity', 'violence','illgel_activity']
df_results_init = df_results.copy()[show_columns]

def update_table(
@@ -341,187 +341,7 @@ with demo:
leaderboard_table,
)

-
-# shown_columns = gr.CheckboxGroup(
-# choices=[
-# c.name
-# for c in fields(AutoEvalColumn)
-# if not c.hidden and not c.never_hidden
-# ],
-# value=[
-# c.name
-# for c in fields(AutoEvalColumn)
-# if c.displayed_by_default and not c.hidden and not c.never_hidden
-# ],
-# label="Select columns to show",
-# elem_id="column-select",
-# interactive=True,
-# )
-# with gr.Row():
-# deleted_models_visibility = gr.Checkbox(
-# value=False, label="Show gated/private/deleted models", interactive=True
-# )
-# with gr.Column(min_width=320):
-# #with gr.Box(elem_id="box-filter"):
-# filter_columns_type = gr.CheckboxGroup(
-# label="Unlearning types",
-# choices=[t.to_str() for t in ModelType],
-# value=[t.to_str() for t in ModelType],
-# interactive=True,
-# elem_id="filter-columns-type",
-# )
-# filter_columns_precision = gr.CheckboxGroup(
-# label="Precision",
-# choices=[i.value.name for i in Precision],
-# value=[i.value.name for i in Precision],
-# interactive=True,
-# elem_id="filter-columns-precision",
-# )
-# filter_columns_size = gr.CheckboxGroup(
-# label="Model sizes (in billions of parameters)",
-# choices=list(NUMERIC_INTERVALS.keys()),
-# value=list(NUMERIC_INTERVALS.keys()),
-# interactive=True,
-# elem_id="filter-columns-size",
-# )
-
-# leaderboard_table = gr.components.Dataframe(
-# value=leaderboard_df[
-# [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
-# + shown_columns.value
-# ],
-# headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value,
-# datatype=TYPES,
-# elem_id="leaderboard-table",
-# interactive=False,
-# visible=True,
-# )
-
-# # Dummy leaderboard for handling the case when the user uses backspace key
-# hidden_leaderboard_table_for_search = gr.components.Dataframe(
-# value=original_df[COLS],
-# headers=COLS,
-# datatype=TYPES,
-# visible=False,
-# )
-# search_bar.submit(
-# update_table,
-# [
-# hidden_leaderboard_table_for_search,
-# shown_columns,
-# filter_columns_type,
-# filter_columns_precision,
-# filter_columns_size,
-# deleted_models_visibility,
-# search_bar,
-# ],
-# leaderboard_table,
-# )
-# for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size, deleted_models_visibility]:
-# selector.change(
-# update_table,
-# [
-# hidden_leaderboard_table_for_search,
-# shown_columns,
-# filter_columns_type,
-# filter_columns_precision,
-# filter_columns_size,
-# deleted_models_visibility,
-# search_bar,
-# ],
-# leaderboard_table,
-# queue=True,
-# )
-
-# with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
-# gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
-
-# with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-# with gr.Column():
-# with gr.Row():
-# gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-# with gr.Column():
-# with gr.Accordion(
-# f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-# open=False,
-# ):
-# with gr.Row():
-# finished_eval_table = gr.components.Dataframe(
-# value=finished_eval_queue_df,
-# headers=EVAL_COLS,
-# datatype=EVAL_TYPES,
-# row_count=5,
-# )
-# with gr.Accordion(
-# f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-# open=False,
-# ):
-# with gr.Row():
-# running_eval_table = gr.components.Dataframe(
-# value=running_eval_queue_df,
-# headers=EVAL_COLS,
-# datatype=EVAL_TYPES,
-# row_count=5,
-# )
-
-# with gr.Accordion(
-# f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-# open=False,
-# ):
-# with gr.Row():
-# pending_eval_table = gr.components.Dataframe(
-# value=pending_eval_queue_df,
-# headers=EVAL_COLS,
-# datatype=EVAL_TYPES,
-# row_count=5,
-# )
-# with gr.Row():
-# gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-# with gr.Row():
-# with gr.Column():
-# model_name_textbox = gr.Textbox(label="Model name")
-# revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-# model_type = gr.Dropdown(
-# choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-# label="Model type",
-# multiselect=False,
-# value=None,
-# interactive=True,
-# )
-
-# with gr.Column():
-# precision = gr.Dropdown(
-# choices=[i.value.name for i in Precision if i != Precision.Unknown],
-# label="Precision",
-# multiselect=False,
-# value="float16",
-# interactive=True,
-# )
-# weight_type = gr.Dropdown(
-# choices=[i.value.name for i in WeightType],
-# label="Weights type",
-# multiselect=False,
-# value="Original",
-# interactive=True,
-# )
-# base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-# submit_button = gr.Button("Submit Eval")
-# submission_result = gr.Markdown()
-# submit_button.click(
-# add_new_eval,
-# [
-# model_name_textbox,
-# base_model_name_textbox,
-# revision_name_textbox,
-# precision,
-# weight_type,
-# model_type,
-# ],
-# submission_result,
-# )
+

with gr.Row():
with gr.Accordion("📙 Citation", open=True):
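The new `files` list enumerates the unlearning targets tracked by the leaderboard (names copied verbatim from the commit). Below is a minimal sketch, assuming a hypothetical `load_results` helper and a `results/<target>.csv` layout that are not part of app.py, of how such a list could drive a per-target results table in Gradio:

```python
import pandas as pd
import gradio as gr

# Target list copied verbatim from the commit (spellings preserved as-is).
files = ['church', 'garbage', 'parachute', 'tench', 'vangogh',
         'nidity', 'violence', 'illgel_activity']

# Column names and display types copied from the surrounding context in app.py.
show_columns = ['Unlearned_Methods', 'Source', 'Diffusion_Models',
                'Pre-ASR', 'Post-ASR', 'FID']
TYPES = ['str', 'markdown', 'str', 'number', 'number', 'number']

def load_results(target: str) -> pd.DataFrame:
    # Hypothetical helper: assumes one CSV of leaderboard results per target.
    return pd.read_csv(f"results/{target}.csv")[show_columns]

with gr.Blocks() as demo:
    target = gr.Dropdown(choices=files, value=files[0], label="Unlearning target")
    table = gr.Dataframe(value=load_results(files[0]),
                         headers=show_columns, datatype=TYPES, interactive=False)
    # Reload the table whenever a different target is selected.
    target.change(load_results, inputs=target, outputs=table)

if __name__ == "__main__":
    demo.launch()
```

app.py defines its own `update_table` and `leaderboard_table`; the sketch only illustrates the general dropdown-to-Dataframe pattern, not the app's actual wiring.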