Commit 75b9622 by Joschka Strueber
Parent(s): 00b5438

[Add] error messages
Files changed:
- app.py (+12 -2)
- src/dataloading.py (+1 -0)
- src/similarity.py (+5 -1)
app.py CHANGED

@@ -21,9 +21,17 @@ def create_heatmap(selected_models, selected_dataset, selected_metric):
 
     # Sort models and get short names
     selected_models = sorted(selected_models)
-
    similarities = load_data_and_compute_similarities(selected_models, selected_dataset, selected_metric)
 
+    # Check if similarity matrix contains NaN rows
+    failed_models = []
+    for i in range(len(similarities)):
+        if np.isnan(similarities[i]).all():
+            failed_models.append(selected_models[i])
+
+    if failed_models:
+        raise gr.Error(f"Failed to load data for models: {', '.join(failed_models)}")
+
     # Create figure and heatmap using seaborn
     plt.figure(figsize=(8, 6))
     ax = sns.heatmap(

@@ -71,7 +79,7 @@ def update_datasets_based_on_models(selected_models, current_dataset):
 )
 
 with gr.Blocks(title="LLM Similarity Analyzer") as demo:
-    gr.Markdown("## Model Similarity Comparison Tool")
+    gr.Markdown("## Model Similarity Comparison Tool \n\nAs Language Model (LM) capabilities advance, evaluating and supervising them at scale is getting harder for humans. There is hope that other language models can automate both these tasks, which we refer to as AI Oversight. We study how model similarity affects both aspects of AI oversight by proposing a probabilistic metric for LM similarity based on overlap in model mistakes. Using this metric, we first show that LLM-as-a-judge scores favor models similar to the judge, generalizing recent self-preference results. Then, we study training on LM annotations, and find complementary knowledge between the weak supervisor and strong student model plays a crucial role in gains from weak-to-strong generalization. As model capabilities increase, it becomes harder to find their mistakes, and we might defer more to AI oversight. However, we observe a concerning trend -- model mistakes are becoming more similar with increasing capabilities, pointing to risks from correlated failures. Our work underscores the importance of reporting and correcting for model similarity, especially in the emerging paradigm of AI oversight. ")
 
     with gr.Row():
         dataset_dropdown = gr.Dropdown(

@@ -97,6 +105,8 @@ with gr.Blocks(title="LLM Similarity Analyzer") as demo:
             info="Search and select multiple models"
         )
 
+    gr.Markdown("* For the probabilistic Kappa_p metric self-similarity is only 1, if the model predicts a single option with 100% confidence.")
+
     model_dropdown.change(
         fn=update_datasets_based_on_models,
         inputs=[model_dropdown, dataset_dropdown],
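The Kappa_p caveat added in the last hunk can be checked numerically. A minimal sketch, assuming the agreement term behind Kappa_p is the expected match rate of two independent samples from the model's predictive distribution, which for self-agreement is sum_k p_k^2 (an illustrative assumption; the full metric also applies a chance correction, which preserves the "equals 1 only at full confidence" property whenever expected chance agreement stays below 1):

```python
import numpy as np

def prob_self_agreement(p):
    # Probability that two independent samples from the same predictive
    # distribution p pick the same option: sum_k p_k^2. This reaches 1.0
    # exactly when all probability mass sits on a single option.
    p = np.asarray(p, dtype=float)
    return float(np.dot(p, p))

print(prob_self_agreement([1.0, 0.0, 0.0]))  # 1.0  -> 100% confident
print(prob_self_agreement([0.6, 0.3, 0.1]))  # 0.46 -> self-agreement below 1
```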
|
src/dataloading.py CHANGED

@@ -68,6 +68,7 @@ def filter_labels(doc):
         raise ValueError("Invalid label")
     return labels
 
+
 def load_run_data(model_name, dataset_name):
     try:
         model_name = model_name.replace("/", "__")
|
src/similarity.py CHANGED

@@ -67,6 +67,10 @@ def compute_pairwise_similarities(metric_name: str, probs: list[list[np.array]],
                 del gt_a[idx]
                 del gt_b[idx]
 
-            similarities[i, j] = compute_similarity(metric, outputs_a, outputs_b, gt_a)
+            try:
+                similarities[i, j] = compute_similarity(metric, outputs_a, outputs_b, gt_a)
+            except Exception as e:
+                similarities[i, j] = np.nan
+
             similarities[j, i] = similarities[i, j]
     return similarities