import gradio as gr
from huggingface_hub import list_models
from sentence_transformers import SentenceTransformer, util
# Load sentence transformer model for similarity calculation
semantic_model = SentenceTransformer('all-MiniLM-L6-v2')
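# 'all-MiniLM-L6-v2' produces 384-dimensional sentence embeddings; any other
# SentenceTransformer checkpoint could be swapped in here without other changes.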
# Function to fetch models from Hugging Face based on dynamic task filter
def fetch_models_from_hf(task_filter, limit=10):
    models = list_models(filter=task_filter, limit=limit)
    model_data = [
        {
            "model_id": model.modelId,
            "tags": model.tags,
            "downloads": model.downloads,
            "likes": model.likes,
            "last_modified": model.lastModified,
        }
        for model in models
    ]
    return model_data
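# Each returned record is a plain dict; the values below are illustrative only
# (actual tags, counts, and dates vary per model):
#   {"model_id": "distilbert-base-uncased-finetuned-sst-2-english",
#    "tags": ["text-classification", "pytorch", ...],
#    "downloads": 1234567, "likes": 321, "last_modified": datetime(...)}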
# Function to normalize a list of values to a 0-1 range
def normalize(values):
    min_val, max_val = min(values), max(values)
    return [(v - min_val) / (max_val - min_val) if max_val > min_val else 0 for v in values]
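# For example, normalize([100, 300, 500]) returns [0.0, 0.5, 1.0], while a
# constant list such as [7, 7] maps every entry to 0 instead of dividing by zero.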
# Function to get weighted recommendations based on user query and additional metrics
def get_weighted_recommendations_from_hf(user_query, task_filter, weights=None):
    if weights is None:
        weights = {"similarity": 0.7, "downloads": 0.2, "likes": 0.1}
    model_data = fetch_models_from_hf(task_filter)
    if not model_data:
        return "No models found for this task filter."
    model_ids = [model["model_id"] for model in model_data]
    model_tags = [' '.join(model["tags"]) for model in model_data]
    model_embeddings = semantic_model.encode(model_tags)
    user_embedding = semantic_model.encode(user_query)
    similarities = util.pytorch_cos_sim(user_embedding, model_embeddings)[0].numpy()
    # downloads/likes can be None on some entries; treat missing values as 0
    downloads = normalize([model["downloads"] or 0 for model in model_data])
    likes = normalize([model["likes"] or 0 for model in model_data])
    final_scores = []
    for i in range(len(model_data)):
        score = (
            weights["similarity"] * similarities[i] +
            weights["downloads"] * downloads[i] +
            weights["likes"] * likes[i]
        )
        final_scores.append((model_ids[i], score, similarities[i], downloads[i], likes[i]))
    ranked_recommendations = sorted(final_scores, key=lambda x: x[1], reverse=True)
    result = []
    for rank, (model_id, final_score, sim, dl_score, like_score) in enumerate(ranked_recommendations, 1):
        result.append(f"Rank {rank}: Model ID: {model_id}, Final Score: {final_score:.4f}, "
                      f"Similarity: {sim:.4f}, Downloads: {dl_score:.4f}, Likes: {like_score:.4f}")
    return '\n'.join(result)
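# Quick standalone check (a sketch: it requires network access to the Hub, and
# the query and weights below are illustrative, not part of the app itself):
#
#   print(get_weighted_recommendations_from_hf(
#       "classify customer reviews by sentiment",
#       task_filter="text-classification",
#       weights={"similarity": 0.6, "downloads": 0.3, "likes": 0.1},
#   ))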
# Gradio callback: inputs are passed positionally, so the third textbox (the
# system message) arrives here; it is shown to the user but not used in ranking
def respond(user_query, task_filter, system_message, weights=None):
    # Provide model recommendations based on the user's query and task filter
    return get_weighted_recommendations_from_hf(user_query, task_filter, weights)
# Gradio interface
demo = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(label="Enter your query", placeholder="What kind of model are you looking for?"),
        gr.Textbox(label="Task Filter", placeholder="Enter the task, e.g., text-classification"),
        gr.Textbox(value="You are using the Hugging Face model recommender system.", label="System message")
    ],
    outputs=gr.Textbox(label="Model Recommendations"),
    title="Hugging Face Model Recommender",
    description="Recommends models from Hugging Face based on your query and task."
)
if __name__ == "__main__":
    demo.launch(share=True)