# Hugging Face Space: "Text to Vector Generator" — embeds input text with
# mixedbread-ai/mxbai-embed-large-v1 and displays the resulting vector.
# Import required libraries
import gradio as gr  # Web UI for the demo
from sentence_transformers import SentenceTransformer  # Text-embedding model
import torch  # Used only for CPU/GPU device detection
import numpy as np

# Make the app device agnostic: prefer CUDA when available, else fall back to CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Load the pretrained Sentence Transformer model once at startup and move it
# to the selected device, so each request only pays for encoding.
model = SentenceTransformer("mixedbread-ai/mxbai-embed-large-v1")
model = model.to(device)
# Function that does the embedding
def predict(input_text):
    """Embed *input_text* and return the embedding vector as a string.

    Args:
        input_text: Text to embed (a single string; lists are also accepted
            by ``SentenceTransformer.encode``).

    Returns:
        str: The full embedding array rendered as a comma-separated string,
        untruncated, in fixed-point notation with 8 decimal places.
    """
    # Calculate embeddings on the configured device.
    embeddings = model.encode(input_text, device=device)
    # Pass the formatting options directly to array2string instead of
    # calling np.set_printoptions, which would mutate process-wide numpy
    # state on every request and leak into unrelated code.
    return np.array2string(
        embeddings,
        separator=',',
        threshold=np.inf,       # never truncate with "..."
        precision=8,
        suppress_small=True,    # no scientific notation for tiny values
        floatmode='fixed',
    )
# Gradio app interface: one text input box -> one copyable output box holding
# the embedding vector as a string.
gradio_app = gr.Interface(
    predict,
    inputs=gr.Textbox(placeholder="Insert Text", label='Text'),
    outputs=gr.Textbox(
        max_lines=1,
        placeholder='Vector of dimensions 1024',
        label='Vector',
        show_label=True,
        show_copy_button=True,
    ),
    title="Text to Vector Generator",
    description="Embedding model: mixedbread-ai/mxbai-embed-large-v1.",
)

# Launch the web UI only when run as a script (not when imported).
if __name__ == "__main__":
    gradio_app.launch()