import gradio as gr
from transformers import pipeline


# Load the multilingual sentiment-analysis model from the Hugging Face Hub.
sentiment = pipeline("text-classification", model="tabularisai/multilingual-sentiment-analysis")


def get_sentiment(text):
    # Run the classifier; it returns a list of dicts such as [{"label": ..., "score": ...}].
    output = sentiment(text)
    return f'The sentence was classified as "{output[0]["label"]}" with {output[0]["score"] * 100:.1f}% confidence.'
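# Example usage (illustrative only; the actual label and score depend on the model):
#   get_sentiment("I really enjoyed my stay !")
#   -> 'The sentence was classified as "Positive" with 97.3% confidence.'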


title = "Get a sentiment on your text"
description = """
The bot takes your text and classifies it as either 'Positive' or 'Negative'.
"""

# Wire the classifier into a simple text-in / text-out Gradio interface.
demo = gr.Interface(
    fn=get_sentiment,
    inputs="text",
    outputs="text",
    title=title,
    description=description,
    examples=[["I really enjoyed my stay !"], ["Worst rental I ever got"]],
)


if __name__ == "__main__":
    demo.launch()