# --- Hugging Face file-viewer chrome captured with this source ---
# (avatar line, repo name, commit hash, raw/history/blame links, size)
# Commented out so the file parses as Python; kept for provenance:
#   JPeace18's picture | bard | 340cece verified | raw / history / blame | 804 Bytes
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Load the pretrained seq2seq model and its tokenizer from the Hugging Face
# Hub. This runs at import time and performs network/disk I/O on first use.
# NOTE(review): "your-model-name" is a placeholder — this script will fail at
# from_pretrained() until it is replaced with a real model id.
model_name = "your-model-name" # Replace with your model's name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# Answer-generation function for the Gradio interface.
# BUG FIX: this must be defined BEFORE gr.Interface(...) below — the original
# file constructed the interface first and passed fn=generate_answer, which
# raised NameError because the name did not exist yet at that point.
def generate_answer(question):
    """Generate an answer to *question* using the loaded seq2seq model.

    Args:
        question: The user's question as a plain string.

    Returns:
        The model's decoded output with special tokens stripped.
    """
    # Tokenize as a single-item batch; return_tensors="pt" gives PyTorch tensors.
    inputs = tokenizer([question], return_tensors="pt")
    outputs = model.generate(**inputs)
    # outputs[0] is the first (and only) generated sequence in the batch.
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer


# Define the Gradio interface; fn is resolved at construction time,
# which is why generate_answer is defined above.
iface = gr.Interface(
    fn=generate_answer,
    inputs=[gr.Textbox(lines=5, placeholder="Ask a question")],
    outputs="textbox",
    title="AI Answer Generator",
)

# Launch the interface (blocking call that serves the web UI).
iface.launch()