import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the chat model and its tokenizer (bfloat16 halves memory use on supported hardware)
tokenizer = AutoTokenizer.from_pretrained("togethercomputer/GPT-NeoXT-Chat-Base-20B")
model = AutoModelForCausalLM.from_pretrained(
    "togethercomputer/GPT-NeoXT-Chat-Base-20B",
    torch_dtype=torch.bfloat16,
)

# Function called by the Gradio interface: wrap the user prompt in the
# <human>/<bot> turn format this chat model was trained on, then sample a reply
def generate_text(prompt):
    inputs = tokenizer(f"<human>: {prompt}\n<bot>:", return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=10, do_sample=True, temperature=0.8)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Define the Gradio interface
iface = gr.Interface(
    fn=generate_text,   # The function to call
    inputs="text",      # Input is a text field
    outputs="text",     # Output is a text field
    title="GPT-NeoXT Chat Text Generator",
)

# Launch the interface
if __name__ == "__main__":
    iface.launch()
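Once the app is running, the same endpoint can also be exercised from Python with the gradio_client package. Below is a minimal sketch, assuming the default local address http://127.0.0.1:7860 that iface.launch() prints on startup:

from gradio_client import Client

# Connect to the locally running demo (Gradio's default port assumed)
client = Client("http://127.0.0.1:7860")

# gr.Interface exposes its single function under the "/predict" endpoint
reply = client.predict("Hello, who are you?", api_name="/predict")
print(reply)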