import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned model
model_name = "gpt2"  # Replace with your fine-tuned model path if available
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Define the chatbot function
def chatbot_response(input_text):
    response = generator(input_text, max_length=50, num_return_sequences=1)
    return response[0]["generated_text"]

# Create the Gradio interface
interface = gr.Interface(
    fn=chatbot_response,
    inputs="text",
    outputs="text",
    title="Love and Smile",
    description="An AI assistant for enhanced texting, flirting, and dating conversations.",
)

# Launch the app
interface.launch()