"""Minimal Gradio app that answers free-text queries with a GPT-2 model."""

import shutil
import subprocess

import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer


def _install_rust() -> None:
    """Install the Rust toolchain (only needed if `tokenizers` must build from source).

    Bug fix: the original one-liner passed '|' and 'sh' as literal arguments
    to curl, so no shell pipe existed and the installer never executed. Here
    the download and the shell execution are two separate processes, with the
    installer script handed to `sh` explicitly via stdin, and both steps fail
    loudly (`check=True`) instead of silently.

    NOTE(review): piping a remote script into `sh` is inherently risky —
    prefer a pinned installer version or a prebuilt `tokenizers` wheel.
    """
    if shutil.which("cargo") is not None:
        return  # Rust toolchain already present; nothing to do.
    download = subprocess.run(
        ["curl", "--proto", "=https", "--tlsv1.2", "-sSf", "https://sh.rustup.rs"],
        capture_output=True,
        check=True,
    )
    subprocess.run(["sh"], input=download.stdout, check=True)


_install_rust()

# Load the pre-trained GPT-2 model and tokenizer once at startup.
model_name = "gpt2"  # Alternatives: "gpt2-medium", "gpt2-large", etc.
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)


def generate_response(query: str) -> str:
    """Return a GPT-2 continuation of *query* (greedy/beam defaults, <=100 tokens)."""
    input_ids = tokenizer.encode(query, return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=100,
        num_return_sequences=1,
        no_repeat_ngram_size=2,  # avoid verbatim repetition of any 2-gram
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)


# Gradio UI: one text box in, generated text out.
iface = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
    title="Generative AI Query Response",
)
iface.launch()