rkwsuper committed on
Commit
f2b7bc7
·
1 Parent(s): 1110a58
Files changed (2) hide show
  1. app.py +40 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hub repo of the fine-tuned causal LM served by this app.
model_name = "rkwsuper/lora_model"

# Read the Hub access token from the environment (e.g. an HF_TOKEN Space
# secret). None falls back to anonymous access, which works for public repos.
auth_token = os.getenv("HF_TOKEN")

# Load tokenizer and model once at startup. `use_auth_token=` is deprecated
# in recent transformers releases; the supported keyword is `token=`.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=auth_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=auth_token)
13
+
14
+ # Define the function for inference
15
def generate_text(prompt, max_length=100, temperature=1.0):
    """Generate a continuation of *prompt* using the loaded model.

    Args:
        prompt: Input text to continue.
        max_length: Upper bound on the total sequence length
            (prompt tokens + generated tokens).
        temperature: Sampling temperature; higher values produce more
            random output.

    Returns:
        The decoded generated text (includes the prompt), with special
        tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        # Pass the attention mask explicitly so padding (pad == eos here)
        # is not misinterpreted by generate().
        attention_mask=inputs.get("attention_mask"),
        max_length=max_length,
        # Bug fix: without do_sample=True generation is greedy and the
        # `temperature` argument (wired to a UI slider) has no effect.
        do_sample=True,
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
24
+
25
+ # Create the Gradio interface
26
# Gradio UI: a prompt box plus two generation controls, wired to
# generate_text; the single text output shows the model's completion.
prompt_box = gr.Textbox(label="Enter Prompt")
length_slider = gr.Slider(10, 300, value=100, step=10, label="Max Length")
temperature_slider = gr.Slider(0.1, 2.0, value=1.0, step=0.1, label="Temperature")

iface = gr.Interface(
    fn=generate_text,
    inputs=[prompt_box, length_slider, temperature_slider],
    outputs="text",
    title="Hugging Face Model Text Generator",
    description="This interface generates text based on your input using a fine-tuned Hugging Face model.",
)
37
+
38
+ # Launch
39
# Start the web server only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    iface.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ transformers
3
+ torch