george sami committed on
Commit
9f87cf9
·
1 Parent(s): 5a27220
Files changed (2) hide show
  1. app.py +13 -24
  2. requirements.txt +1 -0
app.py CHANGED
@@ -2,26 +2,13 @@ import gradio as gr
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
  # Load GPT-4-mini model and tokenizer from Hugging Face
5
- model_name = "gpt-4-mini" # Change this to the exact Hugging Face model name
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
- # Define your custom context
10
- custom_context = """
11
- I am George Sami
12
- - Job: Technical lead
13
- - Services: Frontend development, backend development, mobile development using flutter
14
- - Programming languages : php, java, js, python
15
- - Started my career: 2019
16
- """
17
-
18
-
19
- def generate_response(input_text, context):
20
- # Combine user input with the custom context
21
- full_input = f"{context}\n{input_text}"
22
-
23
  # Tokenize the input text and generate the response
24
- inputs = tokenizer(full_input, return_tensors="pt")
25
  outputs = model.generate(**inputs, max_length=500, temperature=0.7)
26
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
27
 
@@ -30,13 +17,10 @@ def generate_response(input_text, context):
30
  # Create the Gradio interface
31
  iface = gr.Interface(
32
  fn=generate_response,
33
- inputs=[
34
- gr.Textbox(label="Input Text", placeholder="Enter your text here..."),
35
- gr.Textbox(label="Context", placeholder="Enter custom context here...")
36
- ],
37
  outputs="text",
38
- title="GPT-4-mini with Custom Context",
39
- description="This is a simple interface to interact with GPT-4-mini and provide your own context for responses."
40
  )
41
 
42
  # Launch the Gradio interface on Hugging Face
@@ -44,7 +28,12 @@ iface.launch()
44
 
45
 
46
 
 
 
 
 
 
 
 
47
 
48
 
49
- # input_with_context = f"{custom_context}\n\nUser: {user_input}\nAssistant:"
50
-
 
2
  from transformers import AutoTokenizer, AutoModelForCausalLM
3
 
4
  # Load GPT-4-mini model and tokenizer from Hugging Face
5
+ model_name = "gpt-4-mini" # Ensure this is the correct model name on Hugging Face
6
  tokenizer = AutoTokenizer.from_pretrained(model_name)
7
  model = AutoModelForCausalLM.from_pretrained(model_name)
8
 
9
+ def generate_response(input_text):
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  # Tokenize the input text and generate the response
11
+ inputs = tokenizer(input_text, return_tensors="pt")
12
  outputs = model.generate(**inputs, max_length=500, temperature=0.7)
13
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
14
 
 
17
  # Create the Gradio interface
18
  iface = gr.Interface(
19
  fn=generate_response,
20
+ inputs=gr.Textbox(label="Input Text", placeholder="Enter your text here..."),
 
 
 
21
  outputs="text",
22
+ title="GPT-4-mini",
23
+ description="A simple interface to interact with the GPT-4-mini model."
24
  )
25
 
26
  # Launch the Gradio interface on Hugging Face
 
28
 
29
 
30
 
31
+ # custom_context = """
32
+ # I am George Sami
33
+ # - Job: Technical lead
34
+ # - Services: Frontend development, backend development, mobile development using flutter
35
+ # - Programming languages : php, java, js, python
36
+ # - Started my career: 2019
37
+ # """
38
 
39
 
 
 
requirements.txt CHANGED
@@ -3,3 +3,4 @@ gradio
3
  transformers
4
  torch
5
 
 
 
3
  transformers
4
  torch
5
 
6
+