thepolymerguy committed
Commit 53bd282 · 1 Parent(s): cab9610

Update app.py

Files changed (1):
  1. app.py +46 -49
app.py CHANGED
@@ -45,53 +45,51 @@ def classifier(userin, SearchType):
     return broad_scope_predictions[1], searchlink
 
 
-# def generateresponse(history, task):
-#     """
-#     Model definition here:
-#     """
-#     '''
-#     global model
-#     global tokenizer
-
-#     PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
-#     ### Instruction:
-#     {user}
-#     ### Response:"""
-
-#     inputs = tokenizer(
-#         PROMPT,
-#         return_tensors="pt",
-#     )
-#     input_ids = inputs["input_ids"].cuda()
-
-#     generation_config = GenerationConfig(
-#         temperature=0.6,
-#         top_p=0.95,
-#         repetition_penalty=1.15,
-#     )
-#     print("Generating...")
-#     generation_output = model.generate(
-#         input_ids=input_ids,
-#         generation_config=generation_config,
-#         return_dict_in_generate=True,
-#         output_scores=True,
-#         max_new_tokens=256,
-#     )
-#     output = []
-#     for s in generation_output.sequences:
-#         outputs.append(tokenizer.decode(s))
-#         print(tokenizer.decode(s))
-
-#     output = (outputs[0].split('### Response:'))[1]
-
-#     '''
-
-#     user = history[-1][0]
-
-#     response = f"You asked: {user}"
-#     history[-1][1] = response
-#     print(history)
-#     return history
+def generateresponse(history, task=None):  # task is optional; the Gradio event passes only the chat history
+    """
+    Generate a model reply to the latest user message in the chat history.
+    """
+
+    global model
+    global tokenizer
+
+    user = history[-1][0]
+
+    PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
+### Instruction:
+{user}
+### Response:"""
+
+    inputs = tokenizer(
+        PROMPT,
+        return_tensors="pt",
+    )
+    input_ids = inputs["input_ids"].cuda()
+
+    generation_config = GenerationConfig(
+        temperature=0.6,
+        top_p=0.95,
+        repetition_penalty=1.15,
+    )
+    print("Generating...")
+    generation_output = model.generate(
+        input_ids=input_ids,
+        generation_config=generation_config,
+        return_dict_in_generate=True,
+        output_scores=True,
+        max_new_tokens=256,
+    )
+    outputs = []  # decoded generations
+    for s in generation_output.sequences:
+        outputs.append(tokenizer.decode(s))
+        print(tokenizer.decode(s))
+
+    # Keep only the text that follows the "### Response:" marker.
+    output = (outputs[0].split('### Response:'))[1]
+
+    response = f"Response: {output}"
+    history[-1][1] = response
+    print(history)
+    return history
 
 
 theme = gr.themes.Base(
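
For context, here is a minimal, self-contained sketch of the same generate() pattern the new function relies on. The checkpoint name is a placeholder so the sketch runs anywhere; the real model and tokenizer behind the `model`/`tokenizer` globals are loaded elsewhere in app.py and are not part of this hunk. One caveat carried over from the commit: temperature and top_p only take effect when do_sample=True, which the committed GenerationConfig does not set, so the sketch adds it explicitly.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

# Placeholder checkpoint (an assumption, not the Space's actual model).
checkpoint = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)
if torch.cuda.is_available():
    model = model.cuda()  # the committed code assumes CUDA via input_ids.cuda()

prompt = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
Summarise what a patent claim is.
### Response:"""

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
generation_config = GenerationConfig(
    do_sample=True,          # required for temperature/top_p to apply
    temperature=0.6,
    top_p=0.95,
    repetition_penalty=1.15,
)
out = model.generate(
    input_ids=inputs["input_ids"],
    generation_config=generation_config,
    return_dict_in_generate=True,
    output_scores=True,
    max_new_tokens=64,
)
text = tokenizer.decode(out.sequences[0])
# Same marker-split the commit uses: drop the echoed prompt.
print(text.split("### Response:")[1])
```
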
@@ -207,10 +205,9 @@ with gr.Blocks(title='Claimed', theme=theme) as demo:
             ).style(container=False)
         with gr.Column(scale=0.15, min_width=0):
             btn = gr.Button("Submit")
-
-    # txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
-    #     generateresponse, chatbot, chatbot
-    # )
+
+    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
+        generateresponse, chatbot, chatbot)
 
     gr.Markdown("""
     # HAVE AN IDEA? GET IT CLAIMED
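
The re-enabled submit chain calls an add_text helper that this commit does not define. Below is a minimal sketch of the full wiring, with add_text written in the standard Gradio chatbot-demo style (an assumption; its actual body lives outside this diff) and a stub in place of the model call.

```python
import gradio as gr

def add_text(history, text):
    # Append the user turn with an empty bot slot, then clear the textbox.
    return history + [(text, None)], ""

def generateresponse(history, task=None):
    # Stub standing in for the Space's model call; fills the pending bot slot.
    history[-1][1] = f"Response: {history[-1][0]}"
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    txt = gr.Textbox(show_label=False, placeholder="Describe your idea")
    # Two-step chain: add_text updates the chat and clears the box,
    # then generateresponse fills in the reply.
    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
        generateresponse, chatbot, chatbot)

demo.launch()
```

Chaining with .then() makes the user's message appear in the chat immediately, while the model reply fills in once the second callback finishes.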