thepolymerguy committed on
Commit
23e7d2a
·
1 Parent(s): bc5c55b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -14
app.py CHANGED
@@ -87,9 +87,6 @@ def add_text(history, text):
87
  history = history + [(text, None)]
88
  return history, ""
89
 
90
- def add_file(history, file):
91
- history = history + [((file.name,), None)]
92
- return history
93
 
94
  def convert_saved_embeddings(embedding_string):
95
  """
@@ -111,11 +108,6 @@ def convert_saved_embeddings(embedding_string):
111
  embedding = torch.from_numpy(embedding)
112
  return embedding
113
 
114
- def bot(history):
115
- response = "**That's cool!**"
116
- history[-1][1] = response
117
- return history
118
-
119
 
120
  ########## LOADING PRE-COMPUTED EMBEDDINGS ##########
121
 
@@ -192,7 +184,8 @@ def classifier(userin, SearchType):
192
  return broad_scope_predictions
193
 
194
 
195
- def generateresponse(history):#, task):
 
196
  """
197
  Model definition here:
198
  """
@@ -214,8 +207,8 @@ def generateresponse(history):#, task):
214
  input_ids = inputs["input_ids"].cuda()
215
 
216
  generation_config = GenerationConfig(
217
- temperature=0.6,
218
- top_p=0.95,
219
  repetition_penalty=1.15,
220
  )
221
  print("Generating...")
@@ -224,7 +217,7 @@ def generateresponse(history):#, task):
224
  generation_config=generation_config,
225
  return_dict_in_generate=True,
226
  output_scores=True,
227
- max_new_tokens=256,
228
  )
229
  output = []
230
  for s in generation_output.sequences:
@@ -416,9 +409,16 @@ with gr.Blocks(title='Claimed', theme=theme) as demo:
416
  show_label=False,
417
  placeholder="Enter text and submit",
418
  ).style(container=False)
 
 
 
 
 
 
 
 
 
419
 
420
- txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
421
- generateresponse, chatbot, chatbot)
422
 
423
  gr.Markdown("""
424
  # HAVE AN IDEA? GET IT CLAIMED
 
87
  history = history + [(text, None)]
88
  return history, ""
89
 
 
 
 
90
 
91
  def convert_saved_embeddings(embedding_string):
92
  """
 
108
  embedding = torch.from_numpy(embedding)
109
  return embedding
110
 
 
 
 
 
 
111
 
112
  ########## LOADING PRE-COMPUTED EMBEDDINGS ##########
113
 
 
184
  return broad_scope_predictions
185
 
186
 
187
+ def generateresponse(history, temp, top_p, tokens):
188
+
189
  """
190
  Model definition here:
191
  """
 
207
  input_ids = inputs["input_ids"].cuda()
208
 
209
  generation_config = GenerationConfig(
210
+ temperature=temp,
211
+ top_p=top_p,
212
  repetition_penalty=1.15,
213
  )
214
  print("Generating...")
 
217
  generation_config=generation_config,
218
  return_dict_in_generate=True,
219
  output_scores=True,
220
+ max_new_tokens=tokens,
221
  )
222
  output = []
223
  for s in generation_output.sequences:
 
409
  show_label=False,
410
  placeholder="Enter text and submit",
411
  ).style(container=False)
412
+
413
+ with gr.Row():
414
+ with gr.Accordion("Parameters"):
415
+ temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
416
+ top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
417
+ tokens = gr.Slider(minimum=5, maximum=512, value=256, label="Max Tokens", step=1)
418
+
419
+ txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
420
+ generateresponse, [chatbot, temp, top_p, tokens], chatbot)
421
 
 
 
422
 
423
  gr.Markdown("""
424
  # HAVE AN IDEA? GET IT CLAIMED