Pijush2023 committed on
Commit
9a94403
·
verified ·
1 Parent(s): 7bfcc92

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -8
app.py CHANGED
@@ -335,6 +335,21 @@ def initialize_agent_with_prompt(prompt_template):
335
  )
336
  return agent
337
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
338
  def generate_answer(message, choice):
339
  logging.debug(f"generate_answer called with prompt_choice: {choice}")
340
 
@@ -348,24 +363,46 @@ def generate_answer(message, choice):
348
  response = agent(message)
349
 
350
  addresses = extract_addresses(response['output'])
351
- return response['output'], addresses
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
352
 
353
  def bot(history, choice, tts_choice):
354
  if not history:
355
  return history
356
  response, addresses = generate_answer(history[-1][0], choice)
357
- history[-1][1] = ""
358
-
359
  with concurrent.futures.ThreadPoolExecutor() as executor:
360
  if tts_choice == "Alpha":
361
- audio_future = executor.submit(generate_audio_elevenlabs, response)
362
  elif tts_choice == "Beta":
363
- audio_future = executor.submit(generate_audio_parler_tts, response)
364
  elif tts_choice == "Gamma":
365
- audio_future = executor.submit(generate_audio_mars5, response)
366
-
367
 
368
- for character in response:
369
  history[-1][1] += character
370
  time.sleep(0.05)
371
  yield history, None
 
335
  )
336
  return agent
337
 
338
+ # def generate_answer(message, choice):
339
+ # logging.debug(f"generate_answer called with prompt_choice: {choice}")
340
+
341
+ # if choice == "Details":
342
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
343
+ # elif choice == "Conversational":
344
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
345
+ # else:
346
+ # logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
347
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
348
+ # response = agent(message)
349
+
350
+ # addresses = extract_addresses(response['output'])
351
+ # return response['output'], addresses
352
+
353
  def generate_answer(message, choice):
354
  logging.debug(f"generate_answer called with prompt_choice: {choice}")
355
 
 
363
  response = agent(message)
364
 
365
  addresses = extract_addresses(response['output'])
366
+ return [message, response['output']], addresses # Ensure it returns a list with two elements
367
+
368
+ # def bot(history, choice, tts_choice):
369
+ # if not history:
370
+ # return history
371
+ # response, addresses = generate_answer(history[-1][0], choice)
372
+ # history[-1][1] = ""
373
+
374
+ # with concurrent.futures.ThreadPoolExecutor() as executor:
375
+ # if tts_choice == "Alpha":
376
+ # audio_future = executor.submit(generate_audio_elevenlabs, response)
377
+ # elif tts_choice == "Beta":
378
+ # audio_future = executor.submit(generate_audio_parler_tts, response)
379
+ # elif tts_choice == "Gamma":
380
+ # audio_future = executor.submit(generate_audio_mars5, response)
381
+
382
+
383
+ # for character in response:
384
+ # history[-1][1] += character
385
+ # time.sleep(0.05)
386
+ # yield history, None
387
+
388
+ # audio_path = audio_future.result()
389
+ # yield history, audio_path
390
 
391
  def bot(history, choice, tts_choice):
392
  if not history:
393
  return history
394
  response, addresses = generate_answer(history[-1][0], choice)
395
+ history[-1][1] = response[1] # Update bot response
396
+
397
  with concurrent.futures.ThreadPoolExecutor() as executor:
398
  if tts_choice == "Alpha":
399
+ audio_future = executor.submit(generate_audio_elevenlabs, response[1])
400
  elif tts_choice == "Beta":
401
+ audio_future = executor.submit(generate_audio_parler_tts, response[1])
402
  elif tts_choice == "Gamma":
403
+ audio_future = executor.submit(generate_audio_mars5, response[1])
 
404
 
405
+ for character in response[1]:
406
  history[-1][1] += character
407
  time.sleep(0.05)
408
  yield history, None