Pijush2023 committed on
Commit
e668d96
·
verified ·
1 Parent(s): 162f162

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +84 -34
app.py CHANGED
@@ -335,6 +335,7 @@ def initialize_agent_with_prompt(prompt_template):
335
  )
336
  return agent
337
 
 
338
  def generate_answer(message, choice):
339
  logging.debug(f"generate_answer called with prompt_choice: {choice}")
340
 
@@ -345,18 +346,98 @@ def generate_answer(message, choice):
345
  else:
346
  logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
347
  agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
 
348
  response = agent(message)
349
-
350
  addresses = extract_addresses(response['output'])
351
  return response['output'], addresses
352
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
353
 
354
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
355
  # def bot(history, choice, tts_choice):
356
  # if not history:
357
  # return history
358
  # response_pair, addresses = generate_answer(history[-1][0], choice)
359
- # history[-1] = response_pair # Update bot response correctly
 
 
 
 
360
 
361
  # with concurrent.futures.ThreadPoolExecutor() as executor:
362
  # if tts_choice == "Alpha":
@@ -378,38 +459,7 @@ def generate_answer(message, choice):
378
  # history = history or []
379
  # history.append([message, ""]) # Ensure it is a list with two elements: message and empty response
380
  # return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
381
-
382
- def bot(history, choice, tts_choice):
383
- if not history:
384
- return history
385
- response_pair, addresses = generate_answer(history[-1][0], choice)
386
- # Ensure history has the correct format
387
- if len(history[-1]) == 1:
388
- history[-1].append("")
389
- elif len(history[-1]) == 0:
390
- history[-1] = [history[-1][0], ""]
391
-
392
- with concurrent.futures.ThreadPoolExecutor() as executor:
393
- if tts_choice == "Alpha":
394
- audio_future = executor.submit(generate_audio_elevenlabs, response_pair[1])
395
- elif tts_choice == "Beta":
396
- audio_future = executor.submit(generate_audio_parler_tts, response_pair[1])
397
- elif tts_choice == "Gamma":
398
- audio_future = executor.submit(generate_audio_mars5, response_pair[1])
399
-
400
- for character in response_pair[1]:
401
- history[-1][1] += character
402
- time.sleep(0.05)
403
- yield history, None
404
-
405
- audio_path = audio_future.result()
406
- yield history, audio_path
407
-
408
- def add_message(history, message):
409
- history = history or []
410
- history.append([message, ""]) # Ensure it is a list with two elements: message and empty response
411
- return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
412
-
413
  # def generate_voice_response(history, tts_choice):
414
  # if not history:
415
  # return None
 
335
  )
336
  return agent
337
 
338
+
339
  def generate_answer(message, choice):
340
  logging.debug(f"generate_answer called with prompt_choice: {choice}")
341
 
 
346
  else:
347
  logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
348
  agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
349
+
350
  response = agent(message)
 
351
  addresses = extract_addresses(response['output'])
352
  return response['output'], addresses
353
 
354
def bot(history, choice, tts_choice):
    """Answer the newest user message, stream the reply into the chat
    history character-by-character, then yield the generated TTS audio.

    Args:
        history: list of [user_message, bot_response] chat pairs; the last
            entry holds the message to answer.
        choice: prompt-style selector forwarded to generate_answer
            ("Details" / "Conversational").
        tts_choice: TTS backend selector — "Alpha" (ElevenLabs),
            "Beta" (Parler), "Gamma" (MARS5).

    Yields:
        (history, audio_path) tuples: audio_path is None while text is
        streaming and the synthesized file path on the final yield.
    """
    if not history:
        # NOTE: this is a generator function — a bare return simply stops
        # iteration; any returned value would be silently discarded.
        return

    user_message = history[-1][0]
    response_text, _addresses = generate_answer(user_message, choice)

    # Reset the response slot to an empty string before streaming.
    # BUG FIX: the previous code pre-filled this slot with response_text
    # and then appended each character again, duplicating the whole reply.
    if len(history[-1]) == 1:
        history[-1].append("")
    else:
        history[-1][1] = ""

    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Kick off audio synthesis in the background so it overlaps with
        # the character-by-character text streaming below.
        if tts_choice == "Alpha":
            audio_future = executor.submit(generate_audio_elevenlabs, response_text)
        elif tts_choice == "Beta":
            audio_future = executor.submit(generate_audio_parler_tts, response_text)
        elif tts_choice == "Gamma":
            audio_future = executor.submit(generate_audio_mars5, response_text)
        else:
            # BUG FIX: an unrecognized tts_choice previously left
            # audio_future unbound, raising NameError at .result().
            # Fall back to streaming text with no audio.
            audio_future = None

        # Stream the reply into the chat window one character at a time.
        for character in response_text:
            history[-1][1] += character
            time.sleep(0.05)
            yield history, None

        audio_path = audio_future.result() if audio_future is not None else None
        yield history, audio_path
382
+
383
def add_message(history, message):
    """Record *message* as a new chat turn and clear the input box.

    Appends a two-element [message, ""] pair (user text, empty bot slot)
    to *history* in place — creating the list when it is None/empty — and
    returns the history together with a reset Gradio textbox.
    """
    if not history:
        history = []
    history.append([message, ""])
    cleared_box = gr.Textbox(
        value="",
        interactive=True,
        placeholder="Enter message or upload file...",
        show_label=False,
    )
    return history, cleared_box
387
+ #----------------------------------part 1-------------------------
388
+ # def generate_answer(message, choice):
389
+ # logging.debug(f"generate_answer called with prompt_choice: {choice}")
390
+
391
+ # if choice == "Details":
392
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_1)
393
+ # elif choice == "Conversational":
394
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
395
+ # else:
396
+ # logging.error(f"Invalid prompt_choice: {choice}. Defaulting to 'Conversational'")
397
+ # agent = initialize_agent_with_prompt(QA_CHAIN_PROMPT_2)
398
+ # response = agent(message)
399
+
400
+ # addresses = extract_addresses(response['output'])
401
+ # return response['output'], addresses
402
 
403
 
404
+
405
+ # # def bot(history, choice, tts_choice):
406
+ # # if not history:
407
+ # # return history
408
+ # # response_pair, addresses = generate_answer(history[-1][0], choice)
409
+ # # history[-1] = response_pair # Update bot response correctly
410
+
411
+ # # with concurrent.futures.ThreadPoolExecutor() as executor:
412
+ # # if tts_choice == "Alpha":
413
+ # # audio_future = executor.submit(generate_audio_elevenlabs, response_pair[1])
414
+ # # elif tts_choice == "Beta":
415
+ # # audio_future = executor.submit(generate_audio_parler_tts, response_pair[1])
416
+ # # elif tts_choice == "Gamma":
417
+ # # audio_future = executor.submit(generate_audio_mars5, response_pair[1])
418
+
419
+ # # for character in response_pair[1]:
420
+ # # history[-1][1] += character
421
+ # # time.sleep(0.05)
422
+ # # yield history, None
423
+
424
+ # # audio_path = audio_future.result()
425
+ # # yield history, audio_path
426
+
427
+ # # def add_message(history, message):
428
+ # # history = history or []
429
+ # # history.append([message, ""]) # Ensure it is a list with two elements: message and empty response
430
+ # # return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
431
+
432
  # def bot(history, choice, tts_choice):
433
  # if not history:
434
  # return history
435
  # response_pair, addresses = generate_answer(history[-1][0], choice)
436
+ # # Ensure history has the correct format
437
+ # if len(history[-1]) == 1:
438
+ # history[-1].append("")
439
+ # elif len(history[-1]) == 0:
440
+ # history[-1] = [history[-1][0], ""]
441
 
442
  # with concurrent.futures.ThreadPoolExecutor() as executor:
443
  # if tts_choice == "Alpha":
 
459
  # history = history or []
460
  # history.append([message, ""]) # Ensure it is a list with two elements: message and empty response
461
  # return history, gr.Textbox(value="", interactive=True, placeholder="Enter message or upload file...", show_label=False)
462
+ #--------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
463
  # def generate_voice_response(history, tts_choice):
464
  # if not history:
465
  # return None