repd79 committed on
Commit
502bbe0
verified
1 Parent(s): 492bec3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -14
app.py CHANGED
@@ -1,10 +1,30 @@
1
  import gradio as gr
 
 
2
  from huggingface_hub import InferenceClient
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("BSC-LT/ALIA-40b")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
  def respond(
10
  message,
@@ -14,26 +34,33 @@ def respond(
14
  temperature,
15
  top_p,
16
  ):
 
 
17
  prompt = f"{system_message}\n"
18
 
19
  for user_input, bot_response in history:
20
  prompt += f"User: {user_input}\nAssistant: {bot_response}\n"
21
-
22
  prompt += f"User: {message}\nAssistant:"
23
 
24
  response = ""
25
 
26
- for message in client.text_generation(
27
- prompt=prompt,
28
- max_new_tokens=max_tokens,
29
- temperature=temperature,
30
- top_p=top_p,
31
- stream=True
32
- ):
33
- response += message
34
- yield response
 
35
 
 
36
 
 
 
 
37
 
38
  """
39
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
@@ -53,4 +80,5 @@ demo = gr.Interface(
53
  )
54
 
55
  if __name__ == "__main__":
 
56
  demo.launch()
 
1
  import gradio as gr
2
+ import os
3
+ import logging
4
  from huggingface_hub import InferenceClient
5
 
6
+ # Configurar logging para depuraci贸n
7
+ logging.basicConfig(
8
+ level=logging.INFO,
9
+ format="%(asctime)s - %(levelname)s - %(message)s"
10
+ )
11
+
12
+ # Obtener el token de Hugging Face de variables de entorno (secret)
13
+ hf_token = os.getenv("HF_TOKEN")
14
+
15
+ if not hf_token:
16
+ logging.error("El token de Hugging Face no est谩 configurado. Agrega 'HF_TOKEN' como variable de entorno.")
17
+ raise ValueError("El token de Hugging Face no est谩 configurado. Agrega 'HF_TOKEN' como variable de entorno.")
18
+
19
+ logging.info("Token de Hugging Face encontrado correctamente.")
20
+
21
+ # Inicializar el cliente de inferencia con autenticaci贸n segura
22
+ try:
23
+ client = InferenceClient("BSC-LT/ALIA-40b", token=hf_token)
24
+ logging.info("Cliente de Hugging Face inicializado correctamente.")
25
+ except Exception as e:
26
+ logging.error(f"Error al inicializar el cliente de Hugging Face: {e}")
27
+ raise
28
 
29
  def respond(
30
  message,
 
34
  temperature,
35
  top_p,
36
  ):
37
+ logging.info("Generando respuesta para el mensaje del usuario.")
38
+
39
  prompt = f"{system_message}\n"
40
 
41
  for user_input, bot_response in history:
42
  prompt += f"User: {user_input}\nAssistant: {bot_response}\n"
43
+
44
  prompt += f"User: {message}\nAssistant:"
45
 
46
  response = ""
47
 
48
+ try:
49
+ for message in client.text_generation(
50
+ prompt=prompt,
51
+ max_new_tokens=max_tokens,
52
+ temperature=temperature,
53
+ top_p=top_p,
54
+ stream=True
55
+ ):
56
+ response += message
57
+ yield response
58
 
59
+ logging.info("Respuesta generada correctamente.")
60
 
61
+ except Exception as e:
62
+ logging.error(f"Error durante la generación de texto: {e}")
63
+ yield "Hubo un error al generar la respuesta. Intenta nuevamente."
64
 
65
  """
66
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
 
80
  )
81
 
82
  if __name__ == "__main__":
83
+ logging.info("Lanzando la aplicación con Gradio...")
84
  demo.launch()