Daemontatox committed on
Commit ab4f4a6 · verified · 1 Parent(s): 0b72fd3

Update app.py

Files changed (1)
  1. app.py +22 -18
app.py CHANGED
@@ -69,11 +69,9 @@ h3 {
 }
 .message-wrap {
     overflow-x: auto;
-    white-space: pre-wrap !important;
 }
 .message-wrap p {
     margin-bottom: 1em;
-    white-space: pre-wrap !important;
 }
 .message-wrap pre {
     background-color: #f6f8fa;
@@ -120,7 +118,7 @@ def initialize_model():
     return model, tokenizer
 
 def format_text(text):
-    """Format text with proper spacing and tag highlighting"""
+    """Format text with proper spacing and tag highlighting (but keep tags visible)"""
     tag_patterns = [
         (r'<Thinking>', '\n<Thinking>\n'),
         (r'</Thinking>', '\n</Thinking>\n'),
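
The body of format_text between the pattern list and the final `return formatted` is not shown in this hunk. Below is a minimal sketch of the likely approach, applying each (pattern, replacement) pair with re.sub and then collapsing extra blank lines; the helper name and the blank-line cleanup step are assumptions, not the committed code.

import re

def format_text_sketch(text):
    """Hypothetical reconstruction of format_text; only the first two patterns
    and the final return appear in the diff, the rest is assumed."""
    tag_patterns = [
        (r'<Thinking>', '\n<Thinking>\n'),
        (r'</Thinking>', '\n</Thinking>\n'),
        # ...remaining tag pairs are elided in the diff...
    ]
    formatted = text
    for pattern, replacement in tag_patterns:
        # Put each tag on its own line so it stays visible in the chat display.
        formatted = re.sub(pattern, replacement, formatted)
    # Collapse runs of blank lines created by the substitutions (assumption).
    formatted = re.sub(r'\n{3,}', '\n\n', formatted)
    return formatted
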
@@ -141,13 +139,28 @@ def format_text(text):
     return formatted
 
 def format_chat_history(history):
-    """Format chat history for display in text area"""
+    """Format chat history for display, keeping tags visible"""
     formatted = []
     for user_msg, assistant_msg in history:
         formatted.append(f"User: {user_msg}")
         if assistant_msg:
             formatted.append(f"Assistant: {assistant_msg}")
     return "\n\n".join(formatted)
+
+def create_examples():
+    """Create example queries for the UI"""
+    return [
+        "Explain the concept of artificial intelligence.",
+        "How does photosynthesis work?",
+        "What are the main causes of climate change?",
+        "Describe the process of protein synthesis.",
+        "What are the key features of a democratic government?",
+        "Explain the theory of relativity.",
+        "How do vaccines work to prevent diseases?",
+        "What are the major events of World War II?",
+        "Describe the structure of a human cell.",
+        "What is the role of DNA in genetics?"
+    ]
 
 @spaces.GPU()
 def chat_response(
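
The commit adds create_examples(), but this diff does not show where the list is consumed. Below is a minimal sketch of one way to surface it, assuming the app builds its interface with gr.Blocks and a Textbox input; the component names and layout here are placeholders, not the app's actual UI code.

import gradio as gr

def create_examples():
    # Stand-in for the helper added in this commit; the full list lives in app.py.
    return [
        "Explain the concept of artificial intelligence.",
        "How does photosynthesis work?",
    ]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()               # assumed chat display component
    msg = gr.Textbox(label="Message")    # assumed user input box
    gr.Examples(
        examples=create_examples(),      # clicking an example fills the textbox
        inputs=msg,
    )

demo.launch()
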
@@ -161,7 +174,7 @@ def chat_response(
     top_k: int = 20,
     penalty: float = 1.2,
 ):
-    """Generate chat responses with proper tag handling"""
+    """Generate chat responses, keeping tags visible in the output"""
     conversation = [
         {"role": "system", "content": system_prompt}
     ]
@@ -199,7 +212,6 @@ def chat_response(
     )
 
     buffer = ""
-    current_line = ""
 
     with torch.no_grad():
         thread = Thread(target=model.generate, kwargs=generate_kwargs)
@@ -209,19 +221,11 @@ def chat_response(
 
     for new_text in streamer:
         buffer += new_text
-        current_line += new_text
-
-        if '\n' in current_line:
-            lines = current_line.split('\n')
-            current_line = lines[-1]
-            formatted_buffer = format_text(buffer)
-            history[-1][1] = formatted_buffer
-            chat_display = format_chat_history(history)
-            yield history, chat_display
-        else:
-            history[-1][1] = buffer
-            chat_display = format_chat_history(history)
-            yield history, chat_display
+        formatted_buffer = format_text(buffer)
+        history[-1][1] = formatted_buffer
+        chat_display = format_chat_history(history)
+
+        yield history, chat_display
 
 def process_example(example: str) -> tuple:
     """Process example query and return empty history and updated display"""
 