prithivMLmods committed
Commit 7a2c608 · verified · 1 parent: c863607

Update app.py

Files changed (1)
  1. app.py +2 -7
app.py CHANGED
```diff
@@ -6,7 +6,6 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from typing import List, Dict, Optional, Tuple
-from http import HTTPStatus
 
 DESCRIPTION = """
 # QwQ Distill
@@ -88,7 +87,7 @@ def generate(
     top_p: float = 0.9,
     top_k: int = 50,
     repetition_penalty: float = 1.2,
-) -> Iterator[Tuple[str, List, str]]:
+) -> Iterator[str]:
     if query is None:
         query = ''
     if history is None:
@@ -135,11 +134,7 @@ def generate(
     outputs = []
     for text in streamer:
         outputs.append(text)
-        response = "".join(outputs)
-        # Update history with the new response
-        new_messages = messages + [{'role': Role.ASSISTANT, 'content': response}]
-        system, new_history = messages_to_history(new_messages)
-        yield "", new_history, system
+        yield "".join(outputs)
 
 
 demo = gr.ChatInterface(
```
 
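For context, the net effect of this commit is to make `generate` conform to the streaming contract of `gr.ChatInterface`: the chat function yields the full response accumulated so far as a plain string, rather than a `(textbox, history, system)` tuple with manually maintained history. Below is a minimal sketch of that pattern, assuming a generic causal LM; the model id, prompt construction, and generation kwargs are placeholder assumptions, not the Space's exact code.

```python
# Minimal sketch of the streaming pattern this commit moves to. The model id
# and generation settings are placeholders for illustration only.
from threading import Thread
from typing import Iterator

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "Qwen/Qwen2.5-0.5B-Instruct"  # placeholder model, not the Space's
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)


def generate(message: str, history: list) -> Iterator[str]:
    # ChatInterface manages the running history itself, so the function only
    # needs to build a prompt and stream text back.
    messages = [{"role": "user", "content": message}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # TextIteratorStreamer exposes generated tokens as an iterator, so
    # model.generate has to run on a background thread.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    thread = Thread(
        target=model.generate,
        kwargs=dict(input_ids=input_ids, streamer=streamer, max_new_tokens=1024),
    )
    thread.start()

    # gr.ChatInterface treats every yielded string as the complete response
    # so far, which is exactly what the new `yield "".join(outputs)` does.
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)


demo = gr.ChatInterface(fn=generate)

if __name__ == "__main__":
    demo.launch()
```

Yielding the joined accumulation rather than each delta matters here: ChatInterface replaces the displayed message with every yield, so each yield must be the whole response to date, not just the newest token.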