Update app.py
app.py CHANGED
@@ -57,7 +57,7 @@ def model_inference(input_dict, history):
 
     # Set up streamer for real-time output
     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
-    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=
+    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=2048)
 
     # Start generation in a separate thread
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
@@ -86,7 +86,7 @@ examples = [
 
 demo = gr.ChatInterface(
     fn=model_inference,
-    description="# **Qwen2.5-VL-
+    description="# **Qwen2.5-VL-7B-Instruct**",
     examples=examples,
     textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
     stop_btn="Stop Generation",
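For context, the changed lines sit inside a standard Transformers streaming setup: `model.generate` runs in a background thread and writes tokens into a `TextIteratorStreamer`, which the Gradio handler iterates and yields back to `gr.ChatInterface`. Below is a minimal sketch of that pattern, not the Space's actual app.py: the model loading, the text-only prompt construction (attached images are ignored), `MODEL_ID`, `multimodal=True`, and the launch block are assumptions filled in around the lines shown in the diff.

```python
# Minimal sketch of the streaming pattern this commit touches.
# Assumptions (not taken from the diff): model/processor loading, text-only
# prompt handling, MODEL_ID, multimodal=True, and the launch block.
from threading import Thread

import gradio as gr
import torch
from transformers import (
    AutoProcessor,
    Qwen2_5_VLForConditionalGeneration,
    TextIteratorStreamer,
)

MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"  # assumed checkpoint
processor = AutoProcessor.from_pretrained(MODEL_ID)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto"
)

def model_inference(input_dict, history):
    # Build a text-only prompt from the multimodal textbox payload
    # (image handling omitted for brevity in this sketch).
    messages = [{"role": "user", "content": [{"type": "text", "text": input_dict["text"]}]}]
    prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = processor(text=[prompt], return_tensors="pt").to(model.device)

    # Set up streamer for real-time output
    streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=2048)

    # Start generation in a separate thread so tokens can be yielded as they arrive
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    # Yield the accumulated text; gr.ChatInterface streams each yield to the UI
    buffer = ""
    for new_text in streamer:
        buffer += new_text
        yield buffer

demo = gr.ChatInterface(
    fn=model_inference,
    description="# **Qwen2.5-VL-7B-Instruct**",
    textbox=gr.MultimodalTextbox(label="Query Input", file_types=["image"], file_count="multiple"),
    stop_btn="Stop Generation",
    multimodal=True,  # assumed: required for MultimodalTextbox input dicts
)

if __name__ == "__main__":
    demo.launch()
```

Because `model_inference` is a generator, each `yield` replaces the partial response in the chat window, so the `max_new_tokens=2048` output streams in incrementally rather than appearing all at once.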