Update app.py
app.py CHANGED
@@ -9,7 +9,7 @@ import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

-DESCRIPTION = "#
+DESCRIPTION = "# Geitje-SPIN-7B"

 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

@@ -19,7 +19,7 @@ DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

 if torch.cuda.is_available():
-    model_id = "
+    model_id = "davidberenstein1957/ultra-feedback-dutch-cleaned-hq-spin-geitje-7b-ultra-sft_iter2"
     model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
     tokenizer = AutoTokenizer.from_pretrained(model_id)

@@ -107,11 +107,11 @@ chat_interface = gr.ChatInterface(
     ],
     stop_btn=None,
     examples=[
-        ["
-        ["
-        ["
-        ["
-        ["
+        ["Hi! How gaat het?"],
+        ["Kun je mij uitleggen wat Python is?"],
+        ["Leg het plot van Assepoester uit in een zin."],
+        ["Hoe lang doet een persoon er over om een helicopter op te eten?"],
+        ["Schrijf een artikel met 100 woorden over de nut van AI."],
     ],
 )
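The hunks above only change the demo title, the model id, and the example prompts; the generation function itself is outside the diff. For context, here is a minimal sketch of how the imported TextIteratorStreamer is typically wired to the model loaded above. The chat-template call, sampling settings, and the example prompt are illustrative assumptions, not lines taken from app.py.

```python
# Minimal sketch (not from app.py): streaming a reply with the updated model id.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_id = "davidberenstein1957/ultra-feedback-dutch-cleaned-hq-spin-geitje-7b-ultra-sft_iter2"
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Build a prompt with the model's chat template (using one of the demo's example prompts).
messages = [{"role": "user", "content": "Hi! How gaat het?"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

# The streamer yields decoded text as soon as new tokens are produced.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(input_ids=input_ids, streamer=streamer, max_new_tokens=1024)

# Run generation in a background thread and consume tokens as they arrive,
# which is the same pattern a Gradio ChatInterface generator would use to yield partial text.
Thread(target=model.generate, kwargs=generate_kwargs).start()
for new_text in streamer:
    print(new_text, end="", flush=True)
```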