Initial commit with Selene-1-Mini-Llama implementation
- .python-version +1 -0
- app.py +36 -0
- pyproject.toml +12 -0
- requirements.txt +4 -0
- uv.lock +0 -0
.python-version
ADDED
@@ -0,0 +1 @@
+3.12
app.py
ADDED
@@ -0,0 +1,36 @@
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import gradio as gr
+import spaces
+import torch
+
+model_id = "AtlaAI/Selene-1-Mini-Llama-3.1-8B"
+
+model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+
+@spaces.GPU
+def generate_response(prompt):
+    messages = [{"role": "user", "content": prompt}]
+    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    model_inputs = tokenizer([text], return_tensors="pt").to("cuda")
+
+    generated_ids = model.generate(
+        model_inputs.input_ids,
+        max_new_tokens=512,
+        do_sample=True
+    )
+    generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)]
+
+    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+    return response
+
+demo = gr.Interface(
+    fn=generate_response,
+    inputs=gr.Textbox(label="Enter a prompt"),
+    outputs=gr.Textbox(label="Generated response"),
+    title="Selene-1-Mini-Llama-3.1-8B Demo",
+    description="Enter a prompt and the model will generate a response."
+)
+
+if __name__ == "__main__":
+    demo.launch()
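Note: once the app above is running, the gr.Interface endpoint can also be queried programmatically. Below is a minimal sketch using gradio_client, which is an extra dependency not listed in requirements.txt; the local URL and the "/predict" endpoint name are assumptions based on Gradio's defaults for a single gr.Interface.

from gradio_client import Client  # assumed extra dependency, not part of this commit

# Assumes the demo is running locally on Gradio's default port.
client = Client("http://127.0.0.1:7860/")

# A single gr.Interface is exposed under the default "/predict" endpoint.
result = client.predict("Evaluate the following response for clarity.", api_name="/predict")
print(result)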
pyproject.toml
ADDED
@@ -0,0 +1,12 @@
+[project]
+name = "demo-atlaai-selene-1-mini-llama-3-1-8b"
+version = "0.1.0"
+description = "Add your description here"
+readme = "README.md"
+requires-python = ">=3.12"
+dependencies = [
+    "gradio>=5.13.2",
+    "spaces>=0.32.0",
+    "torch>=2.6.0",
+    "transformers>=4.48.1",
+]
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+transformers
+torch
+gradio
+spaces
uv.lock
ADDED
The diff for this file is too large to render.