Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -22,7 +22,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 HF_TOKEN = os.getenv("HF_TOKEN")
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_name = "
+model_name = "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
 '''
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
@@ -34,10 +34,10 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 
 peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/fd-lora-64x128", torch_dtype=torch.float16, trust_remote_code=True)
 merged_model = peft_model.merge_and_unload()
-
+merged_model.save_pretrained("./coolqwen")
 #model.save_pretrained("./coolqwen")
-
-
+tokenizer.save_pretrained("./coolqwen")
+
 from huggingface_hub import HfApi
 
 api = HfApi()
@@ -46,11 +46,11 @@ api = HfApi()
 
 api.upload_folder(
     folder_path="./coolqwen",
-    repo_id="ehristoforu/
+    repo_id="ehristoforu/ehristoforu/fd-lora-merged-64x128",
     repo_type="model",
     token=HF_TOKEN,
 )
-
+
 
 @spaces.GPU()
 def generate(
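Taken together, the added lines build a merge-and-upload flow: fold the LoRA adapter into its base model, save the merged weights and the tokenizer to ./coolqwen, and push that folder to the Hub. Below is a minimal, self-contained sketch of that flow, not the committed code itself, with two assumptions that are not in the diff: the target repo is created first with create_repo(exist_ok=True), and the repo_id uses a single namespace (a valid Hub repo_id has the form "namespace/name", whereas the committed value repeats "ehristoforu/" twice).

import os
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer
from huggingface_hub import HfApi

HF_TOKEN = os.getenv("HF_TOKEN")

# Load the LoRA adapter together with its base model, then fold the
# adapter weights into the base weights.
peft_model = AutoPeftModelForCausalLM.from_pretrained(
    "ehristoforu/fd-lora-64x128",
    torch_dtype=torch.float16,
    trust_remote_code=True,
)
merged_model = peft_model.merge_and_unload()

# Save the merged weights and the tokenizer of the base model locally.
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")
merged_model.save_pretrained("./coolqwen")
tokenizer.save_pretrained("./coolqwen")

# Push the folder to the Hub. The create_repo call and the single-namespace
# repo_id are assumptions; the diff uploads straight into an existing repo.
api = HfApi()
api.create_repo(repo_id="ehristoforu/fd-lora-merged-64x128", repo_type="model", exist_ok=True, token=HF_TOKEN)
api.upload_folder(
    folder_path="./coolqwen",
    repo_id="ehristoforu/fd-lora-merged-64x128",
    repo_type="model",
    token=HF_TOKEN,
)

Once the upload finishes, the merged checkpoint can be loaded directly with AutoModelForCausalLM.from_pretrained on the new repo id, with no PEFT dependency at inference time.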