ehristoforu committed on
Commit
411e698
·
verified ·
1 Parent(s): e2f02dc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -22,7 +22,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
22
  HF_TOKEN = os.getenv("HF_TOKEN")
23
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
24
 
25
- model_name = "ehristoforu/Phi4-MoE-2x14B-Instruct"
26
 
27
  model = AutoModelForCausalLM.from_pretrained(
28
  model_name,
@@ -31,10 +31,10 @@ model = AutoModelForCausalLM.from_pretrained(
31
  )
32
  tokenizer = AutoTokenizer.from_pretrained(model_name)
33
 
34
- #peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/CoolQwen2.5-3b-it")
35
- #merged_model = peft_model.merge_and_unload()
36
- #merged_model.save_pretrained("./coolqwen")
37
- model.save_pretrained("./coolqwen")
38
  tokenizer.save_pretrained("./coolqwen")
39
 
40
  from huggingface_hub import HfApi
@@ -45,7 +45,7 @@ api = HfApi()
45
 
46
  api.upload_folder(
47
  folder_path="./coolqwen",
48
- repo_id="ehristoforu/Phi4-MoE-2x14B",
49
  repo_type="model",
50
  token=HF_TOKEN,
51
  )
 
22
  HF_TOKEN = os.getenv("HF_TOKEN")
23
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
24
 
25
+ model_name = "tiiuae/Falcon3-7B-Instruct"
26
 
27
  model = AutoModelForCausalLM.from_pretrained(
28
  model_name,
 
31
  )
32
  tokenizer = AutoTokenizer.from_pretrained(model_name)
33
 
34
+ peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/think-lora-qwen-r64")
35
+ merged_model = peft_model.merge_and_unload()
36
+ merged_model.save_pretrained("./coolqwen")
37
+ #model.save_pretrained("./coolqwen")
38
  tokenizer.save_pretrained("./coolqwen")
39
 
40
  from huggingface_hub import HfApi
 
45
 
46
  api.upload_folder(
47
  folder_path="./coolqwen",
48
+ repo_id="ehristoforu/Falcon3-with-lora-think-7b-it",
49
  repo_type="model",
50
  token=HF_TOKEN,
51
  )