Update README.md

README.md
This model has been fine-tuned with the mosaicml/instruct-v3 dataset for 2 epochs on …

## How to use?

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# load the base model
model_path = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)

# load the adapter
model_peft = PeftModel.from_pretrained(model, "azam25/TinyLlama_instruct_generation")
```
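`apply_chat_template` (used below) expects a list of role/content dictionaries. The actual message contents fall between the two diff hunks and are not shown, but a typical chat payload has this shape (illustrative values, not from the README):

```python
# Illustrative only: the README's real message contents are not shown in this diff.
messages = [{
    "role": "user",
    "content": "Explain what instruction tuning does in one paragraph.",
}]
```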
```python
messages = [{
    # ... message contents elided in this diff hunk
}]

def generate_response(message, model):
    # note: uses the `message` argument rather than the global `messages`
    prompt = tokenizer.apply_chat_template(message, tokenize=False)
    encoded_input = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
    model_inputs = encoded_input.to("cuda")
```
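The diff ends mid-function, before any text is generated. A minimal sketch of how `generate_response` might be completed, assuming the standard `transformers` `generate`/`decode` API; `max_new_tokens=256`, sampling, and the prompt-stripping slice are illustrative choices rather than the README's own code:

```python
def generate_response(message, model):
    # Render the chat messages into the model's prompt format.
    prompt = tokenizer.apply_chat_template(message, tokenize=False)
    encoded_input = tokenizer(prompt, return_tensors="pt", add_special_tokens=True)
    model_inputs = encoded_input.to("cuda")
    # Assumed continuation (not shown in this diff): sample a completion
    # and decode only the newly generated tokens.
    generated_ids = model.generate(**model_inputs, max_new_tokens=256, do_sample=True)
    new_tokens = generated_ids[0][model_inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
```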
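If the sketch above matches the rest of the README, invoking the fine-tuned model would look roughly like this (hypothetical call):

```python
# Hypothetical usage, assuming the completed generate_response above.
response = generate_response(messages, model_peft)
print(response)
```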
|