Michael Brunzel committed
Commit b4a7bbc · 1 Parent(s): 671d4b6

Add custom handler file

Files changed (2)
  1. handler.py +70 -0
  2. requirements.txt +5 -0
handler.py ADDED
@@ -0,0 +1,70 @@
+ from typing import Dict, List, Any, Union
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+ from peft import PeftModel
+
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         # load the 8-bit base model and tokenizer from the endpoint path
+         self.model = AutoModelForCausalLM.from_pretrained(
+             path, device_map="auto", load_in_8bit=True)
+         # attach the fine-tuned PEFT adapter on top of the base model
+         self.model = PeftModel.from_pretrained(
+             self.model,
+             "MichaelAI23/falcon-rw-1b_8bit_finetuned",
+             torch_dtype=torch.float16,
+             device_map="auto"
+         )
+         self.tokenizer = AutoTokenizer.from_pretrained(path)
+         # Alpaca-style prompt templates, with and without an input field
+         self.template = {
+             "prompt_input": "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
+             "prompt_no_input": "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n",
+             "response_split": "### Response:"
+         }
+         self.instruction = """Extract the name of the person, the location, the hotel name and the desired date from the following hotel request"""
+
+     def generate_prompt(
+         self,
+         template: Dict[str, str],
+         instruction: str,
+         input: Union[None, str] = None,
+         label: Union[None, str] = None,
+     ) -> str:
+         # returns the full prompt from instruction and optional input;
+         # if a label (= response, = output) is provided, it is also appended.
+         if input:
+             res = template["prompt_input"].format(
+                 instruction=instruction, input=input
+             )
+         else:
+             res = template["prompt_no_input"].format(
+                 instruction=instruction
+             )
+         if label:
+             res = f"{res}{label}"
+         return res
+
+     def __call__(self, data: Dict[str, Any]) -> List[Dict[str, str]]:
+         """
+         Args:
+             data (:dict:):
+                 The payload with the text prompt and generation parameters.
+         """
+         # process input: "inputs" holds the request text, "parameters" the generate kwargs
+         inputs = data.pop("inputs", data)
+         parameters = data.pop("parameters", None)
+
+         inputs = self.generate_prompt(self.template, self.instruction, inputs)
+         # preprocess
+         input_ids = self.tokenizer(inputs, return_tensors="pt").input_ids
+
+         # pass inputs with all kwargs in data
+         if parameters is not None:
+             outputs = self.model.generate(input_ids, **parameters)
+         else:
+             outputs = self.model.generate(input_ids)
+
+         # postprocess the prediction: decode, then cut at the end-of-text token
+         prediction = self.tokenizer.decode(outputs[0])  #, skip_special_tokens=True)
+         prediction = prediction.split("<|endoftext|>")[0]
+         return [{"generated_text": prediction}]
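
For context, an Inference Endpoint constructs EndpointHandler once at startup and invokes it per request with the JSON payload. A minimal local smoke test of that call flow might look like the sketch below; the request text and the "." model path are made-up placeholders, and max_new_tokens is a standard transformers generation parameter:

# hypothetical local smoke test for the handler defined above
from handler import EndpointHandler

handler = EndpointHandler(path=".")  # "." is a placeholder for the model directory
payload = {
    "inputs": "Hi, this is John Smith. I need a room at the Grand Hotel in Berlin on 12 May.",  # made-up request
    "parameters": {"max_new_tokens": 64},
}
result = handler(payload)
print(result[0]["generated_text"])

The handler returns a list with one dict, matching the [{"generated_text": ...}] shape the endpoint serializes back to the client.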
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ bitsandbytes==0.41.1
+ datasets==2.14.5
+ peft==0.5.0
+ sentencepiece==0.1.99
+ transformers==4.32.1
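
These pins back the handler's loading path: bitsandbytes supplies the load_in_8bit quantization and peft supplies PeftModel, while sentencepiece covers tokenizers that need it. A quick sanity check after installing (a sketch; expected versions follow the pins above):

# verify the pinned packages resolved, e.g. after: pip install -r requirements.txt
import bitsandbytes, peft, transformers

print(bitsandbytes.__version__, peft.__version__, transformers.__version__)
# expected: 0.41.1 0.5.0 4.32.1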