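# app.py -- Gradio chat UI around a Llama-2 model fine-tuned for hotel booking,
# exposed to LangChain through a minimal custom LLM wrapper.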
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from langchain.llms.base import LLM
from langchain import PromptTemplate, LLMChain
from langchain.memory import ConversationBufferMemory

def initialize_model_and_tokenizer(model_name="KvrParaskevi/Llama-2-7b-Hotel-Booking-Model"):
    # Load the fine-tuned checkpoint and its tokenizer from the Hugging Face Hub.
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = initialize_model_and_tokenizer()

class CustomLLM(LLM):
    def _call(self, prompt, stop=None, run_manager=None) -> str:
        inputs = tokenizer(prompt, return_tensors="pt")
        result = model.generate(
            input_ids=inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_new_tokens=20,  # short replies; raise this for longer answers
        )
        # Decode only the newly generated tokens so the prompt is not echoed back.
        return tokenizer.decode(result[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)

    @property
    def _llm_type(self) -> str:
        return "custom"

llm = CustomLLM()
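
# Optional smoke test for the wrapper (a sketch; assumes the checkpoint loaded
# successfully and that a short prompt is enough to elicit text). Uncomment to
# verify generation works before wiring up the UI:
# print(llm("Hello, I would like to book a room."))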

template = """<<SYS>>
You are an AI having a conversation with a human. Below is an instruction that describes a task.
Write a response that appropriately completes the request.
Reply with the most helpful and logical answer. During the conversation you need to ask the user
the following questions to complete the hotel booking task.
1) Where would you like to stay and when?
2) How many people are staying in the room?
3) Do you prefer any amenities like breakfast included or a gym?
4) What is your name, your email address and phone number?
Make sure you receive a logical answer from the user for every question to complete the hotel
booking process.
<</SYS>>
Previous conversation:
{history}
Human: {input}
AI:"""
prompt = PromptTemplate(template=template, input_variables=["history", "input"])

# Conversation memory fills the {history} slot of the prompt on every turn.
memory = ConversationBufferMemory(memory_key="history")
llm_chain = LLMChain(prompt=prompt, llm=llm, memory=memory)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def user(user_message, history):
        return "", history + [[user_message, None]]

    def bot(history):
        print("Question: ", history[-1][0])
        # Memory supplies {history}; "input" is the only key left to provide.
        bot_message = llm_chain.run(input=history[-1][0])
        print("Response: ", bot_message)
        history[-1][1] = bot_message
        return history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
    def clear_chat():
        # Reset both the visible chat and the chain's conversation memory.
        memory.clear()
        return None

    clear.click(clear_chat, None, chatbot, queue=False)

demo.queue()
demo.launch()
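
# Note: when running locally, demo.launch(share=True) would create a temporary
# public link; on a hosted Space the default launch() is sufficient.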