import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from langchain.llms.base import LLM
from langchain import PromptTemplate, LLMChain
from langchain.memory import ConversationBufferMemory

# Load the fine-tuned hotel-booking checkpoint and its tokenizer once at startup.
def initialize_model_and_tokenizer(model_name="KvrParaskevi/Llama-2-7b-Hotel-Booking-Model"):
    model = AutoModelForCausalLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = initialize_model_and_tokenizer()
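# Optional sanity check (not part of the original script; a hedged sketch with an
# illustrative prompt): one raw generate() round-trip confirms the checkpoint and
# tokenizer load correctly before any LangChain plumbing is added.
# test_inputs = tokenizer("Hello, I would like to book a room.", return_tensors="pt")
# test_output = model.generate(**test_inputs, max_new_tokens=20)
# print(tokenizer.decode(test_output[0], skip_special_tokens=True))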
# Minimal LangChain wrapper around the raw transformers model.
class CustomLLM(LLM):
    def _call(self, prompt, stop=None, run_manager=None) -> str:
        inputs = tokenizer(prompt, return_tensors="pt")
        output = model.generate(input_ids=inputs.input_ids, max_new_tokens=20)
        # Decode only the newly generated tokens so the prompt is not echoed back.
        result = tokenizer.decode(output[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
        return result

    # LangChain's LLM base class expects _llm_type to be a property.
    @property
    def _llm_type(self) -> str:
        return "custom"
llm = CustomLLM()
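# Illustrative direct call (commented out; an assumption about usage, not original
# code): LangChain's LLM base class makes the wrapper callable, so a single-turn
# prompt can be tested without building a chain.
# print(llm("Human: Hi, do you have rooms available?\nAI:"))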
template = """<<SYS>> | |
You are an AI having conversation with a human. Below is an instruction that describes a task. | |
Write a response that appropriately completes the request. | |
Reply with the most helpful and logic answer. During the conversation you need to ask the user | |
the following questions to complete the hotel booking task. | |
1) Where would you like to stay and when? | |
2) How many people are staying in the room? | |
3) Do you prefer any ammenities like breakfast included or gym? | |
4) What is your name, your email address and phone number? | |
Make sure you receive a logical answer from the user from every question to complete the hotel | |
booking process. | |
<</SYS>> | |
Previous conversation: | |
{history} | |
Human: {input} | |
AI:""" | |
prompt = PromptTemplate(template=template, input_variables=["history", "input"])
# The template expects a {history} variable, so attach conversation memory;
# without it the chain raises a missing-input error at run time.
memory = ConversationBufferMemory(memory_key="history")
llm_chain = LLMChain(prompt=prompt, llm=llm, memory=memory)
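# Illustrative chain call (an assumption, not in the original app): the memory
# supplies {history}, so only {input} is passed; predict() returns the reply string.
# reply = llm_chain.predict(input="I need a double room in Paris next weekend.")
# print(reply)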
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    # Append the user's message to the chat history and clear the textbox.
    def user(user_message, history):
        return "", history + [[user_message, None]]

    # Generate the bot reply for the latest user message.
    def bot(history):
        print("Question: ", history[-1][0])
        bot_message = llm_chain.predict(input=history[-1][0])
        print("Response: ", bot_message)
        history[-1][1] = bot_message
        return history
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)

    # Reset both the visible chat and the chain's conversation memory, so a
    # cleared conversation does not leak into the next one.
    def clear_all():
        memory.clear()
        return None

    clear.click(clear_all, None, chatbot, queue=False)

demo.queue()
demo.launch()