Spaces:
Runtime error
KvrParaskevi
committed on
Update app.py
app.py
CHANGED
@@ -14,18 +14,29 @@ def initialize_model_and_tokenizer(model_name="KvrParaskevi/Llama-2-7b-Hotel-Boo

 model, tokenizer = initialize_model_and_tokenizer()

-
-
-
-
-
-
+def load_pipeline():
+    tokenizer, model = load_model()
+    pipe = pipeline("text-generation",
+                    model = model,
+                    tokenizer = tokenizer,
+                    max_new_tokens = 20,
+                    top_k = 30,
+                    early_stopping = True,
+                    num_beams = 2,
+                    temperature = 0.1,
+                    repetition_penalty = 1.03)

-
-
-    return "custom"
+    llm = HuggingFacePipeline(pipeline = pipe)
+    return llm

-
+def chat_interface(inputs):
+    question = inputs["input"]
+    chat_history = inputs["history"]
+    # Assuming `chain` is your instance of ConversationalRetrievalChain
+    result = chain.run({"input": question, "history": chat_history})
+    return result["response"]
+
+llm = load_pipeline()

 template = """<<SYS>>
 You are an AI having conversation with a human. Below is an instruction that describes a task.
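Note that the added load_pipeline() calls load_model(), which is never defined in app.py as shown (the weights are already loaded by initialize_model_and_tokenizer() on line 15), and that chat_interface() reads from an undefined chain and indexes the return value of chain.run(), which LangChain returns as a plain string, with ["response"]; both are consistent with the Space's "Runtime error" status. A minimal self-contained sketch of the same pipeline setup, assuming pipeline comes from transformers and HuggingFacePipeline from langchain.llms (the file's imports are outside this hunk):

# Sketch only, not the committed code: reuse the globals created on
# line 15 instead of calling the undefined load_model().
from transformers import pipeline
from langchain.llms import HuggingFacePipeline  # import path assumed

def load_pipeline(model, tokenizer):
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=20,
        top_k=30,
        early_stopping=True,
        num_beams=2,
        temperature=0.1,
        repetition_penalty=1.03,
    )
    return HuggingFacePipeline(pipeline=pipe)

llm = load_pipeline(model, tokenizer)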
@@ -49,24 +60,27 @@ memory = ConversationBufferMemory(memory_key="history", llm = llm, prompt = prom
 llm_chain = ConversationChain(prompt=prompt, llm=llm, memory = memory)

 with gr.Blocks() as demo:
-
-
+    gr.Markdown("Hotel Booking Assistant Chat 🤗")
+    chatbot = gr.Chatbot(label="Chat history")
+    message = gr.Textbox(label="Ask me a question!")
     clear = gr.Button("Clear")
     #llm_chain, llm = init_chain(model, tokenizer)

-
-
-
-
-
-
-
-
-
-
+    # Convert chat history to list of tuples
+    chat_history_tuples = []
+    for message in chat_history:
+        chat_history_tuples.append((message[0], message[1]))
+
+    result = llm_chain({"input": query, "history": chat_history_tuples})
+
+iface = gr.Interface(
+    fn=chat_interface,
+    inputs=[
+        gr.inputs.Textbox(lines=1, label="Question"),
+        gr.inputs.Textbox(lines=5, label="Chat History"),
+    ],
+    outputs="text"
+)
+iface.launch()

-    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)
-    clear.click(lambda: None, None, chatbot, queue=False)

-demo.queue()
-demo.launch()
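The context lines show only the top of the prompt template and the chain setup. For reference, ConversationChain requires the prompt to expose exactly the history and input variables supplied by its memory, and ConversationBufferMemory needs neither the llm nor the prompt argument it is given on line 59/48. A sketch of that wiring; everything below the second template line is illustrative, since the diff truncates the template body:

# Sketch; the template text after its second line is assumed, not
# recovered from the diff.
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

template = """<<SYS>>
You are an AI having conversation with a human. Below is an instruction that describes a task.
<</SYS>>

Current conversation:
{history}
Human: {input}
AI:"""

prompt = PromptTemplate(input_variables=["history", "input"], template=template)
memory = ConversationBufferMemory(memory_key="history")
llm_chain = ConversationChain(prompt=prompt, llm=llm, memory=memory)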
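As committed, the Blocks body evaluates llm_chain against the undefined names query and chat_history at import time, the message Textbox is shadowed by the loop variable, and demo is built but never launched (only the separate iface is, through the deprecated gr.inputs namespace). One possible event-driven rewrite that keeps the existing layout, sketched under the assumption that llm_chain and its memory from line 60 are in scope:

# Sketch, not the committed code. ConversationBufferMemory already tracks
# the running history, so the chain only needs each new user message.
import gradio as gr

def respond(user_message, history):
    response = llm_chain.predict(input=user_message)  # returns a string
    history.append((user_message, response))
    return "", history

with gr.Blocks() as demo:
    gr.Markdown("Hotel Booking Assistant Chat 🤗")
    chatbot = gr.Chatbot(label="Chat history")
    message = gr.Textbox(label="Ask me a question!")
    clear = gr.Button("Clear")

    message.submit(respond, [message, chatbot], [message, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()
demo.launch()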