KvrParaskevi committed
Commit 287918d · verified · 1 Parent(s): c735c22

Update app.py

Files changed (1): app.py (+5, -5)
app.py CHANGED
@@ -19,7 +19,7 @@ def load_pipeline():
     pipe = pipeline("text-generation",
                     model= model,
                     tokenizer = tokenizer,
-                    max_new_tokens = 100,
+                    max_new_tokens = 50,
                     top_k = 30,
                     early_stopping=True,
                     num_beams = 2,
@@ -37,8 +37,8 @@ def chat_interface(inputs):
         chat_history_tuples.append((message[0], message[1]))
 
     #result = llm_chain({"input": query, "history": chat_history_tuples})
-    result = llm_chain.predict(input = inputs)
-    return result
+    result = llm_chain.predict(input = inputs, max_length = 50)
+    return result["response"]
 
 llm = load_pipeline()
 chat_history = []
@@ -60,14 +60,14 @@ the following questions to complete the hotel booking task.
 Make sure you receive a logical answer from the user from every question to complete the hotel
 booking process.
 <</SYS>>
-Previous conversation:
+Current conversation:
 {history}
 Human: {input}
 AI:"""
 prompt = PromptTemplate(template=template, input_variables=["history", "input"])
 
 memory = ConversationBufferMemory(memory_key="history", llm = llm)
-llm_chain = ConversationChain(llm=llm, memory = memory)
+llm_chain = ConversationChain(llm=llm, memory = memory, prompt= prompt)
 
 with gr.Blocks() as demo:
     #gr.Markdown("Hotel Booking Assistant Chat 🤗")
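
For context, a minimal sketch of the load_pipeline() pattern the first hunk edits, assuming a Hugging Face causal LM; the model id and import paths here are illustrative guesses, since the commit only shows the pipeline(...) call. The point is that generation settings such as max_new_tokens, which this commit lowers from 100 to 50, are fixed on the transformers pipeline before it is wrapped for LangChain.

    # Hypothetical reconstruction of load_pipeline(); model_id is a placeholder.
    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
    from langchain.llms import HuggingFacePipeline

    def load_pipeline():
        model_id = "some-org/some-chat-model"  # placeholder, not the app's real model
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(model_id)

        # Decoding settings live on the transformers pipeline itself;
        # max_new_tokens caps the length of every generated reply.
        pipe = pipeline("text-generation",
                        model=model,
                        tokenizer=tokenizer,
                        max_new_tokens=50,
                        top_k=30,
                        early_stopping=True,
                        num_beams=2)

        # Wrap the pipeline so LangChain chains can drive it as an LLM.
        return HuggingFacePipeline(pipeline=pipe)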
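
And a sketch of the chain wiring the last hunk changes, under the classic LangChain API; FakeListLLM stands in for the real pipeline so the snippet runs standalone, and the shortened template is illustrative. Passing prompt= is what makes ConversationChain use the custom template instead of its built-in default; calling the chain as a dict yields a {"response": ...} mapping, whereas predict() returns the bare reply string.

    from langchain.llms.fake import FakeListLLM
    from langchain.prompts import PromptTemplate
    from langchain.memory import ConversationBufferMemory
    from langchain.chains import ConversationChain

    llm = FakeListLLM(responses=["Sure - for which dates?"])  # stand-in for the pipeline LLM

    template = """Ask questions to complete the hotel booking task.
    Current conversation:
    {history}
    Human: {input}
    AI:"""
    prompt = PromptTemplate(template=template, input_variables=["history", "input"])
    memory = ConversationBufferMemory(memory_key="history")

    # prompt= routes the custom template into the chain (the last changed line above).
    llm_chain = ConversationChain(llm=llm, memory=memory, prompt=prompt)

    # Calling the chain returns a dict keyed by "response", so result["response"] works;
    # by contrast, llm_chain.predict(input=...) returns the reply string directly.
    result = llm_chain({"input": "I need a room for two nights."})
    print(result["response"])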