Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -9,34 +9,34 @@ def create_conversation_prompt(name1: str, name2: str, persona_style: str):
|
|
9 |
"""
|
10 |
Create a prompt that instructs the model to produce exactly 15 messages
|
11 |
of conversation, alternating between name1 and name2, starting with name1.
|
|
|
|
|
12 |
"""
|
13 |
prompt_template_str = f"""
|
14 |
-
You are
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
-
|
25 |
-
- Do not repeat the conversation.
|
26 |
-
- Do not produce anything after the 15th message.
|
27 |
- Use everyday language, can ask questions, show opinions.
|
28 |
-
- Use emojis sparingly
|
29 |
-
- No
|
30 |
-
-
|
31 |
|
32 |
-
|
33 |
"""
|
34 |
return ChatPromptTemplate.from_template(prompt_template_str)
|
35 |
|
36 |
def create_summary_prompt(name1: str, name2: str, conversation: str):
|
37 |
-
"""
|
38 |
summary_prompt_str = f"""
|
39 |
-
|
40 |
|
41 |
{conversation}
|
42 |
|
@@ -44,7 +44,9 @@ def create_summary_prompt(name1: str, name2: str, conversation: str):
|
|
44 |
Title: <A short descriptive title of the conversation>
|
45 |
Summary: <A few short sentences highlighting the main points, tone, and conclusion>
|
46 |
|
47 |
-
Do not continue the conversation, do not repeat it,
|
|
|
|
|
48 |
"""
|
49 |
return ChatPromptTemplate.from_template(summary_prompt_str)
|
50 |
|
@@ -53,11 +55,9 @@ def main():
|
|
53 |
|
54 |
model_names = [
|
55 |
"meta-llama/Llama-3.3-70B-Instruct",
|
56 |
-
"
|
57 |
"lmsys/vicuna-13b-v1.5",
|
58 |
-
"
|
59 |
-
"EleutherAI/gpt-neox-20b",
|
60 |
-
"dice-research/lola_v1"
|
61 |
]
|
62 |
selected_model = st.selectbox("Select a model:", model_names)
|
63 |
|
@@ -96,15 +96,14 @@ def main():
|
|
96 |
|
97 |
try:
|
98 |
# Generate all 15 messages in one go
|
99 |
-
conversation = conversation_chain.run(chat_history="", input="")
|
100 |
-
conversation = conversation.strip()
|
101 |
|
102 |
st.subheader("Final Conversation:")
|
103 |
st.text(conversation)
|
104 |
print("Conversation Generation Complete.\n")
|
105 |
print("Full Conversation:\n", conversation)
|
106 |
|
107 |
-
#
|
108 |
summary_prompt = create_summary_prompt(name1, name2, conversation)
|
109 |
summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
|
110 |
|
@@ -122,4 +121,3 @@ def main():
|
|
122 |
|
123 |
if __name__ == "__main__":
|
124 |
main()
|
125 |
-
|
|
|
9 |
"""
|
10 |
Create a prompt that instructs the model to produce exactly 15 messages
|
11 |
of conversation, alternating between name1 and name2, starting with name1.
|
12 |
+
|
13 |
+
We will be very explicit and not allow any formatting except the required lines.
|
14 |
"""
|
15 |
prompt_template_str = f"""
|
16 |
+
You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
|
17 |
+
{name1} speaks first (message 1), then {name2} (message 2), then {name1} (message 3), and so forth,
|
18 |
+
alternating until all 15 messages are complete. The 15th message is by {name1}.
|
19 |
+
|
20 |
+
Requirements:
|
21 |
+
- Output exactly 15 lines, no more, no less.
|
22 |
+
- Each line must be a single message in the format:
|
23 |
+
{name1}: <message> or {name2}: <message>
|
24 |
+
- Do not add any headings, numbers, sample outputs, or explanations.
|
25 |
+
- Do not mention code, programming, or instructions.
|
26 |
+
- Each message should be 1-2 short sentences, friendly, natural, reflecting the style: {persona_style}.
|
|
|
|
|
27 |
- Use everyday language, can ask questions, show opinions.
|
28 |
+
- Use emojis sparingly if it fits the style (no more than 1-2 total).
|
29 |
+
- No repeated lines, each message should logically follow from the previous one.
|
30 |
+
- Do not produce anything after the 15th message. No extra lines or text.
|
31 |
|
32 |
+
Produce all 15 messages now:
|
33 |
"""
|
34 |
return ChatPromptTemplate.from_template(prompt_template_str)
|
35 |
|
36 |
def create_summary_prompt(name1: str, name2: str, conversation: str):
|
37 |
+
"""Prompt for generating a title and summary."""
|
38 |
summary_prompt_str = f"""
|
39 |
+
Below is a completed 15-message conversation between {name1} and {name2}:
|
40 |
|
41 |
{conversation}
|
42 |
|
|
|
44 |
Title: <A short descriptive title of the conversation>
|
45 |
Summary: <A few short sentences highlighting the main points, tone, and conclusion>
|
46 |
|
47 |
+
Do not continue the conversation, do not repeat it, and do not add extra formatting beyond the two lines:
|
48 |
+
- One line starting with "Title:"
|
49 |
+
- One line starting with "Summary:"
|
50 |
"""
|
51 |
return ChatPromptTemplate.from_template(summary_prompt_str)
|
52 |
|
|
|
55 |
|
56 |
model_names = [
|
57 |
"meta-llama/Llama-3.3-70B-Instruct",
|
58 |
+
"meta-llama/Llama-3.1-405B-Instruct",
|
59 |
"lmsys/vicuna-13b-v1.5",
|
60 |
+
"mistral-7b"
|
|
|
|
|
61 |
]
|
62 |
selected_model = st.selectbox("Select a model:", model_names)
|
63 |
|
|
|
96 |
|
97 |
try:
|
98 |
# Generate all 15 messages in one go
|
99 |
+
conversation = conversation_chain.run(chat_history="", input="").strip()
|
|
|
100 |
|
101 |
st.subheader("Final Conversation:")
|
102 |
st.text(conversation)
|
103 |
print("Conversation Generation Complete.\n")
|
104 |
print("Full Conversation:\n", conversation)
|
105 |
|
106 |
+
# Summarize the conversation
|
107 |
summary_prompt = create_summary_prompt(name1, name2, conversation)
|
108 |
summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
|
109 |
|
|
|
121 |
|
122 |
if __name__ == "__main__":
|
123 |
main()
|
|