Update pages/4_AutoEngage.py
pages/4_AutoEngage.py (+48 -3)
```diff
@@ -12,6 +12,14 @@ from langchain.text_splitter import CharacterTextSplitter
 from langchain.agents.agent_toolkits import create_retriever_tool
 from langchain.agents.agent_toolkits import create_conversational_retrieval_agent
 from langchain.chat_models import ChatOpenAI
+from langchain.chains import ConversationChain
+from langchain import OpenAI
+from langchain.chains.conversation.memory import ConversationBufferMemory
+from langchain.callbacks import get_openai_callback
+
+with open('docs/ae_ontology.ttl', 'r') as file:
+    # Read the entire contents of the file into a string
+    ttl_file_contents = file.read()
 
 import streamlit as st
 
@@ -65,10 +73,47 @@ if OpenAI_Key:
 
 if result_2:
     st.subheader('JSON-LD Description')
-
-
+    # Let GPT digest our ontology by calling the 16k model to avoid the token limit.
+    # First, initialize the large language model.
+    llm = OpenAI(
+        temperature=0,
+        openai_api_key=OpenAI_Key,
+        model_name="gpt-3.5-turbo-16k"
+    )
+    conversation = ConversationChain(llm=llm)
+
+
+
+    conversation_buf = ConversationChain(
+        llm=llm,
+        memory=ConversationBufferMemory()
+    )
+    # Internal helper: run a query and log its token usage.
+    def count_tokens(chain, query):
+        with get_openai_callback() as cb:
+            result = chain.run(query)
+            print(f'Spent a total of {cb.total_tokens} tokens')
+
+        return result
+
+    count_tokens(conversation_buf,
+        f"""Please digest this TTL file content and simply acknowledge that you have understood it.
+        No need to say anything else.\n {ttl_file_contents}"""
+    )
+
+    result_3 = count_tokens(conversation_buf,
+        f"""I'll provide the deception strategy plan to you.
+        Kindly generate a JSON-LD portrayal of the strategy, adhering to the ontology you've acquired.
+        Ensure the JSON-LD aligns with the ontology framework, and refrain from using any classes not included in the established ontology.
+
+        {result_2['output']}
+        """
+    )
+
+
+    result_3
 
-    st.write(result_3
+    st.write(result_3)
 
 
 
```
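A few review notes on the added block. `gpt-3.5-turbo-16k` is a chat model, so LangChain's `ChatOpenAI` wrapper, which this file already imports, is the more natural interface than the completion-style `OpenAI` class. Also, the first `conversation = ConversationChain(llm=llm)` is never used, and the bare `result_3` expression duplicates the `st.write(result_3)` below it via Streamlit's magic rendering. A minimal cleaned-up core might look like the sketch below; variable names follow the commit, everything else is a suggestion rather than the author's code:

```python
# A hedged sketch of the same setup with the unused chain and the duplicate
# render removed; ChatOpenAI is the wrapper intended for gpt-3.5-turbo-* models.
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferMemory

llm = ChatOpenAI(
    temperature=0,
    openai_api_key=OpenAI_Key,          # the key collected earlier in the page
    model_name="gpt-3.5-turbo-16k",
)

conversation_buf = ConversationChain(
    llm=llm,
    memory=ConversationBufferMemory(),  # keeps the ontology turn in context
)
```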
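Separately, Streamlit re-executes the whole script on every widget interaction, so the top-level `open('docs/ae_ontology.ttl')` re-reads the ontology on each rerun. If that ever becomes noticeable, a cached loader is the idiomatic fix. A sketch, assuming the path from the commit; `st.cache_data` memoises on the argument:

```python
import streamlit as st

# Cache the ontology so page reruns don't re-read the file from disk.
@st.cache_data
def load_ontology(path: str = 'docs/ae_ontology.ttl') -> str:
    with open(path, 'r') as file:
        return file.read()

ttl_file_contents = load_ontology()
```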
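The `count_tokens` helper reports its tally with `print`, which lands in the server console rather than on the page. `get_openai_callback` also tracks the prompt/completion split and an estimated cost, so a variant that reports into the Streamlit UI could look like this (a sketch, not part of the commit):

```python
import streamlit as st
from langchain.callbacks import get_openai_callback

def count_tokens(chain, query):
    # Run the chain while the callback tallies OpenAI token usage.
    with get_openai_callback() as cb:
        result = chain.run(query)
    # Surface usage on the page instead of the server console.
    st.caption(
        f"Tokens: {cb.total_tokens} "
        f"(prompt {cb.prompt_tokens}, completion {cb.completion_tokens}), "
        f"est. cost ${cb.total_cost:.4f}"
    )
    return result
```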
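One design note on the two-turn flow: `ConversationBufferMemory` replays the entire transcript on every call, so the second request re-sends the full TTL contents that the first "digest" turn already transmitted. If the 16k context gets tight, a single combined prompt does the same job in one round trip. A sketch using `llm.predict`, which LangChain's model wrappers support:

```python
# One request instead of two, so the ontology is transmitted only once.
result_3 = llm.predict(
    f"Here is an ontology in Turtle format:\n{ttl_file_contents}\n\n"
    "Generate a JSON-LD portrayal of the following deception strategy, "
    "using only classes defined in that ontology:\n"
    f"{result_2['output']}"
)
```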
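Finally, `st.write(result_3)` displays whatever text the model returned, valid JSON-LD or not. Parsing the reply first makes failures visible. In the sketch below, the fence-stripping is an assumption about chat models wrapping answers in Markdown fences, not behaviour the commit relies on:

```python
import json
import streamlit as st

def render_jsonld(raw: str) -> None:
    cleaned = raw.strip()
    if cleaned.startswith('```'):
        # Drop the opening ```json line and the closing fence, if present.
        cleaned = cleaned.split('\n', 1)[-1].rsplit('```', 1)[0]
    try:
        st.json(json.loads(cleaned))  # pretty-printed, collapsible view
    except json.JSONDecodeError:
        st.warning('Model output is not valid JSON; showing raw text.')
        st.write(raw)

render_jsonld(result_3)
```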
|