gkbalu committed on
Commit
89db5bf
·
1 Parent(s): 00e4cf6

Squad AI first commit

Files changed (7)
  1. .env +5 -0
  2. Dockerfile +10 -0
  3. README.md +5 -6
  4. app.py +235 -0
  5. chainlit.md +1 -0
  6. requirements.txt +7 -0
  7. squadusersinfo.psv +13 -0
.env ADDED
@@ -0,0 +1,5 @@
+ OPENAI_API_KEY=sk-proj-gWXNkaZYyjH4X4g1OVPET3BlbkFJplRpheelyapIU1rXE4F7
+ LANGCHAIN_API_KEY=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
+ #LANGCHAIN_PROJECT=SQUADAI-1234
+ LANGCHAIN_TRACING_V2=false
+ TAVILY_API_KEY=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
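
These keys are read at startup by app.py through python-dotenv. A minimal sketch of that loading pattern, using only the variable names defined above (illustrative, not part of the commit):

import os
from dotenv import load_dotenv

load_dotenv(override=True)  # same call app.py makes before building its chains

# The OpenAI and Tavily clients read their keys from the environment.
assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY missing from .env"
assert os.getenv("TAVILY_API_KEY"), "TAVILY_API_KEY missing from .env"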
Dockerfile ADDED
@@ -0,0 +1,10 @@
+ FROM python:3.11
+ RUN useradd -m -u 1000 user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+ WORKDIR $HOME/app
+ COPY . .
+ RUN chown -R user:user .
+ RUN pip install -r requirements.txt
+ USER user
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
README.md CHANGED
@@ -1,10 +1,9 @@
  ---
- title: SquadAI
- emoji: 👀
- colorFrom: green
- colorTo: blue
+ title: SQUAD AI
+ emoji: 📉
+ colorFrom: pink
+ colorTo: yellow
  sdk: docker
  pinned: false
+ app_port: 7860
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,235 @@
+ import os
+ import getpass
+ import json
+ import operator
+ from operator import itemgetter
+ from typing import TypedDict, Annotated
+ from uuid import uuid4
+ 
+ import pandas as pd
+ import chainlit as cl
+ from dotenv import load_dotenv
+ 
+ from langgraph.graph import StateGraph, END
+ from langgraph.prebuilt import ToolInvocation, ToolExecutor
+ from langchain_core.messages import AnyMessage, SystemMessage, HumanMessage, ToolMessage, FunctionMessage
+ from langchain_core.utils.function_calling import convert_to_openai_function
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_core.runnables import RunnableConfig
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+ from langchain.tools import tool
+ from langchain.tools.render import format_tool_to_openai_function
+ from langchain.schema.output_parser import StrOutputParser
+ from langchain_community.document_loaders import CSVLoader
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain_community.tools.tavily_search import TavilySearchResults
+ from langchain_community.vectorstores import Qdrant, FAISS
+ #from langchain_community.agent_toolkits import SQLDatabaseToolkit
+ #from langchain.sql_database import SQLDatabase
+ #from sqlalchemy import create_engine
+ #from qdrant_client import QdrantClient
+ 
+ load_dotenv(override=True)
+ 
+ 
+ # Agent state: the running list of messages, appended to by every node.
+ class AgentState(TypedDict):
+     messages: Annotated[list[AnyMessage], operator.add]
+ 
+ 
+ # Load the pipe-delimited user profiles and split them into chunks.
+ document_loader = CSVLoader("./squadusersinfo.psv", csv_args={'delimiter': '|'})
+ documents = document_loader.load()
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=30)
+ split_documents = text_splitter.split_documents(documents)
+ 
+ embeddings = OpenAIEmbeddings()
+ #client = QdrantClient(location=":memory:")
+ hf_embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
+ 
+ # Embed the chunks into a FAISS index in batches of 32.
+ for i in range(0, len(split_documents), 32):
+     if i == 0:
+         vectorstore = FAISS.from_documents(split_documents[i:i+32], hf_embeddings)
+         continue
+     vectorstore.add_documents(split_documents[i:i+32])
+ 
+ hf_retriever = vectorstore.as_retriever()
+ 
+ 
+ RAG_PROMPT = """
+ CONTEXT:
+ {context}
+ 
+ QUERY:
+ {question}
+ 
+ You are a helpful assistant. You will search for the user's interest in the stored list of users. If you cannot find a match, look for matches in the chat history.
+ If you still cannot find any match, respond with "Sorry, at present there is no match for your interest" and ask whether this user can be matched if a future interest matches them.
+ Ask for all necessary details to consider this user as a future match.
+ If the question is outside of finding a match, say "I don't know".
+ """
+ rag_prompt = ChatPromptTemplate.from_template(RAG_PROMPT)
+ 
+ # Model used by the RAG chain; swap in a smaller model to reduce inference cost.
+ model = ChatOpenAI(model="gpt-4o")
+ 
+ rag_chain = (
+     {"context": itemgetter("question") | hf_retriever, "question": itemgetter("question")}
+     | rag_prompt | model | StrOutputParser()
+ )
+ 
+ #result = rag_chain.invoke({"question": "Anyone interested in squash?"})
+ #print(result)
+ 
+ 
+ @tool
+ def matchUser(query: str) -> str:
+     """A tool to find the stored user details that match the user's query."""
+     result = rag_chain.invoke({"question": query})
+     return result
+ 
+ 
+ tools = [TavilySearchResults(max_results=1), matchUser]
+ tool_executor = ToolExecutor(tools)
+ 
+ # Agent model (separate from the RAG model above; rag_chain keeps its own
+ # reference to the gpt-4o instance), with the tools bound as OpenAI functions.
+ model = ChatOpenAI(temperature=0, streaming=True)
+ functions = [convert_to_openai_function(t) for t in tools]
+ model = model.bind_functions(functions)
+ 
+ 
+ def should_continue(state):
+     """Route to the tool node while the model keeps requesting function calls."""
+     messages = state['messages']
+     last_message = messages[-1]
+     if "function_call" not in last_message.additional_kwargs:
+         return "end"
+     else:
+         return "continue"
+ 
+ 
+ def call_model(state):
+     """Run the function-calling agent model on the accumulated messages."""
+     messages = state['messages']
+     response = model.invoke(messages)
+     return {"messages": [response]}
+ 
+ 
+ def call_tool(state):
+     """Execute the tool requested by the last model message."""
+     messages = state['messages']
+     last_message = messages[-1]
+     action = ToolInvocation(
+         tool=last_message.additional_kwargs["function_call"]["name"],
+         tool_input=json.loads(last_message.additional_kwargs["function_call"]["arguments"]),
+     )
+     response = tool_executor.invoke(action)
+     function_message = FunctionMessage(content=str(response), name=action.tool)
+     return {"messages": [function_message]}
+ 
+ 
+ def print_messages(messages):
+     """Pretty-print the initial query, tool calls, tool responses and agent replies."""
+     next_is_tool = False
+     initial_query = True
+     for message in messages["messages"]:
+         if "function_call" in message.additional_kwargs:
+             print()
+             print(f'Tool Call - Name: {message.additional_kwargs["function_call"]["name"]} + Query: {message.additional_kwargs["function_call"]["arguments"]}')
+             next_is_tool = True
+             continue
+         if next_is_tool:
+             print(f"Tool Response: {message.content}")
+             next_is_tool = False
+             continue
+         if initial_query:
+             print(f"Initial Query: {message.content}")
+             print()
+             initial_query = False
+             continue
+         print()
+         print(f"Agent Response: {message.content}")
+ 
+ 
+ def construct_response(messages):
+     """Collect tool responses (excluding raw URL payloads) and agent replies into one string."""
+     next_is_tool = False
+     initial_query = True
+     response = ""
+     for message in messages["messages"]:
+         if "function_call" in message.additional_kwargs:
+             print()
+             print(f'Tool Call - Name: {message.additional_kwargs["function_call"]["name"]} + Query: {message.additional_kwargs["function_call"]["arguments"]}')
+             next_is_tool = True
+             continue
+         if next_is_tool:
+             print(f"Tool Response: {message.content}")
+             if "url" not in message.content:
+                 response = response + message.content
+             next_is_tool = False
+             continue
+         if initial_query:
+             print(f"Initial Query: {message.content}")
+             print()
+             initial_query = False
+             continue
+         print()
+         print(f"Agent Response: {message.content}")
+         response = response + message.content
+     return response
+ 
+ 
+ # Build the agent graph: model node, tool node, conditional routing between them.
+ workflow = StateGraph(AgentState)
+ workflow.add_node("agent", call_model)
+ workflow.add_node("action", call_tool)
+ workflow.set_entry_point("agent")
+ 
+ workflow.add_conditional_edges(
+     "agent",
+     should_continue,
+     {
+         "continue": "action",
+         "end": END
+     }
+ )
+ workflow.add_edge('action', 'agent')
+ 
+ app = workflow.compile()
+ 
+ #messages = [HumanMessage(content="Any user named Ganesh. Where to play cricket")]
+ #inputs = {"messages": [HumanMessage(content="Anyone interested in cricket? Provide more information about who is interested. Also get me some locations where I can play cricket in Toronto")]}
+ #result = app.invoke({"messages": messages})
+ #print_messages(result)
+ #messages = app.invoke(inputs)
+ #print_messages(messages)
+ 
+ 
+ @cl.on_message
+ async def run_convo(message: cl.Message):
+     msg = cl.Message(content="")
+     await msg.send()
+     await cl.sleep(1)  # hack to simulate a loader
+ 
+     inputs = {"messages": [HumanMessage(content=message.content)]}
+ 
+     res = app.invoke(inputs, config=RunnableConfig(callbacks=[
+         cl.LangchainCallbackHandler(
+             to_ignore=["ChannelRead", "RunnableLambda", "ChannelWrite", "__start__", "_execute"]
+         )]))
+ 
+     content = construct_response(res)
+     #for response in res["messages"]:
+     #    if message.content not in response:
+     #        content = content + response.content
+     await cl.Message(content=content).send()
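
The commented-out test lines above hint at how the compiled graph can be exercised without the Chainlit UI. A minimal sketch, assuming the .env keys and squadusersinfo.psv are in place and the snippet runs alongside app.py:

from langchain_core.messages import HumanMessage

# Invoke the compiled LangGraph workflow directly, bypassing Chainlit.
inputs = {"messages": [HumanMessage(content="Anyone interested in cricket?")]}
result = app.invoke(inputs)

print_messages(result)               # pretty-print the query/tool/agent exchange
print(construct_response(result))    # or collapse it into a single reply string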
chainlit.md ADDED
@@ -0,0 +1 @@
+ Welcome to SquadAI 👋
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ chainlit
+ langgraph
+ langchain
+ langchain_openai
+ langchain_experimental
+ python-dotenv
+ qdrant-client
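
app.py also imports langchain_community, pandas, and the FAISS vector store (which needs the faiss-cpu package), none of which are pinned here by name; some may arrive transitively via langchain. A small, purely illustrative pre-flight check for those modules:

import importlib

# Modules app.py relies on that requirements.txt does not list explicitly.
for mod in ("langchain_community", "pandas", "faiss"):
    try:
        importlib.import_module(mod)
    except ImportError:
        print(f"missing dependency: {mod}")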
squadusersinfo.psv ADDED
@@ -0,0 +1,13 @@
+ Name|Location|Interest|Goals|Biography
+ Ganesh|Toronto|Cricket,Tennis,technology|Create useful software for poor people's upliftment|Software engineer
+ Jaron|New York|Soccer,Hiking,Networking|Build, ship, share|Product manager
+ Mike|Sacramento|Cricket,Tennis,technology|Create useful software for poor people's upliftment|Software engineer
+ John|Ottawa|Soccer,Hiking,Networking|Build, ship, share|Product manager
+ Smith|Brussels|Cricket,Tennis,technology|Create useful software for poor people's upliftment|Software engineer
+ Varun|Frankfurt|Soccer,Hiking,Networking|Build, ship, share|Product manager
+ Kevin|Mississauga|Cricket,Tennis,technology|Create useful software for poor people's upliftment|Software engineer
+ Virat|Montreal|Soccer,Hiking,Networking|Build, ship, share|Product manager
+ AJesh|Toronto|Cricket,Tennis,technology|Create useful software for poor people's upliftment|Software engineer
+ Mohan|Quebec|Soccer,Hiking,Networking|Build, ship, share|Product manager
+ karthik|Toronto|Cricket,Tennis,technology|Create useful software for poor people's upliftment|Software engineer
+ Ram|New York|Soccer,Hiking,Networking|Build, ship, share|Product manager
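
The file is pipe-delimited; app.py loads it with CSVLoader(csv_args={'delimiter': '|'}) and indexes each row as a document in FAISS. A quick, illustrative way to sanity-check the rows with pandas (already imported in app.py):

import pandas as pd

# Each row below becomes one retrievable user profile.
users = pd.read_csv("./squadusersinfo.psv", sep="|")
print(users[["Name", "Location", "Interest"]])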