Rohil Bansal committed on
Commit
1396cc4
·
1 Parent(s): d92decd

Shifted from OpenAI (`ChatOpenAI`) to Azure OpenAI (`AzureChatOpenAI`)

Browse files
src/__pycache__/index.cpython-312.pyc CHANGED
Binary files a/src/__pycache__/index.cpython-312.pyc and b/src/__pycache__/index.cpython-312.pyc differ
 
src/llm.py CHANGED
@@ -7,7 +7,7 @@ from typing import Literal
7
 
8
  from langchain_core.prompts import ChatPromptTemplate
9
  from langchain_core.pydantic_v1 import BaseModel, Field
10
- from langchain_openai import ChatOpenAI
11
 
12
  #%%
13
  # Data model
@@ -21,7 +21,7 @@ class RouteQuery(BaseModel):
21
 
22
 
23
  # LLM with function call
24
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)
25
  structured_llm_router = llm.with_structured_output(RouteQuery)
26
 
27
  #%%
@@ -60,7 +60,7 @@ class GradeDocuments(BaseModel):
60
 
61
  #%%
62
  # LLM with function call
63
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)
64
  structured_llm_grader = llm.with_structured_output(GradeDocuments)
65
 
66
  # Prompt
@@ -90,7 +90,7 @@ from langchain_core.output_parsers import StrOutputParser
90
  prompt = hub.pull("rlm/rag-prompt")
91
 
92
  # LLM
93
- llm = ChatOpenAI(model_name="gpt-4o-mini", temperature=0.3)
94
 
95
 
96
  # Post-processing
@@ -120,7 +120,7 @@ class GradeHallucinations(BaseModel):
120
 
121
 
122
  # LLM with function call
123
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)
124
  structured_llm_grader = llm.with_structured_output(GradeHallucinations)
125
 
126
  # Prompt
@@ -150,7 +150,7 @@ class GradeAnswer(BaseModel):
150
 
151
 
152
  # LLM with function call
153
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)
154
  structured_llm_grader = llm.with_structured_output(GradeAnswer)
155
 
156
  # Prompt
@@ -170,7 +170,7 @@ answer_grader.invoke({"question": question, "generation": generation})
170
  ### Question Re-writer
171
 
172
  # LLM
173
- llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.3)
174
 
175
  # Prompt
176
  system = """You a question re-writer that converts an input question to a better version that is optimized \n
 
7
 
8
  from langchain_core.prompts import ChatPromptTemplate
9
  from langchain_core.pydantic_v1 import BaseModel, Field
10
+ from langchain_openai import AzureChatOpenAI
11
 
12
  #%%
13
  # Data model
 
21
 
22
 
23
  # LLM with function call
24
+ llm = AzureChatOpenAI(model="gpt-4o-mini", temperature=0.3)
25
  structured_llm_router = llm.with_structured_output(RouteQuery)
26
 
27
  #%%
 
60
 
61
  #%%
62
  # LLM with function call
63
+ llm = AzureChatOpenAI(model="gpt-4o-mini", temperature=0.3)
64
  structured_llm_grader = llm.with_structured_output(GradeDocuments)
65
 
66
  # Prompt
 
90
  prompt = hub.pull("rlm/rag-prompt")
91
 
92
  # LLM
93
+ llm = AzureChatOpenAI(model_name="gpt-4o-mini", temperature=0.3)
94
 
95
 
96
  # Post-processing
 
120
 
121
 
122
  # LLM with function call
123
+ llm = AzureChatOpenAI(model="gpt-4o-mini", temperature=0.3)
124
  structured_llm_grader = llm.with_structured_output(GradeHallucinations)
125
 
126
  # Prompt
 
150
 
151
 
152
  # LLM with function call
153
+ llm = AzureChatOpenAI(model="gpt-4o-mini", temperature=0.3)
154
  structured_llm_grader = llm.with_structured_output(GradeAnswer)
155
 
156
  # Prompt
 
170
  ### Question Re-writer
171
 
172
  # LLM
173
+ llm = AzureChatOpenAI(model="gpt-4o-mini", temperature=0.3)
174
 
175
  # Prompt
176
  system = """You a question re-writer that converts an input question to a better version that is optimized \n
vectordb/08d73b15-e800-45c5-a450-5b9d696166f3/length.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:71bde5af77b7e5e53efccdfbabf3da1e125a2ebfdc944d9b9ed18b04ecf527fe
3
  size 4000
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36145df1efa1a3bfba46c79e2b76437f2fd83cbfea85af22e6d7bebd2958e995
3
  size 4000