File size: 2,132 Bytes
8202002
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67


import os
import re
import time

from llmware.prompts import Prompt, HumanInTheLoop
from llmware.configs import LLMWareConfig


def contract_analysis_simple(model_name):
    """Run a simple topic-filtered Q&A analysis over a folder of contracts.

    For each file in the contracts folder, each (topic, question) pair in
    ``query_list`` is used to (1) parse/chunk the contract and filter its text
    by the topic key, and (2) prompt the loaded model with the question plus
    that filtered source. Results are printed, prompt state is saved, and a
    CSV of the interaction is exported for human review.

    Args:
        model_name (str): Name of the model to load via ``Prompt().load_model``.

    Returns:
        int: 0 on completion.
    """

    # my contracts folder path - note: this assumes a prior preparation step
    # that placed the contract files in this directory
    contracts_path = "/home/ubuntu/contracts/"

    # query list - "topic filter key" : "question to ask the model"
    query_list = {"executive employment agreement": "What are the name of the two parties?",
                  "base salary": "What is the executive's base salary?",
                  "governing law": "What is the governing law?"}

    print("\nupdate: loading model - ", model_name)

    prompter = Prompt().load_model(model_name)

    # start the clock to measure processing time, once model loaded
    t0 = time.time()

    for i, contract in enumerate(os.listdir(contracts_path), start=1):

        # skip sub-directories or other non-file entries in the folder
        if not os.path.isfile(os.path.join(contracts_path, contract)):
            continue

        print("\nAnalyzing contract: ", str(i), contract)

        for key, value in query_list.items():

            # contract is parsed, text-chunked, and then filtered by topic key;
            # the filtered chunks are attached to the prompter as source material
            prompter.add_source_document(contracts_path, contract, query=key)

            # calling the LLM with 'source' information from the contract
            # automatically packaged into the prompt
            responses = prompter.prompt_with_source(value, prompt_name="just_the_facts", temperature=0.3)

            for response in responses:
                # flatten newlines so each answer prints on a single line
                print("LLM Response - ", key, " - ", re.sub(r"[\n]", " ", response["llm_response"]))

            # we're done with this contract/topic, clear the source from the prompt
            prompter.clear_source_materials()

    # capture time of the processing
    print("\nupdate: time cycle: ", time.time() - t0)

    # save jsonl report to /prompt_history folder
    print("\nupdate: prompt state saved at: ", os.path.join(LLMWareConfig.get_prompt_path(), prompter.prompt_id))

    prompter.save_state()

    # export the interaction to CSV for human-in-the-loop review
    csv_output = HumanInTheLoop(prompter).export_current_interaction_to_csv()

    print("update: csv output - ", csv_output)

    return 0


if __name__ == "__main__":

    model = "llmware/dragon-deci-6b-v0"

    contract_analysis_simple(model)