tunable OpenAIChatAtomicFlow
#1
by yeeef - opened

OpenAIChatAtomicFlow.py  +199 -2

OpenAIChatAtomicFlow.py  CHANGED
@@ -2,11 +2,14 @@ import pprint
 from copy import deepcopy
 
 import hydra
+import logging
 
 import colorama
 import time
 
-from typing import List, Dict, Optional, Any
+from typing import List, Dict, Optional, Any, Callable, Tuple
+
+from flaml import tune, BlendSearch
 
 from langchain import PromptTemplate
 import langchain
@@ -22,6 +25,7 @@ from flows.messages.chat_message import ChatMessage
 from flows.utils.caching_utils import flow_run_cache
 
 log = utils.get_pylogger(__name__)
+logger = log
 
 
 class OpenAIChatAtomicFlow(AtomicFlow):
@@ -43,6 +47,28 @@ class OpenAIChatAtomicFlow(AtomicFlow):
     demonstrations_response_template: PromptTemplate = None
     response_annotators: Optional[Dict[str, MessageAnnotator]] = {}
 
+    default_search_space = {
+        # "model": tune.choice(
+        #     [
+        #         # "text-ada-001",
+        #         # "text-babbage-001",
+        #         # "text-davinci-003",
+        #         "gpt-3.5-turbo",
+        #         # "gpt-4",
+        #     ]
+        # ),
+        "temperature_or_top_p": tune.choice(
+            [
+                {"temperature": tune.uniform(0, 2)},
+                {"top_p": tune.uniform(0, 1)},
+            ]
+        ),
+        "max_tokens": tune.lograndint(1000, 4000),
+        # we use the langchain api: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/base.py#L201
+        # it only takes the first generation as the output, thus n is not relevant
+        # "n": tune.randint(1, 100),
+    }
+
     def __init__(self, **kwargs):
         self._validate_parameters(kwargs)
         super().__init__(**kwargs)
@@ -107,7 +133,10 @@ class OpenAIChatAtomicFlow(AtomicFlow):
         response_annotators = config.get("response_annotators", {})
         if len(response_annotators) > 0:
             for key, config in response_annotators.items():
-                response_annotators[key] = hydra.utils.instantiate(config, _convert_="partial")
+                if isinstance(config, MessageAnnotator):
+                    response_annotators[key] = config
+                else:
+                    response_annotators[key] = hydra.utils.instantiate(config, _convert_="partial")
         return {"response_annotators": response_annotators}
 
     @classmethod
@@ -321,3 +350,171 @@ class OpenAIChatAtomicFlow(AtomicFlow):
 
         # ~~~ The final answer should be in self.flow_state, thus allow_class_namespace=False ~~~
         return self._get_keys_from_state(keys=expected_outputs, allow_class_namespace=False)
+
+    @classmethod
+    def tune(
+        cls,
+        tune_dps: List[Dict],
+        metric: str,
+        mode: str,
+        eval_func: Callable,
+        api_key: str,
+        log_file_name: Optional[str] = None,  # TODO(yeeef)
+        inference_budget: Optional[float] = None,
+        optimization_budget: Optional[float] = None,
+        num_samples: Optional[int] = 1,
+        logging_level: Optional[int] = logging.WARN,  # TODO(yeeef)
+        initial_flow_config: Optional[Dict] = None,  # if not supplied, the default flow config of the class (xxx.yaml) is used
+        **config,
+    ) -> Tuple[Dict, Any]:  # tune.ExperimentAnalysis
+        """
+        Args:
+            - tune_dps (list): The list of data points to tune the hyperparameters on.
+            - metric (str): The metric to optimize.
+            - mode (str): The optimization mode, "min" or "max".
+            - eval_func (Callable): The evaluation function for responses.
+                The function should take a response and a data point as input
+                and return a dict of metrics.
+            - log_file_name (str, optional): The log file.
+            - inference_budget (float, optional): The inference budget, in dollars per instance.
+            - optimization_budget (float, optional): The optimization budget, in dollars in total.
+            - num_samples (int, optional): The number of samples to evaluate.
+                -1 means no hard restriction on the number of trials,
+                and the actual number is decided by optimization_budget. Defaults to 1.
+            - logging_level (optional): The logging level. Defaults to logging.WARNING.
+            - **config (dict): The search space to update over the default search space.
+                For prompt, please provide a string/Callable or a list of strings/Callables.
+                    - If prompt is provided for chat models, it will be converted to messages under the role "user".
+                    - Do not provide both prompt and messages for chat models; provide exactly one of them.
+                    - A string template will be used to generate a prompt for each data instance
+                      using `prompt.format(**data)`.
+                    - A callable template will be used to generate a prompt for each data instance
+                      using `prompt(data)`.
+                For stop, please provide a string, a list of strings, or a list of lists of strings.
+                For messages (chat models only), please provide a list of messages (for a single chat prefix)
+                or a list of lists of messages (for multiple choices of chat prefix to choose from).
+                Each message should be a dict with keys "role" and "content". The value of "content" can be a string/Callable template.
+
+        Returns:
+            - dict: The optimized hyperparameter setting.
+            - tune.ExperimentAnalysis: The tuning results.
+        """
+
+        initial_flow_config = initial_flow_config or cls.get_config()
+        space = cls.default_search_space.copy()
+
+        if config is not None:
+            space.update(config)
+            if "messages" in space:
+                space.pop("prompt", None)
+            temperature = space.pop("temperature", None)
+            top_p = space.pop("top_p", None)
+            if temperature is not None and top_p is None:
+                space["temperature_or_top_p"] = {"temperature": temperature}
+            elif temperature is None and top_p is not None:
+                space["temperature_or_top_p"] = {"top_p": top_p}
+            elif temperature is not None and top_p is not None:
+                space.pop("temperature_or_top_p")
+                space["temperature"] = temperature
+                space["top_p"] = top_p
+                log.warning("temperature and top_p are not recommended to vary together.")
+
+        # Note: currently we fix the model rather than make it tunable
+        search_alg = BlendSearch(
+            cost_attr="cost",
+            cost_budget=optimization_budget,
+            metric=metric,
+            mode=mode,
+            space=space,
+        )
+
+        # Args:
+        #     evaluation_function: A user-defined evaluation function.
+        #         It takes a configuration as input and outputs an evaluation
+        #         result (can be a numerical value or a dictionary of string
+        #         and numerical value pairs) for the input configuration.
+        #         For machine learning tasks, it usually involves training and
+        #         scoring a machine learning model, e.g., through validation loss.
+
+
+        def updated_flow_config_with_search_config(flow_config: Dict[str, Any], search_config: Dict[str, Any]):
+            """
+            The inputs are not mutated.
+            """
+            flow_config = deepcopy(flow_config)
+            search_config = deepcopy(search_config)
+
+            temperature_or_top_p = search_config.pop("temperature_or_top_p", None)
+            if temperature_or_top_p is not None:
+                search_config.update(temperature_or_top_p)
+
+            flow_config["model_name"] = search_config.get("model", flow_config["model_name"])
+            generation_parameters = flow_config["generation_parameters"]
+            for generation_parameter in generation_parameters:
+                if generation_parameter == "model_kwargs":
+                    continue
+                if generation_parameter in search_config:
+                    generation_parameters[generation_parameter] = search_config[generation_parameter]
+
+            model_kwargs = generation_parameters["model_kwargs"]
+            for model_kwarg in model_kwargs:
+                if model_kwarg in search_config:
+                    model_kwargs[model_kwarg] = search_config[model_kwarg]
+
+            return flow_config
+
+        def tune_run_eval(search_config: Dict[str, Any]) -> Dict[str, float]:
+            """
+            evaluation_function: A user-defined evaluation function.
+                It takes a configuration as input and outputs an evaluation
+                result (can be a numerical value or a dictionary of string
+                and numerical value pairs) for the input configuration.
+                For machine learning tasks, it usually involves training and
+                scoring a machine learning model, e.g., through validation loss.
+            """
+            # extract the flow_construct_kwargs from search_config
+            """
+            {'expected_inputs': [], 'expected_outputs': [], 'flow_type': 'Flow', 'verbose': True, 'dry_run': False, 'namespace_clearing_after_run': True, 'n_api_retries': 6, 'wait_time_between_retries': 20, 'system_name': 'system', 'user_name': 'user', 'assistant_name': 'assistant', 'response_annotators': {'code_extractor': <flows.message_annotators.regex_extractor_first.RegexFirstOccurrenceExtractor object at 0x7f532121bc70>}, 'query_message_prompt_template': {'_target_': 'langchain.PromptTemplate', 'template': '# Problem statement\n{{problem_description}}\n\n# Input description\n{{input_description}}\n\n# Output description\n{{output_description}}\n\n{{io_examples_and_explanation}}\n\n\nThe input should be read from the standard input and the output should be passed to the standard output.\nReturn Python code that solves the problem. Reply in the following format:\n```python\n{{code_placeholder}}\n```', 'input_variables': ['problem_description', 'input_description', 'output_description', 'io_examples_and_explanation'], 'partial_variables': {'code_placeholder': '{{python_code}}'}, 'template_format': 'jinja2'}, 'demonstrations': None, 'demonstrations_response_template': None, 'name': 'CodeAgent', 'description': 'ToDO: add description', 'model_name': 'gpt-3.5-turbo', 'generation_parameters': {'n': 1, 'max_tokens': 3000, 'temperature': 0.3, 'model_kwargs': {'top_p': 0.2, 'frequency_penalty': 0, 'presence_penalty': 0}}, 'system_message_prompt_template': {'_target_': 'langchain.PromptTemplate', 'template': 'Your goal is to provide executable Python code that solves a competitive programming problem. The code should correctly handle all corner cases in order to pass the hidden test cases, which are used to evaluate the correctness of the solution.\n\nThe user will specify the problem by providing you with:\n - the problem statement\n - input description\n - output description\n - example test cases\n - (optional) explanation of the test cases\n\nThe user will provide you with a task and an output format that you will strictly follow.', 'input_variables': [], 'template_format': 'jinja2'}, 'human_message_prompt_template': {'_target_': 'langchain.PromptTemplate', 'template': '{{query}}', 'input_variables': ['query'], 'template_format': 'jinja2'}}
+            """
+            log.info(f"Tuning with config: {search_config}")
+            # TODO: the code currently only works when there is no subspace, i.e. there is only one model to tune with
+            # align search_config with flow_config
+            updated_flow_config = updated_flow_config_with_search_config(flow_config=initial_flow_config, search_config=search_config)
+            log.info(f"Updated flow_config: {updated_flow_config}")
+            # flow_launcher = FlowAPILauncher(flow, 1, False, 3, 0, ["code"])  TODO: maybe refactor with flow_launcher
+
+            # TODO: limitations: the langchain api call does not give us the cost of the api call, and only gives us
+            # one result no matter the n
+            final_metrics = {}
+            for sample in tune_dps:
+                sample["api_key"] = api_key
+                # log.info(f"sample: {sample}")
+                flow = cls.instantiate_from_config(updated_flow_config)
+                task_message = flow.package_task_message(recipient_flow=flow,
+                                                         task_name="run_task",
+                                                         task_data=sample,
+                                                         expected_outputs=["code"])
+                output_message = flow(task_message)
+                # log.info(f"output_message: {output_message}")
+
+                metrics = eval_func(output_message.data['code'], sample)
+                log.info(f"metrics for dp: {metrics}")
+                if not final_metrics:
+                    final_metrics = metrics
+                else:
+                    for k, v in metrics.items():
+                        final_metrics[k] += v
+            log.info(f"final metric {final_metrics} for this config {search_config}")
+            return final_metrics
+
+        analysis = tune.run(
+            tune_run_eval,
+            search_alg=search_alg,
+            num_samples=num_samples,
+            log_file_name=log_file_name,
+            verbose=3,
+        )
+        best_search_config = analysis.best_config
+        flow_config = updated_flow_config_with_search_config(initial_flow_config, best_search_config)
+        log.info(f"best search config found: {best_search_config}, analysis: {analysis.best_result}")
+        return flow_config, analysis
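
For reference, a minimal usage sketch of the new tune() classmethod, not part of the diff. The import path, eval function, data-point fields, and budget values are illustrative assumptions; the data-point keys mirror the CodeAgent example config dumped in the docstring above.

import logging

from flaml import tune

from flows.flows.openai_chat_atomic_flow import OpenAIChatAtomicFlow  # hypothetical import path


def eval_func(code: str, sample: dict) -> dict:
    # Hypothetical metric: score one generated solution for one data point.
    # A real evaluator would run the candidate code against the hidden tests.
    return {"success": float(bool(code))}


tune_dps = [
    {
        "problem_description": "...",
        "input_description": "...",
        "output_description": "...",
        "io_examples_and_explanation": "...",
    },
]

best_flow_config, analysis = OpenAIChatAtomicFlow.tune(
    tune_dps=tune_dps,
    metric="success",
    mode="max",
    eval_func=eval_func,
    api_key="sk-...",
    optimization_budget=1.0,   # dollars in total; forwarded to BlendSearch as cost_budget
    num_samples=10,            # number of trials for tune.run
    logging_level=logging.WARN,
    max_tokens=tune.lograndint(500, 2000),  # **config override, merged into default_search_space
)
print(best_flow_config["generation_parameters"])

Note that the first return value is a complete flow config with the best trial's hyperparameters baked in, so it can be passed straight back to cls.instantiate_from_config for inference.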