# Llamole: config/generate/qwen_drug.yaml

### model
model_name_or_path: Qwen/Qwen2-7B-Instruct
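# special tokens added to the tokenizer; judging by their names, they delimit the design,
# retrosynthesis, and rollback spans that Llamole interleaves with the generated text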
new_special_tokens: <design_start>,<design_end>,<design_body>,<molecule>,<retro_start>,<retro_end>,<retro_body>,<rollback_start>,<rollback_end>
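# pretrained graph modules (decoder, encoder, property predictor), the LoRA adapter for the
# base LLM, and the graph-to-LM connector weights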
graph_decoder_path: saves/graph_decoder
graph_encoder_path: saves/graph_encoder
graph_predictor_path: saves/graph_predictor
adapter_name_or_path: saves/Qwen2-7B-Instruct-Adapter
graph_lm_connector_path: saves/Qwen2-7B-Instruct-Adapter/connector

### generation
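# decoding settings: total sequence cap, new-token cap, and nucleus sampling (temperature / top_p)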
max_length: 512
max_new_tokens: 128
temperature: 0.6
top_p: 0.9

### method
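# inference-only setup: the LoRA adapter is loaded but no training is performed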
finetuning_type: lora
do_train: false
flash_attn: disabled
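# number of learned query tokens used to project graph embeddings into the LM
# (assumed from the parameter name and the connector setup above)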
learned_query_size: 8

### dataset
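# MolQA drug example prompts, rendered with the Qwen chat template;
# cutoff_len truncates the tokenized prompt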
dataset: molqa_drug_examples
template: qwen
cutoff_len: 128
overwrite_cache: true
preprocessing_num_workers: 16
output_dir: null
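# run the model in pure bfloat16 precision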
bf16: true
pure_bf16: true

### eval
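# generation batch size per device; external experiment tracking disabled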
per_device_eval_batch_size: 6
report_to: 'none'