# ################################
# Model: Llama2 Model + NLL
# Authors:
#  Pooneh Mousavi 2023
# ################################

# URL for the llama2 model
model_hub: "HaNguyen/test_llama2" #meta-llama/Llama-2-7b-chat-hf
llama2_folder: recipes/MultiWOZ/response_generation/llama2/results/train_with_llama2/1995/save/llama2_checkpoint/

# history_window, i.e. how many user-system exchanges to consider as context.
max_history: 2

# decoder settings
freeze_model: True
num_beams: 8
max_new_tokens: 50
top_k: 45
top_p: 0.9

# LLAMA2 model
model: !new:custom.LLAMA2_expanded
    source: !ref <model_hub>
    freeze: !ref <freeze_model>
    save_path: !ref <llama2_folder>
    max_new_tokens: !ref <max_new_tokens>
    num_beams: !ref <num_beams>
    top_k: !ref <top_k>
    top_p: !ref <top_p>
    with_peft: True

# Masks
padding_mask: !name:speechbrain.lobes.models.transformer.Transformer.get_key_padding_mask

pretrainer: !new:speechbrain.utils.parameter_transfer.Pretrainer
    loadables:
        model: !ref <model>

modules:
    model: !ref <model>
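
# ---------------------------------------------------------------------------
# Usage note (kept as comments so this file remains valid YAML; not part of
# the recipe itself): a minimal sketch of how a hyperparameter file like this
# is typically loaded with hyperpyyaml in SpeechBrain. The filename
# "train_llama2.yaml" is an assumption, and the snippet assumes it runs from
# the recipe folder so that the `custom` module defining LLAMA2_expanded is
# importable.
#
#   from hyperpyyaml import load_hyperpyyaml
#
#   with open("train_llama2.yaml") as fin:
#       hparams = load_hyperpyyaml(fin)  # instantiates `model` via !new:
#
#   # Fetch and load the pretrained checkpoint declared under `loadables`.
#   hparams["pretrainer"].collect_files()
#   hparams["pretrainer"].load_collected()
# ---------------------------------------------------------------------------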