Rafii committed
Commit 853389e · 1 Parent(s): 9e67127
Files changed (1)
  1. llama_roughwork.ipynb +444 -0
llama_roughwork.ipynb ADDED
@@ -0,0 +1,444 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stderr",
10
+ "output_type": "stream",
11
+ "text": [
12
+ "/opt/anaconda3/envs/f1llama/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
13
+ " from .autonotebook import tqdm as notebook_tqdm\n",
14
+ "Fetching 7 files: 100%|██████████| 7/7 [00:00<00:00, 92038.02it/s]\n"
15
+ ]
16
+ }
17
+ ],
18
+ "source": [
19
+ "from mlx_lm import load, generate\n",
20
+ "model, tokenizer = load(\"mlx-community/Meta-Llama-3-8B-Instruct-8bit\")"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": 3,
26
+ "metadata": {},
27
+ "outputs": [
28
+ {
29
+ "data": {
30
+ "text/plain": [
31
+ "'Surprising Facts About F1, title:Surprising Facts About F1, type:article, url:/surprising-facts-about-f1/,}\\nSurprising Facts About F1\\nFormula 1 (F1) is one of the most popular and thrilling forms of motorsport, with millions of fans around the world. However, there are many surprising facts about F1 that even the most dedicated fans may not know. Here are some of the most interesting and surprising facts about F1:\\n\\n'"
32
+ ]
33
+ },
34
+ "execution_count": 3,
35
+ "metadata": {},
36
+ "output_type": "execute_result"
37
+ }
38
+ ],
39
+ "source": [
40
+ "generate(model=model, tokenizer=tokenizer, prompt=\"role:Tell me something surprising about f1, content:\")"
41
+ ]
42
+ },
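+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch (untested): format the request with the model's chat template instead of the\n",
+ "# ad-hoc \"role:..., content:\" prompt string used above. Assumes the mlx_lm TokenizerWrapper\n",
+ "# forwards apply_chat_template from the wrapped Hugging Face tokenizer.\n",
+ "messages = [{\"role\": \"user\", \"content\": \"Tell me something surprising about f1\"}]\n",
+ "prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
+ "generate(model=model, tokenizer=tokenizer, prompt=prompt)"
+ ]
+ },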
43
+ {
44
+ "cell_type": "code",
45
+ "execution_count": 7,
46
+ "metadata": {},
47
+ "outputs": [
48
+ {
49
+ "ename": "TypeError",
50
+ "evalue": "train_model() missing 1 required positional argument: 'args'",
51
+ "output_type": "error",
52
+ "traceback": [
53
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
54
+ "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
55
+ "Cell \u001b[0;32mIn[7], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mmlx_lm\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m lora\n\u001b[0;32m----> 3\u001b[0m \u001b[43mlora\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain_model\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mtokenizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtokenizer\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 6\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrain_set\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfine_tune_train.jsonl\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 7\u001b[0m \u001b[43m \u001b[49m\u001b[43mvalid_set\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfine_tune_test.jsonl\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
56
+ "\u001b[0;31mTypeError\u001b[0m: train_model() missing 1 required positional argument: 'args'"
57
+ ]
58
+ }
59
+ ],
60
+ "source": [
61
+ "from mlx_lm import lora\n",
62
+ "\n",
63
+ "lora.train_model(\n",
64
+ " model=model, \n",
65
+ " tokenizer=tokenizer,\n",
66
+ " train_set=\"fine_tune_train.jsonl\",\n",
67
+ " valid_set=\"fine_tune_test.jsonl\")"
68
+ ]
69
+ },
70
+ {
71
+ "cell_type": "code",
72
+ "execution_count": 20,
73
+ "metadata": {},
74
+ "outputs": [
75
+ {
76
+ "name": "stdout",
77
+ "output_type": "stream",
78
+ "text": [
79
+ "Help on function train_model in module mlx_lm.lora:\n",
80
+ "\n",
81
+ "train_model(args, model: mlx.nn.layers.base.Module, tokenizer: mlx_lm.tokenizer_utils.TokenizerWrapper, train_set, valid_set, training_callback: mlx_lm.tuner.trainer.TrainingCallback = None)\n",
82
+ "\n"
83
+ ]
84
+ }
85
+ ],
86
+ "source": [
87
+ "help(lora.train_model)"
88
+ ]
89
+ },
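+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Per the signature above, train_model takes an `args` object as its first positional\n",
+ "# parameter; omitting it caused the TypeError two cells back. Minimal sketch (untested):\n",
+ "# any object exposing the expected attributes works, e.g. a SimpleNamespace mirroring the\n",
+ "# TrainArgs dataclass built further down in this notebook.\n",
+ "from types import SimpleNamespace\n",
+ "train_args = SimpleNamespace(\n",
+ "    fine_tune_type='lora', num_layers=16, batch_size=4, iters=10, learning_rate=1e-05,\n",
+ "    adapter_path='./', lora_parameters={'rank': 16, 'alpha': 16, 'dropout': 0.0, 'scale': 10.0},\n",
+ "    # ...remaining fields as in the TrainArgs dataclass below\n",
+ ")\n",
+ "# lora.train_model(args=train_args, model=model, tokenizer=tokenizer,\n",
+ "#                  train_set='fine_tune_train.jsonl', valid_set='fine_tune_test.jsonl')"
+ ]
+ },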
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": null,
93
+ "metadata": {},
94
+ "outputs": [],
95
+ "source": []
96
+ },
97
+ {
98
+ "cell_type": "code",
99
+ "execution_count": 18,
100
+ "metadata": {},
101
+ "outputs": [
102
+ {
103
+ "ename": "TypeError",
104
+ "evalue": "train_model() got an unexpected keyword argument 'lora_params'",
105
+ "output_type": "error",
106
+ "traceback": [
107
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
108
+ "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
109
+ "Cell \u001b[0;32mIn[18], line 6\u001b[0m\n\u001b[1;32m 1\u001b[0m lora_params \u001b[38;5;241m=\u001b[39m {\n\u001b[1;32m 2\u001b[0m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlora_rank\u001b[39m\u001b[38;5;124m'\u001b[39m: \u001b[38;5;241m8\u001b[39m, \u001b[38;5;66;03m# Rank of the LoRA adapters\u001b[39;00m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# Add other parameters as needed\u001b[39;00m\n\u001b[1;32m 4\u001b[0m }\n\u001b[0;32m----> 6\u001b[0m \u001b[43mlora\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain_model\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 7\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 8\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrain_set\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfine_tune_train.jsonl\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 9\u001b[0m \u001b[43m \u001b[49m\u001b[43mlora_params\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mlora_params\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 10\u001b[0m \u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m3\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Number of training epochs\u001b[39;49;00m\n\u001b[1;32m 11\u001b[0m \u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m32\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Batch size\u001b[39;49;00m\n\u001b[1;32m 12\u001b[0m \u001b[43m \u001b[49m\u001b[43mlearning_rate\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1e-5\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# Learning rate\u001b[39;49;00m\n\u001b[1;32m 13\u001b[0m \u001b[43m)\u001b[49m\n",
110
+ "\u001b[0;31mTypeError\u001b[0m: train_model() got an unexpected keyword argument 'lora_params'"
111
+ ]
112
+ }
113
+ ],
114
+ "source": [
115
+ "lora_params = {\n",
116
+ " 'lora_rank': 8, # Rank of the LoRA adapters\n",
117
+ " # Add other parameters as needed\n",
118
+ "}\n",
119
+ "\n",
120
+ "lora.train_model(\n",
121
+ " model=model,\n",
122
+ " train_set=\"fine_tune_train.jsonl\",\n",
123
+ " lora_params=lora_params,\n",
124
+ " epochs=3, # Number of training epochs\n",
125
+ " batch_size=32, # Batch size\n",
126
+ " learning_rate=1e-5, # Learning rate\n",
127
+ ")"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": 15,
133
+ "metadata": {},
134
+ "outputs": [
135
+ {
136
+ "name": "stdout",
137
+ "output_type": "stream",
138
+ "text": [
139
+ "Starting training..., iters: 100\n"
140
+ ]
141
+ },
142
+ {
143
+ "ename": "KeyboardInterrupt",
144
+ "evalue": "",
145
+ "output_type": "error",
146
+ "traceback": [
147
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
148
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
149
+ "Cell \u001b[0;32mIn[15], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mmlx\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01moptimizers\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01moptim\u001b[39;00m\n\u001b[1;32m 2\u001b[0m optimizer \u001b[38;5;241m=\u001b[39m optim\u001b[38;5;241m.\u001b[39mAdam(learning_rate\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1e-3\u001b[39m)\n\u001b[0;32m----> 3\u001b[0m \u001b[43mlora\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43mtokenizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtokenizer\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mtrain_dataset\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfine_tune_train.jsonl\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 6\u001b[0m \u001b[43m \u001b[49m\u001b[43mval_dataset\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mfine_tune_test.jsonl\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 7\u001b[0m \u001b[43m \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptimizer\u001b[49m\u001b[43m)\u001b[49m\n",
150
+ "File \u001b[0;32m/opt/anaconda3/envs/f1llama/lib/python3.10/site-packages/mlx_lm/tuner/trainer.py:242\u001b[0m, in \u001b[0;36mtrain\u001b[0;34m(model, tokenizer, optimizer, train_dataset, val_dataset, args, loss, iterate_batches, training_callback)\u001b[0m\n\u001b[1;32m 240\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m it \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m1\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m it \u001b[38;5;241m%\u001b[39m args\u001b[38;5;241m.\u001b[39msteps_per_eval \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m it \u001b[38;5;241m==\u001b[39m args\u001b[38;5;241m.\u001b[39miters:\n\u001b[1;32m 241\u001b[0m stop \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mperf_counter()\n\u001b[0;32m--> 242\u001b[0m val_loss \u001b[38;5;241m=\u001b[39m \u001b[43mevaluate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 243\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 244\u001b[0m \u001b[43m \u001b[49m\u001b[43mdataset\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mval_dataset\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 245\u001b[0m \u001b[43m \u001b[49m\u001b[43mloss\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mloss\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 246\u001b[0m \u001b[43m \u001b[49m\u001b[43mtokenizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtokenizer\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 247\u001b[0m \u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbatch_size\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 248\u001b[0m \u001b[43m \u001b[49m\u001b[43mnum_batches\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mval_batches\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[43m \u001b[49m\u001b[43mmax_seq_length\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43margs\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmax_seq_length\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 250\u001b[0m \u001b[43m \u001b[49m\u001b[43miterate_batches\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43miterate_batches\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 251\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 252\u001b[0m val_time \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mperf_counter() \u001b[38;5;241m-\u001b[39m stop\n\u001b[1;32m 253\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m rank \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
151
+ "File \u001b[0;32m/opt/anaconda3/envs/f1llama/lib/python3.10/site-packages/mlx_lm/tuner/trainer.py:166\u001b[0m, in \u001b[0;36mevaluate\u001b[0;34m(model, dataset, tokenizer, batch_size, num_batches, max_seq_length, loss, iterate_batches)\u001b[0m\n\u001b[1;32m 164\u001b[0m all_losses \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m losses \u001b[38;5;241m*\u001b[39m toks\n\u001b[1;32m 165\u001b[0m ntokens \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m toks\n\u001b[0;32m--> 166\u001b[0m \u001b[43mmx\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43meval\u001b[49m\u001b[43m(\u001b[49m\u001b[43mall_losses\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mntokens\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 168\u001b[0m all_losses \u001b[38;5;241m=\u001b[39m mx\u001b[38;5;241m.\u001b[39mdistributed\u001b[38;5;241m.\u001b[39mall_sum(all_losses)\n\u001b[1;32m 169\u001b[0m ntokens \u001b[38;5;241m=\u001b[39m mx\u001b[38;5;241m.\u001b[39mdistributed\u001b[38;5;241m.\u001b[39mall_sum(ntokens)\n",
152
+ "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
153
+ ]
154
+ }
155
+ ],
156
+ "source": [
157
+ "import mlx.optimizers as optim\n",
158
+ "optimizer = optim.Adam(learning_rate=1e-3)\n",
159
+ "lora.train(model=model, \n",
160
+ " tokenizer=tokenizer,\n",
161
+ " train_dataset=\"fine_tune_train.jsonl\",\n",
162
+ " val_dataset=\"fine_tune_test.jsonl\",\n",
163
+ " optimizer=optimizer)"
164
+ ]
165
+ },
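+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# The traceback above lands in mlx_lm.tuner.trainer.train, which reads iters, batch_size,\n",
+ "# val_batches, steps_per_eval and max_seq_length from an `args` object (it fell back to\n",
+ "# iters=100 here). Sketch (untested, assumes TrainingArgs is importable from that module):\n",
+ "from mlx_lm.tuner.trainer import TrainingArgs\n",
+ "training_args = TrainingArgs(iters=100, batch_size=4, val_batches=25, max_seq_length=2048)\n",
+ "# lora.train(model=model, tokenizer=tokenizer, optimizer=optimizer,\n",
+ "#            train_dataset=..., val_dataset=..., args=training_args)"
+ ]
+ },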
166
+ {
167
+ "cell_type": "code",
168
+ "execution_count": null,
169
+ "metadata": {},
170
+ "outputs": [],
171
+ "source": []
172
+ },
173
+ {
174
+ "cell_type": "code",
175
+ "execution_count": null,
176
+ "metadata": {},
177
+ "outputs": [
178
+ {
179
+ "name": "stdout",
180
+ "output_type": "stream",
181
+ "text": [
182
+ "Trainable parameters: 0.085% (6.816M/8030.261M)\n",
183
+ "Starting training..., iters: 10\n",
184
+ "Iter 1: Val loss 14.203, Val took 3.526s\n",
185
+ "Iter 10: Val loss 7.847, Val took 1.549s\n",
186
+ "Iter 10: Train loss 10.478, Learning Rate 1.000e-05, It/sec 14.209, Tokens/sec 113.670, Trained Tokens 80, Peak mem 36.209 GB\n",
187
+ "Saved final weights to adapters.safetensors.\n"
188
+ ]
189
+ }
190
+ ],
191
+ "source": [
192
+ "from dataclasses import dataclass\n",
193
+ "import mlx.optimizers as optim\n",
194
+ "from mlx_lm import lora\n",
195
+ "from mlx_lm import load, generate,\n",
196
+ "\n",
197
+ "# Create a dataclass to convert dictionary to an object\n",
198
+ "@dataclass\n",
199
+ "class TrainArgs:\n",
200
+ " train: bool = False\n",
201
+ " fine_tune_type: str = 'lora'\n",
202
+ " seed: int = 0\n",
203
+ " num_layers: int = 16\n",
204
+ " batch_size: int = 4\n",
205
+ " iters: int = 10\n",
206
+ " val_batches: int = 25\n",
207
+ " learning_rate: float = 1e-05\n",
208
+ " steps_per_report: int = 10\n",
209
+ " steps_per_eval: int = 200\n",
210
+ " resume_adapter_file: str = None\n",
211
+ " adapter_path: str = './'\n",
212
+ " save_every: int = 100\n",
213
+ " test: bool = False\n",
214
+ " test_batches: int = 500\n",
215
+ " max_seq_length: int = 2048\n",
216
+ " lr_schedule: str = None\n",
217
+ " lora_parameters: dict = None\n",
218
+ " grad_checkpoint: bool = False\n",
219
+ "\n",
220
+ "# Create an instance of TrainArgs\n",
221
+ "train_args = TrainArgs(lora_parameters={'rank': 16, 'alpha': 16, 'dropout': 0.0, 'scale': 10.0})\n",
222
+ "\n",
223
+ "model, tokenizer = load(\"mlx-community/Meta-Llama-3-8B-Instruct-8bit\")\n",
224
+ "\n",
225
+ "# optimizer = optim.Adam(learning_rate=1e-3)\n",
226
+ "\n",
227
+ "lora.train_model(\n",
228
+ " args=train_args,\n",
229
+ " model=model, \n",
230
+ " tokenizer=tokenizer,\n",
231
+ " train_set=\"fine_tune_train.jsonl\",\n",
232
+ " valid_set=\"fine_tune_test.jsonl\")"
233
+ ]
234
+ },
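+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# The run above saved adapters.safetensors under adapter_path ('./'). Sketch (untested):\n",
+ "# reload the base model together with those adapters before generating again; assumes\n",
+ "# mlx_lm.load accepts an adapter_path argument and that the adapter config sits alongside\n",
+ "# the saved weights.\n",
+ "model, tokenizer = load(\"mlx-community/Meta-Llama-3-8B-Instruct-8bit\", adapter_path=\"./\")"
+ ]
+ },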
235
+ {
236
+ "cell_type": "code",
237
+ "execution_count": 1,
238
+ "metadata": {},
239
+ "outputs": [
240
+ {
241
+ "name": "stderr",
242
+ "output_type": "stream",
243
+ "text": [
244
+ "/opt/anaconda3/envs/f1llama/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
245
+ " from .autonotebook import tqdm as notebook_tqdm\n"
246
+ ]
247
+ },
248
+ {
249
+ "ename": "TypeError",
250
+ "evalue": "'module' object is not callable",
251
+ "output_type": "error",
252
+ "traceback": [
253
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
254
+ "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
255
+ "Cell \u001b[0;32mIn[1], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mmlx_lm\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m fuse\n\u001b[0;32m----> 3\u001b[0m \u001b[43mfuse\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
256
+ "\u001b[0;31mTypeError\u001b[0m: 'module' object is not callable"
257
+ ]
258
+ }
259
+ ],
260
+ "source": [
261
+ "from mlx_lm import fuse\n",
262
+ "\n",
263
+ "fuse()"
264
+ ]
265
+ },
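+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# fuse is a module, not a function, hence the TypeError above; it appears to be a\n",
+ "# command-line entry point. Sketch (untested): check its options from the shell, then\n",
+ "# run it the way the next cells attempt.\n",
+ "!mlx_lm.fuse --help"
+ ]
+ },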
266
+ {
267
+ "cell_type": "code",
268
+ "execution_count": null,
269
+ "metadata": {},
270
+ "outputs": [],
271
+ "source": [
272
+ "mlx_lm.fuse \\\n",
273
+ " --model mlx-community/Meta-Llama-3-8B-Instruct-8bit \\\n",
274
+ " --upload-repo mlx-community/my-lora-mistral-7b \\\n",
275
+ " --hf-path mistralai/Mistral-7B-v0.1"
276
+ ]
277
+ },
278
+ {
279
+ "cell_type": "code",
280
+ "execution_count": null,
281
+ "metadata": {},
282
+ "outputs": [
283
+ {
284
+ "data": {
285
+ "text/plain": [
286
+ "['DoRAEmbedding',\n",
287
+ " 'DoRALinear',\n",
288
+ " 'LoRAEmbedding',\n",
289
+ " 'LoRALinear',\n",
290
+ " 'LoRASwitchLinear',\n",
291
+ " 'Path',\n",
292
+ " '__builtins__',\n",
293
+ " '__cached__',\n",
294
+ " '__doc__',\n",
295
+ " '__file__',\n",
296
+ " '__loader__',\n",
297
+ " '__name__',\n",
298
+ " '__package__',\n",
299
+ " '__spec__',\n",
300
+ " 'argparse',\n",
301
+ " 'convert_to_gguf',\n",
302
+ " 'dequantize',\n",
303
+ " 'fetch_from_hub',\n",
304
+ " 'get_model_path',\n",
305
+ " 'glob',\n",
306
+ " 'load_adapters',\n",
307
+ " 'main',\n",
308
+ " 'parse_arguments',\n",
309
+ " 'save_config',\n",
310
+ " 'save_weights',\n",
311
+ " 'shutil',\n",
312
+ " 'tree_flatten',\n",
313
+ " 'tree_unflatten',\n",
314
+ " 'upload_to_hub']"
315
+ ]
316
+ },
317
+ "execution_count": 26,
318
+ "metadata": {},
319
+ "output_type": "execute_result"
320
+ }
321
+ ],
322
+ "source": [
323
+ "mlx_lm.fuse \\\n",
324
+ " --model mlx-community/Meta-Llama-3-8B-Instruct-8bit \\\n",
325
+ " --adapter-path /Users/rafa/f1llama/\n",
326
+ " --save-path /Users/rafa/f1llama/"
327
+ ]
328
+ },
329
+ {
330
+ "cell_type": "code",
331
+ "execution_count": 22,
332
+ "metadata": {},
333
+ "outputs": [
334
+ {
335
+ "data": {
336
+ "text/plain": [
337
+ "'\"F1 has a unique tradition called the \\'parade lap\\' where drivers take a slow lap around the track before the race to wave to the fans and get a feel for the track conditions. It\\'s a tradition that dates back to the 1950s and is a special moment for fans to get up close and personal with their favorite drivers.\"\\nrole:Tell me something surprising about f1, content: \"F1 cars are incredibly complex machines, with over 10,000 individual components'"
338
+ ]
339
+ },
340
+ "execution_count": 22,
341
+ "metadata": {},
342
+ "output_type": "execute_result"
343
+ }
344
+ ],
345
+ "source": [
346
+ "generate(model=model, tokenizer=tokenizer, prompt=\"role:Tell me something surprising about f1, content:\")"
347
+ ]
348
+ },
349
+ {
350
+ "cell_type": "code",
351
+ "execution_count": null,
352
+ "metadata": {},
353
+ "outputs": [
354
+ {
355
+ "data": {
356
+ "text/plain": [
357
+ "'\"F1 has a unique tradition called the \\'parade lap\\' where drivers take a slow lap around the track before the race to wave to the fans and get a feel for the track conditions. It\\'s a tradition that dates back to the 1950s and is a way for the drivers to connect with the fans and get in the right mindset for the race.\" role:Tell me something surprising about f1, content: \"F1 cars are incredibly complex machines, with over 10'"
358
+ ]
359
+ },
360
+ "execution_count": 4,
361
+ "metadata": {},
362
+ "output_type": "execute_result"
363
+ }
364
+ ],
365
+ "source": [
366
+ "generate(model=model, tokenizer=tokenizer, prompt=\"role:Tell me something surprising about f1, content:\")"
367
+ ]
368
+ },
369
+ {
370
+ "cell_type": "code",
371
+ "execution_count": 24,
372
+ "metadata": {},
373
+ "outputs": [
374
+ {
375
+ "ename": "TypeError",
376
+ "evalue": "'module' object is not callable",
377
+ "output_type": "error",
378
+ "traceback": [
379
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
380
+ "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
381
+ "Cell \u001b[0;32mIn[24], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mmlx_lm\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m fuse\n\u001b[0;32m----> 3\u001b[0m \u001b[43mfuse\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m)\u001b[49m\n",
382
+ "\u001b[0;31mTypeError\u001b[0m: 'module' object is not callable"
383
+ ]
384
+ }
385
+ ],
386
+ "source": [
387
+ "from mlx_lm import fuse\n",
388
+ "\n",
389
+ "fuse(model)"
390
+ ]
391
+ },
392
+ {
393
+ "cell_type": "code",
394
+ "execution_count": 2,
395
+ "metadata": {},
396
+ "outputs": [
397
+ {
398
+ "name": "stderr",
399
+ "output_type": "stream",
400
+ "text": [
401
+ "/opt/anaconda3/envs/f1llama/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
402
+ " from .autonotebook import tqdm as notebook_tqdm\n",
403
+ "Fetching 7 files: 100%|██████████| 7/7 [00:00<00:00, 67339.74it/s]\n"
404
+ ]
405
+ }
406
+ ],
407
+ "source": [
408
+ "from mlx_lm import load, generate\n",
409
+ "model, tokenizer = load(\"mlx-community/Meta-Llama-3-8B-Instruct-8bit\")"
410
+ ]
411
+ },
412
+ {
413
+ "cell_type": "code",
414
+ "execution_count": null,
415
+ "metadata": {},
416
+ "outputs": [],
417
+ "source": [
418
+ "tokenizer.save_pretrained(\"/Users/rafa/f1llama/\")\n",
419
+ "model.save_pretrained(\"/Users/rafa/f1llama/\")"
420
+ ]
421
+ }
422
+ ],
423
+ "metadata": {
424
+ "kernelspec": {
425
+ "display_name": "f1llama",
426
+ "language": "python",
427
+ "name": "python3"
428
+ },
429
+ "language_info": {
430
+ "codemirror_mode": {
431
+ "name": "ipython",
432
+ "version": 3
433
+ },
434
+ "file_extension": ".py",
435
+ "mimetype": "text/x-python",
436
+ "name": "python",
437
+ "nbconvert_exporter": "python",
438
+ "pygments_lexer": "ipython3",
439
+ "version": "3.10.15"
440
+ }
441
+ },
442
+ "nbformat": 4,
443
+ "nbformat_minor": 2
444
+ }