Remove redundant files.
- Makefile +3 -0
- open-llama-7b-q3_K.bin +0 -3
- open-llama-7b-q3_K.bin +1 -0
- open-llama-7b-q4_K.bin +0 -3
- open-llama-7b-q4_K.bin +1 -0
- open-llama-7b-q5_K.bin +0 -3
- open-llama-7b-q5_K.bin +1 -0
Makefile
CHANGED
@@ -37,6 +37,9 @@ llama.cpp/quantize: llama.cpp
 $(MODEL_NAME)-f16.bin: $(HF_FILES) | llama.cpp
 	$(PYTHON) llama.cpp/convert.py --outtype f16 --outfile $@ .
 
+$(MODEL_NAME)-q%_K.bin: $(MODEL_NAME)-q%_K_M.bin
+	ln -s $< $@
+
 $(MODEL_NAME)-q%.bin: $(MODEL_NAME)-f16.bin | llama.cpp/quantize
 	llama.cpp/quantize $< $@ q$*
 
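For context: the added pattern rule makes each short q3_K/q4_K/q5_K name an alias for the corresponding _K_M quantization. GNU Make picks the matching pattern rule with the shortest stem, so a request for open-llama-7b-q4_K.bin matches the alias rule, builds open-llama-7b-q4_K_M.bin through the existing q% rule, and then symlinks the short name to it. Below is a minimal standalone sketch of that chaining; the touch recipe is a stand-in for the real quantize step and is not part of this commit.

# sketch.mk -- illustration only; the quantize step is faked with `touch`.
MODEL_NAME := open-llama-7b

# Keep chained intermediates (the _K_M file) instead of letting make delete them.
.SECONDARY:

# Alias rule from this commit: the short q*_K name is a symlink to the _K_M build.
$(MODEL_NAME)-q%_K.bin: $(MODEL_NAME)-q%_K_M.bin
	ln -s $< $@

# Stand-in for the real quantize rule; it only creates a placeholder file.
$(MODEL_NAME)-q%.bin:
	touch $@

Running `make -f sketch.mk open-llama-7b-q4_K.bin` produces open-llama-7b-q4_K_M.bin plus a symlink open-llama-7b-q4_K.bin pointing at it, which mirrors how the committed rule resolves the short names.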
open-llama-7b-q3_K.bin
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b6f0431fbc6fac5126cd5a855545d9fe27f4a18c6168a99ac37c138a4a75b5c4
-size 3231072384

open-llama-7b-q3_K.bin
ADDED
@@ -0,0 +1 @@
+open-llama-7b-q3_K_M.bin
open-llama-7b-q4_K.bin
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9a48fc57b63afe6218c2c70ec85c998f88a78d14d009928da7a1f4e8643e73a6
-size 4046946432

open-llama-7b-q4_K.bin
ADDED
@@ -0,0 +1 @@
+open-llama-7b-q4_K_M.bin
open-llama-7b-q5_K.bin
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:233d50a95afd1f4edb7ba856eb5b7e22fa1cd509a80b08291f4f7bb9cf9a0486
-size 4765483136

open-llama-7b-q5_K.bin
ADDED
@@ -0,0 +1 @@
+open-llama-7b-q5_K_M.bin