sharpenb committed on
Commit
c2c8a13
·
verified ·
1 Parent(s): e080c05

Upload folder using huggingface_hub (#2)

Browse files

- fd560447e693da851ac7d4d91629941f85e453a6d07cefde0286a12fafb31096 (e410a7ee162d200c6b0e788d011a9536aa415b76)
- 450c9761314bdcbaea2ed47176f68d3f869feb114521e2023d160408eec9270b (6acf992f007ce56d1961383c222581d5b408f5e8)
- c8d6a9856cdf4da007996766ca7b05b6d1856ff03373ea0de44797441c06c08f (1c070c86f885c0d45f69e560e892ef8b0cea628b)

Files changed (2) hide show
  1. config.json +1 -1
  2. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "/covalent/.cache/models/tmpnftdc8uhwq8u8ir4",
3
  "architectures": [
4
  "MistralForCausalLM"
5
  ],
 
1
  {
2
+ "_name_or_path": "/covalent/.cache/models/tmp748kwrlysbvloxxi",
3
  "architectures": [
4
  "MistralForCausalLM"
5
  ],
smash_config.json CHANGED
@@ -28,7 +28,7 @@
28
  "quant_llm-int8_weight_bits": 8,
29
  "max_batch_size": 1,
30
  "device": "cuda",
31
- "cache_dir": "/covalent/.cache/models/tmpnftdc8uh",
32
  "task": "",
33
  "save_load_fn": "bitsandbytes",
34
  "save_load_fn_args": {}
 
28
  "quant_llm-int8_weight_bits": 8,
29
  "max_batch_size": 1,
30
  "device": "cuda",
31
+ "cache_dir": "/covalent/.cache/models/tmp748kwrly",
32
  "task": "",
33
  "save_load_fn": "bitsandbytes",
34
  "save_load_fn_args": {}