Sarath0x8f committed
Commit f98c0f0 · verified · 1 Parent(s): 2434df4

Update app.py

Files changed (1): app.py (+2, -3)
app.py CHANGED
@@ -17,14 +17,13 @@ llm_models = [
     "meta-llama/Meta-Llama-3-8B-Instruct",
     "mistralai/Mistral-7B-Instruct-v0.2",
     "tiiuae/falcon-7b-instruct",
-    "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+    # "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
     # "deepseek-ai/deepseek-vl2", ## 54GB > 10GB
     # "deepseek-ai/deepseek-vl2-small", ## 32GB > 10GB
     # "deepseek-ai/deepseek-vl2-tiny", ## high response time
     # "deepseek-ai/deepseek-llm-7b-chat", ## 13GB > 10GB
     # "deepseek-ai/deepseek-math-7b-instruct", ## 13GB > 10GB
-    "deepseek-ai/deepseek-coder-33b-instruct",
-    "deepseek-ai/deepseek-vl-1.3b-base",
+    # "deepseek-ai/deepseek-coder-33b-instruct", ## 66GB > 10GB
     # "deepseek-ai/DeepSeek-R1-Zero", ## 688GB > 10GB
     # "mistralai/Mixtral-8x22B-Instruct-v0.1", ## 281GB>10GB
     # "NousResearch/Yarn-Mistral-7b-64k", ## 14GB>10GB