eswardivi committed
Commit a6a230c · verified · 1 Parent(s): 7448d3b

Update app.py

Files changed (1)
  1. app.py +6 -10
app.py CHANGED

@@ -17,16 +17,12 @@ subprocess.run(
     shell=True,
 )
 
-token = os.environ["HF_TOKEN"]
-
-
 model = AutoModelForCausalLM.from_pretrained(
-    "microsoft/phi-4",
-    token=token,
+    "krutrim-ai-labs/Krutrim-2-instruct",
     trust_remote_code=True,
     torch_dtype=torch.bfloat16
 )
-tok = AutoTokenizer.from_pretrained("microsoft/phi-4", token=token)
+tok = AutoTokenizer.from_pretrained("krutrim-ai-labs/Krutrim-2-instruct", token=token)
 terminators = [
     tok.eos_token_id,
 ]
@@ -80,27 +76,27 @@ def chat(message, history, temperature, do_sample, max_tokens):
 
 demo = gr.ChatInterface(
     fn=chat,
-    examples=[["Write me a poem about Machine Learning."]],
+    examples=[["Evaru Nuvvu?"]],
     # multimodal=False,
     additional_inputs_accordion=gr.Accordion(
         label="⚙️ Parameters", open=False, render=False
     ),
     additional_inputs=[
         gr.Slider(
-            minimum=0, maximum=1, step=0.1, value=0.9, label="Temperature", render=False
+            minimum=0, maximum=1, step=0.1, value=0.3, label="Temperature", render=False
         ),
         gr.Checkbox(label="Sampling", value=True),
         gr.Slider(
             minimum=128,
             maximum=4096,
             step=1,
-            value=512,
+            value=1024,
             label="Max new tokens",
             render=False,
         ),
     ],
     stop_btn="Stop Generation",
     title="Chat With LLMs",
-    description="Now Running [microsoft/phi-4](https://huggingface.co/microsoft/phi-4)",
+    description="Now Running [krutrim-ai-labs/Krutrim-2-instruct](https://huggingface.co/krutrim-ai-labs/Krutrim-2-instruct)",
 )
 demo.launch()
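
For reference, a minimal sketch of how the updated model and tokenizer loading is typically used for a single chat turn with the transformers API. The model id matches the diff; the prompt, generation call, and parameter values below are illustrative assumptions, not code taken from app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "krutrim-ai-labs/Krutrim-2-instruct"

# Load model and tokenizer as in the updated app.py
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
)
tok = AutoTokenizer.from_pretrained(model_id)

# Illustrative single-turn generation (values are assumptions, not from app.py)
messages = [{"role": "user", "content": "Who are you?"}]
input_ids = tok.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
output_ids = model.generate(
    input_ids,
    max_new_tokens=1024,
    do_sample=True,
    temperature=0.3,
    eos_token_id=tok.eos_token_id,
)
print(tok.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))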