ndurner committed
Commit 10a91ef · 1 Parent(s): 1c05cb6
Files changed (1)
  1. app.py +16 -4
app.py CHANGED
@@ -232,6 +232,9 @@ def bot(message, history, oai_key, system_prompt, seed, temperature, max_tokens,
             role = "system"
         else:
             role = "developer"
+
+        if not system_prompt.startswith("Formatting re-enabled"):
+            system_prompt = "Formatting re-enabled\n" + system_prompt
         history_openai_format.append({"role": role, "content": system_prompt})
 
     for msg in history:
@@ -264,12 +267,21 @@ def bot(message, history, oai_key, system_prompt, seed, temperature, max_tokens,
     if log_to_console:
         print(f"br_prompt: {str(history_openai_format)}")
 
-    if model in ["o1", "o1-high", "o1-2024-12-17", "o3-mini"]:
+    if model in ["o1", "o1-high", "o1-2024-12-17", "o3-mini", "o3-mini-high"]:
+        # reasoning effort
+        high = False
+        if model == "o1-high":
+            model = "o1"
+            high = True
+        elif model == "o3-mini-high":
+            model = "o3-mini"
+            high = True
+
         response = client.chat.completions.create(
-            model="o1" if model == "o1-high" else model,
+            model=model,
            messages= history_openai_format,
            seed=seed_i,
-            reasoning_effort="high" if model == "o1-high" else "medium",
+            reasoning_effort="high" if high else "medium",
            **({"max_completion_tokens": max_tokens} if max_tokens > 0 else {})
        )
 
@@ -407,7 +419,7 @@ with gr.Blocks(delete_cache=(86400, 86400)) as demo:
 
         oai_key = gr.Textbox(label="OpenAI API Key", elem_id="oai_key")
         model = gr.Dropdown(label="Model", value="gpt-4-turbo", allow_custom_value=True, elem_id="model",
-                            choices=["gpt-4-turbo", "gpt-4o-2024-05-13", "gpt-4o-2024-11-20", "o1-high", "o1-mini", "o1", "o3-mini", "o1-preview", "chatgpt-4o-latest", "gpt-4o", "gpt-4o-mini", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-vision-preview", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-1106", "whisper", "dall-e-3"])
+                            choices=["gpt-4o", "gpt-4-turbo", "o1-high", "o1-mini", "o1", "o3-mini-high", "o3-mini", "o1-preview", "chatgpt-4o-latest", "gpt-4o-2024-05-13", "gpt-4o-2024-11-20", "gpt-4o-mini", "gpt-4", "gpt-4-vision-preview", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-1106", "whisper", "dall-e-3"])
         system_prompt = gr.TextArea("You are a helpful yet diligent AI assistant. Answer faithfully and factually correct. Respond with 'I do not know' if uncertain.", label="System/Developer Prompt", lines=3, max_lines=250, elem_id="system_prompt")
         seed = gr.Textbox(label="Seed", elem_id="seed")
         temp = gr.Slider(0, 2, label="Temperature", elem_id="temp", value=1)
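
For readers skimming the diff: the standalone sketch below restates the logic this commit introduces, assuming the official openai Python SDK. The helper names resolve_reasoning_model and build_developer_prompt, and the example values, are illustrative only and do not exist in app.py. The dropdown aliases "o1-high" and "o3-mini-high" resolve to the real API model names plus reasoning_effort="high", and the developer prompt is prefixed with "Formatting re-enabled".

# Minimal sketch of the commit's logic, outside app.py (names are illustrative).
# Assumes the official `openai` SDK and OPENAI_API_KEY in the environment.
from openai import OpenAI

# Dropdown values that take the reasoning-model branch in bot().
REASONING_MODELS = ["o1", "o1-high", "o1-2024-12-17", "o3-mini", "o3-mini-high"]


def resolve_reasoning_model(model: str) -> tuple[str, str]:
    """Map a UI alias such as 'o3-mini-high' to (api_model, reasoning_effort)."""
    if model == "o1-high":
        return "o1", "high"
    if model == "o3-mini-high":
        return "o3-mini", "high"
    return model, "medium"


def build_developer_prompt(system_prompt: str) -> str:
    """Prefix the developer message so reasoning models keep emitting Markdown."""
    if not system_prompt.startswith("Formatting re-enabled"):
        system_prompt = "Formatting re-enabled\n" + system_prompt
    return system_prompt


if __name__ == "__main__":
    client = OpenAI()
    model = "o3-mini-high"  # as it might arrive from the Gradio dropdown

    if model in REASONING_MODELS:
        api_model, effort = resolve_reasoning_model(model)
        response = client.chat.completions.create(
            model=api_model,
            messages=[
                {"role": "developer", "content": build_developer_prompt("You are a helpful assistant.")},
                {"role": "user", "content": "Hello!"},
            ],
            reasoning_effort=effort,
        )
        print(response.choices[0].message.content)

Folding the effort level into the dropdown value keeps the model picker a single control; a separate effort selector would be the obvious alternative.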