Create app.py
app.py
ADDED
@@ -0,0 +1,145 @@
import streamlit as st
from interpreter import interpreter
import os

# Page configuration
st.set_page_config(page_title="AutoInterpreter", layout="wide")

# Initialize session state for settings if not exists
if "settings" not in st.session_state:
    st.session_state.settings = {
        "api_key": os.getenv("HF_API_KEY", ""),
        "api_base": "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct",
        "model": "huggingface/Qwen/Qwen2.5-72B-Instruct",
        "auto_run": True,
        "context_window": 8000,
        "max_tokens": 4000
    }

# Create header with title and settings button
col1, col2 = st.columns([0.9, 0.1])
with col1:
    st.title("Open Interpreter REPL")
with col2:
    settings_button = st.button("⚙️", help="Settings")

# Settings modal
if settings_button:
    settings_modal = st.container()
    with settings_modal:
        st.markdown("### Settings")
        cols = st.columns(2)

        with cols[0]:
            # API Settings
            st.text_input(
                "API Key",
                value=st.session_state.settings["api_key"],
                type="password",
                key="api_key",
                on_change=lambda: st.session_state.settings.update({"api_key": st.session_state.api_key})
            )
            st.text_input(
                "Model",
                value=st.session_state.settings["model"],
                key="model",
                on_change=lambda: st.session_state.settings.update({"model": st.session_state.model})
            )

        with cols[1]:
            # Model Settings
            st.toggle(
                "Auto Run",
                value=st.session_state.settings["auto_run"],
                key="auto_run",
                on_change=lambda: st.session_state.settings.update({"auto_run": st.session_state.auto_run})
            )
            st.number_input(
                "Max Tokens",
                value=st.session_state.settings["max_tokens"],
                min_value=100,
                max_value=8000,
                key="max_tokens",
                on_change=lambda: st.session_state.settings.update({"max_tokens": st.session_state.max_tokens})
            )

# Apply settings to interpreter
interpreter.llm.api_key = st.session_state.settings["api_key"]
interpreter.llm.api_base = st.session_state.settings["api_base"]
interpreter.llm.model = st.session_state.settings["model"]
interpreter.auto_run = st.session_state.settings["auto_run"]
interpreter.context_window = st.session_state.settings["context_window"]
interpreter.max_tokens = st.session_state.settings["max_tokens"]

# Initialize messages session state
if "messages" not in st.session_state:
    st.session_state.messages = []

# Clear button
if st.button("🗑️ Clear", help="Clear chat"):
    interpreter.messages = []
    st.session_state.messages = []
    st.rerun()

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# User input
user_input = st.chat_input("Enter your message:")

if user_input:
    # Display user message
    st.chat_message("user").write(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    try:
        # Create a chat message container for the assistant
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            message_buffer = []
            code_buffer = []

            # Stream the response
            for chunk in interpreter.chat(user_input, stream=True):
                if isinstance(chunk, dict):
                    content = chunk.get('content')
                    if content is not None and not any(skip in str(content) for skip in ["context window", "max_tokens", "<|im_end|>"]):
                        if chunk.get('type') == 'console':
                            # Accumulate code separately
                            code_buffer.append(str(content))
                            # Show complete message + current code
                            full_response = []
                            if message_buffer:
                                full_response.extend(message_buffer)
                            if code_buffer:
                                full_response.append(f"\n```python\n{''.join(code_buffer)}\n```\n")
                            response_placeholder.markdown(''.join(full_response))
                        else:
                            # Accumulate message until we have a complete thought
                            current = str(content)
                            message_buffer.append(current)
                            if '.' in current or '\n' in current or len(''.join(message_buffer)) > 80:
                                # Show complete message + current code
                                full_response = []
                                if message_buffer:
                                    full_response.extend(message_buffer)
                                if code_buffer:
                                    full_response.append(f"\n```python\n{''.join(code_buffer)}\n```\n")
                                response_placeholder.markdown(''.join(full_response))

            # Store the complete response
            final_response = []
            if message_buffer:
                final_response.extend(message_buffer)
            if code_buffer:
                final_response.append(f"\n```python\n{''.join(code_buffer)}\n```\n")

            st.session_state.messages.append({
                "role": "assistant",
                "content": ''.join(final_response)
            })

    except Exception as e:
        st.error(f"Error: {str(e)}")
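
Note: the text/code assembly above is written out three times (live update on console chunks, live update on message chunks, and the final stored response). A possible follow-up, sketched here with a hypothetical render_buffers helper that is not part of this commit, would keep that logic in one place:

def render_buffers(message_buffer, code_buffer):
    # Join the streamed text chunks, then append any accumulated console
    # output as a fenced python block, mirroring the inline assembly above.
    parts = list(message_buffer)
    if code_buffer:
        parts.append(f"\n```python\n{''.join(code_buffer)}\n```\n")
    return ''.join(parts)

# Usage inside the streaming loop (and after it, for the stored response):
# response_placeholder.markdown(render_buffers(message_buffer, code_buffer))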