Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,72 +1,214 @@
|
|
1 |
import streamlit as st
|
2 |
-
import
|
3 |
-
import
|
4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
|
6 |
-
|
7 |
-
MAP_API_KEY = "6y94o8MigcYbmaCI6IlVQGtmza5tzKri"
|
8 |
-
POINT_API_KEY = "n5IyYjKAA0IgOZ7cNEleGtkWg4fJLBil"
|
9 |
|
10 |
-
|
11 |
-
|
12 |
-
|
|
|
|
|
|
|
13 |
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
18 |
|
19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
st.sidebar.title("تنظیمات")
|
24 |
-
selected_date = st.sidebar.date_input("تاریخ پیشبینی:")
|
25 |
-
selected_time = st.sidebar.time_input("زمان پیشبینی:")
|
26 |
-
|
27 |
-
st.markdown("<h3>موقعیت: شرکت دهخدا</h3>", unsafe_allow_html=True)
|
28 |
-
|
29 |
-
# درخواست دادهها از API
|
30 |
-
def get_weather_data(lat, lon):
|
31 |
-
url = f"https://api.windy.com/api/point-forecast/v2"
|
32 |
-
headers = {"Authorization": POINT_API_KEY}
|
33 |
-
params = {
|
34 |
-
"lat": lat,
|
35 |
-
"lon": lon,
|
36 |
-
"model": "gfs",
|
37 |
-
"parameters": ["windSpeed", "windDirection", "temperature", "precipitation"],
|
38 |
-
"levels": ["surface"],
|
39 |
-
"key": 6y94o8MigcYbmaCI6IlVQGtmza5tzKri,
|
40 |
-
}
|
41 |
-
response = requests.get(url, headers=headers, params=params)
|
42 |
-
return response.json() if response.status_code == 200 else None
|
43 |
-
|
44 |
-
# دریافت دادهها
|
45 |
-
weather_data = get_weather_data(DEFAULT_LAT, DEFAULT_LON)
|
46 |
-
|
47 |
-
if weather_data:
|
48 |
-
st.markdown("<h3 style='color: #007BFF;'>اطلاعات آبوهوا</h3>", unsafe_allow_html=True)
|
49 |
-
st.write(f"🌬️ **سرعت باد:** {weather_data['windSpeed']} متر بر ثانیه")
|
50 |
-
st.write(f"🧭 **جهت باد:** {weather_data['windDirection']} درجه")
|
51 |
-
st.write(f"🌡️ **دمای هوا:** {weather_data['temperature']} °C")
|
52 |
-
st.write(f"🌧️ **بارش:** {weather_data['precipitation']} میلیمتر")
|
53 |
-
else:
|
54 |
-
st.error("مشکلی در دریافت اطلاعات آبوهوا وجود دارد.")
|
55 |
-
|
56 |
-
# نمایش نقشه
|
57 |
-
m = folium.Map(location=[DEFAULT_LAT, DEFAULT_LON], zoom_start=10)
|
58 |
-
folium.Marker([DEFAULT_LAT, DEFAULT_LON], tooltip="شرکت دهخدا").add_to(m)
|
59 |
-
st_folium(m, width=700, height=500)
|
60 |
-
|
61 |
-
# پیشبینی انتشار دود
|
62 |
-
def calculate_fire_spread(wind_speed, wind_direction):
|
63 |
-
spread_distance = wind_speed * 3 * 60 * 60 # سه ساعت
|
64 |
-
spread_lat = DEFAULT_LAT + (spread_distance / 111000) * wind_direction
|
65 |
-
spread_lon = DEFAULT_LON + (spread_distance / (111000 * abs(wind_direction)))
|
66 |
-
return spread_lat, spread_lon
|
67 |
-
|
68 |
-
if weather_data:
|
69 |
-
st.markdown("<h3 style='color: #FF5722;'>پیشبینی انتشار دود</h3>", unsafe_allow_html=True)
|
70 |
-
lat, lon = calculate_fire_spread(weather_data['windSpeed'], weather_data['windDirection'])
|
71 |
-
st.write(f"🔺 دود در سه ساعت آینده به موقعیت زیر میرسد:")
|
72 |
-
st.write(f"📍 Latitude: {lat:.6f}, Longitude: {lon:.6f}")
|
|
|
1 |
import streamlit as st
|
2 |
+
import pandas as pd
|
3 |
+
import numpy as np
|
4 |
+
import plotly.express as px
|
5 |
+
import plotly.graph_objects as go
|
6 |
+
from sklearn.ensemble import RandomForestRegressor
|
7 |
+
from sklearn.model_selection import train_test_split
|
8 |
+
from sklearn.preprocessing import LabelEncoder
|
9 |
+
from scipy import stats
|
10 |
+
import statsmodels.api as sm
|
11 |
+
from datetime import datetime
|
12 |
|
13 |
+
# Must run before any other st.* call: sets the browser tab title and wide layout.
st.set_page_config(page_title="Excel Analysis Suite", layout="wide")
|
|
|
|
|
14 |
|
15 |
+
def load_data(file):
    """Load an uploaded file into a pandas DataFrame.

    Excel files (``.xlsx`` / ``.xls``) are read with ``pd.read_excel``;
    anything else falls back to ``pd.read_csv``. The extension check is
    case-insensitive, so files named e.g. ``DATA.XLSX`` are still routed
    to the Excel reader instead of crashing in the CSV parser.

    Parameters
    ----------
    file : file-like object with a ``name`` attribute
        e.g. a Streamlit ``UploadedFile``.

    Returns
    -------
    pd.DataFrame
    """
    # endswith accepts a tuple of suffixes; lower() makes the match case-insensitive.
    if file.name.lower().endswith(('.xlsx', '.xls')):
        return pd.read_excel(file)
    return pd.read_csv(file)  # Fallback to CSV
|
21 |
|
22 |
+
def get_column_type(column):
    """Classify a Series as ``"numeric"``, ``"datetime"``, or ``"categorical"``.

    Numeric dtypes are checked first, then datetime dtypes; any other
    dtype (strings, objects, ...) is reported as categorical.
    """
    checks = (
        ("numeric", pd.api.types.is_numeric_dtype),
        ("datetime", pd.api.types.is_datetime64_any_dtype),
    )
    for label, predicate in checks:
        if predicate(column):
            return label
    return "categorical"
|
29 |
|
30 |
+
def _statistical_analysis(df, selected_columns, column_types):
    """Render correlation heatmap, summary statistics, and one-way ANOVA."""
    st.subheader("Statistical Analysis")

    if len(selected_columns) >= 2:
        col1, col2 = st.columns(2)

        with col1:
            st.write("### Correlation Analysis")
            corr_matrix = df[selected_columns].corr()
            fig = px.imshow(
                corr_matrix,
                color_continuous_scale='RdBu',
                aspect='auto'
            )
            st.plotly_chart(fig, use_container_width=True)

        with col2:
            st.write("### Summary Statistics")
            st.dataframe(df[selected_columns].describe(), use_container_width=True)

    # ANOVA: test whether a numeric response differs across the levels of a
    # categorical factor — only offered when both kinds of column are selected.
    numeric_cols = [col for col in selected_columns if column_types[col] == "numeric"]
    categorical_cols = [col for col in selected_columns if column_types[col] == "categorical"]

    if numeric_cols and categorical_cols:
        st.write("### ANOVA Analysis")
        target = st.selectbox("Select numeric variable", numeric_cols)
        factor = st.selectbox("Select categorical variable", categorical_cols)

        groups = [group for _, group in df.groupby(factor)[target]]
        # f_oneway raises if given fewer than two groups; guard instead of crashing.
        if len(groups) >= 2:
            f_stat, p_val = stats.f_oneway(*groups)
            st.write(f"F-statistic: {f_stat:.4f}")
            st.write(f"p-value: {p_val:.4f}")
        else:
            st.info("ANOVA needs at least two groups in the selected factor.")

        fig = px.box(df, x=factor, y=target)
        st.plotly_chart(fig, use_container_width=True)


def _predictive_modeling(df, column_types):
    """Fit a random-forest regressor; plot predictions and feature importance."""
    st.subheader("Predictive Modeling")

    numeric_cols = [col for col in df.columns if column_types[col] == "numeric"]
    if len(numeric_cols) >= 2:
        target = st.selectbox("Select target variable", numeric_cols)
        features = st.multiselect(
            "Select feature variables",
            [col for col in numeric_cols if col != target],
            default=[col for col in numeric_cols if col != target][:2]
        )

        if features:
            test_size = st.slider("Test set size (%)", 10, 40, 20) / 100

            X = df[features]
            y = df[target]

            # Fixed random_state keeps the split and model reproducible across reruns.
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=test_size, random_state=42
            )

            model = RandomForestRegressor(n_estimators=100, random_state=42)
            model.fit(X_train, y_train)

            y_pred = model.predict(X_test)
            r2 = model.score(X_test, y_test)

            st.write(f"R² Score: {r2:.4f}")

            fig = px.scatter(
                x=y_test, y=y_pred,
                labels={'x': 'Actual', 'y': 'Predicted'},
                title='Actual vs Predicted Values'
            )
            # Dashed y = x reference line: points on it are perfect predictions.
            fig.add_trace(
                go.Scatter(
                    x=[y_test.min(), y_test.max()],
                    y=[y_test.min(), y_test.max()],
                    mode='lines',
                    name='Perfect Prediction',
                    line=dict(dash='dash')
                )
            )
            st.plotly_chart(fig, use_container_width=True)

            # Feature importance, sorted ascending so the horizontal bars read
            # top-down from most to least important.
            importance_df = pd.DataFrame({
                'Feature': features,
                'Importance': model.feature_importances_
            }).sort_values('Importance', ascending=True)

            fig = px.bar(
                importance_df,
                x='Importance',
                y='Feature',
                orientation='h',
                title='Feature Importance'
            )
            st.plotly_chart(fig, use_container_width=True)


def _time_series_analysis(df, column_types):
    """Plot a daily-resampled series and, when long enough, its decomposition."""
    st.subheader("Time Series Analysis")

    date_cols = [col for col in df.columns if column_types[col] == "datetime"]
    numeric_cols = [col for col in df.columns if column_types[col] == "numeric"]

    if date_cols and numeric_cols:
        date_col = st.selectbox("Select date column", date_cols)
        value_col = st.selectbox("Select value column", numeric_cols)

        # Resample to daily frequency so the decomposition period is in days.
        df_ts = df.set_index(date_col)[[value_col]].resample('D').mean()

        fig = px.line(
            df_ts,
            title=f'{value_col} Over Time',
            labels={'value': value_col, 'index': 'Date'}
        )
        st.plotly_chart(fig, use_container_width=True)

        period = 30  # ~monthly seasonality on daily data
        # seasonal_decompose raises on series shorter than two full periods.
        if len(df_ts) >= 2 * period:
            # .ffill() replaces the deprecated fillna(method='ffill');
            # the decomposition cannot handle NaNs from empty resample bins.
            decomposition = sm.tsa.seasonal_decompose(
                df_ts[value_col].ffill(),
                period=period
            )

            fig = go.Figure()
            for name, component in (
                ('Trend', decomposition.trend),
                ('Seasonal', decomposition.seasonal),
                ('Residual', decomposition.resid),
            ):
                fig.add_trace(go.Scatter(
                    x=component.index,
                    y=component.values,
                    name=name
                ))
            fig.update_layout(title='Time Series Decomposition')
            st.plotly_chart(fig, use_container_width=True)
        else:
            st.info(
                f"Seasonal decomposition needs at least {2 * period} daily observations."
            )


def main():
    """App entry point: upload a file, preview it, dispatch to the chosen analysis."""
    st.title("📊 Excel Analysis Suite")

    # File Upload
    uploaded_file = st.file_uploader(
        "Drop Excel File Here (.xlsx, .xls, .csv)",
        type=['xlsx', 'xls', 'csv']
    )

    if uploaded_file:
        # File Metadata
        st.sidebar.header("File Information")
        st.sidebar.write(f"📄 Name: {uploaded_file.name}")
        st.sidebar.write(f"📏 Size: {uploaded_file.size / 1024:.2f} KB")

        # Load Data
        df = load_data(uploaded_file)
        st.sidebar.write(f"📊 Dimensions: {df.shape[0]} rows × {df.shape[1]} columns")

        # Data Preview
        st.subheader("Data Preview")
        st.dataframe(df.head(10), use_container_width=True)

        # Column Selection — default to the first two numeric columns.
        st.sidebar.header("Column Selection")
        column_types = {col: get_column_type(df[col]) for col in df.columns}
        selected_columns = st.sidebar.multiselect(
            "Select columns for analysis",
            df.columns,
            default=df.select_dtypes(include=[np.number]).columns.tolist()[:2]
        )

        # Analysis Type Selection
        analysis_type = st.radio(
            "Choose Analysis Type",
            ["Statistical Analysis", "Predictive Modeling", "Time Series Analysis"],
            horizontal=True
        )

        if analysis_type == "Statistical Analysis":
            _statistical_analysis(df, selected_columns, column_types)
        elif analysis_type == "Predictive Modeling":
            _predictive_modeling(df, column_types)
        elif analysis_type == "Time Series Analysis":
            _time_series_analysis(df, column_types)
|
212 |
|
213 |
+
# Run the app when executed directly (e.g. `streamlit run app.py`).
if __name__ == "__main__":
    main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|