Lennard Schober committed · d2bb714
Parent(s): 16a2246
Some changes
app.py
CHANGED
@@ -38,29 +38,33 @@ def complex_heat_eq_solution(x, t, k, a, b, c):
     )


-def plot_heat_equation(m, approx_type):
+def plot_heat_equation(m, approx_type, quality, rand_or_det):
     global glob_k, glob_a, glob_b, glob_c, n_x, n_t

+    # Plot with more points than it was calculated
+    new_nx = 1 * n_x
+    new_nt = 1 * n_t
+
     try:
-        loaded_values = np.load(f"{approx_type}_m{m}.npz")
+        loaded_values = np.load(f"{approx_type}_m{m}_{str.lower(quality)}_{str.lower(rand_or_det)}.npz")
     except:
         raise gr.Error(f"First train the coefficients for {approx_type} and m = {m}")
     alpha = loaded_values["alpha"]
     Phi = loaded_values["Phi"]

     # Create grids for x and t
-    x = np.linspace(0, 1,
-    t = np.linspace(0, 5,
+    x = np.linspace(0, 1, new_nx) # Spatial grid
+    t = np.linspace(0, 5, new_nt) # Temporal grid
     X, T = np.meshgrid(x, t)

     # Compute the real solution over the grid
     U_real = complex_heat_eq_solution(X, T, glob_k, glob_a, glob_b, glob_c)

     # Compute the selected approximation
-
-
-
-
+    # Compute the approximations as a single matrix multiplication
+    Phi_reshaped = Phi.reshape(n_t, n_x, -1)
+    # The result will be of shape (n_t, n_x), as U_approx should match U_real's shape
+    U_approx = np.einsum("ijk,k->ij", Phi_reshaped, alpha)

     # Create the 3D plot with Plotly
     traces = []
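Note on the evaluation path above: `str.lower(quality)` is just `quality.lower()`, and the einsum over the reshaped design matrix is nothing more than the flat matrix-vector product `Phi @ alpha` laid out on the (n_t, n_x) grid. A minimal sketch with toy sizes (not taken from the Space) that checks the equivalence:

# Minimal sketch: the einsum used in plot_heat_equation equals a plain
# matrix-vector product followed by a reshape. Sizes here are arbitrary.
import numpy as np

n_t, n_x, m = 4, 5, 3                    # toy sizes, assumed for the demo
Phi = np.random.rand(n_t * n_x, m)       # design matrix as stored in the .npz file
alpha = np.random.rand(m)                # learned coefficients

U_flat = Phi @ alpha                                               # shape (n_t * n_x,)
U_grid = np.einsum("ijk,k->ij", Phi.reshape(n_t, n_x, -1), alpha)  # shape (n_t, n_x)

assert np.allclose(U_grid, U_flat.reshape(n_t, n_x))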
@@ -133,11 +137,11 @@ def plot_heat_equation(m, approx_type):
     return fig


-def plot_errors(m, approx_type):
+def plot_errors(m, approx_type, quality, rand_or_det):
     global n_x, n_t

     try:
-        loaded_values = np.load(f"{approx_type}_m{m}.npz")
+        loaded_values = np.load(f"{approx_type}_m{m}_{str.lower(quality)}_{str.lower(rand_or_det)}.npz")
     except:
         raise gr.Error(f"First train the coefficients for {approx_type} and m = {m}")
     alpha = loaded_values["alpha"]
@@ -154,8 +158,8 @@ def plot_errors(m, approx_type):
     # Compute the selected approximation
     U_approx = np.zeros_like(U_real)
     for i, t_val in enumerate(t):
-
-        U_approx[i, :] = np.dot(
+        Phi_at_t = Phi[i * n_x : (i + 1) * n_x]
+        U_approx[i, :] = np.dot(Phi_at_t, alpha)

     U_err = abs(U_approx - U_real)

@@ -229,19 +233,19 @@ def generate_data():
     return a_train, u_train, x, t


-def
+def features(a, theta_j, kernel="SINE", k=1, eps=1e-8):
     """Compute random features with adjustable kernel width."""
     if kernel == "SINE":
-        return np.sin(
+        return np.sin(k * np.linalg.norm(a - theta_j, axis=-1) + eps)
     elif kernel == "GFF":
-        return np.log(np.linalg.norm(a - theta_j, axis=-1)) / (2 * np.pi)
+        return np.log(np.linalg.norm(a - theta_j, axis=-1) + eps) / (2 * np.pi)
     else:
         raise ValueError("Unsupported kernel type!")


 def design_matrix(a, theta, kernel):
     """Construct design matrix."""
-    return np.array([
+    return np.array([features(a, theta_j, kernel=kernel) for theta_j in theta]).T


 def learn_coefficients(Phi, u):
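For orientation, the feature map and design matrix in the hunk above combine as follows: each theta_j is a 2-D center, each input sample is a pair (x, t), and Phi gets one column per center; the added `+ eps` keeps the GFF kernel's logarithm away from log(0). A small self-contained sketch with assumed toy shapes:

# Minimal sketch of the feature/design-matrix pipeline from this hunk.
# Phi[n, j] = sin(k * ||a_n - theta_j|| + eps) for the SINE kernel.
import numpy as np

def features(a, theta_j, kernel="SINE", k=1, eps=1e-8):
    if kernel == "SINE":
        return np.sin(k * np.linalg.norm(a - theta_j, axis=-1) + eps)
    elif kernel == "GFF":
        return np.log(np.linalg.norm(a - theta_j, axis=-1) + eps) / (2 * np.pi)
    raise ValueError("Unsupported kernel type!")

def design_matrix(a, theta, kernel):
    return np.array([features(a, theta_j, kernel=kernel) for theta_j in theta]).T

a = np.random.rand(10, 2)      # 10 samples of (x, t), toy data
theta = np.random.rand(3, 2)   # m = 3 feature centers
Phi = design_matrix(a, theta, "SINE")
print(Phi.shape)               # (10, 3): one row per sample, one column per center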
@@ -255,29 +259,7 @@ def approximate_solution(a, alpha, theta, kernel):
     return Phi @ alpha


-def
-    # grid coords
-    x, y = np.meshgrid(x, y)
-    # coefficient array, up to x^kx, y^ky
-    coeffs = np.ones((kx + 1, ky + 1))
-
-    # solve array
-    a = np.zeros((coeffs.size, x.size))
-
-    # for each coefficient produce array x^i, y^j
-    for index, (j, i) in enumerate(np.ndindex(coeffs.shape)):
-        # do not include powers greater than order
-        if order is not None and i + j > order:
-            arr = np.zeros_like(x)
-        else:
-            arr = coeffs[i, j] * x**i * y**j
-        a[index] = arr.ravel()
-
-    # do leastsq fitting and return leastsq result
-    return np.linalg.lstsq(a.T, np.ravel(z), rcond=None)
-
-
-def train_coefficients(m, kernel):
+def train_coefficients(m, kernel, quality, rand_or_det):
     global glob_k, glob_a, glob_b, glob_c, n_x, n_t
     # Start time for training
     start_time = time.time()
@@ -286,36 +268,57 @@ def train_coefficients(m, kernel):
     a_train, u_train, x, t = generate_data()

     # Define random features
-
-        (
-
-
+    if rand_or_det == "Random":
+        theta = np.column_stack(
+            (
+                np.random.uniform(-1, 1, size=m), # First dimension: [-1, 1]
+                np.random.uniform(-5, 5, size=m), # Second dimension: [-5, 5]
+            )
+        )
+    else:
+        theta = np.column_stack(
+            (
+                np.linspace(-1, 1, m), # First dimension: [-1, 1]
+                np.linspace(-5, 5, m), # Second dimension: [-5, 5]
+            )
         )
-    )

     # Construct design matrix and learn coefficients
     Phi = design_matrix(a_train, theta, kernel)
     alpha = learn_coefficients(Phi, u_train)
-
-
-        [complex_heat_eq_solution(x, t_i, glob_k, glob_a, glob_b, glob_c) for t_i in t]
-    )
-    a_test = np.c_[np.meshgrid(x, t)[0].ravel(), np.meshgrid(x, t)[1].ravel()]
-    u_approx = approximate_solution(a_test, alpha, theta, kernel).reshape(n_t, n_x)
+
+    end_time = f"{time.time() - start_time:.2f}"

     # Save values to the npz folder
     np.savez(
-        f"{kernel}_m{m}.npz",
+        f"{kernel}_m{m}_{str.lower(quality)}_{str.lower(rand_or_det)}.npz",
         alpha=alpha,
         kernel=kernel,
         Phi=Phi,
         theta=theta,
     )

+    # Create grids for x and t
+    # x_random = np.random.uniform(0, 1, 10 * n_x)  # Spatial grid
+    # t_random = np.random.uniform(0, 5, 10 * n_t)  # Temporal grid
+    # X, T = np.meshgrid(x_random, t_random)  # Create the mesh grid
+    # Compute the real solution over the grid
+    # U_real = complex_heat_eq_solution(X, T, glob_k, glob_a, glob_b, glob_c)
+    # print(U_real.shape)
+    # print(U_real)
+    # Compute the selected approximation
+    # U_approx = np.zeros_like(U_real)
+    # for i, xpos in enumerate(x_random):
+    #     for j, tpos in enumerate(t_random):
+    #         Phi_at_x_t = test_approx([xpos, tpos], theta, kernel)
+    #         U_approx[j, i] = np.dot(Phi_at_x_t, alpha)
+
     # Compute average error
-    avg_err = np.mean(np.abs(
+    # avg_err = np.mean(np.abs(U_real - U_approx))

-    return
+    return (
+        f"Training completed in {end_time} seconds." # The average error is {avg_err}."
+    )


 def plot_function(k, a, b, c):
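The new `rand_or_det` switch above only changes how the feature centers theta are placed: uniform random draws versus a regular linspace grid over the same ranges. A toy sketch (m chosen arbitrarily, not from the Space) of the two constructions:

# Minimal sketch of the "Random" vs "Deterministic" placement of theta.
import numpy as np

m = 5
theta_random = np.column_stack(
    (np.random.uniform(-1, 1, size=m), np.random.uniform(-5, 5, size=m))
)
theta_det = np.column_stack((np.linspace(-1, 1, m), np.linspace(-5, 5, m)))
print(theta_random.shape, theta_det.shape)  # both (m, 2): one 2-D center per feature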
@@ -381,12 +384,14 @@ def plot_function(k, a, b, c):
     return fig


-def plot_all(m, kernel):
+def plot_all(m, kernel, quality, rand_or_det):
     # Generate the plot content (replace this with your actual plot logic)
     approx_fig = plot_heat_equation(
-        m, kernel
+        m, kernel, quality, rand_or_det
     ) # Replace with your function for approx_plot
-    error_fig = plot_errors(
+    error_fig = plot_errors(
+        m, kernel, quality, rand_or_det
+    ) # Replace with your function for error_plot

     # Return the figures and make the plots visible
     return (
@@ -431,7 +436,6 @@ def create_gradio_ui():
         # Function parameter inputs
         gr.Markdown(markdown_content)

-
         with gr.Row():
             with gr.Column(min_width=500):
                 k_slider = gr.Slider(
@@ -487,8 +491,13 @@ def create_gradio_ui():
                     choices=[50, 250, 1000, 5000, 10000, 25000],
                     value=1000,
                 )
-
-
+                rand_det_dropdown = gr.Dropdown(
+                    label="Choose Random / Deterministic",
+                    choices=["Deterministic", "Random"],
+                    value="Deterministic",
+                )
+                # Output to show status
+                output = gr.Textbox(label="Status", interactive=False)

             with gr.Column():
                 # Button to train coefficients
@@ -496,7 +505,7 @@ def create_gradio_ui():
                 # Function to trigger training and update dropdown
                 train_button.click(
                     fn=train_coefficients,
-                    inputs=[m_slider, kernel_dropdown],
+                    inputs=[m_slider, kernel_dropdown, quality_dropdown, rand_det_dropdown],
                     outputs=output,
                 )
                 approx_button = gr.Button("Plot Approximation")
@@ -509,7 +518,7 @@ def create_gradio_ui():

         approx_button.click(
             fn=plot_all,
-            inputs=[m_slider, kernel_dropdown],
+            inputs=[m_slider, kernel_dropdown, quality_dropdown, rand_det_dropdown],
             outputs=[approx_plot, error_plot],
         )

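The UI changes in the last hunks follow one pattern: define the new dropdown, then append it to the `inputs` list of the existing `click()` handlers. A stripped-down, hypothetical sketch of that wiring (the widget choices for `m` and `quality_dropdown` are assumed, not taken from the Space):

# Minimal sketch of the Gradio wiring pattern used above, with a stub handler.
import gradio as gr

def train_coefficients(m, kernel, quality, rand_or_det):
    # Stub: the real Space trains random-feature coefficients here.
    return f"Would train m={m}, kernel={kernel}, quality={quality}, mode={rand_or_det}"

with gr.Blocks() as demo:
    m_slider = gr.Dropdown(label="m", choices=[50, 250, 1000], value=1000)
    kernel_dropdown = gr.Dropdown(label="Kernel", choices=["SINE", "GFF"], value="SINE")
    quality_dropdown = gr.Dropdown(label="Quality", choices=["Low", "High"], value="Low")
    rand_det_dropdown = gr.Dropdown(
        label="Choose Random / Deterministic",
        choices=["Deterministic", "Random"],
        value="Deterministic",
    )
    output = gr.Textbox(label="Status", interactive=False)
    train_button = gr.Button("Train")
    train_button.click(
        fn=train_coefficients,
        inputs=[m_slider, kernel_dropdown, quality_dropdown, rand_det_dropdown],
        outputs=output,
    )

# demo.launch()  # uncomment to run locally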