vault backup: 2025-08-12 13:23:53

This commit is contained in:
Dane Sabo 2025-08-12 13:23:53 -04:00
parent 2d97e5032b
commit f31cf96e7b
3 changed files with 246 additions and 1 deletions

View File

@ -0,0 +1,235 @@
# PINN for a tube-in-tube heat exchanger (steady, 2D axisymmetric x-r)
# - Uses automatic differentiation for PDE residuals
# - Prescribes velocity fields u_h(x,r), u_c(x,r)
# - Solves for Th, Tw, Tc
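#
# Governing equations (nondimensional; these are the residuals enforced below):
#   hot:   u_h * dTh/dx = (1/Pe_h) * (d^2Th/dr^2 + (1/r) dTh/dr + d^2Th/dx^2)
#   wall:  d^2Tw/dr^2 + (1/r) dTw/dr + d^2Tw/dx^2 = 0   (steady conduction)
#   cold:  u_c * dTc/dx = (1/Pe_c) * (d^2Tc/dr^2 + (1/r) dTc/dr + d^2Tc/dx^2)
# plus temperature and flux continuity at r = Ri and r = Ro.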
import torch
import torch.nn as nn
# ====== User-configurable physical constants (nondimensional if you scaled) ======
Pe_h = 100.0 # Peclet (hot side) = u_h*L/alpha_h
Pe_c = 120.0 # Peclet (cold side) = u_c*L/alpha_c
alpha_w_ratio = 1.0 # wall alpha relative to hot-side alpha if nondim
Ri = 1.0 # inner radius (nondim, after scaling)
Ro = 1.3 # outer radius of the wall
Rout = 1.6 # outer radius of the cold annulus (assumed value; match your geometry)
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float32 # float32 is usually fine; use float64 if residuals are stiff
# ====== Velocity profiles (nondim) ======
def u_h(x, r):
    # Poiseuille in a circular tube: u_max*(1 - (r/Ri)^2)
    return 2.0 * (1.0 - (r / Ri) ** 2)  # average=1 if scaled; adjust factor to match Pe_h
def u_c(x, r):
    # Simple plug profile in annulus as a first approximation
    return torch.ones_like(r)
# ====== Network ======
class MLP(nn.Module):
    def __init__(self, in_dim=2, hidden=128, depth=7, out_dim=3, act=nn.SiLU):
        super().__init__()
        layers = [nn.Linear(in_dim, hidden), act()]
        for _ in range(depth - 1):
            layers += [nn.Linear(hidden, hidden), act()]
        self.backbone = nn.Sequential(*layers)
        # Separate heads for Th, Tw, Tc improve conditioning
        self.head_h = nn.Linear(hidden, 1)
        self.head_w = nn.Linear(hidden, 1)
        self.head_c = nn.Linear(hidden, 1)

        # Xavier init helps with tanh/SiLU nets
        def init(m):
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)

        self.apply(init)

    def forward(self, x, r):
        z = torch.stack([x, r], dim=-1)
        f = self.backbone(z)
        Th = self.head_h(f)
        Tw = self.head_w(f)
        Tc = self.head_c(f)
        return Th, Tw, Tc
net = MLP().to(device).to(dtype)
# ====== Utility: gradients via autograd ======
def grads(y, x):
    # create_graph=True keeps the graph so second derivatives (e.g. Th_xx) work
    return torch.autograd.grad(
        y, x, grad_outputs=torch.ones_like(y), create_graph=True
    )[0]
# ====== Collocation samplers ======
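# Sampling r = R * sqrt(U), U ~ Uniform(0, 1), gives a density proportional to r,
# matching the cylindrical area element r dr, so collocation points are uniform
# per unit cross-sectional area instead of over-weighting the axis region.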
def sample_in_hot(N):
    # x in [0, 1], r in [0, Ri]
    x = torch.rand(N, 1, device=device, dtype=dtype)
    r = Ri * torch.sqrt(torch.rand(N, 1, device=device, dtype=dtype))  # area-uniform
    return x.requires_grad_(True), r.requires_grad_(True)

def sample_in_wall(N):
    x = torch.rand(N, 1, device=device, dtype=dtype)
    # area-uniform over the wall annulus Ri <= r <= Ro
    r = torch.sqrt(
        (Ro**2 - Ri**2) * torch.rand(N, 1, device=device, dtype=dtype) + Ri**2
    )
    return x.requires_grad_(True), r.requires_grad_(True)
def sample_in_cold(N):
    x = torch.rand(N, 1, device=device, dtype=dtype)
    # simple uniform-in-r sampling over the cold annulus Ro <= r <= Rout
    r = torch.rand(N, 1, device=device, dtype=dtype) * (Rout - Ro) + Ro
    return x.requires_grad_(True), r.requires_grad_(True)
def sample_interface_Ri(N):
    x = torch.rand(N, 1, device=device, dtype=dtype)
    r = torch.full_like(x, Ri, requires_grad=True)
    return x, r

def sample_interface_Ro(N):
    x = torch.rand(N, 1, device=device, dtype=dtype)
    r = torch.full_like(x, Ro, requires_grad=True)
    return x, r

def sample_inlet_hot(N):
    x = torch.zeros(N, 1, device=device, dtype=dtype, requires_grad=True)
    r = Ri * torch.sqrt(torch.rand(N, 1, device=device, dtype=dtype))
    return x, r

def sample_inlet_cold_counterflow(N):
    # counterflow: the cold stream enters at x = 1
    x = torch.ones(N, 1, device=device, dtype=dtype, requires_grad=True)
    r = torch.rand(N, 1, device=device, dtype=dtype) * (Rout - Ro) + Ro
    return x, r

def sample_outlet_hot(N):
    x = torch.ones(N, 1, device=device, dtype=dtype, requires_grad=True)
    r = Ri * torch.sqrt(torch.rand(N, 1, device=device, dtype=dtype))
    return x, r

def sample_outlet_cold(N):
    x = torch.zeros(N, 1, device=device, dtype=dtype, requires_grad=True)
    r = torch.rand(N, 1, device=device, dtype=dtype) * (Rout - Ro) + Ro
    return x, r
# ====== Boundary condition targets (nondim) ======
T_h_in = 1.0 # scale so hot inlet is 1
T_c_in = 0.0 # cold inlet is 0
# ====== Training loop ======
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
for it in range(20000):
    opt.zero_grad()
    # ----- Interior residuals -----
    Nh = Nw = Nc = 512
    xh, rh = sample_in_hot(Nh)
    xw, rw = sample_in_wall(Nw)
    xc, rc = sample_in_cold(Nc)
    Th = net(xh, rh)[0]
    Th_x = grads(Th, xh)
    Th_r = grads(Th, rh)
    Th_xx = grads(Th_x, xh)
    Th_rr = grads(Th_r, rh)
    # Hot PDE: u_h dTh/dx = (1/Pe_h)*(Th_rr + (1/r) Th_r + Th_xx)
    # (the axial diffusion term Th_xx can be dropped at high Pe)
    uh = u_h(xh, rh)
    rh_safe = rh.clamp_min(1e-6)  # guard the 1/r term against samples near the axis
    hot_res = uh * Th_x - (1.0 / Pe_h) * (Th_rr + (1.0 / rh_safe) * Th_r + Th_xx)
    Tw = net(xw, rw)[1]  # only the wall head is needed here
    Tw_x = grads(Tw, xw)
    Tw_r = grads(Tw, rw)
    Tw_xx = grads(Tw_x, xw)
    Tw_rr = grads(Tw_r, rw)
    # Wall PDE: steady conduction (alpha_w absorbed in the scaling)
    wall_res = Tw_rr + (1.0 / rw) * Tw_r + Tw_xx
    Tc = net(xc, rc)[2]
    Tc_x = grads(Tc, xc)
    Tc_r = grads(Tc, rc)
    Tc_xx = grads(Tc_x, xc)
    Tc_rr = grads(Tc_r, rc)
    uc = u_c(xc, rc)
    cold_res = uc * Tc_x - (1.0 / Pe_c) * (Tc_rr + (1.0 / rc) * Tc_r + Tc_xx)
    L_pde = hot_res.pow(2).mean() + wall_res.pow(2).mean() + cold_res.pow(2).mean()
    # ----- Interface continuity (temperature + flux) -----
    Ni = 256
    xi, rRi = sample_interface_Ri(Ni)
    Th_i, Tw_i, _ = net(xi, rRi)
    # Flux continuity: k_h dTh/dr = k_w dTw/dr, expressed via the
    # conductivity ratio kw_over_kh = k_w / k_h (1 here; set for real materials)
    dTh_dr = grads(Th_i, rRi)
    dTw_dr = grads(Tw_i, rRi)
    kw_over_kh = 1.0
    L_int_Ri = (Th_i - Tw_i).pow(2).mean() + (dTh_dr - kw_over_kh * dTw_dr).pow(2).mean()
    xo, rRo = sample_interface_Ro(Ni)
    _, Tw_o, Tc_o = net(xo, rRo)
    dTw_dr_o = grads(Tw_o, rRo)
    dTc_dr_o = grads(Tc_o, rRo)
    kc_over_kw = 1.0
    L_int_Ro = (Tc_o - Tw_o).pow(2).mean() + (kc_over_kw * dTc_dr_o - dTw_dr_o).pow(2).mean()
    # ----- Boundary conditions -----
    Nb = 256
    x_in_h, r_in_h = sample_inlet_hot(Nb)
    Th_in_pred = net(x_in_h, r_in_h)[0]
    L_in_h = (Th_in_pred - T_h_in).pow(2).mean()
    x_in_c, r_in_c = sample_inlet_cold_counterflow(Nb)
    Tc_in_pred = net(x_in_c, r_in_c)[2]
    L_in_c = (Tc_in_pred - T_c_in).pow(2).mean()
    # Outlets: zero-gradient outflow (∂T/∂x ≈ 0)
    x_out_h, r_out_h = sample_outlet_hot(Nb)
    Th_out = net(x_out_h, r_out_h)[0]
    L_out_h = grads(Th_out, x_out_h).pow(2).mean()
    x_out_c, r_out_c = sample_outlet_cold(Nb)
    Tc_out = net(x_out_c, r_out_c)[2]
    L_out_c = grads(Tc_out, x_out_c).pow(2).mean()
    # Symmetry at r = 0: dTh/dr = 0 on the tube axis
    Ns = 128
    xs = torch.rand(Ns, 1, device=device, dtype=dtype, requires_grad=True)
    rs0 = torch.zeros_like(xs, requires_grad=True)
    Th_axis = net(xs, rs0)[0]
    L_sym = grads(Th_axis, rs0).pow(2).mean()
    # (An adiabatic outer shell, dTc/dr = 0 at r = Rout, could be enforced the same way.)
    # ----- Total loss with simple weights (tune these!) -----
    L = (
        1.0 * L_pde
        + 5.0 * (L_int_Ri + L_int_Ro)
        + 2.0 * (L_in_h + L_in_c)
        + 1.0 * (L_out_h + L_out_c)
        + 1.0 * L_sym
    )
    L.backward()
    opt.step()
    if it % 1000 == 0:
        print(
            f"it={it} L={L.item():.3e} PDE={L_pde.item():.3e} "
            f"IF={L_int_Ri.item() + L_int_Ro.item():.3e}"
        )
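# ====== Quick look at the result (a minimal sketch; assumes matplotlib) ======
# Evaluates the trained net on an (x, r) grid over the hot tube and plots Th.
# Purely illustrative; the grid resolution and styling are arbitrary choices.
import matplotlib.pyplot as plt

with torch.no_grad():
    nx, nr = 200, 100
    xg = torch.linspace(0.0, 1.0, nx, device=device, dtype=dtype)
    rg = torch.linspace(0.0, Ri, nr, device=device, dtype=dtype)
    X, R = torch.meshgrid(xg, rg, indexing="ij")
    Th_grid = net(X.reshape(-1, 1), R.reshape(-1, 1))[0].reshape(nx, nr)

plt.pcolormesh(X.cpu(), R.cpu(), Th_grid.cpu(), shading="auto")
plt.xlabel("x (nondim)")
plt.ylabel("r (nondim)")
plt.colorbar(label="Th")
plt.title("Hot-side temperature field")
plt.show()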

View File

@ -1,7 +1,7 @@
---
---
# 2025-08-10
# 2025-08-11
Today I'm finishing the thesis ideas, for real this time.
I'm also going to get a model of a heat exchanger working.

View File

@ -0,0 +1,10 @@
---
---
# 2025-08-12
Yesterday I finished my thesis ideas! Hooray! It feels
really good to actually have some writing finished. Today,
I'm making a model for a simple heat exchanger in Python.
Then, I'm going to train a physics-informed neural network
on it and start fucking with parameters.