Description
Hello,
My problem is described as follows
We will solve a simple ODE system:
with the initial conditions
The reference solution is here, where the parameters
My code can't predict the parameters correctly, how can I modify the loss function so that the parameters can be predicted correctly?
# General Loss Function
def loss_func(self):
y_pred = self.net_y(self.t)
v_nn, m_pred, h_pred, n_pred = y_pred[:, 0], y_pred[:, 1], y_pred[:, 2], y_pred[:, 3] # NN_{rho}, NN_{u}, NN_{p}
# Reshape data
m_pred = m_pred.reshape(len(m_pred), 1)
h_pred = h_pred.reshape(len(h_pred), 1)
n_pred = n_pred.reshape(len(n_pred), 1)
v_nn = v_nn.reshape(len(v_nn), 1)
v_pred = 10.0- (self.g1 * m_pred**3 * h_pred *(v_nn-50.0))-\
(self.g2 * n_pred**4 * (v_nn-77.0))-(self.g3 * (v_nn-54.387))
# Total Loss
loss = torch.mean((self.m - m_pred) ** 2) + torch.mean((self.h - h_pred) ** 2) + \
torch.mean((self.n - n_pred) ** 2) + torch.mean(((self.v - v_pred)) ** 2)
self.optimizer.zero_grad()
loss.backward()
self.iter += 1
# if self.iter%101==0:
# print("iter: ",self.iter)
print(
'Loss: %.3f, g1_PINNs: %.5f ,g2_PINNs: %.5f,g3_PINNs: %.5f ' %
(
loss.item(),
self.g1.item(),
self.g2.item(),
self.g3.item()
)
)
return loss
# Train network through minimization of loss function w/r to theta and gamma
def train(self, nIter):
self.dnn.train()
# Backward and optimize
self.optimizer.step(self.loss_func)
My complete code is in this link https://github.com/squarefaceyao/pinn_inverse_pes/blob/main/HH_inverse__pytorch.py