From c93434bb58d645bbaf4aca45948d6f7b5a6402bc Mon Sep 17 00:00:00 2001 From: SarveshJoshi33 Date: Wed, 22 Jan 2025 13:44:04 -0500 Subject: [PATCH 1/8] Changes in the MeshFixture, FunctionSpace, Objective. Array shape mismatch for solving exists --- optimism/FunctionSpace.py | 238 +++++++++++++++++++++++++++++++------- optimism/Objective.py | 83 ++++++++++++- test/MeshFixture.py | 224 +++++++++++++++++++++++++++++++++++ test/ShearProblem_DOF.py | 161 ++++++++++++++++++++++++++ test/TestFixture.py | 17 +++ 5 files changed, 679 insertions(+), 44 deletions(-) create mode 100644 test/MeshFixture.py create mode 100644 test/ShearProblem_DOF.py create mode 100644 test/TestFixture.py diff --git a/optimism/FunctionSpace.py b/optimism/FunctionSpace.py index d22dff3b..fd11f616 100644 --- a/optimism/FunctionSpace.py +++ b/optimism/FunctionSpace.py @@ -1,21 +1,16 @@ -from jax.scipy.linalg import solve -from jaxtyping import Array, Float -from optimism import Interpolants -from optimism import Mesh -from optimism import QuadratureRule -from typing import Tuple -import equinox as eqx -import jax -import jax.numpy as np +from collections import namedtuple import numpy as onp +import jax +import jax.numpy as np +from jax.scipy.linalg import solve -class EssentialBC(eqx.Module): - nodeSet: str - component: int +from optimism import Interpolants +from optimism import Mesh -class FunctionSpace(eqx.Module): +FunctionSpace = namedtuple('FunctionSpace', ['shapes', 'vols', 'shapeGrads', 'mesh', 'quadratureRule', 'isAxisymmetric']) +FunctionSpace.__doc__ = \ """Data needed for calculus on functions in the discrete function space. In describing the shape of the attributes, ``ne`` is the number of @@ -37,12 +32,8 @@ class FunctionSpace(eqx.Module): isAxisymmetric: boolean indicating if the function space data are axisymmetric. """ - shapes: Float[Array, "ne nqpe nn"] - vols: Float[Array, "ne nqpe"] - shapeGrads: Float[Array, "ne nqpe nn nd"] - mesh: Mesh.Mesh - quadratureRule: QuadratureRule.QuadratureRule - isAxisymmetric: bool + +EssentialBC = namedtuple('EssentialBC', ['nodeSet', 'component']) def construct_function_space(mesh, quadratureRule, mode2D='cartesian'): @@ -363,42 +354,49 @@ def integrate_function_on_edges(functionSpace, func, U, quadRule, edges): return np.sum(integrate_on_edges(functionSpace, func, U, quadRule, edges)) -class DofManager(eqx.Module): - # TODO get type hints below correct - # TODO this one could be moved to jax types if we move towards - # TODO jit safe preconditioners/solvers - fieldShape: Tuple[int, int] - isBc: any - isUnknown: any - ids: any - unknownIndices: any - bcIndices: any - dofToUnknown: any - HessRowCoords: any - HessColCoords: any - hessian_bc_mask: any +def create_nodeset_layers(mesh): + coords = mesh.coords + tol = 1e-8 + # Create unique layers of nodesets along the y-direction + unique_layers = sorted(onp.unique(coords[:,1])) + Ny = int(input("Enter the expected number of nodeset layers in y-direction: ")) + assert len(unique_layers) == Ny, f"ERROR - Expected {Ny} layers, but found {len(unique_layers)}." 
+ layer_rows = [] + + for i, y_val in enumerate(unique_layers): + nodes_in_layer = onp.flatnonzero(onp.abs(coords[:, 1] - y_val) < tol) + layer_rows.append(nodes_in_layer) + + max_nodes_per_layer = max(len(row) for row in layer_rows) + y_layers = -1 * np.ones((len(layer_rows), max_nodes_per_layer), dtype=int) # Initialize with -1 + + for i, row in enumerate(layer_rows): + y_layers = y_layers.at[i, :len(row)].set(row) # Fill each row with nodes from the layer + + # # For debugging + # print("Layers in y-direction: ", y_layers) + return y_layers + +class DofManager: def __init__(self, functionSpace, dim, EssentialBCs): self.fieldShape = Mesh.num_nodes(functionSpace.mesh), dim - isBc = onp.full(self.fieldShape, False, dtype=bool) + self.isBc = onp.full(self.fieldShape, False, dtype=bool) for ebc in EssentialBCs: - isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True - self.isBc = isBc + self.isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True self.isUnknown = ~self.isBc - self.ids = onp.arange(self.isBc.size).reshape(self.fieldShape) + self.ids = np.arange(self.isBc.size).reshape(self.fieldShape) self.unknownIndices = self.ids[self.isUnknown] self.bcIndices = self.ids[self.isBc] - ones = onp.ones(self.isBc.size, dtype=int) * -1 - dofToUnknown = ones - dofToUnknown[self.unknownIndices] = onp.arange(self.unknownIndices.size) - self.dofToUnknown = dofToUnknown + ones = np.ones(self.isBc.size, dtype=int) * -1 + self.dofToUnknown = ones.at[self.unknownIndices].set(np.arange(self.unknownIndices.size)) self.HessRowCoords, self.HessColCoords = self._make_hessian_coordinates(functionSpace.mesh.conns) - self.hessian_bc_mask = self._make_hessian_bc_mask(onp.array(functionSpace.mesh.conns)) + self.hessian_bc_mask = self._make_hessian_bc_mask(functionSpace.mesh.conns) def get_bc_size(self): @@ -450,7 +448,7 @@ def _make_hessian_coordinates(self, conns): rowCoords[rangeBegin:rangeEnd] = elHessCoords.ravel() colCoords[rangeBegin:rangeEnd] = elHessCoords.T.ravel() - rangeBegin += onp.square(nElUnknowns[e]) + rangeBegin += np.square(nElUnknowns[e]) return rowCoords, colCoords @@ -466,3 +464,157 @@ def _make_hessian_bc_mask(self, conns): hessian_bc_mask[e,eFlag,:] = False hessian_bc_mask[e,:,eFlag] = False return hessian_bc_mask + +# Different class for Multi-Point Constrained Problem +class DofManagerMPC: + def __init__(self, functionSpace, dim, EssentialBCs, mesh): + self.fieldShape = Mesh.num_nodes(functionSpace.mesh), dim + self.isBc = onp.full(self.fieldShape, False, dtype=bool) + for ebc in EssentialBCs: + self.isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True + self.isUnknown = ~self.isBc + + self.ids = np.arange(self.isBc.size).reshape(self.fieldShape) + + # Create layers and assign master and slave rows + self.layers = create_nodeset_layers(mesh) + print("These are the layers created for the mesh: ", self.layers) + self.master_layer = int(input("Choose the row index for master nodes: ")) + self.slave_layer = int(input("Choose the row index for slave nodes: ")) + + # Generate master and slave arrays + self.master_array = self._create_layer_array(self.master_layer) + self.slave_array = self._create_layer_array(self.slave_layer) + + # Create DOF mappings for unknowns + self.unknownIndices = self.ids[self.isUnknown] + self.bcIndices = self.ids[self.isBc] + + ones = np.ones(self.isBc.size, dtype=int) * -1 + self.dofToUnknown = ones.at[self.unknownIndices].set(np.arange(self.unknownIndices.size)) + + # Compute Hessian-related data + self.HessRowCoords, 
self.HessColCoords = self._make_hessian_coordinates(functionSpace.mesh.conns) + self.hessian_bc_mask = self._make_hessian_bc_mask(functionSpace.mesh.conns) + + def get_bc_size(self): + return np.sum(self.isBc).item() + + def get_unknown_size(self): + return np.sum(self.isUnknown).item() + + def create_field(self, Uu, Ubc=0.0): + U = np.zeros(self.isBc.shape).at[self.isBc].set(Ubc) + return U.at[self.isUnknown].set(Uu) + + def get_bc_values(self, U): + return U[self.isBc] + + def get_unknown_values(self, U): + return U[self.isUnknown] + + def get_master_values(self, U): + return U[self.master_array[:, 1:]] + + def get_slave_values(self, U): + return U[self.slave_array[:, 1:]] + + def _create_layer_array(self, layer_index): + """Creates an array for the specified layer (master or slave) with DOF mappings.""" + layer_nodes = self.layers[layer_index] + layer_array = [] + for node in layer_nodes: + node_dofs = self.ids[node] + layer_array.append([node, *node_dofs]) + return onp.array(layer_array, dtype=int) + + def _make_hessian_coordinates(self, conns): + """Creates row and column coordinates for the Hessian, considering master and slave nodes.""" + nElUnknowns = onp.zeros(conns.shape[0], dtype=int) + nHessianEntries = 0 + for e, eNodes in enumerate(conns): + elUnknownFlags = self.isUnknown[eNodes, :].ravel() + nElUnknowns[e] = onp.sum(elUnknownFlags) + + # Include master and slave nodes in the size calculation + eNodes_list = onp.asarray(eNodes).tolist() + elMasterNodes = set(eNodes_list).intersection(set(self.master_array[:, 0])) + elSlaveNodes = set(eNodes_list).intersection(set(self.slave_array[:, 0])) + + nElMasters = sum(len(self.master_array[onp.where(self.master_array[:, 0] == node)[0], 1:].ravel()) for node in elMasterNodes) + nElSlaves = sum(len(self.slave_array[onp.where(self.slave_array[:, 0] == node)[0], 1:].ravel()) for node in elSlaveNodes) + + totalDOFs = nElUnknowns[e] + nElMasters + nElSlaves + nHessianEntries += totalDOFs ** 2 + + # Allocate sufficient space for Hessian coordinates + rowCoords = onp.zeros(nHessianEntries, dtype=int) + colCoords = onp.zeros(nHessianEntries, dtype=int) + rangeBegin = 0 + + for e, eNodes in enumerate(conns): + eNodes_list = onp.asarray(eNodes).tolist() + elDofs = self.ids[eNodes, :] + elUnknownFlags = self.isUnknown[eNodes, :] + elUnknowns = self.dofToUnknown[elDofs[elUnknownFlags]] + + # Identify master and slave DOFs for the element + elMasterNodes = set(eNodes_list).intersection(set(self.master_array[:, 0])) + elSlaveNodes = set(eNodes_list).intersection(set(self.slave_array[:, 0])) + + elMasterDofs = [] + for node in elMasterNodes: + master_idx = onp.where(self.master_array[:, 0] == node)[0] + elMasterDofs.extend(self.master_array[master_idx, 1:].ravel()) + + elSlaveDofs = [] + for node in elSlaveNodes: + slave_idx = onp.where(self.slave_array[:, 0] == node)[0] + elSlaveDofs.extend(self.slave_array[slave_idx, 1:].ravel()) + + # Combine unknowns, master, and slave DOFs + elAllUnknowns = onp.concatenate([elUnknowns, elMasterDofs, elSlaveDofs]) + elHessCoords = onp.tile(elAllUnknowns, (len(elAllUnknowns), 1)) + + nElHessianEntries = len(elAllUnknowns) ** 2 + rangeEnd = rangeBegin + nElHessianEntries + + # Assign values to the Hessian coordinate arrays + rowCoords[rangeBegin:rangeEnd] = elHessCoords.ravel() + colCoords[rangeBegin:rangeEnd] = elHessCoords.T.ravel() + + rangeBegin += nElHessianEntries + + return rowCoords, colCoords + + + def _make_hessian_bc_mask(self, conns): + """Creates a mask for BCs in the Hessian, considering master and 
slave nodes.""" + nElements, nNodesPerElement = conns.shape + nFields = self.ids.shape[1] + nDofPerElement = nNodesPerElement * nFields + + hessian_bc_mask = onp.full((nElements, nDofPerElement, nDofPerElement), True, dtype=bool) + for e, eNodes in enumerate(conns): + # Get boundary condition flags for all DOFs + eFlag = self.isBc[eNodes, :].ravel() + + # Identify master and slave nodes + eNodes_list = onp.asarray(eNodes).tolist() + masterFlag = onp.isin(eNodes_list, self.master_array[:, 0]) + slaveFlag = onp.isin(eNodes_list, self.slave_array[:, 0]) + + # Expand master and slave flags to match the DOF shape + masterFlag_expanded = onp.repeat(masterFlag, nFields) + slaveFlag_expanded = onp.repeat(slaveFlag, nFields) + + # Combine flags + combinedFlag = eFlag | masterFlag_expanded | slaveFlag_expanded + + # Update Hessian mask + hessian_bc_mask[e, combinedFlag, :] = False + hessian_bc_mask[e, :, combinedFlag] = False + + return hessian_bc_mask + + diff --git a/optimism/Objective.py b/optimism/Objective.py index 8f3d3f8b..9689f658 100644 --- a/optimism/Objective.py +++ b/optimism/Objective.py @@ -265,4 +265,85 @@ def get_value(self, x): def get_residual(self, x): return self.gradient(self.scaling * x) - + +class ObjectiveMPC(Objective): + def __init__(self, f, x, p, dofManagerMPC, precondStrategy=None): + """ + Initialize the Objective class for MPC with the condensation method. + + Parameters: + - f: Objective function. + - x: Initial guess for the primal DOF vector. + - p: Parameters for the objective function. + - dofManagerMPC: Instance of DofManagerMPC with master-slave relationships. + - precondStrategy: Preconditioning strategy (optional). + """ + super().__init__(f, x, p, precondStrategy) + self.dofManagerMPC = dofManagerMPC + self.master_dofs = dofManagerMPC.master_array[:, 1:] + self.slave_dofs = dofManagerMPC.slave_array[:, 1:] + self.slave_to_master_map = self.create_slave_to_master_map() + print("self.master_dofs:", self.master_dofs) + print("master_dof_indices.size:", self.master_dofs.size) + + def create_slave_to_master_map(self): + """Create the mapping to express slave DOFs in terms of master DOFs.""" + # Placeholder for the actual implementation. + # This should create a relationship of the form: + # slave_dofs = C @ master_dofs + d + # where C is the constraint matrix and d is a constant vector. + pass + + def value(self, x_master): + """Evaluate the objective function with reduced DOFs.""" + x_full = self.expand_master_to_full_dofs(x_master) + print("x_master shape:", x_master.shape) + print("fieldShape:", self.dofManagerMPC.fieldShape) + assert x_master.shape[0] == self.master_dofs.size, "Mismatch in master DOF sizes!" + + return super().value(x_full) + + def gradient(self, x_master): + """Compute the gradient w.r.t. master DOFs.""" + x_full = self.expand_master_to_full_dofs(x_master) + grad_full = super().gradient(x_full) + return self.reduce_gradient_to_master(grad_full) + + def hessian_vec(self, x_master, vx_master): + """Compute the Hessian-vector product w.r.t. 
master DOFs.""" + x_full = self.expand_master_to_full_dofs(x_master) + vx_full = self.expand_master_to_full_dofs(vx_master) + hess_vec_full = super().hessian_vec(x_full, vx_full) + return self.reduce_gradient_to_master(hess_vec_full) + + def expand_master_to_full_dofs(self, x_master): + """Expand master DOFs to the full DOF vector.""" + # Initialize the full DOF array + x_full = np.zeros(self.dofManagerMPC.fieldShape) + + # Ensure `self.master_dofs` is a 1D array of indices + master_dof_indices = self.master_dofs.ravel() + + # Validate shape consistency + assert x_master.shape[0] == master_dof_indices.size, ( + f"Mismatch: x_master has shape {x_master.shape}, " + f"but master DOFs have size {master_dof_indices.size}" + ) + + # Assign master DOFs + x_full = x_full.at[master_dof_indices].set(x_master) + + # Add slave DOFs + if self.slave_to_master_map is not None: + slave_values = self.slave_to_master_map @ x_master # Ensure compatibility + slave_dof_indices = self.slave_dofs.ravel() + x_full = x_full.at[slave_dof_indices].set(slave_values) + + return x_full + + + def reduce_gradient_to_master(self, grad_full): + """Reduce the full gradient to master DOFs.""" + # Extract the gradient corresponding to the master DOFs. + return grad_full[self.master_dofs] + diff --git a/test/MeshFixture.py b/test/MeshFixture.py new file mode 100644 index 00000000..085d97bb --- /dev/null +++ b/test/MeshFixture.py @@ -0,0 +1,224 @@ +# ---------------- NEW MESHFIXTURE SCRIPT -------------------------- +# ------------------------------------------------------------------ +# Note - This script involves a function creating nodeset layers +# ------------------------------------------------------------------ + +import jax.numpy as jnp +import numpy as onp +from optimism.Mesh import * +from optimism import Surface + +# testing utils +from TestFixture import * + +d_kappa = 1.0 +d_nu = 0.3 +d_E = 3*d_kappa*(1 - 2*d_nu) +defaultProps = {'elastic modulus': d_E, + 'poisson ratio': d_nu} + + +def compute_residual_norm(grad_func, dofValues): + grad = grad_func(dofValues) + return np.linalg.norm(grad) + +def map_to_arch(x, R): + r = R-x[0] + y = x[1] + pi = np.pi + return r*np.array([-np.cos(pi*y), np.sin(pi*y)]) + +def map_to_cos(x, L, warp): + y = x[1] + pi = np.pi + return np.array([L*(y-0.5), warp*np.cos(2*pi*(y-0.5))-x[0]]) + +class MeshFixture(TestFixture): + + def create_mesh_and_disp(self, Nx, Ny, xRange, yRange, initial_disp_func, setNamePostFix=''): + coords, conns = create_structured_mesh_data(Nx, Ny, xRange, yRange) + tol = 1e-8 + nodeSets = {} + nodeSets['left'+setNamePostFix] = np.flatnonzero(coords[:,0] < xRange[0] + tol) + nodeSets['bottom'+setNamePostFix] = np.flatnonzero(coords[:,1] < yRange[0] + tol) + nodeSets['right'+setNamePostFix] = np.flatnonzero(coords[:,0] > xRange[1] - tol) + nodeSets['top'+setNamePostFix] = np.flatnonzero(coords[:,1] > yRange[1] - tol) + nodeSets['all_boundary'+setNamePostFix] = np.flatnonzero((coords[:,0] < xRange[0] + tol) | + (coords[:,1] < yRange[0] + tol) | + (coords[:,0] > xRange[1] - tol) | + (coords[:,1] > yRange[1] - tol) ) + + def is_edge_on_left(xyOnEdge): + return np.all( xyOnEdge[:,0] < xRange[0] + tol ) + + def is_edge_on_bottom(xyOnEdge): + return np.all( xyOnEdge[:,1] < yRange[0] + tol ) + + def is_edge_on_right(xyOnEdge): + return np.all( xyOnEdge[:,0] > xRange[1] - tol ) + + def is_edge_on_top(xyOnEdge): + return np.all( xyOnEdge[:,1] > yRange[1] - tol ) + + sideSets = {} + sideSets['left'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_left) + 
sideSets['bottom'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_bottom) + sideSets['right'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_right) + sideSets['top'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_top) + + allBoundaryEdges = np.vstack([s for s in sideSets.values()]) + sideSets['all_boundary'+setNamePostFix] = allBoundaryEdges + + blocks = {'block'+setNamePostFix: np.arange(conns.shape[0])} + mesh = construct_mesh_from_basic_data(coords, conns, blocks, nodeSets, sideSets) + return mesh, vmap(initial_disp_func)(mesh.coords) + + + def create_arch_mesh_disp_and_edges(self, N, M, w, R, + bcSetFraction=1./3., + setNamePostFix=''): + h = 1.0 + coords, conns = create_structured_mesh_data(N, M, [-w, w], [0., h]) + + tol = 1e-8 + nodeSets = {} + nodeSets['left'+setNamePostFix] = np.flatnonzero(coords[:,1] < 0.0 + tol) + nodeSets['right'+setNamePostFix] = np.flatnonzero(coords[:,1] > h - tol) + nodeSets['bottom'+setNamePostFix] = np.flatnonzero(coords[:,0] > w - tol) + nodeSets['top'+setNamePostFix] = np.flatnonzero(coords[:,0] < -w + tol) + nodeSets['top_left'+setNamePostFix] = np.intersect1d(nodeSets['top'+setNamePostFix], + nodeSets['left'+setNamePostFix]) + a = 0.5*h*(1.0 - bcSetFraction) + b = 0.5*h*(1.0 + bcSetFraction) + + nodeSets['push'+setNamePostFix] = np.flatnonzero( (coords[:,0] < tol - w) + & (coords[:,1] > a) + & (coords[:,1] < b) ) + + def is_edge_on_left(xyOnEdge): + return np.all(xyOnEdge[:,1] < 0.0 + tol) + + def is_edge_on_bottom(xyOnEdge): + return np.all( xyOnEdge[:,0] > w - tol ) + + def is_edge_on_right(xyOnEdge): + return np.all( xyOnEdge[:,1] > h - tol ) + + def is_edge_on_top(xyOnEdge): + return np.all( xyOnEdge[:,0] < tol - w ) + + def is_edge_on_loading_patch(xyOnEdge): + return np.all((xyOnEdge[:,0] < tol - w) & (xyOnEdge[:,1] > a) & (xyOnEdge[:,1] < b)) + + sideSets = {} + sideSets['top'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_top) + sideSets['bottom'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_bottom) + sideSets['left'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_left) + sideSets['right'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_right) + sideSets['push'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_loading_patch) + + coords = vmap(map_to_arch, (0,None))(coords, R) + blocks = {'block'+setNamePostFix: np.arange(conns.shape[0])} + U = np.zeros(coords.shape) + return construct_mesh_from_basic_data(coords, conns, blocks, nodeSets, sideSets), U + + + def create_cos_mesh_disp_and_edges(self, N, M, w, L, warp): + h = 1.0 + coords, conns = create_structured_mesh_data(N, M, [-w, w], [0., h]) + + tol = 1e-8 + nodeSets = {} + nodeSets['left'] = np.flatnonzero(coords[:,1] < 0.0 + tol) + nodeSets['right'] = np.flatnonzero(coords[:,1] > h - tol) + nodeSets['top'] = np.flatnonzero( (coords[:,0] < tol - w) + & (coords[:,1] > 0.3) + & (coords[:,1] < 0.7) ) + + def is_edge_on_left(xyOnEdge): + return np.all( xyOnEdge[:,0] < -w + tol ) + + sideSets = {} + sideSets['top'] = Surface.create_edges(coords, conns, is_edge_on_left) + + coords = vmap(map_to_cos, (0,None,None))(coords, L, warp) + blocks = {'block': np.arange(conns.shape[0])} + U = np.zeros(coords.shape) + return construct_mesh_from_basic_data(coords, conns, None, nodeSets, sideSets), U + + def create_mesh_disp_and_nodeset_layers(self, Nx, Ny, xRange, yRange, initial_disp_func, setNamePostFix=''): + coords, conns = create_structured_mesh_data(Nx, Ny, xRange, 
yRange) + tol = 1e-8 + nodeSets = {} + + # Predefined boundary node sets + nodeSets['left'+setNamePostFix] = jnp.flatnonzero(coords[:, 0] < xRange[0] + tol) + nodeSets['bottom'+setNamePostFix] = jnp.flatnonzero(coords[:, 1] < yRange[0] + tol) + nodeSets['right'+setNamePostFix] = jnp.flatnonzero(coords[:, 0] > xRange[1] - tol) + nodeSets['top'+setNamePostFix] = jnp.flatnonzero(coords[:, 1] > yRange[1] - tol) + nodeSets['all_boundary'+setNamePostFix] = jnp.flatnonzero( + (coords[:, 0] < xRange[0] + tol) | + (coords[:, 1] < yRange[0] + tol) | + (coords[:, 0] > xRange[1] - tol) | + (coords[:, 1] > yRange[1] - tol) + ) + + # Identify unique y-layers for nodes + unique_y_layers = sorted(onp.unique(coords[:, 1])) + # print("Unique y-layers identified:", unique_y_layers) + + # Ensure we have the expected number of layers + assert len(unique_y_layers) == Ny, f"Expected {Ny} y-layers, but found {len(unique_y_layers)}." + + # Initialize an empty list to store rows of nodes + y_layer_rows = [] + + # Map nodes to y_layers and construct rows + for i, y_val in enumerate(unique_y_layers): + nodes_in_layer = onp.flatnonzero(onp.abs(coords[:, 1] - y_val) < tol) + y_layer_rows.append(nodes_in_layer) + # print(f"Nodes in y-layer {i+1} (y = {y_val}):", nodes_in_layer) + + # Convert list of rows into a structured 2D JAX array, padding with -1 + max_nodes_per_layer = max(len(row) for row in y_layer_rows) + y_layers = -1 * jnp.ones((len(y_layer_rows), max_nodes_per_layer), dtype=int) # Initialize with -1 + + for i, row in enumerate(y_layer_rows): + y_layers = y_layers.at[i, :len(row)].set(row) # Fill each row with nodes from the layer + + # Print for debugging + # print("y_layers (2D array):", y_layers) + + def is_edge_on_left(xyOnEdge): + return np.all( xyOnEdge[:,0] < xRange[0] + tol ) + + def is_edge_on_bottom(xyOnEdge): + return np.all( xyOnEdge[:,1] < yRange[0] + tol ) + + def is_edge_on_right(xyOnEdge): + return np.all( xyOnEdge[:,0] > xRange[1] - tol ) + + def is_edge_on_top(xyOnEdge): + return np.all( xyOnEdge[:,1] > yRange[1] - tol ) + + sideSets = {} + sideSets['left'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_left) + sideSets['bottom'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_bottom) + sideSets['right'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_right) + sideSets['top'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_top) + + allBoundaryEdges = np.vstack([s for s in sideSets.values()]) + sideSets['all_boundary'+setNamePostFix] = allBoundaryEdges + + blocks = {'block'+setNamePostFix: np.arange(conns.shape[0])} + mesh = construct_mesh_from_basic_data(coords, conns, blocks, nodeSets, sideSets) + + return mesh, vmap(initial_disp_func)(mesh.coords), y_layers + + + + + + + + diff --git a/test/ShearProblem_DOF.py b/test/ShearProblem_DOF.py new file mode 100644 index 00000000..cd49f164 --- /dev/null +++ b/test/ShearProblem_DOF.py @@ -0,0 +1,161 @@ +# Test Case - Displacement based Shear Test +# --------------------------------------------------- +# Note - This approach involves segregation of master and slave DOFs +# --------------------------------------------------- + +import sys +sys.path.insert(0, "/home/sarvesh/Documents/Github/optimism") + + +import jax.numpy as np +from jax import jit, grad + +from optimism import EquationSolver as EqSolver +from optimism import QuadratureRule +from optimism import Mechanics +from optimism import Mesh +from optimism import Objective +from optimism import FunctionSpace +from optimism.Mesh 
import create_higher_order_mesh_from_simplex_mesh +from optimism.material import Neohookean +from optimism.FunctionSpace import DofManagerMPC + +from MeshFixture import * +import matplotlib.pyplot as plt + + + +class ShearTest(MeshFixture): + + def setUp(self): + dummyDispGrad = np.eye(2) + self.mesh = self.create_mesh_disp_and_nodeset_layers(5, 5, [0.,1.], [0.,1.], + lambda x: dummyDispGrad.dot(x))[0] + self.mesh = create_higher_order_mesh_from_simplex_mesh(self.mesh, order=1, createNodeSetsFromSideSets=True) + self.quadRule = QuadratureRule.create_quadrature_rule_on_triangle(degree=1) + self.fs = FunctionSpace.construct_function_space(self.mesh, self.quadRule) + + self.EBCs = [FunctionSpace.EssentialBC(nodeSet='top', component=0), + FunctionSpace.EssentialBC(nodeSet='top', component=1), + FunctionSpace.EssentialBC(nodeSet='bottom', component=0), + FunctionSpace.EssentialBC(nodeSet='bottom', component=1)] + + # dofManager = DofManagerMPC(self.fs, 2, self.EBCs, self.mesh) + + # self.master_array = dofManager.isMaster + # self.slave_array = dofManager.isSlave + # self.assembly = dofManager.dofAssembly + # print("Assembly: ", self.assembly) + + shearModulus = 0.855 # MPa + bulkModulus = 1000*shearModulus # MPa + youngModulus = 9.0*bulkModulus*shearModulus / (3.0*bulkModulus + shearModulus) + poissonRatio = (3.0*bulkModulus - 2.0*shearModulus) / 2.0 / (3.0*bulkModulus + shearModulus) + props = { + 'elastic modulus': youngModulus, + 'poisson ratio': poissonRatio, + 'version': 'coupled' + } + self.mat = Neohookean.create_material_model_functions(props) + + self.freq = 0.3 / 2.0 / np.pi + self.cycles = 1 + self.steps_per_cycle = 64 + self.steps = self.cycles*self.steps_per_cycle + totalTime = self.cycles / self.freq + self.dt = totalTime / self.steps + self.maxDisp = 1.0 + + def run(self): + mechFuncs = Mechanics.create_mechanics_functions(self.fs, + "plane strain", + self.mat) + dofManager = DofManagerMPC(self.fs, 2, self.EBCs, self.mesh) + + def create_field(Uu, disp): + def get_ubcs(disp): + V = np.zeros(self.mesh.coords.shape) + index = (self.mesh.nodeSets['top'], 0) + V = V.at[index].set(disp) + return dofManager.get_bc_values(V) + + return dofManager.create_field(Uu, get_ubcs(disp)) + + def energy_function_all_dofs(U, p): + internalVariables = p.state_data + return mechFuncs.compute_strain_energy(U, internalVariables, self.dt) + + def compute_energy(Uu, p): + U = create_field(Uu, p.bc_data) + return energy_function_all_dofs(U, p) + + nodal_forces = jit(grad(energy_function_all_dofs, argnums=0)) + integrate_free_energy = jit(mechFuncs.compute_strain_energy) + + def write_output(Uu, p, step): + from optimism import VTKWriter + U = create_field(Uu, p.bc_data) + plotName = "../Results/Disp_based_Shear_Test/"+'output-'+str(step).zfill(3) + writer = VTKWriter.VTKWriter(self.mesh, baseFileName=plotName) + + writer.add_nodal_field(name='displ', nodalData=U, fieldType=VTKWriter.VTKFieldType.VECTORS) + + energyDensities, stresses = mechFuncs.\ + compute_output_energy_densities_and_stresses(U, p.state_data, self.dt) + cellEnergyDensities = FunctionSpace.project_quadrature_field_to_element_field(self.fs, energyDensities) + cellStresses = FunctionSpace.project_quadrature_field_to_element_field(self.fs, stresses) + writer.add_cell_field(name='strain_energy_density', + cellData=cellEnergyDensities, + fieldType=VTKWriter.VTKFieldType.SCALARS) + writer.add_cell_field(name='stress', + cellData=cellStresses, + fieldType=VTKWriter.VTKFieldType.TENSORS) + writer.write() + + Uu = 
dofManager.get_unknown_values(np.zeros(self.mesh.coords.shape)) + ivs = mechFuncs.compute_initial_state() + p = Objective.Params(bc_data=0., state_data=ivs) + U = create_field(Uu, p.bc_data) + self.objective = Objective.ObjectiveMPC(compute_energy, Uu, p, dofManager) + + index = (self.mesh.nodeSets['top'], 0) + + time = 0.0 + times = [] + externalWorkStore = [] + incrementalPotentialStore = [] + forceHistory = [] + dispHistory = [] + for step in range(1, self.steps+1): + force_prev = np.array(nodal_forces(U, p).at[index].get()) + applied_disp_prev = U.at[index].get() + + # disp = self.maxDisp * np.sin(2.0 * np.pi * self.freq * time) + disp = self.maxDisp * self.freq * time + print("Displacement in this load step: ", disp) + + p = Objective.param_index_update(p, 0, disp) + Uu, solverSuccess = EqSolver.nonlinear_equation_solve(self.objective, Uu, p, EqSolver.get_settings(), useWarmStart=False) + U = create_field(Uu, p.bc_data) + ivs = mechFuncs.compute_updated_internal_variables(U, p.state_data, self.dt) + p = Objective.param_index_update(p, 1, ivs) + + write_output(Uu, p, step) + + force = np.array(nodal_forces(U, p).at[index].get()) + applied_disp = U.at[index].get() + externalWorkStore.append( 0.5*np.tensordot((force + force_prev),(applied_disp - applied_disp_prev), axes=1) ) + incrementalPotentialStore.append(integrate_free_energy(U, ivs, self.dt)) + + forceHistory.append( np.sum(force) ) + dispHistory.append(disp) + + times.append(time) + time += self.dt + + + + +app = ShearTest() +app.setUp() +app.run() \ No newline at end of file diff --git a/test/TestFixture.py b/test/TestFixture.py new file mode 100644 index 00000000..0c005e8a --- /dev/null +++ b/test/TestFixture.py @@ -0,0 +1,17 @@ +import unittest +import numpy + +class TestFixture(unittest.TestCase): + def assertArrayEqual(self, a, b): + self.assertEqual( len(a), len(b) ) + self.assertIsNone(numpy.testing.assert_array_equal(a,b)) + + def assertArrayNotEqual(self, a, b): + self.assertTrue( (a-b != 0).any() ) + + def assertArrayNear(self, a, b, decimals): + self.assertEqual(len(a), len(b)) + self.assertIsNone(numpy.testing.assert_array_almost_equal(a,b,decimals)) + + def assertNear(self, a, b, decimals): + self.assertAlmostEqual(a, b, decimals) From 309f5ff53a90cdb41e6b71939a0830459fc76834 Mon Sep 17 00:00:00 2001 From: SarveshJoshi33 Date: Wed, 22 Jan 2025 14:27:34 -0500 Subject: [PATCH 2/8] MPC based shear test running, need to perform some more testing to verify the apt implementation --- optimism/Objective.py | 97 ++++++++++----------- test/{ShearProblem_DOF.py => test_shear.py} | 2 +- 2 files changed, 45 insertions(+), 54 deletions(-) rename test/{ShearProblem_DOF.py => test_shear.py} (98%) diff --git a/optimism/Objective.py b/optimism/Objective.py index 9689f658..db3769e0 100644 --- a/optimism/Objective.py +++ b/optimism/Objective.py @@ -269,8 +269,8 @@ def get_residual(self, x): class ObjectiveMPC(Objective): def __init__(self, f, x, p, dofManagerMPC, precondStrategy=None): """ - Initialize the Objective class for MPC with the condensation method. - + Initialize the Objective class for MPC with full DOFs and master-slave constraints. + Parameters: - f: Objective function. - x: Initial guess for the primal DOF vector. 
@@ -283,67 +283,58 @@ def __init__(self, f, x, p, dofManagerMPC, precondStrategy=None): self.master_dofs = dofManagerMPC.master_array[:, 1:] self.slave_dofs = dofManagerMPC.slave_array[:, 1:] self.slave_to_master_map = self.create_slave_to_master_map() - print("self.master_dofs:", self.master_dofs) - print("master_dof_indices.size:", self.master_dofs.size) def create_slave_to_master_map(self): - """Create the mapping to express slave DOFs in terms of master DOFs.""" - # Placeholder for the actual implementation. - # This should create a relationship of the form: - # slave_dofs = C @ master_dofs + d - # where C is the constraint matrix and d is a constant vector. - pass - - def value(self, x_master): - """Evaluate the objective function with reduced DOFs.""" - x_full = self.expand_master_to_full_dofs(x_master) - print("x_master shape:", x_master.shape) - print("fieldShape:", self.dofManagerMPC.fieldShape) - assert x_master.shape[0] == self.master_dofs.size, "Mismatch in master DOF sizes!" + """ + Create the mapping to express slave DOFs in terms of master DOFs. + Returns: + - C: Constraint matrix (2D array). + - d: Constant vector (1D array). + """ + num_slaves = len(self.slave_dofs) + num_masters = len(self.master_dofs) + + # Initialize the constraint matrix with the correct dimensions + C = np.zeros((num_slaves, num_masters)) # Replace with actual mapping logic + + # Initialize the constant vector + d = np.zeros(num_slaves) # Replace with actual constant values if needed + + return C, d + + def value(self, x_full): + # Ensure the constraints are satisfied + x_full = self.enforce_constraints(x_full) return super().value(x_full) - def gradient(self, x_master): - """Compute the gradient w.r.t. master DOFs.""" - x_full = self.expand_master_to_full_dofs(x_master) + def gradient(self, x_full): + x_full = self.enforce_constraints(x_full) grad_full = super().gradient(x_full) - return self.reduce_gradient_to_master(grad_full) + return grad_full - def hessian_vec(self, x_master, vx_master): - """Compute the Hessian-vector product w.r.t. master DOFs.""" - x_full = self.expand_master_to_full_dofs(x_master) - vx_full = self.expand_master_to_full_dofs(vx_master) + def hessian_vec(self, x_full, vx_full): + x_full = self.enforce_constraints(x_full) hess_vec_full = super().hessian_vec(x_full, vx_full) - return self.reduce_gradient_to_master(hess_vec_full) - - def expand_master_to_full_dofs(self, x_master): - """Expand master DOFs to the full DOF vector.""" - # Initialize the full DOF array - x_full = np.zeros(self.dofManagerMPC.fieldShape) - - # Ensure `self.master_dofs` is a 1D array of indices - master_dof_indices = self.master_dofs.ravel() + return hess_vec_full - # Validate shape consistency - assert x_master.shape[0] == master_dof_indices.size, ( - f"Mismatch: x_master has shape {x_master.shape}, " - f"but master DOFs have size {master_dof_indices.size}" - ) - - # Assign master DOFs - x_full = x_full.at[master_dof_indices].set(x_master) - - # Add slave DOFs + def enforce_constraints(self, x_full): + """ + Enforce the master-slave relationship in the full DOF vector. 
+ """ if self.slave_to_master_map is not None: - slave_values = self.slave_to_master_map @ x_master # Ensure compatibility - slave_dof_indices = self.slave_dofs.ravel() - x_full = x_full.at[slave_dof_indices].set(slave_values) - + C, d = self.slave_to_master_map + + # # Debug shapes + # print("Shape of C:", C.shape) + # print("Shape of x_full[self.master_dofs]:", x_full[self.master_dofs].shape) + # print("Shape of d:", d.shape) + + # Ensure broadcasting compatibility + slave_values = C @ x_full[self.master_dofs] + d[:, None] + # print("Shape of slave_values:", slave_values.shape) + + x_full = x_full.at[self.slave_dofs].set(slave_values) return x_full - def reduce_gradient_to_master(self, grad_full): - """Reduce the full gradient to master DOFs.""" - # Extract the gradient corresponding to the master DOFs. - return grad_full[self.master_dofs] - diff --git a/test/ShearProblem_DOF.py b/test/test_shear.py similarity index 98% rename from test/ShearProblem_DOF.py rename to test/test_shear.py index cd49f164..82c18026 100644 --- a/test/ShearProblem_DOF.py +++ b/test/test_shear.py @@ -95,7 +95,7 @@ def compute_energy(Uu, p): def write_output(Uu, p, step): from optimism import VTKWriter U = create_field(Uu, p.bc_data) - plotName = "../Results/Disp_based_Shear_Test/"+'output-'+str(step).zfill(3) + plotName = "../test_results/disp_based_shear_test/"+'output-'+str(step).zfill(3) writer = VTKWriter.VTKWriter(self.mesh, baseFileName=plotName) writer.add_nodal_field(name='displ', nodalData=U, fieldType=VTKWriter.VTKFieldType.VECTORS) From 6254839cff42cc06db60c836d0c519a5f78efc99 Mon Sep 17 00:00:00 2001 From: SarveshJoshi33 Date: Thu, 23 Jan 2025 14:00:53 -0500 Subject: [PATCH 3/8] Updated the FunctionSpace script with the equinox module --- optimism/FunctionSpace.py | 106 ++++++++++++++++++++++++++++---------- optimism/Objective.py | 19 +++---- 2 files changed, 84 insertions(+), 41 deletions(-) diff --git a/optimism/FunctionSpace.py b/optimism/FunctionSpace.py index fd11f616..57082af8 100644 --- a/optimism/FunctionSpace.py +++ b/optimism/FunctionSpace.py @@ -1,16 +1,21 @@ -from collections import namedtuple -import numpy as onp - -import jax -import jax.numpy as np from jax.scipy.linalg import solve - +from jaxtyping import Array, Float from optimism import Interpolants from optimism import Mesh +from optimism import QuadratureRule +from typing import Tuple +import equinox as eqx +import jax +import jax.numpy as np +import numpy as onp + + +class EssentialBC(eqx.Module): + nodeSet: str + component: int -FunctionSpace = namedtuple('FunctionSpace', ['shapes', 'vols', 'shapeGrads', 'mesh', 'quadratureRule', 'isAxisymmetric']) -FunctionSpace.__doc__ = \ +class FunctionSpace(eqx.Module): """Data needed for calculus on functions in the discrete function space. In describing the shape of the attributes, ``ne`` is the number of @@ -32,8 +37,12 @@ isAxisymmetric: boolean indicating if the function space data are axisymmetric. 
""" - -EssentialBC = namedtuple('EssentialBC', ['nodeSet', 'component']) + shapes: Float[Array, "ne nqpe nn"] + vols: Float[Array, "ne nqpe"] + shapeGrads: Float[Array, "ne nqpe nn nd"] + mesh: Mesh.Mesh + quadratureRule: QuadratureRule.QuadratureRule + isAxisymmetric: bool def construct_function_space(mesh, quadratureRule, mode2D='cartesian'): @@ -353,7 +362,6 @@ def integrate_function_on_edges(functionSpace, func, U, quadRule, edges): integrate_on_edges = jax.vmap(integrate_function_on_edge, (None, None, None, None, 0)) return np.sum(integrate_on_edges(functionSpace, func, U, quadRule, edges)) - def create_nodeset_layers(mesh): coords = mesh.coords tol = 1e-8 @@ -378,25 +386,43 @@ def create_nodeset_layers(mesh): # print("Layers in y-direction: ", y_layers) return y_layers -class DofManager: + +class DofManager(eqx.Module): + # TODO get type hints below correct + # TODO this one could be moved to jax types if we move towards + # TODO jit safe preconditioners/solvers + fieldShape: Tuple[int, int] + isBc: any + isUnknown: any + ids: any + unknownIndices: any + bcIndices: any + dofToUnknown: any + HessRowCoords: any + HessColCoords: any + hessian_bc_mask: any + def __init__(self, functionSpace, dim, EssentialBCs): self.fieldShape = Mesh.num_nodes(functionSpace.mesh), dim - self.isBc = onp.full(self.fieldShape, False, dtype=bool) + isBc = onp.full(self.fieldShape, False, dtype=bool) for ebc in EssentialBCs: - self.isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True + isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True + self.isBc = isBc self.isUnknown = ~self.isBc - self.ids = np.arange(self.isBc.size).reshape(self.fieldShape) + self.ids = onp.arange(self.isBc.size).reshape(self.fieldShape) self.unknownIndices = self.ids[self.isUnknown] self.bcIndices = self.ids[self.isBc] - ones = np.ones(self.isBc.size, dtype=int) * -1 - self.dofToUnknown = ones.at[self.unknownIndices].set(np.arange(self.unknownIndices.size)) + ones = onp.ones(self.isBc.size, dtype=int) * -1 + dofToUnknown = ones + dofToUnknown[self.unknownIndices] = onp.arange(self.unknownIndices.size) + self.dofToUnknown = dofToUnknown self.HessRowCoords, self.HessColCoords = self._make_hessian_coordinates(functionSpace.mesh.conns) - self.hessian_bc_mask = self._make_hessian_bc_mask(functionSpace.mesh.conns) + self.hessian_bc_mask = self._make_hessian_bc_mask(onp.array(functionSpace.mesh.conns)) def get_bc_size(self): @@ -448,7 +474,7 @@ def _make_hessian_coordinates(self, conns): rowCoords[rangeBegin:rangeEnd] = elHessCoords.ravel() colCoords[rangeBegin:rangeEnd] = elHessCoords.T.ravel() - rangeBegin += np.square(nElUnknowns[e]) + rangeBegin += onp.square(nElUnknowns[e]) return rowCoords, colCoords @@ -465,16 +491,40 @@ def _make_hessian_bc_mask(self, conns): hessian_bc_mask[e,:,eFlag] = False return hessian_bc_mask + # Different class for Multi-Point Constrained Problem -class DofManagerMPC: +from optimism import Mesh +import equinox as eqx +import jax.numpy as np +import numpy as onp +from typing import List, Tuple + + +class DofManagerMPC(eqx.Module): + fieldShape: Tuple[int, int] + isBc: np.ndarray + isUnknown: np.ndarray + ids: np.ndarray + unknownIndices: np.ndarray + bcIndices: np.ndarray + dofToUnknown: np.ndarray + HessRowCoords: np.ndarray + HessColCoords: np.ndarray + hessian_bc_mask: np.ndarray + master_layer: int = eqx.static_field() + slave_layer: int = eqx.static_field() + master_array: np.ndarray + slave_array: np.ndarray + layers: List[List[int]] = eqx.static_field() + def 
__init__(self, functionSpace, dim, EssentialBCs, mesh): - self.fieldShape = Mesh.num_nodes(functionSpace.mesh), dim + self.fieldShape = (Mesh.num_nodes(functionSpace.mesh), dim) self.isBc = onp.full(self.fieldShape, False, dtype=bool) for ebc in EssentialBCs: self.isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True self.isUnknown = ~self.isBc - self.ids = np.arange(self.isBc.size).reshape(self.fieldShape) + self.ids = onp.arange(self.isBc.size).reshape(self.fieldShape) # Create layers and assign master and slave rows self.layers = create_nodeset_layers(mesh) @@ -526,7 +576,7 @@ def _create_layer_array(self, layer_index): for node in layer_nodes: node_dofs = self.ids[node] layer_array.append([node, *node_dofs]) - return onp.array(layer_array, dtype=int) + return np.array(layer_array, dtype=int) def _make_hessian_coordinates(self, conns): """Creates row and column coordinates for the Hessian, considering master and slave nodes.""" @@ -538,8 +588,8 @@ def _make_hessian_coordinates(self, conns): # Include master and slave nodes in the size calculation eNodes_list = onp.asarray(eNodes).tolist() - elMasterNodes = set(eNodes_list).intersection(set(self.master_array[:, 0])) - elSlaveNodes = set(eNodes_list).intersection(set(self.slave_array[:, 0])) + elMasterNodes = set(eNodes_list).intersection(set(onp.array(self.master_array[:, 0]))) + elSlaveNodes = set(eNodes_list).intersection(set(onp.array(self.slave_array[:, 0]))) nElMasters = sum(len(self.master_array[onp.where(self.master_array[:, 0] == node)[0], 1:].ravel()) for node in elMasterNodes) nElSlaves = sum(len(self.slave_array[onp.where(self.slave_array[:, 0] == node)[0], 1:].ravel()) for node in elSlaveNodes) @@ -559,8 +609,8 @@ def _make_hessian_coordinates(self, conns): elUnknowns = self.dofToUnknown[elDofs[elUnknownFlags]] # Identify master and slave DOFs for the element - elMasterNodes = set(eNodes_list).intersection(set(self.master_array[:, 0])) - elSlaveNodes = set(eNodes_list).intersection(set(self.slave_array[:, 0])) + elMasterNodes = set(eNodes_list).intersection(set(onp.array(self.master_array[:, 0]))) + elSlaveNodes = set(eNodes_list).intersection(set(onp.array(self.slave_array[:, 0]))) elMasterDofs = [] for node in elMasterNodes: @@ -587,7 +637,6 @@ def _make_hessian_coordinates(self, conns): return rowCoords, colCoords - def _make_hessian_bc_mask(self, conns): """Creates a mask for BCs in the Hessian, considering master and slave nodes.""" nElements, nNodesPerElement = conns.shape @@ -618,3 +667,4 @@ def _make_hessian_bc_mask(self, conns): return hessian_bc_mask + diff --git a/optimism/Objective.py b/optimism/Objective.py index db3769e0..63294d39 100644 --- a/optimism/Objective.py +++ b/optimism/Objective.py @@ -3,6 +3,7 @@ import numpy as onp from scipy.sparse import diags as sparse_diags from scipy.sparse import csc_matrix +import jax.random as rand # static vs dynamics # differentiable vs undifferentiable @@ -294,8 +295,9 @@ def create_slave_to_master_map(self): num_slaves = len(self.slave_dofs) num_masters = len(self.master_dofs) + key = rand.PRNGKey(0) # Initialize the constraint matrix with the correct dimensions - C = np.zeros((num_slaves, num_masters)) # Replace with actual mapping logic + C = rand.uniform(key, (num_slaves, num_masters)) # Replace with actual mapping logic # Initialize the constant vector d = np.zeros(num_slaves) # Replace with actual constant values if needed @@ -319,22 +321,13 @@ def hessian_vec(self, x_full, vx_full): return hess_vec_full def enforce_constraints(self, x_full): - """ - 
Enforce the master-slave relationship in the full DOF vector. - """ if self.slave_to_master_map is not None: C, d = self.slave_to_master_map - - # # Debug shapes - # print("Shape of C:", C.shape) - # print("Shape of x_full[self.master_dofs]:", x_full[self.master_dofs].shape) - # print("Shape of d:", d.shape) - - # Ensure broadcasting compatibility slave_values = C @ x_full[self.master_dofs] + d[:, None] - # print("Shape of slave_values:", slave_values.shape) - + print("Slave values before assignment:", slave_values) x_full = x_full.at[self.slave_dofs].set(slave_values) + print("x_full after enforcing constraints:", x_full) return x_full + From 0de8cb41118822c5f4893e961b02609805542998 Mon Sep 17 00:00:00 2001 From: SarveshJoshi33 Date: Fri, 24 Jan 2025 09:32:41 -0500 Subject: [PATCH 4/8] Changes regarding the import and the highlight comment for DofManagerMPC --- optimism/FunctionSpace.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/optimism/FunctionSpace.py b/optimism/FunctionSpace.py index 57082af8..2c1c6bab 100644 --- a/optimism/FunctionSpace.py +++ b/optimism/FunctionSpace.py @@ -3,7 +3,7 @@ from optimism import Interpolants from optimism import Mesh from optimism import QuadratureRule -from typing import Tuple +from typing import Tuple, List import equinox as eqx import jax import jax.numpy as np @@ -492,13 +492,7 @@ def _make_hessian_bc_mask(self, conns): return hessian_bc_mask -# Different class for Multi-Point Constrained Problem -from optimism import Mesh -import equinox as eqx -import jax.numpy as np -import numpy as onp -from typing import List, Tuple - +# DofManager for Multi-Point Constrained Problem class DofManagerMPC(eqx.Module): fieldShape: Tuple[int, int] From b6c96d2c1baadddfd35a0759c9e1f0abc9f91994 Mon Sep 17 00:00:00 2001 From: SarveshJoshi33 Date: Tue, 4 Feb 2025 10:45:40 -0500 Subject: [PATCH 5/8] Normalized the test directory --- optimism/test/MeshFixture.py | 88 +++++++++++++- test/MeshFixture.py | 224 ----------------------------------- test/TestFixture.py | 17 --- test/test_shear.py | 161 ------------------------- 4 files changed, 85 insertions(+), 405 deletions(-) delete mode 100644 test/MeshFixture.py delete mode 100644 test/TestFixture.py delete mode 100644 test/test_shear.py diff --git a/optimism/test/MeshFixture.py b/optimism/test/MeshFixture.py index 341b4356..085d97bb 100644 --- a/optimism/test/MeshFixture.py +++ b/optimism/test/MeshFixture.py @@ -1,9 +1,15 @@ -# fea data structures +# ---------------- NEW MESHFIXTURE SCRIPT -------------------------- +# ------------------------------------------------------------------ +# Note - This script involves a function creating nodeset layers +# ------------------------------------------------------------------ + +import jax.numpy as jnp +import numpy as onp from optimism.Mesh import * from optimism import Surface # testing utils -from .TestFixture import * +from TestFixture import * d_kappa = 1.0 d_nu = 0.3 @@ -139,4 +145,80 @@ def is_edge_on_left(xyOnEdge): blocks = {'block': np.arange(conns.shape[0])} U = np.zeros(coords.shape) return construct_mesh_from_basic_data(coords, conns, None, nodeSets, sideSets), U - + + def create_mesh_disp_and_nodeset_layers(self, Nx, Ny, xRange, yRange, initial_disp_func, setNamePostFix=''): + coords, conns = create_structured_mesh_data(Nx, Ny, xRange, yRange) + tol = 1e-8 + nodeSets = {} + + # Predefined boundary node sets + nodeSets['left'+setNamePostFix] = jnp.flatnonzero(coords[:, 0] < xRange[0] + tol) + nodeSets['bottom'+setNamePostFix] = 
jnp.flatnonzero(coords[:, 1] < yRange[0] + tol) + nodeSets['right'+setNamePostFix] = jnp.flatnonzero(coords[:, 0] > xRange[1] - tol) + nodeSets['top'+setNamePostFix] = jnp.flatnonzero(coords[:, 1] > yRange[1] - tol) + nodeSets['all_boundary'+setNamePostFix] = jnp.flatnonzero( + (coords[:, 0] < xRange[0] + tol) | + (coords[:, 1] < yRange[0] + tol) | + (coords[:, 0] > xRange[1] - tol) | + (coords[:, 1] > yRange[1] - tol) + ) + + # Identify unique y-layers for nodes + unique_y_layers = sorted(onp.unique(coords[:, 1])) + # print("Unique y-layers identified:", unique_y_layers) + + # Ensure we have the expected number of layers + assert len(unique_y_layers) == Ny, f"Expected {Ny} y-layers, but found {len(unique_y_layers)}." + + # Initialize an empty list to store rows of nodes + y_layer_rows = [] + + # Map nodes to y_layers and construct rows + for i, y_val in enumerate(unique_y_layers): + nodes_in_layer = onp.flatnonzero(onp.abs(coords[:, 1] - y_val) < tol) + y_layer_rows.append(nodes_in_layer) + # print(f"Nodes in y-layer {i+1} (y = {y_val}):", nodes_in_layer) + + # Convert list of rows into a structured 2D JAX array, padding with -1 + max_nodes_per_layer = max(len(row) for row in y_layer_rows) + y_layers = -1 * jnp.ones((len(y_layer_rows), max_nodes_per_layer), dtype=int) # Initialize with -1 + + for i, row in enumerate(y_layer_rows): + y_layers = y_layers.at[i, :len(row)].set(row) # Fill each row with nodes from the layer + + # Print for debugging + # print("y_layers (2D array):", y_layers) + + def is_edge_on_left(xyOnEdge): + return np.all( xyOnEdge[:,0] < xRange[0] + tol ) + + def is_edge_on_bottom(xyOnEdge): + return np.all( xyOnEdge[:,1] < yRange[0] + tol ) + + def is_edge_on_right(xyOnEdge): + return np.all( xyOnEdge[:,0] > xRange[1] - tol ) + + def is_edge_on_top(xyOnEdge): + return np.all( xyOnEdge[:,1] > yRange[1] - tol ) + + sideSets = {} + sideSets['left'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_left) + sideSets['bottom'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_bottom) + sideSets['right'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_right) + sideSets['top'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_top) + + allBoundaryEdges = np.vstack([s for s in sideSets.values()]) + sideSets['all_boundary'+setNamePostFix] = allBoundaryEdges + + blocks = {'block'+setNamePostFix: np.arange(conns.shape[0])} + mesh = construct_mesh_from_basic_data(coords, conns, blocks, nodeSets, sideSets) + + return mesh, vmap(initial_disp_func)(mesh.coords), y_layers + + + + + + + + diff --git a/test/MeshFixture.py b/test/MeshFixture.py deleted file mode 100644 index 085d97bb..00000000 --- a/test/MeshFixture.py +++ /dev/null @@ -1,224 +0,0 @@ -# ---------------- NEW MESHFIXTURE SCRIPT -------------------------- -# ------------------------------------------------------------------ -# Note - This script involves a function creating nodeset layers -# ------------------------------------------------------------------ - -import jax.numpy as jnp -import numpy as onp -from optimism.Mesh import * -from optimism import Surface - -# testing utils -from TestFixture import * - -d_kappa = 1.0 -d_nu = 0.3 -d_E = 3*d_kappa*(1 - 2*d_nu) -defaultProps = {'elastic modulus': d_E, - 'poisson ratio': d_nu} - - -def compute_residual_norm(grad_func, dofValues): - grad = grad_func(dofValues) - return np.linalg.norm(grad) - -def map_to_arch(x, R): - r = R-x[0] - y = x[1] - pi = np.pi - return r*np.array([-np.cos(pi*y), np.sin(pi*y)]) - -def 
map_to_cos(x, L, warp): - y = x[1] - pi = np.pi - return np.array([L*(y-0.5), warp*np.cos(2*pi*(y-0.5))-x[0]]) - -class MeshFixture(TestFixture): - - def create_mesh_and_disp(self, Nx, Ny, xRange, yRange, initial_disp_func, setNamePostFix=''): - coords, conns = create_structured_mesh_data(Nx, Ny, xRange, yRange) - tol = 1e-8 - nodeSets = {} - nodeSets['left'+setNamePostFix] = np.flatnonzero(coords[:,0] < xRange[0] + tol) - nodeSets['bottom'+setNamePostFix] = np.flatnonzero(coords[:,1] < yRange[0] + tol) - nodeSets['right'+setNamePostFix] = np.flatnonzero(coords[:,0] > xRange[1] - tol) - nodeSets['top'+setNamePostFix] = np.flatnonzero(coords[:,1] > yRange[1] - tol) - nodeSets['all_boundary'+setNamePostFix] = np.flatnonzero((coords[:,0] < xRange[0] + tol) | - (coords[:,1] < yRange[0] + tol) | - (coords[:,0] > xRange[1] - tol) | - (coords[:,1] > yRange[1] - tol) ) - - def is_edge_on_left(xyOnEdge): - return np.all( xyOnEdge[:,0] < xRange[0] + tol ) - - def is_edge_on_bottom(xyOnEdge): - return np.all( xyOnEdge[:,1] < yRange[0] + tol ) - - def is_edge_on_right(xyOnEdge): - return np.all( xyOnEdge[:,0] > xRange[1] - tol ) - - def is_edge_on_top(xyOnEdge): - return np.all( xyOnEdge[:,1] > yRange[1] - tol ) - - sideSets = {} - sideSets['left'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_left) - sideSets['bottom'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_bottom) - sideSets['right'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_right) - sideSets['top'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_top) - - allBoundaryEdges = np.vstack([s for s in sideSets.values()]) - sideSets['all_boundary'+setNamePostFix] = allBoundaryEdges - - blocks = {'block'+setNamePostFix: np.arange(conns.shape[0])} - mesh = construct_mesh_from_basic_data(coords, conns, blocks, nodeSets, sideSets) - return mesh, vmap(initial_disp_func)(mesh.coords) - - - def create_arch_mesh_disp_and_edges(self, N, M, w, R, - bcSetFraction=1./3., - setNamePostFix=''): - h = 1.0 - coords, conns = create_structured_mesh_data(N, M, [-w, w], [0., h]) - - tol = 1e-8 - nodeSets = {} - nodeSets['left'+setNamePostFix] = np.flatnonzero(coords[:,1] < 0.0 + tol) - nodeSets['right'+setNamePostFix] = np.flatnonzero(coords[:,1] > h - tol) - nodeSets['bottom'+setNamePostFix] = np.flatnonzero(coords[:,0] > w - tol) - nodeSets['top'+setNamePostFix] = np.flatnonzero(coords[:,0] < -w + tol) - nodeSets['top_left'+setNamePostFix] = np.intersect1d(nodeSets['top'+setNamePostFix], - nodeSets['left'+setNamePostFix]) - a = 0.5*h*(1.0 - bcSetFraction) - b = 0.5*h*(1.0 + bcSetFraction) - - nodeSets['push'+setNamePostFix] = np.flatnonzero( (coords[:,0] < tol - w) - & (coords[:,1] > a) - & (coords[:,1] < b) ) - - def is_edge_on_left(xyOnEdge): - return np.all(xyOnEdge[:,1] < 0.0 + tol) - - def is_edge_on_bottom(xyOnEdge): - return np.all( xyOnEdge[:,0] > w - tol ) - - def is_edge_on_right(xyOnEdge): - return np.all( xyOnEdge[:,1] > h - tol ) - - def is_edge_on_top(xyOnEdge): - return np.all( xyOnEdge[:,0] < tol - w ) - - def is_edge_on_loading_patch(xyOnEdge): - return np.all((xyOnEdge[:,0] < tol - w) & (xyOnEdge[:,1] > a) & (xyOnEdge[:,1] < b)) - - sideSets = {} - sideSets['top'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_top) - sideSets['bottom'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_bottom) - sideSets['left'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_left) - sideSets['right'+setNamePostFix] = Surface.create_edges(coords, 
conns, is_edge_on_right) - sideSets['push'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_loading_patch) - - coords = vmap(map_to_arch, (0,None))(coords, R) - blocks = {'block'+setNamePostFix: np.arange(conns.shape[0])} - U = np.zeros(coords.shape) - return construct_mesh_from_basic_data(coords, conns, blocks, nodeSets, sideSets), U - - - def create_cos_mesh_disp_and_edges(self, N, M, w, L, warp): - h = 1.0 - coords, conns = create_structured_mesh_data(N, M, [-w, w], [0., h]) - - tol = 1e-8 - nodeSets = {} - nodeSets['left'] = np.flatnonzero(coords[:,1] < 0.0 + tol) - nodeSets['right'] = np.flatnonzero(coords[:,1] > h - tol) - nodeSets['top'] = np.flatnonzero( (coords[:,0] < tol - w) - & (coords[:,1] > 0.3) - & (coords[:,1] < 0.7) ) - - def is_edge_on_left(xyOnEdge): - return np.all( xyOnEdge[:,0] < -w + tol ) - - sideSets = {} - sideSets['top'] = Surface.create_edges(coords, conns, is_edge_on_left) - - coords = vmap(map_to_cos, (0,None,None))(coords, L, warp) - blocks = {'block': np.arange(conns.shape[0])} - U = np.zeros(coords.shape) - return construct_mesh_from_basic_data(coords, conns, None, nodeSets, sideSets), U - - def create_mesh_disp_and_nodeset_layers(self, Nx, Ny, xRange, yRange, initial_disp_func, setNamePostFix=''): - coords, conns = create_structured_mesh_data(Nx, Ny, xRange, yRange) - tol = 1e-8 - nodeSets = {} - - # Predefined boundary node sets - nodeSets['left'+setNamePostFix] = jnp.flatnonzero(coords[:, 0] < xRange[0] + tol) - nodeSets['bottom'+setNamePostFix] = jnp.flatnonzero(coords[:, 1] < yRange[0] + tol) - nodeSets['right'+setNamePostFix] = jnp.flatnonzero(coords[:, 0] > xRange[1] - tol) - nodeSets['top'+setNamePostFix] = jnp.flatnonzero(coords[:, 1] > yRange[1] - tol) - nodeSets['all_boundary'+setNamePostFix] = jnp.flatnonzero( - (coords[:, 0] < xRange[0] + tol) | - (coords[:, 1] < yRange[0] + tol) | - (coords[:, 0] > xRange[1] - tol) | - (coords[:, 1] > yRange[1] - tol) - ) - - # Identify unique y-layers for nodes - unique_y_layers = sorted(onp.unique(coords[:, 1])) - # print("Unique y-layers identified:", unique_y_layers) - - # Ensure we have the expected number of layers - assert len(unique_y_layers) == Ny, f"Expected {Ny} y-layers, but found {len(unique_y_layers)}." 
- - # Initialize an empty list to store rows of nodes - y_layer_rows = [] - - # Map nodes to y_layers and construct rows - for i, y_val in enumerate(unique_y_layers): - nodes_in_layer = onp.flatnonzero(onp.abs(coords[:, 1] - y_val) < tol) - y_layer_rows.append(nodes_in_layer) - # print(f"Nodes in y-layer {i+1} (y = {y_val}):", nodes_in_layer) - - # Convert list of rows into a structured 2D JAX array, padding with -1 - max_nodes_per_layer = max(len(row) for row in y_layer_rows) - y_layers = -1 * jnp.ones((len(y_layer_rows), max_nodes_per_layer), dtype=int) # Initialize with -1 - - for i, row in enumerate(y_layer_rows): - y_layers = y_layers.at[i, :len(row)].set(row) # Fill each row with nodes from the layer - - # Print for debugging - # print("y_layers (2D array):", y_layers) - - def is_edge_on_left(xyOnEdge): - return np.all( xyOnEdge[:,0] < xRange[0] + tol ) - - def is_edge_on_bottom(xyOnEdge): - return np.all( xyOnEdge[:,1] < yRange[0] + tol ) - - def is_edge_on_right(xyOnEdge): - return np.all( xyOnEdge[:,0] > xRange[1] - tol ) - - def is_edge_on_top(xyOnEdge): - return np.all( xyOnEdge[:,1] > yRange[1] - tol ) - - sideSets = {} - sideSets['left'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_left) - sideSets['bottom'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_bottom) - sideSets['right'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_right) - sideSets['top'+setNamePostFix] = Surface.create_edges(coords, conns, is_edge_on_top) - - allBoundaryEdges = np.vstack([s for s in sideSets.values()]) - sideSets['all_boundary'+setNamePostFix] = allBoundaryEdges - - blocks = {'block'+setNamePostFix: np.arange(conns.shape[0])} - mesh = construct_mesh_from_basic_data(coords, conns, blocks, nodeSets, sideSets) - - return mesh, vmap(initial_disp_func)(mesh.coords), y_layers - - - - - - - - diff --git a/test/TestFixture.py b/test/TestFixture.py deleted file mode 100644 index 0c005e8a..00000000 --- a/test/TestFixture.py +++ /dev/null @@ -1,17 +0,0 @@ -import unittest -import numpy - -class TestFixture(unittest.TestCase): - def assertArrayEqual(self, a, b): - self.assertEqual( len(a), len(b) ) - self.assertIsNone(numpy.testing.assert_array_equal(a,b)) - - def assertArrayNotEqual(self, a, b): - self.assertTrue( (a-b != 0).any() ) - - def assertArrayNear(self, a, b, decimals): - self.assertEqual(len(a), len(b)) - self.assertIsNone(numpy.testing.assert_array_almost_equal(a,b,decimals)) - - def assertNear(self, a, b, decimals): - self.assertAlmostEqual(a, b, decimals) diff --git a/test/test_shear.py b/test/test_shear.py deleted file mode 100644 index 82c18026..00000000 --- a/test/test_shear.py +++ /dev/null @@ -1,161 +0,0 @@ -# Test Case - Displacement based Shear Test -# --------------------------------------------------- -# Note - This approach involves segregation of master and slave DOFs -# --------------------------------------------------- - -import sys -sys.path.insert(0, "/home/sarvesh/Documents/Github/optimism") - - -import jax.numpy as np -from jax import jit, grad - -from optimism import EquationSolver as EqSolver -from optimism import QuadratureRule -from optimism import Mechanics -from optimism import Mesh -from optimism import Objective -from optimism import FunctionSpace -from optimism.Mesh import create_higher_order_mesh_from_simplex_mesh -from optimism.material import Neohookean -from optimism.FunctionSpace import DofManagerMPC - -from MeshFixture import * -import matplotlib.pyplot as plt - - - -class ShearTest(MeshFixture): - - def 
setUp(self): - dummyDispGrad = np.eye(2) - self.mesh = self.create_mesh_disp_and_nodeset_layers(5, 5, [0.,1.], [0.,1.], - lambda x: dummyDispGrad.dot(x))[0] - self.mesh = create_higher_order_mesh_from_simplex_mesh(self.mesh, order=1, createNodeSetsFromSideSets=True) - self.quadRule = QuadratureRule.create_quadrature_rule_on_triangle(degree=1) - self.fs = FunctionSpace.construct_function_space(self.mesh, self.quadRule) - - self.EBCs = [FunctionSpace.EssentialBC(nodeSet='top', component=0), - FunctionSpace.EssentialBC(nodeSet='top', component=1), - FunctionSpace.EssentialBC(nodeSet='bottom', component=0), - FunctionSpace.EssentialBC(nodeSet='bottom', component=1)] - - # dofManager = DofManagerMPC(self.fs, 2, self.EBCs, self.mesh) - - # self.master_array = dofManager.isMaster - # self.slave_array = dofManager.isSlave - # self.assembly = dofManager.dofAssembly - # print("Assembly: ", self.assembly) - - shearModulus = 0.855 # MPa - bulkModulus = 1000*shearModulus # MPa - youngModulus = 9.0*bulkModulus*shearModulus / (3.0*bulkModulus + shearModulus) - poissonRatio = (3.0*bulkModulus - 2.0*shearModulus) / 2.0 / (3.0*bulkModulus + shearModulus) - props = { - 'elastic modulus': youngModulus, - 'poisson ratio': poissonRatio, - 'version': 'coupled' - } - self.mat = Neohookean.create_material_model_functions(props) - - self.freq = 0.3 / 2.0 / np.pi - self.cycles = 1 - self.steps_per_cycle = 64 - self.steps = self.cycles*self.steps_per_cycle - totalTime = self.cycles / self.freq - self.dt = totalTime / self.steps - self.maxDisp = 1.0 - - def run(self): - mechFuncs = Mechanics.create_mechanics_functions(self.fs, - "plane strain", - self.mat) - dofManager = DofManagerMPC(self.fs, 2, self.EBCs, self.mesh) - - def create_field(Uu, disp): - def get_ubcs(disp): - V = np.zeros(self.mesh.coords.shape) - index = (self.mesh.nodeSets['top'], 0) - V = V.at[index].set(disp) - return dofManager.get_bc_values(V) - - return dofManager.create_field(Uu, get_ubcs(disp)) - - def energy_function_all_dofs(U, p): - internalVariables = p.state_data - return mechFuncs.compute_strain_energy(U, internalVariables, self.dt) - - def compute_energy(Uu, p): - U = create_field(Uu, p.bc_data) - return energy_function_all_dofs(U, p) - - nodal_forces = jit(grad(energy_function_all_dofs, argnums=0)) - integrate_free_energy = jit(mechFuncs.compute_strain_energy) - - def write_output(Uu, p, step): - from optimism import VTKWriter - U = create_field(Uu, p.bc_data) - plotName = "../test_results/disp_based_shear_test/"+'output-'+str(step).zfill(3) - writer = VTKWriter.VTKWriter(self.mesh, baseFileName=plotName) - - writer.add_nodal_field(name='displ', nodalData=U, fieldType=VTKWriter.VTKFieldType.VECTORS) - - energyDensities, stresses = mechFuncs.\ - compute_output_energy_densities_and_stresses(U, p.state_data, self.dt) - cellEnergyDensities = FunctionSpace.project_quadrature_field_to_element_field(self.fs, energyDensities) - cellStresses = FunctionSpace.project_quadrature_field_to_element_field(self.fs, stresses) - writer.add_cell_field(name='strain_energy_density', - cellData=cellEnergyDensities, - fieldType=VTKWriter.VTKFieldType.SCALARS) - writer.add_cell_field(name='stress', - cellData=cellStresses, - fieldType=VTKWriter.VTKFieldType.TENSORS) - writer.write() - - Uu = dofManager.get_unknown_values(np.zeros(self.mesh.coords.shape)) - ivs = mechFuncs.compute_initial_state() - p = Objective.Params(bc_data=0., state_data=ivs) - U = create_field(Uu, p.bc_data) - self.objective = Objective.ObjectiveMPC(compute_energy, Uu, p, dofManager) - - 
index = (self.mesh.nodeSets['top'], 0) - - time = 0.0 - times = [] - externalWorkStore = [] - incrementalPotentialStore = [] - forceHistory = [] - dispHistory = [] - for step in range(1, self.steps+1): - force_prev = np.array(nodal_forces(U, p).at[index].get()) - applied_disp_prev = U.at[index].get() - - # disp = self.maxDisp * np.sin(2.0 * np.pi * self.freq * time) - disp = self.maxDisp * self.freq * time - print("Displacement in this load step: ", disp) - - p = Objective.param_index_update(p, 0, disp) - Uu, solverSuccess = EqSolver.nonlinear_equation_solve(self.objective, Uu, p, EqSolver.get_settings(), useWarmStart=False) - U = create_field(Uu, p.bc_data) - ivs = mechFuncs.compute_updated_internal_variables(U, p.state_data, self.dt) - p = Objective.param_index_update(p, 1, ivs) - - write_output(Uu, p, step) - - force = np.array(nodal_forces(U, p).at[index].get()) - applied_disp = U.at[index].get() - externalWorkStore.append( 0.5*np.tensordot((force + force_prev),(applied_disp - applied_disp_prev), axes=1) ) - incrementalPotentialStore.append(integrate_free_energy(U, ivs, self.dt)) - - forceHistory.append( np.sum(force) ) - dispHistory.append(disp) - - times.append(time) - time += self.dt - - - - -app = ShearTest() -app.setUp() -app.run() \ No newline at end of file From 92e09fd6a744b241a23bf3d514aa78e56c7cf3cf Mon Sep 17 00:00:00 2001 From: SarveshJoshi33 Date: Wed, 12 Feb 2025 12:11:56 -0500 Subject: [PATCH 6/8] Created the compression test with MPCs. Need to make some changes in the aspect of Objective.py and merge the DofManagerMPC with the original class --- optimism/FunctionSpace.py | 193 ++++++++++++++++++++-- optimism/Objective.py | 8 +- optimism/test/square_mesh_compression.e | Bin 0 -> 65324 bytes optimism/test/square_mesh_compression.exo | Bin 0 -> 30963 bytes optimism/test/test_compression.txt | 149 +++++++++++++++++ optimism/test/test_compression_linear.py | 149 +++++++++++++++++ optimism/test/test_compression_nl.py | 145 ++++++++++++++++ optimism/test/test_compression_nl_MPC.py | 151 +++++++++++++++++ optimism/test/test_shear.py | 161 ++++++++++++++++++ 9 files changed, 934 insertions(+), 22 deletions(-) create mode 100644 optimism/test/square_mesh_compression.e create mode 100644 optimism/test/square_mesh_compression.exo create mode 100644 optimism/test/test_compression.txt create mode 100644 optimism/test/test_compression_linear.py create mode 100644 optimism/test/test_compression_nl.py create mode 100644 optimism/test/test_compression_nl_MPC.py create mode 100644 optimism/test/test_shear.py diff --git a/optimism/FunctionSpace.py b/optimism/FunctionSpace.py index 2c1c6bab..6b3315a5 100644 --- a/optimism/FunctionSpace.py +++ b/optimism/FunctionSpace.py @@ -492,7 +492,175 @@ def _make_hessian_bc_mask(self, conns): return hessian_bc_mask -# DofManager for Multi-Point Constrained Problem +# # DofManager for Multi-Point Constrained Problem + +# class DofManagerMPC(eqx.Module): +# fieldShape: Tuple[int, int] +# isBc: np.ndarray +# isUnknown: np.ndarray +# ids: np.ndarray +# unknownIndices: np.ndarray +# bcIndices: np.ndarray +# dofToUnknown: np.ndarray +# HessRowCoords: np.ndarray +# HessColCoords: np.ndarray +# hessian_bc_mask: np.ndarray +# master_layer: int = eqx.static_field() +# slave_layer: int = eqx.static_field() +# master_array: np.ndarray +# slave_array: np.ndarray +# layers: List[List[int]] = eqx.static_field() + +# def __init__(self, functionSpace, dim, EssentialBCs, mesh): +# self.fieldShape = (Mesh.num_nodes(functionSpace.mesh), dim) +# self.isBc = 
onp.full(self.fieldShape, False, dtype=bool) +# for ebc in EssentialBCs: +# self.isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True +# self.isUnknown = ~self.isBc + +# self.ids = onp.arange(self.isBc.size).reshape(self.fieldShape) + +# # Create layers and assign master and slave rows +# self.layers = create_nodeset_layers(mesh) +# print("These are the layers created for the mesh: ", self.layers) +# self.master_layer = int(input("Choose the row index for master nodes: ")) +# self.slave_layer = int(input("Choose the row index for slave nodes: ")) + +# # Generate master and slave arrays +# self.master_array = self._create_layer_array(self.master_layer) +# self.slave_array = self._create_layer_array(self.slave_layer) + +# # Create DOF mappings for unknowns +# self.unknownIndices = self.ids[self.isUnknown] +# self.bcIndices = self.ids[self.isBc] + +# ones = np.ones(self.isBc.size, dtype=int) * -1 +# self.dofToUnknown = ones.at[self.unknownIndices].set(np.arange(self.unknownIndices.size)) + +# # Compute Hessian-related data +# self.HessRowCoords, self.HessColCoords = self._make_hessian_coordinates(functionSpace.mesh.conns) +# self.hessian_bc_mask = self._make_hessian_bc_mask(functionSpace.mesh.conns) + +# def get_bc_size(self): +# return np.sum(self.isBc).item() + +# def get_unknown_size(self): +# return np.sum(self.isUnknown).item() + +# def create_field(self, Uu, Ubc=0.0): +# U = np.zeros(self.isBc.shape).at[self.isBc].set(Ubc) +# return U.at[self.isUnknown].set(Uu) + +# def get_bc_values(self, U): +# return U[self.isBc] + +# def get_unknown_values(self, U): +# return U[self.isUnknown] + +# def get_master_values(self, U): +# return U[self.master_array[:, 1:]] + +# def get_slave_values(self, U): +# return U[self.slave_array[:, 1:]] + +# def _create_layer_array(self, layer_index): +# """Creates an array for the specified layer (master or slave) with DOF mappings.""" +# layer_nodes = self.layers[layer_index] +# layer_array = [] +# for node in layer_nodes: +# node_dofs = self.ids[node] +# layer_array.append([node, *node_dofs]) +# return np.array(layer_array, dtype=int) + +# def _make_hessian_coordinates(self, conns): +# """Creates row and column coordinates for the Hessian, considering master and slave nodes.""" +# nElUnknowns = onp.zeros(conns.shape[0], dtype=int) +# nHessianEntries = 0 +# for e, eNodes in enumerate(conns): +# elUnknownFlags = self.isUnknown[eNodes, :].ravel() +# nElUnknowns[e] = onp.sum(elUnknownFlags) + +# # Include master and slave nodes in the size calculation +# eNodes_list = onp.asarray(eNodes).tolist() +# elMasterNodes = set(eNodes_list).intersection(set(onp.array(self.master_array[:, 0]))) +# elSlaveNodes = set(eNodes_list).intersection(set(onp.array(self.slave_array[:, 0]))) + +# nElMasters = sum(len(self.master_array[onp.where(self.master_array[:, 0] == node)[0], 1:].ravel()) for node in elMasterNodes) +# nElSlaves = sum(len(self.slave_array[onp.where(self.slave_array[:, 0] == node)[0], 1:].ravel()) for node in elSlaveNodes) + +# totalDOFs = nElUnknowns[e] + nElMasters + nElSlaves +# nHessianEntries += totalDOFs ** 2 + +# # Allocate sufficient space for Hessian coordinates +# rowCoords = onp.zeros(nHessianEntries, dtype=int) +# colCoords = onp.zeros(nHessianEntries, dtype=int) +# rangeBegin = 0 + +# for e, eNodes in enumerate(conns): +# eNodes_list = onp.asarray(eNodes).tolist() +# elDofs = self.ids[eNodes, :] +# elUnknownFlags = self.isUnknown[eNodes, :] +# elUnknowns = self.dofToUnknown[elDofs[elUnknownFlags]] + +# # Identify master and slave DOFs 
for the element +# elMasterNodes = set(eNodes_list).intersection(set(onp.array(self.master_array[:, 0]))) +# elSlaveNodes = set(eNodes_list).intersection(set(onp.array(self.slave_array[:, 0]))) + +# elMasterDofs = [] +# for node in elMasterNodes: +# master_idx = onp.where(self.master_array[:, 0] == node)[0] +# elMasterDofs.extend(self.master_array[master_idx, 1:].ravel()) + +# elSlaveDofs = [] +# for node in elSlaveNodes: +# slave_idx = onp.where(self.slave_array[:, 0] == node)[0] +# elSlaveDofs.extend(self.slave_array[slave_idx, 1:].ravel()) + +# # Combine unknowns, master, and slave DOFs +# elAllUnknowns = onp.concatenate([elUnknowns, elMasterDofs, elSlaveDofs]) +# elHessCoords = onp.tile(elAllUnknowns, (len(elAllUnknowns), 1)) + +# nElHessianEntries = len(elAllUnknowns) ** 2 +# rangeEnd = rangeBegin + nElHessianEntries + +# # Assign values to the Hessian coordinate arrays +# rowCoords[rangeBegin:rangeEnd] = elHessCoords.ravel() +# colCoords[rangeBegin:rangeEnd] = elHessCoords.T.ravel() + +# rangeBegin += nElHessianEntries + +# return rowCoords, colCoords + +# def _make_hessian_bc_mask(self, conns): +# """Creates a mask for BCs in the Hessian, considering master and slave nodes.""" +# nElements, nNodesPerElement = conns.shape +# nFields = self.ids.shape[1] +# nDofPerElement = nNodesPerElement * nFields + +# hessian_bc_mask = onp.full((nElements, nDofPerElement, nDofPerElement), True, dtype=bool) +# for e, eNodes in enumerate(conns): +# # Get boundary condition flags for all DOFs +# eFlag = self.isBc[eNodes, :].ravel() + +# # Identify master and slave nodes +# eNodes_list = onp.asarray(eNodes).tolist() +# masterFlag = onp.isin(eNodes_list, self.master_array[:, 0]) +# slaveFlag = onp.isin(eNodes_list, self.slave_array[:, 0]) + +# # Expand master and slave flags to match the DOF shape +# masterFlag_expanded = onp.repeat(masterFlag, nFields) +# slaveFlag_expanded = onp.repeat(slaveFlag, nFields) + +# # Combine flags +# combinedFlag = eFlag | masterFlag_expanded | slaveFlag_expanded + +# # Update Hessian mask +# hessian_bc_mask[e, combinedFlag, :] = False +# hessian_bc_mask[e, :, combinedFlag] = False + +# return hessian_bc_mask + + class DofManagerMPC(eqx.Module): fieldShape: Tuple[int, int] @@ -505,11 +673,8 @@ class DofManagerMPC(eqx.Module): HessRowCoords: np.ndarray HessColCoords: np.ndarray hessian_bc_mask: np.ndarray - master_layer: int = eqx.static_field() - slave_layer: int = eqx.static_field() master_array: np.ndarray slave_array: np.ndarray - layers: List[List[int]] = eqx.static_field() def __init__(self, functionSpace, dim, EssentialBCs, mesh): self.fieldShape = (Mesh.num_nodes(functionSpace.mesh), dim) @@ -520,15 +685,13 @@ def __init__(self, functionSpace, dim, EssentialBCs, mesh): self.ids = onp.arange(self.isBc.size).reshape(self.fieldShape) - # Create layers and assign master and slave rows - self.layers = create_nodeset_layers(mesh) - print("These are the layers created for the mesh: ", self.layers) - self.master_layer = int(input("Choose the row index for master nodes: ")) - self.slave_layer = int(input("Choose the row index for slave nodes: ")) + # Ensure master and slave nodesets are defined + if "master" not in functionSpace.mesh.nodeSets or "slave" not in functionSpace.mesh.nodeSets: + raise ValueError("Node sets 'master' and 'slave' must be defined in the mesh.") - # Generate master and slave arrays - self.master_array = self._create_layer_array(self.master_layer) - self.slave_array = self._create_layer_array(self.slave_layer) + # Assign master and slave arrays 
directly from nodesets + self.master_array = self._create_layer_array(functionSpace.mesh.nodeSets["master"]) + self.slave_array = self._create_layer_array(functionSpace.mesh.nodeSets["slave"]) # Create DOF mappings for unknowns self.unknownIndices = self.ids[self.isUnknown] @@ -563,9 +726,8 @@ def get_master_values(self, U): def get_slave_values(self, U): return U[self.slave_array[:, 1:]] - def _create_layer_array(self, layer_index): + def _create_layer_array(self, layer_nodes): """Creates an array for the specified layer (master or slave) with DOF mappings.""" - layer_nodes = self.layers[layer_index] layer_array = [] for node in layer_nodes: node_dofs = self.ids[node] @@ -659,6 +821,3 @@ def _make_hessian_bc_mask(self, conns): hessian_bc_mask[e, :, combinedFlag] = False return hessian_bc_mask - - - diff --git a/optimism/Objective.py b/optimism/Objective.py index 63294d39..b49d98b5 100644 --- a/optimism/Objective.py +++ b/optimism/Objective.py @@ -3,7 +3,6 @@ import numpy as onp from scipy.sparse import diags as sparse_diags from scipy.sparse import csc_matrix -import jax.random as rand # static vs dynamics # differentiable vs undifferentiable @@ -295,9 +294,8 @@ def create_slave_to_master_map(self): num_slaves = len(self.slave_dofs) num_masters = len(self.master_dofs) - key = rand.PRNGKey(0) # Initialize the constraint matrix with the correct dimensions - C = rand.uniform(key, (num_slaves, num_masters)) # Replace with actual mapping logic + C = np.zeros((num_slaves, num_masters)) # Replace with actual mapping logic # Initialize the constant vector d = np.zeros(num_slaves) # Replace with actual constant values if needed @@ -324,9 +322,9 @@ def enforce_constraints(self, x_full): if self.slave_to_master_map is not None: C, d = self.slave_to_master_map slave_values = C @ x_full[self.master_dofs] + d[:, None] - print("Slave values before assignment:", slave_values) + # print("Slave values before assignment:", slave_values) x_full = x_full.at[self.slave_dofs].set(slave_values) - print("x_full after enforcing constraints:", x_full) + # print("x_full after enforcing constraints:", x_full) return x_full diff --git a/optimism/test/square_mesh_compression.e b/optimism/test/square_mesh_compression.e new file mode 100644 index 0000000000000000000000000000000000000000..f70f0015a9d9518b6329134299b4ec3b1c554706 GIT binary patch literal 65324 zcmeF)b)3~@yYKM{x*JqLzyd@%6wyII6kDVWq)X{eMMbedkxs#G#X@qxZpEgtySvZl zv(|mX@%QX`(DUr`$Jqy7ulK&L-+f(c4Qs9MtQiI#ck4E7vog}ZicpH8(7--}yAAF! 
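The compression meshes added in this commit (square_mesh_compression.e / .exo) have to carry the 'master' and 'slave' node sets, since the reworked DofManagerMPC above now looks them up through functionSpace.mesh.nodeSets instead of prompting for layer indices. A minimal sketch of attaching such sets to a small structured mesh; it assumes create_structured_mesh_data and construct_mesh_from_basic_data live in optimism.Mesh (as the MeshFixture usage elsewhere in this series suggests), and the choice of which edges are tied is purely illustrative:

import numpy as onp
from optimism.Mesh import create_structured_mesh_data, construct_mesh_from_basic_data

# 5x5 structured triangulation of the unit square, mirroring MeshFixture
coords, conns = create_structured_mesh_data(5, 5, [0., 1.], [0., 1.])
tol = 1e-8
nodeSets = {
    'bottom': onp.flatnonzero(coords[:, 1] < 0. + tol),
    'top':    onp.flatnonzero(coords[:, 1] > 1. - tol),
    # illustrative tie: left edge (slave) follows the right edge (master)
    'master': onp.flatnonzero(coords[:, 0] > 1. - tol),
    'slave':  onp.flatnonzero(coords[:, 0] < 0. + tol),
}
blocks = {'block': onp.arange(conns.shape[0])}
# passing None for sideSets is an assumption; build real side sets with
# Surface.create_edges, as MeshFixture does, if the test needs them
mesh = construct_mesh_from_basic_data(coords, conns, blocks, nodeSets, None)

With the two sets ordered consistently (entry i of 'slave' tied to entry i of 'master'), _create_layer_array picks up the DOF ids for each tied node directly from these sets.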
literal 0
HcmV?d00001

diff --git a/optimism/test/square_mesh_compression.exo b/optimism/test/square_mesh_compression.exo
new file mode 100644
index 0000000000000000000000000000000000000000..f8a77ec9b394a87b4136a8212acb7f62a9c23fe6
GIT binary patch
literal 30963
zvE0WTg(FO5W%a0^E;Xf(7!Z|3Y52Cgue6VH-)zp-yu7g?F@?E9BTR+DV(zUsq#Xb& zYhho%CtxTyWqXd-P&odb5ArTyZ&%V?VMAL=Hm;@n}2URpeP z7SKX3At}=$C$jm@#I)dyBeXLw`8)b?n%2vfCU!xJO1DpvKlI8uwYQ8hYV%Fo5~)7q z@H9IYu}T7aPtAbghS&0@CF-qKyCqY4DSFgv*t=W5a!qyO6@_w+FqIX>qgH*!)Nn4Z z*~T(@!NH5~Nlal%48l|>tkm_NH55LV*KD)dI{Ikt(!{cvyi3^IHG5*j(3TRdKh|u$ zWl4FRnw7I^!Re(%@mW9%y+=!#7UeH|qMspS8-kIT-_RY9IQE$_8&F0RZy{kF)!LJjqC^X*?rnaK2S!rq*04qwL zzjluMLDS!Nt9J^z^+i|V}RUD6z3zQDzVNyC-eBA5$=EK1J%G%w)>`|Ll#r5-Q8W2e4 zAD&5X63ujmIAL18+R)upgS!sx#1;&&u&M!+!scrx*51 zb3CuG-@)SXG*upIBxD+Gie6_W)J#&r8?zj{wUVZe6(_~40Kf+{+7%y&R`fc)m z3?MOM$(BAKv8ATSc7U+A7QFXcOu^@D`Tnrr`A~PtR;%T2jMDF6WNDqi2HUkv~Ya`Y}tbsmxAW|SXAR-_Krpa@OJeSIoFVA#&E|ceSc`$r(1xXdi zGeaKiJqqOkKMurxpidkZ_&GN4V~j9vh~rqnpCu2+k7I{)j3Lqx$FYPTV~RA?1AG`? z_&Lsq13v7k;0Ht+;(!k;I{bi0LmcpdFW?768tMU`APhes@&Oll2Y&Dnc5(0nANUD= zK;%Ol@PWtR2Sgh1f#(p09}xLaC-@L)@B<xpJ zeE#hwUzsh}HQUygEdLIh-?q1l?Ms&5_NUDDGg&{%Z2!vQ+h_5f5Fekf)U-QF2ja7I zC}TU=U&2Rhe>e^gnQ#}|Yx8xHo;H6M$BF&7^>x{HMCrnGLeiZtWQFdUiQ!YTB70EMG z9;^je8>Y*Hy5k>-689%N9v>e9KWIU@i#{O?n)G}mDDLCe7mLS)PUr9G)%5 0.5 - tol) + + funcSpace = FunctionSpace.construct_function_space(self.mesh, self.quadRule) + self.dofManager = DofManagerMPC(funcSpace, 2, self.EBCs, self.mesh) + + surfaceXCoords = self.mesh.coords[self.mesh.nodeSets['top']][:,0] + self.tractionArea = np.max(surfaceXCoords) - np.min(surfaceXCoords) + + def run(self): + funcSpace = FunctionSpace.construct_function_space(self.mesh, self.quadRule) + mechFuncs = Mechanics.create_mechanics_functions(funcSpace, mode2D='plane strain', materialModel=self.matModel) + + def energy_function(Uu, p): + U = self.create_field(Uu) + internal_variables = p.state_data + strainEnergy = mechFuncs.compute_strain_energy(U, internal_variables) + + F = p.bc_data + def force_function(x, n): + return np.array([0.0, F/self.tractionArea]) + + loadPotential = Mechanics.compute_traction_potential_energy( + funcSpace, U, self.lineQuadRule, self.mesh.sideSets['top'], + force_function) + + return strainEnergy + loadPotential + + def write_output(Uu, p, step): + U = self.create_field(Uu) + plotName = 'results/' + 'output-' + str(step).zfill(3) + writer = VTKWriter.VTKWriter(self.mesh, baseFileName=plotName) + + writer.add_nodal_field(name='disp', nodalData=U, fieldType=VTKWriter.VTKFieldType.VECTORS) + energyDensities, stresses = mechFuncs.compute_output_energy_densities_and_stresses(U, p.state_data) + cellEnergyDensities = FunctionSpace.project_quadrature_field_to_element_field(funcSpace, energyDensities) + cellStresses = FunctionSpace.project_quadrature_field_to_element_field(funcSpace, stresses) + writer.add_cell_field(name='strain_energy_density', + cellData=cellEnergyDensities, + fieldType=VTKWriter.VTKFieldType.SCALARS) + writer.add_cell_field(name='cauchy_stress', + cellData=cellStresses, + fieldType=VTKWriter.VTKFieldType.TENSORS) + writer.write() + + # Problem Set Up + Uu = self.dofManager.get_unknown_values(np.zeros(self.mesh.coords.shape)) + ivs = mechFuncs.compute_initial_state() + p = Objective.Params(bc_data=0., state_data=ivs) + self.objective = Objective.ObjectiveMPC(energy_function, Uu, p, self.dofManager) + + # Load over the steps + force = 0. 
+ + force_inc = self.maxForce / self.steps + + # K = self.objective.hessian(Uu) + # print(f"Hessian first row: {K[0,:5]}") + # print(f"Hessian Shape: {K.shape}") + + for step in range(1, self.steps): + print('------------------------------------') + print('LOAD STEP', step) + force += force_inc + p = Objective.param_index_update(p, 0, force) + Uu, solverSuccess = EquationSolver.nonlinear_equation_solve(self.objective, Uu, p, EquationSolver.get_settings()) + + if self.writeOutput: + write_output(Uu, p, step) + + + + +if __name__ == '__main__': + app = CompressionTest() + app.reload_mesh() + app.run() + + + + + diff --git a/optimism/test/test_shear.py b/optimism/test/test_shear.py new file mode 100644 index 00000000..82c18026 --- /dev/null +++ b/optimism/test/test_shear.py @@ -0,0 +1,161 @@ +# Test Case - Displacement based Shear Test +# --------------------------------------------------- +# Note - This approach involves segregation of master and slave DOFs +# --------------------------------------------------- + +import sys +sys.path.insert(0, "/home/sarvesh/Documents/Github/optimism") + + +import jax.numpy as np +from jax import jit, grad + +from optimism import EquationSolver as EqSolver +from optimism import QuadratureRule +from optimism import Mechanics +from optimism import Mesh +from optimism import Objective +from optimism import FunctionSpace +from optimism.Mesh import create_higher_order_mesh_from_simplex_mesh +from optimism.material import Neohookean +from optimism.FunctionSpace import DofManagerMPC + +from MeshFixture import * +import matplotlib.pyplot as plt + + + +class ShearTest(MeshFixture): + + def setUp(self): + dummyDispGrad = np.eye(2) + self.mesh = self.create_mesh_disp_and_nodeset_layers(5, 5, [0.,1.], [0.,1.], + lambda x: dummyDispGrad.dot(x))[0] + self.mesh = create_higher_order_mesh_from_simplex_mesh(self.mesh, order=1, createNodeSetsFromSideSets=True) + self.quadRule = QuadratureRule.create_quadrature_rule_on_triangle(degree=1) + self.fs = FunctionSpace.construct_function_space(self.mesh, self.quadRule) + + self.EBCs = [FunctionSpace.EssentialBC(nodeSet='top', component=0), + FunctionSpace.EssentialBC(nodeSet='top', component=1), + FunctionSpace.EssentialBC(nodeSet='bottom', component=0), + FunctionSpace.EssentialBC(nodeSet='bottom', component=1)] + + # dofManager = DofManagerMPC(self.fs, 2, self.EBCs, self.mesh) + + # self.master_array = dofManager.isMaster + # self.slave_array = dofManager.isSlave + # self.assembly = dofManager.dofAssembly + # print("Assembly: ", self.assembly) + + shearModulus = 0.855 # MPa + bulkModulus = 1000*shearModulus # MPa + youngModulus = 9.0*bulkModulus*shearModulus / (3.0*bulkModulus + shearModulus) + poissonRatio = (3.0*bulkModulus - 2.0*shearModulus) / 2.0 / (3.0*bulkModulus + shearModulus) + props = { + 'elastic modulus': youngModulus, + 'poisson ratio': poissonRatio, + 'version': 'coupled' + } + self.mat = Neohookean.create_material_model_functions(props) + + self.freq = 0.3 / 2.0 / np.pi + self.cycles = 1 + self.steps_per_cycle = 64 + self.steps = self.cycles*self.steps_per_cycle + totalTime = self.cycles / self.freq + self.dt = totalTime / self.steps + self.maxDisp = 1.0 + + def run(self): + mechFuncs = Mechanics.create_mechanics_functions(self.fs, + "plane strain", + self.mat) + dofManager = DofManagerMPC(self.fs, 2, self.EBCs, self.mesh) + + def create_field(Uu, disp): + def get_ubcs(disp): + V = np.zeros(self.mesh.coords.shape) + index = (self.mesh.nodeSets['top'], 0) + V = V.at[index].set(disp) + return 
dofManager.get_bc_values(V) + + return dofManager.create_field(Uu, get_ubcs(disp)) + + def energy_function_all_dofs(U, p): + internalVariables = p.state_data + return mechFuncs.compute_strain_energy(U, internalVariables, self.dt) + + def compute_energy(Uu, p): + U = create_field(Uu, p.bc_data) + return energy_function_all_dofs(U, p) + + nodal_forces = jit(grad(energy_function_all_dofs, argnums=0)) + integrate_free_energy = jit(mechFuncs.compute_strain_energy) + + def write_output(Uu, p, step): + from optimism import VTKWriter + U = create_field(Uu, p.bc_data) + plotName = "../test_results/disp_based_shear_test/"+'output-'+str(step).zfill(3) + writer = VTKWriter.VTKWriter(self.mesh, baseFileName=plotName) + + writer.add_nodal_field(name='displ', nodalData=U, fieldType=VTKWriter.VTKFieldType.VECTORS) + + energyDensities, stresses = mechFuncs.\ + compute_output_energy_densities_and_stresses(U, p.state_data, self.dt) + cellEnergyDensities = FunctionSpace.project_quadrature_field_to_element_field(self.fs, energyDensities) + cellStresses = FunctionSpace.project_quadrature_field_to_element_field(self.fs, stresses) + writer.add_cell_field(name='strain_energy_density', + cellData=cellEnergyDensities, + fieldType=VTKWriter.VTKFieldType.SCALARS) + writer.add_cell_field(name='stress', + cellData=cellStresses, + fieldType=VTKWriter.VTKFieldType.TENSORS) + writer.write() + + Uu = dofManager.get_unknown_values(np.zeros(self.mesh.coords.shape)) + ivs = mechFuncs.compute_initial_state() + p = Objective.Params(bc_data=0., state_data=ivs) + U = create_field(Uu, p.bc_data) + self.objective = Objective.ObjectiveMPC(compute_energy, Uu, p, dofManager) + + index = (self.mesh.nodeSets['top'], 0) + + time = 0.0 + times = [] + externalWorkStore = [] + incrementalPotentialStore = [] + forceHistory = [] + dispHistory = [] + for step in range(1, self.steps+1): + force_prev = np.array(nodal_forces(U, p).at[index].get()) + applied_disp_prev = U.at[index].get() + + # disp = self.maxDisp * np.sin(2.0 * np.pi * self.freq * time) + disp = self.maxDisp * self.freq * time + print("Displacement in this load step: ", disp) + + p = Objective.param_index_update(p, 0, disp) + Uu, solverSuccess = EqSolver.nonlinear_equation_solve(self.objective, Uu, p, EqSolver.get_settings(), useWarmStart=False) + U = create_field(Uu, p.bc_data) + ivs = mechFuncs.compute_updated_internal_variables(U, p.state_data, self.dt) + p = Objective.param_index_update(p, 1, ivs) + + write_output(Uu, p, step) + + force = np.array(nodal_forces(U, p).at[index].get()) + applied_disp = U.at[index].get() + externalWorkStore.append( 0.5*np.tensordot((force + force_prev),(applied_disp - applied_disp_prev), axes=1) ) + incrementalPotentialStore.append(integrate_free_energy(U, ivs, self.dt)) + + forceHistory.append( np.sum(force) ) + dispHistory.append(disp) + + times.append(time) + time += self.dt + + + + +app = ShearTest() +app.setUp() +app.run() \ No newline at end of file From 918e85e25302fee4808037bbaf15c03def439df0 Mon Sep 17 00:00:00 2001 From: SarveshJoshi33 Date: Thu, 27 Feb 2025 16:39:23 -0500 Subject: [PATCH 7/8] JAX Tracer Error present in non_homogenous_MPC.py, for creating function space --- optimism/FunctionSpace.py | 533 +++++++----------- optimism/Objective.py | 162 ++++-- ...ssion_linear.py => non_homogeneous_MPC.py} | 89 ++- optimism/test/square_mesh_compression.e | Bin 65324 -> 0 bytes optimism/test/square_mesh_compression.exo | Bin 30963 -> 10116 bytes ..._compression_nl.py => test_compression.py} | 2 +- ...sion_nl_MPC.py => 
test_compression_MPC.py} | 27 +- 7 files changed, 388 insertions(+), 425 deletions(-) rename optimism/test/{test_compression_linear.py => non_homogeneous_MPC.py} (61%) delete mode 100644 optimism/test/square_mesh_compression.e rename optimism/test/{test_compression_nl.py => test_compression.py} (99%) rename optimism/test/{test_compression_nl_MPC.py => test_compression_MPC.py} (87%) diff --git a/optimism/FunctionSpace.py b/optimism/FunctionSpace.py index 6b3315a5..8f1cb421 100644 --- a/optimism/FunctionSpace.py +++ b/optimism/FunctionSpace.py @@ -8,6 +8,7 @@ import jax import jax.numpy as np import numpy as onp +import scipy.sparse as sp class EssentialBC(eqx.Module): @@ -362,31 +363,6 @@ def integrate_function_on_edges(functionSpace, func, U, quadRule, edges): integrate_on_edges = jax.vmap(integrate_function_on_edge, (None, None, None, None, 0)) return np.sum(integrate_on_edges(functionSpace, func, U, quadRule, edges)) -def create_nodeset_layers(mesh): - coords = mesh.coords - tol = 1e-8 - # Create unique layers of nodesets along the y-direction - unique_layers = sorted(onp.unique(coords[:,1])) - Ny = int(input("Enter the expected number of nodeset layers in y-direction: ")) - assert len(unique_layers) == Ny, f"ERROR - Expected {Ny} layers, but found {len(unique_layers)}." - - layer_rows = [] - - for i, y_val in enumerate(unique_layers): - nodes_in_layer = onp.flatnonzero(onp.abs(coords[:, 1] - y_val) < tol) - layer_rows.append(nodes_in_layer) - - max_nodes_per_layer = max(len(row) for row in layer_rows) - y_layers = -1 * np.ones((len(layer_rows), max_nodes_per_layer), dtype=int) # Initialize with -1 - - for i, row in enumerate(layer_rows): - y_layers = y_layers.at[i, :len(row)].set(row) # Fill each row with nodes from the layer - - # # For debugging - # print("Layers in y-direction: ", y_layers) - return y_layers - - class DofManager(eqx.Module): # TODO get type hints below correct # TODO this one could be moved to jax types if we move towards @@ -492,332 +468,211 @@ def _make_hessian_bc_mask(self, conns): return hessian_bc_mask -# # DofManager for Multi-Point Constrained Problem - -# class DofManagerMPC(eqx.Module): -# fieldShape: Tuple[int, int] -# isBc: np.ndarray -# isUnknown: np.ndarray -# ids: np.ndarray -# unknownIndices: np.ndarray -# bcIndices: np.ndarray -# dofToUnknown: np.ndarray -# HessRowCoords: np.ndarray -# HessColCoords: np.ndarray -# hessian_bc_mask: np.ndarray -# master_layer: int = eqx.static_field() -# slave_layer: int = eqx.static_field() -# master_array: np.ndarray -# slave_array: np.ndarray -# layers: List[List[int]] = eqx.static_field() - -# def __init__(self, functionSpace, dim, EssentialBCs, mesh): -# self.fieldShape = (Mesh.num_nodes(functionSpace.mesh), dim) -# self.isBc = onp.full(self.fieldShape, False, dtype=bool) -# for ebc in EssentialBCs: -# self.isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True -# self.isUnknown = ~self.isBc - -# self.ids = onp.arange(self.isBc.size).reshape(self.fieldShape) - -# # Create layers and assign master and slave rows -# self.layers = create_nodeset_layers(mesh) -# print("These are the layers created for the mesh: ", self.layers) -# self.master_layer = int(input("Choose the row index for master nodes: ")) -# self.slave_layer = int(input("Choose the row index for slave nodes: ")) - -# # Generate master and slave arrays -# self.master_array = self._create_layer_array(self.master_layer) -# self.slave_array = self._create_layer_array(self.slave_layer) - -# # Create DOF mappings for unknowns -# 
self.unknownIndices = self.ids[self.isUnknown] -# self.bcIndices = self.ids[self.isBc] - -# ones = np.ones(self.isBc.size, dtype=int) * -1 -# self.dofToUnknown = ones.at[self.unknownIndices].set(np.arange(self.unknownIndices.size)) - -# # Compute Hessian-related data -# self.HessRowCoords, self.HessColCoords = self._make_hessian_coordinates(functionSpace.mesh.conns) -# self.hessian_bc_mask = self._make_hessian_bc_mask(functionSpace.mesh.conns) - -# def get_bc_size(self): -# return np.sum(self.isBc).item() - -# def get_unknown_size(self): -# return np.sum(self.isUnknown).item() - -# def create_field(self, Uu, Ubc=0.0): -# U = np.zeros(self.isBc.shape).at[self.isBc].set(Ubc) -# return U.at[self.isUnknown].set(Uu) - -# def get_bc_values(self, U): -# return U[self.isBc] - -# def get_unknown_values(self, U): -# return U[self.isUnknown] - -# def get_master_values(self, U): -# return U[self.master_array[:, 1:]] - -# def get_slave_values(self, U): -# return U[self.slave_array[:, 1:]] - -# def _create_layer_array(self, layer_index): -# """Creates an array for the specified layer (master or slave) with DOF mappings.""" -# layer_nodes = self.layers[layer_index] -# layer_array = [] -# for node in layer_nodes: -# node_dofs = self.ids[node] -# layer_array.append([node, *node_dofs]) -# return np.array(layer_array, dtype=int) - -# def _make_hessian_coordinates(self, conns): -# """Creates row and column coordinates for the Hessian, considering master and slave nodes.""" -# nElUnknowns = onp.zeros(conns.shape[0], dtype=int) -# nHessianEntries = 0 -# for e, eNodes in enumerate(conns): -# elUnknownFlags = self.isUnknown[eNodes, :].ravel() -# nElUnknowns[e] = onp.sum(elUnknownFlags) - -# # Include master and slave nodes in the size calculation -# eNodes_list = onp.asarray(eNodes).tolist() -# elMasterNodes = set(eNodes_list).intersection(set(onp.array(self.master_array[:, 0]))) -# elSlaveNodes = set(eNodes_list).intersection(set(onp.array(self.slave_array[:, 0]))) - -# nElMasters = sum(len(self.master_array[onp.where(self.master_array[:, 0] == node)[0], 1:].ravel()) for node in elMasterNodes) -# nElSlaves = sum(len(self.slave_array[onp.where(self.slave_array[:, 0] == node)[0], 1:].ravel()) for node in elSlaveNodes) - -# totalDOFs = nElUnknowns[e] + nElMasters + nElSlaves -# nHessianEntries += totalDOFs ** 2 - -# # Allocate sufficient space for Hessian coordinates -# rowCoords = onp.zeros(nHessianEntries, dtype=int) -# colCoords = onp.zeros(nHessianEntries, dtype=int) -# rangeBegin = 0 - -# for e, eNodes in enumerate(conns): -# eNodes_list = onp.asarray(eNodes).tolist() -# elDofs = self.ids[eNodes, :] -# elUnknownFlags = self.isUnknown[eNodes, :] -# elUnknowns = self.dofToUnknown[elDofs[elUnknownFlags]] - -# # Identify master and slave DOFs for the element -# elMasterNodes = set(eNodes_list).intersection(set(onp.array(self.master_array[:, 0]))) -# elSlaveNodes = set(eNodes_list).intersection(set(onp.array(self.slave_array[:, 0]))) +# DofManager for Multi-Point Constrained Problem -# elMasterDofs = [] -# for node in elMasterNodes: -# master_idx = onp.where(self.master_array[:, 0] == node)[0] -# elMasterDofs.extend(self.master_array[master_idx, 1:].ravel()) - -# elSlaveDofs = [] -# for node in elSlaveNodes: -# slave_idx = onp.where(self.slave_array[:, 0] == node)[0] -# elSlaveDofs.extend(self.slave_array[slave_idx, 1:].ravel()) - -# # Combine unknowns, master, and slave DOFs -# elAllUnknowns = onp.concatenate([elUnknowns, elMasterDofs, elSlaveDofs]) -# elHessCoords = onp.tile(elAllUnknowns, (len(elAllUnknowns), 
1)) - -# nElHessianEntries = len(elAllUnknowns) ** 2 -# rangeEnd = rangeBegin + nElHessianEntries - -# # Assign values to the Hessian coordinate arrays -# rowCoords[rangeBegin:rangeEnd] = elHessCoords.ravel() -# colCoords[rangeBegin:rangeEnd] = elHessCoords.T.ravel() - -# rangeBegin += nElHessianEntries - -# return rowCoords, colCoords - -# def _make_hessian_bc_mask(self, conns): -# """Creates a mask for BCs in the Hessian, considering master and slave nodes.""" -# nElements, nNodesPerElement = conns.shape -# nFields = self.ids.shape[1] -# nDofPerElement = nNodesPerElement * nFields - -# hessian_bc_mask = onp.full((nElements, nDofPerElement, nDofPerElement), True, dtype=bool) -# for e, eNodes in enumerate(conns): -# # Get boundary condition flags for all DOFs -# eFlag = self.isBc[eNodes, :].ravel() - -# # Identify master and slave nodes -# eNodes_list = onp.asarray(eNodes).tolist() -# masterFlag = onp.isin(eNodes_list, self.master_array[:, 0]) -# slaveFlag = onp.isin(eNodes_list, self.slave_array[:, 0]) - -# # Expand master and slave flags to match the DOF shape -# masterFlag_expanded = onp.repeat(masterFlag, nFields) -# slaveFlag_expanded = onp.repeat(slaveFlag, nFields) - -# # Combine flags -# combinedFlag = eFlag | masterFlag_expanded | slaveFlag_expanded - -# # Update Hessian mask -# hessian_bc_mask[e, combinedFlag, :] = False -# hessian_bc_mask[e, :, combinedFlag] = False - -# return hessian_bc_mask +# class DofManagerMPC(DofManager): +# master_slave_pairs: any +# T: any +# s_tilde: any +# isIndependent: any +# dim: any +# def __init__(self, functionSpace, dim, EssentialBCs, master_slave_pairs): +# super().__init__(functionSpace, dim, EssentialBCs) + +# self.master_slave_pairs = master_slave_pairs +# self.dim = dim # Store the dimension (2D or 3D) +# self.create_mpc_transformation() + +# def create_mpc_transformation(self): +# """ +# Constructs the transformation matrix T and shift vector s_tilde +# for condensation of multipoint constraints. + +# - Independent DOFs (`ui` + `up`) remain. +# - Constrained DOFs (`uc`) are condensed out. +# - Transformation follows `u = T ũ + s̃` as per the condensation method. 
+# """ +# num_dofs = self.ids.size # Total DOFs +# num_total_dofs = num_dofs * self.dim # Account for x, y (or x, y, z) + +# # Identify independent (ui + up) and constrained DOFs (uc) +# independent_mask = ~onp.isin(self.ids, list(self.master_slave_pairs.keys())) +# independent_indices = onp.where(independent_mask)[0] +# constrained_indices = onp.array(list(self.master_slave_pairs.keys())) + +# num_independent_dofs = len(independent_indices) + +# # Initialize T matrix and shift vector s_tilde +# T = onp.zeros((num_total_dofs, num_independent_dofs * self.dim)) +# s_tilde = onp.zeros((num_total_dofs,)) # Ensures correct shape + +# # Identity mapping for independent DOFs (ui and up) +# for i, dof in enumerate(independent_indices): +# for d in range(self.dim): +# T[dof * self.dim + d, i * self.dim + d] = 1 # Block identity + +# # ** Compute Constraint Matrix `C` ** +# C = onp.zeros((len(constrained_indices) * self.dim, num_independent_dofs * self.dim)) + +# # Define `A_c` and `A_p` from constraint relationships +# A_c = onp.eye(len(constrained_indices) * self.dim) # Identity matrix for simplicity +# A_p = onp.zeros((len(constrained_indices) * self.dim, num_independent_dofs * self.dim)) + +# for i, slave in enumerate(constrained_indices): +# master = self.master_slave_pairs[slave] +# master_index = onp.where(independent_indices == master)[0][0] # Locate master in independent list + +# for d in range(self.dim): +# A_p[i * self.dim + d, master_index * self.dim + d] = 1 # Master-Slave dependency + +# # Solve for `C = -A_c⁻¹ A_p` (since `A_c` is identity, `C = -A_p`) +# C = -A_p # Direct assignment + +# # Compute shift vector `s_tilde = A_c⁻¹ b` (assume b = 0 for now) +# b = onp.zeros(len(constrained_indices) * self.dim) # Modify if external forces exist +# s_tilde_constrained = np.linalg.solve(A_c, b) # Solve A_c * uc = b +# for i, slave in enumerate(constrained_indices): +# for d in range(self.dim): +# s_tilde[slave * self.dim + d] = s_tilde_constrained[i * self.dim + d] # **Fixed Indexing** + +# # Assign constraints to transformation matrix `T` +# for i, slave in enumerate(constrained_indices): +# for d in range(self.dim): +# T[slave * self.dim + d, :] = C[i * self.dim + d, :] # Apply condensation row + +# # ** Convert to JAX-compatible arrays after full setup ** +# self.T = np.array(T) # Shape (total DOFs * dim, independent DOFs * dim) +# self.s_tilde = np.array(s_tilde) # Convert shift vector properly + +# # ** Update unknown indices ** +# self.isIndependent = independent_mask +# self.unknownIndices = self.ids[self.isIndependent] +# self.bcIndices = self.ids[self.isBc] +# ones = onp.ones(num_dofs, dtype=int) * -1 +# self.dofToUnknown = ones +# self.dofToUnknown[self.unknownIndices] = onp.arange(self.unknownIndices.size) -class DofManagerMPC(eqx.Module): - fieldShape: Tuple[int, int] - isBc: np.ndarray - isUnknown: np.ndarray - ids: np.ndarray - unknownIndices: np.ndarray - bcIndices: np.ndarray - dofToUnknown: np.ndarray - HessRowCoords: np.ndarray - HessColCoords: np.ndarray - hessian_bc_mask: np.ndarray - master_array: np.ndarray - slave_array: np.ndarray - - def __init__(self, functionSpace, dim, EssentialBCs, mesh): - self.fieldShape = (Mesh.num_nodes(functionSpace.mesh), dim) - self.isBc = onp.full(self.fieldShape, False, dtype=bool) - for ebc in EssentialBCs: - self.isBc[functionSpace.mesh.nodeSets[ebc.nodeSet], ebc.component] = True - self.isUnknown = ~self.isBc +# return self.T, self.s_tilde # **Explicitly Return Transformation Matrix and Shift Vector** - self.ids = 
onp.arange(self.isBc.size).reshape(self.fieldShape) +# def create_field(self, Uu, Ubc=None): +# """ +# Constructs the field using only the independent DOFs (ui and up). +# """ +# num_dofs = self.unknownIndices.shape[0] # Number of independent DOFs +# U_unconstrained = np.zeros((num_dofs, self.dim)) # Shape (num_independent_dofs, dim) - # Ensure master and slave nodesets are defined - if "master" not in functionSpace.mesh.nodeSets or "slave" not in functionSpace.mesh.nodeSets: - raise ValueError("Node sets 'master' and 'slave' must be defined in the mesh.") +# # Fix: Convert isIndependent to a NumPy array before using np.where +# independent_indices = onp.where(onp.array(self.isIndependent).reshape(-1, self.dim))[0] - # Assign master and slave arrays directly from nodesets - self.master_array = self._create_layer_array(functionSpace.mesh.nodeSets["master"]) - self.slave_array = self._create_layer_array(functionSpace.mesh.nodeSets["slave"]) +# # Set independent DOFs (ui and up) +# U_unconstrained = U_unconstrained.at[independent_indices].set(Uu) - # Create DOF mappings for unknowns - self.unknownIndices = self.ids[self.isUnknown] - self.bcIndices = self.ids[self.isBc] - - ones = np.ones(self.isBc.size, dtype=int) * -1 - self.dofToUnknown = ones.at[self.unknownIndices].set(np.arange(self.unknownIndices.size)) +# # Apply transformation: u = T * ũ + s̃ +# return np.matmul(self.T, U_unconstrained.reshape(-1)) + self.s_tilde - # Compute Hessian-related data - self.HessRowCoords, self.HessColCoords = self._make_hessian_coordinates(functionSpace.mesh.conns) - self.hessian_bc_mask = self._make_hessian_bc_mask(functionSpace.mesh.conns) +# def get_unknown_values(self, U): +# """ +# Retrieves only the independent DOF values. +# """ +# independent_indices = onp.where(self.isIndependent)[0] # Get independent DOF indices +# return U[independent_indices] # Directly index U without reshaping - def get_bc_size(self): - return np.sum(self.isBc).item() +# def get_bc_values(self, U): +# return U[self.isBc] - def get_unknown_size(self): - return np.sum(self.isUnknown).item() +class DofManagerMPC(DofManager): + def __init__(self, functionSpace, dim, EssentialBCs, master_slave_pairs, C, s): + """ + Initializes the DOF manager for MPC using precomputed C (constraint matrix) + and s (shift vector). + + Parameters: + - functionSpace: The function space used for DOF management. + - dim: Number of spatial dimensions (2D or 3D). + - EssentialBCs: Essential boundary conditions applied to the problem. + - master_slave_pairs: Dictionary mapping master nodes to slave nodes. + - C: Constraint matrix that enforces u_s = C * u_m + s. + - s: Shift vector (s̃) for enforcing non-homogeneous constraints. + """ + super().__init__(functionSpace, dim, EssentialBCs) + + self.master_slave_pairs = master_slave_pairs + self.dim = dim + self.C = np.array(C) # Constraint matrix (already precomputed) + self.s_tilde = np.array(s) # Shift vector + + self.create_mpc_transformation() + + def create_mpc_transformation(self): + """ + Constructs the transformation matrix T and updates the DOF mapping. 
+ """ + num_dofs = self.ids.size # Total DOFs in the system + num_total_dofs = num_dofs * self.dim + + # Identify independent (ui + up) and constrained DOFs (uc) + independent_mask = ~np.isin(self.ids, list(self.master_slave_pairs.keys())) + independent_indices = np.where(independent_mask)[0] + constrained_indices = np.array(list(self.master_slave_pairs.keys())) + + num_independent_dofs = len(independent_indices) + + # ** Directly use precomputed C instead of constructing it manually ** + T = np.zeros((num_total_dofs, num_independent_dofs * self.dim)) + s_tilde = np.zeros((num_total_dofs,)) # Ensures correct shape + + # Identity mapping for independent DOFs + for i, dof in enumerate(independent_indices): + for d in range(self.dim): + T[dof * self.dim + d, i * self.dim + d] = 1 + + # Assign constraints from precomputed `C` + for i, slave in enumerate(constrained_indices): + for d in range(self.dim): + T[slave * self.dim + d, :] = self.C[i * self.dim + d, :] # Assigning rows from `C` + s_tilde[slave * self.dim + d] = self.s_tilde[i * self.dim + d] # Applying shift + + # Store transformation matrix and shift vector + self.T = T + self.s_tilde = s_tilde + + # ** Update unknown DOFs mapping ** + self.isIndependent = independent_mask + self.unknownIndices = self.ids[self.isIndependent] # Independent DOFs + self.bcIndices = self.ids[self.isBc] # Essential BC indices + + ones = np.ones(num_dofs, dtype=int) * -1 + self.dofToUnknown = ones + self.dofToUnknown[self.unknownIndices] = np.arange(self.unknownIndices.size) + + return self.T, self.s_tilde + + def create_field(self, Uu, Ubc=None): + """ + Constructs the field using only the independent DOFs (ui and up). + Applies transformation `u = T * ũ + s̃`. + """ + num_dofs = self.unknownIndices.shape[0] + U_unconstrained = np.zeros((num_dofs, self.dim)) + + # Set independent DOFs + independent_indices = np.where(np.array(self.isIndependent).reshape(-1, self.dim))[0] + U_unconstrained[independent_indices] = Uu + + # Apply transformation: u = T * ũ + s̃ + return np.matmul(self.T, U_unconstrained.reshape(-1)) + self.s_tilde - def create_field(self, Uu, Ubc=0.0): - U = np.zeros(self.isBc.shape).at[self.isBc].set(Ubc) - return U.at[self.isUnknown].set(Uu) + def get_unknown_values(self, U): + """ + Retrieves only the independent DOF values. + """ + independent_indices = np.where(self.isIndependent)[0] + return U[independent_indices] def get_bc_values(self, U): + """ + Retrieves boundary condition values. 
+ """ return U[self.isBc] - - def get_unknown_values(self, U): - return U[self.isUnknown] - - def get_master_values(self, U): - return U[self.master_array[:, 1:]] - - def get_slave_values(self, U): - return U[self.slave_array[:, 1:]] - - def _create_layer_array(self, layer_nodes): - """Creates an array for the specified layer (master or slave) with DOF mappings.""" - layer_array = [] - for node in layer_nodes: - node_dofs = self.ids[node] - layer_array.append([node, *node_dofs]) - return np.array(layer_array, dtype=int) - - def _make_hessian_coordinates(self, conns): - """Creates row and column coordinates for the Hessian, considering master and slave nodes.""" - nElUnknowns = onp.zeros(conns.shape[0], dtype=int) - nHessianEntries = 0 - for e, eNodes in enumerate(conns): - elUnknownFlags = self.isUnknown[eNodes, :].ravel() - nElUnknowns[e] = onp.sum(elUnknownFlags) - - # Include master and slave nodes in the size calculation - eNodes_list = onp.asarray(eNodes).tolist() - elMasterNodes = set(eNodes_list).intersection(set(onp.array(self.master_array[:, 0]))) - elSlaveNodes = set(eNodes_list).intersection(set(onp.array(self.slave_array[:, 0]))) - - nElMasters = sum(len(self.master_array[onp.where(self.master_array[:, 0] == node)[0], 1:].ravel()) for node in elMasterNodes) - nElSlaves = sum(len(self.slave_array[onp.where(self.slave_array[:, 0] == node)[0], 1:].ravel()) for node in elSlaveNodes) - - totalDOFs = nElUnknowns[e] + nElMasters + nElSlaves - nHessianEntries += totalDOFs ** 2 - - # Allocate sufficient space for Hessian coordinates - rowCoords = onp.zeros(nHessianEntries, dtype=int) - colCoords = onp.zeros(nHessianEntries, dtype=int) - rangeBegin = 0 - - for e, eNodes in enumerate(conns): - eNodes_list = onp.asarray(eNodes).tolist() - elDofs = self.ids[eNodes, :] - elUnknownFlags = self.isUnknown[eNodes, :] - elUnknowns = self.dofToUnknown[elDofs[elUnknownFlags]] - - # Identify master and slave DOFs for the element - elMasterNodes = set(eNodes_list).intersection(set(onp.array(self.master_array[:, 0]))) - elSlaveNodes = set(eNodes_list).intersection(set(onp.array(self.slave_array[:, 0]))) - - elMasterDofs = [] - for node in elMasterNodes: - master_idx = onp.where(self.master_array[:, 0] == node)[0] - elMasterDofs.extend(self.master_array[master_idx, 1:].ravel()) - - elSlaveDofs = [] - for node in elSlaveNodes: - slave_idx = onp.where(self.slave_array[:, 0] == node)[0] - elSlaveDofs.extend(self.slave_array[slave_idx, 1:].ravel()) - - # Combine unknowns, master, and slave DOFs - elAllUnknowns = onp.concatenate([elUnknowns, elMasterDofs, elSlaveDofs]) - elHessCoords = onp.tile(elAllUnknowns, (len(elAllUnknowns), 1)) - - nElHessianEntries = len(elAllUnknowns) ** 2 - rangeEnd = rangeBegin + nElHessianEntries - - # Assign values to the Hessian coordinate arrays - rowCoords[rangeBegin:rangeEnd] = elHessCoords.ravel() - colCoords[rangeBegin:rangeEnd] = elHessCoords.T.ravel() - - rangeBegin += nElHessianEntries - - return rowCoords, colCoords - - def _make_hessian_bc_mask(self, conns): - """Creates a mask for BCs in the Hessian, considering master and slave nodes.""" - nElements, nNodesPerElement = conns.shape - nFields = self.ids.shape[1] - nDofPerElement = nNodesPerElement * nFields - - hessian_bc_mask = onp.full((nElements, nDofPerElement, nDofPerElement), True, dtype=bool) - for e, eNodes in enumerate(conns): - # Get boundary condition flags for all DOFs - eFlag = self.isBc[eNodes, :].ravel() - - # Identify master and slave nodes - eNodes_list = onp.asarray(eNodes).tolist() - masterFlag = 
onp.isin(eNodes_list, self.master_array[:, 0]) - slaveFlag = onp.isin(eNodes_list, self.slave_array[:, 0]) - - # Expand master and slave flags to match the DOF shape - masterFlag_expanded = onp.repeat(masterFlag, nFields) - slaveFlag_expanded = onp.repeat(slaveFlag, nFields) - - # Combine flags - combinedFlag = eFlag | masterFlag_expanded | slaveFlag_expanded - - # Update Hessian mask - hessian_bc_mask[e, combinedFlag, :] = False - hessian_bc_mask[e, :, combinedFlag] = False - - return hessian_bc_mask diff --git a/optimism/Objective.py b/optimism/Objective.py index b49d98b5..b0d15ac4 100644 --- a/optimism/Objective.py +++ b/optimism/Objective.py @@ -1,8 +1,12 @@ from optimism.JaxConfig import * from optimism.SparseCholesky import SparseCholesky import numpy as onp -from scipy.sparse import diags as sparse_diags +import jax.numpy as np +import jax +from jax import jit, grad, jacfwd, jvp, vjp from scipy.sparse import csc_matrix +from scipy.sparse import diags as sparse_diags + # static vs dynamics # differentiable vs undifferentiable @@ -265,67 +269,105 @@ def get_value(self, x): def get_residual(self, x): return self.gradient(self.scaling * x) +# Objective class for handling Multi-Point Constraints + +# class ObjectiveMPC: +# def __init__(self, f, x, p, dofManagerMPC, precondStrategy=None): +# """ +# Objective function for Multi-Point Constraints (MPC), ensuring +# condensation of independent DOFs (ui, up). +# """ +# self.precond = None +# self.precondStrategy = precondStrategy +# self.dofManagerMPC = dofManagerMPC # Store DOF Manager with MPC +# self.p = p + +# # JIT compile function derivatives +# self.objective = jit(f) +# self.grad_x = jit(grad(f, 0)) +# self.grad_p = jit(grad(f, 1)) +# self.hess = jit(jacfwd(self.grad_x, 0)) + +# # Create transformation matrix & shift vector +# self.T = self.dofManagerMPC.T +# self.s_tilde = self.dofManagerMPC.s_tilde + +# def value(self, x): +# """Compute objective function value.""" +# x_full = self.expand_to_full_dofs(x) +# return self.objective(x_full, self.p) + +# def gradient(self, x): +# """Compute reduced gradient.""" +# x_full = self.expand_to_full_dofs(x) +# grad_full = self.grad_x(x_full, self.p) +# return np.matmul(self.T.T, grad_full) # Reduce gradient + +# def hessian(self, x): +# """Compute reduced Hessian H̃ = Tᵀ H_full T""" +# print("Computing Hessian with MPC...") + +# x_full = self.expand_to_full_dofs(x) +# H_full = self.hess(x_full, self.p) # Compute full Hessian + +# return np.matmul(self.T.T, np.matmul(H_full, self.T)) # Condensed Hessian + +# def expand_to_full_dofs(self, x): +# """ +# Expand reduced DOF vector `x` (ui, up) to full DOFs (including uc) +# using transformation: u = T ũ + s̃. +# """ +# x_full = np.matmul(self.T, x) + self.s_tilde +# return x_full + +# def update_precond(self, x): +# """Update preconditioner with reduced Hessian.""" +# if self.precondStrategy is None: +# print("Updating with dense preconditioner in ObjectiveMPC.") +# H_reduced = csc_matrix(self.hessian(x)) +# self.precond = H_reduced +# else: +# self.precondStrategy.initialize(x, self.p) +# self.precond = self.precondStrategy.precond_at_attempt class ObjectiveMPC(Objective): - def __init__(self, f, x, p, dofManagerMPC, precondStrategy=None): - """ - Initialize the Objective class for MPC with full DOFs and master-slave constraints. - - Parameters: - - f: Objective function. - - x: Initial guess for the primal DOF vector. - - p: Parameters for the objective function. - - dofManagerMPC: Instance of DofManagerMPC with master-slave relationships. 
- - precondStrategy: Preconditioning strategy (optional). - """ - super().__init__(f, x, p, precondStrategy) + def __init__(self, objective_func, dofManagerMPC, x0, p, precondStrategy=None): self.dofManagerMPC = dofManagerMPC - self.master_dofs = dofManagerMPC.master_array[:, 1:] - self.slave_dofs = dofManagerMPC.slave_array[:, 1:] - self.slave_to_master_map = self.create_slave_to_master_map() - - def create_slave_to_master_map(self): - """ - Create the mapping to express slave DOFs in terms of master DOFs. - Returns: - - C: Constraint matrix (2D array). - - d: Constant vector (1D array). - """ - num_slaves = len(self.slave_dofs) - num_masters = len(self.master_dofs) - - # Initialize the constraint matrix with the correct dimensions - C = np.zeros((num_slaves, num_masters)) # Replace with actual mapping logic - - # Initialize the constant vector - d = np.zeros(num_slaves) # Replace with actual constant values if needed - - return C, d - - - def value(self, x_full): - # Ensure the constraints are satisfied - x_full = self.enforce_constraints(x_full) - return super().value(x_full) - - def gradient(self, x_full): - x_full = self.enforce_constraints(x_full) - grad_full = super().gradient(x_full) - return grad_full - - def hessian_vec(self, x_full, vx_full): - x_full = self.enforce_constraints(x_full) - hess_vec_full = super().hessian_vec(x_full, vx_full) - return hess_vec_full - - def enforce_constraints(self, x_full): - if self.slave_to_master_map is not None: - C, d = self.slave_to_master_map - slave_values = C @ x_full[self.master_dofs] + d[:, None] - # print("Slave values before assignment:", slave_values) - x_full = x_full.at[self.slave_dofs].set(slave_values) - # print("x_full after enforcing constraints:", x_full) - return x_full - + self.T = np.array(dofManagerMPC.T) # Transformation matrix + self.s_tilde = np.array(dofManagerMPC.s_tilde) # Shift vector + self.scaling = 1.0 # Optional scaling factor + self.invScaling = 1.0 + def condensed_objective(xBar, p): + x = self.expand_to_full_dofs(xBar) + return objective_func(x, p) + + xBar0 = self.reduce_to_independent_dofs(x0) + super().__init__(condensed_objective, xBar0, p, precondStrategy) + + def reduce_to_independent_dofs(self, x_full): + """Extracts independent DOFs (ui, up) from full DOF vector.""" + return np.matmul(self.T.T, x_full - self.s_tilde) + def expand_to_full_dofs(self, x_reduced): + """Expands reduced DOF vector (ui, up) to full DOFs including uc.""" + return np.matmul(self.T, x_reduced) + self.s_tilde + + def get_value(self, x): + return self.value(self.expand_to_full_dofs(x)) + + def get_residual(self, x): + return self.gradient(self.expand_to_full_dofs(x)) + + def hessian(self, x): + """Computes the condensed Hessian for the independent DOFs.""" + x_full = self.expand_to_full_dofs(self.scaling * x) + H_full = self.hess(x_full, self.p) # Full Hessian from original problem + H_reduced = np.matmul(self.T.T, np.matmul(H_full, self.T)) # Condensed Hessian + return H_reduced + + def update_precond(self, x): + """Updates preconditioner using the condensed Hessian.""" + print("Updating with condensed Hessian preconditioner.") + H_reduced = csc_matrix(self.hessian(x)) # Use reduced Hessian + self.precondStrategy.update(H_reduced) diff --git a/optimism/test/test_compression_linear.py b/optimism/test/non_homogeneous_MPC.py similarity index 61% rename from optimism/test/test_compression_linear.py rename to optimism/test/non_homogeneous_MPC.py index 797beed0..4c27e514 100644 --- a/optimism/test/test_compression_linear.py +++ 
b/optimism/test/non_homogeneous_MPC.py @@ -1,25 +1,27 @@ # -------------------------------------------------------------------------- # TEST CASE - Compression Test for implementation of MPCs # -------------------------------------------------------------------------- -# Note - Using Cubit Coreform for meshing +# Note - Using Cubit Coreform for meshing, solved using NON-LINEAR SOLVER # -------------------------------------------------------------------------- +import sys +sys.path.insert(0, "/home/sarvesh/Documents/Github/optimism") -from jax import jit, grad -from jax.scipy.linalg import solve +from jax import jit from optimism import VTKWriter +from optimism import EquationSolver from optimism import FunctionSpace from optimism import Mechanics from optimism import Mesh from optimism import Objective from optimism import QuadratureRule from optimism import ReadExodusMesh -from optimism.FunctionSpace import DofManager +from optimism.FunctionSpace import DofManagerMPC from optimism.FunctionSpace import EssentialBC from optimism.material import Neohookean import jax.numpy as np import numpy as onp - +from scipy.spatial import cKDTree class CompressionTest: @@ -49,7 +51,7 @@ def __init__(self): self.input_mesh = 'square_mesh_compression.exo' - self.maxForce = 0.1 + self.maxForce = -0.2 self.steps = 50 def create_field(self, Uu): @@ -63,12 +65,69 @@ def reload_mesh(self): origMesh = ReadExodusMesh.read_exodus_mesh(self.input_mesh) self.mesh = Mesh.create_higher_order_mesh_from_simplex_mesh(origMesh, order=2, createNodeSetsFromSideSets=True) - funcSpace = FunctionSpace.construct_function_space(self.mesh, self.quadRule) - self.dofManager = DofManager(funcSpace, 2, self.EBCs) + self.funcSpace = FunctionSpace.construct_function_space(self.mesh, self.quadRule) # This is where error is showcased surfaceXCoords = self.mesh.coords[self.mesh.nodeSets['top']][:,0] self.tractionArea = np.max(surfaceXCoords) - np.min(surfaceXCoords) + def master_slave_mapping(self): + coords = self.mesh.coords + tol = 1e-8 + + # Creating nodesets for master and slave + self.mesh.nodeSets['master'] = np.flatnonzero(coords[:,0] < -0.5 + tol) + self.mesh.nodeSets['slave'] = np.flatnonzero(coords[:,0] > 0.5 - tol) + + # Get master and slave node coordinates + master_coords = coords[self.mesh.nodeSets['master']] + slave_coords = coords[self.mesh.nodeSets['slave']] + + # Sort nodes by y-coordinate to ensure correct pairing + master_sorted_idx = np.argsort(master_coords[:, 1]) + slave_sorted_idx = np.argsort(slave_coords[:, 1]) + + # Ensure one-to-one pairing by iterating through sorted nodes + self.master_slave_pairs = { + int(self.mesh.nodeSets['master'][master_sorted_idx[i]]): + int(self.mesh.nodeSets['slave'][slave_sorted_idx[i]]) + for i in range(min(len(master_sorted_idx), len(slave_sorted_idx))) + } + + print("Master-Slave Pairs:", self.master_slave_pairs) + + def implement_constraints(self, alpha=2.0, shift_type="linear"): + coords = self.mesh.coords + + num_constraints = len(self.master_slave_pairs) + print("Number of Constraints: ", num_constraints) + + # Define Ap (Master Constraints) and Ac (Slave Constraints) + Ap = onp.ones((num_constraints, len(self.mesh.coords))) + Ac = onp.eye(num_constraints)*alpha + + b = onp.ones(num_constraints) + + for idx, (master, slave) in enumerate(self.master_slave_pairs.items()): + Ap[idx, master] = 1 # Master DOF stays unchanged + + # Compute nonzero shift `b[idx]` based on shift type + if shift_type == "linear": + b[idx] = coords[slave, 0] + coords[slave, 1] # Linear shift + elif 
shift_type == "sinusoidal": + b[idx] = onp.sin(coords[slave, 1]) # Sinusoidal variation + elif shift_type == "random": + b[idx] = onp.random.uniform(-0.1, 0.1) # Small random perturbation + else: + b[idx] = 0 # Default to zero shift + + # Getting C and s + Ac_inv = onp.linalg.inv(Ac) + C = -Ac_inv @ Ap + s = Ac_inv @ b + + print("Constraint Matrix:\n", C) + print("Shift Vector:\n", s) + def run(self): funcSpace = FunctionSpace.construct_function_space(self.mesh, self.quadRule) mechFuncs = Mechanics.create_mechanics_functions(funcSpace, mode2D='plane strain', materialModel=self.matModel) @@ -87,7 +146,7 @@ def force_function(x, n): force_function) return strainEnergy + loadPotential - + def write_output(Uu, p, step): U = self.create_field(Uu) plotName = 'results/' + 'output-' + str(step).zfill(3) @@ -109,28 +168,20 @@ def write_output(Uu, p, step): Uu = self.dofManager.get_unknown_values(np.zeros(self.mesh.coords.shape)) ivs = mechFuncs.compute_initial_state() p = Objective.Params(bc_data=0., state_data=ivs) - self.objective = Objective.Objective(energy_function, Uu, p) + self.objective = Objective.ObjectiveMPC(energy_function, Uu, p, self.dofManager, precondStrategy=None) # Load over the steps force = 0. force_inc = self.maxForce / self.steps - K = self.objective.hessian(Uu) - # print(f"Hessian first row: {K[0,:5]}") - # print(f"Hessian Shape: {K.shape}") for step in range(1, self.steps): print('------------------------------------') print('LOAD STEP', step) force += force_inc p = Objective.param_index_update(p, 0, force) - print(f"Step {step}: Applied force = {force}, Updated p = {p.bc_data}") - print(f"Force inside energy function: {p.bc_data}") - - nodal_forces = jit(grad(energy_function, argnums=0)) - F = nodal_forces(Uu, p) - Uu = solve(K, F) + Uu, solverSuccess = EquationSolver.nonlinear_equation_solve(self.objective, Uu, p, EquationSolver.get_settings()) if self.writeOutput: write_output(Uu, p, step) diff --git a/optimism/test/square_mesh_compression.e b/optimism/test/square_mesh_compression.e deleted file mode 100644 index f70f0015a9d9518b6329134299b4ec3b1c554706..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 65324 zcmeF)b)3~@yYKM{x*JqLzyd@%6wyII6kDVWq)X{eMMbedkxs#G#X@qxZpEgtySvZl zv(|mX@%QX`(DUr`$Jqy7ulK&L-+f(c4Qs9MtQiI#ck4E7vog}ZicpH8(7--}yAAF! 
diff --git a/optimism/test/square_mesh_compression.exo b/optimism/test/square_mesh_compression.exo
index f8a77ec9b394a87b4136a8212acb7f62a9c23fe6..5b7937570f4aa292b1da2934c42517d8428b4991 100644
Binary files a/optimism/test/square_mesh_compression.exo and b/optimism/test/square_mesh_compression.exo differ
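For reference, the node pairing used by these compression tests can be reproduced in isolation. The sketch below is not part of the patch (the four coordinates are made up); it sorts the left- and right-face nodes by their y-coordinate and keys each slave node to its master, following the convention of the later test_compression_MPC.py revision:

    import numpy as onp

    coords = onp.array([[-0.5, 0.0], [0.5, 0.0],
                        [-0.5, 1.0], [0.5, 1.0]])   # toy face nodes
    tol = 1e-8
    master = onp.flatnonzero(coords[:, 0] < -0.5 + tol)
    slave  = onp.flatnonzero(coords[:, 0] >  0.5 - tol)

    # sort both sets by y so the pairing is one-to-one across the faces
    master_sorted = master[onp.argsort(coords[master, 1])]
    slave_sorted  = slave[onp.argsort(coords[slave, 1])]

    master_slave_pairs = {int(s): int(m)
                          for s, m in zip(slave_sorted, master_sorted)}
    print(master_slave_pairs)   # {1: 0, 3: 2}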
z_&Lsq13v7k;0Ht+;(!k;I{bi0LmcpdFW?768tMU`APhes@&Oll2Y&Dnc5(0nANUD= zK;%Ol@PWtR2Sgh1f#(p09}xLaC-@L)@B<xpJ zeE#hwUzsh}HQUygEdLIh-?q1l?Ms&5_NUDDGg&{%Z2!vQ+h_5f5Fekf)U-QF2ja7I zC}TU=U&2Rhe>e^gnQ#}|Yx8xHo;H6M$BF&7^>x{HMCrnGLeiZtWQFdUiQ!YTB70EMG z9;^je8>Y*Hy5k>-689%N9v>e9KWIU@i#{O?n)G}mDDLCe7mLS)PUr9G)%5 0.5 - tol) + # Get master and slave node coordinates + master_coords = coords[self.mesh.nodeSets['master']] + slave_coords = coords[self.mesh.nodeSets['slave']] + + # Sort nodes by y-coordinate to ensure correct pairing + master_sorted_idx = np.argsort(master_coords[:, 1]) + slave_sorted_idx = np.argsort(slave_coords[:, 1]) + + # Ensure one-to-one pairing by iterating through sorted nodes + self.master_slave_pairs = { + int(self.mesh.nodeSets['master'][master_sorted_idx[i]]): + int(self.mesh.nodeSets['slave'][slave_sorted_idx[i]]) + for i in range(min(len(master_sorted_idx), len(slave_sorted_idx))) + } + + print("Master-Slave Pairs:", self.master_slave_pairs) + funcSpace = FunctionSpace.construct_function_space(self.mesh, self.quadRule) - self.dofManager = DofManagerMPC(funcSpace, 2, self.EBCs, self.mesh) + self.dofManager = DofManagerMPC(funcSpace, 2, self.EBCs, self.master_slave_pairs) surfaceXCoords = self.mesh.coords[self.mesh.nodeSets['top']][:,0] self.tractionArea = np.max(surfaceXCoords) - np.min(surfaceXCoords) @@ -116,16 +134,13 @@ def write_output(Uu, p, step): Uu = self.dofManager.get_unknown_values(np.zeros(self.mesh.coords.shape)) ivs = mechFuncs.compute_initial_state() p = Objective.Params(bc_data=0., state_data=ivs) - self.objective = Objective.ObjectiveMPC(energy_function, Uu, p, self.dofManager) + self.objective = Objective.ObjectiveMPC(energy_function, Uu, p, self.dofManager, precondStrategy=None) # Load over the steps force = 0. force_inc = self.maxForce / self.steps - # K = self.objective.hessian(Uu) - # print(f"Hessian first row: {K[0,:5]}") - # print(f"Hessian Shape: {K.shape}") for step in range(1, self.steps): print('------------------------------------') From c75566c1e795b0c8f2b667a79cb38ead92f3e4e6 Mon Sep 17 00:00:00 2001 From: SarveshJoshi33 Date: Wed, 30 Apr 2025 14:58:15 -0400 Subject: [PATCH 8/8] Implemented MPCs with Ryan, need to run test cases --- optimism/FunctionSpace.py | 261 ++++++++------------------ optimism/Objective.py | 155 +++++++-------- optimism/test/test_compression_MPC.py | 53 +++++- 3 files changed, 180 insertions(+), 289 deletions(-) diff --git a/optimism/FunctionSpace.py b/optimism/FunctionSpace.py index 8f1cb421..e7115414 100644 --- a/optimism/FunctionSpace.py +++ b/optimism/FunctionSpace.py @@ -387,7 +387,7 @@ def __init__(self, functionSpace, dim, EssentialBCs): self.isUnknown = ~self.isBc self.ids = onp.arange(self.isBc.size).reshape(self.fieldShape) - + print(self.ids.shape) self.unknownIndices = self.ids[self.isUnknown] self.bcIndices = self.ids[self.isBc] @@ -467,212 +467,99 @@ def _make_hessian_bc_mask(self, conns): hessian_bc_mask[e,:,eFlag] = False return hessian_bc_mask - # DofManager for Multi-Point Constrained Problem - -# class DofManagerMPC(DofManager): -# master_slave_pairs: any -# T: any -# s_tilde: any -# isIndependent: any -# dim: any - -# def __init__(self, functionSpace, dim, EssentialBCs, master_slave_pairs): -# super().__init__(functionSpace, dim, EssentialBCs) - -# self.master_slave_pairs = master_slave_pairs -# self.dim = dim # Store the dimension (2D or 3D) -# self.create_mpc_transformation() - -# def create_mpc_transformation(self): -# """ -# Constructs the transformation matrix T and shift vector s_tilde -# for 
condensation of multipoint constraints. - -# - Independent DOFs (`ui` + `up`) remain. -# - Constrained DOFs (`uc`) are condensed out. -# - Transformation follows `u = T ũ + s̃` as per the condensation method. -# """ -# num_dofs = self.ids.size # Total DOFs -# num_total_dofs = num_dofs * self.dim # Account for x, y (or x, y, z) - -# # Identify independent (ui + up) and constrained DOFs (uc) -# independent_mask = ~onp.isin(self.ids, list(self.master_slave_pairs.keys())) -# independent_indices = onp.where(independent_mask)[0] -# constrained_indices = onp.array(list(self.master_slave_pairs.keys())) - -# num_independent_dofs = len(independent_indices) - -# # Initialize T matrix and shift vector s_tilde -# T = onp.zeros((num_total_dofs, num_independent_dofs * self.dim)) -# s_tilde = onp.zeros((num_total_dofs,)) # Ensures correct shape - -# # Identity mapping for independent DOFs (ui and up) -# for i, dof in enumerate(independent_indices): -# for d in range(self.dim): -# T[dof * self.dim + d, i * self.dim + d] = 1 # Block identity - -# # ** Compute Constraint Matrix `C` ** -# C = onp.zeros((len(constrained_indices) * self.dim, num_independent_dofs * self.dim)) - -# # Define `A_c` and `A_p` from constraint relationships -# A_c = onp.eye(len(constrained_indices) * self.dim) # Identity matrix for simplicity -# A_p = onp.zeros((len(constrained_indices) * self.dim, num_independent_dofs * self.dim)) - -# for i, slave in enumerate(constrained_indices): -# master = self.master_slave_pairs[slave] -# master_index = onp.where(independent_indices == master)[0][0] # Locate master in independent list - -# for d in range(self.dim): -# A_p[i * self.dim + d, master_index * self.dim + d] = 1 # Master-Slave dependency - -# # Solve for `C = -A_c⁻¹ A_p` (since `A_c` is identity, `C = -A_p`) -# C = -A_p # Direct assignment - -# # Compute shift vector `s_tilde = A_c⁻¹ b` (assume b = 0 for now) -# b = onp.zeros(len(constrained_indices) * self.dim) # Modify if external forces exist -# s_tilde_constrained = np.linalg.solve(A_c, b) # Solve A_c * uc = b -# for i, slave in enumerate(constrained_indices): -# for d in range(self.dim): -# s_tilde[slave * self.dim + d] = s_tilde_constrained[i * self.dim + d] # **Fixed Indexing** - -# # Assign constraints to transformation matrix `T` -# for i, slave in enumerate(constrained_indices): -# for d in range(self.dim): -# T[slave * self.dim + d, :] = C[i * self.dim + d, :] # Apply condensation row - -# # ** Convert to JAX-compatible arrays after full setup ** -# self.T = np.array(T) # Shape (total DOFs * dim, independent DOFs * dim) -# self.s_tilde = np.array(s_tilde) # Convert shift vector properly - -# # ** Update unknown indices ** -# self.isIndependent = independent_mask -# self.unknownIndices = self.ids[self.isIndependent] -# self.bcIndices = self.ids[self.isBc] - -# ones = onp.ones(num_dofs, dtype=int) * -1 -# self.dofToUnknown = ones -# self.dofToUnknown[self.unknownIndices] = onp.arange(self.unknownIndices.size) - -# return self.T, self.s_tilde # **Explicitly Return Transformation Matrix and Shift Vector** - -# def create_field(self, Uu, Ubc=None): -# """ -# Constructs the field using only the independent DOFs (ui and up). 
-# """ -# num_dofs = self.unknownIndices.shape[0] # Number of independent DOFs -# U_unconstrained = np.zeros((num_dofs, self.dim)) # Shape (num_independent_dofs, dim) - -# # Fix: Convert isIndependent to a NumPy array before using np.where -# independent_indices = onp.where(onp.array(self.isIndependent).reshape(-1, self.dim))[0] - -# # Set independent DOFs (ui and up) -# U_unconstrained = U_unconstrained.at[independent_indices].set(Uu) - -# # Apply transformation: u = T * ũ + s̃ -# return np.matmul(self.T, U_unconstrained.reshape(-1)) + self.s_tilde - -# def get_unknown_values(self, U): -# """ -# Retrieves only the independent DOF values. -# """ -# independent_indices = onp.where(self.isIndependent)[0] # Get independent DOF indices -# return U[independent_indices] # Directly index U without reshaping - -# def get_bc_values(self, U): -# return U[self.isBc] - class DofManagerMPC(DofManager): + dim: int = eqx.field() + master_slave_pairs: dict = eqx.field() + C: np.ndarray = eqx.field() + C_reduced: np.ndarray = eqx.field() + s_reduced: np.ndarray = eqx.field() + s_tilde: np.ndarray = eqx.field() + T: np.ndarray = eqx.field() + isIndependent: np.ndarray = eqx.field() + isUnconstrained: np.ndarray = eqx.field() + is_slave: np.ndarray = eqx.field() + is_indep_dof: np.ndarray = eqx.field() + unconstrainedIndices: np.ndarray = eqx.field() + slaveIndices: np.ndarray = eqx.field() + bcIndices: np.ndarray = eqx.field() + dofToUnknown: np.ndarray = eqx.field() + def __init__(self, functionSpace, dim, EssentialBCs, master_slave_pairs, C, s): - """ - Initializes the DOF manager for MPC using precomputed C (constraint matrix) - and s (shift vector). - - Parameters: - - functionSpace: The function space used for DOF management. - - dim: Number of spatial dimensions (2D or 3D). - - EssentialBCs: Essential boundary conditions applied to the problem. - - master_slave_pairs: Dictionary mapping master nodes to slave nodes. - - C: Constraint matrix that enforces u_s = C * u_m + s. - - s: Shift vector (s̃) for enforcing non-homogeneous constraints. - """ super().__init__(functionSpace, dim, EssentialBCs) - + self.fieldShape = Mesh.num_nodes(functionSpace.mesh), dim + self.dim = dim self.master_slave_pairs = master_slave_pairs - self.dim = dim - self.C = np.array(C) # Constraint matrix (already precomputed) - self.s_tilde = np.array(s) # Shift vector - + self.C = np.array(C) + self.s_tilde = np.array(s) + self.C_reduced = np.array(C) # Reduced constraint matrix + self.s_reduced = np.array(s) # Reduced shift vector + self.isIndependent = None + self.isUnconstrained = None + self.is_indep_dof = None + self.is_slave = None + self.slaveIndices = None + self.unconstrainedIndices = None self.create_mpc_transformation() def create_mpc_transformation(self): - """ - Constructs the transformation matrix T and updates the DOF mapping. 
- """ - num_dofs = self.ids.size # Total DOFs in the system - num_total_dofs = num_dofs * self.dim + slave_nodes = np.array(list(self.master_slave_pairs.keys())) + master_nodes = np.array(list(self.master_slave_pairs.values())) + is_slave = onp.full(self.fieldShape, False, dtype=bool) + # self.is_slave = is_slave.at[slave_nodes, :].set(True) + + self.is_slave = onp.full(self.fieldShape, False, dtype=bool) + self.is_slave[slave_nodes,:] = True - # Identify independent (ui + up) and constrained DOFs (uc) - independent_mask = ~np.isin(self.ids, list(self.master_slave_pairs.keys())) - independent_indices = np.where(independent_mask)[0] - constrained_indices = np.array(list(self.master_slave_pairs.keys())) + # self.is_slave = is_slave + self.isUnconstrained = ~self.is_slave - num_independent_dofs = len(independent_indices) + # print("shape of isUnknown: ", self.isUnknown.shape) - # ** Directly use precomputed C instead of constructing it manually ** - T = np.zeros((num_total_dofs, num_independent_dofs * self.dim)) - s_tilde = np.zeros((num_total_dofs,)) # Ensures correct shape + self.ids = onp.arange(self.is_slave.size).reshape(self.fieldShape) + print(self.ids.shape) + self.unconstrainedIndices = self.ids[self.isUnconstrained] + self.slaveIndices = self.ids[self.is_slave] - # Identity mapping for independent DOFs - for i, dof in enumerate(independent_indices): - for d in range(self.dim): - T[dof * self.dim + d, i * self.dim + d] = 1 + T = np.zeros((self.is_slave.size, self.unconstrainedIndices.size)) - # Assign constraints from precomputed `C` - for i, slave in enumerate(constrained_indices): - for d in range(self.dim): - T[slave * self.dim + d, :] = self.C[i * self.dim + d, :] # Assigning rows from `C` - s_tilde[slave * self.dim + d] = self.s_tilde[i * self.dim + d] # Applying shift + for local_idx, global_idx in enumerate(self.unconstrainedIndices): + T = T.at[global_idx, local_idx].set(1.0) - # Store transformation matrix and shift vector - self.T = T - self.s_tilde = s_tilde + # Build s_tilde: global shift + s_tilde = np.zeros(self.is_slave.size) + + for i, (slave_node, master_node) in enumerate(self.master_slave_pairs.items()): + for d in range(self.dim): + slave_dof = slave_node * self.dim + d + master_dof = master_node * self.dim + d - # ** Update unknown DOFs mapping ** - self.isIndependent = independent_mask - self.unknownIndices = self.ids[self.isIndependent] # Independent DOFs - self.bcIndices = self.ids[self.isBc] # Essential BC indices - ones = np.ones(num_dofs, dtype=int) * -1 - self.dofToUnknown = ones - self.dofToUnknown[self.unknownIndices] = np.arange(self.unknownIndices.size) + # Find column index in reduced DOFs + reduced_master_index = np.where(self.unconstrainedIndices == master_dof)[0] + if reduced_master_index.size == 0: + raise ValueError(f"Master DOF {master_dof} is not independent") - return self.T, self.s_tilde + T = T.at[slave_dof, reduced_master_index[0]].set(1.0) + s_tilde = s_tilde.at[slave_dof].set(self.s_reduced[i * self.dim + d]) - def create_field(self, Uu, Ubc=None): - """ - Constructs the field using only the independent DOFs (ui and up). - Applies transformation `u = T * ũ + s̃`. 
- """ - num_dofs = self.unknownIndices.shape[0] - U_unconstrained = np.zeros((num_dofs, self.dim)) + self.T = T + self.s_tilde = s_tilde - # Set independent DOFs - independent_indices = np.where(np.array(self.isIndependent).reshape(-1, self.dim))[0] - U_unconstrained[independent_indices] = Uu + # Track dof-to-unknown mapping + ones = onp.ones(self.is_slave.size, dtype=int) * -1 + dofToUnknown = ones + dofToUnknown[self.unconstrainedIndices] = onp.arange(self.unconstrainedIndices.size) + self.dofToUnknown = dofToUnknown - # Apply transformation: u = T * ũ + s̃ - return np.matmul(self.T, U_unconstrained.reshape(-1)) + self.s_tilde + def create_field(self, Uu, Ubc=0.0): + U_flat = np.matmul(self.T, Uu) + self.s_tilde + return U_flat.reshape(self.fieldShape) def get_unknown_values(self, U): - """ - Retrieves only the independent DOF values. - """ - independent_indices = np.where(self.isIndependent)[0] - return U[independent_indices] + print("shape of U in get unconstrained values: ", U.shape) + print("shape of isUnconstrained: ", self.isUnconstrained.shape) + return U[self.isUnknown] - def get_bc_values(self, U): - """ - Retrieves boundary condition values. - """ - return U[self.isBc] + def get_slave_values(self, U): + return U[self.is_slave] diff --git a/optimism/Objective.py b/optimism/Objective.py index b0d15ac4..a285e8d4 100644 --- a/optimism/Objective.py +++ b/optimism/Objective.py @@ -270,104 +270,75 @@ def get_residual(self, x): return self.gradient(self.scaling * x) # Objective class for handling Multi-Point Constraints - -# class ObjectiveMPC: -# def __init__(self, f, x, p, dofManagerMPC, precondStrategy=None): -# """ -# Objective function for Multi-Point Constraints (MPC), ensuring -# condensation of independent DOFs (ui, up). -# """ -# self.precond = None -# self.precondStrategy = precondStrategy -# self.dofManagerMPC = dofManagerMPC # Store DOF Manager with MPC -# self.p = p - -# # JIT compile function derivatives -# self.objective = jit(f) -# self.grad_x = jit(grad(f, 0)) -# self.grad_p = jit(grad(f, 1)) -# self.hess = jit(jacfwd(self.grad_x, 0)) - -# # Create transformation matrix & shift vector -# self.T = self.dofManagerMPC.T -# self.s_tilde = self.dofManagerMPC.s_tilde - -# def value(self, x): -# """Compute objective function value.""" -# x_full = self.expand_to_full_dofs(x) -# return self.objective(x_full, self.p) - -# def gradient(self, x): -# """Compute reduced gradient.""" -# x_full = self.expand_to_full_dofs(x) -# grad_full = self.grad_x(x_full, self.p) -# return np.matmul(self.T.T, grad_full) # Reduce gradient - -# def hessian(self, x): -# """Compute reduced Hessian H̃ = Tᵀ H_full T""" -# print("Computing Hessian with MPC...") - -# x_full = self.expand_to_full_dofs(x) -# H_full = self.hess(x_full, self.p) # Compute full Hessian - -# return np.matmul(self.T.T, np.matmul(H_full, self.T)) # Condensed Hessian - -# def expand_to_full_dofs(self, x): -# """ -# Expand reduced DOF vector `x` (ui, up) to full DOFs (including uc) -# using transformation: u = T ũ + s̃. 
-# """ -# x_full = np.matmul(self.T, x) + self.s_tilde -# return x_full - -# def update_precond(self, x): -# """Update preconditioner with reduced Hessian.""" -# if self.precondStrategy is None: -# print("Updating with dense preconditioner in ObjectiveMPC.") -# H_reduced = csc_matrix(self.hessian(x)) -# self.precond = H_reduced -# else: -# self.precondStrategy.initialize(x, self.p) -# self.precond = self.precondStrategy.precond_at_attempt - class ObjectiveMPC(Objective): - def __init__(self, objective_func, dofManagerMPC, x0, p, precondStrategy=None): + def __init__(self, objective_func, x_full, p, dofManagerMPC, precondStrategy=None): + """ + ObjectiveMPC: wraps the original energy functional to solve only for reduced DOFs. + - Applies u = T * ũ + s̃ for MPC condensation. + - Automatically condenses gradient and Hessian. + """ self.dofManagerMPC = dofManagerMPC - self.T = np.array(dofManagerMPC.T) # Transformation matrix - self.s_tilde = np.array(dofManagerMPC.s_tilde) # Shift vector - self.scaling = 1.0 # Optional scaling factor + self.T = dofManagerMPC.T + self.s_tilde = dofManagerMPC.s_tilde + self.p = p + self.precondStrategy = precondStrategy + + # Store full functional and derivatives + self.full_objective_func = jit(objective_func) + self.full_grad_x = jit(grad(objective_func, 0)) + self.full_hess = jit(jacfwd(self.full_grad_x, 0)) + + self.scaling = 1.0 self.invScaling = 1.0 - def condensed_objective(xBar, p): - x = self.expand_to_full_dofs(xBar) - return objective_func(x, p) - - xBar0 = self.reduce_to_independent_dofs(x0) - super().__init__(condensed_objective, xBar0, p, precondStrategy) - + # Reduced initial guess from full vector + self.x_reduced0 = self.reduce_to_independent_dofs(x_full) + + # Define reduced-space objective and gradient + self.objective = jit(lambda x_red, p: self.full_objective_func(self.expand_to_full_dofs(x_red), p)) + self.grad_x = jit(lambda x_red, p: self.reduce_gradient(self.full_grad_x(self.expand_to_full_dofs(x_red), p))) + + self.precond = SparseCholesky() + def reduce_to_independent_dofs(self, x_full): - """Extracts independent DOFs (ui, up) from full DOF vector.""" - return np.matmul(self.T.T, x_full - self.s_tilde) + """Extract reduced DOFs from full vector.""" + return self.dofManagerMPC.get_unknown_values(x_full) def expand_to_full_dofs(self, x_reduced): - """Expands reduced DOF vector (ui, up) to full DOFs including uc.""" - return np.matmul(self.T, x_reduced) + self.s_tilde - - def get_value(self, x): - return self.value(self.expand_to_full_dofs(x)) - - def get_residual(self, x): - return self.gradient(self.expand_to_full_dofs(x)) - - def hessian(self, x): - """Computes the condensed Hessian for the independent DOFs.""" - x_full = self.expand_to_full_dofs(self.scaling * x) - H_full = self.hess(x_full, self.p) # Full Hessian from original problem - H_reduced = np.matmul(self.T.T, np.matmul(H_full, self.T)) # Condensed Hessian + """Reconstruct full DOF vector using u = T ũ + s̃.""" + return self.dofManagerMPC.create_field(x_reduced) + + def reduce_gradient(self, grad_full): + """Project full gradient to reduced space.""" + return self.dofManagerMPC.get_unknown_values(grad_full) + + def value(self, x_reduced): + """Return energy functional in reduced space.""" + return self.objective(x_reduced, self.p) + + def gradient(self, x_reduced): + """Return reduced gradient.""" + return self.grad_x(x_reduced, self.p) + + def hessian(self, x_reduced): + """Compute reduced Hessian H̃ = Tᵀ H T.""" + x_full = self.expand_to_full_dofs(self.scaling * x_reduced) 
+        H_full = self.full_hess(x_full, self.p)
+        H_reduced = np.matmul(self.T.T, np.matmul(H_full, self.T))
         return H_reduced
-
-    def update_precond(self, x):
-        """Updates preconditioner using the condensed Hessian."""
+
+    def update_precond(self, x_reduced):
+        """Update preconditioner from reduced Hessian."""
         print("Updating with condensed Hessian preconditioner.")
-        H_reduced = csc_matrix(self.hessian(x))  # Use reduced Hessian
-        self.precondStrategy.update(H_reduced)
+        H_reduced = csc_matrix(self.hessian(x_reduced))
+        self.precond.update(lambda attempt: H_reduced)
+
+    def apply_precond(self, vx):
+        return self.precond.apply(vx) if self.precond else vx
+
+    def multiply_by_approx_hessian(self, vx):
+        return self.precond.multiply_by_approximate(vx) if self.precond else vx
+
+    def check_stability(self, x_reduced):
+        if self.precond:
+            self.precond.check_stability(x_reduced, self.p)
diff --git a/optimism/test/test_compression_MPC.py b/optimism/test/test_compression_MPC.py
index 1bc78332..5f85ef78 100644
--- a/optimism/test/test_compression_MPC.py
+++ b/optimism/test/test_compression_MPC.py
@@ -58,39 +58,69 @@ def create_field(self, Uu):
         def get_ubcs():
             V = np.zeros(self.mesh.coords.shape)
             return self.dofManager.get_bc_values(V)
-
-        return self.dofManager.create_field(Uu, get_ubcs())
+        U = self.dofManager.create_field(Uu)
+        # .at[...].set(...) returns a new array in JAX, so the result must be reassigned
+        U = U.at[self.dofManager.isBc].set(get_ubcs())
+        return U
+        # return self.dofManager.create_field(Uu, get_ubcs())
 
+    def create_C_and_s(self):
+        """
+        Create constraint matrix C and shift vector s based on master-slave pairs (JAX-safe).
+        """
+        num_pairs = len(self.master_slave_pairs)
+        total_dofs = self.mesh.coords.shape[0] * self.dim
+
+        # np is jax.numpy in this module, so build C and s as JAX arrays and fill them
+        # with functional .at updates
+        C = np.zeros((num_pairs * self.dim, total_dofs))
+        s = np.zeros((num_pairs * self.dim,))
+
+        # master_slave_pairs is keyed by the slave node id and maps to its master node id
+        slave_nodes = list(self.master_slave_pairs.keys())
+        master_nodes = list(self.master_slave_pairs.values())
+
+        for i, (master, slave) in enumerate(zip(master_nodes, slave_nodes)):
+            for d in range(self.dim):
+                row_idx = i * self.dim + d
+                col_idx = master * self.dim + d
+                C = C.at[row_idx, col_idx].set(1.0)
+
+        # s remains zero unless the constraints carry a prescribed shift
+        return C, s
+
     def reload_mesh(self):
         origMesh = ReadExodusMesh.read_exodus_mesh(self.input_mesh)
         self.mesh = Mesh.create_higher_order_mesh_from_simplex_mesh(origMesh, order=2, createNodeSetsFromSideSets=True)
 
         coords = self.mesh.coords
+        self.dim = coords.shape[1]  # either 2 or 3 dimensions
         tol = 1e-8
 
-        # Creating nodesets for master and slave
+        # Create master and slave node sets
         self.mesh.nodeSets['master'] = np.flatnonzero(coords[:,0] < -0.5 + tol)
         self.mesh.nodeSets['slave'] = np.flatnonzero(coords[:,0] > 0.5 - tol)
 
-        # Get master and slave node coordinates
+        # Gather face coordinates and sort by y-coordinate for pairing
         master_coords = coords[self.mesh.nodeSets['master']]
         slave_coords = coords[self.mesh.nodeSets['slave']]
 
-        # Sort nodes by y-coordinate to ensure correct pairing
         master_sorted_idx = np.argsort(master_coords[:, 1])
         slave_sorted_idx = np.argsort(slave_coords[:, 1])
 
-        # Ensure one-to-one pairing by iterating through sorted nodes
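+        # Pair the faces one-to-one: keys are slave node ids, values are the matched
+        # master node ids (matched by ascending y-coordinate)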
         self.master_slave_pairs = {
-            int(self.mesh.nodeSets['master'][master_sorted_idx[i]]):
-            int(self.mesh.nodeSets['slave'][slave_sorted_idx[i]])
+            int(self.mesh.nodeSets['slave'][slave_sorted_idx[i]]):
+            int(self.mesh.nodeSets['master'][master_sorted_idx[i]])
             for i in range(min(len(master_sorted_idx), len(slave_sorted_idx)))
         }
         print("Master-Slave Pairs:", self.master_slave_pairs)
 
         funcSpace = FunctionSpace.construct_function_space(self.mesh, self.quadRule)
-        self.dofManager = DofManagerMPC(funcSpace, 2, self.EBCs, self.master_slave_pairs)
+        # The test case constructs the constraint matrix C and shift vector s here
+        self.C, self.s = self.create_C_and_s()
+
+        # and passes them into DofManagerMPC
+        self.dofManager = DofManagerMPC(funcSpace, self.dim, self.EBCs, self.master_slave_pairs, self.C, self.s)
+
+        # Top surface area
         surfaceXCoords = self.mesh.coords[self.mesh.nodeSets['top']][:,0]
         self.tractionArea = np.max(surfaceXCoords) - np.min(surfaceXCoords)
 
@@ -132,9 +162,12 @@ def write_output(Uu, p, step):
 
         # Problem Set Up
         Uu = self.dofManager.get_unknown_values(np.zeros(self.mesh.coords.shape))
+        print("Mesh coords: ", self.mesh.coords.shape)
+        print("Shape of Uu: ", Uu.shape)
+        # Uu = self.dofManager.get_unknown_values(np.zeros((self.mesh.coords.shape[0] * self.dim)))
         ivs = mechFuncs.compute_initial_state()
         p = Objective.Params(bc_data=0., state_data=ivs)
-        self.objective = Objective.ObjectiveMPC(energy_function, Uu, p, self.dofManager, precondStrategy=None)
+        self.objective = Objective.Objective(energy_function, Uu, p, precondStrategy=None)
 
         # Load over the steps
         force = 0.
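+        # force tracks the applied load as the steps below advance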