From 2d1061a04ea1a50bae6f172d05e731b9114207b0 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Sun, 12 Nov 2023 21:08:41 +0100 Subject: [PATCH 01/34] Preliminary version of FEM solver. --- src/bgem/upscale/__init__.py | 0 src/bgem/upscale/fem.py | 393 +++++++++++++++++++++++++++++++++++ src/bgem/upscale/fem_plot.py | 100 +++++++++ src/bgem/upscale/fields.py | 47 +++++ tests/upscale/test_fem.py | 148 +++++++++++++ 5 files changed, 688 insertions(+) create mode 100644 src/bgem/upscale/__init__.py create mode 100644 src/bgem/upscale/fem.py create mode 100644 src/bgem/upscale/fem_plot.py create mode 100644 src/bgem/upscale/fields.py create mode 100644 tests/upscale/test_fem.py diff --git a/src/bgem/upscale/__init__.py b/src/bgem/upscale/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/bgem/upscale/fem.py b/src/bgem/upscale/fem.py new file mode 100644 index 0000000..e66981c --- /dev/null +++ b/src/bgem/upscale/fem.py @@ -0,0 +1,393 @@ +""" +Exact FEM based homogenization using regular d-dimansional grid. +""" +from functools import cached_property +import numpy as np +from .fields import tn_to_voigt, voigt_coords + + +def Q1_1d_basis(points): + """ + Coeeficients of 1D Q1 basis. + + return a_ij matrix + where i row is i-th basis function with coefficients: + p_i(x) = a_i0 + a_i1 * x + a_i2 * x**2 + ... + """ + order = len(points) + res = np.empty((order, order)) + prod = np.ones(order) + for i in range(order): + res[i] = prod + prod = prod * points + monomial_at_point = res + return np.linalg.inv(monomial_at_point) + +def poly_diff_1d(poly_functions): + """ + poly_functions (n, m) shape array with m coefficients for n functions + Returns derivatives of the functions, shape: (n, m-1) + """ + order = poly_functions.shape[1] + return poly_functions[:, 1:] * np.arange(1, order) + + + +def eval_1d(poly_functions, x): + """ + Evaluate polynomials `poly_functions`, shape (n, order) for + vector of values `x`. 
+ return shape: (n, len(x)) + """ + #x = np.atleast_1d(x) + # print((order, *x.shape)) + x = np.array(x) + abs_coef = poly_functions[:, np.full_like(x, -1, dtype=np.int64)] + # broadcast abs term to the result shape + res = abs_coef + for coef in poly_functions.T[-2::-1]: + res = res * x[None, :] + coef[:, None] + return res + + +# evaluation of tensor product basis functions +# +def flat_dim(x, dim): + """ + Flatten dimension related axes, i.e. first $d$ axes of the $x$ array. + x shape (n_1, ... n_d, other) -> (n_1 * .. * n_d, other) + """ + return x.reshape((-1, *x.shape[dim:])) + + +def tensor_dim(x, dim, order): + """ + x shape (order**dim, other) -> (order, ... , order, other) + + """ + assert x.shape[0] == order ** dim + new_shape = (*(dim * [order]), *x.shape[1:]) + return x.reshape(new_shape) + + +def outer_product_along_first_axis(arrays): + """ + arrays: list of `k` arrays a_i with shape [n_i, m] + :param arrays: + :return: res with shape [n_1, ... n_k, m] + """ + _, n = arrays[0].shape + result = arrays[0] + for arr in arrays[1:] : + assert arr.shape[1] == n + result = result[..., np.newaxis, :] * arr + return result + + +class Fe: + """ + Tensor product basis. + """ + + @classmethod + def Q1(cls, dim, order): + order = order + 1 + points = np.linspace(0, 1, order) + basis = Q1_1d_basis(points) + return cls(dim, basis) + + def __init__(self, dim, basis_1d): + """ + """ + n, m = basis_1d.shape + assert n == m + self.n_dofs_1d = n + self.dim = dim + self.basis = basis_1d + self.diff_basis = poly_diff_1d(basis_1d) + + @property + def n_dofs(self): + return self.n_dofs_1d ** self.dim + + def eval(self, points): + """ + Evaluate all tensor product basis functions at given points. 
+ """ + dim, n_points = points.shape + assert dim == self.dim + #print(self.basis) + #print(points.ravel()) + dim_basis_values = eval_1d(self.basis, points.ravel()).reshape(-1, self.dim, n_points) + # shape (order, dim , n_points)) + tensor_values = outer_product_along_first_axis(dim_basis_values.transpose([1, 0, 2])) + # shape (order, ... order, n_points) + return flat_dim(tensor_values, self.dim) + + def grad_eval(self, points): + """ + Evaluate gradients of all tensor product basis functions at given points. + """ + dim, n_points = points.shape + assert dim == self.dim + dim_basis_values = eval_1d(self.basis, points.ravel()).reshape(-1, self.dim, n_points) + # shape (order, dim , n_points)) + diff_vals = eval_1d(self.diff_basis, points.ravel()) + dim_diff_values = diff_vals.reshape(-1, self.dim, n_points) + # shape (order, dim , n_points)) + + result = [] + for i_dim in range(dim): + diff_product_basis_list = [ + dim_diff_values[:, j_dim, :] if j_dim == i_dim else dim_basis_values[:, j_dim, :] + for j_dim in range(dim)] + prod_basis = outer_product_along_first_axis(diff_product_basis_list) + result.append(flat_dim(prod_basis, self.dim)) + result = np.stack(result) + # print(result.shape) + return result + + def ref_el_dofs(self): + n = self.n_dofs_1d + grid_slice = tuple(self.dim * [slice(0, n)]) + return np.mgrid[grid_slice].reshape(self.dim, -1) + + def __repr__(self): + return f"Q1(d={self.dim}, order={self.n_dofs_1d - 1})" + + + +class Grid: + """ + Regular grid, distribution of DOFs, System matrix assembly. + """ + def __init__(self, size, n_steps, fe: Fe): + """ + dim - dimension 1, 2, 3 + size - domain size (Lx, Ly, Lz) or just scalar L for a cube domain + n_steps - number of elements in each axis (nx, ny, nz) or just `n` for isotropic division + """ + self.dim = fe.dim + # Spatial dimension. + self.size = size * np.ones(self.dim) + # Array with physital dimensions of the homogenization domain. 
+ self.n_steps = n_steps * np.ones(self.dim, dtype=np.int64) + # Int Array with number of elements for each axis. + self.fe = fe + # Tensor product finite element class. + + self.n_bc_dofs = 0 + # Number of bounday DOFs, first part of the calculation numbering of DOFs. + self.natur_map = None + # gives natural dof index for given calculation dof index + # natural numbering comes from flattened (ix, iy, iz) dof coordinates + # calculation numbering puts Dirichlet DOFs at the begining + self.el_dofs = None + # shape (n_local_dofs, n_elements), DOF indices in calculation numbering + + self.make_numbering(self.dim) + + @cached_property + def step(self): + # Array with step size in each axis. + return self.size / self.n_steps + + @property + def n_loc_dofs(self): + return self.fe.n_dofs + + @property + def dofs_shape(self): + return self.n_steps * (np.array(self.fe.n_dofs_1d) - 1) + 1 + @property + def n_dofs(self): + return np.prod(self.dofs_shape) + + @property + def n_elements(self): + return np.prod(self.n_steps) + + @property + def ax_dofs(self): + """ + Number of dofs in each axis. + :return: + """ + return self.n_steps * (self.fe.n_dofs_1d - 1) + 1 # shape (dim, ) + + @property + def dof_to_coord(self): + # Array for computiong global dof index from dof int coords. 
+ return np.cumprod([1, *self.ax_dofs[:-1]]) + + @cached_property + def bc_coords(self): + bc_natur_indeces = self.natur_map[np.arange(self.n_bc_dofs, dtype=np.int64)] + return self.idx_to_coord(bc_natur_indeces) + + + @cached_property + def bc_points(self): + return self.bc_coords * self.step[:, None] + + def make_numbering(self, dim): + # grid of integers, set to (-1) + # go through boundary, enumerate, skip filled values + # go through internal nodes, enumerate remaining + # reshape -> computation_from_natural + assert self.ax_dofs.shape == (self.dim,) + n_dofs = np.prod(self.ax_dofs) + # mark boundary dofs -1, interior dofs -2 + calc_map = np.full(self.ax_dofs, -1, dtype=np.int64) + interior_slice = tuple(self.dim * [slice(1, -1)]) + calc_map[interior_slice] = -2 + + # construct new numbering of dofs + indices = np.where(calc_map == -1) + self.n_bc_dofs = len(indices[0]) + # print(self.n_bc_dofs, indices) + calc_map[indices] = np.arange(0, self.n_bc_dofs, dtype=np.int64) + indices = np.where(calc_map == -2) + calc_map[indices] = np.arange(self.n_bc_dofs, n_dofs, dtype=np.int64) + calc_map = calc_map.flatten() + self.natur_map = np.empty(len(calc_map), dtype=np.int64) + self.natur_map[calc_map[:]] = np.arange(len(calc_map), dtype=np.int64) + assert len(self.natur_map) == self.n_dofs + + # create element dofs mapping in natural dofs numbering + ref_dofs = self.fe.ref_el_dofs() # shape (dim, n_local_dofs) + assert ref_dofs.shape == (self.dim, self.fe.n_dofs_1d ** self.dim) + + #print(ax.shape, ref_dofs.shape) + ref_dofs = (self.dof_to_coord[None, :] @ ref_dofs).ravel() + #print(ref_dofs.shape) + + # Creating a meshgrid for each dimension + indices = np.meshgrid(*[np.arange(n) for n in self.n_steps], indexing='ij') + + # Calculating the tensor values based on the formula and axes + el_dofs = np.zeros(self.n_steps, dtype=np.int64) + o = self.fe.n_dofs_1d - 1 + for d in range(dim): + el_dofs += (self.dof_to_coord[d] * o ** (d + 1)) * indices[d] + #print(el_dofs) + 
el_dofs = el_dofs[..., None] + ref_dofs[None, :] # shape: nx, nY, nz, loc_dofs + self.el_dofs = calc_map[el_dofs.reshape(-1, el_dofs.shape[-1])] + + def barycenters(self): + """ + Barycenters of elements + :return: shape (dim, n_els) + """ + bary_axes = [self.step[i] * (np.arange(self.n_steps[i]) + 0.5) for i in range(self.dim)] + return np.stack(np.meshgrid(*bary_axes)).reshape(self.dim, -1) + + def idx_to_coord(self, dof_natur_indices): + """ + Produce physical coordinates for given natural dof indices. + :param dof_natur_indices: np.int64 array + :return: integer coordinates: (dim, *dof_natur_indeces.shape) + """ + indices = dof_natur_indices + coords = np.empty((self.dim, *dof_natur_indices.shape), dtype=np.int64) + for i in range(self.dim-1, 0, -1): + indices, coords[i] = np.divmod(indices, self.dof_to_coord[i]) + coords[0] = indices + return coords + + def __repr__(self): + msg = \ + f"{self.fe} Grid: {self.n_steps} Domain: {self.size}\n" + \ + f"Natur Map:\n{self.natur_map}\n" + \ + f"El_DOFs:\n{self.el_dofs}\n" + return msg + + @cached_property + def laplace(self): + """ + Return matrix M. Shape (n_voigt, n_loc_dofs * n_loc_dofs). + + This should be used to assmebly the local matrices like: + A_loc = M[None, :, :] @ K[:, None, :] + where A_loc is array of local matrices (n_elements, n_loc_dofs**2) + and K is (n_elements, K_tn_size), where K_tn_size is just upper triangle values + i.e. 1, 3, 6 for dim=1, 2, 3. 
+ """ + # we integrate square of gradients, which is poly of degree 2*(deg -1) = 2deg - 2 + # Gaussian quadrature integrates exactly degree 2*deg -1 + deg = 2* (self.fe.n_dofs_1d - 1) # 2 * degeree of base function polynomials + + # points and wights on [0, 1] interval + points, weights = np.polynomial.legendre.leggauss(deg) + points = 0.5 * (points + 1.0) + weights = 0.5 * weights + + msh_params = self.dim * [points] + points_tn = np.stack(np.meshgrid(*msh_params)).reshape((self.dim, -1)) + outer_params = [jac * weights[:, None] for jac in self.step] + weights_tn = outer_product_along_first_axis(outer_params).ravel() + grad = self.fe.grad_eval(points_tn) # shape: (dim, n_loc_dofs, n_quads) + # dim, n_loc_dofs, n_quad = grad.shape + weight_grad = weights_tn[None, None :] * grad[:, :, :] # (dim, n_loc_dofs, n_quads) + full_tn_laplace = grad[:, None, :, :] @ weight_grad[None, :, :, :].transpose(0, 1, 3, 2) + + M = [ + full_tn_laplace[i, j, :, :] + if i==j else + full_tn_laplace[i, j, :, :] + full_tn_laplace[j, i, :, :] + for i, j in voigt_coords[self.dim] + ] + # M shape [n_voight, n_loc_dofs, n_loc_dofs] + # return np.reshape(M, (len(M), -1)).T + M = np.stack(M) + assert M.shape == (len(voigt_coords[self.dim]), self.fe.n_dofs, self.fe.n_dofs) + return M.reshape((M.shape[0], -1)) + + @cached_property + def loc_mat_ij(self): + """ + returns: rows, cols + both with shape: (loc_dofs, loc_dofs, n_elements) + Provides rows and cols for the local matrices. 
+ """ + n_elements, n_loc_dofs = self.el_dofs.shape + rows = np.tile(self.el_dofs[:, :, None], [1, 1, n_loc_dofs]) + cols = np.tile(self.el_dofs[:, None, :], [1, n_loc_dofs, 1]) + return rows, cols + + + def assembly_dense(self, K_voight_tensors): + """ + K_voight_tensors, shape: (n_elements, n_voight) + """ + assert K_voight_tensors.shape == (self.n_elements, len(voigt_coords[self.dim])) + laplace = self.laplace + # locmat_dofs, n_voight = laplace.shape + # Use transpositions of intputs and output in order to enforce cache efficient storage. + #loc_matrices = np.zeros((self.n_loc_dofs self.n_loc_dofs, self.n_elements)) + #np.matmul(.T[:, None, :], laplace[None, :, :], out=loc_matrices.reshape(-1, self.n_elements).T) + loc_matrices = K_voight_tensors[:, None, :] @ laplace[None, :, :] + loc_matrices = loc_matrices.reshape((self.n_elements, self.fe.n_dofs, self.fe.n_dofs)) + A = np.zeros((self.n_dofs, self.n_dofs)) + # Use advanced indexing to add local matrices to the global matrix + np.add.at(A, self.loc_mat_ij, loc_matrices) + return A + def solve_system(self, K, p_grad_bc): + """ + :param K: array, shape: (n_voight, n_elements) + :param p_grad_bc: array, shape: (dim, n_vectors) + usually n_vectors >= dim + :return: pressure, shape: (n_vectors, n_dofs) + """ + d, n_rhs = p_grad_bc.shape + assert d == self.dim + A = self.assembly_dense(K) + pressure_bc = p_grad_bc.T @ self.bc_points # (n_vectors, n_bc_dofs) + B = pressure_bc @ A[:self.n_bc_dofs, self.n_bc_dofs:] # (n_vectors, n_interior_dofs) + pressure = np.empty((n_rhs, self.n_dofs)) + pressure[:, :self.n_bc_dofs] = pressure_bc + pressure[:, self.n_bc_dofs:] = np.linalg.solve(A[self.n_bc_dofs:, self.n_bc_dofs:], -B.T).T + pressure_natur = np.empty_like(pressure) + pressure_natur[:, self.natur_map[:]] = pressure[:, :] + return pressure_natur.reshape((n_rhs, *self.dofs_shape)) + diff --git a/src/bgem/upscale/fem_plot.py b/src/bgem/upscale/fem_plot.py new file mode 100644 index 0000000..3a3f4e0 --- /dev/null +++ 
b/src/bgem/upscale/fem_plot.py @@ -0,0 +1,100 @@ +import pyvista as pv +import numpy as np + +import matplotlib.pyplot as plt +import numpy as np + + +def plot_pressure_fields(x_grid, y_grid, pressure): + """ + x_grid: shape (M,) + y_grid: shape (N,) + Plots K scalar fields stored in a (K, M, N) shaped array `pressure`. + + Parameters: + pressure (numpy.ndarray): An array of shape (K, M, N) representing K scalar fields. + """ + K, M, N = pressure.shape + + # Create a figure and K subplots in a single row + fig, axes = plt.subplots(1, K, figsize=(K * 5, 5), sharey=True) + + # Setting the limits for all plots + #x_limit = (0, M) + #y_limit = (0, N) + + # Find the global min and max values for a common color scale + vmin, vmax = pressure.min(), pressure.max() + X, Y = np.meshgrid(x_grid, y_grid) + for i in range(K): + # Plot each scalar field + im = axes[i].pcolormesh(X, Y, pressure[i], vmin=vmin, vmax=vmax, shading='gouraud') + + #im = axes[i].imgshow(pressure[i, :, :], vmin=vmin, vmax=vmax, + # origin='lower', extent=x_limit + y_limit) + + # Set title for each subplot + axes[i].set_title(f"Field {i + 1}") + + # Set limits for x and y axis + #axes[i].set_xlim(x_limit) + #axes[i].set_ylim(y_limit) + + # Add a common color bar + fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.8) + # Show the plot + #plt.tight_layout() + plt.show() + + + + +def plot_grid(n=5): + """ + Create + :param n: + :return: + """ + # Create a PyVista mesh from the points + points = np.mgrid[:n, :n, :n] / (n - 1.0) + mesh = pv.StructuredGrid(*points[::-1]) + points = points.reshape((3, -1)) + return points, mesh + + +def scatter_3d(mesh, values, n=5): + # Normalize the function values for use in scaling + scaled_values = (values - np.min(values)) / (np.max(values) - np.min(values)) + + mesh['scalars'] = scaled_values + + # Create the glyphs: scale and color by the scalar values + geom = pv.Sphere(phi_resolution=8, theta_resolution=8) + glyphs = mesh.glyph(geom=geom, scale='scalars', 
factor=0.3) + + # Create a plotting object + p = pv.Plotter() + + # Add the glyphs to the plotter + p.add_mesh(glyphs, cmap='coolwarm', show_scalar_bar=True) + + # Add axes and bounding box for context + p.add_axes() + p.show_grid() + p.add_bounding_box() + + # Show the plot + p.show() + + +def plot_fn_3d(fn, n=5): + points, mesh = plot_grid(n) + values = fn(*points[::-1]) + scatter_3d(mesh, values) + + +def f(x, y, z): + return x * (1 - y) * z * (1 - z) * 4 + + +#plot_fn_3d(f) \ No newline at end of file diff --git a/src/bgem/upscale/fields.py b/src/bgem/upscale/fields.py new file mode 100644 index 0000000..195f127 --- /dev/null +++ b/src/bgem/upscale/fields.py @@ -0,0 +1,47 @@ +""" +Functions for construction of structured media fields. +""" +import numpy as np + +voigt_coords = { + 1: [(0, 0)], + 2: [(0, 0), (1, 1), (0, 1)], + 3: [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)] + } + +def tn_to_voigt(tn): + """ + (dim, dim, N) -> (n_voight, N) + :param array: + :return: + """ + m, n, N = tn.shape + assert m == n + dim = m + tn_voigt = [ + tn[i, j, :] + for i, j in voigt_coords[dim] + ] + return np.array(tn_voigt) + +def _tn(k, dim): + if type(k) == float: + k = k * np.eye(dim) + assert k.shape == (dim, dim) + return k +def K_structured(points, K0, Kx=None, fx=2.0, Ky=None, fy=4.0, Q=None): + x = points # shape: (N, dim) + _, dim = x.shape + K0 = _tn(K0, dim) + if Kx is None: + Kx = K0 + if Ky is None: + Ky = Kx + Kx = _tn(Kx, dim) + Ky = _tn(Ky, dim) + t = 0.5 * (np.sin(2 * np.pi * fx * x[0]) + 1) + s = 0.5 * (np.sin(2 * np.pi * fy * x[1]) + 1) + K = t * K0 + (1 - t) * (s * Kx + (1 - s) * Ky) + if Q is not None: + K = Q.T @ K @ Q + return tn_to_voigt(K) \ No newline at end of file diff --git a/tests/upscale/test_fem.py b/tests/upscale/test_fem.py new file mode 100644 index 0000000..4a7623a --- /dev/null +++ b/tests/upscale/test_fem.py @@ -0,0 +1,148 @@ +import numpy as np +import pytest + +from bgem.upscale import fem, fields, fem_plot + +def basis_1(): + points 
= np.array([0.0, 1.0]) + basis = fem.Q1_1d_basis(points) + return basis, points + +def basis_2(): + points = np.array([0.0, 0.5, 1.0]) + basis = fem.Q1_1d_basis(points) + return basis, points + + + + +def test_Q1_1D_basis(): + basis_order_1, points = basis_1() + assert basis_order_1.shape == (2, 2) + np.allclose(fem.eval_1d(basis_order_1, points), np.eye(2,2)) + print("Q1 order 1 basis: \n", basis_order_1) + + basis_order_2, points = basis_2() + assert basis_order_2.shape == (3, 3) + np.allclose(fem.eval_1d(basis_order_2, points), np.eye(3, 3)) + + print("Q1 order 2 basis: \n", basis_order_2) + + + + +def test_poly_diff_1d(): + diff_order_1 = fem.poly_diff_1d(basis_1()[0]) + assert diff_order_1.shape == (2, 1) + print("Q1 order 1 diff basis: \n", diff_order_1) + diff_order_2 = fem.poly_diff_1d(basis_2()[0]) + assert diff_order_2.shape == (3, 2) + print("Q1 order 2 diff basis: \n", diff_order_2) + +def test_eval_1d(): + basis_order_1, _ = basis_1() + points = [0.2, 0.7] + values = [[0.2, 0.7], [0.8, 0.3]] + np.allclose(fem.eval_1d(basis_order_1, points), values) + +def test_Fe_Q1(): + for dim in range(1, 4): + order = 1 + f = fem.Fe.Q1(dim, order) + points_1d = np.linspace(0, 1, 2*order + 1) + points = np.stack([ + points_1d, + *(dim - 1) * [np.zeros_like(points_1d)] + ]) + basis = f.eval(points) + assert basis.shape == ((order + 1)**dim, len(points_1d)) + grad = f.grad_eval(points) + assert grad.shape == (dim, (order + 1)**dim, len(points_1d)) + + +def test_flatten_dim(): + x = np.outer([1, 2, 3, 4, 5, 6, 7, 8], [10, 100, 1000]) + tensor_x = fem.tensor_dim(x, 3, 2) + assert tensor_x.shape == (2, 2, 2, 3) + #print(tensor_x) + flat_x = fem.flat_dim(tensor_x, 3) + assert flat_x.shape == x.shape + assert np.allclose(flat_x, x) + + +def test_grid_numbering(): + # Test Grid numbering + + dim = 1 + order = 2 + g = fem.Grid(100.0, 4, fem.Fe.Q1(dim, order)) + print(g) + + dim = 2 + order = 1 + g = fem.Grid(100.0, 4, fem.Fe.Q1(dim, order)) + print(g) + + dim = 3 + order = 1 
+ g = fem.Grid(100.0, 3, fem.Fe.Q1(dim, order)) + print(g) + +def test_grid_bc(): + g = fem.Grid(10, 2, fem.Fe.Q1(1, 1)) + assert np.all(g.bc_coords == np.array([[0, 2]])) + assert np.allclose(g.bc_points, np.array([[0, 10]])) + + g = fem.Grid(10, 2, fem.Fe.Q1(2, 1)) + ref = np.array([[0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2]]) + assert np.all(g.bc_coords == ref) + +def test_laplace(): + order = 1 + N = 3 + dim = 2 + g = fem.Grid(N, N, fem.Fe.Q1(dim, order)) + l = g.laplace.reshape((-1, g.fe.n_dofs, g.fe.n_dofs)) + print("\nlaplace, 2d:\n", l) +def test_grid_assembly(): + for dim in range(1, 4): + order = 1 + N = 3 + g = fem.Grid(30, N, fem.Fe.Q1(dim, order)) + K_const = np.diag(np.arange(1, dim + 1)) + K_const = fem.tn_to_voigt(K_const[:, :, None]) + K_field = K_const.T * np.ones(g.n_elements)[:, None] + A = g.assembly_dense(K_field) + n_dofs = (N+1)**dim + assert A.shape == (n_dofs, n_dofs) + +@pytest.mark.skip +def test_solve_system(): + for dim in range(1, 4): + order = 1 + N = 3 + g = fem.Grid(30, N, fem.Fe.Q1(dim, order)) + K_const = np.diag(np.arange(1, dim + 1)) + K_const = fem.tn_to_voigt(K_const[:, :, None]) + K_field = K_const.T * np.ones(g.n_elements)[:, None] + p_grads = np.eye(dim) + pressure = g.solve_system(K_field, p_grads) + assert pressure.shape == (dim, *dim * [N + 1]) + + +#@pytest.mark.skip +def test_solve_2d(): + dim = 2 + order = 1 + N = 30 + g = fem.Grid(100, N, fem.Fe.Q1(dim, order)) + x = g.barycenters()[0, :] + K_const = np.diag([1, 1]) + #K_const = np.ones((dim, dim)) + K_const = fields.tn_to_voigt(K_const[:, :, None]) + K_field = K_const.T * x[:, None] + #K_field = K_const.T * np.ones_like(x)[:, None] + p_grads = np.eye(dim) + pressure = g.solve_system(K_field, p_grads) + xy_grid = [np.linspace(0, g.size[i], g.ax_dofs[i]) for i in range(2)] + fem_plot.plot_pressure_fields(*xy_grid, pressure) \ No newline at end of file From 6b286387b880b74b99c303f4a696bce8078450ab Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Tue, 26 Mar 
2024 10:06:40 +0100 Subject: [PATCH 02/34] Equivalent scalar and tensor properties. --- src/bgem/upscale/homogenization.py | 79 ++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 src/bgem/upscale/homogenization.py diff --git a/src/bgem/upscale/homogenization.py b/src/bgem/upscale/homogenization.py new file mode 100644 index 0000000..1abdb13 --- /dev/null +++ b/src/bgem/upscale/homogenization.py @@ -0,0 +1,79 @@ +import numpy as np +def equivalent_scalar(loads, responses): + assert loads.shape[1] == responses.shape[1] == 1 + return np.dot(loads[:, 0], responses[:, 0]) / np.dot(loads[:, 0], loads[:, 0]) + +def equivalent_sym_tensor_3d(loads, responses): + """ + :param loads: array, (N, dim), e.g grad pressure grad_p(x) + :param responses: (N, dim), e.g. Darcian velocity -v(x) = K(x) grad_p(x) + :return: + """ + # from LS problem for 6 unknowns in Voigt notation: X, YY, ZZ, YZ, XZ, XY + # the matrix has three blocks for Vx, Vy, Vz component of the responses + # each block has different sparsity pattern + n_loads = loads.shape[0] + zeros = np.zeros(n_loads) + ls_mat_vx = np.stack([loads[:, 0], zeros, zeros, zeros, loads[:, 2], loads[:, 1]], axis=1) + rhs_vx = responses[:, 0] + ls_mat_vy = np.stack([zeros, loads[:, 1], zeros, loads[:, 2], zeros, loads[:, 0]], axis=1) + rhs_vy = responses[:, 1] + ls_mat_vz = np.stack([zeros, zeros, loads[:, 2], loads[:, 1], loads[:, 0], zeros], axis=1) + rhs_vz = responses[:, 2] + ls_mat = np.concatenate([ls_mat_vx, ls_mat_vy, ls_mat_vz], axis=0) + rhs = np.concatenate([rhs_vx, rhs_vy, rhs_vz], axis=0) + assert ls_mat.shape == (3 * n_loads, 6) + assert rhs.shape == (3 * n_loads,) + result = np.linalg.lstsq(ls_mat, rhs, rcond=None) + cond_tn_voigt, residuals, rank, singulars = result + condition_number = singulars[0] / singulars[-1] + if condition_number > 1e3: + logging.warning(f"Badly conditioned inversion. Residual: {residuals}, max/min sing. 
: {condition_number}") + return cond_tn_voigt + + +def equivalent_sym_tensor_2d(loads, responses): + """ + :param loads: array, (N, dim), e.g grad pressure grad_p(x) + :param responses: (N, dim), e.g. Darcian velocity -v(x) = K(x) grad_p(x) + :return: + """ + # from LS problem for 6 unknowns in Voigt notation: X, YY, ZZ, YZ, XZ, XY + # the matrix has three blocks for Vx, Vy, Vz component of the responses + # each block has different sparsity pattern + n_loads = loads.shape[0] + zeros = np.zeros(n_loads) + ls_mat_vx = np.stack([loads[:, 0], zeros, loads[:, 1]], axis=1) + rhs_vx = responses[:, 0] + ls_mat_vy = np.stack([zeros, loads[:, 1], loads[:, 0]], axis=1) + rhs_vy = responses[:, 1] + ls_mat = np.concatenate([ls_mat_vx, ls_mat_vy], axis=0) + rhs = np.concatenate([rhs_vx, rhs_vy], axis=0) + assert ls_mat.shape == (2 * n_loads, 3) + assert rhs.shape == (2 * n_loads,) + result = np.linalg.lstsq(ls_mat, rhs, rcond=None) + cond_tn_voigt, residuals, rank, singulars = result + condition_number = singulars[0] / singulars[-1] + if condition_number > 1e3: + logging.warning(f"Badly conditioned inversion. Residual: {residuals}, max/min sing. : {condition_number}") + return cond_tn_voigt + + +_equivalent_sym_tensor = { + 1: equivalent_scalar, + 2: equivalent_sym_tensor_2d, + 3: equivalent_sym_tensor_3d +} + + + +def equivalent_posdef_tensor(loads, responses): + # tensor pos. def. <=> load . response > 0 + # ... we possibly modify responses to satisfy + dim = loads.shape[0] + assert dim == responses.shape[0] + unit_loads = loads / np.linalg.norm(loads, axis=1)[:, None] + load_components = np.sum(responses * unit_loads, axis=1) + responses_fixed = responses + (np.maximum(0, load_components) - load_components)[:, None] * unit_loads + + return _equivalent_sym_tensor[dim](loads, responses_fixed) From 8c48cb12f66b50aa57f207569d707d37ef898836 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Tue, 26 Mar 2024 10:08:30 +0100 Subject: [PATCH 03/34] Improved FEM test. 
--- src/bgem/stochastic/__init__.py | 1 + src/bgem/upscale/__init__.py | 1 + src/bgem/upscale/fem.py | 61 ++++++++++++++++++++++++++++++--- src/bgem/upscale/fields.py | 55 +++++++++++++++++++++++------ tests/upscale/test_fem.py | 43 +++++++++++++++++++---- 5 files changed, 139 insertions(+), 22 deletions(-) diff --git a/src/bgem/stochastic/__init__.py b/src/bgem/stochastic/__init__.py index e69de29..fecaaec 100644 --- a/src/bgem/stochastic/__init__.py +++ b/src/bgem/stochastic/__init__.py @@ -0,0 +1 @@ +from .fracture import Population, PowerLawSize, UniformBoxPosition, VonMisesOrientation, FisherOrientation, Fracture \ No newline at end of file diff --git a/src/bgem/upscale/__init__.py b/src/bgem/upscale/__init__.py index e69de29..27f66fc 100644 --- a/src/bgem/upscale/__init__.py +++ b/src/bgem/upscale/__init__.py @@ -0,0 +1 @@ +from .fem import Fe, Grid, upscale \ No newline at end of file diff --git a/src/bgem/upscale/fem.py b/src/bgem/upscale/fem.py index e66981c..aa0c7f0 100644 --- a/src/bgem/upscale/fem.py +++ b/src/bgem/upscale/fem.py @@ -3,8 +3,9 @@ """ from functools import cached_property import numpy as np -from .fields import tn_to_voigt, voigt_coords - +from .fields import tn_to_voigt, voigt_to_tn, voigt_coords +from .homogenization import equivalent_posdef_tensor +#from bgem.stochastic import dfn def Q1_1d_basis(points): """ @@ -90,7 +91,7 @@ class Fe: """ @classmethod - def Q1(cls, dim, order): + def Q1(cls, dim, order=1): order = order + 1 points = np.linspace(0, 1, order) basis = Q1_1d_basis(points) @@ -126,7 +127,9 @@ def eval(self, points): def grad_eval(self, points): """ + points: (dim, n_points) Evaluate gradients of all tensor product basis functions at given points. 
+ return: shape (dim, n_basis_fn, n_points) """ dim, n_points = points.shape assert dim == self.dim @@ -183,7 +186,7 @@ def __init__(self, size, n_steps, fe: Fe): # natural numbering comes from flattened (ix, iy, iz) dof coordinates # calculation numbering puts Dirichlet DOFs at the begining self.el_dofs = None - # shape (n_local_dofs, n_elements), DOF indices in calculation numbering + # shape (n_elements, n_local_dofs), DOF indices in calculation numbering self.make_numbering(self.dim) @@ -273,6 +276,7 @@ def make_numbering(self, dim): #print(el_dofs) el_dofs = el_dofs[..., None] + ref_dofs[None, :] # shape: nx, nY, nz, loc_dofs self.el_dofs = calc_map[el_dofs.reshape(-1, el_dofs.shape[-1])] + assert self.el_dofs.shape == (self.n_elements, self.fe.n_dofs) def barycenters(self): """ @@ -374,7 +378,7 @@ def assembly_dense(self, K_voight_tensors): return A def solve_system(self, K, p_grad_bc): """ - :param K: array, shape: (n_voight, n_elements) + :param K: array, shape: (n_elements, n_voight) :param p_grad_bc: array, shape: (dim, n_vectors) usually n_vectors >= dim :return: pressure, shape: (n_vectors, n_dofs) @@ -391,3 +395,50 @@ def solve_system(self, K, p_grad_bc): pressure_natur[:, self.natur_map[:]] = pressure[:, :] return pressure_natur.reshape((n_rhs, *self.dofs_shape)) + def field_grad(self, dof_vals): + """ + Compute solution gradient in element barycenters. + :param dof_vals: (n_vec, n_dofs) + :return: (n_vec, n_el, dim) + """ + el_dof_vals = dof_vals[:, self.el_dofs[:, :]] # (n_vec, n_el, n_loc_dofs) + quads = np.full((self.dim, 1), 0.5) # Zero order Gaussian quad. Integrates up to deg = 1. + grad_basis = self.fe.grad_eval(quads) # (dim, n_loc_dofs, 1) + grad_els = grad_basis[None,None,:, :,0] @ el_dof_vals[:,:, :, None] + return grad_els[:, :,:,0] + +def upscale(K, domain=None): + """ + + :param K: array (nx, ny, nz, n_voigt) or similar for dim=1, 2 + :param domain: domain size array, default np.ones(dim) + :return: Effective tensor. 
+ """ + dim = len(K.shape) - 1 + if domain is None: + domain = np.ones(dim) + + order = 1 + g = Grid(domain, K.shape[:-1], Fe.Q1(dim, order)) + p_grads = np.eye(dim) + K_els = K.reshape((g.n_elements, -1)) + pressure = g.solve_system(K_els, p_grads) + #xy_grid = [np.linspace(0, g.size[i], g.ax_dofs[i]) for i in range(2)] + #fem_plot.plot_pressure_fields(*xy_grid, pressure) + pressure_flat = pressure.reshape((len(p_grads), -1)) + grad = g.field_grad(pressure_flat) # (n_vectors, n_els, dim) + loads = np.average(grad, axis=1) # (n_vectors, dim) + full_K_els = voigt_to_tn(K_els) + responses_els = grad[:, :, None, :] @ full_K_els[None, :, :, :] #(n_vec, n_els, 1, dim) + responses = np.average(responses_els[:, :, 0, :], axis=1) + return equivalent_posdef_tensor(loads, responses) + + +# def rasterize_dfn(fractures: dfn.FractureSet, step): +# """ +# Rasterize given fracture to the grid with `step` +# :param fractures: +# :param step: +# :return: +# """ +# pass \ No newline at end of file diff --git a/src/bgem/upscale/fields.py b/src/bgem/upscale/fields.py index 195f127..9febe74 100644 --- a/src/bgem/upscale/fields.py +++ b/src/bgem/upscale/fields.py @@ -9,20 +9,42 @@ 3: [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)] } +idx_to_full = { + 1: [(0, 0)], + 3: [[0, 2], [2, 1]], + 6: [[0, 5, 4], [5, 1, 3], [4, 3, 2]] + } + +idx_to_voigt = { + 1: ([0], [0]), + 2: ([0, 1, 0], [0, 1, 1]), + 3: ([0, 1, 2, 1, 0, 0], [0, 1, 2, 2, 2, 1]) +} + +def voigt_to_tn(vec): + """ + :param vec: (N, n_voigt) + :return: (N, dim, dim) + """ + _, n_voigt = vec.shape + return vec[:, idx_to_full[n_voigt]] + def tn_to_voigt(tn): """ - (dim, dim, N) -> (n_voight, N) + (N, dim, dim) -> (N, n_voight) :param array: :return: """ - m, n, N = tn.shape - assert m == n - dim = m - tn_voigt = [ - tn[i, j, :] - for i, j in voigt_coords[dim] - ] - return np.array(tn_voigt) + dim, dim, _ = tn.shape + return tn[:, idx_to_voigt[dim][0], idx_to_voigt[dim][1]] + # m, n, N = tn.shape + # assert m == n + # dim = m + # 
tn_voigt = [ + # tn[i, j, :] + # for i, j in voigt_coords[dim] + # ] + # return np.array(tn_voigt) def _tn(k, dim): if type(k) == float: @@ -30,6 +52,17 @@ def _tn(k, dim): assert k.shape == (dim, dim) return k def K_structured(points, K0, Kx=None, fx=2.0, Ky=None, fy=4.0, Q=None): + """ + + :param points: + :param K0: + :param Kx: + :param fx: + :param Ky: + :param fy: + :param Q: + :return: (N, n_voigt) + """ x = points # shape: (N, dim) _, dim = x.shape K0 = _tn(K0, dim) @@ -39,8 +72,8 @@ def K_structured(points, K0, Kx=None, fx=2.0, Ky=None, fy=4.0, Q=None): Ky = Kx Kx = _tn(Kx, dim) Ky = _tn(Ky, dim) - t = 0.5 * (np.sin(2 * np.pi * fx * x[0]) + 1) - s = 0.5 * (np.sin(2 * np.pi * fy * x[1]) + 1) + t = 0.5 * (np.sin(2 * np.pi * fx * x[:, 0]) + 1)[:, None, None] + s = 0.5 * (np.sin(2 * np.pi * fy * x[:, 1]) + 1)[:, None, None] K = t * K0 + (1 - t) * (s * Kx + (1 - s) * Ky) if Q is not None: K = Q.T @ K @ Q diff --git a/tests/upscale/test_fem.py b/tests/upscale/test_fem.py index 4a7623a..3cf9508 100644 --- a/tests/upscale/test_fem.py +++ b/tests/upscale/test_fem.py @@ -1,6 +1,7 @@ import numpy as np import pytest +from bgem.stochastic import dfn from bgem.upscale import fem, fields, fem_plot def basis_1(): @@ -110,8 +111,8 @@ def test_grid_assembly(): N = 3 g = fem.Grid(30, N, fem.Fe.Q1(dim, order)) K_const = np.diag(np.arange(1, dim + 1)) - K_const = fem.tn_to_voigt(K_const[:, :, None]) - K_field = K_const.T * np.ones(g.n_elements)[:, None] + K_const = fem.tn_to_voigt(K_const[None, :, :]) + K_field = K_const * np.ones(g.n_elements)[:, None] A = g.assembly_dense(K_field) n_dofs = (N+1)**dim assert A.shape == (n_dofs, n_dofs) @@ -130,7 +131,7 @@ def test_solve_system(): assert pressure.shape == (dim, *dim * [N + 1]) -#@pytest.mark.skip +@pytest.mark.skip def test_solve_2d(): dim = 2 order = 1 @@ -139,10 +140,40 @@ def test_solve_2d(): x = g.barycenters()[0, :] K_const = np.diag([1, 1]) #K_const = np.ones((dim, dim)) - K_const = fields.tn_to_voigt(K_const[:, :, 
None]) - K_field = K_const.T * x[:, None] + K_const = fields.tn_to_voigt(K_const[None, :, :]) + K_field = K_const * x[:, None] #K_field = K_const.T * np.ones_like(x)[:, None] p_grads = np.eye(dim) pressure = g.solve_system(K_field, p_grads) xy_grid = [np.linspace(0, g.size[i], g.ax_dofs[i]) for i in range(2)] - fem_plot.plot_pressure_fields(*xy_grid, pressure) \ No newline at end of file + fem_plot.plot_pressure_fields(*xy_grid, pressure) + +@pytest.mark.skip() +def test_upsacale_2d(): + K_const = np.diag([10, 100]) + K_const = fields.tn_to_voigt(K_const[None, :, :]) + K_field = K_const * np.ones((8, 8))[:, :, None] + K_eff = fem.upscale(K_field) + assert np.allclose(K_eff, K_const[0, :]) + + +def test_upscale_parallel_plates(): + cube = [1, 1, 1] + for dim in [2, 3]: + plates = dfn.FractureSet.parallel_plates( + box = cube, + normal = [1, 0, 0] + ) + + +def single_fracture_distance_function(): + """ + Determine effective tensor as a function of the voxel center distance from + the fracture plane and angle. 
+ lattitude : 0 - pi/4 : 9 + longitude : 0 - pi/4, up to pi/2 for validation : 9 + distance : 9 levels + :return: about 1000 runs, also test of perfromance + use 128^3 grid + """ + pass \ No newline at end of file From 7cb5cc1dad528e42340dcd1c21ca630864ac6e52 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Tue, 26 Mar 2024 10:09:17 +0100 Subject: [PATCH 04/34] Add bgem.core from Endorse --- src/bgem/core/__init__.py | 7 + src/bgem/core/common.py | 147 +++++++++++++++++++++ src/bgem/core/config.py | 257 +++++++++++++++++++++++++++++++++++++ src/bgem/core/flow_call.py | 144 +++++++++++++++++++++ src/bgem/core/memoize.py | 191 +++++++++++++++++++++++++++ src/bgem/core/report.py | 20 +++ 6 files changed, 766 insertions(+) create mode 100644 src/bgem/core/__init__.py create mode 100644 src/bgem/core/common.py create mode 100644 src/bgem/core/config.py create mode 100644 src/bgem/core/flow_call.py create mode 100644 src/bgem/core/memoize.py create mode 100644 src/bgem/core/report.py diff --git a/src/bgem/core/__init__.py b/src/bgem/core/__init__.py new file mode 100644 index 0000000..9be4025 --- /dev/null +++ b/src/bgem/core/__init__.py @@ -0,0 +1,7 @@ +from .memoize import EndorseCache, memoize, File +from .common import substitute_placeholders, sample_from_population, workdir +from .config import dotdict, load_config, apply_variant, dump_config +from .report import report +from .flow_call import call_flow, FlowOutput + +year = 365.2425 * 24 * 60 * 60 diff --git a/src/bgem/core/common.py b/src/bgem/core/common.py new file mode 100644 index 0000000..63c15fe --- /dev/null +++ b/src/bgem/core/common.py @@ -0,0 +1,147 @@ +import os.path +from typing import * +import shutil +from pathlib import Path +import numpy as np +import logging + +from .memoize import File + +class workdir: + """ + Context manager for creation and usage of a workspace dir. 
+ + name: the workspace directory + inputs: list of files and directories to copy into the workspaceand + TODO: fine a sort of robust ad portable reference + clean: if true the workspace would be deleted at the end of the context manager. + TODO: clean_before / clean_after + TODO: File constructor taking current workdir environment, openning virtually copied files. + TODO: Workdir would not perform change of working dir, but provides system interface for: subprocess, file openning + only perform CD just before executing a subprocess also interacts with concept of an executable. + portable reference and with lazy evaluation. Optional true copy possible. + """ + CopyArgs = Union[str, Tuple[str, str]] + def __init__(self, name:str="sandbox", inputs:List[CopyArgs] = None, clean=False): + + if inputs is None: + inputs = [] + self._inputs = inputs + self.work_dir = os.path.abspath(name) + Path(self.work_dir).mkdir(parents=True, exist_ok=True) + self._clean = clean + self._orig_dir = os.getcwd() + + def copy(self, src, dest=None): + """ + :param src: Realtive or absolute path. + :param dest: Relative path with respect to work dir. + Default is the same as the relative source path, + for abs path it is the just the last name in the path. 
+ """ + if isinstance(src, File): + src = src.path + if isinstance(dest, File): + dest = dest.path + #if dest == ".": + # if os.path.isabs(src): + # dest = os.path.basename(src) + # else: + # dest = src + if dest is None: + dest = "" + dest = os.path.join(self.work_dir, dest, os.path.basename(src)) + dest_dir, _ = os.path.split(dest) + if not os.path.isdir(dest_dir): + #print(f"MAKE DIR: {dest_dir}") + Path(dest_dir).mkdir(parents=True, exist_ok=True) + abs_src = os.path.abspath(src) + + # ensure that we always update the target + if os.path.isdir(dest): + shutil.rmtree(dest) + elif os.path.isfile(dest): + os.remove(dest) + + # TODO: perform copy, link or redirectio to src during extraction of the File object from dictionary + # assumes custom tag for file, file_link, file_copy etc. + if os.path.isdir(src): + #print(f"COPY DIR: {abs_src} TO DESTINATION: {dest}") + shutil.copytree(abs_src, dest, dirs_exist_ok=True) + else: + try: + shutil.copy2(abs_src, dest) + except FileNotFoundError: + FileNotFoundError(f"COPY FILE: {abs_src} TO DESTINATION: {dest}") + + def __enter__(self): + for item in self._inputs: + #print(f"treat workspace item: {item}") + if isinstance(item, Tuple): + self.copy(*item) + else: + self.copy(item) + os.chdir(self.work_dir) + + return self.work_dir + + def __exit__(self, type, value, traceback): + os.chdir(self._orig_dir) + if self._clean: + shutil.rmtree(self.work_dir) + + +def substitute_placeholders(file_in: str, file_out: str, params: Dict[str, Any]): + """ + In the template `file_in` substitute the placeholders in format '' + according to the dict `params`. Write the result to `file_out`. + TODO: set Files into params, in order to compute hash from them. 
+ TODO: raise for missing value in dictionary + """ + used_params = [] + files = [] + with open(file_in, 'r') as src: + text = src.read() + for name, value in params.items(): + if isinstance(value, File): + files.append(value) + value = value.path + placeholder = '<%s>' % name + n_repl = text.count(placeholder) + if n_repl > 0: + used_params.append(name) + text = text.replace(placeholder, str(value)) + with open(file_out, 'w') as dst: + dst.write(text) + + return File(file_out, files), used_params + + +# Directory for all flow123d main input templates. +# These are considered part of the software. + +# TODO: running with stdout/ stderr capture, test for errors, log but only pass to the main in the case of +# true error + + +def sample_from_population(n_samples:int, frequency:Union[np.array, int]): + if type(frequency) is int: + frequency = np.full(len(frequency), 1, dtype=int) + else: + frequency = np.array(frequency, dtype=int) + + cumul_freq = np.cumsum(frequency) + total_samples = np.sum(frequency) + samples = np.random.randint(0, total_samples, size=n_samples + 1) + samples[-1] = total_samples # stopper + sample_seq = np.sort(samples) + # put samples into bins given by cumul_freq + bin_samples = np.empty_like(samples) + i_sample = 0 + for ifreq, c_freq in enumerate(cumul_freq): + + while sample_seq[i_sample] < c_freq: + bin_samples[i_sample] = ifreq + i_sample += 1 + + return bin_samples[:-1] diff --git a/src/bgem/core/config.py b/src/bgem/core/config.py new file mode 100644 index 0000000..96f5a11 --- /dev/null +++ b/src/bgem/core/config.py @@ -0,0 +1,257 @@ +from dataclasses import dataclass +from typing import * + +import os +import yaml +import re +from socket import gethostname +from glob import iglob + +from yamlinclude import YamlIncludeConstructor +from yamlinclude.constructor import WILDCARDS_REGEX, get_reader_class_by_name + + +class YamlLimitedSafeLoader(type): + """Meta YAML loader that skips the resolution of the specified YAML tags.""" + def 
__new__(cls, name, bases, namespace, do_not_resolve: List[str]) -> Type[yaml.SafeLoader]: + do_not_resolve = set(do_not_resolve) + implicit_resolvers = { + key: [(tag, regex) for tag, regex in mappings if tag not in do_not_resolve] + for key, mappings in yaml.SafeLoader.yaml_implicit_resolvers.items() + } + return super().__new__( + cls, + name, + (yaml.SafeLoader, *bases), + {**namespace, "yaml_implicit_resolvers": implicit_resolvers}, + ) + +class YamlNoTimestampSafeLoader( + metaclass=YamlLimitedSafeLoader, do_not_resolve={"tag:yaml.org,2002:timestamp"} +): + """A safe YAML loader that leaves timestamps as strings.""" + pass + +class dotdict(dict): + """ + dot.notation access to dictionary attributes + TODO: keep somehow reference to the original YAML in order to report better + KeyError origin. + """ + __setattr__ = dict.__setitem__ + __delattr__ = dict.__delitem__ + + def __getattr__(self, item): + try: + return self[item] + except KeyError: + return self.__getattribute__(item) + + @classmethod + def create(cls, cfg : Any): + """ + - recursively replace all dicts by the dotdict. + """ + if isinstance(cfg, dict): + items = ( (k, cls.create(v)) for k,v in cfg.items()) + return dotdict(items) + elif isinstance(cfg, list): + return [cls.create(i) for i in cfg] + elif isinstance(cfg, tuple): + return tuple([cls.create(i) for i in cfg]) + else: + return cfg + + @staticmethod + def serialize(cfg): + if isinstance(cfg, (dict, dotdict)): + return { k:dotdict.serialize(v) for k,v in cfg.items()} + elif isinstance(cfg, list): + return [dotdict.serialize(i) for i in cfg] + elif isinstance(cfg, tuple): + return tuple([dotdict.serialize(i) for i in cfg]) + else: + return cfg + +Key = Union[str, int] +Path = Tuple[Key] +VariantPatch = Dict[str, dotdict] + +@dataclass +class PathIter: + path: Path + # full address path + i: int = 0 + # actual level of the path; initial -1 is before first call to `idx` or `key`. 
+ + def is_leaf(self): + return self.i == len(self.path) + + def idx(self): + try: + return int(self.path[self.i]), PathIter(self.path, self.i + 1) + except ValueError: + raise IndexError(f"Variant substitution: IndexError at address: '{self.address()}'.") + + def key(self): + key = self.path[self.i] + if len(key) > 0 and not key[0].isdigit(): + return key, PathIter(self.path, self.i + 1) + else: + raise KeyError(f"Variant substitution: KeyError at address: '{self.address()}'.") + + def address(self): + sub_path = self.path[:self.i + 1] + return '/'.join([str(v) for v in sub_path]) + + +def _item_update(key:Key, val:dotdict, sub_path:Key, sub:dotdict): + sub_key, path = sub_path + if key == sub_key: + if path.empty(): + # Recursion termination + return sub + else: + return deep_update(val, path, sub) + else: + return val + +def deep_update(cfg: dotdict, iter:PathIter, substitute:dotdict): + if iter.is_leaf(): + return substitute + new_cfg = dotdict(cfg) + if isinstance(cfg, list): + key, sub_path = iter.idx() + elif isinstance(cfg, (dict, dotdict)): + key, sub_path = iter.key() + else: + raise TypeError(f"Variant substitution: Unknown type {type(cfg)}") + new_cfg[key] = deep_update(cfg[key], sub_path, substitute) + return new_cfg + + + +def apply_variant(cfg:dotdict, variant:VariantPatch) -> dotdict: + """ + In the `variant` dict the keys are interpreted as the address + in the YAML file. The address is a list of strings and ints separated by '/' + and representing an item of the YAML file. + For every `(address, value)` item of the `variant` dict the referenced item + in `cfg` is replaced by `value`. + + Implemented by recursion with copy of changed collections. + May be slow for too many variant items and substitution of the large collection. 
+ :param cfg: + :param variant: dictionary path -> dotdict + :return: + """ + new_cfg = cfg + for path_str, val in variant.items(): + path = tuple(path_str.split('/')) + assert path + new_cfg = deep_update(new_cfg, PathIter(path), val) + return new_cfg + +class YamlInclude(YamlIncludeConstructor): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.included_files = [] + + def load( + self, + loader, + pathname: str, + recursive: bool = False, + encoding: str = '', + reader: str = '' + ): # pylint:disable=too-many-arguments + if not encoding: + encoding = self._encoding or self.DEFAULT_ENCODING + if self._base_dir: + pathname = os.path.join(self._base_dir, pathname) + reader_clz = None + if reader: + reader_clz = get_reader_class_by_name(reader) + if re.match(WILDCARDS_REGEX, pathname): + result = [] + iterable = iglob(pathname, recursive=recursive) + for path in filter(os.path.isfile, iterable): + self.included_files.append(path) + if reader_clz: + result.append(reader_clz(path, encoding=encoding, loader_class=type(loader))()) + else: + result.append(self._read_file(path, loader, encoding)) + return result + self.included_files.append(pathname) + if reader_clz: + return reader_clz(pathname, encoding=encoding, loader_class=type(loader))() + return self._read_file(pathname, loader, encoding) + +def resolve_machine_configuration(cfg:dotdict, hostname) -> dotdict: + # resolve machine configuration + if 'machine_config' not in cfg: + return cfg + if hostname is None: + hostname = gethostname() + machine_cfg = cfg.machine_config.get(hostname, None) + if machine_cfg is None: + machine_cfg = cfg.machine_config.get('__default__', None) + if machine_cfg is None: + raise KeyError(f"Missing hostname: {hostname} in 'cfg.machine_config'.") + cfg.machine_config = machine_cfg + return cfg + +def load_config(path, collect_files=False, hostname=None): + """ + Load configuration from given file replace, dictionaries by dotdict + uses pyyaml-tags namely 
for: + include tag: + geometry: <% include(path="config_geometry.yaml")> + """ + instance = YamlInclude.add_to_loader_class(loader_class=YamlNoTimestampSafeLoader, base_dir=os.path.dirname(path)) + cfg_dir = os.path.dirname(path) + with open(path) as f: + cfg = yaml.load(f, Loader=YamlNoTimestampSafeLoader) + cfg['_config_root_dir'] = os.path.abspath(cfg_dir) + dd = dotdict.create(cfg) + dd = resolve_machine_configuration(dd, hostname) + if collect_files: + referenced = instance.included_files + referenced.append(path) + referenced.extend(collect_referenced_files(dd, ['.', cfg_dir])) + dd['_file_refs'] = referenced + return dd + +def dump_config(config): + with open("__config_resolved.yaml", "w") as f: + yaml.dump(config, f) + +def path_search(filename, path): + if not isinstance(filename, str): + return [] + # Abs paths intentionally not included + # if os.path.isabs(filename): + # if os.path.isabs(filename) and os.path.isfile(filename): + # return [os.path.abspath(filename)] + # else: + # return [] + for dir in path: + full_name = os.path.join(dir, filename) + if os.path.isfile(full_name): + return [os.path.abspath(full_name)] + return [] + +FilePath = NewType('FilePath', str) +def collect_referenced_files(cfg:dotdict, search_path:List[str]) -> List[FilePath]: + if isinstance(cfg, (dict, dotdict)): + referenced = [collect_referenced_files(v, search_path) for v in cfg.values()] + elif isinstance(cfg, (list, tuple)): + referenced = [collect_referenced_files(v, search_path) for v in cfg] + else: + return path_search(cfg, search_path) + # flatten + return [i for l in referenced for i in l] + + + + diff --git a/src/bgem/core/flow_call.py b/src/bgem/core/flow_call.py new file mode 100644 index 0000000..af706f4 --- /dev/null +++ b/src/bgem/core/flow_call.py @@ -0,0 +1,144 @@ +from typing import * +import logging +import os +import attrs +from . 
import dotdict, memoize, File, report, substitute_placeholders, workdir +import subprocess +from pathlib import Path +import yaml + +def search_file(basename, extensions): + """ + Return first found file or None. + """ + if type(extensions) is str: + extensions = (extensions,) + for ext in extensions: + if os.path.isfile(basename + ext): + return File(basename + ext) + return None + +class EquationOutput: + def __init__(self, eq_name, balance_name): + self.eq_name: str = eq_name + self.spatial_file: File = search_file(eq_name+"_fields", (".msh", ".pvd")) + self.balance_file: File = search_file(balance_name+"_balance", ".txt"), + self.observe_file: File = search_file(eq_name+"_observe", ".yaml") + + def _load_yaml_output(self, file, basename): + if file is None: + raise FileNotFoundError(f"Not found Flow123d output file: {self.eq_name}_{basename}.yaml.") + with open(file.path, "r") as f: + loaded_yaml = yaml.load(f, yaml.CSafeLoader) + return dotdict.create(loaded_yaml) + + def observe_dict(self): + return self._load_yaml_output(self.observe_file, 'observe') + + def balance_dict(self): + return self._load_yaml_output(self.balance_file, 'balance') + + def balance_df(self): + """ + create a dataframe for the Balance file + rows for times, columns are tuple (region, value), + values =[ flux, flux_in, flux_out, mass, source, source_in, source_out, flux_increment, source_increment, flux_cumulative, source_cumulative, error ] + :return: + TODO: ... 
+ """ + dict = self.balance_dict() + pass + + + +class FlowOutput: + + def __init__(self, process: subprocess.CompletedProcess, stdout: File, stderr: File, output_dir="output"): + self.process = process + self.stdout = stdout + self.stderr = stderr + with workdir(output_dir): + self.log = File("flow123.0.log") + # TODO: flow ver 4.0 unify output file names + self.hydro = EquationOutput("flow", "water") + self.solute = EquationOutput("solute", "mass") + self.mechanic = EquationOutput("mechanics", "mechanics") + + @property + def success(self): + return self.process.returncode == 0 + + def check_conv_reasons(self): + """ + Check correct convergence of the solver. + Reports the divergence reason and returns false in case of divergence. + """ + with open(self.log.path, "r") as f: + for line in f: + tokens = line.split(" ") + try: + i = tokens.index('convergence') + if tokens[i + 1] == 'reason': + value = tokens[i + 2].rstrip(",") + conv_reason = int(value) + if conv_reason < 0: + print("Failed to converge: ", conv_reason) + return False + except ValueError: + continue + return True + +#@memoize +def _prepare_inputs(file_in, params): + in_dir, template = os.path.split(file_in) + root = template.removesuffix(".yaml").removesuffix("_tmpl") + template_path = Path(file_in).rename(Path(in_dir) / (root + "_tmpl.yaml")) + #suffix = "_tmpl.yaml" + #assert template[-len(suffix):] == suffix + #filebase = template[:-len(suffix)] + main_input = Path(in_dir) / (root + ".yaml") + main_input, used_params = substitute_placeholders(str(template_path), str(main_input), params) + return main_input + +#@memoize +def _flow_subprocess(arguments, main_input): + filebase, ext = os.path.splitext(os.path.basename(main_input.path)) + arguments.append(main_input.path) + logging.info("Running Flow123d: " + " ".join(arguments)) + + stdout_path = filebase + "_stdout" + stderr_path = filebase + "_stderr" + with open(stdout_path, "w") as stdout: + with open(stderr_path, "w") as stderr: + print("Call: 
", ' '.join(arguments)) + completed = subprocess.run(arguments, stdout=stdout, stderr=stderr) + return File(stdout_path), File(stderr_path), completed + +#@report +#@memoize +def call_flow(cfg:'dotdict', file_in:File, params: Dict[str,str]) -> FlowOutput: + """ + Run Flow123d in actual work dir with main input given be given template and dictionary of parameters. + + 1. prepare the main input file from filebase_in + "_tmpl.yamlL" + 2. run Flow123d + + TODO: pass only flow configuration + """ + main_input = _prepare_inputs(file_in, params) + stdout, stderr, completed = _flow_subprocess(cfg.flow_executable.copy(), main_input) + logging.info(f"Exit status: {completed.returncode}") + if completed.returncode != 0: + with open(stderr.path, "r") as stderr: + print(stderr.read()) + raise Exception("Flow123d ended with error") + + fo = FlowOutput(completed, stdout.path, stderr.path) + conv_check = fo.check_conv_reasons() + logging.info(f"converged: {conv_check}") + return fo + +# TODO: +# - call_flow variant with creating dir, copy, + + diff --git a/src/bgem/core/memoize.py b/src/bgem/core/memoize.py new file mode 100644 index 0000000..342adb2 --- /dev/null +++ b/src/bgem/core/memoize.py @@ -0,0 +1,191 @@ +import logging +from typing import * +#import redis_cache +import hashlib +from functools import wraps +import time +import os + + +""" +TODO: modify redis_simple_cache or our memoize decorator to hash also function code + see https://stackoverflow.com/questions/18134087/how-do-i-check-if-a-python-function-changed-in-live-code + that one should aslo hash called function .. 
the whole tree + more over we should also hash over serialization of classes +""" + +class EndorseCache: + pass + # __instance__ = None + # @staticmethod + # def instance(*args, **kwargs): + # if EndorseCache.__instance__ is None: + # EndorseCache.__instance__ = EndorseCache(*args, **kwargs) + # return EndorseCache.__instance__ + # + # def __init__(self, host="localhost", port=6379): + # # TODO: possibly start redis server + # self.cache = redis_cache.SimpleCache(10000, hashkeys=True, host=host, port=port) + # + # + # def expire_all(self): + # self.cache.expire_all_in_set() + +# Workaround missing module in the function call key +# def memoize(): +# endorse_cache = EndorseCache.__instance__ +# def decorator(fn): +# # redis-simple-cache does not include the function module into the key +# # we poss in a functions with additional parameter +# def key_fn(fn_id , *args, **kwargs): +# return fn(*args, **kwargs) +# +# modif_fn = redis_cache.cache_it(limit=10000, expire=redis_cache.DEFAULT_EXPIRY, cache=endorse_cache.cache)(key_fn) +# +# @wraps(fn) +# def wrapper(*args, **kwargs): +# return modif_fn((fn.__name__, fn.__module__), *args, **kwargs) +# return wrapper +# return decorator + +def memoize(fn): + endorse_cache = EndorseCache.instance() + redis_cache_deco = redis_cache.cache_it(limit=10000, expire=redis_cache.DEFAULT_EXPIRY, cache=endorse_cache.cache) + return redis_cache_deco(fn) + + + + +class File: + """ + An object that should represent a file as a computation result. + Contains the path and the file content hash. + The system should also prevent modification of the files that are already created. + To this end one has to use File.open instead of the standard open(). + Current usage: + + with File.open(path, "w") as f: + f.write... + + return File.from_handle(f) # check that handel was opened by File.open and is closed, performs hash. + + Ideally, the File class could operate as the file handle and context manager. 
+ However that means calling system open() and then modify its __exit__ method. + However I was unable to do that. Seems like __exit__ is changed, but changed to the original one smowere latter as + it is not called. Other possibility is to wrap standard file handle and use it like: + + @joblib.task + def make_file(file_path, content):` + with File.open(file_path, mode="w") as f: # calls self.handle = open(file_path, mode) + f.handle.write(content) + # called File.__exit__ which calls close(self.handle) and performs hashing. + return f + + TODO: there is an (unsuccessful) effort to provide special handle for writting. + TODO: Override deserialization in order to check that the file is unchanged. + Seems that caching just returns the object without actuall checking. + """ + + # @classmethod + # def handle(cls, fhandle): + # return File(fhandle.name) + + # @classmethod + # def output(cls, path): + # """ + # Create File instance intended for write. + # The hash is computed after call close of the of open() handle. + # Path is checked to not exists yet. + # """ + # return cls(path, postponed=True) + _hash_fn = hashlib.md5 + def __init__(self, path: str, files:List['File'] = None): # , hash:Union[bytes, str]=None) #, postponed=False): + """ + For file 'path' create object containing both path and content hash. + Optionaly the files referenced by the file 'path' could be passed by `files` argument + in order to include their hashes. + :param path: str + :param files: List of referenced files. 
+ """ + self.path = os.path.abspath(path) + if files is None: + files = [] + self.referenced_files = files + self._set_hash() + + def __getstate__(self): + return (self.path, self.referenced_files) + + def __setstate__(self, args): + self.path, self.referenced_files = args + self._set_hash() + + def _set_hash(self): + files = self.referenced_files + md5 = self.hash_for_file(self.path) + for f in files: + md5.update(repr(f).encode()) + self.hash = md5.hexdigest() + + @staticmethod + def open(path, mode="wt"): + """ + Mode could only be 'wt' or 'wb', 'x' is added automaticaly. + """ + exclusive_mode = {"w": "x", "wt": "xt", "wb": "xb"}[mode] + # if os.path.isfile(path): + # raise "" + fhandle = open(path, mode=exclusive_mode) # always open for exclusive write + return fhandle + + @classmethod + def from_handle(cls, handle): + assert handle.closed + assert handle.mode.find("x") != -1 + return cls(handle.name) + + def __hash__(self): + if self.hash is None: + raise Exception("Missing hash of output file.") + return hash(self.path, self.hash) + + def __str__(self): + return f"File('{self.path}', hash={self.hash})" + + + """ + Could be used from Python 3.11 + @staticmethod + def hash_for_file(path): + with open(path, "rb") as f: + return hashlib.file_digest(f, "md5") + + md5 = hashlib.md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(block_size), b''): + md5.update(chunk) + return md5.digest() + """ + + @staticmethod + def hash_for_file(path): + ''' + Block size directly depends on the block size of your filesystem + to avoid performances issues + Here I have blocks of 4096 octets (Default NTFS) + ''' + block_size = 256 * 128 + md5 = File._hash_fn() + try: + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(block_size), b''): + md5.update(chunk) + except FileNotFoundError: + raise FileNotFoundError(f"Missing cached file: {path}") + return md5 + + +""" + + +""" diff --git a/src/bgem/core/report.py b/src/bgem/core/report.py new file mode 
100644 index 0000000..c768a86 --- /dev/null +++ b/src/bgem/core/report.py @@ -0,0 +1,20 @@ +from functools import wraps +import logging +import time + +__report_indent_level = 0 + +def report(fn): + @wraps(fn) + def do_report(*args, **kwargs): + global __report_indent_level + __report_indent_level += 1 + init_time = time.perf_counter() + result = fn(*args, **kwargs) + duration = time.perf_counter() - init_time + __report_indent_level -= 1 + indent = (__report_indent_level * 2) * " " + logging.info(f"{indent}DONE {fn.__module__}.{fn.__name__} @ {duration}") + return result + return do_report + From 8902654c324bf651181c6b392f295e53dafbc482 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Tue, 26 Mar 2024 10:10:38 +0100 Subject: [PATCH 05/34] Reference solution for two-scale homogenization test. --- .gitignore | 1 + requirements.txt | 1 + setup.py | 2 +- tests/upscale/flow_upscale_templ.yaml | 51 +++++ tests/upscale/fractures_conf.yaml | 44 ++++ tests/upscale/mesh_class.py | 314 ++++++++++++++++++++++++++ tests/upscale/test_two_scale.py | 270 ++++++++++++++++++++++ 7 files changed, 682 insertions(+), 1 deletion(-) create mode 100644 tests/upscale/flow_upscale_templ.yaml create mode 100644 tests/upscale/fractures_conf.yaml create mode 100644 tests/upscale/mesh_class.py create mode 100644 tests/upscale/test_two_scale.py diff --git a/.gitignore b/.gitignore index c73419b..0923db1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +tests/upscale/2d_flow_homo_samples # Merge *.py.orig diff --git a/requirements.txt b/requirements.txt index 52590b4..28fa45f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,4 +2,5 @@ pytest numpy scipy bih +joblib plotly diff --git a/setup.py b/setup.py index 99da665..749c897 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ include_package_data=True, zip_safe=False, #install_requires=['numpy', 'scipy', 'bih', 'gmsh-sdk<=4.5.1'], - install_requires=['numpy>=1.13.4', 'pandas', 'scipy', 'bih', 'gmsh>=4.10.4'], + 
install_requires=['numpy>=1.13.4', 'pandas', 'scipy', 'gmsh>=4.10.4', 'pyyaml-include', 'bih'], # incompatible changes in SDK in release 4.6.0 to be changed in the new release of bgem python_requires='>=3', # extras_require={ diff --git a/tests/upscale/flow_upscale_templ.yaml b/tests/upscale/flow_upscale_templ.yaml new file mode 100644 index 0000000..3a0c3c6 --- /dev/null +++ b/tests/upscale/flow_upscale_templ.yaml @@ -0,0 +1,51 @@ +################# +# Author: Jan Brezina + +flow123d_version: 3.9.0 +problem: !Coupling_Sequential + description: | + Single scale reference field 3D Darcy flow. + Driven by unit pressure gradient. + mesh: + # Input mesh with 'outer' and 'borehole' regions. + mesh_file: + flow_equation: !Flow_Darcy_LMH + output_specific: + nonlinear_solver: + linear_solver: !Petsc + a_tol: 1e-6 + r_tol: 1e-3 + # options: + input_fields: + - region: .BOUNDARY + bc_type: dirichlet + bc_piezo_head: !FieldFormula + value: " @ [x,y,z]" + - region: ALL + conductivity: !FieldFE + mesh_data_file: + field_name: conductivity + cross_section: !FieldFE + mesh_data_file: + field_name: cross_section + #- region: outer + #conductivity: 1e-14 + # TODO: heterogeneous layerd and possibly anisotropic conductivity here + # given as FieldFormula or rather FieldFE computed for particular position. + n_schurs: 2 + output: + fields: + - piezo_head_p0 + #- pressure_p0 + #- pressure_p1 + - velocity_p0 + - region_id + - conductivity + balance: {} + output_stream: + #format: !gmsh + format: !vtk + # variant: ascii + + + diff --git a/tests/upscale/fractures_conf.yaml b/tests/upscale/fractures_conf.yaml new file mode 100644 index 0000000..1eafd47 --- /dev/null +++ b/tests/upscale/fractures_conf.yaml @@ -0,0 +1,44 @@ +# Fracture network configuration, list of fracture families. +# This data set taken from SKB report. 
+ +- name: NS + trend: 292 + plunge: 1 + concentration: 17.8 + power: 2.5 +# r_min: 0.038 + r_min: 1 + r_max: 564 + p_32: 0.094 +- name: NE + trend: 326 + plunge: 2 + concentration: 14.3 + power: 2.7 + r_min: 1 + r_max: 564 + p_32: 0.163 +- name: NW + trend: 60 + plunge: 6 + concentration: 12.9 + power: 3.1 + r_min: 1 + r_max: 564 + p_32: 0.098 +- name: EW + trend: 15 + plunge: 2 + concentration: 14.0 + power: 3.1 + r_min: 1 + r_max: 564 + p_32: 0.039 +- name: HZ + trend: 5 + plunge: 86 + concentration: 15.2 + power: 2.38 + r_min: 1 + r_max: 564 + p_32: 0.141 diff --git a/tests/upscale/mesh_class.py b/tests/upscale/mesh_class.py new file mode 100644 index 0000000..2e8cf8b --- /dev/null +++ b/tests/upscale/mesh_class.py @@ -0,0 +1,314 @@ +from typing import Dict, Tuple, List + +import attrs +import bih +import numpy as np +# from numba import njit +import bisect + +from bgem.gmsh.gmsh_io import GmshIO +from bgem.gmsh import heal_mesh + +from bgem.core import File, memoize, report + + +#@njit +def element_vertices(all_nodes: np.array, node_indices: np.array): + return all_nodes[node_indices[:], :] + + +#@njit +def element_loc_mat(all_nodes: np.array, node_indices: List[int]): + n = element_vertices(all_nodes, node_indices) + return (n[1:, :] - n[0]).T + + +#@njit +def element_compute_volume(all_nodes: np.array, node_indices: List[int]): + return np.linalg.det(element_loc_mat(all_nodes, node_indices)) / 6 + + +@attrs.define +class Element: + mesh: 'Mesh' + type: int + tags: Tuple[int, int] + node_indices: List[int] + + def vertices(self): + return element_vertices(self.mesh.nodes, np.array(self.node_indices, dtype=int)) + + def loc_mat(self): + return element_loc_mat(self.mesh.nodes, self.node_indices) + + def volume(self): + return element_compute_volume(self.mesh.nodes, self.node_indices) + + def barycenter(self): + return np.mean(self.vertices(), axis=0) + + def gmsh_tuple(self, node_map): + node_ids = [node_map[inode] for inode in self.node_indices] + return 
(self.type, self.tags, node_ids) + + + + +#@memoize +def _load_mesh(mesh_file: 'File', heal_tol = None): + + # mesh_file = mesh_file.path + if heal_tol is None: + gmsh_io = GmshIO(str(mesh_file)) + return Mesh(gmsh_io, file = mesh_file) + else: + hm = heal_mesh.HealMesh.read_mesh(str(mesh_file), node_tol= heal_tol * 0.1) + report(hm.heal_mesh)(gamma_tol=heal_tol) + #hm.move_all(geom_dict["shift_vec"]) + #elm_to_orig_reg = hm.map_regions(new_reg_map) + report(hm.stats_to_yaml)(mesh_file.with_suffix(".heal_stats.yaml")) + #assert hm.healed_mesh_name == mesh_healed + hm.write() + return Mesh.load_mesh(hm.healed_mesh_name, None) + + # !! can not memoize static and class methods (have no name) + + +#@report +#@njit +def mesh_compute_el_volumes(nodes:np.array, node_indices :np.array) -> np.array: + return np.array([element_compute_volume(nodes, ni) for ni in node_indices]) + + +class Mesh: + + @staticmethod + def load_mesh(mesh_file: 'File', heal_tol=None) -> 'Mesh': + return _load_mesh(mesh_file, heal_tol) + + @staticmethod + def empty(mesh_path) -> 'Mesh': + return Mesh(GmshIO(), mesh_path) + + def __init__(self, gmsh_io: GmshIO, file): + + self.gmsh_io : GmshIO = gmsh_io + # TODO: remove relation to file + # rather use a sort of generic wrapper around loadable objects + # in order to relay on the underlaing files for the caching + self.file : 'File' = file + self.reinit() + + + def reinit(self): + # bounding interval hierarchy for the mesh elements + # numbers elements from 0 as they are added + self._update_nodes() + self._update_elements() + + # _boxes: List[bih.AABB] + self._bih: bih.BIH = None + + self._el_volumes:np.array = None + self._el_barycenters:np.array = None + + def _update_nodes(self): + self.node_ids = [] + self.node_indices = {} + self.nodes = np.empty((len(self.gmsh_io.nodes), 3)) + for i, (nid, node) in enumerate(self.gmsh_io.nodes.items()): + self.node_indices[nid] = i + self.node_ids.append(nid) + self.nodes[i, :] = node + + def 
_update_elements(self): + self.el_ids = [] + self.el_indices = {} + self.elements = [] + for i, (eid, el) in enumerate(self.gmsh_io.elements.items()): + type, tags, node_ids = el + element = Element(self, type, tags, [self.node_indices[nid] for nid in node_ids]) + self.el_indices[eid] = i + self.el_ids.append(eid) + self.elements.append(element) + + def __getstate__(self): + return (self.gmsh_io, self.file) + + def __setstate__(self, args): + self.gmsh_io, self.file = args + self.reinit() + + @property + def bih(self): + if self._bih is None: + self._bih = self._build_bih() + return self._bih + + def _build_bih(self): + el_boxes = [] + for el in self.elements: + node_coords = el.vertices() + box = bih.AABB(node_coords) + el_boxes.append(box) + _bih = bih.BIH() + _bih.add_boxes(el_boxes) + _bih.construct() + return _bih + + + + def candidate_indices(self, box): + list_box = box.tolist() + return self.bih.find_box(bih.AABB(list_box)) + + # def el_volume(self, id): + # return self.elements[self.el_indices[id]].volume() + + @property + #@report + def el_volumes(self): + if self._el_volumes is None: + node_indices = np.array([e.node_indices for e in self.elements], dtype=int) + print(f"Compute el volumes: {self.nodes.shape}, {node_indices.shape}") + self._el_volumes = mesh_compute_el_volumes(self.nodes, node_indices) + return self._el_volumes + + + + def el_barycenters(self): + if self._el_barycenters is None: + self._el_barycenters = np.array([e.barycenter() for e in self.elements]) + return self._el_barycenters + + def fr_map(self, fractures): + # TODO better association mechanism + fr_reg_to_idx = {fr.region.id - 100000 - 2: idx for idx, fr in enumerate(fractures)} + fr_map = [fr_reg_to_idx.get(e.tags[0], len(fractures)) for e in self.elements] + return np.array(fr_map) + + # def el_loc_mat(self, id): + # return self.elements[self.el_indices[id]].loc_mat() + + # def el_barycenter(self, id): + # return self.elements[self.el_indices[id]].barycenter() + + # def 
el_nodes(self, id): + # return self.elements[self.el_indices[id]].vertices() + + def submesh(self, elements, file_path): + gmesh = GmshIO() + active_nodes = np.full( (len(self.nodes),), False) + for iel in elements: + el = self.elements[iel] + active_nodes[el.node_indices] = True + sub_nodes = self.nodes[active_nodes] + new_for_old_nodes = np.zeros((len(self.nodes),), dtype=int) + new_for_old_nodes[active_nodes] = np.arange(1,len(sub_nodes)+1, dtype=int) + gmesh.nodes = {(nidx+1):node for nidx, node in enumerate(sub_nodes)} + gmesh.elements = {(eidx+100): self.elements[iel].gmsh_tuple(node_map=new_for_old_nodes) for eidx, iel in enumerate(elements)} + #print(gmesh.elements) + gmesh.physical = self.gmsh_io.physical + #gmesh.write(file_path) + gmesh.normalize() + return Mesh(gmesh, "") + + # Returns field P0 values of field. + # Selects the closest time step lower than 'time'. + # TODO: we might do time interpolation + def get_p0_values(self, field_name:str, time): + field_dict = self.gmsh_io.element_data[field_name] + + # determine maximal index of time step, where times[idx] <= time + times = [v.time for v in list(field_dict.values())] + last_time_idx = bisect.bisect_right(times, time) - 1 + + values = field_dict[last_time_idx].values + value_ids = field_dict[last_time_idx].tags + value_to_el_idx = [self.el_indices[iv] for iv in value_ids] + values_mesh = np.empty_like(values) + values_mesh[value_to_el_idx[:]] = values + return values_mesh + + def get_static_p0_values(self, field_name:str): + field_dict = self.gmsh_io.element_data[field_name] + assert len(field_dict) == 1 + values = field_dict[0].values + value_ids = field_dict[0].tags + value_to_el_idx = [self.el_indices[iv] for iv in value_ids] + values_mesh = np.empty_like(values) + values_mesh[value_to_el_idx[:]] = values + return values_mesh + + def get_static_p1_values(self, field_name:str): + field_dict = self.gmsh_io.node_data[field_name] + assert len(field_dict) == 1 + values = field_dict[0].values + 
value_ids = field_dict[0].tags + value_to_node_idx = [self.node_indices[iv] for iv in value_ids] + values_mesh = np.empty_like(values) + values_mesh[value_to_node_idx[:]] = values + return values_mesh + + + def write_fields(self, file_name:str, fields: Dict[str, np.array]=None) -> 'File': + self.gmsh_io.write(file_name, format="msh2") + if fields is not None: + self.gmsh_io.write_fields(file_name, self.el_ids, fields) + return file_name #File(file_name) + + + def map_regions(self, new_reg_map): + """ + Replace all (reg_id, dim) regions by the new regions. + new_reg_map: (reg_id, dim) -> new (reg_id, dim, reg_name) + return: el_id -> old_reg_id + """ + #print(self.mesh.physical) + #print(new_reg_map) + + new_els = {} + el_to_old_reg = {} + + for el_id, el in self.gmsh_io.elements.items(): + type, tags, nodes = el + tags = list(tags) + old_reg_id = tags[0] + dim = len(nodes) - 1 + old_id_dim = (old_reg_id, dim) + if old_id_dim in new_reg_map: + el_to_old_reg[self.el_indices[el_id]] = old_id_dim + reg_id, reg_dim, reg_name = new_reg_map[old_id_dim] + if reg_dim != dim: + Exception(f"Assigning region of wrong dimension: ele dim: {dim} region dim: {reg_dim}") + self.gmsh_io.physical[reg_name] = (reg_id, reg_dim) + tags[0] = reg_id + self.gmsh_io.elements[el_id] = (type, tags, nodes) + # remove old regions + id_to_reg = {id_dim: k for k, id_dim in self.gmsh_io.physical.items()} + for old_id_dim in new_reg_map.keys(): + if old_id_dim in id_to_reg: + del self.gmsh_io.physical[id_to_reg[old_id_dim]] + + el_ids = self.el_ids + self._update_elements() + assert_idx = np.random.randint(0, len(el_ids), 50) + assert all((el_ids[i] == self.el_ids[i] for i in assert_idx)) + return el_to_old_reg + + def el_dim_slice(self, dim): + i_begin = len(self.elements) + i_end = i_begin + el_type = [21, 1, 2, 4][dim] + for iel, el in enumerate(self.elements): + if el.type == el_type: + i_begin = iel + break + for iel, el in enumerate(self.elements[i_begin:], start=i_begin): + if el.type != 
el_type: + i_end = iel + break + for iel, el in enumerate(self.elements[i_end:], start=i_end): + if el.type == el_type: + raise IndexError(f"Elements of dimension {dim} does not form a slice.") + return slice(i_begin, i_end, 1) \ No newline at end of file diff --git a/tests/upscale/test_two_scale.py b/tests/upscale/test_two_scale.py new file mode 100644 index 0000000..9ecd310 --- /dev/null +++ b/tests/upscale/test_two_scale.py @@ -0,0 +1,270 @@ +""" +Test of homogenization techniquest within a two-scale problem. +- reference solution is evaluated by Flow123d, using direct rasterization of full DFN sample + The pressure field is projected to the nodes of the rectangular grid, + velocity filed is averaged over rectangular cells. + +- two-scale solution involves: + 1. homogenization of DFN to the rectangular grid ; general permeability tensor field + 2. custom 3d solver for the rectangular grid is used to solve the coarse problem + +- various homogenization techniques could be used, homogenization time is evaluated and compared. 
+""" +from typing import * +import yaml +import shutil +from pathlib import Path + +import logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger() + + +import numpy as np +import attrs +from bgem import stochastic +from bgem.gmsh import gmsh, options +from mesh_class import Mesh +from bgem.core import call_flow, dotdict, workdir as workdir_mng +from bgem.upscale import Grid, Fe +import pyvista as pv + +script_dir = Path(__file__).absolute().parent +workdir = script_dir / "sandbox" +from joblib import Memory +memory = Memory(workdir, verbose=0) + + +@attrs.define +class FracturedMedia: + dfn: List[stochastic.Fracture] + fr_cross_section: np.ndarray # shape (n_fractures,) + fr_conductivity: np.ndarray # shape (n_fractures,) + conductvity: float + + @staticmethod + def fracture_cond_params(dfn, unit_cross_section, bulk_conductivity): + # unit_cross_section = 1e-4 + viscosity = 1e-3 + gravity_accel = 10 + density = 1000 + permeability_factor = 1 / 12 + permeability_to_conductivity = gravity_accel * density / viscosity + # fr cond r=100 ~ 80 + # fr cond r=10 ~ 0.8 + fr_r = np.array([fr.r for fr in dfn]) + fr_cross_section = unit_cross_section * fr_r + fr_cond = permeability_to_conductivity * permeability_factor * fr_r ** 2 + return FracturedMedia(dfn, fr_cross_section, fr_cond, bulk_conductivity) + +def fracture_set(seed, size_range, max_frac = None): + rmin, rmax = size_range + box_dimensions = (rmax, rmax, rmax) + with open(script_dir/"fractures_conf.yaml") as f: + pop_cfg = yaml.load(f, Loader=yaml.SafeLoader) + fr_pop = stochastic.Population.initialize_3d(pop_cfg, box_dimensions) + fr_pop.set_sample_range([rmin, rmax], sample_size=max_frac) + print(f"fr set range: {[rmin, rmax]}, fr_lim: {max_frac}, mean population size: {fr_pop.mean_size()}") + pos_gen = stochastic.UniformBoxPosition(fr_pop.domain) + np.random.seed(seed) + fractures = fr_pop.sample(pos_distr=pos_gen, keep_nonempty=True) + for fr in fractures: + fr.region = 
gmsh.Region.get("fr", 2) + return fractures + +def create_fractures_rectangles(gmsh_geom, fractures, base_shape: gmsh.ObjectSet, + shift = np.array([0,0,0])): + # From given fracture date list 'fractures'. + # transform the base_shape to fracture objects + # fragment fractures by their intersections + # return dict: fracture.region -> GMSHobject with corresponding fracture fragments + if len(fractures) == 0: + return [] + + shapes = [] + for i, fr in enumerate(fractures): + shape = base_shape.copy() + print("fr: ", i, "tag: ", shape.dim_tags) + shape = shape.scale([fr.rx, fr.ry, 1]) \ + .rotate(axis=[0,0,1], angle=fr.shape_angle) \ + .rotate(axis=fr.rotation_axis, angle=fr.rotation_angle) \ + .translate(fr.center + shift) \ + .set_region(fr.region) + + shapes.append(shape) + + fracture_fragments = gmsh_geom.fragment(*shapes) + return fracture_fragments + + +def ref_solution_mesh(work_dir, domain_dimensions, fractures, fr_step, bulk_step): + factory = gmsh.GeometryOCC("homo_cube", verbose=True) + gopt = options.Geometry() + gopt.Tolerance = 0.0001 + gopt.ToleranceBoolean = 0.001 + box = factory.box(domain_dimensions) + + fractures = create_fractures_rectangles(factory, fractures, factory.rectangle()) + fractures_group = factory.group(*fractures).intersect(box) + box_fr, fractures_fr = factory.fragment(box, fractures_group) + fractures_fr.mesh_step(fr_step) #.set_region("fractures") + objects = [box_fr, fractures_fr] + factory.write_brep(str(work_dir / factory.model_name) ) + #factory.mesh_options.CharacteristicLengthMin = cfg.get("min_mesh_step", cfg.boreholes_mesh_step) + factory.mesh_options.CharacteristicLengthMax = bulk_step + #factory.mesh_options.Algorithm = options.Algorithm3d.MMG3D + + # mesh.Algorithm = options.Algorithm2d.MeshAdapt # produce some degenerated 2d elements on fracture boundaries ?? 
+ # mesh.Algorithm = options.Algorithm2d.Delaunay + # mesh.Algorithm = options.Algorithm2d.FrontalDelaunay + + factory.mesh_options.Algorithm = options.Algorithm3d.Delaunay + #mesh.ToleranceInitialDelaunay = 0.01 + # mesh.ToleranceEdgeLength = fracture_mesh_step / 5 + #mesh.CharacteristicLengthFromPoints = True + #factory.mesh_options.CharacteristicLengthFromCurvature = False + #factory.mesh_options.CharacteristicLengthExtendFromBoundary = 2 # co se stane if 1 + #mesh.CharacteristicLengthMin = min_el_size + #mesh.CharacteristicLengthMax = max_el_size + + #factory.keep_only(*objects) + #factory.remove_duplicate_entities() + factory.make_mesh(objects, dim=3) + #factory.write_mesh(me gmsh.MeshFormat.msh2) # unfortunately GMSH only write in version 2 format for the extension 'msh2' + f_name = work_dir / (factory.model_name + ".msh2") + factory.write_mesh(str(f_name), format=gmsh.MeshFormat.msh2) + return f_name + +def fr_cross_section(fractures, cross_to_r): + return [cross_to_r * fr.r for fr in fractures] + + +def fr_field(mesh, fractures, fr_values, bulk_value): + fr_map = mesh.fr_map(fractures) # np.array of fracture indices of elements, n_frac for nonfracture elements + fr_values_ = np.concatenate(( + np.array(fr_values), + np.atleast_1d(bulk_value))) + return fr_values_[fr_map] + + + + +# def velocity_p0(grid_step, min_corner, max_corner, mesh, values): +# """ +# Pressure P1 field projection +# - P0 pressure in barycenters +# - use interpolation to construct P1 Structured grid +# Interpolate: discrete points -> field, using RBF placed at points +# Sample: field -> nodal data, evaluate nodes in source field +# +# Velocity P0 projection +# 1. get array of barycenters and element volumes +# 2. ijk cell coords of each source point +# 3. weights = el_volume / np.add.at(cell_vol_sum, ijk, el_volume)[ijk[:]] +# 4. 
np.add.at(cell_velocities, ijk, weights * velocities) +# :return: +# """ +# pass + +@memory.cache +def reference_solution(fr_media, target_grid, bc_gradient): + domain_dimensions = target_grid.size + dfn = fr_media.dfn + bulk_conductivity = fr_media.conductivity + + workdir = script_dir / "sandbox" + workdir.mkdir(parents=True, exist_ok=True) + + # Input crssection and conductivity + mesh_file = ref_solution_mesh(workdir, domain_dimensions, dfn, fr_step=10, bulk_step=10) + full_mesh = Mesh.load_mesh(mesh_file, heal_tol = 0.001) # gamma + fields = dict( + conductivity=fr_field(full_mesh, dfn, fr_media.fr_conductivity, bulk_conductivity), + cross_section=fr_field(full_mesh, dfn, fr_media.fr_cross_section, 1.0) + ) + cond_file = full_mesh.write_fields(str(workdir / "input_fields.msh2"), fields) + cond_file = Path(cond_file) + cond_file = cond_file.rename(cond_file.with_suffix(".msh")) + # solution + flow_cfg = dotdict( + flow_executable=[ + "/home/jb/workspace/flow123d/bin/fterm", + "--no-term", +# - flow123d/endorse_ci:a785dd +# - flow123d/ci-gnu:4.0.0a_d61969 + "dbg", + "run", + "--profiler_path", + "profile" + ], + mesh_file=cond_file, + pressure_grad=bc_gradient + ) + f_template = "flow_upscale_templ.yaml" + shutil.copy( (script_dir / f_template), workdir) + with workdir_mng(workdir): + flow_out = call_flow(flow_cfg, f_template, flow_cfg) + + # Project to target grid + print(flow_out) + #vel_p0 = velocity_p0(target_grid, flow_out) + # projection of fields + return flow_out + +def project_ref_solution(flow_out, grid: Grid): + # Velocity P0 projection + # 1. get array of barycenters (source points) and element volumes of the fine mesh + # 2. ijk cell coords of each source point + # 3. weights = el_volume / np.add.at(cell_vol_sum, ijk, el_volume)[ijk[:]] + # 4. 
np.add.at(cell_velocities, ijk, weights * velocities) + # :return: + pvd_content = pv.get_reader(flow_out.hydro.spatial_file.path) + pvd_content.set_active_time_point(0) + dataset = pvd_content.read()[0] # Take first block of the Multiblock dataset + cell_centers_coords = dataset.cell_centers().points + grid_min_corner = -grid.size / 2 + centers_ijk_grid = (cell_centers_coords - grid_min_corner) // grid.step[None, :] + centers_ijk_grid = centers_ijk_grid.astype(np.int32) + assert np.alltrue(centers_ijk_grid < grid.n_steps[None, :]) + + grid_cell_idx = centers_ijk_grid[:, 0] + grid.n_steps[0] * (centers_ijk_grid[:, 1] + grid.n_steps[1]*centers_ijk_grid[:, 2]) + sized = dataset.compute_cell_sizes() + cell_volume = np.abs(sized.cell_data["Volume"]) + grid_sum_cell_volume = np.zeros(grid.n_elements) + np.add.at(grid_sum_cell_volume, grid_cell_idx, cell_volume) + weights = cell_volume[:] / grid_sum_cell_volume[grid_cell_idx[:]] + velocities = dataset.cell_data['velocity_p0'] + grid_velocities = np.zeros((grid.n_elements, 3)) + wv = weights[:, None] * velocities + for ax in [0, 1, 2]: + np.add.at(grid_velocities[:, ax], grid_cell_idx, wv[:, ax]) + + return grid_velocities.reshape( (*grid.n_steps, 3)) + + +def homogenize(fr_media: FracturedMedia, grid:Grid): + """ + Homogenize fr_media to the conductivity tensor field on grid. 
+ :return: conductivity_field, np.array, shape (n_elements, n_voight) + """ + + + +def test_two_scale(): + # Fracture set + domain_size = 100 + fr_range = (30, domain_size) + dfn = fracture_set(123, fr_range) + fr_media = FracturedMedia.fracture_cond_params(dfn, 1e-4, 0.1) + + + # Coarse Problem + steps = (10, 2, 5) + grid = Grid(domain_size, steps, Fe.Q1(dim=3)) + bc_pressure_gradient = [1, 0, 0] + + flow_out = reference_solution(fr_media, grid, bc_pressure_gradient) + ref_velocity_grid = project_ref_solution(flow_out, grid) + + grid_permitivity = homogenize(fr_media, grid) + pressure_field = grid.solve_system(grid_permitivity, bc_pressure_gradient) \ No newline at end of file From 25fd13b9e1a49c7a393854aa5d659ba34cb54747 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Wed, 3 Apr 2024 20:47:09 +0200 Subject: [PATCH 06/34] Working anisotropic homogenization, test_rasterize --- src/bgem/upscale/voxelize.py | 383 ++++++++++++++++++++++++++ tests/upscale/decovalex_dfnmap.py | 440 ++++++++++++++++++++++++++++++ tests/upscale/test_rasterize.py | 161 +++++++++++ 3 files changed, 984 insertions(+) create mode 100644 src/bgem/upscale/voxelize.py create mode 100644 tests/upscale/decovalex_dfnmap.py create mode 100644 tests/upscale/test_rasterize.py diff --git a/src/bgem/upscale/voxelize.py b/src/bgem/upscale/voxelize.py new file mode 100644 index 0000000..c9f7ce8 --- /dev/null +++ b/src/bgem/upscale/voxelize.py @@ -0,0 +1,383 @@ +from typing import * +import math + +import attrs +from functools import cached_property +from bgem.stochastic import Fracture +import numpy as np +""" +Voxelization of fracture network. +Task description: +Input: List[Fracutres], DFN sample, fractures just as gemoetric objects. 
+Output: Intersection arrays: cell_idx, fracture_idx, intersection volume estimate +(taking fr aperture and intersectoin area into account) + +Possible approaches: + +- for each fracture -> AABB -> loop over its cells -> intersection test -> area calculation +- Monte Carlo : random points on each fracture (N ~ r**2), count point numbers in each cell, weights -> area/volume estimate +""" + + + +""" +TODO: +1. Port decovalex solution as the basic building block + - i.e. contributions to full tensor on cells intersectiong the fracture. + - Conservative homogenization, slow. + + 1. AABB grid -> centers of cells within AABB of fracture + 2. Fast selection of active cells: centers_distance < tol + 3. For active cells detect intersection with plane. + - active cells corners -> projection to fr plane + - detect nodes in ellipse, local system + - alternative function to detect nodes within n-polygon + 4. output: interacting cells, cell cords in local system, optiaonly -> estimate of intersection surface + 5. rasterized full tenzor: + - add to all interacting cells + - add multiplied by surface dependent coefficient + + +2. Direct homogenization test, with at least 2 cells across fracture. + Test flow with rasterized fractures, compare different homogenization routines + ENOUGH FOR SURAO + +3. For cells in AABB compute distance in parallel, simple fabric tenzor homogenization. + Possibly faster due to vectorization, possibly more precise for thin fractures. +4. Comparison of conservative homo + direct upscaling on fine grid with fabric homogenization. + +5. Improved determination of candidate cells and distance, differential algorithm. 
+ +""" + +class EllipseShape: + def is_point_inside(self, x, y): + return x**2 + y**2 <= 1 + + def are_points_inside(self, points): + sq = points ** 2 + return sq[:, 0] + sq[:, 1] <= 1 + +def test_EllipseShape(): + ellipse = EllipseShape(6) # Create a hexagon for testing + + # Points to test + inside_point = (0.5, 0.5) + edge_point = (np.cos(np.pi / 6), np.sin(np.pi / 6)) # A point exactly on the edge + outside_point = (1.5, 1.5) + + # Test for a point inside + assert ellipse.is_point_inside(*inside_point), "The point should be inside the ellipse" + + # Test for a point on the edge + # Depending on your implementation's edge handling, this could be either True or False + # Here we assume points on edges are considered inside + assert ellipse.is_point_inside(*edge_point), "The point should be considered inside the ellipse" + + # Test for a point outside + assert not ellipse.is_point_inside(*outside_point), "The point should be outside the ellipse" + + # Test points: inside, on edge, outside + points = np.array([ + [0.5, 0.5], # Inside + [-0.5, -0.5], # Inside + [0.86, 0.5], # On edge (approximately, considering rounding) + [1, 1], # Outside + [0, 0] # Centrally inside + ]) + + expected_results = np.array([ + True, # Inside + True, # Inside + False, # On edge, but due to precision, might be considered outside + False, # Outside + True # Centrally inside + ]) + + actual_results = ellipse.are_points_inside(points) + np.testing.assert_array_equal(actual_results, expected_results, + "The are_points_inside method failed to accurately determine if points are inside the ellipse.") + + +class PolygonShape: + def __init__(self, N): + """ + Initializes a RegularPolygon instance for an N-sided polygon. + + Args: + - N: Number of sides of the regular polygon. 
+ """ + self.N = N + self.theta_segment = 2 * math.pi / N # Angle of each segment + self.R_inscribed = math.cos(self.theta_segment / 2) # Radius of inscribed circle for R=1 + + def is_point_inside(self, x, y): + """ + Tests if a point (x, y) is inside the regular N-sided polygon. + + Args: + - x, y: Coordinates of the point to test. + + Returns: + - True if the point is inside the polygon, False otherwise. + """ + r = math.sqrt(x**2 + y**2) # Convert point to polar coordinates (radius) + theta = math.atan2(y, x) # Angle in polar coordinates + + # Compute the reminder of the angle and the x coordinate of the reminder point + theta_reminder = theta % self.theta_segment + x_reminder = math.cos(theta_reminder) * r + + # Check if the x coordinate of the reminder point is less than + # the radius of the inscribed circle (for R=1) + return x_reminder <= self.R_inscribed + + def are_points_inside(self, points): + """ + Tests if points in a NumPy array are inside the regular N-sided polygon. + Args: + - points: A 2D NumPy array of shape (M, 2), where M is the number of points + and each row represents a point (x, y). + Returns: + - A boolean NumPy array where each element indicates whether the respective + point is inside the polygon. 
+ """ + r = np.sqrt(points[:, 0]**2 + points[:, 1]**2) + theta = np.arctan2(points[:, 1], points[:, 0]) + theta_reminder = theta % self.theta_segment + x_reminder = np.cos(theta_reminder) * r + return x_reminder <= self.R_inscribed + +def test_PolyShape(): + polygon = PolygonShape(6) # Create a hexagon for testing + + # Points to test + inside_point = (0.5, 0.5) + edge_point = (np.cos(np.pi / 6), np.sin(np.pi / 6)) # A point exactly on the edge + outside_point = (1.5, 1.5) + + # Test for a point inside + assert polygon.is_point_inside(*inside_point), "The point should be inside the polygon" + + # Test for a point on the edge + # Depending on your implementation's edge handling, this could be either True or False + # Here we assume points on edges are considered inside + assert polygon.is_point_inside(*edge_point), "The point should be considered inside the polygon" + + # Test for a point outside + assert not polygon.is_point_inside(*outside_point), "The point should be outside the polygon" + + # Test points: inside, on edge, outside + points = np.array([ + [0.5, 0.5], # Inside + [-0.5, -0.5], # Inside + [0.86, 0.5], # On edge (approximately, considering rounding) + [1, 1], # Outside + [0, 0] # Centrally inside + ]) + + expected_results = np.array([ + True, # Inside + True, # Inside + False, # On edge, but due to precision, might be considered outside + False, # Outside + True # Centrally inside + ]) + + actual_results = polygon.are_points_inside(points) + np.testing.assert_array_equal(actual_results, expected_results, + "The are_points_inside method failed to accurately determine if points are inside the polygon.") + + +@attrs.define +class FractureVoxelize: + """ + Auxiliary class with intersection of fractures with a (structured, rectangular) grid. + The class itslef could be used for any types of elements, but the supported voxelization algorithms + are specific for the uniform rectangular grid, allowing different step for each of X, Y, Z directions. 
+ + The intersections could be understood as a sparse matrix for computing cell scalar property as: + i - grid index, j - fracture index + grid_property[i] = (1 - sum_j intersection[i, j]) * bulk_property[i] + sum_j intersection[i, j] * fr_property[j] + + The sparse matrix 'intersection' is formed in terms of the triplex lists: cell_id, fracture_id, volume. + It actualy is intersection_volume[i,j] / cell_volume[i] , the cell_volume is minimum of the volume of the i-th cell + and sum of volumes of the intersectiong fracutres. + + The cached properties for the bulk weight vector and fracture interpolation sparse matrix for efficient multiplication + are provided. + """ + grid: 'Grid' # Any grid composed of numbered cells. + cell_ids: List[int] # For each intersection the cell id. + fr_ids: List[int] # For each intersection the fracture id. + volume: List[float] # For each intersection the intersection fracture volume estimate. + + + + @cached_property + def cell_fr_sums(self): + cell_sums = np.zeros(, dtype=np.float64) + + + def project_property(self, fr_property, bulk_property): + + +class FractureBoundaries3d: + @staticmethod + def build(polygons): + n_fractures, n_points, dim = polygons + assert dim == 3 + assert n_points % 2 == 0 + + # Get AABB and sort coordinates from largest to smallest + aabb_min = polygons.min(axis=1) + aabb_max = polygons.max(axis=1) + aabb_ptp = aabb_max - aabb_min + axes_sort = np.argsort(-aabb_ptp, axis=1) + aabb_min_sort = aabb_min[:, axes_sort] + aabb_max_sort = aabb_max[:, axes_sort] + polygons_sort = polygons[:, :, axes_sort] + # for evary fracture get sequence of points from >=X_min to (aabb_min_sort[:, 1] + aabb_min_sort[:, 1]) / 2 + + # half of points + 1 to get the end point as well. + # We get other half by central symmetry. 
+ selected_indices = (argmin_X[:, None] + np.arange(n_points // 2 + 1)[None, :]) % n_points + + o_grid = np.ogrid[:n_fractures, :3] + all_fractures = np.arange(n_fractures)[:, None, None] + all_dims = np.arange(3)[None, None, :] + half_arc = polygons[all_fractures, selected_indices[:, :, None], all_dims] + """ + 1. Use half arc to generate Y ranges in the X range. + This produces variable size arrays and could not be implemented in Numpy efficeintly. + Use classical loop over fractures and over lines. Generate list of XY celles, compute estimate of XY projection, + interior cells and list of boundary cells. + Interior cells - use normal, Z distance from center, and fracture aperture + to determine tensor contribution, multiply by XY projection for the boundary cells. + """ + + +def form_table(): + pass + + +def unit_area_tab(x, y, z_slack): + """ + Assume 1 > x > y > 0. + 1 > z_slack > 0 + :return: approx area of intersection of fracture plane in distance z_slack from origin + + """ + + +def tensor_contribution(normal, slack, slack_axis, aperture): + """ + Compute contribution to the cell equivalent/fabric tensor. + We assume aperture et most 1/10 of min cell dimension. + + normal - fracture normal vector + slack - vector from cel center to fracture with single nonzero component, the minimum one. + should be relatively close to normal (up to orientation) + angle to normal on unit cube at most 50degs + aperture of the fracture + :return: 3d fabric tensor + + 1. scale to unit cell + 2. approx surface of intersection on unit cell + 3. scale surface back + 3. tn = surf * apperture / 1 * (n otimes n) + 4. scale back + + === + - We will scale whole coord system to have unit cells, possibly scaling back individual equivalent tensors + or fracture eq. tensors. + This will guarantee that slack direction is that max. component of the normal. 
+ """ + normal_reminder = np.abs(np.delete(normal, slack_axis)) / normal[slack_axis] + normal_rel_max = np.max(normal_reminder) + normal_rel_min = np.min(normal_reminder) + area = unit_area_tab(normal_rel_max, normal_rel_min, slack) + rel_area = aperture * area / np.dot(normal, cell) + tn = rel_area * normal[:, None] * normal[None, :] + return tn + + + +__rel_corner = np.array([[0, 0, 0], [1, 0, 0], + [1, 1, 0], [0, 1, 0], + [0, 0, 1], [1, 0, 1], + [1, 1, 1], [0, 1, 1]]) + +def intersect_cell(loc_corners: np.array, ellipse: Fracture) -> bool: + """ + loc_corners - shape (3, 8) + """ + # check if cell center is inside radius of fracture + center = np.mean(loc_corners, axis=1) + if np.sum(np.square(center[0:2] / ellipse.radius)) >= 1: + return False + + # cell center is in ellipse + # find z of cell corners in xyz of fracture + + if np.min(loc_corners[2, :]) >= 0. or np.max(loc_corners[2, :]) <= 0.: + # All coreners on one side => no intersection. + return False + + return True + +def fracture_for_ellipse(grid: Grid, i_ellipse: int, ellipse: Ellipse) -> Fracture: + # calculate rotation matrix for use later in rotating coordinates of nearby cells + direction = np.cross([0,0,1], ellipse.normal) + #cosa = np.dot([0,0,1],normal)/(np.linalg.norm([0,0,1])*np.linalg.norm(normal)) #frobenius norm = length + #above evaluates to normal[2], so: + angle = np.arccos(ellipse.normal[2]) # everything is in radians + mat_to_local = tr.rotation_matrix(angle, direction)[:3, :3].T + #find fracture in domain coordinates so can look for nearby cells + b_box_min = ellipse.translation - np.max(ellipse.radius) + b_box_max = ellipse.translation + np.max(ellipse.radius) + + i_box_min = grid.cell_coord(b_box_min) + i_box_max = grid.cell_coord(b_box_max) + 1 + axis_ranges = [range(max(0, a), min(b, n)) for a, b, n in zip(i_box_min, i_box_max, grid.cell_dimensions)] + + grid_cumul_prod = np.array([1, grid.cell_dimensions[0], grid.cell_dimensions[0] * grid.cell_dimensions[1]]) + cells = [] + # X 
fastest running + for ijk in itertools.product(*reversed(axis_ranges)): + # make X the first coordinate + ijk = np.flip(np.array(ijk)) + corners = grid.origin[None, :] + (ijk[None, :] + __rel_corner[:, :]) * grid.step[None, :] + loc_corners = mat_to_local @ (corners - ellipse.translation).T + if intersect_cell(loc_corners, ellipse): + logging.log(logging.DEBUG, f" cell {ijk}") + cell_index = ijk @ grid_cumul_prod + cells.append(cell_index) + if len(cells) > 0: + logging.log(logging.INFO, f" #{i_ellipse} fr, {len(cells)} cell intersections") + return Fracture(ellipse, cells) + +def map_dfn(grid, ellipses): + '''Identify intersecting fractures for each cell of the ECPM domain. + Extent of ECPM domain is determined by nx,ny,nz, and d (see below). + ECPM domain can be smaller than the DFN domain. + Return numpy array (fracture) that contains for each cell: + number of intersecting fractures followed by each intersecting fracture id. + + ellipses = list of dictionaries containing normal, translation, xrad, + and yrad for each fracture + origin = [x,y,z] float coordinates of lower left front corner of DFN domain + nx = int number of cells in x in ECPM domain + ny = int number of cells in y in ECPM domain + nz = int number of cells in z in ECPM domain + step = [float, float, float] discretization length in ECPM domain + + JB TODO: allow smaller ECPM domain + ''' + logging.log(logging.INFO, f"Callculating Fracture - Cell intersections ...") + return [fracture_for_ellipse(grid, ie, ellipse) for ie, ellipse in enumerate(ellipses)] + + diff --git a/tests/upscale/decovalex_dfnmap.py b/tests/upscale/decovalex_dfnmap.py new file mode 100644 index 0000000..8448ee0 --- /dev/null +++ b/tests/upscale/decovalex_dfnmap.py @@ -0,0 +1,440 @@ +import csv +import itertools +import logging +import time +from pathlib import Path +from typing import * + +import attrs +import numpy as np + +#from . 
import decovalec_transform as tr + + +def float_array(x: List[float]) -> np.array: + return np.array(x, dtype=float) + + +def int_array(x: List[float]) -> np.array: + return np.array(x, dtype=int) + + +@attrs.define +class StructuredGrid: + """ + General tensor product grid. + Steps can vary in ecach dimension. + """ + origin: np.array # 3d point + steps: np.array # list of step arrays in each axis, array [steps_x, steps_y, steps_z] [m] + + @property + def cell_dimensions(self): + # [nx, ny, nz], int, number of cells in XYZ + return np.array([len(axis_steps) for axis_steps in self.steps]) + + +@attrs.define +class Grid: + @classmethod + def make_grid(cls, origin, step, dimensions): + s = np.array(step) + d = np.array(dimensions) + cell_dim = (d / s).astype(int) + dim = s * cell_dim + return cls(np.array(origin), s, d, cell_dim) + + origin: np.array # 3d point + step: np.array # [step_x, step_y, step_z] [m] + dimensions: np.array # [lx, ly, lz] [m] + cell_dimensions: np.array # [nx, ny, nz], int, number of cells in XYZ + + def cell_coord(self, x: np.array) -> np.array: + """ + Cell multiindex containing point 'x'. + or None if does not exist. + """ + idx = (x - self.origin) / self.step + return idx.astype(int) + + def trim(self, x): + """ + Project point ot of the grid to its surface. 
+ """ + x = np.maximum(x, self.origin + self.step / 2) + outer_corner = self.origin + self.dimensions - self.step / 2 + x = np.minimum(x, outer_corner) + return x + + def make_xyz_field(self): + return [o + s * np.arange(0, n + 1) for o, n, s in zip(self.origin, self.cell_dimensions, self.step)] + + +@attrs.define +class Ellipse: + normal: np.array = attrs.field(converter=float_array) + # 3D normal vector + translation: np.array = attrs.field(converter=float_array) + # 3D translation vector + radius: np.array = attrs.field(converter=float_array) + # (x_radius, y_ radius) before transformation + + +@attrs.define +class Fracture: + ellipse: Ellipse + cells: List[int] + + +__radiifile = 'radii.dat' +__normalfile = 'normal_vectors.dat' +__transfile = 'translations.dat' +__permfile = 'perm.dat' +__aperturefile = 'aperture.dat' + + +def read_dfn_file(f_path): + with open(f_path, 'r') as file: + rdr = csv.reader(filter(lambda row: row[0] != '#', file), delimiter=' ', skipinitialspace=True) + return [row for row in rdr] + + + +def readEllipse(workdir: Path, ): + '''Read dfnWorks-Version2.0 output files describing radius, orientation, and + location of fractures in space. + Subsequent methods assume elliptical (in fact circular) fractures. + Return a list of dictionaries describing each ellipse. 
+ ''' + + radii = np.array(read_dfn_file(workdir / __radiifile), dtype=float) + shape_family = radii[:, 2] + radii = radii[:, 0:2] + assert radii.shape[1] == 2 + # with open(radiifile,'r') as f: + # radii = f.readlines() #two lines to get rid of at top of file + # radii.pop(0) + # radii.pop(0) + normals = np.array(read_dfn_file(workdir / __normalfile), dtype=float) + assert normals.shape[1] == 3 + # with open(normalfile, 'r') as f: + # normals = f.readlines() #no lines to get rid of at top of file + translations = np.array([t for t in read_dfn_file(workdir / __transfile) if t[-1] != 'R'], dtype=float) + assert translations.shape[1] == 3 + # with open(transfile, 'r') as f: + # temp = f.readlines() #one line to get rid of at top of file + # temp.pop(0) + # translations = [] + # for line in temp: + # if line.split()[-1] != 'R': + # translations.append(line) + ellipses = [Ellipse(np.array(n), np.array(t), np.array(r)) for n, t, r in zip(normals, translations, radii)] + return ellipses + + +def fr_transmissivity_apperture(workdir): + '''Read dfnWorks-Version2.0 output files describing fracture aperture and + permeability. + Return list containing transmissivity for each fracture. + ''' + + # from both files use just third column + permeability = np.array(read_dfn_file(workdir / __permfile), dtype=float)[:, 3] + apperture = np.array(read_dfn_file(workdir / __aperturefile), dtype=float)[:, 3] + return permeability * apperture, apperture + + +__rel_corner = np.array([[0, 0, 0], [1, 0, 0], + [1, 1, 0], [0, 1, 0], + [0, 0, 1], [1, 0, 1], + [1, 1, 1], [0, 1, 1]]) + + +def intersect_cell(loc_corners: np.array, ellipse: Ellipse) -> bool: + """ + loc_corners - shape (3, 8) + """ + # check if cell center is inside radius of fracture + center = np.mean(loc_corners, axis=1) + if np.sum(np.square(center[0:2] / ellipse.radius)) >= 1: + return False + + # cell center is in ellipse + # find z of cell corners in xyz of fracture + + if np.min(loc_corners[2, :]) >= 0. 
or np.max(loc_corners[2, :]) <= 0.: # fracture lies in z=0 plane + # fracture intersects that cell + return False + + return True + + +def fracture_for_ellipse(grid: Grid, i_ellipse: int, ellipse: Ellipse) -> Fracture: + # calculate rotation matrix for use later in rotating coordinates of nearby cells + normal = ellipse.normal /np.linalg.norm(ellipse.normal) + ellipse.normal = normal + direction = np.cross([0, 0, 1], normal) + # cosa = np.dot([0,0,1],normal)/(np.linalg.norm([0,0,1])*np.linalg.norm(normal)) #frobenius norm = length + # above evaluates to normal[2], so: + angle = np.arccos(normal[2]) # everything is in radians + + from bgem.transform import Transform + # rotation is from ez to normal, local system to ambient system + local_to_ambient = Transform().rotate(direction, angle).matrix[:3, :3] + mat_to_local = local_to_ambient.T + #mat_to_local = tr.rotation_matrix(angle, direction)[:3, :3].T + # find fracture in domain coordinates so can look for nearby cells + b_box_min = ellipse.translation - np.max(ellipse.radius) + b_box_max = ellipse.translation + np.max(ellipse.radius) + + i_box_min = grid.cell_coord(b_box_min) + i_box_max = grid.cell_coord(b_box_max) + 1 + axis_ranges = [range(max(0, a), min(b, n)) for a, b, n in zip(i_box_min, i_box_max, grid.cell_dimensions)] + + grid_cumul_prod = np.array([1, grid.cell_dimensions[0], grid.cell_dimensions[0] * grid.cell_dimensions[1]]) + cells = [] + # X fastest running + for kji in itertools.product(*reversed(axis_ranges)): + # make X the first coordinate + ijk = np.flip(np.array(kji)) + corners = grid.origin[None, :] + (ijk[None, :] + __rel_corner[:, :]) * grid.step[None, :] + loc_corners = mat_to_local @ (corners - ellipse.translation).T + if intersect_cell(loc_corners, ellipse): + logging.log(logging.DEBUG, f" cell {ijk}") + cell_index = ijk @ grid_cumul_prod + cells.append(cell_index) + if len(cells) > 0: + logging.log(logging.INFO, f" #{i_ellipse} fr, {len(cells)} cell intersections") + return 
Fracture(ellipse, cells) + + +def map_dfn(grid, ellipses): + '''Identify intersecting fractures for each cell of the ECPM domain. + Extent of ECPM domain is determined by nx,ny,nz, and d (see below). + ECPM domain can be smaller than the DFN domain. + Return numpy array (fracture) that contains for each cell: + number of intersecting fractures followed by each intersecting fracture id. + + ellipses = list of dictionaries containing normal, translation, xrad, + and yrad for each fracture + origin = [x,y,z] float coordinates of lower left front corner of DFN domain + nx = int number of cells in x in ECPM domain + ny = int number of cells in y in ECPM domain + nz = int number of cells in z in ECPM domain + step = [float, float, float] discretization length in ECPM domain + + JB TODO: allow smaller ECPM domain + ''' + logging.log(logging.INFO, f"Callculating Fracture - Cell intersections ...") + return [fracture_for_ellipse(grid, ie, ellipse) for ie, ellipse in enumerate(ellipses)] + + +def arange_for_hdf5(grid: Grid, a: np.array) -> np.array: + """ + grid.cell_dimension: (nx, ny, nz) + a - array for cells indexed by: `intersect_cell` with X coordinate the fastest running (i.e. the last one) + return - array with Z coordinate be the fastest running + """ + return a.reshape(grid.cell_dimensions[::-1]).transpose([2, 1, 0]) + + +def porosity_mean(grid: Grid, fractures: List[Fracture], fr_apperture: np.array, bulk_por: float) -> np.array: + '''Calculate fracture porosity for each cell of ECPM intersected by + one or more fractures. Simplifying assumptions: + 1. each fracture crosses the cell parallel to cell faces, + 2. each fracture completely crosses the cell. + 3. 
fracture volume has porosity 1, matrix has porosity `bulk_por` + + fr_apperture: array of appertures of the `fractures` + ''' + porosity = np.zeros(grid.cell_dimensions.prod(), dtype=float) + for fr, a in zip(fractures, fr_apperture): + if fr.cells: + normal_axis = np.argmax(np.abs(fr.ellipse.normal)) + porosity[fr.cells] += a / grid.step[normal_axis] + porosity = bulk_por + porosity * (1 - bulk_por) + return arange_for_hdf5(grid, porosity) + + +def porosity_min(grid: Grid, fractures: List[Fracture], fr_apperture: np.array, bulk_por: float) -> np.array: + '''Calculate fracture porosity to match the fast transport by the pore velocity field. + one or more fractures. Simplifying assumptions: + 1. each fracture crosses the cell parallel to cell faces, + 2. each fracture completely crosses the cell. + 3. cells containing fractures have porosity given by the fractures only (not realistic, but lead to correct break-through times for advection only simulations.) + cells without fractures have matrix porosity `bulk_por` + + fr_apperture: array of appertures of the `fractures` + ''' + porosity = np.zeros(grid.cell_dimensions.prod(), dtype=float) + for fr, a in zip(fractures, fr_apperture): + if fr.cells: + normal_axis = np.argmax(np.abs(fr.ellipse.normal)) + porosity[fr.cells] += a / grid.step[normal_axis] + + porosity[porosity == 0] = bulk_por + return arange_for_hdf5(grid, porosity) + + +def permIso(grid: Grid, fractures: List[Fracture], fr_transmisivity: np.array, k_background: float) -> np.array: + '''Calculate isotropic permeability for each cell of ECPM intersected by + one or more fractures. Sums fracture transmissivities and divides by + cell length (d) to calculate cell permeability. + Assign background permeability to cells not intersected by fractures. + Returns numpy array of isotropic permeability for each cell in the ECPM. 
+ + fracture = numpy array containing number of fractures in each cell, list of fracture numbers in each cell + T = [] containing intrinsic transmissivity for each fracture + d = length of cell sides + k_background = float background permeability for cells with no fractures in them + ''' + k_iso = np.full(grid.cell_dimensions.prod(), k_background, dtype=float) + for fr, a in zip(fractures, fr_transmisivity): + normal_axis = np.argmax(np.abs(fr.ellipse.normal)) + k_iso[fr.cells] += a / grid.step[normal_axis] + return k_iso #arange_for_hdf5(grid, k_iso).flatten() + + + +def permAnisoRaw(grid: Grid, fractures: List[Fracture], fr_transmisivity: np.array, k_background: float): + '''Calculate anisotropic permeability tensor for each cell of ECPM + intersected by one or more fractures. Discard off-diagonal components + of the tensor. Assign background permeability to cells not intersected + by fractures. + Return numpy array of anisotropic permeability (3 components) for each + cell in the ECPM. 
+ + fracture = numpy array containing number of fractures in each cell, list of fracture numbers in each cell + ellipses = [{}] containing normal and translation vectors for each fracture + T = [] containing intrinsic transmissivity for each fracture + d = length of cell sides + k_background = float background permeability for cells with no fractures in them + ''' + assert len(fractures) == len(fr_transmisivity) + # quick error check + #nfrac = len(ellipses) + #if nfrac != len(T): + # print('ellipses and transmissivity contain different numbers of fractures') + # return + full_tensor = lambda n, fr_cond : fr_cond * (np.eye(3) - n[:, None] * n[None, :]) + fr_tensor = np.array([full_tensor(fr.ellipse.normal, fr_cond) for fr, fr_cond in zip(fractures, fr_transmisivity)]) + #ellipseT = np.zeros((nfrac, 3), '=f8') + #fullTensor = [] + #T_local = np.zeros((3, 3), dtype=np.float) + #t0 = time.time() + # calculate transmissivity tensor in domain coordinates for each ellipse + #for f in range(nfrac): + # normal = ellipses[f]['normal'] + # direction = np.cross(normal, [0, 0, 1]) + # angle = np.arccos(normal[2]) + # M = tr.rotation_matrix(angle, direction) + # Transpose = np.transpose(M[:3, :3]) + # T_local[0, 0] = T[f] + # T_local[1, 1] = T[f] + # permeability = 0 in local z direction of fracture + # T_domain = np.dot(np.dot(M[:3, :3], T_local), Transpose) + + # ellipseT[f][0:3] = [T_domain[0, 0], T_domain[1, 1], T_domain[2, 2]] + # fullTensor.append(T_domain) + + + #t1 = time.time() + #print('time spent calculating fracture transmissivity %f' % (t1 - t0)) + + # in case you were wondering what those off-diagonal terms are: + #t0 = time.time() + #fout = open('Ttensor.txt', 'w') + #for f in range(len(fullTensor)): + # fout.write(str(fullTensor[f])) + # fout.write('\n\n') + #fout.close() + #t1 = time.time() + #print('time spent writing fracture transmissivity %f' % (t1 - t0)) + + # calculate cell effective permeability by adding fracture k to background k + #t0 = time.time() + 
ncells = grid.cell_dimensions.prod() + k_aniso = np.full((ncells, 3, 3), k_background, dtype=np.float64) + for fr, tn in zip(fractures, fr_tensor): + normal_axis = np.argmax(np.abs(fr.ellipse.normal)) + k_aniso[fr.cells] += tn / grid.step[normal_axis] + return k_aniso #arange_for_hdf5(grid, k_iso).flatten() + + # + # for i in range(ncells): + # if fracture[i][0] != 0: + # for j in range(1, fracture[i][0] + 1): + # fracnum = fracture[i][j] + # if LUMP: # lump off diagonal terms + # # because symmetrical doesn't matter if direction of summing is correct, phew! + # k_aniso[i][0] += np.sum(fullTensor[fracnum - 1][0, :3]) / d + # k_aniso[i][1] += np.sum(fullTensor[fracnum - 1][1, :3]) / d + # k_aniso[i][2] += np.sum(fullTensor[fracnum - 1][2, :3]) / d + # else: # discard off diagonal terms (default) + # k_aniso[i][0] += ellipseT[fracnum - 1][0] / d # ellipseT is 0 indexed, fracture numbers are 1 indexed + # k_aniso[i][1] += ellipseT[fracnum - 1][1] / d + # k_aniso[i][2] += ellipseT[fracnum - 1][2] / d + # + # #t1 = time.time() + # #print('time spent summing cell permeabilities %f' % (t1 - t0)) + # + # return k_aniso + +def aniso_lump(tn_array): + return np.sum(tn_array, axis=-1)[:, None, :] * np.eye(3) + +def aniso_diag(tn_array): + return tn_array * np.eye(3)[None, :, :] + +def count_stuff(filename='mapELLIPSES.txt'): + '''Mapping DFN to ECPM can result in false connections when non-intersecting + fractures happen to intersect the same cell of the ECPM. + Make sweeping assumption that cells with 3 or more fractures in them + are more likely than cells with 2 fractures in them to contain a false + connection and count them as such. + Return counts of 1) total number of cells, 2) number of (active) cells + containing fractures, 3) number of cells containing 3 or more fractures. + + (This method could more efficiently use the fracture array returned by + map_dfn().) 
+ ''' + fin = file(filename, 'r') + cellcount = 0 + count0 = 0 + count1 = 0 + count2 = 0 + count3 = 0 + morethan4 = 0 + for line in fin: + if line.startswith('#'): + continue + elif int(line.split()[3]) == 0: + cellcount += 1 + count0 += 1 + elif int(line.split()[3]) == 1: + cellcount += 1 + count1 += 1 + elif int(line.split()[3]) == 2: + cellcount += 1 + count2 += 1 + elif int(line.split()[3]) == 3: + cellcount += 1 + count3 += 1 + elif int(line.split()[3]) >= 4: + cellcount += 1 + morethan4 += 1 + print('\n') + print('Information for %s ' % filename) + print('Total number of cells in grid %i' % cellcount) + print('Number of cells containing fractures %i' % (cellcount - count0)) + print('Percent active cells %.1f' % (100. * (float(cellcount) - float(count0)) / float(cellcount))) + print('Number of cells containing 1 fracture %i' % (count1)) + print('Number of cells containing 2 fractures %i' % (count2)) + print('Number of cells containing 3 fractures %i' % (count3)) + print('Number of cells containing 4 or more fractures %i' % (morethan4)) + print('Possible false connections %i (cells containing >= 3 fractures)' % (count3 + morethan4)) + fin.close() + count = {'cells': cellcount, 'active': (cellcount - count0), 'false': (count3 + morethan4)} + + return count + diff --git a/tests/upscale/test_rasterize.py b/tests/upscale/test_rasterize.py new file mode 100644 index 0000000..3ae7f53 --- /dev/null +++ b/tests/upscale/test_rasterize.py @@ -0,0 +1,161 @@ +""" +Test of homogenization techniquest within a two-scale problem. +- reference solution is evaluated by Flow123d, using direct rasterization of full DFN sample + The pressure field is projected to the nodes of the rectangular grid, + velocity filed is averaged over rectangular cells. + +- two-scale solution involves: + 1. homogenization of DFN to the rectangular grid ; general permeability tensor field + 2. 
custom 3d solver for the rectangular grid is used to solve the coarse problem + +- various homogenization techniques could be used, homogenization time is evaluated and compared. +""" +from typing import * +import yaml +import shutil +from pathlib import Path + +import logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger() + + +import numpy as np +import attrs +import pyvista as pv + +from bgem import stochastic +from bgem.gmsh import gmsh, options +from mesh_class import Mesh +from bgem.core import call_flow, dotdict, workdir as workdir_mng +from bgem.upscale import Grid, Fe, voigt_to_tn, tn_to_voigt +import decovalex_dfnmap as dmap + +script_dir = Path(__file__).absolute().parent +workdir = script_dir / "sandbox" +from joblib import Memory +memory = Memory(workdir, verbose=0) + + +@attrs.define +class FracturedMedia: + dfn: List[stochastic.Fracture] + fr_cross_section: np.ndarray # shape (n_fractures,) + fr_conductivity: np.ndarray # shape (n_fractures,) + conductivity: float + + @staticmethod + def constant_fr(dfn, fr_conductivity, fr_cross_section, bulk_conductivity): + fr_cond = np.full([len(dfn)], fr_conductivity) + fr_cross = np.full([len(dfn)], fr_cross_section) + return FracturedMedia(dfn, fr_cross, fr_cond, bulk_conductivity) + + @staticmethod + def cubic_law_fr(dfn, unit_cross_section, bulk_conductivity): + # unit_cross_section = 1e-4 + viscosity = 1e-3 + gravity_accel = 10 + density = 1000 + permeability_factor = 1 / 12 + permeability_to_conductivity = gravity_accel * density / viscosity + # fr cond r=100 ~ 80 + # fr cond r=10 ~ 0.8 + fr_r = np.array([fr.r for fr in dfn]) + fr_cross_section = unit_cross_section * fr_r + fr_cond = permeability_to_conductivity * permeability_factor * fr_r ** 2 + fr_cond = np.full_like(fr_r, 10) + return FracturedMedia(dfn, fr_cross_section, fr_cond, bulk_conductivity) + +def tst_fracture_set(domain): + R = 2*np.max(domain) + fr = lambda c, n : stochastic.Fracture(stochastic.SquareShape, R, c, n, 
0.0, 123, 1) + return [ + #fr([0, 0, 0.7], [0, 0, 1]), + #fr([0, 0.7, 0], [0, 1, 0]), + #fr([0.7, 0, 0], [1, 0, 0]), + #fr([0, 0, 0], [0.5, 0, 1]), + fr([0, 0, 0.7], [0, 0.5, 1]), + #fr([0, 0, 0], [0.1, 1, 1]), + #fr([0, 0, 0], [0.3, 1, 1]), + #fr([0, 0, -0.7], [0.5, 1, 1]), + fr([0, 0, -0.5], [1, 1, 1]) + ] + + +def rasterize_decovalex(dfn, grid:Grid): + ellipses = [dmap.Ellipse(fr.normal, fr.center, fr.scale) for fr in dfn] + d_grid = dmap.Grid.make_grid(grid.origin, grid.step, grid.dimensions) + return dmap.map_dfn(d_grid, ellipses) + +def homo_decovalex_(fr_media: FracturedMedia, grid:Grid): + """ + Homogenize fr_media to the conductivity tensor field on grid. + :return: conductivity_field, np.array, shape (n_elements, n_voight) + """ + ellipses = [dmap.Ellipse(fr.normal, fr.center, fr.scale) for fr in fr_media.dfn] + d_grid = dmap.Grid.make_grid(grid.origin, grid.step, grid.dimensions) + fractures = dmap.map_dfn(d_grid, ellipses) + fr_transmissivity = fr_media.fr_conductivity * fr_media.fr_cross_section + k_iso = dmap.permIso(d_grid, fractures, fr_transmissivity, fr_media.conductivity) + dmap.permAniso(d_grid, fractures, fr_transmissivity, fr_media.conductivity) + k_voigt = k_iso[:, None] * np.array([1, 1, 1, 0, 0, 0])[None, :] + return k_voigt + +def homo_decovalex(fr_media: FracturedMedia, grid:Grid, perm_fn): + """ + Homogenize fr_media to the conductivity tensor field on grid. 
+ :return: conductivity_field, np.array, shape (n_elements, n_voight) + """ + ellipses = [dmap.Ellipse(fr.normal, fr.center, fr.scale) for fr in fr_media.dfn] + d_grid = dmap.Grid.make_grid(grid.origin, grid.step, grid.dimensions) + fractures = dmap.map_dfn(d_grid, ellipses) + fr_transmissivity = fr_media.fr_conductivity * fr_media.fr_cross_section + return perm_fn(d_grid, fractures, fr_transmissivity, fr_media.conductivity) + +def homo_decovalex_iso(fr_media: FracturedMedia, grid:Grid): + perm_fn = lambda *args : dmap.permIso(*args)[:, None, None] * np.eye(3) + return homo_decovalex(fr_media, grid, perm_fn) + +def homo_decovalex_aniso_raw(fr_media: FracturedMedia, grid: Grid): + perm_fn = lambda *args : dmap.permAnisoRaw(*args) + return homo_decovalex(fr_media, grid, perm_fn) + +def homo_decovalex_aniso_diag(fr_media: FracturedMedia, grid: Grid): + perm_fn = lambda *args : dmap.aniso_diag(dmap.permAnisoRaw(*args)) + return homo_decovalex(fr_media, grid, perm_fn) + +def homo_decovalex_aniso_lump(fr_media: FracturedMedia, grid: Grid): + perm_fn = lambda *args : dmap.aniso_lump(dmap.permAnisoRaw(*args)) + return homo_decovalex(fr_media, grid, perm_fn) + +def rasterize_dfn(homo_fns): + # Fracture set + domain_size = 100 + + # Coarse Problem + steps = (10, 12, 14) + grid = Grid(domain_size, steps, Fe.Q1(dim=3), origin=-domain_size / 2) + dfn = tst_fracture_set(grid.dimensions) + fr_media = FracturedMedia.constant_fr(dfn, 10, 1, 0.01) + + xyz_range = [ np.linspace(grid.origin[ax], grid.origin[ax] + grid.dimensions[ax], grid.n_steps[ax] + 1, dtype=np.float32) + for ax in [0, 1, 2] + ] + + x, y, z = np.meshgrid(*xyz_range, indexing='ij') + pv_grid = pv.StructuredGrid(x, y, z) + #points = grid.nodes() + for name, homo_fn in homo_fns.items(): + grid_permitivity = homo_fn(fr_media, grid) + pv_grid.cell_data[name] = grid_permitivity.reshape(-1, 9) + pv_grid.save(str(workdir / "test_resterize.vtk")) + + +def test_reasterize(): + homo_fns=dict( + k_deco_iso=homo_decovalex_iso, 
+ k_deco_aniso_raw=homo_decovalex_aniso_raw, + k_deco_aniso_diag=homo_decovalex_aniso_diag, + k_deco_aniso_lump=homo_decovalex_aniso_lump + ) + rasterize_dfn(homo_fns) From 489aa0003a5393ab6cc500477a57878908f9845d Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Wed, 3 Apr 2024 22:16:17 +0200 Subject: [PATCH 07/34] Minor improvements and fixes. --- src/bgem/stochastic/fracture.py | 278 +++----------------------------- src/bgem/transform.py | 25 +-- src/bgem/upscale/__init__.py | 3 +- src/bgem/upscale/fem.py | 75 ++++++--- 4 files changed, 91 insertions(+), 290 deletions(-) diff --git a/src/bgem/stochastic/fracture.py b/src/bgem/stochastic/fracture.py index a915514..f07c7c7 100644 --- a/src/bgem/stochastic/fracture.py +++ b/src/bgem/stochastic/fracture.py @@ -133,7 +133,11 @@ def rotation_axis(self): return _rotation_axis def axis_angle(self): - axis_angle = normal_to_axis_angle(self.normal)[0,:] + axis, angle = normal_to_axis_angle(self.normal) + return axis, angle + + def axis_angles(self): + axis_angle = normals_to_axis_angles([self.normal])[0,:] _rotation_axis = axis_angle[0:3] _rotation_angle = axis_angle[3] return _rotation_axis, _rotation_angle @@ -306,7 +310,23 @@ def normal_to_axis_angle(normal): ## todo """ z_axis = np.array([0, 0, 1], dtype=float) - norms = normal / np.linalg.norm(normal, axis=1)[:, None] + norms = normal / np.linalg.norm(normal) + cos_angle = np.dot(norms, z_axis) + angle = np.arccos(cos_angle) + # sin_angle = np.sqrt(1-cos_angle**2) + + axis = np.cross(z_axis, norms) + ax_norm = max(np.linalg.norm(axis), 1e-200) + axis = axis / ax_norm + #return axes, angles + return axis, angle + +def normals_to_axis_angles(normals): ## todo + """ + + """ + z_axis = np.array([0, 0, 1], dtype=float) + norms = normals / np.linalg.norm(normals, axis=1)[:, None] cos_angle = norms @ z_axis angles = np.arccos(cos_angle) # sin_angle = np.sqrt(1-cos_angle**2) @@ -317,6 +337,7 @@ def normal_to_axis_angle(normal): ## todo #return axes, angles return 
np.concatenate([axes, angles[:, None]], axis=1) + def rotate(vectors, axis=None, angle=0.0, axis_angle=None): """ Rotate given vector around given 'axis' by the 'angle'. @@ -451,7 +472,7 @@ def sample_normal(self, size=1): """ raw_normals = self._sample_standard_fisher(size) mean_norm = self._mean_normal() - axis_angle = normal_to_axis_angle(mean_norm[None, :]) + axis_angle = normals_to_axis_angles(mean_norm[None, :]) return rotate(raw_normals, axis_angle=axis_angle[0]) @@ -978,7 +999,7 @@ def sample(self, pos_distr=None, keep_nonempty=False) -> List[Fracture]: for r, normal, sa in zip(diams, fr_normals, shape_angle): #axis, angle = aa[:3], aa[3] center = pos_distr.sample() - fractures.append(Fracture(self.shape_class, r, center, normal[None,:], sa, name, ifam, 1)) + fractures.append(Fracture(self.shape_class, r, center, normal, sa, name, ifam, 1)) return fractures @@ -1077,255 +1098,6 @@ def unit_square_vtxs(): -class Fractures: - # regularization of 2d fractures - def __init__(self, fractures, epsilon): - self.epsilon = epsilon - self.fractures = fractures - self.points = [] - self.lines = [] - self.pt_boxes = [] - self.line_boxes = [] - self.pt_bih = None - self.line_bih = None - self.fracture_ids = [] - # Maps line to its fracture. 
- - self.make_lines() - self.make_bihs() - - def make_lines(self): - # sort from large to small fractures - self.fractures.sort(key=lambda fr:fr.rx, reverse=True) - base_line = np.array([[-0.5, 0, 0], [0.5, 0, 0]]) - for i_fr, fr in enumerate(self.fractures): - line = FisherOrientation.rotate(base_line * fr.rx, np.array([0, 0, 1]), fr.shape_angle) - line += fr.center - i_pt = len(self.points) - self.points.append(line[0]) - self.points.append(line[1]) - self.lines.append((i_pt, i_pt+1)) - self.fracture_ids.append(i_fr) - - def get_lines(self, fr_range): - lines = {} - fr_min, fr_max = fr_range - for i, (line, fr) in enumerate(zip(self.lines, self.fractures)): - if fr_min <= fr.rx < fr_max: - lines[i] = [self.points[p][:2] for p in line] - return lines - - def make_bihs(self): - import bih - shift = np.array([self.epsilon, self.epsilon, 0]) - for line in self.lines: - pt0, pt1 = self.points[line[0]], self.points[line[1]] - b0 = [(pt0 - shift).tolist(), (pt0 + shift).tolist()] - b1 = [(pt1 - shift).tolist(), (pt1 + shift).tolist()] - box_pt0 = bih.AABB(b0) - box_pt1 = bih.AABB(b1) - line_box = bih.AABB(b0 + b1) - self.pt_boxes.extend([box_pt0, box_pt1]) - self.line_boxes.append(line_box) - self.pt_bih = bih.BIH() - self.pt_bih.add_boxes(self.pt_boxes) - self.line_bih = bih.BIH() - self.line_bih.add_boxes(self.line_boxes) - self.pt_bih.construct() - self.line_bih.construct() - - def find_root(self, i_pt): - i = i_pt - while self.pt_map[i] != i: - i = self.pt_map[i] - root = i - i = i_pt - while self.pt_map[i] != i: - j = self.pt_map[i] - self.pt_map[i] = root - i = j - return root - - def snap_to_line(self, pt, pt0, pt1): - v = pt1 - pt0 - v /= np.linalg.norm(v) - t = v @ (pt - pt0) - if 0 < t < 1: - projected = pt0 + t * v - if np.linalg.norm(projected - pt) < self.epsilon: - return projected - return pt - - - - def simplify(self): - self.pt_map = list(range(len(self.points))) - for i_pt, point in enumerate(self.points): - pt = point.tolist() - for j_pt_box in 
self.pt_bih.find_point(pt): - if i_pt != j_pt_box and j_pt_box == self.pt_map[j_pt_box] and self.pt_boxes[j_pt_box].contains_point(pt): - self.pt_map[i_pt] = self.find_root(j_pt_box) - break - new_lines = [] - new_fr_ids = [] - for i_ln, ln in enumerate(self.lines): - pt0, pt1 = ln - pt0, pt1 = self.find_root(pt0), self.find_root(pt1) - if pt0 != pt1: - new_lines.append((pt0, pt1)) - new_fr_ids.append(self.fracture_ids[i_ln]) - self.lines = new_lines - self.fracture_ids = new_fr_ids - - for i_pt, point in enumerate(self.points): - if self.pt_map[i_pt] == i_pt: - pt = point.tolist() - for j_line in self.line_bih.find_point(pt): - line = self.lines[j_line] - if i_pt != line[0] and i_pt != line[1] and self.line_boxes[j_line].contains_point(pt): - pt0, pt1 = self.points[line[0]], self.points[line[1]] - self.points[i_pt] = self.snap_to_line(point, pt0, pt1) - break - - def line_fragment(self, i_ln, j_ln): - """ - Compute intersection of the two lines and if its position is well in interior - of both lines, benote it as the fragmen point for both lines. - """ - pt0i, pt1i = (self.points[ipt] for ipt in self.lines[i_ln]) - pt0j, pt1j = (self.points[ipt] for ipt in self.lines[j_ln]) - A = np.stack([pt1i - pt0i, -pt1j + pt0j], axis=1) - b = -pt0i + pt0j - ti, tj = np.linalg.solve(A, b) - if self.epsilon <= ti <= 1 - self.epsilon and self.epsilon <= tj <= 1 - self.epsilon: - X = pt0i + ti * (pt1i - pt0i) - ix = len(self.points) - self.points.append(X) - self._fragment_points[i_ln].append((ti, ix)) - self._fragment_points[j_ln].append((tj, ix)) - - def fragment(self): - """ - Fragment fracture lines, update map from new line IDs to original fracture IDs. 
- :return: - """ - new_lines = [] - new_fracture_ids = [] - self._fragment_points = [[] for l in self.lines] - for i_ln, line in enumerate(self.lines): - for j_ln in self.line_bih.find_box(self.line_boxes[i_ln]): - if j_ln > i_ln: - self.line_fragment(i_ln, j_ln) - # i_ln line is complete, we can fragment it - last_pt = self.lines[i_ln][0] - fr_id = self.fracture_ids[i_ln] - for t, ix in sorted(self._fragment_points[i_ln]): - new_lines.append(last_pt, ix) - new_fracture_ids.append(fr_id) - last_pt = ix - new_lines.append(last_pt, self.lines[i_ln][1]) - new_fracture_ids.append(fr_id) - self.lines = new_lines - self.fracture_ids = new_fracture_ids - - - - - - # def compute_transformed_shapes(self): - # n_frac = len(self.fractures) - # - # unit_square = unit_square_vtxs() - # z_axis = np.array([0, 0, 1]) - # squares = np.tile(unit_square[None, :, :], (n_frac, 1, 1)) - # center = np.empty((n_frac, 3)) - # trans_matrix = np.empty((n_frac, 3, 3)) - # for i, fr in enumerate(self.fractures): - # vtxs = squares[i, :, :] - # vtxs[:, 1] *= fr.aspect - # vtxs[:, :] *= fr.r - # vtxs = FisherOrientation.rotate(vtxs, z_axis, fr.shape_angle) - # vtxs = FisherOrientation.rotate(vtxs, fr.rotation_axis, fr.rotation_angle) - # vtxs += fr.centre - # squares[i, :, :] = vtxs - # - # center[i, :] = fr.centre - # u_vec = vtxs[1] - vtxs[0] - # u_vec /= (u_vec @ u_vec) - # v_vec = vtxs[2] - vtxs[0] - # u_vec /= (v_vec @ v_vec) - # w_vec = FisherOrientation.rotate(z_axis, fr.rotation_axis, fr.rotation_angle) - # trans_matrix[i, :, 0] = u_vec - # trans_matrix[i, :, 1] = v_vec - # trans_matrix[i, :, 2] = w_vec - # self.squares = squares - # self.center = center - # self.trans_matrix = trans_matrix - # - # def snap_vertices_and_edges(self): - # n_frac = len(self.fractures) - # epsilon = 0.05 # relaitve to the fracture - # min_unit_fr = np.array([0 - epsilon, 0 - epsilon, 0 - epsilon]) - # max_unit_fr = np.array([1 + epsilon, 1 + epsilon, 0 + epsilon]) - # cos_limit = 1 / np.sqrt(1 + (epsilon / 
2) ** 2) - # - # all_points = self.squares.reshape(-1, 3) - # - # isec_condidates = [] - # wrong_angle = np.zeros(n_frac) - # for i, fr in enumerate(self.fractures): - # if wrong_angle[i] > 0: - # isec_condidates.append(None) - # continue - # projected = all_points - self.center[i, :][None, :] - # projected = np.reshape(projected @ self.trans_matrix[i, :, :], (-1, 4, 3)) - # - # # get bounding boxes in the loc system - # min_projected = np.min(projected, axis=1) # shape (N, 3) - # max_projected = np.max(projected, axis=1) - # # flag fractures that are out of the box - # flag = np.any(np.logical_or(min_projected > max_unit_fr[None, :], max_projected < min_unit_fr[None, :]), - # axis=1) - # flag[i] = 1 # omit self - # candidates = np.nonzero(flag == 0)[0] # indices of fractures close to 'fr' - # isec_condidates.append(candidates) - # # print("fr: ", i, candidates) - # for i_fr in candidates: - # if i_fr > i: - # cos_angle_of_normals = self.trans_matrix[i, :, 2] @ self.trans_matrix[i_fr, :, 2] - # if cos_angle_of_normals > cos_limit: - # wrong_angle[i_fr] = 1---- - # print("wrong_angle: ", i, i_fr) - # - # # atract vertices - # fr = projected[i_fr] - # flag = np.any(np.logical_or(fr > max_unit_fr[None, :], fr < min_unit_fr[None, :]), axis=1) - # print(np.nonzero(flag == 0)) - - -def fr_intersect(fractures): - """ - 1. create fracture shape vertices (rotated, translated) square - - create vertices of the unit shape - - use FisherOrientation.rotate - 2. intersection of a line with plane/square - 3. intersection of two squares: - - length of the intersection - - angle - - - :param fractures: - :return: - """ - - # project all points to all fractures (getting local coordinates on the fracture system) - # fracture system axis: - # u_vec = vtxs[1] - vtxs[0] - # v_vec = vtxs[2] - vtxs[0] - # w_vec ... 
unit normal - # fractures with angle that their max distance in the case of intersection - # is not greater the 'epsilon' - - # class Quat: # """ diff --git a/src/bgem/transform.py b/src/bgem/transform.py index 8f14f26..a76a589 100644 --- a/src/bgem/transform.py +++ b/src/bgem/transform.py @@ -175,18 +175,19 @@ def rotate(self, axis, angle, center=(0, 0, 0)): rotate, and then shift back. """ matrix = Transform._identity_matrix() - center = np.array(center, dtype=float) - axis = np.array(axis, dtype=float) - axis /= np.linalg.norm(axis) - - W = np.array( - [[0, -axis[2], axis[1]], - [axis[2], 0, -axis[0]], - [-axis[1], axis[0], 0]]) - M = np.eye(3) + np.sin(angle) * W + 2 * np.sin(angle/2) ** 2 * W @ W - matrix[:, 3] -= center - matrix = M @ matrix - matrix[:, 3] += center + if angle != 0.0: + center = np.array(center, dtype=float) + axis = np.array(axis, dtype=float) + axis /= np.linalg.norm(axis) + + W = np.array( + [[0, -axis[2], axis[1]], + [axis[2], 0, -axis[0]], + [-axis[1], axis[0], 0]]) + M = np.eye(3) + np.sin(angle) * W + 2 * np.sin(angle/2) ** 2 * W @ W + matrix[:, 3] -= center + matrix = M @ matrix + matrix[:, 3] += center return Transform(matrix) @ self def scale(self, scale_vector, center=(0, 0, 0)): diff --git a/src/bgem/upscale/__init__.py b/src/bgem/upscale/__init__.py index 27f66fc..d53b643 100644 --- a/src/bgem/upscale/__init__.py +++ b/src/bgem/upscale/__init__.py @@ -1 +1,2 @@ -from .fem import Fe, Grid, upscale \ No newline at end of file +from .fem import Fe, Grid, upscale +from .fields import voigt_to_tn, tn_to_voigt \ No newline at end of file diff --git a/src/bgem/upscale/fem.py b/src/bgem/upscale/fem.py index aa0c7f0..82e3f5e 100644 --- a/src/bgem/upscale/fem.py +++ b/src/bgem/upscale/fem.py @@ -164,15 +164,17 @@ class Grid: """ Regular grid, distribution of DOFs, System matrix assembly. 
""" - def __init__(self, size, n_steps, fe: Fe): + def __init__(self, dimensions, n_steps, fe: Fe, origin=[0,0,0]): """ dim - dimension 1, 2, 3 size - domain size (Lx, Ly, Lz) or just scalar L for a cube domain n_steps - number of elements in each axis (nx, ny, nz) or just `n` for isotropic division """ self.dim = fe.dim - # Spatial dimension. - self.size = size * np.ones(self.dim) + # Ambient space dimension. + self.origin = origin * np.ones(self.dim) + # Absolute position of the node zero. + self.dimensions = dimensions * np.ones(self.dim) # Array with physital dimensions of the homogenization domain. self.n_steps = n_steps * np.ones(self.dim, dtype=np.int64) # Int Array with number of elements for each axis. @@ -193,7 +195,7 @@ def __init__(self, size, n_steps, fe: Fe): @cached_property def step(self): # Array with step size in each axis. - return self.size / self.n_steps + return self.dimensions / self.n_steps @property def n_loc_dofs(self): @@ -223,15 +225,6 @@ def dof_to_coord(self): # Array for computiong global dof index from dof int coords. return np.cumprod([1, *self.ax_dofs[:-1]]) - @cached_property - def bc_coords(self): - bc_natur_indeces = self.natur_map[np.arange(self.n_bc_dofs, dtype=np.int64)] - return self.idx_to_coord(bc_natur_indeces) - - - @cached_property - def bc_points(self): - return self.bc_coords * self.step[:, None] def make_numbering(self, dim): # grid of integers, set to (-1) @@ -280,11 +273,40 @@ def make_numbering(self, dim): def barycenters(self): """ - Barycenters of elements - :return: shape (dim, n_els) + Barycenters of elements. + n_els = prod( n_steps ) + :return: shape (n_els, dim) """ bary_axes = [self.step[i] * (np.arange(self.n_steps[i]) + 0.5) for i in range(self.dim)] - return np.stack(np.meshgrid(*bary_axes)).reshape(self.dim, -1) + return np.stack(np.meshgrid(*bary_axes), axis=-1).reshape(-1, self.dim) + self.origin[None, :] + + def nodes(self): + """ + Nodes of the grid. 
+ n_nodes = prod( n_steps + 1 ) + :return: shape (n_nodes, dim) + """ + nodes_axes = [self.step[i] * (np.arange(self.n_steps[i] + 1)) for i in range(self.dim)] + return np.stack(np.meshgrid(*nodes_axes), axis=-1).reshape(-1, self.dim) + self.origin[None, :] + + @cached_property + def bc_coords(self): + """ + ?? todo transpose, refactor + :return: + """ + bc_natur_indeces = self.natur_map[np.arange(self.n_bc_dofs, dtype=np.int64)] + return self.idx_to_coord(bc_natur_indeces) + + @cached_property + def bc_points(self): + """ + todo refactor + :return: + """ + return self.bc_coords * self.step[None, :] + self.origin[None, :] + + def idx_to_coord(self, dof_natur_indices): """ @@ -293,15 +315,15 @@ def idx_to_coord(self, dof_natur_indices): :return: integer coordinates: (dim, *dof_natur_indeces.shape) """ indices = dof_natur_indices - coords = np.empty((self.dim, *dof_natur_indices.shape), dtype=np.int64) + coords = np.empty((*dof_natur_indices.shape, self.dim), dtype=np.int64) for i in range(self.dim-1, 0, -1): - indices, coords[i] = np.divmod(indices, self.dof_to_coord[i]) - coords[0] = indices + indices, coords[:, i] = np.divmod(indices, self.dof_to_coord[i]) + coords[:, 0] = indices return coords def __repr__(self): msg = \ - f"{self.fe} Grid: {self.n_steps} Domain: {self.size}\n" + \ + f"{self.fe} Grid: {self.n_steps} Domain: {self.dimensions}\n" + \ f"Natur Map:\n{self.natur_map}\n" + \ f"El_DOFs:\n{self.el_dofs}\n" return msg @@ -366,7 +388,7 @@ def assembly_dense(self, K_voight_tensors): """ assert K_voight_tensors.shape == (self.n_elements, len(voigt_coords[self.dim])) laplace = self.laplace - # locmat_dofs, n_voight = laplace.shape + # n_voight, locmat_dofs == laplace.shape # Use transpositions of intputs and output in order to enforce cache efficient storage. 
#loc_matrices = np.zeros((self.n_loc_dofs self.n_loc_dofs, self.n_elements)) #np.matmul(.T[:, None, :], laplace[None, :, :], out=loc_matrices.reshape(-1, self.n_elements).T) @@ -376,17 +398,22 @@ def assembly_dense(self, K_voight_tensors): # Use advanced indexing to add local matrices to the global matrix np.add.at(A, self.loc_mat_ij, loc_matrices) return A + + def solve_system(self, K, p_grad_bc): """ :param K: array, shape: (n_elements, n_voight) - :param p_grad_bc: array, shape: (dim, n_vectors) + cell at position (iX, iY, iZ) has index + iX + n_steps[0] * ( iY + n_steps[1] * iZ) + i.e. the X index is running fastest, + :param p_grad_bc: array, shape: (n_vectors, dim) usually n_vectors >= dim :return: pressure, shape: (n_vectors, n_dofs) """ - d, n_rhs = p_grad_bc.shape + n_rhs, d = p_grad_bc.shape assert d == self.dim A = self.assembly_dense(K) - pressure_bc = p_grad_bc.T @ self.bc_points # (n_vectors, n_bc_dofs) + pressure_bc = p_grad_bc @ self.bc_points.T # (n_vectors, n_bc_dofs) B = pressure_bc @ A[:self.n_bc_dofs, self.n_bc_dofs:] # (n_vectors, n_interior_dofs) pressure = np.empty((n_rhs, self.n_dofs)) pressure[:, :self.n_bc_dofs] = pressure_bc From f54c4caa36103809fce517d54c9a63a37645ef67 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Sun, 7 Apr 2024 22:49:58 +0200 Subject: [PATCH 08/34] El and dof natural numbering consistent with ndarray. 
- Z coord fastest running --- src/bgem/upscale/fem.py | 125 ++++++++++++--------- tests/_pycharm_run/pytest fem.run.xml | 19 ++++ tests/upscale/test_fem.py | 152 ++++++++++++++++++++------ 3 files changed, 213 insertions(+), 83 deletions(-) create mode 100644 tests/_pycharm_run/pytest fem.run.xml diff --git a/src/bgem/upscale/fem.py b/src/bgem/upscale/fem.py index 82e3f5e..52c890d 100644 --- a/src/bgem/upscale/fem.py +++ b/src/bgem/upscale/fem.py @@ -91,7 +91,7 @@ class Fe: """ @classmethod - def Q1(cls, dim, order=1): + def Q(cls, dim, order=1): order = order + 1 points = np.linspace(0, 1, order) basis = Q1_1d_basis(points) @@ -151,6 +151,11 @@ def grad_eval(self, points): return result def ref_el_dofs(self): + """ + Positions of the DOFs on the reference element. + ref_el_dofs[:, i] .. position of the i-th dofs + :return: ndarray shape (dim, n_dofs) + """ n = self.n_dofs_1d grid_slice = tuple(self.dim * [slice(0, n)]) return np.mgrid[grid_slice].reshape(self.dim, -1) @@ -163,8 +168,9 @@ def __repr__(self): class Grid: """ Regular grid, distribution of DOFs, System matrix assembly. + Cells and dofs numbered as C-style numpy array, last dimension running the fastest. """ - def __init__(self, dimensions, n_steps, fe: Fe, origin=[0,0,0]): + def __init__(self, dimensions, n_steps, fe: Fe, origin=0): """ dim - dimension 1, 2, 3 size - domain size (Lx, Ly, Lz) or just scalar L for a cube domain @@ -176,8 +182,8 @@ def __init__(self, dimensions, n_steps, fe: Fe, origin=[0,0,0]): # Absolute position of the node zero. self.dimensions = dimensions * np.ones(self.dim) # Array with physital dimensions of the homogenization domain. - self.n_steps = n_steps * np.ones(self.dim, dtype=np.int64) - # Int Array with number of elements for each axis. + self.shape = n_steps * np.ones(self.dim, dtype=np.int64) + # Int Array with number of elements for each axis, i.e. shape of the grid self.fe = fe # Tensor product finite element class. 
@@ -195,7 +201,7 @@ def __init__(self, dimensions, n_steps, fe: Fe, origin=[0,0,0]): @cached_property def step(self): # Array with step size in each axis. - return self.dimensions / self.n_steps + return self.dimensions / self.shape @property def n_loc_dofs(self): @@ -203,27 +209,37 @@ def n_loc_dofs(self): @property def dofs_shape(self): - return self.n_steps * (np.array(self.fe.n_dofs_1d) - 1) + 1 + """ + Shape of DOFs grid. + :return: + """ + return self.shape * (np.array(self.fe.n_dofs_1d) - 1) + 1 + @property def n_dofs(self): return np.prod(self.dofs_shape) @property def n_elements(self): - return np.prod(self.n_steps) + return np.prod(self.shape) - @property - def ax_dofs(self): - """ - Number of dofs in each axis. - :return: - """ - return self.n_steps * (self.fe.n_dofs_1d - 1) + 1 # shape (dim, ) + # @property + # def ax_dofs(self): + # """ + # Number of dofs in each axis. + # :return: + # """ + # return self.n_steps * (self.fe.n_dofs_1d - 1) + 1 # shape (dim, ) @property - def dof_to_coord(self): + def dof_coord_coef(self): # Array for computiong global dof index from dof int coords. 
- return np.cumprod([1, *self.ax_dofs[:-1]]) + # + # idx = sum(coord * coord_coef) + # 1D: [1] + # 2D: [ny, 1] + # 3D: [ny*nz, nz, 1] + return np.cumprod([1, *self.dofs_shape[:0:-1]])[::-1] def make_numbering(self, dim): @@ -231,20 +247,20 @@ def make_numbering(self, dim): # go through boundary, enumerate, skip filled values # go through internal nodes, enumerate remaining # reshape -> computation_from_natural - assert self.ax_dofs.shape == (self.dim,) - n_dofs = np.prod(self.ax_dofs) + assert self.dofs_shape.shape == (self.dim,) + n_dofs = np.prod(self.dofs_shape) # mark boundary dofs -1, interior dofs -2 - calc_map = np.full(self.ax_dofs, -1, dtype=np.int64) + calc_map = np.full(self.dofs_shape, -1, dtype=np.int64) interior_slice = tuple(self.dim * [slice(1, -1)]) calc_map[interior_slice] = -2 # construct new numbering of dofs - indices = np.where(calc_map == -1) - self.n_bc_dofs = len(indices[0]) + el_indices = np.where(calc_map == -1) + self.n_bc_dofs = len(el_indices[0]) # print(self.n_bc_dofs, indices) - calc_map[indices] = np.arange(0, self.n_bc_dofs, dtype=np.int64) - indices = np.where(calc_map == -2) - calc_map[indices] = np.arange(self.n_bc_dofs, n_dofs, dtype=np.int64) + calc_map[el_indices] = np.arange(0, self.n_bc_dofs, dtype=np.int64) + el_indices = np.where(calc_map == -2) + calc_map[el_indices] = np.arange(self.n_bc_dofs, n_dofs, dtype=np.int64) calc_map = calc_map.flatten() self.natur_map = np.empty(len(calc_map), dtype=np.int64) self.natur_map[calc_map[:]] = np.arange(len(calc_map), dtype=np.int64) @@ -255,19 +271,20 @@ def make_numbering(self, dim): assert ref_dofs.shape == (self.dim, self.fe.n_dofs_1d ** self.dim) #print(ax.shape, ref_dofs.shape) - ref_dofs = (self.dof_to_coord[None, :] @ ref_dofs).ravel() + # Dof indices on the first cell. 
+ cell_0_dofs = (self.dof_coord_coef[None, :] @ ref_dofs).ravel() #print(ref_dofs.shape) # Creating a meshgrid for each dimension - indices = np.meshgrid(*[np.arange(n) for n in self.n_steps], indexing='ij') + el_indices = np.meshgrid(*[np.arange(n) for n in self.shape], indexing='ij') # Calculating the tensor values based on the formula and axes - el_dofs = np.zeros(self.n_steps, dtype=np.int64) + el_dofs = np.zeros(self.shape, dtype=np.int64) o = self.fe.n_dofs_1d - 1 for d in range(dim): - el_dofs += (self.dof_to_coord[d] * o ** (d + 1)) * indices[d] + el_dofs += (self.dof_coord_coef[d] * o ** (d + 1)) * el_indices[d] #print(el_dofs) - el_dofs = el_dofs[..., None] + ref_dofs[None, :] # shape: nx, nY, nz, loc_dofs + el_dofs = el_dofs[..., None] + cell_0_dofs[None, :] # shape: nx, nY, nz, loc_dofs self.el_dofs = calc_map[el_dofs.reshape(-1, el_dofs.shape[-1])] assert self.el_dofs.shape == (self.n_elements, self.fe.n_dofs) @@ -277,17 +294,21 @@ def barycenters(self): n_els = prod( n_steps ) :return: shape (n_els, dim) """ - bary_axes = [self.step[i] * (np.arange(self.n_steps[i]) + 0.5) for i in range(self.dim)] - return np.stack(np.meshgrid(*bary_axes), axis=-1).reshape(-1, self.dim) + self.origin[None, :] - - def nodes(self): - """ - Nodes of the grid. - n_nodes = prod( n_steps + 1 ) - :return: shape (n_nodes, dim) - """ - nodes_axes = [self.step[i] * (np.arange(self.n_steps[i] + 1)) for i in range(self.dim)] - return np.stack(np.meshgrid(*nodes_axes), axis=-1).reshape(-1, self.dim) + self.origin[None, :] + bary_axes = [self.step[i] * (np.arange(self.shape[i]) + 0.5) for i in range(self.dim)] + mesh_grid = np.meshgrid(*bary_axes, indexing='ij') + mesh_grid_array = np.stack(mesh_grid, axis=-1) + return mesh_grid_array.reshape(-1, self.dim) + self.origin + + # def nodes(self): + # """ + # Nodes of the grid. 
+ # n_nodes = prod( n_steps + 1 ) + # :return: shape (n_nodes, dim) + # """ + # nodes_axes = [self.step[i] * (np.arange(self.n_steps[i] + 1)) for i in range(self.dim)] + # mesh_grid = np.meshgrid(*nodes_axes, indexing='ij') + # mesh_grid_array = np.stack(mesh_grid, axis=-1) + # return mesh_grid_array.reshape(-1, self.dim) + self.origin @cached_property def bc_coords(self): @@ -296,7 +317,7 @@ def bc_coords(self): :return: """ bc_natur_indeces = self.natur_map[np.arange(self.n_bc_dofs, dtype=np.int64)] - return self.idx_to_coord(bc_natur_indeces) + return self.dof_idx_to_coord(bc_natur_indeces) @cached_property def bc_points(self): @@ -308,22 +329,23 @@ def bc_points(self): - def idx_to_coord(self, dof_natur_indices): + def dof_idx_to_coord(self, dof_natur_indices): """ - Produce physical coordinates for given natural dof indices. - :param dof_natur_indices: np.int64 array - :return: integer coordinates: (dim, *dof_natur_indeces.shape) + Produce index coordinates (ix,iy,iz) for given natural dof indices. 
+ :param dof_natur_indices: np.int64 array, shape (n_dofs,) + :return: integer coordinates: (len(dof_natur_indeces), self.dim) """ indices = dof_natur_indices coords = np.empty((*dof_natur_indices.shape, self.dim), dtype=np.int64) for i in range(self.dim-1, 0, -1): - indices, coords[:, i] = np.divmod(indices, self.dof_to_coord[i]) + indices, coords[:, i] = np.divmod(indices, self.dofs_shape[i]) + #indices, coords[:, i] = np.divmod(indices, self.dof_to_coord[i]) coords[:, 0] = indices return coords def __repr__(self): msg = \ - f"{self.fe} Grid: {self.n_steps} Domain: {self.dimensions}\n" + \ + f"{self.fe} Grid: {self.shape} Domain: {self.dimensions}\n" + \ f"Natur Map:\n{self.natur_map}\n" + \ f"El_DOFs:\n{self.el_dofs}\n" return msg @@ -403,9 +425,10 @@ def assembly_dense(self, K_voight_tensors): def solve_system(self, K, p_grad_bc): """ :param K: array, shape: (n_elements, n_voight) + K = array of shape (*self.shape, n_voight).reshape(-1, n_voight) cell at position (iX, iY, iZ) has index - iX + n_steps[0] * ( iY + n_steps[1] * iZ) - i.e. the X index is running fastest, + (iX * self.shape[1] + iY) * self.shape[2] + iZ + i.e. the Z index is running fastest, :param p_grad_bc: array, shape: (n_vectors, dim) usually n_vectors >= dim :return: pressure, shape: (n_vectors, n_dofs) @@ -428,7 +451,7 @@ def field_grad(self, dof_vals): :param dof_vals: (n_vec, n_dofs) :return: (n_vec, n_el, dim) """ - el_dof_vals = dof_vals[:, self.el_dofs[:, :]] # (n_vec, n_el, n_loc_dofs) + el_dof_vals = dof_vals[:, self.natur_map[self.el_dofs[:, :]]] # (n_vec, n_el, n_loc_dofs) quads = np.full((self.dim, 1), 0.5) # Zero order Gaussian quad. Integrates up to deg = 1. 
grad_basis = self.fe.grad_eval(quads) # (dim, n_loc_dofs, 1) grad_els = grad_basis[None,None,:, :,0] @ el_dof_vals[:,:, :, None] @@ -446,7 +469,7 @@ def upscale(K, domain=None): domain = np.ones(dim) order = 1 - g = Grid(domain, K.shape[:-1], Fe.Q1(dim, order)) + g = Grid(domain, K.shape[:-1], Fe.Q(dim, order)) p_grads = np.eye(dim) K_els = K.reshape((g.n_elements, -1)) pressure = g.solve_system(K_els, p_grads) diff --git a/tests/_pycharm_run/pytest fem.run.xml b/tests/_pycharm_run/pytest fem.run.xml new file mode 100644 index 0000000..7d857f0 --- /dev/null +++ b/tests/_pycharm_run/pytest fem.run.xml @@ -0,0 +1,19 @@ + + + + + \ No newline at end of file diff --git a/tests/upscale/test_fem.py b/tests/upscale/test_fem.py index 3cf9508..8af3c07 100644 --- a/tests/upscale/test_fem.py +++ b/tests/upscale/test_fem.py @@ -1,7 +1,7 @@ import numpy as np import pytest -from bgem.stochastic import dfn +#from bgem.stochastic import dfn from bgem.upscale import fem, fields, fem_plot def basis_1(): @@ -49,7 +49,7 @@ def test_eval_1d(): def test_Fe_Q1(): for dim in range(1, 4): order = 1 - f = fem.Fe.Q1(dim, order) + f = fem.Fe.Q(dim, order) points_1d = np.linspace(0, 1, 2*order + 1) points = np.stack([ points_1d, @@ -71,45 +71,133 @@ def test_flatten_dim(): assert np.allclose(flat_x, x) -def test_grid_numbering(): - # Test Grid numbering - - dim = 1 - order = 2 - g = fem.Grid(100.0, 4, fem.Fe.Q1(dim, order)) - print(g) - - dim = 2 +def test_grid_init(): + g = fem.Grid((100, 150, 200), (4, 3, 2), fem.Fe.Q(3, 1), origin=(-4, -5, -6)) + assert g.dim == 3 + assert np.allclose(g.origin, [-4, -5, -6]) + assert np.allclose(g.dimensions, [100, 150, 200]) + assert np.allclose(g.shape, [4, 3, 2]) + + # basic properties + assert np.allclose(g.step, [25, 50, 100]) + assert g.n_loc_dofs == 8 + assert np.allclose(g.dofs_shape, [5, 4, 3]) + assert g.n_dofs == 60 + assert g.n_elements == 24 + assert np.allclose(g.dof_coord_coef, [12, 3, 1]) + + # numberings + assert g.n_bc_dofs == 60 - 6 + 
ref_natur_map = [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 17, 18, 20, 21, 22, 23, + 24, 25, 26, 27, 29, 30, 32, 33, 34, 35, + 36, 37, 38, 39, 41, 42, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 16, 19, 28, 31, 40, 43 + ] + assert np.allclose(g.natur_map, ref_natur_map) + # gives natural dof index for given calculation dof index + # natural numbering comes from flattened (ix, iy, iz) dof coordinates + # calculation numbering puts Dirichlet DOFs at the begining + nx, ny, nz = g.shape + for ix in range(g.shape[0]): + for iy in range(g.shape[1]): + for iz in range(g.shape[2]): + i_dof_0 = (ix * (ny+1) + iy) * (nz+1) + iz + + natur_el_dofs = i_dof_0 + np.array([0, 1, (nz+1), (nz+1) + 1, + (nz+1)*(ny+1), (nz+1)*(ny+1) + 1, (nz+1)*(ny+2), (nz+1)*(ny+2) + 1]) + + i_el = (ix * ny + iy) * nz + iz + assert np.allclose(g.natur_map[g.el_dofs[i_el]], natur_el_dofs) + # shape (n_elements, n_local_dofs), DOF indices in calculation numbering + + +def test_barycenters(): + origin = [-4, -5, -6] + g = fem.Grid((100, 150, 200), (4, 3, 2), fem.Fe.Q(3, 1), origin=origin) + xyz_grid = np.meshgrid(*[np.arange(n_els) for n_els in g.shape], indexing='ij') + ref_barycenters = (np.stack(xyz_grid, axis=-1).reshape(-1, 3) + 0.5) * g.step + origin + assert np.allclose(g.barycenters(), ref_barycenters) + +# def test_nodes(): +# origin = [-4, -5, -6] +# g = fem.Grid((100, 150, 200), (4, 3, 2), fem.Fe.Q(3, 1), origin=origin) +# xyz_grid = np.meshgrid(*[np.arange(n_dofs) for n_dofs in g.dofs_shape], indexing='ij') +# ref_barycenters = (np.stack(xyz_grid, axis=-1).reshape(-1, 3) + 0.5) * g.step + origin +# assert np.allclose(g.nodes(), ref_barycenters) + +def test_bc_all(): + origin = [-4, -5, -6] + g = fem.Grid((100, 150, 200), (4, 3, 2), fem.Fe.Q(3, 1), origin=origin) + ref_bc_coord = np.zeros(()) + nx,ny,nz = g.dofs_shape + dof_grid = np.meshgrid(*[np.arange(n_dofs) for n_dofs in g.dofs_shape], indexing='ij') + dof_coords = np.stack(dof_grid, 
axis=-1).reshape(-1, 3) + ref_bc_coord = [dof_coords[g.natur_map[i_dof]] for i_dof in range(g.n_bc_dofs)] + assert np.all(g.bc_coords == ref_bc_coord) + assert np.allclose(g.bc_points[0], origin) + max_corner = g.origin + g.dimensions + bc_points = g.bc_points + assert np.allclose(bc_points[g.dofs_shape[2]-1], [origin[0], origin[1], max_corner[2]]) + assert np.allclose(bc_points[g.dofs_shape[1]*g.dofs_shape[2]-1], [origin[0], max_corner[1], max_corner[2]]) + assert np.allclose(bc_points[-1], max_corner) + +def grid_numbering_Q1(dim): order = 1 - g = fem.Grid(100.0, 4, fem.Fe.Q1(dim, order)) - print(g) + g = fem.Grid(100.0, 4, fem.Fe.Q(dim, order)) + idx_to_coord = g.dof_coord_coef * g.step[None, :] + ref_barycenters = np.arange(g.n_elements) + np.allclose(g.barycenters(), ) + + + +# +# def test_grid_numbering(): +# # Test Grid numbering +# for dim in [1, 2, 3]: +# grid_numbering_Q1(dim) +# dim = 1 +# order = 2 +# g = fem.Grid(100.0, 4, fem.Fe.Q(dim, order)) +# print(g) +# +# dim = 2 +# order = 1 +# g = fem.Grid(100.0, 4, fem.Fe.Q(dim, order)) +# print(g) +# +# dim = 3 +# order = 1 +# g = fem.Grid(100.0, 3, fem.Fe.Q(dim, order)) +# print(g) +# +# def test_grid_nodes(): - dim = 3 - order = 1 - g = fem.Grid(100.0, 3, fem.Fe.Q1(dim, order)) - print(g) def test_grid_bc(): - g = fem.Grid(10, 2, fem.Fe.Q1(1, 1)) - assert np.all(g.bc_coords == np.array([[0, 2]])) - assert np.allclose(g.bc_points, np.array([[0, 10]])) + g = fem.Grid(10, 2, fem.Fe.Q(1, 1)) + assert np.all(g.bc_coords == np.array([[0], [2]])) + assert np.allclose(g.bc_points, np.array([[0], [10]])) - g = fem.Grid(10, 2, fem.Fe.Q1(2, 1)) - ref = np.array([[0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2]]) + g = fem.Grid(10, 2, fem.Fe.Q(2, 1)) + ref = np.array([[0, 0, 0, 1, 1, 2, 2, 2], [0, 1, 2, 0, 2, 0, 1, 2]]).T assert np.all(g.bc_coords == ref) def test_laplace(): order = 1 N = 3 dim = 2 - g = fem.Grid(N, N, fem.Fe.Q1(dim, order)) + g = fem.Grid(N, N, fem.Fe.Q(dim, order)) l = g.laplace.reshape((-1, 
g.fe.n_dofs, g.fe.n_dofs)) print("\nlaplace, 2d:\n", l) + def test_grid_assembly(): for dim in range(1, 4): order = 1 N = 3 - g = fem.Grid(30, N, fem.Fe.Q1(dim, order)) + g = fem.Grid(30, N, fem.Fe.Q(dim, order)) K_const = np.diag(np.arange(1, dim + 1)) K_const = fem.tn_to_voigt(K_const[None, :, :]) K_field = K_const * np.ones(g.n_elements)[:, None] @@ -117,27 +205,27 @@ def test_grid_assembly(): n_dofs = (N+1)**dim assert A.shape == (n_dofs, n_dofs) -@pytest.mark.skip +#@pytest.mark.skip def test_solve_system(): for dim in range(1, 4): order = 1 N = 3 - g = fem.Grid(30, N, fem.Fe.Q1(dim, order)) + g = fem.Grid(30, N, fem.Fe.Q(dim, order)) K_const = np.diag(np.arange(1, dim + 1)) - K_const = fem.tn_to_voigt(K_const[:, :, None]) - K_field = K_const.T * np.ones(g.n_elements)[:, None] + K_const = fem.tn_to_voigt(K_const[None, :, :]) + K_field = K_const * np.ones(g.n_elements)[:, None] p_grads = np.eye(dim) pressure = g.solve_system(K_field, p_grads) assert pressure.shape == (dim, *dim * [N + 1]) -@pytest.mark.skip +#@pytest.mark.skip def test_solve_2d(): dim = 2 order = 1 N = 30 - g = fem.Grid(100, N, fem.Fe.Q1(dim, order)) - x = g.barycenters()[0, :] + g = fem.Grid(100, N, fem.Fe.Q(dim, order)) + x = g.barycenters()[:, 0] K_const = np.diag([1, 1]) #K_const = np.ones((dim, dim)) K_const = fields.tn_to_voigt(K_const[None, :, :]) @@ -145,7 +233,7 @@ def test_solve_2d(): #K_field = K_const.T * np.ones_like(x)[:, None] p_grads = np.eye(dim) pressure = g.solve_system(K_field, p_grads) - xy_grid = [np.linspace(0, g.size[i], g.ax_dofs[i]) for i in range(2)] + xy_grid = [np.linspace(0, g.dimensions[i], g.dofs_shape[i]) for i in range(2)] fem_plot.plot_pressure_fields(*xy_grid, pressure) @pytest.mark.skip() From b20ad52ea150775d4f2423a7989a5863f08a16df Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Sun, 7 Apr 2024 23:22:55 +0200 Subject: [PATCH 09/34] Fix numberings in test_two_scale --- src/bgem/stochastic/__init__.py | 3 +- src/bgem/upscale/fem.py | 22 +++++++ 
tests/upscale/test_rasterize.py | 4 +- tests/upscale/test_two_scale.py | 106 +++++++++++++++++++++++++------- 4 files changed, 110 insertions(+), 25 deletions(-) diff --git a/src/bgem/stochastic/__init__.py b/src/bgem/stochastic/__init__.py index fecaaec..977082f 100644 --- a/src/bgem/stochastic/__init__.py +++ b/src/bgem/stochastic/__init__.py @@ -1 +1,2 @@ -from .fracture import Population, PowerLawSize, UniformBoxPosition, VonMisesOrientation, FisherOrientation, Fracture \ No newline at end of file +from .fracture import Population, PowerLawSize, UniformBoxPosition, VonMisesOrientation, FisherOrientation, \ + Fracture, SquareShape, LineShape, DiscShape, ConvexPolygon \ No newline at end of file diff --git a/src/bgem/upscale/fem.py b/src/bgem/upscale/fem.py index 52c890d..9cbe695 100644 --- a/src/bgem/upscale/fem.py +++ b/src/bgem/upscale/fem.py @@ -299,6 +299,28 @@ def barycenters(self): mesh_grid_array = np.stack(mesh_grid, axis=-1) return mesh_grid_array.reshape(-1, self.dim) + self.origin + def cell_field_C_like(self, cell_array_F_like): + """ + :param cell_array: shape (n_elements, *value_dim) in F-like numbering + :return: Same values rearranged for a C-like indexing, Z index running the fastest + ... used in self + """ + value_shape = cell_array_F_like.shape[1:] + grid_field = cell_array_F_like.reshape(*reversed(self.shape), *value_shape) + transposed = grid_field.transpose(*reversed(range(self.dim))) + return transposed.reshape(-1, *value_shape) + + def cell_field_F_like(self, cell_array_C_like): + """ + :param cell_array: shape (n_elements, *value_dim) in C-like numbering + :return: Same values rearranged for a F-like indexing, X index running the fastest + ... used in PyVista. + """ + value_shape = cell_array_C_like.shape[1:] + grid_field = cell_array_C_like.reshape(*self.shape, *value_shape) + transposed = grid_field.transpose(*reversed(range(self.dim)),-1) + return transposed.reshape(-1, *value_shape) + # def nodes(self): # """ # Nodes of the grid. 
diff --git a/tests/upscale/test_rasterize.py b/tests/upscale/test_rasterize.py index 3ae7f53..bee65e0 100644 --- a/tests/upscale/test_rasterize.py +++ b/tests/upscale/test_rasterize.py @@ -134,11 +134,11 @@ def rasterize_dfn(homo_fns): # Coarse Problem steps = (10, 12, 14) - grid = Grid(domain_size, steps, Fe.Q1(dim=3), origin=-domain_size / 2) + grid = Grid(domain_size, steps, Fe.Q(dim=3), origin=-domain_size / 2) dfn = tst_fracture_set(grid.dimensions) fr_media = FracturedMedia.constant_fr(dfn, 10, 1, 0.01) - xyz_range = [ np.linspace(grid.origin[ax], grid.origin[ax] + grid.dimensions[ax], grid.n_steps[ax] + 1, dtype=np.float32) + xyz_range = [ np.linspace(grid.origin[ax], grid.origin[ax] + grid.dimensions[ax], grid.shape[ax] + 1, dtype=np.float32) for ax in [0, 1, 2] ] diff --git a/tests/upscale/test_two_scale.py b/tests/upscale/test_two_scale.py index 9ecd310..5054596 100644 --- a/tests/upscale/test_two_scale.py +++ b/tests/upscale/test_two_scale.py @@ -22,12 +22,15 @@ import numpy as np import attrs +import pyvista as pv + from bgem import stochastic from bgem.gmsh import gmsh, options from mesh_class import Mesh from bgem.core import call_flow, dotdict, workdir as workdir_mng -from bgem.upscale import Grid, Fe -import pyvista as pv +from bgem.upscale import Grid, Fe, voigt_to_tn, tn_to_voigt +import decovalex_dfnmap as dmap +from scipy.interpolate import LinearNDInterpolator script_dir = Path(__file__).absolute().parent workdir = script_dir / "sandbox" @@ -40,7 +43,7 @@ class FracturedMedia: dfn: List[stochastic.Fracture] fr_cross_section: np.ndarray # shape (n_fractures,) fr_conductivity: np.ndarray # shape (n_fractures,) - conductvity: float + conductivity: float @staticmethod def fracture_cond_params(dfn, unit_cross_section, bulk_conductivity): @@ -55,6 +58,7 @@ def fracture_cond_params(dfn, unit_cross_section, bulk_conductivity): fr_r = np.array([fr.r for fr in dfn]) fr_cross_section = unit_cross_section * fr_r fr_cond = permeability_to_conductivity * 
permeability_factor * fr_r ** 2 + fr_cond = np.full_like(fr_r, 10) return FracturedMedia(dfn, fr_cross_section, fr_cond, bulk_conductivity) def fracture_set(seed, size_range, max_frac = None): @@ -167,8 +171,7 @@ def fr_field(mesh, fractures, fr_values, bulk_value): # pass @memory.cache -def reference_solution(fr_media, target_grid, bc_gradient): - domain_dimensions = target_grid.size +def reference_solution(fr_media: FracturedMedia, dimensions, bc_gradient): dfn = fr_media.dfn bulk_conductivity = fr_media.conductivity @@ -176,7 +179,7 @@ def reference_solution(fr_media, target_grid, bc_gradient): workdir.mkdir(parents=True, exist_ok=True) # Input crssection and conductivity - mesh_file = ref_solution_mesh(workdir, domain_dimensions, dfn, fr_step=10, bulk_step=10) + mesh_file = ref_solution_mesh(workdir, dimensions, dfn, fr_step=7, bulk_step=7) full_mesh = Mesh.load_mesh(mesh_file, heal_tol = 0.001) # gamma fields = dict( conductivity=fr_field(full_mesh, dfn, fr_media.fr_conductivity, bulk_conductivity), @@ -211,7 +214,7 @@ def reference_solution(fr_media, target_grid, bc_gradient): # projection of fields return flow_out -def project_ref_solution(flow_out, grid: Grid): +def project_ref_solution_(flow_out, grid: Grid): # Velocity P0 projection # 1. get array of barycenters (source points) and element volumes of the fine mesh # 2. 
ijk cell coords of each source point @@ -222,49 +225,108 @@ def project_ref_solution(flow_out, grid: Grid): pvd_content.set_active_time_point(0) dataset = pvd_content.read()[0] # Take first block of the Multiblock dataset cell_centers_coords = dataset.cell_centers().points - grid_min_corner = -grid.size / 2 + grid_min_corner = -grid.dimensions / 2 centers_ijk_grid = (cell_centers_coords - grid_min_corner) // grid.step[None, :] centers_ijk_grid = centers_ijk_grid.astype(np.int32) - assert np.alltrue(centers_ijk_grid < grid.n_steps[None, :]) + assert np.alltrue(centers_ijk_grid < grid.shape[None, :]) - grid_cell_idx = centers_ijk_grid[:, 0] + grid.n_steps[0] * (centers_ijk_grid[:, 1] + grid.n_steps[1]*centers_ijk_grid[:, 2]) + grid_cell_idx = centers_ijk_grid[:, 0] + grid.shape[0] * (centers_ijk_grid[:, 1] + grid.shape[1] * centers_ijk_grid[:, 2]) sized = dataset.compute_cell_sizes() cell_volume = np.abs(sized.cell_data["Volume"]) grid_sum_cell_volume = np.zeros(grid.n_elements) np.add.at(grid_sum_cell_volume, grid_cell_idx, cell_volume) weights = cell_volume[:] / grid_sum_cell_volume[grid_cell_idx[:]] + velocities = dataset.cell_data['velocity_p0'] grid_velocities = np.zeros((grid.n_elements, 3)) wv = weights[:, None] * velocities for ax in [0, 1, 2]: np.add.at(grid_velocities[:, ax], grid_cell_idx, wv[:, ax]) - return grid_velocities.reshape( (*grid.n_steps, 3)) + return grid_velocities.reshape((*grid.shape, 3)) -def homogenize(fr_media: FracturedMedia, grid:Grid): +def project_ref_solution(flow_out, grid: Grid): + # Velocity P0 projection + # 1. get array of barycenters (source points) and element volumes of the fine mesh + # 2. ijk cell coords of each source point + # 3. weights = el_volume / np.add.at(cell_vol_sum, ijk, el_volume)[ijk[:]] + # 4. 
np.add.at(cell_velocities, ijk, weights * velocities) + # :return: + pvd_content = pv.get_reader(flow_out.hydro.spatial_file.path) + pvd_content.set_active_time_point(0) + dataset = pvd_content.read()[0] # Take first block of the Multiblock dataset + cell_centers_coords = dataset.cell_centers().points + velocities = dataset.cell_data['velocity_p0'] + interpolator = LinearNDInterpolator(cell_centers_coords, velocities, 0.0) + grid_velocities = interpolator(grid.barycenters()) + return grid_velocities + + +def homo_decovalex(fr_media: FracturedMedia, grid:Grid): """ Homogenize fr_media to the conductivity tensor field on grid. :return: conductivity_field, np.array, shape (n_elements, n_voight) """ - + ellipses = [dmap.Ellipse(fr.normal, fr.center, fr.scale) for fr in fr_media.dfn] + d_grid = dmap.Grid.make_grid(grid.origin, grid.step, grid.dimensions) + fractures = dmap.map_dfn(d_grid, ellipses) + fr_transmissivity = fr_media.fr_conductivity * fr_media.fr_cross_section + k_iso_zyx = dmap.permIso(d_grid, fractures, fr_transmissivity, fr_media.conductivity) + k_iso_xyz = grid.cell_field_C_like(k_iso_zyx) + k_voigt = k_iso_xyz[:, None] * np.array([1, 1, 1, 0, 0, 0])[None, :] + return k_voigt def test_two_scale(): # Fracture set domain_size = 100 - fr_range = (30, domain_size) + #fr_range = (30, domain_size) + fr_range = (50, domain_size) dfn = fracture_set(123, fr_range) - fr_media = FracturedMedia.fracture_cond_params(dfn, 1e-4, 0.1) + fr_media = FracturedMedia.fracture_cond_params(dfn, 1e-4, 0.01) # Coarse Problem - steps = (10, 2, 5) - grid = Grid(domain_size, steps, Fe.Q1(dim=3)) + steps = (50, 60, 70) + #steps = (3, 4, 5) + grid = Grid(domain_size, steps, Fe.Q(dim=3), origin=-domain_size / 2) bc_pressure_gradient = [1, 0, 0] - flow_out = reference_solution(fr_media, grid, bc_pressure_gradient) - ref_velocity_grid = project_ref_solution(flow_out, grid) - - grid_permitivity = homogenize(fr_media, grid) - pressure_field = grid.solve_system(grid_permitivity, 
bc_pressure_gradient) \ No newline at end of file + flow_out = reference_solution(fr_media, grid.dimensions, bc_pressure_gradient) + ref_velocity_grid = grid.cell_field_F_like(project_ref_solution(flow_out, grid).reshape((-1, 3))) + + grid_permitivity = homo_decovalex(fr_media, grid) + pressure = grid.solve_system(grid_permitivity, np.array(bc_pressure_gradient)[None, :]) + pressure_flat = pressure.reshape((1, -1)) + #pressure_flat = (grid.nodes() @ bc_pressure_gradient)[None, :] + + grad_pressure = grid.field_grad(pressure_flat) # (n_vectors, n_els, dim) + grad_pressure = grad_pressure[0, :, :][:, :, None] # (n_els, dim, 1) + velocity = -voigt_to_tn(grid_permitivity) @ grad_pressure # (n_els, dim, 1) + #velocity = grad_pressure # (n_els, dim, 1) + velocity = velocity[:, :, 0] # transpose + velocity_zyx = grid.cell_field_F_like(velocity) #.reshape(*grid.n_steps, -1).transpose(2,1,0,3).reshape((-1, 3)) + # Comparison + # origin = [0, 0, 0] + + #pv_grid = pv.StructuredGrid() + xyz_range = [ np.linspace(grid.origin[ax], grid.origin[ax] + grid.dimensions[ax], grid.shape[ax] + 1, dtype=np.float32) + for ax in [0, 1, 2] + ] + + x, y, z = np.meshgrid(*xyz_range, indexing='ij') + pv_grid = pv.StructuredGrid(x, y, z) + #points = grid.nodes() + pv_grid_centers = pv_grid.cell_centers().points + print(grid.barycenters()) + print(pv_grid_centers) + + #pv_grid.dimensions = grid.n_steps + 1 + pv_grid.cell_data['ref_velocity'] = ref_velocity_grid + pv_grid.cell_data['homo_velocity'] = velocity_zyx + pv_grid.cell_data['diff'] = velocity_zyx - ref_velocity_grid + pv_grid.cell_data['homo_cond'] = grid.cell_field_F_like(grid_permitivity) + pv_grid.point_arrays['homo_pressure'] = pressure_flat[0] + + pv_grid.save(str(workdir / "test_result.vtk")) \ No newline at end of file From e13942926fa041069a7917b0439a9811db33f173 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Fri, 12 Apr 2024 00:16:34 +0200 Subject: [PATCH 10/34] Triangle and tetra refinement. 
--- tests/upscale/test_fem.py | 4 +- tests/upscale/test_two_scale.py | 196 +++++++++++++++++++++++++++++++- 2 files changed, 194 insertions(+), 6 deletions(-) diff --git a/tests/upscale/test_fem.py b/tests/upscale/test_fem.py index 8af3c07..9e6162a 100644 --- a/tests/upscale/test_fem.py +++ b/tests/upscale/test_fem.py @@ -215,7 +215,7 @@ def test_solve_system(): K_const = fem.tn_to_voigt(K_const[None, :, :]) K_field = K_const * np.ones(g.n_elements)[:, None] p_grads = np.eye(dim) - pressure = g.solve_system(K_field, p_grads) + pressure = g.solve_direct(K_field, p_grads) assert pressure.shape == (dim, *dim * [N + 1]) @@ -232,7 +232,7 @@ def test_solve_2d(): K_field = K_const * x[:, None] #K_field = K_const.T * np.ones_like(x)[:, None] p_grads = np.eye(dim) - pressure = g.solve_system(K_field, p_grads) + pressure = g.solve_direct(K_field, p_grads) xy_grid = [np.linspace(0, g.dimensions[i], g.dofs_shape[i]) for i in range(2)] fem_plot.plot_pressure_fields(*xy_grid, pressure) diff --git a/tests/upscale/test_two_scale.py b/tests/upscale/test_two_scale.py index 5054596..1da5a8a 100644 --- a/tests/upscale/test_two_scale.py +++ b/tests/upscale/test_two_scale.py @@ -11,6 +11,8 @@ - various homogenization techniques could be used, homogenization time is evaluated and compared. 
""" from typing import * + +import pytest import yaml import shutil from pathlib import Path @@ -246,6 +248,190 @@ def project_ref_solution_(flow_out, grid: Grid): return grid_velocities.reshape((*grid.shape, 3)) + +# Define transformation matrices and index mappings for 2D and 3D refinements +_transformation_matrices = { + 3: np.array([ + [1, 0, 0], # Vertex 0 + [0, 1, 0], # Vertex 1 + [0, 0, 1], # Vertex 2 + [0.5, 0.5, 0], # Midpoint between vertices 0 and 1 + [0, 0.5, 0.5], # Midpoint between vertices 1 and 2 + [0.5, 0, 0.5], # Midpoint between vertices 0 and 2 + ]), + 4: np.array([ + [1, 0, 0, 0], # Vertex 0 + [0, 1, 0, 0], # Vertex 1 + [0, 0, 1, 0], # Vertex 2 + [0, 0, 0, 1], # Vertex 3 + [0.5, 0.5, 0, 0], # Midpoint between vertices 0 and 1 + [0.5, 0, 0.5, 0], # Midpoint between vertices 0 and 2 + [0.5, 0, 0, 0.5], # Midpoint between vertices 0 and 3 + [0, 0.5, 0.5, 0], # Midpoint between vertices 1 and 2 + [0, 0.5, 0, 0.5], # Midpoint between vertices 1 and 3 + [0, 0, 0.5, 0.5], # Midpoint between vertices 2 and 3 + ]) +} + +_index_maps = { + 3: np.array([ + [0, 3, 5], # Triangle 1 + [3, 1, 4], # Triangle 2 + [3, 4, 5], # Triangle 3 + [5, 4, 2] # Triangle 4 + ]), + 4: np.array([ + [0, 4, 5, 6], # Tetrahedron 1 + [1, 4, 7, 8], # Tetrahedron 2 + [2, 5, 7, 9], # Tetrahedron 3 + [3, 6, 8, 9], # Tetrahedron 4 + [4, 5, 6, 7], # Center tetrahedron 1 + [4, 7, 8, 6], # Center tetrahedron 2 + [5, 7, 9, 6], # Center tetrahedron 3 + [6, 8, 9, 7], # Center tetrahedron 4 + ]) +} + + +def refine_element(element, level): + """ + Recursively refines an element (triangle or tetrahedron) in space using matrix multiplication. + + :param element: A numpy array of shape (1, N, M), where N is the number of vertices (3 or 4). + :param level: Integer, the level of refinement. + :return: A numpy array containing the vertices of all refined elements. 
+ """ + if level == 0: + return element + n_tria, num_vertices, dim = element.shape + assert n_tria == 1 + assert num_vertices == dim + 1 + transformation_matrix = _transformation_matrices[num_vertices] + index_map = _index_maps[num_vertices] + # Generate all nodes by applying the transformation matrix to the original vertices + nodes = np.dot(transformation_matrix, element[0]) + # Construct new elements using advanced indexing + new_elements = nodes[index_map] + # Recursively refine each smaller element + result = np.vstack([ + refine_element(new_elem[None, :, :], level - 1) for new_elem in new_elements + ]) + return result + + +def plot_triangles(triangles): + """ + Plots a series of refined triangles. + + :param triangles: A numpy array of shape (N, 3, 2) containing the vertices of all triangles. + """ + import matplotlib.pyplot as plt + import matplotlib.tri as tri + + plt.figure(figsize=(8, 8)) + ax = plt.gca() + + # Flatten the array for plotting + triangles_flat = triangles.reshape(-1, 2) + tri_indices = np.arange(len(triangles_flat)).reshape(-1, 3) + + # Create a Triangulation object + triangulation = tri.Triangulation(triangles_flat[:, 0], triangles_flat[:, 1], tri_indices) + + # Plot the triangulation + ax.triplot(triangulation, 'ko-') + + # Setting the aspect ratio to be equal to ensure the triangle is not distorted + ax.set_aspect('equal') + + # Turn off the grid + ax.grid(False) + + # Setting the limits to get a better view + ax.set_xlim(triangles_flat[:, 0].min() - 0.1, triangles_flat[:, 0].max() + 0.1) + ax.set_ylim(triangles_flat[:, 1].min() - 0.1, triangles_flat[:, 1].max() + 0.1) + + # Add a title + plt.title('Refined Triangles Visualization') + plt.show() + + +def test_refine_triangle(): + # Example usage + initial_triangle = np.array([[[0, 0], [1, 0], [0.5, np.sqrt(3) / 2]]]) + L = 2 # Set the desired level of refinement + + # Refine the triangle + refined_triangles = refine_element(initial_triangle, L) + print(f"Refined Triangles at Level 
{L}:") + print(refined_triangles) + print("Total triangles:", len(refined_triangles)) + + plot_triangles(refined_triangles) + + + +def plot_tetrahedra(tetrahedra): + """ + Plots a series of refined tetrahedra in 3D. + + :param tetrahedra: A numpy array of shape (N, 4, 3) containing the vertices of all tetrahedra. + """ + import matplotlib.pyplot as plt + from mpl_toolkits.mplot3d.art3d import Poly3DCollection + + fig = plt.figure(figsize=(10, 8)) + ax = fig.add_subplot(111, projection='3d') + + # Flatten the array for plotting + for tet in tetrahedra: + vtx = tet.reshape(-1, 3) + tri = [[vtx[0], vtx[1], vtx[2]], + [vtx[0], vtx[1], vtx[3]], + [vtx[0], vtx[2], vtx[3]], + [vtx[1], vtx[2], vtx[3]]] + for s in tri: + poly = Poly3DCollection([s], edgecolor='k', alpha=0.2, facecolor=np.random.rand(3, )) + ax.add_collection3d(poly) + + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.set_zlim(0, 1) + plt.title('Refined Tetrahedra Visualization') + plt.show() + +def test_refine_tetra(): + initial_tetrahedron = np.array( + [[[0, 0, 0], [1, 0, 0], [0.5, np.sqrt(3) / 2, 0], [0.5, np.sqrt(3) / 6, np.sqrt(6) / 3]]]) + L = 1 # Set the desired level of refinement + + # Refine the tetrahedron + refined_tetrahedra = refine_element(initial_tetrahedron, L) + plot_tetrahedra(refined_tetrahedra) + +""" +Projection using aditional quad points on the source mesh. +We need to generate baricenters of an L-order refinement of a simplex. +Vertices of first reinement of triangle: +vertices coords relative to V0, V1, V2 (bary coords) +T0: (1, 0, 0), (1/2, 1/2, 0), (1/2, 0, 1/2) +T1: (1/2, 1/2, 0), (0, 1, 0), (0, 1/2, 1/2) +T2: (1/2, 1/2, 0), (0, 1/2, 1/2), (0, 0, 1), +T3: (1/2, 1/2, 0), (1/2, 1/2, 0), (1/2, 0, 1/2) + +ON size 2 grid: +T0: (1, 0, 0), (1/2, 1/2, 0), (1/2, 0, 1/2) +T1: (1/2, 1/2, 0), (0, 1, 0), (0, 1/2, 1/2) +T2: (1/2, 1/2, 0), (0, 1/2, 1/2), (0, 0, 1), +T3: (1/2, 1/2, 0), (1/2, 1/2, 0), (1/2, 0, 1/2) + +... tensor (4, 3, 3) ... 
n_childes, n_source_veritices, n_result verites (same as source) +source_vertices ... shape (n_vertices, coords_3d) +.... T[:n_childs, :n_vertices, :, None] * source_vertices[None, :, None, :] + +Iterative aplication of the tensor + finaly barycenters. +""" + def project_ref_solution(flow_out, grid: Grid): # Velocity P0 projection # 1. get array of barycenters (source points) and element volumes of the fine mesh @@ -277,7 +463,7 @@ def homo_decovalex(fr_media: FracturedMedia, grid:Grid): k_voigt = k_iso_xyz[:, None] * np.array([1, 1, 1, 0, 0, 0])[None, :] return k_voigt - +@pytest.mark.skip def test_two_scale(): # Fracture set domain_size = 100 @@ -288,8 +474,8 @@ def test_two_scale(): # Coarse Problem - steps = (50, 60, 70) - #steps = (3, 4, 5) + #steps = (50, 60, 70) + steps = (3, 4, 5) grid = Grid(domain_size, steps, Fe.Q(dim=3), origin=-domain_size / 2) bc_pressure_gradient = [1, 0, 0] @@ -297,7 +483,9 @@ def test_two_scale(): ref_velocity_grid = grid.cell_field_F_like(project_ref_solution(flow_out, grid).reshape((-1, 3))) grid_permitivity = homo_decovalex(fr_media, grid) - pressure = grid.solve_system(grid_permitivity, np.array(bc_pressure_gradient)[None, :]) + #pressure = grid.solve_direct(grid_permitivity, np.array(bc_pressure_gradient)[None, :]) + pressure = grid.solve_sparse(grid_permitivity, np.array(bc_pressure_gradient)[None, :]) + pressure_flat = pressure.reshape((1, -1)) #pressure_flat = (grid.nodes() @ bc_pressure_gradient)[None, :] From 58d01cde5a69ff6b6ceded53c51392fed5bd1479 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Mon, 15 Apr 2024 22:39:44 +0200 Subject: [PATCH 11/34] Sparce FEM solver, refinement flow123d projection --- src/bgem/fn.py | 19 ++ src/bgem/upscale/__init__.py | 3 +- src/bgem/upscale/fem.py | 83 ++++++- src/bgem/upscale/voxelize.py | 235 ++++++++++++++---- tests/upscale/decovalex_dfnmap.py | 4 - tests/upscale/flow_upscale_templ.yaml | 1 + tests/upscale/test_two_scale.py | 188 ++++++++++++-- .../{test_rasterize.py => 
test_voxelize.py} | 78 ++---- 8 files changed, 456 insertions(+), 155 deletions(-) create mode 100644 src/bgem/fn.py rename tests/upscale/{test_rasterize.py => test_voxelize.py} (51%) diff --git a/src/bgem/fn.py b/src/bgem/fn.py new file mode 100644 index 0000000..a6532d1 --- /dev/null +++ b/src/bgem/fn.py @@ -0,0 +1,19 @@ +""" +Various function programming tools. +""" + +def compose(*functions): + """ + Return composition of functions: + compose(A,B,C)(any args) is equivalent to A(B(C(any args)) + + Useful for functional programming and dependency injection. + """ + def composed_function(*args, **kwargs): + # Start by applying the rightmost function with all arguments + result = functions[-1](*args, **kwargs) + # Then apply the rest in reverse order, each to the result of the previous + for f in reversed(functions[:-1]): + result = f(result) + return result + return composed_function \ No newline at end of file diff --git a/src/bgem/upscale/__init__.py b/src/bgem/upscale/__init__.py index d53b643..f2da122 100644 --- a/src/bgem/upscale/__init__.py +++ b/src/bgem/upscale/__init__.py @@ -1,2 +1,3 @@ from .fem import Fe, Grid, upscale -from .fields import voigt_to_tn, tn_to_voigt \ No newline at end of file +from .fields import voigt_to_tn, tn_to_voigt +from .voxelize import FracturedMedia \ No newline at end of file diff --git a/src/bgem/upscale/fem.py b/src/bgem/upscale/fem.py index 9cbe695..fb4c0a8 100644 --- a/src/bgem/upscale/fem.py +++ b/src/bgem/upscale/fem.py @@ -6,6 +6,8 @@ from .fields import tn_to_voigt, voigt_to_tn, voigt_coords from .homogenization import equivalent_posdef_tensor #from bgem.stochastic import dfn +import scipy.sparse as sp +import pyamg def Q1_1d_basis(points): """ @@ -425,8 +427,7 @@ def loc_mat_ij(self): cols = np.tile(self.el_dofs[:, None, :], [1, n_loc_dofs, 1]) return rows, cols - - def assembly_dense(self, K_voight_tensors): + def _loc_matrices(self, K_voight_tensors): """ K_voight_tensors, shape: (n_elements, n_voight) """ @@ 
-437,14 +438,44 @@ def assembly_dense(self, K_voight_tensors): #loc_matrices = np.zeros((self.n_loc_dofs self.n_loc_dofs, self.n_elements)) #np.matmul(.T[:, None, :], laplace[None, :, :], out=loc_matrices.reshape(-1, self.n_elements).T) loc_matrices = K_voight_tensors[:, None, :] @ laplace[None, :, :] - loc_matrices = loc_matrices.reshape((self.n_elements, self.fe.n_dofs, self.fe.n_dofs)) + return loc_matrices.reshape((self.n_elements, self.fe.n_dofs, self.fe.n_dofs)) + + def assembly_dense(self, K_voight_tensors): + """ + K_voight_tensors, shape: (n_elements, n_voight) + """ + loc_matrices = self._loc_matrices(K_voight_tensors) A = np.zeros((self.n_dofs, self.n_dofs)) # Use advanced indexing to add local matrices to the global matrix np.add.at(A, self.loc_mat_ij, loc_matrices) return A + def assembly_csr(self, K_voight_tensors): + """ + K_voight_tensors, shape: (n_elements, n_voight) + """ + rows, cols = self.loc_mat_ij + values = self._loc_matrices(K_voight_tensors) + rows, cols, values = map(np.ravel, [rows, cols, values]) - def solve_system(self, K, p_grad_bc): + # Drop boundary rows. 
+ bulk_rows = rows >= self.n_bc_dofs + rows, cols, values = [array[bulk_rows] for array in (rows, cols, values)] + rows = rows - self.n_bc_dofs + + # Split boundary and bulk rows + # See also np.split for possible faster implementation + n_dofs = self.n_dofs - self.n_bc_dofs + def sub_mat(rows, cols, vals, condition, n_cols, idx_shift): + sub_cols = cols[condition] - idx_shift + return sp.csr_matrix((vals[condition], (rows[condition], sub_cols)), shape=(n_dofs, n_cols)) + + bulk_cols = (cols >= self.n_bc_dofs) + A_bulk = sub_mat(rows, cols, values, bulk_cols, self.n_dofs - self.n_bc_dofs, self.n_bc_dofs) + A_bc = sub_mat(rows, cols, values, ~bulk_cols, self.n_bc_dofs, 0) + return A_bulk, A_bc + + def solve_direct(self, K, p_grad_bc): """ :param K: array, shape: (n_elements, n_voight) K = array of shape (*self.shape, n_voight).reshape(-1, n_voight) @@ -467,6 +498,35 @@ def solve_system(self, K, p_grad_bc): pressure_natur[:, self.natur_map[:]] = pressure[:, :] return pressure_natur.reshape((n_rhs, *self.dofs_shape)) + + def solve_sparse(self, K, p_grad_bc): + """ + :param K: array, shape: (n_elements, n_voight) + K = array of shape (*self.shape, n_voight).reshape(-1, n_voight) + cell at position (iX, iY, iZ) has index + (iX * self.shape[1] + iY) * self.shape[2] + iZ + i.e. 
the Z index is running fastest, + :param p_grad_bc: array, shape: (n_vectors, dim) + usually n_vectors >= dim + :return: pressure, shape: (n_vectors, n_dofs) + """ + n_rhs, d = p_grad_bc.shape + assert d == self.dim + A_bulk, A_bc = self.assembly_csr(K) + pressure_bc = p_grad_bc @ self.bc_points.T # (n_vectors, n_bc_dofs) + B = (A_bc @ pressure_bc.T).T # (n_vectors, n_interior_dofs) + + #pyamg.ruge_stuben_solver(A_bulk) + solver = pyamg.smoothed_aggregation_solver(A_bulk, symmetry='symmetric') + pressure = np.empty((n_rhs, self.n_dofs)) + pressure[:, :self.n_bc_dofs] = pressure_bc + for pressure_comp, b in zip(pressure, B): + pressure_comp[self.n_bc_dofs:] = solver.solve(-b) + #pressure[:, ] = np.linalg.solve(A[self.n_bc_dofs:, self.n_bc_dofs:], -B.T).T + pressure_natur = np.empty_like(pressure) + pressure_natur[:, self.natur_map[:]] = pressure[:, :] + return pressure_natur.reshape((n_rhs, *self.dofs_shape)) + def field_grad(self, dof_vals): """ Compute solution gradient in element barycenters. @@ -479,6 +539,19 @@ def field_grad(self, dof_vals): grad_els = grad_basis[None,None,:, :,0] @ el_dof_vals[:,:, :, None] return grad_els[:, :,:,0] + def project_points(self, points): + """ + :param points: array of shape (N, dim) + :return: For each point the index of the containing grid cell. 
+ """ + #grid_min_corner = -grid.dimensions / 2 + centers_ijk_grid = (points - self.origin) // self.step[None, :] + centers_ijk_grid = centers_ijk_grid.astype(np.int32) + assert np.alltrue(centers_ijk_grid < self.shape[None, :]) + grid_cell_idx = centers_ijk_grid[:, 0] + self.shape[0] * ( + centers_ijk_grid[:, 1] + self.shape[1] * centers_ijk_grid[:, 2]) + return grid_cell_idx + def upscale(K, domain=None): """ @@ -494,7 +567,7 @@ def upscale(K, domain=None): g = Grid(domain, K.shape[:-1], Fe.Q(dim, order)) p_grads = np.eye(dim) K_els = K.reshape((g.n_elements, -1)) - pressure = g.solve_system(K_els, p_grads) + pressure = g.solve_direct(K_els, p_grads) #xy_grid = [np.linspace(0, g.size[i], g.ax_dofs[i]) for i in range(2)] #fem_plot.plot_pressure_fields(*xy_grid, pressure) pressure_flat = pressure.reshape((len(p_grads), -1)) diff --git a/src/bgem/upscale/voxelize.py b/src/bgem/upscale/voxelize.py index c9f7ce8..affe734 100644 --- a/src/bgem/upscale/voxelize.py +++ b/src/bgem/upscale/voxelize.py @@ -4,6 +4,7 @@ import attrs from functools import cached_property from bgem.stochastic import Fracture +from bgem.upscale import Grid import numpy as np """ Voxelization of fracture network. 
@@ -50,6 +51,108 @@ """ + +@attrs.define +class FracturedMedia: + dfn: List[Fracture] + fr_cross_section: np.ndarray # shape (n_fractures,) + fr_conductivity: np.ndarray # shape (n_fractures,) + conductivity: float + + @staticmethod + def fracture_cond_params(dfn, unit_cross_section, bulk_conductivity): + # unit_cross_section = 1e-4 + viscosity = 1e-3 + gravity_accel = 10 + density = 1000 + permeability_factor = 1 / 12 + permeability_to_conductivity = gravity_accel * density / viscosity + # fr cond r=100 ~ 80 + # fr cond r=10 ~ 0.8 + fr_r = np.array([fr.r for fr in dfn]) + fr_cross_section = unit_cross_section * fr_r + fr_cond = permeability_to_conductivity * permeability_factor * fr_r ** 2 + fr_cond = np.full_like(fr_r, 10) + return FracturedMedia(dfn, fr_cross_section, fr_cond, bulk_conductivity) + +@attrs.define +class Intersection: + grid: Grid + fractures: List[Fracture] + i_fr_cell: np.ndarray + factor: np.ndarray = None + +def intersections_decovalex(grid: Grid, fractures: List[Fracture]): + """ + Estimate intersections between grid cells and fractures + + Temporary interface to original map_dfn code inorder to perform one to one test. + """ + import decovalex_dfnmap as dmap + + ellipses = [dmap.Ellipse(fr.normal, fr.center, fr.scale) for fr in fractures] + d_grid = dmap.Grid.make_grid(grid.origin, grid.step, grid.dimensions) + fractures = map_dfn(d_grid, ellipses) + fr, cell = zip([(i_fr, i_cell) for i_fr, fr in enumerate(fractures) for i_cell in fr.cells]) + return Intersection(grid, fractures, np.vstack(fr, cell), None) + +def perm_aniso_fr_values(fractures, fr_transmisivity: np.array, grid_step) -> np.ndarray: + '''Calculate anisotropic permeability tensor for each cell of ECPM + intersected by one or more fractures. Discard off-diagonal components + of the tensor. Assign background permeability to cells not intersected + by fractures. + Return numpy array of anisotropic permeability (3 components) for each + cell in the ECPM. 
+ + fracture = numpy array containing number of fractures in each cell, list of fracture numbers in each cell + ellipses = [{}] containing normal and translation vectors for each fracture + T = [] containing intrinsic transmissivity for each fracture + d = length of cell sides + k_background = float background permeability for cells with no fractures in them + ''' + assert len(fractures) == len(fr_transmisivity) + # Construc array of fracture tensors + def full_tensor(n, fr_cond): + normal_axis_step = grid_step[np.argmax(np.abs(n))] + return fr_cond * (np.eye(3) - n[:, None] * n[None, :]) / normal_axis_step + + return np.array([full_tensor(fr.normal, fr_cond) for fr, fr_cond in zip(fractures, fr_transmisivity)]) + + +def perm_iso_fr_values(fractures, fr_transmisivity: np.array, grid_step) -> np.ndarray: + '''Calculate isotropic permeability for each cell of ECPM intersected by + one or more fractures. Sums fracture transmissivities and divides by + cell length (d) to calculate cell permeability. + Assign background permeability to cells not intersected by fractures. + Returns numpy array of isotropic permeability for each cell in the ECPM. 
+ + fracture = numpy array containing number of fractures in each cell, list of fracture numbers in each cell + T = [] containing intrinsic transmissivity for each fracture + d = length of cell sides + k_background = float background permeability for cells with no fractures in them + ''' + assert len(fractures) == len(fr_transmisivity) + fr_norm = np.array([fr.normal for fr in fractures]) + normalised_transmissivity = fr_transmisivity / grid_step[np.argmax(np.abs(fr_norm), axis=1)] + return normalised_transmissivity + +def _conductivity_decovalex(fr_media: FracturedMedia, grid: Grid, fr_values_fn): + isec = intersections_decovalex(grid, fr_media.dfn) + fr_transmissivity = fr_media.fr_conductivity * fr_media.fr_cross_section + fr_values = fr_values_fn(isec.fractures, fr_transmissivity, isec.grid.step) + # accumulate tensors in cells + ncells = isec.grid.n_elements + k_aniso = np.full((ncells, 3, 3), fr_media.conductivity, dtype=np.float64) + k_aniso[isec.i_fr_cell[1]] += fr_values[isec.i_fr_cell[0]] + return k_aniso #arange_for_hdf5(grid, k_iso).flatten() + +def permeability_aniso_decovalex(fr_media: FracturedMedia, grid: Grid): + return _conductivity_decovalex(fr_media, grid, perm_aniso_fr_values) + +def permeability_iso_decovalex(fr_media: FracturedMedia, grid: Grid): + return _conductivity_decovalex(fr_media, grid, perm_iso_fr_values) + + class EllipseShape: def is_point_inside(self, x, y): return x**2 + y**2 <= 1 @@ -188,6 +291,28 @@ def test_PolyShape(): np.testing.assert_array_equal(actual_results, expected_results, "The are_points_inside method failed to accurately determine if points are inside the polygon.") +def aniso_lump(tn_array): + """ + Convert array of full anisotropic tensors to the array of diagonal + tensors by lumping (summing) tensor rows to the diagonal. 
+ :param tn_array: shape (n, k, k) + """ + assert len(tn_array.shape) == 3 + assert tn_array.shape[1] == tn_array.shape[2] + return np.sum(tn_array, axis=-1)[:, None, :] * np.eye(3) + +def aniso_diag(tn_array): + """ + Convert array of full anisotropic tensors to the array of diagonal + tensors by extraction only diagonal elements. + :param tn_array: shape (n, k, k) + """ + assert len(tn_array.shape) == 3 + assert tn_array.shape[1] == tn_array.shape[2] + return tn_array * np.eye(3)[None, :, :] + + + @attrs.define class FractureVoxelize: @@ -214,13 +339,13 @@ class FractureVoxelize: - @cached_property - def cell_fr_sums(self): - cell_sums = np.zeros(, dtype=np.float64) - + # @cached_property + # def cell_fr_sums(self): + # cell_sums = np.zeros(, dtype=np.float64) + # def project_property(self, fr_property, bulk_property): - + pass class FractureBoundaries3d: @staticmethod @@ -329,55 +454,55 @@ def intersect_cell(loc_corners: np.array, ellipse: Fracture) -> bool: return True -def fracture_for_ellipse(grid: Grid, i_ellipse: int, ellipse: Ellipse) -> Fracture: - # calculate rotation matrix for use later in rotating coordinates of nearby cells - direction = np.cross([0,0,1], ellipse.normal) - #cosa = np.dot([0,0,1],normal)/(np.linalg.norm([0,0,1])*np.linalg.norm(normal)) #frobenius norm = length - #above evaluates to normal[2], so: - angle = np.arccos(ellipse.normal[2]) # everything is in radians - mat_to_local = tr.rotation_matrix(angle, direction)[:3, :3].T - #find fracture in domain coordinates so can look for nearby cells - b_box_min = ellipse.translation - np.max(ellipse.radius) - b_box_max = ellipse.translation + np.max(ellipse.radius) - - i_box_min = grid.cell_coord(b_box_min) - i_box_max = grid.cell_coord(b_box_max) + 1 - axis_ranges = [range(max(0, a), min(b, n)) for a, b, n in zip(i_box_min, i_box_max, grid.cell_dimensions)] - - grid_cumul_prod = np.array([1, grid.cell_dimensions[0], grid.cell_dimensions[0] * grid.cell_dimensions[1]]) - cells = [] - # X 
fastest running - for ijk in itertools.product(*reversed(axis_ranges)): - # make X the first coordinate - ijk = np.flip(np.array(ijk)) - corners = grid.origin[None, :] + (ijk[None, :] + __rel_corner[:, :]) * grid.step[None, :] - loc_corners = mat_to_local @ (corners - ellipse.translation).T - if intersect_cell(loc_corners, ellipse): - logging.log(logging.DEBUG, f" cell {ijk}") - cell_index = ijk @ grid_cumul_prod - cells.append(cell_index) - if len(cells) > 0: - logging.log(logging.INFO, f" #{i_ellipse} fr, {len(cells)} cell intersections") - return Fracture(ellipse, cells) - -def map_dfn(grid, ellipses): - '''Identify intersecting fractures for each cell of the ECPM domain. - Extent of ECPM domain is determined by nx,ny,nz, and d (see below). - ECPM domain can be smaller than the DFN domain. - Return numpy array (fracture) that contains for each cell: - number of intersecting fractures followed by each intersecting fracture id. - - ellipses = list of dictionaries containing normal, translation, xrad, - and yrad for each fracture - origin = [x,y,z] float coordinates of lower left front corner of DFN domain - nx = int number of cells in x in ECPM domain - ny = int number of cells in y in ECPM domain - nz = int number of cells in z in ECPM domain - step = [float, float, float] discretization length in ECPM domain - - JB TODO: allow smaller ECPM domain - ''' - logging.log(logging.INFO, f"Callculating Fracture - Cell intersections ...") - return [fracture_for_ellipse(grid, ie, ellipse) for ie, ellipse in enumerate(ellipses)] +# def fracture_for_ellipse(grid: Grid, i_ellipse: int, ellipse: Ellipse) -> Fracture: +# # calculate rotation matrix for use later in rotating coordinates of nearby cells +# direction = np.cross([0,0,1], ellipse.normal) +# #cosa = np.dot([0,0,1],normal)/(np.linalg.norm([0,0,1])*np.linalg.norm(normal)) #frobenius norm = length +# #above evaluates to normal[2], so: +# angle = np.arccos(ellipse.normal[2]) # everything is in radians +# mat_to_local = 
tr.rotation_matrix(angle, direction)[:3, :3].T +# #find fracture in domain coordinates so can look for nearby cells +# b_box_min = ellipse.translation - np.max(ellipse.radius) +# b_box_max = ellipse.translation + np.max(ellipse.radius) +# +# i_box_min = grid.cell_coord(b_box_min) +# i_box_max = grid.cell_coord(b_box_max) + 1 +# axis_ranges = [range(max(0, a), min(b, n)) for a, b, n in zip(i_box_min, i_box_max, grid.cell_dimensions)] +# +# grid_cumul_prod = np.array([1, grid.cell_dimensions[0], grid.cell_dimensions[0] * grid.cell_dimensions[1]]) +# cells = [] +# # X fastest running +# for ijk in itertools.product(*reversed(axis_ranges)): +# # make X the first coordinate +# ijk = np.flip(np.array(ijk)) +# corners = grid.origin[None, :] + (ijk[None, :] + __rel_corner[:, :]) * grid.step[None, :] +# loc_corners = mat_to_local @ (corners - ellipse.translation).T +# if intersect_cell(loc_corners, ellipse): +# logging.log(logging.DEBUG, f" cell {ijk}") +# cell_index = ijk @ grid_cumul_prod +# cells.append(cell_index) +# if len(cells) > 0: +# logging.log(logging.INFO, f" #{i_ellipse} fr, {len(cells)} cell intersections") +# return Fracture(ellipse, cells) +# +# def map_dfn(grid, ellipses): +# '''Identify intersecting fractures for each cell of the ECPM domain. +# Extent of ECPM domain is determined by nx,ny,nz, and d (see below). +# ECPM domain can be smaller than the DFN domain. +# Return numpy array (fracture) that contains for each cell: +# number of intersecting fractures followed by each intersecting fracture id. 
+# +# ellipses = list of dictionaries containing normal, translation, xrad, +# and yrad for each fracture +# origin = [x,y,z] float coordinates of lower left front corner of DFN domain +# nx = int number of cells in x in ECPM domain +# ny = int number of cells in y in ECPM domain +# nz = int number of cells in z in ECPM domain +# step = [float, float, float] discretization length in ECPM domain +# +# JB TODO: allow smaller ECPM domain +# ''' +# logging.log(logging.INFO, f"Callculating Fracture - Cell intersections ...") +# return [fracture_for_ellipse(grid, ie, ellipse) for ie, ellipse in enumerate(ellipses)] diff --git a/tests/upscale/decovalex_dfnmap.py b/tests/upscale/decovalex_dfnmap.py index 8448ee0..5ff654b 100644 --- a/tests/upscale/decovalex_dfnmap.py +++ b/tests/upscale/decovalex_dfnmap.py @@ -380,11 +380,7 @@ def permAnisoRaw(grid: Grid, fractures: List[Fracture], fr_transmisivity: np.arr # # return k_aniso -def aniso_lump(tn_array): - return np.sum(tn_array, axis=-1)[:, None, :] * np.eye(3) -def aniso_diag(tn_array): - return tn_array * np.eye(3)[None, :, :] def count_stuff(filename='mapELLIPSES.txt'): '''Mapping DFN to ECPM can result in false connections when non-intersecting diff --git a/tests/upscale/flow_upscale_templ.yaml b/tests/upscale/flow_upscale_templ.yaml index 3a0c3c6..2d28fc9 100644 --- a/tests/upscale/flow_upscale_templ.yaml +++ b/tests/upscale/flow_upscale_templ.yaml @@ -38,6 +38,7 @@ problem: !Coupling_Sequential - piezo_head_p0 #- pressure_p0 #- pressure_p1 + - cross_section - velocity_p0 - region_id - conductivity diff --git a/tests/upscale/test_two_scale.py b/tests/upscale/test_two_scale.py index 1da5a8a..8a3a7ab 100644 --- a/tests/upscale/test_two_scale.py +++ b/tests/upscale/test_two_scale.py @@ -30,7 +30,7 @@ from bgem.gmsh import gmsh, options from mesh_class import Mesh from bgem.core import call_flow, dotdict, workdir as workdir_mng -from bgem.upscale import Grid, Fe, voigt_to_tn, tn_to_voigt +from bgem.upscale import Grid, Fe, 
voigt_to_tn, tn_to_voigt, FracturedMedia, voxelize import decovalex_dfnmap as dmap from scipy.interpolate import LinearNDInterpolator @@ -40,28 +40,6 @@ memory = Memory(workdir, verbose=0) -@attrs.define -class FracturedMedia: - dfn: List[stochastic.Fracture] - fr_cross_section: np.ndarray # shape (n_fractures,) - fr_conductivity: np.ndarray # shape (n_fractures,) - conductivity: float - - @staticmethod - def fracture_cond_params(dfn, unit_cross_section, bulk_conductivity): - # unit_cross_section = 1e-4 - viscosity = 1e-3 - gravity_accel = 10 - density = 1000 - permeability_factor = 1 / 12 - permeability_to_conductivity = gravity_accel * density / viscosity - # fr cond r=100 ~ 80 - # fr cond r=10 ~ 0.8 - fr_r = np.array([fr.r for fr in dfn]) - fr_cross_section = unit_cross_section * fr_r - fr_cond = permeability_to_conductivity * permeability_factor * fr_r ** 2 - fr_cond = np.full_like(fr_r, 10) - return FracturedMedia(dfn, fr_cross_section, fr_cond, bulk_conductivity) def fracture_set(seed, size_range, max_frac = None): rmin, rmax = size_range @@ -247,6 +225,156 @@ def project_ref_solution_(flow_out, grid: Grid): return grid_velocities.reshape((*grid.shape, 3)) +def det33(mat): + """ + mat: (N, 3, 3) + :param mat: + :return: (N, ) + """ + return sum( + np.prod(mat[:, [(col, (row+step)%3) for col in range(3)]]) + for row in [0, 1, 2] for step in [1,2] + ) + +@memory.cache +def refine_barycenters(element, level): + """ + Produce refinement of given element (triangle or tetrahedra), shape (N, n_vertices, 3) + and return barycenters of refined subelements. 
+ """ + return np.mean(refine_element(element, level), axis=1) + +@memory.cache +def project_adaptive_source_quad(flow_out, grid: Grid): + grid_cell_volume = np.prod(grid.step)/27 + + ref_el_2d = np.array([(0, 0), (1, 0), (0, 1)]) + ref_el_3d = np.array([(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]) + + pvd_content = pv.get_reader(flow_out.hydro.spatial_file.path) + pvd_content.set_active_time_point(0) + dataset = pvd_content.read()[0] # Take first block of the Multiblock dataset + + velocities = dataset.cell_data['velocity_p0'] + cross_section = dataset.cell_data['cross_section'] + + #num_cells = dataset.n_cells + #shifts = np.zeros((num_cells, 3)) + #transform_matrices = np.zeros((num_cells, 3, 3)) + #volumes = np.zeros(num_cells) + + weights_sum = np.zeros((grid.n_elements,)) + grid_velocities = np.zeros((grid.n_elements, 3)) + levels = np.zeros(dataset.n_cells, dtype=np.int32) + # Loop through each cell + for i in range(dataset.n_cells): + cell = dataset.extract_cells(i) + points = cell.points + + if len(points) < 3: + continue # Skip cells with less than 3 vertices + + # Shift: the first vertex of the cell + shift = points[0] + #shifts[i] = shift + + transform_matrix = points[1:] - shift + if len(points) == 4: # Tetrahedron + # For a tetrahedron, we use all three vectors formed from the first vertex + #transform_matrices[i] = transform_matrix[:3].T + # Volume calculation for a tetrahedron: + volume = np.abs(np.linalg.det(transform_matrix[:3])) / 6 + ref_el = ref_el_3d + elif len(points) == 3: # Triangle + # For a triangle, we use only two vectors + #transform_matrices[i, :2] = transform_matrix.T + # Area calculation for a triangle: + volume = 0.5 * np.linalg.norm(np.cross(transform_matrix[0], transform_matrix[1])) * cross_section[i] + ref_el = ref_el_2d + level = max(int(np.log2(volume/grid_cell_volume) / 3.0), 0) + levels[i] = level + ref_barycenters = refine_barycenters(ref_el[None, :, :],level) + barycenters = shift[None, :] + ref_barycenters @ 
transform_matrix + grid_indices = grid.project_points(barycenters) + weights_sum[grid_indices] += volume + grid_velocities[grid_indices] += volume * velocities[i] + print(np.bincount(levels)) + grid_velocities = grid_velocities / weights_sum[:, None] + return grid_velocities + +# def project_adaptive_source_quad(flow_out, grid: Grid): +# grid_cell_volume = np.prod(grid.step) +# +# pvd_content = pv.get_reader(flow_out.hydro.spatial_file.path) +# pvd_content.set_active_time_point(0) +# dataset = pvd_content.read()[0] # Take first block of the Multiblock dataset +# +# tetrahedrons = dataset.extract_cells(dataset.celltypes == 10) # Extract all tetrahedron cells +# connectivity = tetrahedrons.cells.reshape(-1, 5)[:, 1:] +# nodes = tetrahedrons.points[connectivity] +# jac_mat = nodes[:, 1:, :] - nodes[:, :1, :] +# volumes = det33(jac_mat) / 6 +# levels = int(np.log2(volumes/grid_cell_volume)) +# add_el_idx = lambda barycenters, el_orig : (barycenters, np.full(len(barycenters), el_orig, dtype=np.int32)) +# bulk_barycenters = ( +# add_el_idx(refine_barycenters(tetra, level)) +# for i_el, (tetra, level) in enumerate(zip(tetrahedrons, levels)) +# ) +# bulk_barycenters, orig_els = [np.concatenate(list(lst), axis=0) for lst in bulk_barycenters] +# +# tetrahedrons = dataset.extract_cells(dataset.celltypes == 10) # Extract all tetrahedron cells +# connectivity = tetrahedrons.cells.reshape(-1, 5)[:, 1:] +# nodes = tetrahedrons.points[connectivity] +# jac_mat = nodes[:, 1:, :] - nodes[:, :1, :] +# volumes = det33(jac_mat) / 6 +# levels = int(np.log2(volumes/grid_cell_volume)) +# add_el_idx = lambda barycenters, el_orig : (barycenters, np.full(len(barycenters), el_orig, dtype=np.int32)) +# bulk_barycenters = ( +# add_el_idx(refine_barycenters(tetra, level)) +# for i_el, (tetra, level) in enumerate(zip(tetrahedrons, levels)) +# ) +# bulk_barycenters, orig_els = [np.concatenate(list(lst), axis=0) for lst in bulk_barycenters] +# +# triangles = dataset.extract_cells(dataset.celltypes 
== 5) # Extract all triangle cells +# +# # Get the connectivity array (indices of nodes forming each cell) +# # This step extracts the indices array where each group of indices forms a cell +# triangle_indices = triangle_cells.cells.reshape(-1, 4)[:, +# 1:] # Skip the first column which is the count of nodes per cell +# +# # Now, to get the nodes of these cells: +# triangle_nodes = triangles.points +# tetrahedron_nodes = tetrahedrons.points +# +# node_points = dataset.points +# cell_nodes_2d = dataset.cells +# cell_nodes_3d = dataset.cells +# +# for cell in cell_nodes_2d: +# nodes = node_points[cell, :] +# assert nodes.shape[0] == 3 +# ref_to_xyz = np.vstack(nodes[1] - nodes[0], nodes[2] - nodes[0]) # (3, 2) +# cell_centers_coords = dataset.cell_centers().points +# grid_min_corner = -grid.dimensions / 2 +# centers_ijk_grid = (cell_centers_coords - grid_min_corner) // grid.step[None, :] +# centers_ijk_grid = centers_ijk_grid.astype(np.int32) +# assert np.alltrue(centers_ijk_grid < grid.shape[None, :]) +# +# grid_cell_idx = centers_ijk_grid[:, 0] + grid.shape[0] * (centers_ijk_grid[:, 1] + grid.shape[1] * centers_ijk_grid[:, 2]) +# sized = dataset.compute_cell_sizes() +# cell_volume = np.abs(sized.cell_data["Volume"]) +# grid_sum_cell_volume = np.zeros(grid.n_elements) +# np.add.at(grid_sum_cell_volume, grid_cell_idx, cell_volume) +# weights = cell_volume[:] / grid_sum_cell_volume[grid_cell_idx[:]] +# +# velocities = dataset.cell_data['velocity_p0'] +# grid_velocities = np.zeros((grid.n_elements, 3)) +# wv = weights[:, None] * velocities +# for ax in [0, 1, 2]: +# np.add.at(grid_velocities[:, ax], grid_cell_idx, wv[:, ax]) +# +# return grid_velocities.reshape((*grid.shape, 3)) + # Define transformation matrices and index mappings for 2D and 3D refinements @@ -355,7 +483,7 @@ def plot_triangles(triangles): plt.title('Refined Triangles Visualization') plt.show() - +@pytest.mark.skip def test_refine_triangle(): # Example usage initial_triangle = np.array([[[0, 0], [1, 
0], [0.5, np.sqrt(3) / 2]]]) @@ -400,6 +528,7 @@ def plot_tetrahedra(tetrahedra): plt.title('Refined Tetrahedra Visualization') plt.show() +@pytest.mark.skip def test_refine_tetra(): initial_tetrahedron = np.array( [[[0, 0, 0], [1, 0, 0], [0.5, np.sqrt(3) / 2, 0], [0.5, np.sqrt(3) / 6, np.sqrt(6) / 3]]]) @@ -409,6 +538,7 @@ def test_refine_tetra(): refined_tetrahedra = refine_element(initial_tetrahedron, L) plot_tetrahedra(refined_tetrahedra) + """ Projection using aditional quad points on the source mesh. We need to generate baricenters of an L-order refinement of a simplex. @@ -463,7 +593,7 @@ def homo_decovalex(fr_media: FracturedMedia, grid:Grid): k_voigt = k_iso_xyz[:, None] * np.array([1, 1, 1, 0, 0, 0])[None, :] return k_voigt -@pytest.mark.skip +#@pytest.mark.skip def test_two_scale(): # Fracture set domain_size = 100 @@ -475,12 +605,16 @@ def test_two_scale(): # Coarse Problem #steps = (50, 60, 70) - steps = (3, 4, 5) + steps = (20, 20, 20) + #steps = (50, 60, 70) + #steps = (3, 4, 5) grid = Grid(domain_size, steps, Fe.Q(dim=3), origin=-domain_size / 2) bc_pressure_gradient = [1, 0, 0] flow_out = reference_solution(fr_media, grid.dimensions, bc_pressure_gradient) - ref_velocity_grid = grid.cell_field_F_like(project_ref_solution(flow_out, grid).reshape((-1, 3))) + project_fn = project_adaptive_source_quad + #project_fn = project_ref_solution + ref_velocity_grid = grid.cell_field_F_like(project_fn(flow_out, grid).reshape((-1, 3))) grid_permitivity = homo_decovalex(fr_media, grid) #pressure = grid.solve_direct(grid_permitivity, np.array(bc_pressure_gradient)[None, :]) diff --git a/tests/upscale/test_rasterize.py b/tests/upscale/test_voxelize.py similarity index 51% rename from tests/upscale/test_rasterize.py rename to tests/upscale/test_voxelize.py index bee65e0..c7d94bd 100644 --- a/tests/upscale/test_rasterize.py +++ b/tests/upscale/test_voxelize.py @@ -1,14 +1,10 @@ """ -Test of homogenization techniquest within a two-scale problem. 
-- reference solution is evaluated by Flow123d, using direct rasterization of full DFN sample - The pressure field is projected to the nodes of the rectangular grid, - velocity filed is averaged over rectangular cells. +Test of homogenization algorithms from voxelize.py +- Homogenization of bulk constant conductivity + discrete fractures with size dependent conductivity. + Reference is decovalex slow solution modified for anisotropic regular grid. + This assigns the same conductivity to all intersection cells. -- two-scale solution involves: - 1. homogenization of DFN to the rectangular grid ; general permeability tensor field - 2. custom 3d solver for the rectangular grid is used to solve the coarse problem - -- various homogenization techniques could be used, homogenization time is evaluated and compared. + In order to develop more precise homogenization techniques, we must use two-scale test problems. """ from typing import * import yaml @@ -25,47 +21,17 @@ import pyvista as pv from bgem import stochastic +from bgem import fn from bgem.gmsh import gmsh, options from mesh_class import Mesh from bgem.core import call_flow, dotdict, workdir as workdir_mng -from bgem.upscale import Grid, Fe, voigt_to_tn, tn_to_voigt -import decovalex_dfnmap as dmap +from bgem.upscale import Grid, Fe, voigt_to_tn, tn_to_voigt, FracturedMedia, voxelize script_dir = Path(__file__).absolute().parent workdir = script_dir / "sandbox" from joblib import Memory memory = Memory(workdir, verbose=0) - -@attrs.define -class FracturedMedia: - dfn: List[stochastic.Fracture] - fr_cross_section: np.ndarray # shape (n_fractures,) - fr_conductivity: np.ndarray # shape (n_fractures,) - conductivity: float - - @staticmethod - def constant_fr(dfn, fr_conductivity, fr_cross_section, bulk_conductivity): - fr_cond = np.full([len(dfn)], fr_conductivity) - fr_cross = np.full([len(dfn)], fr_cross_section) - return FracturedMedia(dfn, fr_cross, fr_cond, bulk_conductivity) - - @staticmethod - def 
cubic_law_fr(dfn, unit_cross_section, bulk_conductivity): - # unit_cross_section = 1e-4 - viscosity = 1e-3 - gravity_accel = 10 - density = 1000 - permeability_factor = 1 / 12 - permeability_to_conductivity = gravity_accel * density / viscosity - # fr cond r=100 ~ 80 - # fr cond r=10 ~ 0.8 - fr_r = np.array([fr.r for fr in dfn]) - fr_cross_section = unit_cross_section * fr_r - fr_cond = permeability_to_conductivity * permeability_factor * fr_r ** 2 - fr_cond = np.full_like(fr_r, 10) - return FracturedMedia(dfn, fr_cross_section, fr_cond, bulk_conductivity) - def tst_fracture_set(domain): R = 2*np.max(domain) fr = lambda c, n : stochastic.Fracture(stochastic.SquareShape, R, c, n, 0.0, 123, 1) @@ -82,24 +48,6 @@ def tst_fracture_set(domain): ] -def rasterize_decovalex(dfn, grid:Grid): - ellipses = [dmap.Ellipse(fr.normal, fr.center, fr.scale) for fr in dfn] - d_grid = dmap.Grid.make_grid(grid.origin, grid.step, grid.dimensions) - return dmap.map_dfn(d_grid, ellipses) - -def homo_decovalex_(fr_media: FracturedMedia, grid:Grid): - """ - Homogenize fr_media to the conductivity tensor field on grid. 
- :return: conductivity_field, np.array, shape (n_elements, n_voight) - """ - ellipses = [dmap.Ellipse(fr.normal, fr.center, fr.scale) for fr in fr_media.dfn] - d_grid = dmap.Grid.make_grid(grid.origin, grid.step, grid.dimensions) - fractures = dmap.map_dfn(d_grid, ellipses) - fr_transmissivity = fr_media.fr_conductivity * fr_media.fr_cross_section - k_iso = dmap.permIso(d_grid, fractures, fr_transmissivity, fr_media.conductivity) - dmap.permAniso(d_grid, fractures, fr_transmissivity, fr_media.conductivity) - k_voigt = k_iso[:, None] * np.array([1, 1, 1, 0, 0, 0])[None, :] - return k_voigt def homo_decovalex(fr_media: FracturedMedia, grid:Grid, perm_fn): """ @@ -147,15 +95,19 @@ def rasterize_dfn(homo_fns): #points = grid.nodes() for name, homo_fn in homo_fns.items(): grid_permitivity = homo_fn(fr_media, grid) - pv_grid.cell_data[name] = grid_permitivity.reshape(-1, 9) + if len(grid_permitivity.shape) > 1: + # anisotropic case + assert grid_permitivity.shape[1:] == [3, 3] + grid_permitivity = grid_permitivity.reshape(-1, 9) + pv_grid.cell_data[name] = grid_permitivity pv_grid.save(str(workdir / "test_resterize.vtk")) def test_reasterize(): homo_fns=dict( k_deco_iso=homo_decovalex_iso, - k_deco_aniso_raw=homo_decovalex_aniso_raw, - k_deco_aniso_diag=homo_decovalex_aniso_diag, - k_deco_aniso_lump=homo_decovalex_aniso_lump + k_deco_aniso_raw=voxelize.aniso_decovalex, + k_deco_aniso_diag=fn.compose(voxelize.aniso_diag, voxelize.aniso_decovalex), + k_deco_aniso_lump=fn.compose(voxelize.aniso_lump, voxelize.aniso_decovalex) ) rasterize_dfn(homo_fns) From 90845591d5312d3d2db90df004e933c420bfa118 Mon Sep 17 00:00:00 2001 From: Jan Brezina Date: Fri, 19 Apr 2024 13:53:31 +0200 Subject: [PATCH 12/34] Improved creation of Population --- .run/pytest in stochastic.run.xml | 4 +- src/bgem/stochastic/fracture.py | 135 ++++++++++++++++++-------- tests/stochastic/test_dfn.py | 26 +++-- tests/stochastic/test_fracture.py | 11 ++- tests/stochastic/test_hausdorf_dfn.py | 15 +-- 
tests/stochastic/test_positions.py | 9 +- tests/stochastic/test_transform.py | 6 +- 7 files changed, 129 insertions(+), 77 deletions(-) diff --git a/.run/pytest in stochastic.run.xml b/.run/pytest in stochastic.run.xml index f59f413..c8f6e8b 100644 --- a/.run/pytest in stochastic.run.xml +++ b/.run/pytest in stochastic.run.xml @@ -3,9 +3,9 @@