diff --git a/.gitignore b/.gitignore index 012fad475..696988ffe 100644 --- a/.gitignore +++ b/.gitignore @@ -170,8 +170,22 @@ cython_debug/ .json .csv .vscode/settings.json +runtime_log.txt examples/paint_plots/data/ examples/paint_plots/plots/ +examples/paint_plots/results/ +examples/paint_plots/scenarios/ +examples/field_optimizations +examples/field_optimizations/data_for_stral/ +examples/field_optimizations/measured_data/ +examples/field_optimizations/plots/ +examples/field_optimizations/results/ +examples/field_optimizations/scenarios/ +examples/field_optimizations/metadata +examples/hyperparameter_optimization/logs +examples/hyperparameter_optimization/scenarios +examples/hyperparameter_optimization/results +examples/hyperparameter_optimization/plots/ # Local config files. **/config.local.json diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 6de56c5c7..4c8e3bc44 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -60,7 +60,7 @@ representative at an online or offline event. Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at -max.pargmann@dlr.de. +artist@lists.kit.edu. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the diff --git a/NOTICE b/NOTICE index 03fd1029b..546800dd5 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ This software includes code developed by Jan Ebert and Max Pargmann. -Please find a reproduction of their intial license below: +Please find a reproduction of their initial license below: Copyright [2024] [Max Pargmann and Jan Ebert] diff --git a/artist/core/blocking.py b/artist/core/blocking.py new file mode 100644 index 000000000..5e9504f59 --- /dev/null +++ b/artist/core/blocking.py @@ -0,0 +1,1051 @@ +import math +import warnings + +import torch + +from artist.util import config_dictionary +from artist.util.environment_setup import get_device + + +def create_blocking_primitives_rectangle( + blocking_heliostats_surface_points: torch.Tensor, + blocking_heliostats_active_surface_points: torch.Tensor, + epsilon: float = 0.05, + device: torch.device | None = None, +) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Create a representation of a rectangular heliostat blocking plane, by interpolating its corner points. + + The blocking plane for rectangular heliostats is represented by its four + corner points, and its normal vector. The corner points are indexed + counterclockwise. The lower left corner point of a heliostat is indexed + by 0, and so on. Overview of corner points and their indices: + + 3 | 2 + ----- + 0 | 1 + + Assumptions: + - The heliostat is rectangular. + - The heliostat is oriented to the south if it is not aligned. + + Parameters + ---------- + blocking_heliostats_surface_points : torch.Tensor + The unaligned surface points of all heliostats that might block other heliostats. + Tensor of shape [number_of_heliostats, number_of_combined_surface_points_all_facets, 4]. + blocking_heliostats_active_surface_points : torch.Tensor + The aligned surface points of all heliostats that might block other heliostats. + Tensor of shape [number_of_heliostats, number_of_combined_surface_points_all_facets, 4]. + epsilon : float + A small value (default is 0.05). + device : torch.device | None + The device on which to perform computations or load tensors and models (default is None). 
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+
+    Returns
+    -------
+    torch.Tensor
+        The blocking plane corners.
+        Tensor of shape [number_of_heliostats, 4, 4].
+    torch.Tensor
+        The blocking plane spans in u and v direction.
+        Tensor of shape [number_of_heliostats, 2, 4].
+    torch.Tensor
+        The blocking plane normals.
+        Tensor of shape [number_of_heliostats, 3].
+    """
+    device = get_device(device=device)
+
+    number_of_surfaces = blocking_heliostats_active_surface_points.shape[0]
+
+    min_e = blocking_heliostats_surface_points[:, :, 0].min(dim=1).values
+    max_e = blocking_heliostats_surface_points[:, :, 0].max(dim=1).values
+    min_n = blocking_heliostats_surface_points[:, :, 1].min(dim=1).values
+    max_n = blocking_heliostats_surface_points[:, :, 1].max(dim=1).values
+
+    min_max_values = torch.stack(
+        [
+            torch.stack([min_e, min_n], dim=1),
+            torch.stack([max_e, min_n], dim=1),
+            torch.stack([max_e, max_n], dim=1),
+            torch.stack([min_e, max_n], dim=1),
+        ],
+        dim=1,
+    )
+
+    surface_points_2d = blocking_heliostats_surface_points[:, :, :2]
+    distances_to_surface_points = torch.abs(
+        surface_points_2d[:, :, None, :] - min_max_values[:, None, :, :]
+    )
+    mask = (distances_to_surface_points < epsilon).all(-1)
+
+    corner_points_indices = mask.float().argmax(dim=1)
+    surface_indices = torch.arange(number_of_surfaces, device=device)[:, None]
+    corners = blocking_heliostats_active_surface_points[
+        surface_indices, corner_points_indices
+    ]
+
+    spans = torch.zeros((number_of_surfaces, 2, 4), device=device)
+    spans[:, 0] = corners[:, 1] - corners[:, 0]
+    spans[:, 1] = corners[:, 3] - corners[:, 0]
+
+    plane_normals = torch.nn.functional.normalize(
+        torch.cross(spans[:, 0, :3], spans[:, 1, :3], dim=-1), dim=-1
+    )
+
+    return corners, spans, plane_normals
+
+
+def create_blocking_primitives_rectangles_by_index(
+    blocking_heliostats_active_surface_points: torch.Tensor,
+    device: torch.device | None = None,
+) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+    """
+    Create a representation of a rectangular heliostat blocking plane from the known indices of its corner points.
+
+    The blocking plane for rectangular heliostats is represented by its four
+    corner points and its normal vector. The corner points are indexed
+    counterclockwise. The lower left corner point of a heliostat is indexed
+    by 0, and so on. Overview of corner points and their indices:
+
+    3 | 2
+    -----
+    0 | 1
+
+    Assumptions:
+    - The heliostat is rectangular, and each facet is also rectangular.
+    - There are four facets ordered in two columns and two rows.
+    - Each facet has an equal number of surface points -> number_of_surface_points / 4
+    - Each facet has an equal number of points along its width and its height -> math.sqrt(number_of_surface_points / 4)
+
+    Parameters
+    ----------
+    blocking_heliostats_active_surface_points : torch.Tensor
+        The aligned surface points of all heliostats that might block other heliostats.
+        Tensor of shape [number_of_heliostats, number_of_combined_surface_points_all_facets, 4].
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+
+    Returns
+    -------
+    torch.Tensor
+        The blocking plane corners.
+        Tensor of shape [number_of_heliostats, 4, 4].
+    torch.Tensor
+        The blocking plane spans in u and v direction.
+        Tensor of shape [number_of_heliostats, 2, 4].
+    torch.Tensor
+        The blocking plane normals.
+        Tensor of shape [number_of_heliostats, 3].
+    """
+    device = get_device(device=device)
+
+    number_of_surfaces, number_of_surface_points, _ = (
+        blocking_heliostats_active_surface_points.shape
+    )
+
+    corners = torch.zeros((number_of_surfaces, 4, 4), device=device)
+
+    # Lower left.
+    corners[:, 0] = blocking_heliostats_active_surface_points[
+        :, int(number_of_surface_points / 2)
+    ]
+    # Upper left.
+    corners[:, 3] = blocking_heliostats_active_surface_points[
+        :, int(number_of_surface_points - math.sqrt(number_of_surface_points / 4))
+    ]
+    # Upper right.
+    corners[:, 2] = blocking_heliostats_active_surface_points[
+        :, int((number_of_surface_points / 2) - 1)
+    ]
+    # Lower right.
+    corners[:, 1] = blocking_heliostats_active_surface_points[
+        :, int(math.sqrt(number_of_surface_points / 4) - 1)
+    ]
+
+    spans = torch.zeros((number_of_surfaces, 2, 4), device=device)
+    spans[:, 0] = corners[:, 1] - corners[:, 0]
+    spans[:, 1] = corners[:, 3] - corners[:, 0]
+
+    plane_normals = torch.nn.functional.normalize(
+        torch.cross(spans[:, 0, :3], spans[:, 1, :3], dim=-1), dim=-1
+    )
+
+    return corners, spans, plane_normals
+
+
+def soft_ray_blocking_mask(
+    ray_origins: torch.Tensor,
+    ray_directions: torch.Tensor,
+    blocking_primitives_corners: torch.Tensor,
+    blocking_primitives_spans: torch.Tensor,
+    blocking_primitives_normals: torch.Tensor,
+    distances_to_target: torch.Tensor,
+    epsilon: float = 1e-6,
+    softness: float = 50.0,
+) -> torch.Tensor:
+    r"""
+    Compute a mask indicating which rays are blocked, using a soft, differentiable approach.
+
+    Calculate ray-plane intersections and the distances of the intersections from the ray origins.
+    Depending on the intersections and the distances, rays are blocked if they cannot reach the target.
+    The blocking is made differentiable by using sigmoid functions to approximate binary transitions
+    with soft boundaries.
+    For each ray and each blocking plane the intersection point and distance are computed by solving the
+    plane equation:
+
+    .. math::
+
+        (\mathbf{p} - \mathbf{p_0}) \cdot \mathbf{n} = 0
+
+        \mathbf{p} = \mathbf{l_0} + \mathbf{l} d
+
+        ((\mathbf{l_0} + \mathbf{l} d) - \mathbf{p_0}) \cdot \mathbf{n} = 0
+
+        d = \frac{(\mathbf{p_0}-\mathbf{l_0})\cdot \mathbf{n}}{\mathbf{l}\cdot \mathbf{n}}
+
+        \mathbf{p}_{\text{intersection}} = \mathbf{l_0} + \mathbf{l}d
+
+    where :math:`\mathbf{p}` is any point on the plane, :math:`\mathbf{p_0}` is a single known point on the
+    plane (corner_0), :math:`\mathbf{n}` is the normal vector of the plane (blocking_planes_normals),
+    :math:`\mathbf{l}` is the unit vector describing the direction of the line (ray_directions),
+    :math:`\mathbf{l_0}` is a point on the line (ray_origins), and :math:`d` is the distance from the ray
+    origin to the point of intersection.
+    In the final output of this method, values near 0 mean no blocking and values near 1 mean full blocking
+    (there is at least one blocking primitive in front of the heliostat).
+
+    Parameters
+    ----------
+    ray_origins : torch.Tensor
+        The origin points of the rays, i.e. the surface points.
+        Tensor of shape [number_of_heliostats, number_of_combined_surface_points_all_facets, 4].
+    ray_directions : torch.Tensor
+        The ray directions.
+        Tensor of shape [number_of_heliostats, number_of_rays, number_of_combined_surface_normals_all_facets, 4].
+    blocking_primitives_corners : torch.Tensor
+        The blocking primitives corner points.
+        Tensor of shape [number_of_blocking_primitives, 4, 4].
+    blocking_primitives_spans : torch.Tensor
+        The blocking primitives spans in u and v direction.
+        Tensor of shape [number_of_blocking_primitives, 2, 4].
+    blocking_primitives_normals : torch.Tensor
+        The blocking primitives normals.
+        Tensor of shape [number_of_blocking_primitives, 3].
+    distances_to_target : torch.Tensor
+        The distances from each ray origin to its intersection with the target plane.
+        Tensor of shape [number_of_heliostats, number_of_rays, number_of_combined_surface_normals_all_facets].
+    epsilon : float
+        A small value preventing divisions by zero (default is 1e-6).
+    softness : float
+        Controls how softly the sigmoid approximates the blocking (default is 50.0).
+
+    Returns
+    -------
+    torch.Tensor
+        A soft blocking mask.
+        Tensor of shape [number_of_heliostats, number_of_rays, number_of_combined_surface_points_all_facets].
+    """
+    ray_origins = ray_origins[:, None, :, None, :3]
+    ray_directions = ray_directions[:, :, :, None, :3]
+
+    corner_0 = blocking_primitives_corners[None, None, None, :, 0, :3]
+    span_u = blocking_primitives_spans[None, None, None, :, 0, :3]
+    span_v = blocking_primitives_spans[None, None, None, :, 1, :3]
+    blocking_primitives_normals = blocking_primitives_normals[None, None, None, :, :3]
+
+    denominator = torch.sum(ray_directions * blocking_primitives_normals, dim=-1)
+    distances_to_blocking_planes = torch.sum(
+        (corner_0 - ray_origins) * blocking_primitives_normals, dim=-1
+    ) / (denominator + epsilon)
+    blocking_planes_in_front_of_heliostats = torch.sigmoid(
+        softness * (distances_to_blocking_planes - 1e-3)
+    )
+
+    intersection_points = (
+        ray_origins + distances_to_blocking_planes[..., None] * ray_directions
+    )
+    intersection_offset_from_corner = intersection_points - corner_0
+
+    # Compute the point of intersection in local plane coordinates.
+    span_u_squared_norm = torch.sum(span_u * span_u, dim=-1)
+    span_v_squared_norm = torch.sum(span_v * span_v, dim=-1)
+    span_uv_dot = torch.sum(span_u * span_v, dim=-1)
+    offset_projection_u = torch.sum(intersection_offset_from_corner * span_u, dim=-1)
+    offset_projection_v = torch.sum(intersection_offset_from_corner * span_v, dim=-1)
+    det = (
+        span_u_squared_norm * span_v_squared_norm - span_uv_dot * span_uv_dot + epsilon
+    )
+    u_coordinate_on_plane = (
+        offset_projection_u * span_v_squared_norm - offset_projection_v * span_uv_dot
+    ) / det
+    v_coordinate_on_plane = (
+        offset_projection_v * span_u_squared_norm - offset_projection_u * span_uv_dot
+    ) / det
+
+    # Mask values are near 1 if the intersection lies within the parallelogram (plane)
+    # and near 0 if the intersection lies outside the plane boundaries.
+    blocking_within_plane = (
+        torch.sigmoid(softness * u_coordinate_on_plane)
+        * torch.sigmoid(softness * (1 - u_coordinate_on_plane))
+        * torch.sigmoid(softness * v_coordinate_on_plane)
+        * torch.sigmoid(softness * (1 - v_coordinate_on_plane))
+    )
+
+    # Mask values are near 1 if the blocking plane lies in front of the target
+    # and near 0 if the blocking plane lies behind the target.
+    blocking_planes_in_front_of_target = torch.sigmoid(
+        softness * (distances_to_target.unsqueeze(-1) - distances_to_blocking_planes)
+    )
+
+    blocking_mask_per_plane = (
+        blocking_within_plane
+        * blocking_planes_in_front_of_heliostats
+        * blocking_planes_in_front_of_target
+    )
+    blocked = 1 - torch.prod(1 - blocking_mask_per_plane, dim=-1)
+
+    return blocked
+
+
+def expand_bits(integers: torch.Tensor) -> torch.Tensor:
+    """
+    Expand the lower 10 bits of an integer into 30 bits by inserting 2 zero bits between each original bit.
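+
+    Each original bit i therefore moves to position 3 * i. A minimal pure-Python
+    sketch of the same mapping (editorial illustration, not used by the
+    implementation):
+
+    >>> def expand_bits_scalar(x: int) -> int:
+    ...     return sum(((x >> i) & 1) << (3 * i) for i in range(10))
+    >>> bin(expand_bits_scalar(0b101))
+    '0b1000001'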
+
+    Parameters
+    ----------
+    integers : torch.Tensor
+        Integer coordinates with values in [0, 1023] (10 bits).
+        Tensor of shape [number_of_blocking_planes].
+
+    Returns
+    -------
+    torch.Tensor
+        Integer coordinates expanded from 10 bits to 30 bits.
+        Tensor of shape [number_of_blocking_planes].
+    """
+    # Keep only the lower 10 bits.
+    expanded_integers = integers & 0b1111111111
+    # Spread and mask the bits to achieve a pattern with two 0 bits between each
+    # original bit (the standard 10-bit Morton expansion masks).
+    expanded_integers = (
+        expanded_integers | (expanded_integers << 16)
+    ) & 0b11000000000000000011111111
+    expanded_integers = (
+        expanded_integers | (expanded_integers << 8)
+    ) & 0b11000000001111000000001111
+    expanded_integers = (
+        expanded_integers | (expanded_integers << 4)
+    ) & 0b11000011000011000011000011
+    expanded_integers = (
+        expanded_integers | (expanded_integers << 2)
+    ) & 0b1001001001001001001001001001
+
+    return expanded_integers.to(torch.int64)
+
+
+def morton_codes(
+    coordinates: torch.Tensor, epsilon: float = 1e-6, device: torch.device | None = None
+) -> torch.Tensor:
+    """
+    Map each 3D point to a single integer value, its Morton code.
+
+    Spatially nearby points have similar Morton codes. Morton codes are also sometimes referred to as
+    Z-order curve codes. They are computed by bit-interleaving the binary representations of the 3D
+    x, y, z coordinates.
+    The padding around the bounding boxes is necessary to avoid divisions by zero and integer
+    overflows. The relative padding scales with the field size.
+
+    Reference: Morton, G.M. (1966) A Computer Oriented Geodetic Data Base and a New Technique in File
+    Sequencing. IBM Ltd., Ottawa.
+
+    Parameters
+    ----------
+    coordinates : torch.Tensor
+        The coordinates to transform into Morton codes.
+        Tensor of shape [number_of_blocking_planes, 3].
+    epsilon : float
+        A small epsilon value (default is 1e-6).
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+
+    Returns
+    -------
+    torch.Tensor
+        The converted integers in Morton code.
+        Tensor of shape [number_of_blocking_planes].
+    """
+    device = get_device(device=device)
+
+    # The 10 bits per axis should not be changed. 10 bits per axis means 1024 discrete positions along
+    # each dimension and 30 bits in total. This is the maximum number of bits per axis fitting into a
+    # single 32-bit integer and is enough even for scenes with more than a hundred thousand blocking planes.
+    bits = 10
+
+    # Compute the bounding box around all coordinates.
+    mins = coordinates.min(dim=0).values
+    maxs = coordinates.max(dim=0).values
+    padding = (maxs - mins) * epsilon + epsilon
+    bounding_box_min = mins - padding
+    bounding_box_max = maxs + padding
+
+    # Normalize the coordinates and clamp them to [0, 1 - epsilon].
+    spans = bounding_box_max - bounding_box_min
+    spans[spans == 0] = 1.0
+    norm = (coordinates - bounding_box_min[None, :]) / spans[None, :]
+    norm = norm.clamp(0.0, 1.0 - epsilon)
+
+    # Determine the number of discrete positions along each axis (1024).
+    scale = float(1 << bits)
+
+    # Scale the normalized coordinates to integer values from 0 to 1023.
+    qi = (norm * scale).to(torch.int64)
+    xi = qi[:, 0].to(torch.int64)
+    yi = qi[:, 1].to(torch.int64)
+    zi = qi[:, 2].to(torch.int64)
+
+    # Prepare the interleaving.
+    # Spread 10 bits into 30 bits with 2 zero bits between each bit.
+    xx = expand_bits(xi)
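+    # Illustration (editorial example, not part of the computation): for
+    # xi = 0b11, yi = 0b01 and zi = 0b10, the three spread and shifted values
+    # OR together to the Morton code 0b101011, whose bit triplets read
+    # (z y x) from the least significant triplet upwards: (0 1 1), (1 0 1).
+    # Spread with additional shift to the left for y.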
+    yy = expand_bits(yi) << 1
+    # Spread with 2 additional shifts to the left for z.
+    zz = expand_bits(zi) << 2
+
+    code = (xx | yy | zz).to(torch.int64)
+
+    return code
+
+
+def most_significant_differing_bit(
+    differing_bits: torch.Tensor, device: torch.device | None = None
+) -> torch.Tensor:
+    """
+    Compute the most significant bit (MSB) indices.
+
+    The MSB index is the position of the highest set bit in the binary representation
+    of the integer value. The bit positions start at 0, which is the least significant bit.
+    For x = 0, the MSB is undefined and -1 will be returned. This method uses a float-based
+    log2, combined with the floor operation, as a fast and safe MSB implementation. It works
+    for positive integers only, and only for Morton codes up to 30 bits, for which
+    torch.log2() on float32 is safe.
+
+    Parameters
+    ----------
+    differing_bits : torch.Tensor
+        Integer values.
+        Tensor of shape [number_of_blocking_planes].
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+
+    Returns
+    -------
+    torch.Tensor
+        Most significant bits.
+        Tensor of shape [number_of_blocking_planes].
+    """
+    device = get_device(device=device)
+
+    differing_bits = differing_bits.to(torch.float32)
+
+    nonzero_mask = differing_bits != 0
+    most_significant_bits = torch.full_like(
+        differing_bits, -1, dtype=torch.int64, device=device
+    )
+
+    if nonzero_mask.any():
+        msb = torch.floor(torch.log2(differing_bits[nonzero_mask])).to(torch.int64)
+        most_significant_bits[nonzero_mask] = msb
+
+    return most_significant_bits
+
+
+def longest_common_prefix(
+    codes: torch.Tensor,
+    i: torch.Tensor,
+    j: torch.Tensor,
+    total_bits: int = 30,
+    device: torch.device | None = None,
+) -> torch.Tensor:
+    """
+    Compute the longest common prefix (LCP) between pairs of Morton codes.
+
+    The longest common prefix (LCP) indicates how similar two Morton codes are and therefore also
+    indicates how close (spatially) two blocking objects are. The LCP is the number of highest-order
+    bits that are identical in two Morton codes.
+
+    Parameters
+    ----------
+    codes : torch.Tensor
+        Sorted Morton codes as int64.
+        Tensor of shape [number_of_blocking_planes].
+    i : torch.Tensor
+        Lower indices selecting the first Morton codes for the comparison.
+        Tensor of shape [number_of_blocking_planes].
+    j : torch.Tensor
+        Upper indices selecting the second Morton codes for the comparison.
+        Tensor of shape [number_of_blocking_planes].
+    total_bits : int
+        Total number of bits used in the Morton codes (default is 30).
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+
+    Returns
+    -------
+    torch.Tensor
+        The longest common prefixes in the range from 0 to total_bits.
+        Tensor of shape [number_of_blocking_planes].
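+
+    Notes
+    -----
+    As a small illustration: with ``total_bits=6``, the codes ``0b101000``
+    and ``0b101011`` have the XOR ``0b000011``, whose most significant set
+    bit is at index 1, so the two codes share the ``(6 - 1) - 1 = 4``
+    highest-order bits ``1010``.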
+ """ + device = get_device(device=device) + + differing_bits = codes[i] ^ codes[j] + most_significant_differing_bits = most_significant_differing_bit( + differing_bits, device=device + ) + longest_common_prefixes = torch.where( + differing_bits == 0, + torch.full_like( + most_significant_differing_bits, + total_bits, + dtype=torch.int64, + device=device, + ), + (total_bits - 1) - most_significant_differing_bits, + ) + return longest_common_prefixes + + +def range_to_node_id( + start_indices: torch.Tensor, end_indices: torch.Tensor, leaf_offset: int +) -> torch.Tensor: + """ + Convert a range of sorted primitives into node indices. + + When the start index is equal to the end index there it will be a leaf node with the id: leaf offset + start index + Otherwise it will be an internal node with the minimum of the start and end index as id. + + Parameters + ---------- + start_indices : torch.Tensor + Start indices of the node ranges. + Tensor of shape [number_of_blocking_planes]. + end_indices : torch.Tensor + End indices of the node ranges. + Tensor of shape [number_of_blocking_planes]. + leaf_offset : int + Offset index in the node array where leaf nodes start. + + Returns + ------- + torch.Tensor + Node indices corresponding to the given ranges. + """ + leaf_node = (leaf_offset + start_indices).to(torch.int32) + internal_node = torch.minimum(start_indices, end_indices).to(torch.int32) + + node_indices = torch.where(start_indices == end_indices, leaf_node, internal_node) + + return node_indices + + +@torch.no_grad() +def build_linear_bounding_volume_hierarchies( + blocking_primitives_corners: torch.Tensor, device: torch.device | None = None +) -> dict[str, torch.Tensor]: + """ + Build linear bounding volume heirachies (LBVHs). + + Reference: Tero Karras. Maximizing Parallelism in the Construction of BVHs, Octrees, and k‑d Trees. + In Proceedings of the Fourth ACM SIGGRAPH / Eurographics Symposium on High‑Performance Graphics (HPG 2012) + + Parameters + ---------- + blocking_primitives_corners : torch.Tensor + Corner points of each blocking primitive. + Tensor of shape [number_of_blocking_primitives, 4, 4]. + device : torch.device | None + The device on which to perform computations or load tensors and models (default is None). + If None, ``ARTIST`` will automatically select the most appropriate + device (CUDA or CPU) based on availability and OS. + + Returns + ------- + dict[str, torch.Tensor] + - left, right: Indices of the left and right child of each LBVH node (-1 if leave). + - aabb_min, aabb_max: axis aligned bounding boxes. + - is_leaf: boolean, indicating whether a node is a leaf node. + - primitive_index: indicates which primitives are contained. + """ + device = get_device(device=device) + + number_of_blocking_primitives = blocking_primitives_corners.shape[0] + blocker_ids = torch.arange(number_of_blocking_primitives, device=device) + + if number_of_blocking_primitives == 0: + return { + config_dictionary.left_node: torch.empty( + (0,), dtype=torch.int32, device=device + ), + config_dictionary.right_node: torch.empty( + (0,), dtype=torch.int32, device=device + ), + config_dictionary.aabb_min: torch.empty((0, 3), device=device), + config_dictionary.aabb_max: torch.empty((0, 3), device=device), + config_dictionary.is_leaf: torch.empty( + (0,), dtype=torch.bool, device=device + ), + config_dictionary.primitive_index: torch.empty( + (0,), dtype=torch.int32, device=device + ), + } + + # Compute sorted Morton code representations for each blocking primitive. 
+ primitive_mins = blocking_primitives_corners.min(dim=1).values + primitive_maxs = blocking_primitives_corners.max(dim=1).values + centroids = blocking_primitives_corners.mean(dim=1) + + codes = morton_codes(coordinates=centroids, epsilon=1e-6, device=device) + sorted_codes, sorted_primitive_indices = torch.sort(codes) + + # Analyse similarities between Morton codes and determine the direction to the more similar Morton codes, in the sorted array: -1 = to the left, +1 = to the right. + # The similarity is evaluated by computing leading common prefix lengths for all neighboring pairs of Morton codes. + if number_of_blocking_primitives > 1: + lcp_right = longest_common_prefix( + codes=sorted_codes, + i=blocker_ids, + j=torch.clamp(blocker_ids + 1, max=number_of_blocking_primitives - 1), + device=device, + ) + lcp_left = longest_common_prefix( + codes=sorted_codes, + i=torch.clamp(blocker_ids - 1, min=0), + j=blocker_ids, + device=device, + ) + lcp_right[-1] = -1 + lcp_left[0] = -1 + else: + lcp_right = torch.tensor([-1], dtype=torch.int64, device=device) + lcp_left = torch.tensor([-1], dtype=torch.int64, device=device) + + direction_to_similar_codes = torch.where( + lcp_right > lcp_left, + torch.ones(number_of_blocking_primitives, dtype=torch.int64, device=device), + -torch.ones(number_of_blocking_primitives, dtype=torch.int64, device=device), + ) + + # Find threshold (delta_min) for node expansion by determining how similar the next Morton code in the chosen direction is. + # Find the range of blocking primitives that share a common prefix larger than delta_min. + # Find the contiguous range of Morton codes that belong together. + # In the exponential search (the step size doubles in each iteration), find the farthest index j along direction d[i] where LCP > delta_min[i]. + neighbor_indices = blocker_ids - direction_to_similar_codes + mask_out_of_bounds = (neighbor_indices >= 0) & ( + neighbor_indices < number_of_blocking_primitives + ) + neighbor_indices = torch.clamp( + neighbor_indices, 0, number_of_blocking_primitives - 1 + ) + + delta_min = longest_common_prefix( + codes=sorted_codes, i=blocker_ids, j=neighbor_indices, device=device + ) + delta_min = torch.where( + mask_out_of_bounds, delta_min, torch.full_like(delta_min, -1, device=device) + ) + + max = ( + math.ceil(math.log2(number_of_blocking_primitives)) + if number_of_blocking_primitives > 1 + else 1 + ) + farthest_expansion = torch.zeros( + number_of_blocking_primitives, dtype=torch.int64, device=device + ) + + for k in range(0, max + 1): + step = 1 << k + candidate_indices = blocker_ids + direction_to_similar_codes * ( + farthest_expansion + step + ) + mask_out_of_bounds_candidates = (candidate_indices >= 0) & ( + candidate_indices < number_of_blocking_primitives + ) + candidate_indices = torch.clamp( + candidate_indices, 0, number_of_blocking_primitives - 1 + ) + candidates_lcp = longest_common_prefix( + sorted_codes, blocker_ids, candidate_indices, device=device + ) + mask = mask_out_of_bounds_candidates & (candidates_lcp > delta_min) + farthest_expansion = torch.where( + mask, farthest_expansion + step, farthest_expansion + ) + + farthest_index = blocker_ids + direction_to_similar_codes * farthest_expansion + farthest_index = torch.clamp(farthest_index, 0, number_of_blocking_primitives - 1) + + # Construct binary radix tree. + # The range [first[i], last[i]] corresponds to the spatial cluster of blocking primitives that share a common prefix in Morton code. 
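+    # The split index below is located by a descending binary search: starting
+    # from the largest power of two not exceeding the range length, the split
+    # advances whenever the candidate index still shares a strictly longer
+    # common prefix with the range start than the candidate's successor does.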
+    # Compute splits to build LBVH tree, each internal node is assigned two children.
+    min_index = torch.minimum(blocker_ids, farthest_index)
+    max_index = torch.maximum(blocker_ids, farthest_index)
+    split = min_index.clone()
+    span = max_index - min_index
+    max_span = span.max().item() if span.numel() > 0 else 0
+    if max_span >= 1:
+        max_k = math.floor(math.log2(max_span))
+        for k in range(max_k, -1, -1):
+            step_k = 1 << k
+            candidate_indices = split + step_k
+            valid = candidate_indices < max_index
+            clamped_candidate_indices = torch.clamp(
+                candidate_indices, 0, number_of_blocking_primitives - 1
+            )
+            clamped_incremented_indices = torch.clamp(
+                candidate_indices + 1, 0, number_of_blocking_primitives - 1
+            )
+            candidates_lcp = longest_common_prefix(
+                codes=sorted_codes,
+                i=min_index,
+                j=clamped_candidate_indices,
+                device=device,
+            )
+            candidates_incremented_lcp = longest_common_prefix(
+                codes=sorted_codes,
+                i=min_index,
+                j=clamped_incremented_indices,
+                device=device,
+            )
+            mask = valid & (candidates_lcp > candidates_incremented_lcp)
+            split = torch.where(mask, split + step_k, split)
+
+    # LBVH:
+    # left, right: Indices of the left and right child of each node (-1 if not set).
+    # aabb_min, aabb_max: axis aligned bounding box of the node.
+    # is_leaf: boolean, indicating whether a node is a leaf node.
+    # primitive_index: indicates which primitive is contained, -1 for internal nodes.
+    total_nodes = 2 * number_of_blocking_primitives - 1
+    left = torch.full((total_nodes,), -1, dtype=torch.int32, device=device)
+    right = torch.full((total_nodes,), -1, dtype=torch.int32, device=device)
+    aabb_min = torch.zeros((total_nodes, 3), dtype=torch.float32, device=device)
+    aabb_max = torch.zeros((total_nodes, 3), dtype=torch.float32, device=device)
+    is_leaf = torch.zeros((total_nodes,), dtype=torch.bool, device=device)
+    primitive_index = torch.full((total_nodes,), -1, dtype=torch.int32, device=device)
+
+    # In the Karras LBVH approach the leaf nodes are stored at the end of the node array.
+    leaf_offset = number_of_blocking_primitives - 1
+    internal_count = number_of_blocking_primitives - 1
+    internal_nodes_indices = torch.arange(
+        0, internal_count, dtype=torch.int64, device=device
+    )
+
+    # Map the original primitive index via the sorted_primitive_indices.
+    aabb_min[leaf_offset : leaf_offset + number_of_blocking_primitives] = (
+        primitive_mins[sorted_primitive_indices]
+    )
+    aabb_max[leaf_offset : leaf_offset + number_of_blocking_primitives] = (
+        primitive_maxs[sorted_primitive_indices]
+    )
+    is_leaf[leaf_offset : leaf_offset + number_of_blocking_primitives] = True
+    primitive_index[leaf_offset : leaf_offset + number_of_blocking_primitives] = (
+        sorted_primitive_indices.to(torch.int32)
+    )
+
+    # left child node id corresponds to range [first[i], split[i]].
+    # right child node id corresponds to range [split[i]+1, last[i]].
+    left_child_nodes = range_to_node_id(
+        start_indices=min_index[:internal_count],
+        end_indices=split[:internal_count],
+        leaf_offset=leaf_offset,
+    )
+    right_child_nodes = range_to_node_id(
+        start_indices=split[:internal_count] + 1,
+        end_indices=max_index[:internal_count],
+        leaf_offset=leaf_offset,
+    )
+
+    # Detect cycles and replace by leaves.
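+    # A cycle would occur when range_to_node_id maps a child range back onto
+    # the parent's own internal node id; such children are redirected to the
+    # corresponding leaf instead.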
+ left_child_ids = torch.where( + left_child_nodes == internal_nodes_indices, + leaf_offset + min_index[:internal_count], + left_child_nodes, + ) + right_child_ids = torch.where( + right_child_nodes == internal_nodes_indices, + leaf_offset + max_index[:internal_count], + right_child_nodes, + ) + left[internal_nodes_indices] = left_child_ids.to(dtype=torch.int32, device=device) + right[internal_nodes_indices] = right_child_ids.to(dtype=torch.int32, device=device) + is_leaf[internal_nodes_indices] = False + + # Compute axis aligned bounding boxes (AABB) for internal nodes by combining child boxes. + # The Karras mapping ensures internal nodes form a DAG that can be evaluated in ascending order. + nodes_with_complete_aabb = torch.zeros( + internal_count, dtype=torch.bool, device=device + ) + left_internal = left[:internal_count].to(dtype=torch.int64, device=device) + right_internal = right[:internal_count].to(dtype=torch.int64, device=device) + rounds = 0 + while not nodes_with_complete_aabb.all() and rounds < internal_count: + left_is_internal = left_internal < leaf_offset + internal_mask = ( + left_is_internal & (left_internal >= 0) & (left_internal < internal_count) + ) + left_done = torch.ones_like(left_is_internal, dtype=torch.bool, device=device) + left_done[internal_mask] = nodes_with_complete_aabb[ + left_internal[internal_mask] + ] + + right_is_internal = right_internal < leaf_offset + internal_mask = ( + right_is_internal + & (right_internal >= 0) + & (right_internal < internal_count) + ) + right_done = torch.ones_like(right_is_internal, dtype=torch.bool, device=device) + right_done[internal_mask] = nodes_with_complete_aabb[ + right_internal[internal_mask] + ] + + nodes_to_be_computed_next = (~nodes_with_complete_aabb) & left_done & right_done + if not nodes_to_be_computed_next.any(): + break + + next_nodes_indices = torch.nonzero(nodes_to_be_computed_next, as_tuple=True)[ + 0 + ].to(device) + left_index = left_internal[next_nodes_indices] + right_index = right_internal[next_nodes_indices] + + mins = torch.minimum(aabb_min[left_index], aabb_min[right_index]) + maxs = torch.maximum(aabb_max[left_index], aabb_max[right_index]) + aabb_min[next_nodes_indices] = mins + aabb_max[next_nodes_indices] = maxs + nodes_with_complete_aabb[next_nodes_indices] = True + rounds += 1 + + # Slow fallback logic if some axis aligned bounding boxes have not been computed. 
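+    # Each remaining internal node reduces the AABBs of all leaves in its
+    # primitive range directly; this is slow, but it only triggers on inputs
+    # that the iterative bottom-up propagation above could not resolve.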
+ if not nodes_with_complete_aabb.all(): + incomplete = torch.nonzero(~nodes_with_complete_aabb, as_tuple=True)[0] + warnings.warn( + f"LBVH AABB fallback computation (very slow): {incomplete.numel()} internal nodes did not receive AABBs via DAG propagation.", + RuntimeWarning, + ) + for node in incomplete.tolist(): + min = int(min_index[node].item()) + max = int(max_index[node].item()) + leaf_nodes_slice = ( + torch.arange(min, max + 1, device=device, dtype=torch.int64) + + leaf_offset + ) + aabb_min[node] = torch.min(aabb_min[leaf_nodes_slice], dim=0).values + aabb_max[node] = torch.max(aabb_max[leaf_nodes_slice], dim=0).values + + return { + config_dictionary.left_node: left, + config_dictionary.right_node: right, + config_dictionary.aabb_min: aabb_min, + config_dictionary.aabb_max: aabb_max, + config_dictionary.is_leaf: is_leaf, + config_dictionary.primitive_index: primitive_index, + } + + +def ray_aabb_intersect( + ray_origins: torch.Tensor, + inverse_ray_directions: torch.Tensor, + aabb_min: torch.Tensor, + aabb_max: torch.Tensor, +) -> tuple[torch.Tensor, torch.Tensor]: + """ + Compute intersection distances between rays and axis aligned bounding boxes (AABBs). + + This method uses the slab method and the inverse ray direction for more efficient computation. + + Parameters + ---------- + ray_origins : torch.Tensor + Ray origins. + Tensor of shape [total_number_of_rays, 3]. + inverse_ray_directions : torch.Tensor + Precomputed inverse ray directions. + Tensor of shape [total_number_of_rays, 3]. + aabb_min : torch.Tensor + Minimum corner points of the AABBs. + Tensor of shape [total_number_of_rays, 3]. + aabb_max : torch.Tensor + Maximum corner points of the AABBs. + Tensor of shape [total_number_of_rays, 3]. + + Returns + ------- + entry_distance_to_aabb : torch.Tensor + Entry distance along each ray to the AABBs. + Tensor of shape [total_number_of_rays]. + exit_distance_to_aabb : torch.Tensor + Exit distance along each ray to the AABBs. + Tensor of shape [total_number_of_rays]. + """ + min_distance = (aabb_min - ray_origins) * inverse_ray_directions + max_distance = (aabb_max - ray_origins) * inverse_ray_directions + entry_distance_to_aabb = torch.minimum(min_distance, max_distance).amax(dim=-1) + exit_distance_to_aabb = torch.maximum(min_distance, max_distance).amin(dim=-1) + return entry_distance_to_aabb, exit_distance_to_aabb + + +@torch.no_grad() +def lbvh_filter_blocking_planes( + points_at_ray_origins: torch.Tensor, + ray_directions: torch.Tensor, + blocking_primitives_corners: torch.Tensor, + ray_to_heliostat_mapping: torch.Tensor, + max_stack_size: int = 128, + device: torch.device | None = None, +) -> torch.Tensor: + """ + Apply the LBVH filter to filter out blocking planes that are not hit. + + Parameters + ---------- + points_at_ray_origins : torch.Tensor + Origin points of the rays, i.e. the surface points, expanded in the ray dimension. + Tensor of shape [number_of_heliostats, number_of_rays, number_of_combined_surface_normals_all_facets, 3]. + ray_directions : torch.Tensor + The ray directions. + Tensor of shape [number_of_heliostats, number_of_rays, number_of_combined_surface_normals_all_facets, 3]. + blocking_primitives_corners : torch.Tensor + The blocking primitives corner points. + Tensor of shape [number_of_blocking_planes, 4, 3]. + ray_to_heliostat_mapping : torch.Tensor + Mapping indicating which ray is reflected by which heliostat. + Tensor of shape [total_number_of_rays]. 
+ max_stack_size : int + Maximum stack size for the depth-first LBVH traversal (default is 128). + device : torch.device | None + The device on which to perform computations or load tensors and models (default is None). + If None, ``ARTIST`` will automatically select the most appropriate + device (CUDA or CPU) based on availability and OS. + + Returns + ------- + torch.Tensor + The indices of the blocking primitives that are hit. + Tensor of shape [number_of_hit_blocking_planes]. + """ + device = get_device(device=device) + + lbvh = build_linear_bounding_volume_hierarchies( + blocking_primitives_corners=blocking_primitives_corners, device=device + ) + + left = lbvh[config_dictionary.left_node] + right = lbvh[config_dictionary.right_node] + aabb_min = lbvh[config_dictionary.aabb_min] + aabb_max = lbvh[config_dictionary.aabb_max] + is_leaf = lbvh[config_dictionary.is_leaf] + primitive_index = lbvh[config_dictionary.primitive_index] + + ray_origins = points_at_ray_origins.reshape(-1, 3) + ray_directions = ray_directions.reshape(-1, 3) + total_number_of_rays = ray_origins.shape[0] + number_of_primitives = blocking_primitives_corners.shape[0] + + node_traversal_stack = torch.full( + (total_number_of_rays, max_stack_size), + -1, + dtype=torch.int32, + device=device, + ) + node_traversal_stack[:, 0] = 0 + stack_pointer = torch.ones(total_number_of_rays, dtype=torch.int32, device=device) + + mask_hits_per_ray = torch.zeros( + (total_number_of_rays, number_of_primitives), dtype=torch.bool, device=device + ) + inverse_directions = 1.0 / (ray_directions + 1e-12) + active_rays = torch.arange(total_number_of_rays, device=device) + + # LBVH Traversal (Depth-first, per-ray stack-based traversal of the LBVH). + while active_rays.numel() > 0: + top_index = stack_pointer[active_rays] - 1 + nodes = node_traversal_stack[active_rays, top_index] + stack_pointer[active_rays] -= 1 + + # Filter out rays that miss the AABBs. + entry_distance_to_aabb, exit_distance_to_aabb = ray_aabb_intersect( + ray_origins[active_rays], + inverse_directions[active_rays], + aabb_min[nodes], + aabb_max[nodes], + ) + mask_hit = (exit_distance_to_aabb >= entry_distance_to_aabb) & ( + exit_distance_to_aabb > 1e-6 + ) + + if mask_hit.any(): + hit_rays = active_rays[mask_hit] + hit_nodes = nodes[mask_hit] + leaf_mask = is_leaf[hit_nodes] + + if leaf_mask.any(): + leaf_rays = hit_rays[leaf_mask] + leaf_nodes = hit_nodes[leaf_mask] + leaf_prims = primitive_index[leaf_nodes] + mask_hits_per_ray[leaf_rays, leaf_prims] = True + + if (~leaf_mask).any(): + internal_rays = hit_rays[~leaf_mask] + internal_nodes = hit_nodes[~leaf_mask] + + left_child_nodes = left[internal_nodes] + right_child_nodes = right[internal_nodes] + + has_left = left_child_nodes >= 0 + has_right = right_child_nodes >= 0 + + index_for_children = stack_pointer[internal_rays].clone() + index_for_left_children = index_for_children.clone() + index_for_left_children[~has_left] = -1 + index_for_right_children = index_for_children + has_left.to(torch.int32) + index_for_right_children[~has_right] = -1 + + if ( + (index_for_left_children >= max_stack_size) + & (index_for_left_children != -1) + ).any() or ( + (index_for_right_children >= max_stack_size) + & (index_for_right_children != -1) + ).any(): + raise RuntimeError( + "Stack overflow in LBVH traversal, max_stack too small." 
+                    )
+
+                if has_left.any():
+                    rows = internal_rays[has_left]
+                    cols = index_for_left_children[has_left]
+                    values = left_child_nodes[has_left]
+                    node_traversal_stack[rows, cols] = values
+                    stack_pointer[rows] = cols + 1
+
+                if has_right.any():
+                    rows = internal_rays[has_right]
+                    cols = index_for_right_children[has_right]
+                    values = right_child_nodes[has_right]
+                    node_traversal_stack[rows, cols] = values
+                    stack_pointer[rows] = cols + 1
+
+        active_rays = torch.nonzero(stack_pointer > 0, as_tuple=True)[0]
+
+    # Remove self-hits (a ray hits the blocking primitive from which it originates).
+    primitive_owner = torch.arange(number_of_primitives, device=device).view(1, -1)
+    ray_owner = ray_to_heliostat_mapping.view(-1, 1)
+    non_self = mask_hits_per_ray & (ray_owner != primitive_owner)
+    filtered_blocking_primitive_indices = torch.nonzero(
+        non_self.any(dim=0), as_tuple=True
+    )[0]
+
+    return filtered_blocking_primitive_indices
diff --git a/artist/core/core_utils.py b/artist/core/core_utils.py
index 9f9b4cdf0..43dd50200 100644
--- a/artist/core/core_utils.py
+++ b/artist/core/core_utils.py
@@ -3,27 +3,21 @@
 from artist.util.environment_setup import get_device
 
 
-def per_heliostat_reduction(
-    per_sample_values: torch.Tensor,
-    active_heliostats_mask: torch.Tensor,
+def mean_loss_per_heliostat(
+    loss_per_sample: torch.Tensor,
+    number_of_samples_per_heliostat: int,
     device: torch.device | None = None,
 ) -> torch.Tensor:
     """
-    Compute mean losses for each heliostat with multiple samples.
-
-    If the active heliostats of one group have different amounts of samples to train on, i.e.,
-    one heliostat is trained with more samples than another, this function makes sure that
-    each heliostat still contributes equally to the overall loss of the group. This function
-    computes the mean loss for each heliostat.
+    Calculate the mean loss per heliostat from a loss per sample.
 
     Parameters
     ----------
-    per_sample_values : torch.Tensor
-        The per sample values to be reduced.
+    loss_per_sample : torch.Tensor
+        Loss per sample.
         Tensor of shape [number_of_samples].
-    active_heliostats_mask : torch.Tensor
-        A mask defining which heliostats are activated.
-        Tensor of shape [number_of_heliostats].
+    number_of_samples_per_heliostat : int
+        Number of samples per heliostat.
     device : torch.device | None
         The device on which to perform computations or load tensors and models (default is None).
         If None, ``ARTIST`` will automatically select the most appropriate
@@ -32,67 +26,20 @@
     Returns
     -------
     torch.Tensor
-        The mean loss per heliostat.
+        Loss per heliostat.
         Tensor of shape [number_of_heliostats].
     """
     device = get_device(device=device)
 
-    # A sample to heliostat index mapping.
-    heliostat_ids = torch.repeat_interleave(
-        torch.arange(len(active_heliostats_mask), device=device),
-        active_heliostats_mask,
-    )
+    number_of_chunks = int(loss_per_sample.numel() // number_of_samples_per_heliostat)
+    loss_per_sample = loss_per_sample[
+        : number_of_chunks * number_of_samples_per_heliostat
+    ]
 
-    loss_sum_per_heliostat = torch.zeros(len(active_heliostats_mask), device=device)
-    loss_sum_per_heliostat = loss_sum_per_heliostat.index_add(
-        0, heliostat_ids, per_sample_values
+    loss_reshaped = loss_per_sample.view(
+        number_of_chunks, number_of_samples_per_heliostat
     )
-    # Compute MSE loss per heliostat on each rank.
-    number_of_samples_per_heliostat = torch.zeros(
-        len(active_heliostats_mask), device=device
-    )
-    number_of_samples_per_heliostat.index_add_(
-        0, heliostat_ids, torch.ones_like(per_sample_values, device=device)
-    )
-
-    counts_clamped = number_of_samples_per_heliostat.clamp_min(1.0)
-    mean_loss_per_heliostat = loss_sum_per_heliostat / counts_clamped
-    mean_loss_per_heliostat = torch.where(
-        number_of_samples_per_heliostat > 0, mean_loss_per_heliostat, torch.inf
-    )
+    mean_loss_per_heliostat = loss_reshaped.mean(dim=1)
 
     return mean_loss_per_heliostat
-
-
-def scale_loss(
-    loss: torch.Tensor, reference: torch.Tensor, weight: float
-) -> torch.Tensor:
-    """
-    Scale one loss so that its weighted contribution is a ratio of the reference loss.
-
-    Parameters
-    ----------
-    loss : torch.Tensor
-        The loss to be scaled.
-        Tensor of shape [1].
-    reference : torch.Tensor
-        The reference loss.
-        Tensor of shape [1].
-    weight : float
-        The weight or ratio used for the scaling.
-
-    Returns
-    -------
-    torch.Tensor
-        The scaled loss.
-        Tensor of shape [1].
-    """
-    epsilon = 1e-12
-    scale = (reference * weight) / (loss + epsilon)
-    scaled_loss = loss * scale
-
-    inf_mask = torch.isinf(loss)
-    scaled_loss[inf_mask] = loss[inf_mask]
-
-    return scaled_loss
diff --git a/artist/core/heliostat_ray_tracer.py b/artist/core/heliostat_ray_tracer.py
index 6289ac189..207a57654 100644
--- a/artist/core/heliostat_ray_tracer.py
+++ b/artist/core/heliostat_ray_tracer.py
@@ -5,6 +5,7 @@
 from torch.utils.data import DataLoader, Dataset, Sampler
 
 import artist.util.index_mapping
+from artist.core import blocking
 
 if TYPE_CHECKING:
     from artist.field.heliostat_group import HeliostatGroup
@@ -34,7 +35,7 @@ def __init__(
         self,
         light_source: LightSource,
         number_of_points_per_heliostat: int,
-        number_of_heliostats: int,
+        number_of_active_heliostats: int,
         random_seed: int = 7,
     ) -> None:
         """
@@ -52,14 +53,14 @@
             The light source used to model the distortions.
         number_of_points_per_heliostat : int
             The number of points on the heliostats for which distortions are created.
-        number_of_heliostats : int
-            The number of heliostats in the scenario.
+        number_of_active_heliostats : int
+            The number of active heliostats in the scenario.
         random_seed : int
             The random seed used for generating the distortions (default is 7).
         """
         self.distortions_u, self.distortions_e = light_source.get_distortions(
            number_of_points=number_of_points_per_heliostat,
-            number_of_heliostats=number_of_heliostats,
+            number_of_active_heliostats=number_of_active_heliostats,
            random_seed=random_seed,
        )
@@ -107,16 +108,8 @@ class RestrictedDistributedSampler(Sampler):
     Attributes
     ----------
-    number_of_samples : int
-        The number of samples in the dataset.
-    world_size : int
-        The world size or total number of processes.
-    rank : int
-        The rank of the current process.
-    number_of_active_ranks : int
-        The number of processes that will receive data.
-    number_of_samples_per_rank : int
-        The number of samples per rank.
+    rank_indices : list[int]
+        The indices of the samples assigned to the current rank.
 
     See Also
     --------
@@ -126,6 +119,7 @@ def __init__(
         self,
         number_of_samples: int,
+        number_of_active_heliostats: int,
         world_size: int = 1,
         rank: int = 0,
     ) -> None:
         """
@@ -135,26 +129,33 @@
         Parameters
         ----------
         number_of_samples : int
-            The length of the dataset or total number of samples.
+            Length of the dataset or total number of samples.
+        number_of_active_heliostats : int
+            Number of active heliostats.
world_size : int - The world size or total number of processes (default is 1). + World size or total number of processes (default is 1). rank : int - The rank of the current process (default is 0). + Rank of the current process (default is 0). """ super().__init__() - self.number_of_samples = number_of_samples - self.world_size = world_size - self.rank = rank + number_of_active_ranks = min(number_of_active_heliostats, world_size) + self.rank_indices = [] + + if rank < number_of_active_ranks: + number_of_samples_per_heliostat = ( + number_of_samples // number_of_active_heliostats + ) + indices: list[int] = [] - # Adjust num_replicas if dataset is smaller than world_size. - self.number_of_active_ranks = min(self.number_of_samples, self.world_size) + for index in range(number_of_active_heliostats): + if index % number_of_active_ranks == rank: + start = index * number_of_samples_per_heliostat + end = start + number_of_samples_per_heliostat + indices.extend(range(start, end)) - # Only assign data to first active ranks. - self.number_of_samples_per_rank = ( - self.number_of_samples // self.number_of_active_ranks - if self.rank < self.number_of_active_ranks - else 0 - ) + self.rank_indices = indices + else: + self.rank_indices = [] def __iter__(self) -> Iterator[int]: """ @@ -165,11 +166,7 @@ def __iter__(self) -> Iterator[int]: Iterator[int] An iterator over indices for the current rank. """ - rank_indices = [] - for i in range(self.rank, self.number_of_samples, self.world_size): - rank_indices.append(i) - - return iter(rank_indices) + return iter(self.rank_indices) class HeliostatRayTracer: @@ -182,12 +179,14 @@ class HeliostatRayTracer: The scenario used to perform ray tracing. heliostat_group : HeliostatGroup The selected heliostat group containing active heliostats. + blocking_active : bool + Indicates whether blocking is activated. world_size : int The world size i.e., the overall number of processes. rank : int The rank, i.e., individual process ID. batch_size : int - The amount of samples (Heliostats) processed parallel within a single rank. + The amount of samples (heliostats) processed in parallel within a single rank. light_source : LightSource The light source emitting the traced rays. distortions_dataset : DistortionsDataset @@ -199,9 +198,19 @@ class HeliostatRayTracer: bitmap_resolution : int The resolution of the bitmap in both directions. Tensor of shape [2]. + ray_magnitude : float + Magnitude of each single ray. + blocking_heliostat_surfaces : torch.Tensor + The heliostat surfaces considered during blocking calculations. + Tensor of shape [number_of_heliostats, number_of_combined_surface_points_all_facets, 4]. + blocking_heliostat_surfaces_active : torch.Tensor + The aligned heliostat surfaces considered during blocking calculations. + Tensor of shape [number_of_heliostats, number_of_combined_surface_points_all_facets, 4]. Methods ------- + get_sampler_indices() + Get the indices assigned to the current rank by the distributed sampler. trace_rays() Perform heliostat ray tracing. scatter_rays() @@ -216,6 +225,7 @@ def __init__( self, scenario: Scenario, heliostat_group: "HeliostatGroup", + blocking_active: bool = True, world_size: int = 1, rank: int = 0, batch_size: int = 100, @@ -226,6 +236,7 @@ def __init__( artist.util.index_mapping.bitmap_resolution, ] ), + dni: float | None = None, ) -> None: """ Initialize the heliostat ray tracer. @@ -242,6 +253,8 @@ def __init__( The scenario used to perform ray tracing. 
heliostat_group : HeliostatGroup The selected heliostat group containing active heliostats. + blocking_active : bool + Flag indicating whether blocking is activated (default is True). world_size : int The world size i.e., the overall number of processes (default is 1). rank : int @@ -253,9 +266,12 @@ def __init__( bitmap_resolution : torch.Tensor The resolution of the bitmap in both directions. (default is torch.tensor([256,256])). Tensor of shape [2]. + dni : float | None + Direct normal irradiance in W/m^2 (default is None -> ray magnitude = 1.0). """ self.scenario = scenario self.heliostat_group = heliostat_group + self.blocking_active = blocking_active self.world_size = world_size self.rank = rank @@ -271,12 +287,15 @@ def __init__( number_of_points_per_heliostat=self.heliostat_group.active_surface_points.shape[ index_mapping.number_of_surface_points_dimension ], - number_of_heliostats=self.heliostat_group.number_of_active_heliostats, + number_of_active_heliostats=self.heliostat_group.number_of_active_heliostats, random_seed=random_seed, ) # Create restricted distributed sampler. self.distortions_sampler = RestrictedDistributedSampler( number_of_samples=len(self.distortions_dataset), + number_of_active_heliostats=( + self.heliostat_group.active_heliostats_mask > 0 + ).sum(), world_size=self.world_size, rank=self.rank, ) @@ -290,11 +309,65 @@ def __init__( self.bitmap_resolution = bitmap_resolution + if self.blocking_active: + self.blocking_heliostat_surfaces = torch.cat( + [ + group.surface_points + for group in self.scenario.heliostat_field.heliostat_groups + ] + ) + blocking_heliostat_surfaces_active_list = [] + for group in self.scenario.heliostat_field.heliostat_groups: + if group.active_heliostats_mask.sum() <= 0: + blocking_heliostat_surfaces_active_list.append( + group.surface_points + group.positions.unsqueeze(1) + ) + if group.active_heliostats_mask.sum() > 0: + heliostat_mask = torch.cumsum(group.active_heliostats_mask, dim=0) + start_indices = heliostat_mask - group.active_heliostats_mask + blocking_heliostat_surfaces_active_list.append( + group.active_surface_points[start_indices] + ) + self.blocking_heliostat_surfaces_active = torch.cat( + blocking_heliostat_surfaces_active_list + ) + + if dni is not None: + # Calculate surface area per heliostat. + canting_norm = (torch.norm(self.heliostat_group.canting[0], dim=1)[0])[:2] + dimensions = (canting_norm * 4) + 0.02 + heliostat_surface_area = dimensions[0] * dimensions[1] + # Calculate ray magnitude. + power_single_heliostat = dni * heliostat_surface_area + rays_per_heliostat = ( + self.heliostat_group.surface_points.shape[1] + * self.scenario.light_sources.light_source_list[0].number_of_rays + ) + self.ray_magnitude = power_single_heliostat / rays_per_heliostat + else: + self.ray_magnitude = 1.0 + + def get_sampler_indices(self) -> torch.Tensor: + """ + Get the indices assigned to the current rank by the distributed sampler. + + Returns + ------- + torch.Tensor + Indices of the distortions dataset that are assigned to this rank. + Tensor of shape [number of samples assigned to the current rank]. 
+ """ + return torch.tensor( + self.distortions_sampler.rank_indices, + device=self.distortions_dataset.distortions_u.device, + ) + def trace_rays( self, incident_ray_directions: torch.Tensor, active_heliostats_mask: torch.Tensor, target_area_mask: torch.Tensor, + ray_extinction_factor: float = 0.0, device: torch.device | None = None, ) -> torch.Tensor: """ @@ -302,8 +375,9 @@ def trace_rays( Scatter the rays according to the distortions, calculate the intersections with the target planes, and sample the resulting bitmaps on the target areas. The bitmaps are generated separately for each - active heliostat and can be accessed individually or they can be combined to get the total flux - density distribution for all heliostats on all target areas. + active heliostat and are accessed individually. + If blocking is activated in the ``HeliostatRayTracer``, rays that are blocked by other heliostats are + filtered out. Parameters ---------- @@ -317,6 +391,8 @@ def trace_rays( target_area_mask : torch.Tensor The indices of the target areas for each active heliostat. Tensor of shape [number_of_active_heliostats]. + ray_extinction_factor : float + Amount of global ray extinction, responsible for shading (default is 0.0 -> no extinction). device : torch.device | None The device on which to perform computations or load tensors and models (default is None). If None, ``ARTIST`` will automatically select the most appropriate @@ -339,15 +415,6 @@ def trace_rays( self.heliostat_group.active_heliostats_mask, active_heliostats_mask ), "Some heliostats were not aligned and cannot be raytraced." - flux_distributions = torch.zeros( - ( - self.heliostat_group.number_of_active_heliostats, - self.bitmap_resolution[index_mapping.unbatched_bitmap_u], - self.bitmap_resolution[index_mapping.unbatched_bitmap_e], - ), - device=device, - ) - self.heliostat_group.preferred_reflection_directions = raytracing_utils.reflect( incident_ray_directions=incident_ray_directions.unsqueeze( index_mapping.number_rays_per_point @@ -355,6 +422,17 @@ def trace_rays( reflection_surface_normals=self.heliostat_group.active_surface_normals, ) + if self.blocking_active: + ( + blocking_primitives_corners, + blocking_primitives_spans, + blocking_primitives_normals, + ) = blocking.create_blocking_primitives_rectangles_by_index( + blocking_heliostats_active_surface_points=self.blocking_heliostat_surfaces_active, + device=device, + ) + + flux_distributions = [] for batch_index, (batch_u, batch_e) in enumerate(self.distortions_loader): sampler_indices = list(self.distortions_sampler) @@ -390,17 +468,75 @@ def trace_rays( ) ) - bitmaps = self.sample_bitmaps( + # The variable blocked is all zeros if there is no blocking at all in the scene. + # If blocking was activated in the HeliostatRaytracer, blocking will be computed. 
+ number_of_heliostats, number_of_rays, number_of_points, _ = ( + intersections.shape + ) + blocked = torch.zeros( + (number_of_heliostats, number_of_rays, number_of_points), + device=device, + ) + if self.blocking_active: + points_at_ray_origins = self.heliostat_group.active_surface_points[ + active_heliostats_mask_batch, None, :, :3 + ].expand(-1, self.light_source.number_of_rays, -1, -1) + ray_to_heliostat_mapping = torch.arange( + number_of_heliostats, device=device + ).repeat_interleave(number_of_rays * number_of_points) + + filtered_blocking_primitive_indices = ( + blocking.lbvh_filter_blocking_planes( + points_at_ray_origins=points_at_ray_origins, + ray_directions=rays.ray_directions[..., :3], + blocking_primitives_corners=blocking_primitives_corners[ + ..., :3 + ], + ray_to_heliostat_mapping=ray_to_heliostat_mapping, + max_stack_size=128, + device=device, + ) + ) + + if filtered_blocking_primitive_indices.numel() > 0: + blocked = blocking.soft_ray_blocking_mask( + ray_origins=self.heliostat_group.active_surface_points[ + active_heliostats_mask_batch + ], + ray_directions=rays.ray_directions, + blocking_primitives_corners=blocking_primitives_corners[ + filtered_blocking_primitive_indices + ], + blocking_primitives_spans=blocking_primitives_spans[ + filtered_blocking_primitive_indices + ], + blocking_primitives_normals=blocking_primitives_normals[ + filtered_blocking_primitive_indices + ], + distances_to_target=torch.norm( + intersections[..., :3] - points_at_ray_origins, dim=-1 + ), + epsilon=1e-6, + softness=50.0, + ) + + intensities = ( + absolute_intensities * (1 - blocked) * (1 - ray_extinction_factor) + ) + + batch_bitmaps = self.sample_bitmaps( intersections=intersections, - absolute_intensities=absolute_intensities, + absolute_intensities=intensities, active_heliostats_mask=active_heliostats_mask_batch, target_area_mask=target_area_mask[active_heliostats_mask_batch], device=device, ) - flux_distributions = flux_distributions + bitmaps + flux_distributions.append(batch_bitmaps) + + combined = torch.cat(flux_distributions, dim=0) - return flux_distributions + return combined def scatter_rays( self, @@ -448,8 +584,10 @@ def scatter_rays( return Rays( ray_directions=scattered_rays, - ray_magnitudes=torch.ones( - scattered_rays.shape[: index_mapping.ray_directions], device=device + ray_magnitudes=torch.full( + (scattered_rays.shape[: index_mapping.ray_directions]), + self.ray_magnitude, + device=device, ), ) @@ -493,6 +631,10 @@ def sample_bitmaps( """ device = get_device(device=device) + num_heliostats = active_heliostats_mask.sum() + bitmap_height = self.bitmap_resolution[index_mapping.unbatched_bitmap_u] + bitmap_width = self.bitmap_resolution[index_mapping.unbatched_bitmap_e] + plane_widths = ( self.scenario.target_areas.dimensions[target_area_mask][ :, index_mapping.target_area_width @@ -525,40 +667,30 @@ def sample_bitmaps( intersections.shape[index_mapping.number_rays_per_point] * intersections.shape[index_mapping.surface_points] ) - absolute_intensities = absolute_intensities.reshape(-1, total_intersections) - # Determine the x- and y-positions of the intersections with the target areas, scaled to the bitmap resolutions. - dx_intersections = ( + # Determine the e- and u-positions of the intersections with the target areas, scaled to the bitmap resolutions. + # Here we decide that the bottom left corner of the 2D bitmap is the origin of the flux image that is computed. 
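+        # For example (illustration only): on a target plane 10 meters wide and
+        # centered at e = 0, an intersection at e = -5 maps to bitmap column 0
+        # and an intersection at e = +5 maps to column bitmap_width - 1.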
+ target_intersections_e = ( intersections[:, :, :, index_mapping.e] + plane_widths / 2 - plane_centers_e ) - dy_intersections = ( + target_intersections_u = ( intersections[:, :, :, index_mapping.u] + plane_heights / 2 - plane_centers_u ) - # Selection of valid intersection indices within the bounds of the target areas or within a little boundary outside the target areas. - intersection_indices_1 = ( - (-1 <= dx_intersections) - & (dx_intersections < plane_widths + 1) - & (-1 <= dy_intersections) - & (dy_intersections < plane_heights + 1) - ) - - # dx_intersections and dy_intersections contain intersection coordinates ranging from 0 to target_area.plane_e/_u. - # x_intersections and y_intersections contain those intersection coordinates scaled to a range from 0 to bitmap_resolution_e/_u. - # Additionally a mask is applied, only the intersections where intersection_indices == True are kept, the tensors are flattened. - x_intersections = ( - dx_intersections - / plane_widths - * self.bitmap_resolution[index_mapping.unbatched_bitmap_e] + # target_intersections_e and target_intersections_u contain intersection coordinates ranging from 0 to target_area.plane_e/_u. + # bitmap_intersections_e and bitmap_intersections_u contain those intersection coordinates scaled to a range from 0 to bitmap_resolution_e/_u - 1. + # We scale to bitmap_width - 1 and bitmap_height - 1, because the indices start at 0 and end at bitmap_width - 1 or bitmap_height - 1 + bitmap_intersections_e = ( + target_intersections_e / plane_widths * (bitmap_width - 1) ).reshape(-1, total_intersections) - y_intersections = ( - dy_intersections - / plane_heights - * self.bitmap_resolution[index_mapping.unbatched_bitmap_u] + bitmap_intersections_u = ( + target_intersections_u / plane_heights * (bitmap_height - 1) ).reshape(-1, total_intersections) + absolute_intensities = absolute_intensities.reshape(-1, total_intersections) + # We assume a continuously positioned value in-between four # discretely positioned pixels, similar to this: # @@ -571,155 +703,118 @@ def sample_bitmaps( # continuous value anywhere in-between the four pixels we sample. # That the "." may be anywhere in-between the four pixels is not # shown in the ASCII diagram, but is important to keep in mind. - - # The lower-valued neighboring pixels (for x this corresponds to 1 - # and 4, for y to 3 and 4). - x_indices_low = x_intersections.to(torch.int32) - y_indices_low = y_intersections.to(torch.int32) - - # The higher-valued neighboring pixels (for x this corresponds to 2 - # and 3, for y to 1 and 2). 
- x_indices_high = x_indices_low + 1 - y_indices_high = y_indices_low + 1 - - x_indices = torch.zeros( - ( - intersections.shape[index_mapping.heliostat_dimension], - total_intersections * 4, - ), - device=device, - dtype=torch.int32, - ) - - x_indices[:, :total_intersections] = x_indices_low - x_indices[ - :, total_intersections : total_intersections * index_mapping.second_pixel - ] = x_indices_high - x_indices[ - :, - total_intersections * index_mapping.second_pixel : total_intersections - * index_mapping.third_pixel, - ] = x_indices_high - x_indices[:, total_intersections * index_mapping.third_pixel :] = x_indices_low - - y_indices = torch.zeros( - ( - intersections.shape[index_mapping.heliostat_dimension], - total_intersections * 4, - ), - device=device, - dtype=torch.int32, - ) - - y_indices[:, :total_intersections] = y_indices_high - y_indices[ - :, total_intersections : total_intersections * index_mapping.second_pixel - ] = y_indices_high - y_indices[ - :, - total_intersections * index_mapping.second_pixel : total_intersections - * index_mapping.third_pixel, - ] = y_indices_low - y_indices[:, total_intersections * index_mapping.third_pixel :] = y_indices_low + # The western and lower neighbored pixels are saved in indices_low_e and indices_low_u + # (for e this corresponds to pixel 1 and 4, for u to 3 and 4). + # The eastern and upper neighbored pixels are accessed via indices_low_e + 1 and + # indices_low_u + 1 (for e this corresponds to 2 and 3, for u to 1 and 2). + # We use the straight-through estimator trick for differentiability. + indices_floor_e = torch.floor(bitmap_intersections_e) + indices_floor_u = torch.floor(bitmap_intersections_u) + + indices_low_e = ( + bitmap_intersections_e + (indices_floor_e - bitmap_intersections_e).detach() + ).to(torch.int32) + indices_low_u = ( + bitmap_intersections_u + (indices_floor_u - bitmap_intersections_u).detach() + ).to(torch.int32) # When distributing the continuously positioned value/intensity to - # the discretely positioned pixels, we give the corresponding - # "influence" of the value to each neighbor. Here, we calculate this - # influence for each neighbor. - - # x-value influence in 1 and 4. - x_low_influences = x_indices_high - x_intersections - # y-value influence in 3 and 4. - y_low_influences = y_indices_high - y_intersections - # x-value influence in 2 and 3. - x_high_influences = x_intersections - x_indices_low - # y-value influence in 1 and 2. - y_high_influences = y_intersections - y_indices_low + # the discretely positioned pixels, we assign the corresponding + # contribution to each neighbor based on its distance to the original, + # continuous intersection point. + # e-value contribution to 1 and 4. + contributions_low_e = indices_low_e + 1 - bitmap_intersections_e + # u-value contribution to 3 and 4. + contributions_low_u = indices_low_u + 1 - bitmap_intersections_u + # e-value contribution to 2 and 3. + contributions_high_e = bitmap_intersections_e - indices_low_e + # u-value contribution to 1 and 2. + contributions_high_u = bitmap_intersections_u - indices_low_u # We now calculate the distributed intensities for each neighboring # pixel and assign the correctly ordered indices to the intensities # so we know where to position them. The numbers correspond to the # ASCII diagram above. 
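(Editorial aside, before the per-pixel contributions below: the straight-through construction used above keeps the value of `floor` in the forward pass while routing gradients through the input unchanged. In isolation, with toy values:)

import torch

x = torch.tensor([2.7, 5.3], requires_grad=True)

# floor() alone has zero gradient almost everywhere; the straight-through form
# keeps floor(x) in the forward pass but passes gradients as if it were identity.
x_floor_st = x + (torch.floor(x) - x).detach()

x_floor_st.sum().backward()
print(x_floor_st.detach())  # tensor([2., 5.])
print(x.grad)               # tensor([1., 1.])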
intensities_pixel_1 = ( - x_low_influences * y_high_influences * absolute_intensities + contributions_low_e * contributions_high_u * absolute_intensities ) intensities_pixel_2 = ( - x_high_influences * y_high_influences * absolute_intensities + contributions_high_e * contributions_high_u * absolute_intensities ) intensities_pixel_3 = ( - x_high_influences * y_low_influences * absolute_intensities - ) - intensities_pixel_4 = x_low_influences * y_low_influences * absolute_intensities - - intensities = torch.zeros( - (intersections.shape[0], total_intersections * 4), device=device - ) - intensities[:, :total_intersections] = intensities_pixel_1.reshape( - -1, total_intersections + contributions_high_e * contributions_low_u * absolute_intensities ) - intensities[ - :, total_intersections : total_intersections * index_mapping.second_pixel - ] = intensities_pixel_2.reshape(-1, total_intersections) - intensities[ - :, - total_intersections * index_mapping.second_pixel : total_intersections - * index_mapping.third_pixel, - ] = intensities_pixel_3.reshape(-1, total_intersections) - intensities[:, total_intersections * index_mapping.third_pixel :] = ( - intensities_pixel_4.reshape(-1, total_intersections) + intensities_pixel_4 = ( + contributions_low_e * contributions_low_u * absolute_intensities ) # For the distributions, we regarded even those neighboring pixels that are - # _not_ part of the image but within a little boundary outside of the image as well. + # _not_ part of the image. # That is why here, we set up a mask to choose only those indices that are actually # in the bitmap (i.e., we prevent out-of-bounds access). - intersection_indices_2 = ( - (0 <= x_indices) - & (x_indices < self.bitmap_resolution[index_mapping.unbatched_bitmap_e]) - & (0 <= y_indices) - & (y_indices < self.bitmap_resolution[index_mapping.unbatched_bitmap_u]) - ) - - final_intersection_indices = ( - intersection_indices_1.reshape(-1, total_intersections).repeat( - 1, self.heliostat_group.number_of_facets_per_heliostat - ) - & intersection_indices_2 - ) - mask = final_intersection_indices.flatten() - - active_heliostat_indices = torch.nonzero( - active_heliostats_mask, as_tuple=False - ).squeeze() - heliostat_indices = torch.repeat_interleave( - active_heliostat_indices, - total_intersections * self.heliostat_group.number_of_facets_per_heliostat, + intersection_indices_on_target = ( + (0 <= bitmap_intersections_e) + & (bitmap_intersections_e < bitmap_width - 1) + & (0 <= bitmap_intersections_u) + & (bitmap_intersections_u < bitmap_height - 1) ) # Flux density maps for each active heliostat. - bitmaps_per_heliostat = torch.zeros( - ( - self.heliostat_group.number_of_active_heliostats, - self.bitmap_resolution[index_mapping.unbatched_bitmap_u], - self.bitmap_resolution[index_mapping.unbatched_bitmap_e], - ), - dtype=dx_intersections.dtype, - device=device, - ) - - # Add up all distributed intensities in the corresponding indices. - bitmaps_per_heliostat.index_put_( - ( - heliostat_indices[mask], - self.bitmap_resolution[index_mapping.unbatched_bitmap_u] - - 1 - - y_indices[final_intersection_indices], - self.bitmap_resolution[index_mapping.unbatched_bitmap_e] - - 1 - - x_indices[final_intersection_indices], - ), - intensities[final_intersection_indices], - accumulate=True, + bitmaps_flat = torch.zeros( + (num_heliostats, bitmap_height * bitmap_width), device=device + ) + + # scatter_add_ can only handle flat tensors per batch. That is why the bitmaps are flattened. 
+        # As an example: A bitmap with width = 4 and height = 2 has a total of 8 pixels.
+        # Therefore, flattened, the indices range from 0 to 7.
+        #   0     1     2     3
+        # [0,0] [0,1] [0,2] [0,3]
+        # [1,0] [1,1] [1,2] [1,3]
+        #   4     5     6     7
+        # The element at position [1,2] in the 2D array is at index 6 in the flattened tensor.
+        # To convert the pixel indices from their 2D representation to a flattened version, we
+        # compute the row indices times the bitmap_width plus the column indices.
+        # In the example this is 1 * 4 + 2 = 6.
+        # In the general case this is:
+        # flattened_indices = indices_u * bitmap_width + indices_e
+        # Since tensor indices have their origin (0,0) in the top left, but our image indices have their
+        # origin in the bottom left, we need to flip the row (u) indices. That is:
+        # flattened_indices = ((bitmap_height - 1) - indices_u) * bitmap_width + indices_e
+        # The column indices also need to be flipped, because the more intuitive way to look at flux prediction
+        # bitmaps is to imagine standing in the heliostat field and looking at the receiver.
+        # This means that we look at the back side of the flux images, which corresponds to a flip of left and right.
+        # Therefore, our final indices are:
+        # flattened_indices = (((bitmap_height - 1) - indices_u) * bitmap_width + ((bitmap_width - 1) - indices_e))
+        # (For the flips, we subtract 1 from bitmap_height and bitmap_width because flipping operates on indices.
+        # For the multiplication with bitmap_width no 1 is subtracted, because there we are interested in the
+        # number of elements per row, not in indices.)
+        index_3 = (
+            ((bitmap_height - 1) - indices_low_u) * bitmap_width
+            + ((bitmap_width - 1) - (indices_low_e + 1))
+        ).long()
+        index_4 = (
+            ((bitmap_height - 1) - indices_low_u) * bitmap_width
+            + ((bitmap_width - 1) - indices_low_e)
+        ).long()
+
+        # We need to filter out out-of-bounds indices. scatter_add_ cannot handle advanced indexing in its
+        # parameters, therefore we cannot filter out invalid intersections by their indices. Instead, we set
+        # all out-of-bounds indices to 0, so that they do not cause index-out-of-bounds errors, and we also
+        # set the intensities at these indices to 0 so that they do not add to the flux.
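(Two quick numeric checks of the formulas above, with made-up dimensions: the plane-to-bitmap coordinate scaling, and the flip-and-flatten index computation together with `scatter_add_` accumulation:)

import torch

# Plane-to-bitmap scaling: a hit at the east edge of a 10 m wide plane
# (centered at e = 2 m) lands on the last column of a 5 pixel wide bitmap.
plane_width, plane_center_e, bitmap_width = 10.0, 2.0, 5
intersection_e = plane_center_e + plane_width / 2
target_e = intersection_e + plane_width / 2 - plane_center_e  # 10.0
assert target_e / plane_width * (bitmap_width - 1) == bitmap_width - 1

# Flip-and-flatten: image pixel (u=0, e=2) with a bottom-left origin ...
bitmap_height, u, e = 2, 0, 2
flat = ((bitmap_height - 1) - u) * bitmap_width + ((bitmap_width - 1) - e)
assert flat == 7  # ... is row 1, column 2 of the flattened 2 x 5 tensor.

# scatter_add_ accumulates all intensities that land on the same flat index.
bitmap_flat = torch.zeros(1, bitmap_height * bitmap_width)
bitmap_flat.scatter_add_(
    1, torch.tensor([[flat, flat]]), torch.tensor([[0.25, 0.5]])
)
assert bitmap_flat[0, flat] == 0.75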
+        # After the vertical flip, pixels 1 and 2 (the upper neighbors) sit one row above
+        # pixels 4 and 3, so their flattened indices are smaller by bitmap_width.
+        index_1 = index_4 - bitmap_width
+        index_2 = index_3 - bitmap_width
+
+        index_1[~intersection_indices_on_target] = 0
+        index_2[~intersection_indices_on_target] = 0
+        index_3[~intersection_indices_on_target] = 0
+        index_4[~intersection_indices_on_target] = 0
+        intensities_pixel_1 = intensities_pixel_1 * intersection_indices_on_target
+        intensities_pixel_2 = intensities_pixel_2 * intersection_indices_on_target
+        intensities_pixel_3 = intensities_pixel_3 * intersection_indices_on_target
+        intensities_pixel_4 = intensities_pixel_4 * intersection_indices_on_target
+
+        bitmaps_flat.scatter_add_(1, index_1, intensities_pixel_1)
+        bitmaps_flat.scatter_add_(1, index_2, intensities_pixel_2)
+        bitmaps_flat.scatter_add_(1, index_3, intensities_pixel_3)
+        bitmaps_flat.scatter_add_(1, index_4, intensities_pixel_4)
+
+        bitmaps_per_heliostat = bitmaps_flat.view(
+            num_heliostats, bitmap_height, bitmap_width
         )
 
         return bitmaps_per_heliostat
diff --git a/artist/core/kinematic_reconstructor.py b/artist/core/kinematic_reconstructor.py
index cdbc5c3d2..f6d81d048 100644
--- a/artist/core/kinematic_reconstructor.py
+++ b/artist/core/kinematic_reconstructor.py
@@ -5,8 +5,7 @@
 import torch
 from torch.optim.lr_scheduler import LRScheduler
 
-from artist.core import learning_rate_schedulers
-from artist.core.core_utils import per_heliostat_reduction
+from artist.core import core_utils, learning_rate_schedulers
 from artist.core.heliostat_ray_tracer import HeliostatRayTracer
 from artist.core.loss_functions import Loss
 from artist.data_parser.calibration_data_parser import CalibrationDataParser
@@ -36,11 +35,17 @@ class KinematicReconstructor:
         The scenario.
     data : dict[str, CalibrationDataParser | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]]]
         The data parser and the mapping of heliostat name and calibration data.
-    optimization_configuration : dict[str, Any]
-        The parameters for the optimizer, learning rate scheduler, regularizers and early stopping.
+    optimizer_dict : dict[str, Any]
+        The parameters for the optimization.
+    scheduler_dict : dict[str, Any]
+        The parameters for the scheduler.
     reconstruction_method : str
         The reconstruction method. Currently only reconstruction via ray tracing
         is available.
 
+    Note
+    ----
+    Each heliostat selected for reconstruction needs to have the same number of samples as all others.
+
     Methods
     -------
     reconstruct_kinematic()
@@ -82,7 +87,9 @@
         self.ddp_setup = ddp_setup
         self.scenario = scenario
         self.data = data
-        self.optimization_configuration = optimization_configuration
+        self.optimizer_dict = optimization_configuration[config_dictionary.optimization]
+        self.scheduler_dict = optimization_configuration[config_dictionary.scheduler]
+
         if (
             reconstruction_method
             == config_dictionary.kinematic_reconstruction_raytracing
@@ -210,37 +217,45 @@ def _reconstruct_kinematic_parameters_with_raytracing(
                     heliostat_group.kinematic.rotation_deviation_parameters.requires_grad_(),
                     heliostat_group.kinematic.actuators.optimizable_parameters.requires_grad_(),
                 ],
-                lr=self.optimization_configuration[
-                    config_dictionary.initial_learning_rate
-                ],
+                lr=float(
+                    self.optimizer_dict[config_dictionary.initial_learning_rate]
+                ),
             )
 
             # Create a learning rate scheduler.
             scheduler_fn = getattr(
                 learning_rate_schedulers,
-                self.optimization_configuration[config_dictionary.scheduler],
+                self.scheduler_dict[config_dictionary.scheduler_type],
             )
             scheduler: LRScheduler = scheduler_fn(
-                optimizer=optimizer,
-                parameters=self.optimization_configuration[
-                    config_dictionary.scheduler_parameters
+                optimizer=optimizer, parameters=self.scheduler_dict
+            )
+
+            # Set up early stopping.
+ early_stopper = learning_rate_schedulers.EarlyStopping( + window_size=self.optimizer_dict[ + config_dictionary.early_stopping_window + ], + patience=self.optimizer_dict[ + config_dictionary.early_stopping_patience ], + min_improvement=self.optimizer_dict[ + config_dictionary.early_stopping_delta + ], + relative=True, ) # Start the optimization. loss = torch.inf - best_loss = torch.inf - patience_counter = 0 epoch = 0 log_step = ( - self.optimization_configuration[config_dictionary.max_epoch] - if self.optimization_configuration[config_dictionary.log_step] == 0 - else self.optimization_configuration[config_dictionary.log_step] + self.optimizer_dict[config_dictionary.max_epoch] + if self.optimizer_dict[config_dictionary.log_step] == 0 + else self.optimizer_dict[config_dictionary.log_step] ) while ( - loss > self.optimization_configuration[config_dictionary.tolerance] - and epoch - <= self.optimization_configuration[config_dictionary.max_epoch] + loss > float(self.optimizer_dict[config_dictionary.tolerance]) + and epoch <= self.optimizer_dict[config_dictionary.max_epoch] ): optimizer.zero_grad() @@ -257,15 +272,16 @@ def _reconstruct_kinematic_parameters_with_raytracing( device=device, ) - # Create a parallelized ray tracer. + # Create a parallelized ray tracer. Blocking is always deactivated for this reconstruction. ray_tracer = HeliostatRayTracer( scenario=self.scenario, heliostat_group=heliostat_group, + blocking_active=False, world_size=self.ddp_setup[ config_dictionary.heliostat_group_world_size ], rank=self.ddp_setup[config_dictionary.heliostat_group_rank], - batch_size=heliostat_group.number_of_active_heliostats, + batch_size=self.optimizer_dict[config_dictionary.batch_size], random_seed=self.ddp_setup[ config_dictionary.heliostat_group_rank ], @@ -279,28 +295,32 @@ def _reconstruct_kinematic_parameters_with_raytracing( device=device, ) - if self.ddp_setup[config_dictionary.is_nested]: - flux_distributions = torch.distributed.nn.functional.all_reduce( - flux_distributions, - group=self.ddp_setup[config_dictionary.process_subgroup], - op=torch.distributed.ReduceOp.SUM, - ) + sample_indices_for_local_rank = ray_tracer.get_sampler_indices() loss_per_sample = loss_definition( prediction=flux_distributions, - ground_truth=focal_spots_measured, - target_area_mask=target_area_mask, + ground_truth=focal_spots_measured[ + sample_indices_for_local_rank + ], + target_area_mask=target_area_mask[ + sample_indices_for_local_rank + ], reduction_dimensions=(index_mapping.focal_spots,), device=device, ) - loss_per_heliostat = per_heliostat_reduction( - per_sample_values=loss_per_sample, - active_heliostats_mask=active_heliostats_mask, + number_of_samples_per_heliostat = int( + heliostat_group.active_heliostats_mask.sum() + / (heliostat_group.active_heliostats_mask > 0).sum() + ) + + loss_per_heliostat = core_utils.mean_loss_per_heliostat( + loss_per_sample=loss_per_sample, + number_of_samples_per_heliostat=number_of_samples_per_heliostat, device=device, ) - loss = loss_per_heliostat[torch.isfinite(loss_per_heliostat)].sum() + loss = loss_per_heliostat.mean() loss.backward() @@ -318,6 +338,19 @@ def _reconstruct_kinematic_parameters_with_raytracing( ], ) ) + param.grad /= self.ddp_setup[ + config_dictionary.heliostat_group_world_size + ] + + torch.nn.utils.clip_grad_norm_( + [heliostat_group.kinematic.rotation_deviation_parameters], + max_norm=1.0, + ) + + torch.nn.utils.clip_grad_norm_( + [heliostat_group.kinematic.actuators.optimizable_parameters], + max_norm=1.0, + ) optimizer.step() if isinstance( 
@@ -332,38 +365,34 @@ def _reconstruct_kinematic_parameters_with_raytracing(
                         f"Rank: {rank}, Epoch: {epoch}, Loss: {loss}, LR: {optimizer.param_groups[index_mapping.optimizer_param_group_0]['lr']}",
                     )
 
-                # Early stopping when loss has reached a plateau.
-                if (
-                    loss
-                    < best_loss
-                    - self.optimization_configuration[
-                        config_dictionary.early_stopping_delta
-                    ]
-                ):
-                    best_loss = loss
-                    patience_counter = 0
-                else:
-                    patience_counter += 1
-                    if (
-                        patience_counter
-                        >= self.optimization_configuration[
-                            config_dictionary.early_stopping_patience
-                        ]
-                    ):
-                        log.info(
-                            f"Early stopping at epoch {epoch}. The loss did not improve significantly for {patience_counter} epochs."
-                        )
+                # Early stopping when the loss has not improved for a predefined number of epochs.
+                stop = early_stopper.step(loss)
+
+                if stop:
+                    log.info(f"Early stopping at epoch {epoch}.")
                     break
 
                 epoch += 1
 
-            final_loss_per_heliostat[
-                final_loss_start_indices[
-                    heliostat_group_index
-                ] : final_loss_start_indices[heliostat_group_index + 1]
-            ] = loss_per_heliostat
+            local_indices = (
+                sample_indices_for_local_rank[::number_of_samples_per_heliostat]
+                // number_of_samples_per_heliostat
+            )
+
+            global_active_indices = torch.nonzero(
+                active_heliostats_mask != 0, as_tuple=True
+            )[0]
+
+            rank_active_indices_global = global_active_indices[local_indices]
+
+            final_indices = (
+                rank_active_indices_global
+                + final_loss_start_indices[heliostat_group_index]
+            )
+
+            final_loss_per_heliostat[final_indices] = loss_per_heliostat
 
-        log.info(f"Rank: {rank}, kinematic parameters optimized.")
+        log.info(f"Rank: {rank}, Kinematic reconstructed.")
 
         if self.ddp_setup[config_dictionary.is_distributed]:
             for index, heliostat_group in enumerate(
diff --git a/artist/core/learning_rate_schedulers.py b/artist/core/learning_rate_schedulers.py
index c5693de34..19eb498cc 100644
--- a/artist/core/learning_rate_schedulers.py
+++ b/artist/core/learning_rate_schedulers.py
@@ -1,3 +1,6 @@
+from collections import deque
+from typing import Deque
+
 import torch
 from torch.optim import Optimizer
 from torch.optim.lr_scheduler import LRScheduler
@@ -25,7 +28,7 @@ def exponential(
         An exponential learning rate scheduler.
     """
     scheduler = torch.optim.lr_scheduler.ExponentialLR(
-        optimizer, gamma=parameters[config_dictionary.gamma]
+        optimizer, gamma=float(parameters[config_dictionary.gamma])
    )
 
    return scheduler
@@ -52,8 +55,8 @@ def cyclic(
     """
     scheduler = torch.optim.lr_scheduler.CyclicLR(
         optimizer,
-        base_lr=parameters[config_dictionary.min],
-        max_lr=parameters[config_dictionary.max],
+        base_lr=float(parameters[config_dictionary.min]),
+        max_lr=float(parameters[config_dictionary.max]),
         step_size_up=parameters[config_dictionary.step_size_up],
     )
 
@@ -81,11 +84,106 @@ def reduce_on_plateau(
     """
     scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
         optimizer,
-        factor=parameters[config_dictionary.reduce_factor],
+        factor=float(parameters[config_dictionary.reduce_factor]),
         patience=parameters[config_dictionary.patience],
-        threshold=parameters[config_dictionary.threshold],
+        threshold=float(parameters[config_dictionary.threshold]),
         cooldown=parameters[config_dictionary.cooldown],
-        min_lr=parameters[config_dictionary.min],
+        min_lr=float(parameters[config_dictionary.min]),
     )
 
     return scheduler
+
+
+class EarlyStopping:
+    """
+    Implement early stopping.
+
+    Stops optimization when the loss improvement trend over the last few epochs
+    falls below a given threshold.
+
+    Attributes
+    ----------
+    window_size : int
+        Number of epochs used to estimate loss trend (default is 10).
+    patience : int
+        Number of consecutive non-improving windows before stopping (default is 20).
+    min_improvement : float
+        Minimum required improvement over the window to reset patience (default is 1e-4).
+    relative : bool
+        Indicates whether improvement is normalized by loss magnitude (default is True).
+    eps : float
+        Small value for stability (default is 1e-8).
+    loss_history : Deque
+        Loss values of the past epochs.
+    counter : int
+        Counter for the epochs.
+
+    Methods
+    -------
+    step()
+        Update stopping state.
+    """
+
+    def __init__(
+        self,
+        window_size: int = 10,
+        patience: int = 20,
+        min_improvement: float = 1e-4,
+        relative: bool = True,
+        eps: float = 1e-8,
+    ) -> None:
+        """
+        Initialize the early stopping.
+
+        Parameters
+        ----------
+        window_size : int
+            Number of epochs used to estimate loss trend (default is 10).
+        patience : int
+            Number of consecutive non-improving windows before stopping (default is 20).
+        min_improvement : float
+            Minimum required improvement over the window to reset patience (default is 1e-4).
+        relative : bool
+            Indicates whether improvement is normalized by loss magnitude (default is True).
+        eps : float
+            Small value for stability (default is 1e-8).
+        """
+        self.window_size = window_size
+        self.patience = patience
+        self.min_improvement = min_improvement
+        self.relative = relative
+        self.eps = eps
+
+        self.loss_history: Deque[float] = deque(maxlen=window_size)
+        self.counter = 0
+
+    def step(self, loss: float) -> bool:
+        """
+        Update stopping state.
+
+        Parameters
+        ----------
+        loss : float
+            Current loss value.
+
+        Returns
+        -------
+        bool
+            True if optimization should stop, otherwise False.
+        """
+        self.loss_history.append(loss)
+
+        if len(self.loss_history) < self.window_size:
+            return False
+
+        improvement = self.loss_history[0] - self.loss_history[-1]
+
+        if self.relative:
+            improvement /= max(abs(self.loss_history[0]), self.eps)
+
+        if improvement > self.min_improvement:
+            self.counter = 0
+        else:
+            self.counter += 1
+
+        return self.counter >= self.patience
diff --git a/artist/core/loss_functions.py b/artist/core/loss_functions.py
index 9b6c001c6..76f460f6d 100644
--- a/artist/core/loss_functions.py
+++ b/artist/core/loss_functions.py
@@ -3,7 +3,7 @@
 import torch
 
 from artist.scenario.scenario import Scenario
-from artist.util import config_dictionary, index_mapping, utils
+from artist.util import index_mapping, utils
 from artist.util.environment_setup import get_device
 
 
@@ -64,6 +64,10 @@ class VectorLoss(Loss):
     ----------
     loss_function : torch.nn.Module
         A torch module implementing a loss.
+
+    See Also
+    --------
+    :class:`Loss` : Reference to the parent class.
     """
 
     def __init__(self) -> None:
@@ -116,7 +120,7 @@ class FocalSpotLoss(Loss):
     """
-    A loss defined as the elementwise squared distance (Euclidean distance) between predicted focal spots and the ground truth.
+    A loss defined as the Euclidean distance between the predicted focal spot coordinate and the ground truth coordinate.
 
     Attributes
     ----------
@@ -124,6 +128,10 @@
         A torch module implementing a loss.
     scenario : Scenario
         The scenario.
+
+    See Also
+    --------
+    :class:`Loss` : Reference to the parent class.
     """
 
     def __init__(self, scenario: Scenario) -> None:
         """
         Initialize the focal spot loss.
 
         Parameters
         ----------
         scenario : Scenario
             The scenario.
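(A minimal usage sketch of the `EarlyStopping` helper defined above, with synthetic loss values; once the losses plateau, the patience is eventually exhausted and the loop stops:)

from artist.core.learning_rate_schedulers import EarlyStopping

early_stopper = EarlyStopping(window_size=5, patience=3, min_improvement=1e-3)

# Decaying losses followed by a plateau.
losses = [1.0 / (epoch + 1) for epoch in range(30)] + [0.032] * 20
for epoch, loss in enumerate(losses):
    if early_stopper.step(loss):
        print(f"Early stopping at epoch {epoch}.")
        break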
""" - super().__init__(loss_function=torch.nn.MSELoss(reduction="none")) + super().__init__(loss_function=None) self.scenario = scenario def __call__( @@ -170,7 +178,7 @@ def __call__( Returns ------- torch.Tensor - The summed MSE focal spot loss reduced along the specified dimensions. + The focal spot loss. Tensor of shape [number_of_samples]. """ expected_kwargs = ["reduction_dimensions", "device", "target_area_mask"] @@ -200,14 +208,14 @@ def __call__( device=device, ) - loss = self.loss_function(focal_spot, ground_truth) + loss = torch.norm(focal_spot[:, :3] - ground_truth[:, :3], dim=1) - return loss.sum(dim=kwargs["reduction_dimensions"]) + return loss class PixelLoss(Loss): """ - A loss defined as the elementwise squared distance (Euclidean distance) between each pixel of predicted bitmaps and the ground truth. + A loss defined as the elementwise squared error between each pixel of predicted bitmaps and the ground truth. Attributes ---------- @@ -215,6 +223,10 @@ class PixelLoss(Loss): A torch module implementing a loss. scenario : Scenario The scenario. + + See Also + -------- + :class:`Loss` : Reference to the parent class. """ def __init__(self, scenario: Scenario) -> None: @@ -275,38 +287,8 @@ def __call__( + " ".join(errors) ) - device = get_device(device=kwargs["device"]) - - target_area_mask = kwargs["target_area_mask"] - - normalized_predictions = utils.normalize_bitmaps( - flux_distributions=prediction, - target_area_widths=self.scenario.target_areas.dimensions[target_area_mask][ - :, index_mapping.target_area_width - ], - target_area_heights=self.scenario.target_areas.dimensions[target_area_mask][ - :, index_mapping.target_area_height - ], - number_of_rays=self.scenario.light_sources.light_source_list[ - index_mapping.first_light_source - ].number_of_rays, - ) - normalized_ground_truth = utils.normalize_bitmaps( - flux_distributions=ground_truth, - target_area_widths=torch.full( - (ground_truth.shape[index_mapping.heliostat_dimension],), - config_dictionary.utis_crop_width, - device=device, - ), - target_area_heights=torch.full( - (ground_truth.shape[index_mapping.heliostat_dimension],), - config_dictionary.utis_crop_height, - device=device, - ), - number_of_rays=ground_truth.sum( - dim=[index_mapping.batched_bitmap_e, index_mapping.batched_bitmap_u] - ), - ) + normalized_predictions = prediction + normalized_ground_truth = ground_truth loss = self.loss_function(normalized_predictions, normalized_ground_truth) @@ -326,7 +308,7 @@ class KLDivergenceLoss(Loss): def __init__(self) -> None: """Initialize the Kullback-Leibler divergence loss.""" super().__init__( - loss_function=torch.nn.KLDivLoss(reduction="none", log_target=False) + loss_function=torch.nn.KLDivLoss(reduction="none", log_target=True) ) def __call__( @@ -387,7 +369,6 @@ def __call__( if prediction.min() < 0: prediction = prediction - prediction.min() - # Normalize. eps = 1e-12 ground_truth_distributions = torch.nn.functional.normalize( ground_truth, @@ -404,7 +385,7 @@ def __call__( loss = self.loss_function( torch.log(predicted_distributions + eps), - ground_truth_distributions, + torch.log(ground_truth_distributions + eps), ) return loss.sum(dim=kwargs["reduction_dimensions"]) @@ -418,6 +399,10 @@ class AngleLoss(Loss): ---------- loss_function : torch.nn.Module A torch module implementing a loss. + + See Also + -------- + :class:`Loss` : Reference to the parent class. """ def __init__(self) -> None: @@ -450,8 +435,8 @@ def __call__( The summed loss reduced along the specified dimensions. 
Tensor of shape [number_of_samples]. """ - cos_sim = self.loss_function(prediction, ground_truth) + cosine_similarity = self.loss_function(prediction, ground_truth) - loss = 1.0 - cos_sim + loss = 1.0 - cosine_similarity return loss diff --git a/artist/core/motor_position_optimizer.py b/artist/core/motor_position_optimizer.py index ffe3c9476..fd777c8c3 100644 --- a/artist/core/motor_position_optimizer.py +++ b/artist/core/motor_position_optimizer.py @@ -6,7 +6,7 @@ from artist.core import learning_rate_schedulers from artist.core.heliostat_ray_tracer import HeliostatRayTracer -from artist.core.loss_functions import Loss +from artist.core.loss_functions import FocalSpotLoss, KLDivergenceLoss, Loss from artist.field.heliostat_group import HeliostatGroup from artist.scenario.scenario import Scenario from artist.util import config_dictionary, index_mapping @@ -20,14 +20,22 @@ class MotorPositionsOptimizer: """ An optimizer used to find optimal motor positions for the heliostats. + The optimization loss is defined by the loss between the combined predicted and target + flux densities. Additionally there is one constraint that maximizes the flux integral and + one that constraints the maximum pixel intensity (maximum allowed flux density). + Attributes ---------- ddp_setup : dict[str, Any] Information about the distributed environment, process_groups, devices, ranks, world_Size, heliostat group to ranks mapping. scenario : Scenario The scenario. - optimization_configuration : dict[str, Any] - The parameters for the optimizer, learning rate scheduler, regularizers and early stopping. + optimizer_dict : dict[str, Any] + The parameters for the optimization. + scheduler_dict : dict[str, Any] + The parameters for the scheduler. + constraint_dict : dict[str, Any] + The parameters for the constraints. incident_ray_direction : torch.Tensor The incident ray direction during the optimization. Tensor of shape [4]. @@ -36,9 +44,13 @@ class MotorPositionsOptimizer: ground_truth : torch.Tensor The desired focal spot or distribution. Tensor of shape [4] or tensor of shape [bitmap_resolution_e, bitmap_resolution_u]. + dni : float + Direct normal irradiance in W/m^2. bitmap_resolution : torch.Tensor The resolution of all bitmaps during reconstruction. Tensor of shape [2]. + epsilon : float + A small value. Methods ------- @@ -54,7 +66,9 @@ def __init__( incident_ray_direction: torch.Tensor, target_area_index: int, ground_truth: torch.Tensor, + dni: float, bitmap_resolution: torch.Tensor = torch.tensor([256, 256]), + epsilon: float | None = 1e-12, device: torch.device | None = None, ) -> None: """ @@ -76,9 +90,13 @@ def __init__( ground_truth : torch.Tensor The desired focal spot or distribution. Tensor of shape [4] or tensor of shape [bitmap_resolution_e, bitmap_resolution_u]. + dni : float + Direct normal irradiance in W/m^2. bitmap_resolution : torch.Tensor The resolution of all bitmaps during optimization (default is torch.tensor([256,256])). Tensor of shape [2]. + epsilon : float | None + A small value (default is 1e-12). device : torch.device | None The device on which to perform computations or load tensors and models (default is None). 
If None, ``ARTIST`` will automatically select the most appropriate @@ -93,11 +111,15 @@ def __init__( self.ddp_setup = ddp_setup self.scenario = scenario - self.optimization_configuration = optimization_configuration + self.optimizer_dict = optimization_configuration[config_dictionary.optimization] + self.scheduler_dict = optimization_configuration[config_dictionary.scheduler] + self.constraint_dict = optimization_configuration[config_dictionary.constraints] self.incident_ray_direction = incident_ray_direction self.target_area_index = target_area_index self.ground_truth = ground_truth + self.dni = dni self.bitmap_resolution = bitmap_resolution.to(device) + self.epsilon = epsilon def optimize( self, @@ -237,34 +259,43 @@ def optimize( optimizer = torch.optim.Adam( optimizable_parameters_all_groups, - lr=self.optimization_configuration[config_dictionary.initial_learning_rate], + lr=float(self.optimizer_dict[config_dictionary.initial_learning_rate]), ) # Create a learning rate scheduler. scheduler_fn = getattr( learning_rate_schedulers, - self.optimization_configuration[config_dictionary.scheduler], + self.scheduler_dict[config_dictionary.scheduler_type], ) scheduler: LRScheduler = scheduler_fn( - optimizer=optimizer, - parameters=self.optimization_configuration[ - config_dictionary.scheduler_parameters - ], + optimizer=optimizer, parameters=self.scheduler_dict + ) + + # Set up early stopping. + early_stopper = learning_rate_schedulers.EarlyStopping( + window_size=self.optimizer_dict[config_dictionary.early_stopping_window], + patience=self.optimizer_dict[config_dictionary.early_stopping_patience], + min_improvement=self.optimizer_dict[config_dictionary.early_stopping_delta], + relative=True, ) + lambda_energy = None + rho_energy = self.constraint_dict[config_dictionary.rho_energy] + max_flux_density = self.constraint_dict[config_dictionary.max_flux_density] + rho_pixel = self.constraint_dict[config_dictionary.rho_pixel] + lambda_lr = self.constraint_dict[config_dictionary.lambda_lr] + # Start the optimization. loss = torch.inf - best_loss = torch.inf - patience_counter = 0 epoch = 0 log_step = ( - self.optimization_configuration[config_dictionary.max_epoch] - if self.optimization_configuration[config_dictionary.log_step] == 0 - else self.optimization_configuration[config_dictionary.log_step] + self.optimizer_dict[config_dictionary.max_epoch] + if self.optimizer_dict[config_dictionary.log_step] == 0 + else self.optimizer_dict[config_dictionary.log_step] ) while ( - loss > self.optimization_configuration[config_dictionary.tolerance] - and epoch <= self.optimization_configuration[config_dictionary.max_epoch] + loss > float(self.optimizer_dict[config_dictionary.tolerance]) + and epoch <= self.optimizer_dict[config_dictionary.max_epoch] ): optimizer.zero_grad() @@ -279,7 +310,7 @@ def optimize( for heliostat_group_index in self.ddp_setup[ config_dictionary.groups_to_ranks_mapping ][rank]: - heliostat_group: HeliostatGroup = ( + heliostat_alignment_group: HeliostatGroup = ( self.scenario.heliostat_field.heliostat_groups[ heliostat_group_index ] @@ -291,14 +322,14 @@ def optimize( "params" ][heliostat_group_index] ) - heliostat_group.kinematic.motor_positions = ( + heliostat_alignment_group.kinematic.motor_positions = ( initial_motor_positions_all_groups[heliostat_group_index] + motor_positions_normalized * scales_all_groups[heliostat_group_index] ) # Activate heliostats. 
- heliostat_group.activate_heliostats( + heliostat_alignment_group.activate_heliostats( active_heliostats_mask=active_heliostats_masks_all_groups[ heliostat_group_index ], @@ -306,25 +337,36 @@ def optimize( ) # Align heliostats. - heliostat_group.align_surfaces_with_motor_positions( - motor_positions=heliostat_group.kinematic.active_motor_positions, + heliostat_alignment_group.align_surfaces_with_motor_positions( + motor_positions=heliostat_alignment_group.kinematic.active_motor_positions, active_heliostats_mask=active_heliostats_masks_all_groups[ heliostat_group_index ], device=device, ) + for heliostat_group_index in self.ddp_setup[ + config_dictionary.groups_to_ranks_mapping + ][rank]: + heliostat_group: HeliostatGroup = ( + self.scenario.heliostat_field.heliostat_groups[ + heliostat_group_index + ] + ) + # Create a ray tracer. ray_tracer = HeliostatRayTracer( scenario=self.scenario, heliostat_group=heliostat_group, + blocking_active=True, world_size=self.ddp_setup[ config_dictionary.heliostat_group_world_size ], rank=self.ddp_setup[config_dictionary.heliostat_group_rank], - batch_size=heliostat_group.number_of_active_heliostats, + batch_size=self.optimizer_dict[config_dictionary.batch_size], random_seed=self.ddp_setup[config_dictionary.heliostat_group_rank], bitmap_resolution=self.bitmap_resolution, + dni=self.dni, ) # Perform heliostat-based ray tracing. @@ -340,19 +382,12 @@ def optimize( ], device=device, ) - - if self.ddp_setup[config_dictionary.is_nested]: - flux_distributions = torch.distributed.nn.functional.all_reduce( - flux_distributions, - group=self.ddp_setup[config_dictionary.process_subgroup], - op=torch.distributed.ReduceOp.SUM, - ) - + sample_indices_for_local_rank = ray_tracer.get_sampler_indices() flux_distribution_on_target = ray_tracer.get_bitmaps_per_target( bitmaps_per_heliostat=flux_distributions, target_area_mask=target_area_masks_all_groups[ heliostat_group_index - ], + ][sample_indices_for_local_rank], device=device, )[self.target_area_index] @@ -364,18 +399,66 @@ def optimize( op=torch.distributed.ReduceOp.SUM, ) - loss = loss_definition( + # Flux loss. + flux_loss = loss_definition( prediction=total_flux.unsqueeze(index_mapping.heliostat_dimension), ground_truth=self.ground_truth.unsqueeze( index_mapping.heliostat_dimension ), target_area_mask=torch.tensor([self.target_area_index], device=device), - reduction_dimensions=(index_mapping.heliostat_dimension,), + reduction_dimensions=( + index_mapping.batched_bitmap_e, + index_mapping.batched_bitmap_u, + ), device=device, - ).sum() + ) + + if isinstance(loss_definition, FocalSpotLoss): + loss = flux_loss + + if isinstance(loss_definition, KLDivergenceLoss): + # Augmented Lagrangian energy integral. + energy_integral_prediction = total_flux.sum() + energy_integral_target = self.ground_truth.sum() + g_energy = torch.relu( + (energy_integral_target - energy_integral_prediction) + / (energy_integral_target + self.epsilon) + ) + # Regularizer, maximum allowable flux density. 
+ pixel_violation = (total_flux - max_flux_density) / ( + max_flux_density + self.epsilon + ) + pixel_violation = torch.clamp(pixel_violation, min=0.0) + pixel_constraint_loss = rho_pixel * (pixel_violation**2).mean() + + if lambda_energy is None: + lambda_energy = torch.clamp( + flux_loss.detach() / (g_energy + 1e-12), min=1.0 + ) + loss = ( + flux_loss + + lambda_energy * g_energy + + 0.5 * rho_energy * (g_energy**2) + + pixel_constraint_loss + ) + with torch.no_grad(): + lambda_energy = torch.clamp( + lambda_energy + lambda_lr * g_energy.detach(), min=0.0 + ) loss.backward() + # Reduce gradients across all ranks (global process group) + if self.ddp_setup[config_dictionary.is_distributed]: + for param_group in optimizer.param_groups: + for param in param_group["params"]: + if param.grad is not None: + torch.distributed.all_reduce( + param.grad, op=torch.distributed.ReduceOp.SUM + ) + # Average the gradients + param.grad /= self.ddp_setup[config_dictionary.world_size] + optimizer.step() if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau): scheduler.step(loss.detach()) @@ -387,27 +470,11 @@ def optimize( f"Epoch: {epoch}, Loss: {loss.item()}, LR: {optimizer.param_groups[index_mapping.optimizer_param_group_0]['lr']}", ) - # Early stopping when loss has reached a plateau. - if ( - loss - < best_loss - - self.optimization_configuration[ - config_dictionary.early_stopping_delta - ] - ): - best_loss = loss - patience_counter = 0 - else: - patience_counter += 1 - if ( - patience_counter - >= self.optimization_configuration[ - config_dictionary.early_stopping_patience - ] - ): - log.info( - f"Early stopping at epoch {epoch}. The loss did not improve significantly for {patience_counter} epochs." - ) + # Early stopping when loss did not improve since a predefined number of epochs. + stop = early_stopper.step(loss) + + if stop: + log.info(f"Early stopping at epoch {epoch}.") break epoch += 1 diff --git a/artist/core/regularizers.py b/artist/core/regularizers.py index 4c9a20d1d..2468c87db 100644 --- a/artist/core/regularizers.py +++ b/artist/core/regularizers.py @@ -2,9 +2,6 @@ import torch -from artist.util import config_dictionary, index_mapping -from artist.util.environment_setup import get_device - class Regularizer: """ @@ -12,31 +9,25 @@ class Regularizer: Attributes ---------- - weight : float - The weight of the regularization term. reduction_dimensions : tuple[int, ...] The dimensions along which to reduce the regularization term. """ - def __init__(self, weight: float, reduction_dimensions: tuple[int, ...]) -> None: + def __init__(self, reduction_dimensions: tuple[int, ...]) -> None: """ Initialize the base regularizer. Parameters ---------- - weight : float - The weight of the regularization term. reduction_dimensions : tuple[int, ...] The dimensions along which to reduce the regularization term. """ - self.weight = weight self.reduction_dimensions = reduction_dimensions def __call__( self, - original_surface_points: torch.Tensor, - surface_points: torch.Tensor, - surface_normals: torch.Tensor, + current_control_points: torch.Tensor, + original_control_points: torch.Tensor, device: torch.device | None = None, **kwargs: Any, ) -> torch.Tensor: @@ -45,15 +36,12 @@ def __call__( Parameters ---------- - original_surface_points : torch.Tensor - The original surface points. - Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_points, 4]. - surface_points : torch.Tensor - The surface points of the predicted surface. 
-            Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_points, 4].
-        surface_normals : torch.Tensor
-            The surface normals of the predicted surface.
-            Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_normals, 4].
+        current_control_points : torch.Tensor
+            The current control points.
+            Tensor of shape [number_of_heliostats, number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3].
+        original_control_points : torch.Tensor
+            The original control points.
+            Tensor of shape [number_of_heliostats, number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3].
         device : torch.device | None
             The device on which to perform computations or load tensors and models (default is None).
             If None, ``ARTIST`` will automatically select the most appropriate
             device (CUDA or CPU) based on availability and OS.
@@ -69,266 +57,134 @@
         raise NotImplementedError("Must be overridden!")
 
 
-class TotalVariationRegularizer(Regularizer):
+class SmoothnessRegularizer(Regularizer):
     """
-    A regularizer measuring the total variation in a surface.
+    Penalize localized control-point variations to enforce smooth deformations.
 
-    Attributes
-    ----------
-    weight : float
-        The weight of the regularization term.
-    reduction_dimensions : tuple[int]
-        The dimensions along which to reduce the regularization term.
-    surface : str
-        Specifies which part of a surface is regularized (either the surface points or the surface normals).
-    number_of_neighbors : int
-        The number of nearest neighbors to consider.
-    sigma : float | None
-        Determines how quickly the weight falls off as the distance increases.
-    batch_size : int
-        Used to process smaller batches of points instead of creating full distance matrices for all points.
-    epsilon : float
-        A small value used to prevent divisions by zero.
+    The regularization is applied to the displacement of control points relative to the original
+    surface control points using a discrete second-order Laplacian operator.
+
+    See Also
+    --------
+    :class:`Regularizer` : Reference to the parent class.
     """
 
-    def __init__(
-        self,
-        weight: float,
-        reduction_dimensions: tuple[int, ...],
-        surface: str,
-        number_of_neighbors: int = 20,
-        sigma: float | None = None,
-        batch_size: int = 512,
-        epsilon: float = 1e-8,
-    ) -> None:
+    def __init__(self, reduction_dimensions: tuple[int, ...]) -> None:
         """
-        Initialize the total variation regularizer.
+        Initialize the regularizer.
 
         Parameters
         ----------
-        weight : float
-            The weight of the regularization term.
         reduction_dimensions : tuple[int, ...]
-            The dimensions along which to reduce the regularization term.
-        surface : str
-            Specifies which part of a surface is regularized (either the surface points or the surface normals).
-        number_of_neighbors : int
-            The number of nearest neighbors to consider (default is 20).
-        sigma : float | None
-            Determines how quickly the weight falls off as the distance increases (default is None).
-        batch_size : int
-            Used to process smaller batches of points instead of creating full distance matrices for all points (default is 512).
-        epsilon : float
-            A small value used to prevent divisions by zero (default is 1e-8).
+            Dimensions along which to reduce the loss.
""" - self.weight = weight - self.reduction_dimensions = reduction_dimensions - self.surface = surface - self.number_of_neighbors = number_of_neighbors - self.sigma = sigma - self.batch_size = batch_size - self.epsilon = epsilon + super().__init__(reduction_dimensions) def __call__( self, - original_surface_points: torch.Tensor, - surface_points: torch.Tensor, - surface_normals: torch.Tensor, + current_control_points: torch.Tensor, + original_control_points: torch.Tensor, device: torch.device | None = None, **kwargs: Any, ) -> torch.Tensor: - r""" - Compute the regularization. + """ + Compute the Laplacian regularization loss. - This regularization suppresses the noise in the surface. It measures the noise in the surface by - taking absolute differences in the z-values of the provided points. This loss implementation - focuses on local smoothness by applying a Gaussian distance weight and thereby letting - closer points contribute more. + The loss measures how much each control-point displacement differs from the average of its four immediate + neighbors, thereby penalizing localized, non-smooth deformations. Parameters ---------- - original_surface_points : torch.Tensor - The original surface points. - Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_points, 4]. - surface_points : torch.Tensor - The surface points of the predicted surface. - Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_points, 4]. - surface_normals : torch.Tensor - The surface normals of the predicted surface. - Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_normals, 4]. + current_control_points : torch.Tensor + The current control points. + Tensor of shape [number_of_heliostats, number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3]. + original_control_points : torch.Tensor + The current control points. + Tensor of shape [number_of_heliostats, number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3]. device : torch.device | None The device on which to perform computations or load tensors and models (default is None). If None, ``ARTIST`` will automatically select the most appropriate device (CUDA or CPU) based on availability and OS. - \*\*kwargs : Any - Keyword arguments. Returns ------- torch.Tensor - The total variation loss for all provided surfaces. - Tensor of shape [number_of_surfaces]. + Laplacian regularization loss per surface. """ - device = get_device(device=device) - - if self.surface == config_dictionary.surface_points: - regularization_variable = surface_points - if self.surface == config_dictionary.surface_normals: - regularization_variable = surface_normals + control_points_delta = current_control_points - original_control_points - number_of_surfaces, number_of_facets, number_of_surface_points_per_facet, _ = ( - regularization_variable.shape + # Pad to handle edges with replication. 
+ delta_padded = torch.nn.functional.pad( + control_points_delta, (0, 0, 1, 1, 1, 1), mode="replicate" ) - coordinates = regularization_variable[:, :, :, : index_mapping.z_coordinates] - z_values = regularization_variable[:, :, :, index_mapping.z_coordinates] - - if self.sigma is None: - coordinates_std = coordinates.std(dim=1).mean().item() - sigma = max(coordinates_std * 0.1, 1e-6) - else: - sigma = float(self.sigma + 1e-12) - variation_loss_sum = torch.zeros( - (number_of_surfaces, number_of_facets), device=device - ) - number_of_valid_neighbors = torch.zeros( - (number_of_surfaces, number_of_facets), device=device + # Discrete Laplacian of all neighbors (up, down, left, right). + laplace = ( + 4 * control_points_delta + - delta_padded[:, :, :-2, 1:-1, :] + - delta_padded[:, :, 2:, 1:-1, :] + - delta_padded[:, :, 1:-1, :-2, :] + - delta_padded[:, :, 1:-1, 2:, :] ) - # Iterate over query points in batches to limit memory usage. - for start_index in range( - 0, number_of_surface_points_per_facet, self.batch_size - ): - end_index = min( - start_index + self.batch_size, number_of_surface_points_per_facet - ) - number_of_points_in_batch = end_index - start_index - - batch_coordinates = coordinates[:, :, start_index:end_index, :] - batch_z_values = z_values[:, :, start_index:end_index] - - # Compute pairwise distances between the current batch coordinates and all coordinates and exclude identities. - distances = torch.cdist(batch_coordinates, coordinates) - rows = torch.arange(number_of_points_in_batch, device=device) - cols = (start_index + rows).to(device) - self_mask = torch.zeros_like(distances, dtype=torch.bool) - self_mask[:, :, rows, cols] = True - masked_distances = torch.where( - self_mask, torch.full_like(distances, 1e9), distances - ) - - # Select the k-nearest neighbors (or fewer if the coordinate is near an edge). - number_of_neighbors_to_select = min( - self.number_of_neighbors, number_of_surface_points_per_facet - 1 - ) - selected_distances, selected_indices = torch.topk( - masked_distances, - number_of_neighbors_to_select, - largest=False, - dim=index_mapping.neighboring_points, - ) - valid_mask = selected_distances < 1e9 - - # Get all z_values of the selected neighbors and the absolute z_value_variations. - z_values_neighbors = torch.gather( - z_values.unsqueeze(index_mapping.points_batch).expand( - -1, -1, number_of_points_in_batch, -1 - ), - 3, - selected_indices, - ) - z_value_variations = torch.abs( - batch_z_values.unsqueeze(index_mapping.z_value_variations) - - z_values_neighbors - ) - z_value_variations = z_value_variations * valid_mask.type_as( - z_value_variations - ) - - # Accumulate weighted z_value_variations. - weights = torch.exp(-0.5 * (selected_distances / sigma) ** 2) - weights = weights * valid_mask.type_as(weights) - variation_loss_sum = variation_loss_sum + ( - weights * z_value_variations - ).sum(dim=(index_mapping.points_batch, index_mapping.z_value_variations)) - number_of_valid_neighbors = number_of_valid_neighbors + valid_mask.type_as( - z_value_variations - ).sum(dim=(index_mapping.points_batch, index_mapping.z_value_variations)) - - # Batched total variation losses. 
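(A quick editorial check of the replicate-padded Laplacian stencil introduced above, with hypothetical control-point shapes: a uniform displacement of all control points incurs no penalty, while a localized bump of the same magnitude does.)

import torch

def laplacian_penalty(delta: torch.Tensor) -> torch.Tensor:
    # Same stencil as the SmoothnessRegularizer above:
    # [heliostats, facets, points_u, points_v, 3].
    padded = torch.nn.functional.pad(delta, (0, 0, 1, 1, 1, 1), mode="replicate")
    laplace = (
        4 * delta
        - padded[:, :, :-2, 1:-1, :]
        - padded[:, :, 2:, 1:-1, :]
        - padded[:, :, 1:-1, :-2, :]
        - padded[:, :, 1:-1, 2:, :]
    )
    return (laplace**2).mean(dim=(2, 3, 4))

uniform = torch.ones(1, 1, 5, 5, 3)       # smooth displacement -> no penalty
assert laplacian_penalty(uniform).max() == 0

bumped = torch.zeros(1, 1, 5, 5, 3)       # single-point bump -> penalized
bumped[0, 0, 2, 2, :] = 1.0
assert laplacian_penalty(bumped).max() > 0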
-        variation_loss = variation_loss_sum / (number_of_valid_neighbors + self.epsilon)
-
-        return variation_loss.sum(dim=self.reduction_dimensions)
+        laplacian_loss = (laplace**2).mean(dim=(2, 3, 4))
+        laplacian_loss = laplacian_loss.sum(dim=self.reduction_dimensions)
+
+        return laplacian_loss
 
 
 class IdealSurfaceRegularizer(Regularizer):
     """
-    A regularizer measuring the difference between a predicted surface and the ideal.
+    Penalize deviations of control points from the original control points.
 
-    Attributes
-    ----------
-    weight : float
-        The weight of the regularization term.
-    reduction_dimensions : tuple[int]
-        The dimensions along which to reduce the regularization term.
+    See Also
+    --------
+    :class:`Regularizer` : Reference to the parent class.
     """
 
-    def __init__(self, weight: float, reduction_dimensions: tuple[int, ...]) -> None:
+    def __init__(self, reduction_dimensions: tuple[int, ...]) -> None:
         """
-        Initialize the ideal surface regularizer.
+        Initialize the regularizer.
 
         Parameters
         ----------
-        weight : float
-            The weight of the regularization term.
         reduction_dimensions : tuple[int, ...]
-            The dimensions along which to reduce the regularization term.
+            Dimensions along which to reduce the loss.
         """
-        self.weight = weight
-        self.reduction_dimensions = reduction_dimensions
+        super().__init__(reduction_dimensions)
 
     def __call__(
         self,
-        original_surface_points: torch.Tensor,
-        surface_points: torch.Tensor,
-        surface_normals: torch.Tensor,
+        current_control_points: torch.Tensor,
+        original_control_points: torch.Tensor,
         device: torch.device | None = None,
         **kwargs: Any,
     ) -> torch.Tensor:
-        r"""
-        Compute the regularization.
-
-        This regularization suppresses large changes in the control points positions. The real
-        surface is expected to be close to the ideal surface, therefore large changes are penalized.
+        """
+        Compute the L2 loss between current control points and original control points.
 
         Parameters
         ----------
-        original_surface_points : torch.Tensor
-            The original surface points.
-            Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_points, 4].
-        surface_points : torch.Tensor
-            The surface points of the predicted surface.
-            Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_points, 4].
-        surface_normals : torch.Tensor
-            The surface normals of the predicted surface.
-            Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_normals, 4].
+        current_control_points : torch.Tensor
+            The current control points.
+            Tensor of shape [number_of_heliostats, number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3].
+        original_control_points : torch.Tensor
+            The original control points.
+            Tensor of shape [number_of_heliostats, number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3].
         device : torch.device | None
             The device on which to perform computations or load tensors and models (default is None).
             If None, ``ARTIST`` will automatically select the most appropriate
             device (CUDA or CPU) based on availability and OS.
-        \*\*kwargs : Any
-            Keyword arguments.
 
         Returns
         -------
         torch.Tensor
-            The differences from the predicted surfaces to the ideal surfaces.
-            Tensor of shape [number_of_surfaces].
+            L2 deviation loss per surface.
""" - loss_function = torch.nn.MSELoss(reduction="none") - - loss = loss_function(original_surface_points, surface_points) + delta = current_control_points - original_control_points + delta_squared = delta**2 - reduced_loss = loss.sum(dim=self.reduction_dimensions) + per_facet_loss = delta_squared.mean(dim=(2, 3, 4)) + loss = per_facet_loss.sum(dim=self.reduction_dimensions) - return reduced_loss + return loss diff --git a/artist/core/surface_reconstructor.py b/artist/core/surface_reconstructor.py index 8a1ef6c53..c1f9e7be6 100644 --- a/artist/core/surface_reconstructor.py +++ b/artist/core/surface_reconstructor.py @@ -5,14 +5,18 @@ import torch from torch.optim.lr_scheduler import LRScheduler -from artist.core import learning_rate_schedulers -from artist.core.core_utils import per_heliostat_reduction, scale_loss +from artist.core import core_utils, learning_rate_schedulers from artist.core.heliostat_ray_tracer import HeliostatRayTracer from artist.core.loss_functions import Loss +from artist.core.regularizers import IdealSurfaceRegularizer, SmoothnessRegularizer from artist.data_parser.calibration_data_parser import CalibrationDataParser from artist.field.heliostat_group import HeliostatGroup from artist.scenario.scenario import Scenario -from artist.util import config_dictionary, index_mapping, utils +from artist.util import ( + config_dictionary, + index_mapping, + utils, +) from artist.util.environment_setup import get_device from artist.util.nurbs import NURBSSurfaces @@ -27,6 +31,10 @@ class SurfaceReconstructor: The surface reconstructor learns a surface representation from measured flux density distributions. The optimizable parameters for this optimization process are the NURBS control points. + The reconstruction loss is defined by the loss between the flux density predictions and measurements. + Further, the reconstruction is constrained by energy constraints to preserve energy in the reconstructed + surfaces. There are also optional regularizers to keep the NURBS control points close to the ideal + surface and smooth. Attributes ---------- @@ -36,15 +44,25 @@ class SurfaceReconstructor: The scenario. data : dict[str, CalibrationDataParser | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]]] The data parser and the mapping of heliostat name and calibration data. - optimization_configuration : dict[str, Any] - The parameters for the optimizer, learning rate scheduler, regularizers and early stopping. + optimizer_dict : dict[str, Any] + The parameters for the optimization. + scheduler_dict : dict[str, Any] + The parameters for the scheduler. + constraint_dict : dict[str, Any] + The parameters for the constraints. number_of_surface_points : torch.Tensor The number of surface points of the reconstructed surfaces. Tensor of shape [2]. + epsilon : float + A small value. bitmap_resolution : torch.Tensor The resolution of all bitmaps during reconstruction. Tensor of shape [2]. + Note + ---- + Each heliostat selected for reconstruction needs to have the same amount of samples as all others. 
+
     Methods
     -------
     reconstruct_surfaces()
@@ -65,6 +83,7 @@ def __init__(
         optimization_configuration: dict[str, Any],
         number_of_surface_points: torch.Tensor = torch.tensor([50, 50]),
         bitmap_resolution: torch.Tensor = torch.tensor([256, 256]),
+        epsilon: float | None = 1e-12,
         device: torch.device | None = None,
     ) -> None:
         """
@@ -79,13 +98,15 @@
         data : dict[str, CalibrationDataParser | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]]]
             The data parser and the mapping of heliostat name and calibration data.
         optimization_configuration : dict[str, Any]
-            The parameters for the optimizer, learning rate scheduler and early stopping.
+            The parameters for the optimizer, learning rate scheduler, early stopping and constraints.
         number_of_surface_points : torch.Tensor
             The number of surface points of the reconstructed surfaces (default is torch.tensor([50,50])).
             Tensor of shape [2].
         bitmap_resolution : torch.Tensor
             The resolution of all bitmaps during reconstruction (default is torch.tensor([256,256])).
             Tensor of shape [2].
+        epsilon : float | None
+            A small value that avoids division by zero (default is 1e-12).
         device : torch.device | None
             The device on which to perform computations or load tensors and models (default is None).
             If None, ``ARTIST`` will automatically select the most appropriate
@@ -101,9 +122,12 @@
         self.ddp_setup = ddp_setup
         self.scenario = scenario
         self.data = data
-        self.optimization_configuration = optimization_configuration
+        self.optimizer_dict = optimization_configuration[config_dictionary.optimization]
+        self.scheduler_dict = optimization_configuration[config_dictionary.scheduler]
+        self.constraint_dict = optimization_configuration[config_dictionary.constraints]
         self.number_of_surface_points = number_of_surface_points.to(device)
         self.bitmap_resolution = bitmap_resolution.to(device)
+        self.epsilon = epsilon
 
     def reconstruct_surfaces(
         self,
@@ -133,7 +157,7 @@
         rank = self.ddp_setup[config_dictionary.rank]
 
         if rank == 0:
-            log.info("Start the surface reconstruction.")
+            log.info("Beginning surface reconstruction.")
 
         final_loss_per_heliostat = torch.full(
             (self.scenario.heliostat_field.number_of_heliostats_per_group.sum(),),
@@ -155,7 +179,6 @@
             heliostat_group: HeliostatGroup = (
                 self.scenario.heliostat_field.heliostat_groups[heliostat_group_index]
             )
-
             parser = cast(
                 CalibrationDataParser, self.data[config_dictionary.data_parser]
             )
@@ -179,41 +202,6 @@
             )
 
             if active_heliostats_mask.sum() > 0:
-                # Crop target fluxes.
-                cropped_measured_flux_distributions = (
-                    utils.crop_flux_distributions_around_center(
-                        flux_distributions=measured_flux_distributions,
-                        crop_width=config_dictionary.utis_crop_width,
-                        crop_height=config_dictionary.utis_crop_height,
-                        target_plane_widths=self.scenario.target_areas.dimensions[
-                            target_area_mask
-                        ][:, index_mapping.target_area_width],
-                        target_plane_heights=self.scenario.target_areas.dimensions[
-                            target_area_mask
-                        ][:, index_mapping.target_area_height],
-                        device=device,
-                    )
-                )
-
-                # Activate heliostats.
-                heliostat_group.activate_heliostats(
-                    active_heliostats_mask=active_heliostats_mask, device=device
-                )
-
-                # Get the start indices for the separate heliostats in the active_-properties-tensors that contain heliostat duplicates for each sample.
- nonzero_active_heliostats_mask = active_heliostats_mask[ - active_heliostats_mask > 0 - ] - start_indices_heliostats = torch.cumsum( - torch.cat( - [ - torch.tensor([0], device=device), - nonzero_active_heliostats_mask[:-1], - ] - ), - dim=index_mapping.heliostat_dimension, - ) - # Create NURBS evaluation points. evaluation_points = ( utils.create_nurbs_evaluation_grid( @@ -223,64 +211,80 @@ def reconstruct_surfaces( .unsqueeze(index_mapping.heliostat_dimension) .unsqueeze(index_mapping.facet_index_unbatched) .expand( - heliostat_group.number_of_active_heliostats, + active_heliostats_mask.sum(), heliostat_group.number_of_facets_per_heliostat, -1, -1, ) ) - original_nurbs_surfaces = NURBSSurfaces( - degrees=heliostat_group.nurbs_degrees, - control_points=heliostat_group.nurbs_control_points, - device=device, - ) - - original_surface_points, _ = ( - original_nurbs_surfaces.calculate_surface_points_and_normals( - evaluation_points=evaluation_points[ - index_mapping.first_heliostat - ] - .unsqueeze(index_mapping.heliostat_dimension) - .expand(heliostat_group.number_of_heliostats, -1, -1, -1), - device=device, - ) - ) + with torch.no_grad(): + original_control_points = heliostat_group.nurbs_control_points[ + active_heliostats_mask > 0 + ].clone() # Create the optimizer. optimizer = torch.optim.Adam( [heliostat_group.nurbs_control_points.requires_grad_()], - lr=self.optimization_configuration[ - config_dictionary.initial_learning_rate - ], + lr=float( + self.optimizer_dict[config_dictionary.initial_learning_rate] + ), ) # Create a learning rate scheduler. scheduler_fn = getattr( learning_rate_schedulers, - self.optimization_configuration[config_dictionary.scheduler], + self.scheduler_dict[config_dictionary.scheduler_type], ) scheduler: LRScheduler = scheduler_fn( - optimizer=optimizer, - parameters=self.optimization_configuration[ - config_dictionary.scheduler_parameters + optimizer=optimizer, parameters=self.scheduler_dict + ) + + # Set up early stopping. + early_stopper = learning_rate_schedulers.EarlyStopping( + window_size=self.optimizer_dict[ + config_dictionary.early_stopping_window + ], + patience=self.optimizer_dict[ + config_dictionary.early_stopping_patience ], + min_improvement=self.optimizer_dict[ + config_dictionary.early_stopping_delta + ], + relative=True, ) + energy_per_flux_reference = torch.zeros_like(active_heliostats_mask) + initial_lambda_energy = self.constraint_dict[ + config_dictionary.initial_lambda_energy + ] + lambda_energy = None + rho_energy = self.constraint_dict[config_dictionary.rho_energy] + energy_tolerance = self.constraint_dict[ + config_dictionary.energy_tolerance + ] + weight_smoothness = self.constraint_dict[ + config_dictionary.weight_smoothness + ] + weight_ideal_surface = self.constraint_dict[ + config_dictionary.weight_ideal_surface + ] + # Start the optimization. 
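+                # The energy constraint in the loop below follows an augmented-Lagrangian-style
+                # scheme (sketch): for each heliostat, a relative energy deviation g is penalized
+                # in the loss as lambda * g + 0.5 * rho * g**2, and after every epoch the
+                # multiplier is updated as lambda <- clamp(lambda + rho * g, min=0).
+                # Here, rho_energy controls the quadratic penalty strength and energy_tolerance
+                # defines how much energy deviation is accepted before the constraint activates.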
loss = torch.inf - best_loss = torch.inf - patience_counter = 0 epoch = 0 log_step = ( - self.optimization_configuration[config_dictionary.max_epoch] - if self.optimization_configuration[config_dictionary.log_step] == 0 - else self.optimization_configuration[config_dictionary.log_step] + self.optimizer_dict[config_dictionary.max_epoch] + if self.optimizer_dict[config_dictionary.log_step] == 0 + else self.optimizer_dict[config_dictionary.log_step] + ) + max_epoch = torch.tensor( + [self.optimizer_dict[config_dictionary.max_epoch]], + device=device, ) while ( - loss > self.optimization_configuration[config_dictionary.tolerance] - and epoch - <= self.optimization_configuration[config_dictionary.max_epoch] + loss > float(self.optimizer_dict[config_dictionary.tolerance]) + and epoch <= max_epoch ): optimizer.zero_grad() @@ -301,7 +305,10 @@ def reconstruct_surfaces( new_surface_points, new_surface_normals, ) = nurbs_surfaces.calculate_surface_points_and_normals( - evaluation_points=evaluation_points, device=device + evaluation_points=evaluation_points, + canting=heliostat_group.active_canting, + facet_translations=heliostat_group.active_facet_translations, + device=device, ) # The alignment module and the ray tracer do not accept facetted points and normals, therefore they need to be reshaped. @@ -330,15 +337,16 @@ def reconstruct_surfaces( device=device, ) - # Create a parallelized ray tracer. + # Create a parallelized ray tracer. Blocking is always deactivated for this reconstruction. ray_tracer = HeliostatRayTracer( scenario=self.scenario, heliostat_group=heliostat_group, + blocking_active=False, world_size=self.ddp_setup[ config_dictionary.heliostat_group_world_size ], rank=self.ddp_setup[config_dictionary.heliostat_group_rank], - batch_size=heliostat_group.number_of_active_heliostats, + batch_size=self.optimizer_dict[config_dictionary.batch_size], random_seed=self.ddp_setup[ config_dictionary.heliostat_group_rank ], @@ -353,13 +361,15 @@ def reconstruct_surfaces( device=device, ) - # Reduce predicted fluxes from all ranks within each subgroup. - if self.ddp_setup[config_dictionary.is_nested]: - flux_distributions = torch.distributed.nn.functional.all_reduce( - flux_distributions, - group=self.ddp_setup[config_dictionary.process_subgroup], - op=torch.distributed.ReduceOp.SUM, - ) + sample_indices_for_local_rank = ray_tracer.get_sampler_indices() + number_of_samples_per_heliostat = int( + heliostat_group.active_heliostats_mask.sum() + / (heliostat_group.active_heliostats_mask > 0).sum() + ) + local_indices = ( + sample_indices_for_local_rank[::number_of_samples_per_heliostat] + // number_of_samples_per_heliostat + ) cropped_flux_distributions = ( utils.crop_flux_distributions_around_center( @@ -367,73 +377,113 @@ def reconstruct_surfaces( crop_width=config_dictionary.utis_crop_width, crop_height=config_dictionary.utis_crop_height, target_plane_widths=self.scenario.target_areas.dimensions[ - target_area_mask + target_area_mask[sample_indices_for_local_rank] ][:, index_mapping.target_area_width], target_plane_heights=self.scenario.target_areas.dimensions[ - target_area_mask + target_area_mask[sample_indices_for_local_rank] ][:, index_mapping.target_area_height], device=device, ) ) - # Loss comparing the predicted flux and the target flux. + # Flux loss. 
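+                    # The per-sample loss below is reduced over both bitmap axes and then
+                    # averaged per heliostat. Assuming consecutive samples belong to the
+                    # same heliostat, e.g., two samples per heliostat with per-sample
+                    # losses [a, b, c, d] reduce to [(a + b) / 2, (c + d) / 2].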
flux_loss_per_sample = loss_definition( prediction=cropped_flux_distributions, - ground_truth=cropped_measured_flux_distributions, - target_area_mask=target_area_mask, + ground_truth=measured_flux_distributions[ + sample_indices_for_local_rank + ], + target_area_mask=target_area_mask[ + sample_indices_for_local_rank + ], reduction_dimensions=( index_mapping.batched_bitmap_e, index_mapping.batched_bitmap_u, ), device=device, ) - - flux_loss_per_heliostat = per_heliostat_reduction( - per_sample_values=flux_loss_per_sample, - active_heliostats_mask=active_heliostats_mask, + flux_loss_per_heliostat = core_utils.mean_loss_per_heliostat( + loss_per_sample=flux_loss_per_sample, + number_of_samples_per_heliostat=number_of_samples_per_heliostat, device=device, ) - # Include regularization terms. - for regularizer in self.optimization_configuration[ - config_dictionary.regularizers - ]: - regularization_term_active_heliostats = regularizer( - original_surface_points=original_surface_points[ - active_heliostats_mask > 0 - ], - surface_points=new_surface_points[start_indices_heliostats], - surface_normals=new_surface_normals[ - start_indices_heliostats - ], + # Augmented Lagrangian. + if epoch == 0: + energy_per_flux_reference = cropped_flux_distributions.sum( + dim=(1, 2) + ).detach() + g_energy = ( + cropped_flux_distributions.sum(dim=(1, 2)) + - energy_per_flux_reference + ) / (energy_per_flux_reference + self.epsilon) + energy_constraint = torch.minimum( + g_energy + energy_tolerance, torch.zeros_like(g_energy) + ) + energy_constraint_per_heliostat = core_utils.mean_loss_per_heliostat( + loss_per_sample=energy_constraint, + number_of_samples_per_heliostat=number_of_samples_per_heliostat, + device=device, + ) + if lambda_energy is None: + lambda_energy = torch.full_like( + energy_constraint_per_heliostat, + initial_lambda_energy, + dtype=torch.float32, device=device, ) + constraint = ( + lambda_energy.detach() * energy_constraint_per_heliostat + + 0.5 * rho_energy * energy_constraint_per_heliostat**2 + ) - regularization_term_per_heliostat = torch.full( - (active_heliostats_mask.shape[0],), - float("inf"), - device=device, - ) - regularization_term_per_heliostat[ - active_heliostats_mask > 0 - ] = regularization_term_active_heliostats - - scaled_regularization_term_per_heliostat = scale_loss( - loss=regularization_term_per_heliostat, - reference=flux_loss_per_heliostat, - weight=regularizer.weight, - ) + # Regularization terms. 
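+                    # The weights alpha and beta computed below rescale each regularizer to
+                    # the magnitude of the flux loss before applying the configured weight.
+                    # For example (sketch), with a mean flux loss of 0.1, a mean smoothness
+                    # loss of 10.0, and weight_smoothness = 0.01, alpha is approximately
+                    # 0.01 * 0.1 / 10.0 = 1e-4.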
+                    smoothness_loss_per_heliostat = torch.zeros_like(
+                        flux_loss_per_heliostat, device=device
+                    )
+                    ideal_surface_loss_per_heliostat = torch.zeros_like(
+                        flux_loss_per_heliostat, device=device
+                    )
+                    if self.constraint_dict[config_dictionary.regularizers] is not None:
+                        for regularizer in self.constraint_dict[
+                            config_dictionary.regularizers
+                        ]:
+                            regularization_term_active_heliostats = regularizer(
+                                current_control_points=heliostat_group.active_nurbs_control_points[
+                                    ::number_of_samples_per_heliostat
+                                ][local_indices],
+                                original_control_points=original_control_points[
+                                    local_indices
+                                ],
+                                device=device,
+                            )
+                            if isinstance(regularizer, SmoothnessRegularizer):
+                                smoothness_loss_per_heliostat = (
+                                    regularization_term_active_heliostats
+                                )
+                            if isinstance(regularizer, IdealSurfaceRegularizer):
+                                ideal_surface_loss_per_heliostat = (
+                                    regularization_term_active_heliostats
+                                )
+                    alpha = (
+                        weight_smoothness
+                        * flux_loss_per_heliostat.mean()
+                        / (smoothness_loss_per_heliostat.mean() + self.epsilon)
+                    )
+                    beta = (
+                        weight_ideal_surface
+                        * flux_loss_per_heliostat.mean()
+                        / (ideal_surface_loss_per_heliostat.mean() + self.epsilon)
+                    )
 
-                    flux_loss_per_heliostat = (
-                        flux_loss_per_heliostat
-                        + scaled_regularization_term_per_heliostat
-                    )
+                    total_loss_per_heliostat = (
+                        flux_loss_per_heliostat
+                        + constraint
+                        + alpha * smoothness_loss_per_heliostat
+                        + beta * ideal_surface_loss_per_heliostat
+                    )
 
-                    flux_loss_mean = flux_loss_per_heliostat[
-                        torch.isfinite(flux_loss_per_heliostat)
-                    ].mean()
-                    loss = flux_loss_mean
-                    loss.backward()
+                    total_loss = total_loss_per_heliostat.mean()
+                    # Track the scalar loss for the convergence check at the top of the
+                    # loop and for early stopping.
+                    loss = total_loss.detach()
+                    total_loss.backward()
 
                     if self.ddp_setup[config_dictionary.is_nested]:
                         # Reduce gradients within each heliostat group.
@@ -447,6 +497,9 @@
                                     config_dictionary.process_subgroup
                                 ],
                             )
+                            param.grad /= self.ddp_setup[
+                                config_dictionary.heliostat_group_world_size
+                            ]
 
                     # Keep the surfaces in their original geometric shape by locking the control points on the outer edges.
                     optimizer.param_groups[index_mapping.optimizer_param_group_0][
@@ -464,47 +517,44 @@
                     if isinstance(
                         scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau
                     ):
-                        scheduler.step(loss.detach())
+                        scheduler.step(total_loss.detach())
                     else:
                         scheduler.step()
 
-                    if epoch % log_step == 0 and rank == 0:
-                        log.info(
-                            f"Epoch: {epoch}, Loss: {flux_loss_per_heliostat.tolist()}, LR: {optimizer.param_groups[index_mapping.optimizer_param_group_0]['lr']}",
+                    with torch.no_grad():
+                        lambda_energy += (
+                            rho_energy * energy_constraint_per_heliostat.detach()
                         )
+                        lambda_energy.clamp_(min=0.0)
 
-                    # Early stopping when loss has reached a plateau.
-                    if (
-                        loss
-                        < best_loss
-                        - self.optimization_configuration[
-                            config_dictionary.early_stopping_delta
-                        ]
-                    ):
-                        best_loss = loss
-                        patience_counter = 0
-                    else:
-                        patience_counter += 1
-                        if (
-                            patience_counter
-                            >= self.optimization_configuration[
-                                config_dictionary.early_stopping_patience
-                            ]
-                        ):
+                    if epoch % log_step == 0 and rank == 0:
                         log.info(
-                            f"Early stopping at epoch {epoch}. The loss did not improve significantly for {patience_counter} epochs."
+                            f"Rank: {rank}, Epoch: {epoch}, Loss: {total_loss}, LR: {optimizer.param_groups[index_mapping.optimizer_param_group_0]['lr']}"
                         )
+
+                    # Early stopping when the loss has not improved for a predefined number of epochs.
+                    stop = early_stopper.step(loss)
+
+                    if stop:
+                        log.info(f"Early stopping at epoch {epoch}.")
                         break
 
                     epoch += 1
 
-            final_loss_per_heliostat[
-                final_loss_start_indices[
-                    heliostat_group_index
-                ] : final_loss_start_indices[heliostat_group_index + 1]
-            ] = flux_loss_per_heliostat
+            global_active_indices = torch.nonzero(
+                active_heliostats_mask != 0, as_tuple=True
+            )[0]
+
+            rank_active_indices_global = global_active_indices[local_indices]
 
-            log.info(f"Rank: {rank}, surfaces reconstructed.")
+            final_indices = (
+                rank_active_indices_global
+                + final_loss_start_indices[heliostat_group_index]
+            )
+
+            final_loss_per_heliostat[final_indices] = total_loss_per_heliostat
+
+            log.info(f"Rank: {rank}, Surfaces reconstructed.")
 
         if self.ddp_setup[config_dictionary.is_distributed]:
             for index, heliostat_group in enumerate(
@@ -523,6 +573,8 @@
 
         log.info(f"Rank: {rank}, synchronized after surface reconstruction.")
 
+        self.scenario.heliostat_field.update_surfaces(device=device)
+
         return final_loss_per_heliostat
 
     @staticmethod
diff --git a/artist/data_parser/h5_scenario_parser.py b/artist/data_parser/h5_scenario_parser.py
index 0da309572..acf1d1e9c 100644
--- a/artist/data_parser/h5_scenario_parser.py
+++ b/artist/data_parser/h5_scenario_parser.py
@@ -640,20 +640,30 @@
     )
 
     # For all linear actuators in the rigid body kinematic:
-    # Adapt initial angle of actuator number one according to kinematic initial orientation.
-    # ARTIST always expects heliostats to be initially oriented to the south [0.0, -1.0, 0.0] (in ENU).
-    # The first actuator always rotates along the east-axis.
-    # Since the actuator coordinate system is relative to the heliostat orientation, the initial angle
-    # of actuator number one needs to be transformed accordingly.
-    actuator_parameters_optimizable[
-        index_mapping.actuator_initial_angle, index_mapping.actuator_one_index
-    ] = utils.transform_initial_angle(
-        initial_angle=actuator_parameters_optimizable[
-            index_mapping.actuator_initial_angle, index_mapping.actuator_one_index
-        ].unsqueeze(0),
-        initial_orientation=initial_orientation,
+    # Adapt the initial angle of actuator number one according to the initial surface orientation.
+    # - ARTIST always expects heliostats to be initially oriented to the south -> artist_standard_orientation = [0.0, -1.0, 0.0] (in ENU).
+    # - The first actuator rotates along the east-axis.
+    # - The "initial angle" of the actuator is a relative angle: it is defined relative to the physical geometry of the surface.
+    # - Surfaces in ARTIST are always provided oriented upwards ([0, 0, 1]), even if the initial orientation in a database deviates.
+    #   -> The surface points and normals are always sampled from a model (converted nurbs from deflectometry or ideal nurbs) that lies
+    #      flat on the ground, i.e., the surface normals are always pointing upwards [0.0, 0.0, 1.0].
+    # - The final orientation needs to be computed correctly from the surface orientation and the relative actuator initial angle.
+    # - To ensure final orientations remain consistent, first the rotation from the standard orientation (ARTIST: south) to the actual surface
+    #   orientation is computed (the rotation from south (ARTIST) to up (surfaces)). This rotation is projected onto the
+    #   rotation axis of the first actuator (east-axis) and added to the initial angle. This compensates for the different orientations of the sampled
+    #   surfaces and the relative turning axis of the first actuator.
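+    # Worked example (sketch): rotating from south [0.0, -1.0, 0.0] to up [0.0, 0.0, 1.0]
+    # yields the axis cross(south, up) = [-1.0, 0.0, 0.0] and the angle
+    # acos(dot(south, up)) = pi / 2, so delta_angle = -pi / 2 is added to the
+    # initial angle of actuator number one below.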
+    surface_orientation = torch.tensor([0.0, 0.0, 1.0, 0.0], device=device)
+    artist_standard_orientation = torch.tensor([0.0, -1.0, 0.0, 0.0], device=device)
+    axis, angle = utils.rotation_angle_and_axis(
+        from_orientation=artist_standard_orientation,
+        to_orientation=surface_orientation,
         device=device,
     )
+    delta_angle = axis[0] * angle
+
+    actuator_parameters_optimizable[
+        index_mapping.actuator_initial_angle, index_mapping.actuator_one_index
+    ] += delta_angle
 
     return actuator_parameters_non_optimizable, actuator_parameters_optimizable
diff --git a/artist/data_parser/paint_calibration_parser.py b/artist/data_parser/paint_calibration_parser.py
index 28046abbe..4606df831 100644
--- a/artist/data_parser/paint_calibration_parser.py
+++ b/artist/data_parser/paint_calibration_parser.py
@@ -252,7 +252,9 @@
     total_samples = sum(replication_counter[name] for name in heliostat_names)
 
     calibration_replications = torch.tensor(
-        [replication_counter[name] for name in heliostat_names], device=device
+        [replication_counter[name] for name in heliostat_names],
+        dtype=torch.int32,
+        device=device,
     )
 
     target_area_mapping = torch.empty(
diff --git a/artist/field/heliostat_field.py b/artist/field/heliostat_field.py
index 05bd52555..654b83cd2 100644
--- a/artist/field/heliostat_field.py
+++ b/artist/field/heliostat_field.py
@@ -1,4 +1,5 @@
 import logging
+import math
 from collections import defaultdict
 from collections.abc import Sequence
 from typing import TYPE_CHECKING, cast
@@ -11,6 +12,7 @@
 from artist.data_parser import h5_scenario_parser
 from artist.field.heliostat_group import HeliostatGroup
 from artist.field.surface import Surface
+from artist.util.nurbs import NURBSSurfaces
 
 if TYPE_CHECKING:
     from artist.scenario.configuration_classes import (
@@ -45,6 +47,8 @@
     -------
     from_hdf5()
         Load a heliostat field from an HDF5 file.
+    update_surfaces()
+        Update surface points and normals using new NURBS control points.
""" def __init__( @@ -337,6 +341,8 @@ def from_hdf5( ].append( surface.get_surface_points_and_normals( number_of_points_per_facet=number_of_surface_points_per_facet, + canting=canting, + facet_translations=facet_translation_vectors, device=device, )[index_mapping.surface_points_from_tuple] ) @@ -345,9 +351,17 @@ def from_hdf5( ].append( surface.get_surface_points_and_normals( number_of_points_per_facet=number_of_surface_points_per_facet, + canting=canting, + facet_translations=facet_translation_vectors, device=device, )[index_mapping.surface_normals_from_tuple] ) + grouped_field_data[heliostat_group_key][ + config_dictionary.facets_canting + ].append(canting) + grouped_field_data[heliostat_group_key][ + config_dictionary.facet_translations + ].append(facet_translation_vectors) grouped_field_data[heliostat_group_key][ config_dictionary.facet_control_points ].append(control_points) @@ -403,6 +417,12 @@ def from_hdf5( config_dictionary.surface_normals ], ).reshape(number_of_heliostats_in_group, -1, 4), + canting=grouped_field_data[heliostat_group_name][ + config_dictionary.facets_canting + ], + facet_translations=grouped_field_data[heliostat_group_name][ + config_dictionary.facet_translations + ], nurbs_control_points=grouped_field_data[heliostat_group_name][ config_dictionary.facet_control_points ], @@ -431,3 +451,71 @@ def from_hdf5( ) return cls(heliostat_groups=heliostat_groups, device=device) + + def update_surfaces( + self, + device: torch.device | None = None, + ) -> None: + """ + Update surface points and normals using new nurbs control points. + + Parameter + --------- + device : torch.device | None + The device on which to perform computations or load tensors and models (default is None). + If None, ``ARTIST`` will automatically select the most appropriate + device (CUDA or CPU) based on availability and OS. 
+ """ + device = get_device(device=device) + + for heliostat_group in self.heliostat_groups: + number_of_surface_points_per_facet = int( + math.sqrt( + heliostat_group.surface_points.shape[1] + / heliostat_group.number_of_facets_per_heliostat + ) + ) + evaluation_points = ( + utils.create_nurbs_evaluation_grid( + number_of_evaluation_points=torch.tensor( + [ + number_of_surface_points_per_facet, + number_of_surface_points_per_facet, + ], + device=device, + ), + device=device, + ) + .unsqueeze(index_mapping.heliostat_dimension) + .unsqueeze(index_mapping.facet_index_unbatched) + .expand( + heliostat_group.number_of_heliostats, + heliostat_group.number_of_facets_per_heliostat, + -1, + -1, + ) + ) + nurbs_surfaces = NURBSSurfaces( + degrees=heliostat_group.nurbs_degrees, + control_points=heliostat_group.nurbs_control_points.detach(), + device=device, + ) + ( + new_surface_points, + new_surface_normals, + ) = nurbs_surfaces.calculate_surface_points_and_normals( + evaluation_points=evaluation_points, + canting=heliostat_group.canting, + facet_translations=heliostat_group.facet_translations, + device=device, + ) + heliostat_group.surface_points = new_surface_points.reshape( + heliostat_group.surface_points.shape[0], + -1, + 4, + ).detach() + heliostat_group.surface_normals = new_surface_normals.reshape( + heliostat_group.surface_points.shape[0], + -1, + 4, + ).detach() diff --git a/artist/field/heliostat_group.py b/artist/field/heliostat_group.py index ba3870cef..d2906da52 100644 --- a/artist/field/heliostat_group.py +++ b/artist/field/heliostat_group.py @@ -81,6 +81,8 @@ def __init__( positions: torch.Tensor, surface_points: torch.Tensor, surface_normals: torch.Tensor, + canting: torch.Tensor, + facet_translations: torch.Tensor, initial_orientations: torch.Tensor, nurbs_control_points: torch.Tensor, nurbs_degrees: torch.Tensor, @@ -126,6 +128,8 @@ def __init__( self.positions = positions self.surface_points = surface_points self.surface_normals = surface_normals + self.canting = canting + self.facet_translations = facet_translations self.initial_orientations = initial_orientations self.nurbs_control_points = nurbs_control_points @@ -134,7 +138,7 @@ def __init__( self.kinematic = Kinematic() self.number_of_active_heliostats = 0 - self.active_heliostats_mask = torch.empty( + self.active_heliostats_mask = torch.zeros( self.number_of_heliostats, device=device ) self.active_surface_points = torch.empty_like( @@ -143,6 +147,10 @@ def __init__( self.active_surface_normals = torch.empty_like( self.surface_normals, device=device ) + self.active_canting = torch.empty_like(self.canting, device=device) + self.active_facet_translations = torch.empty_like( + self.facet_translations, device=device + ) self.active_nurbs_control_points = torch.empty_like( self.nurbs_control_points, device=device ) @@ -253,6 +261,12 @@ def activate_heliostats( self.active_surface_normals = self.surface_normals.repeat_interleave( active_heliostats_mask, dim=0 ) + self.active_canting = self.canting.repeat_interleave( + active_heliostats_mask, dim=0 + ) + self.active_facet_translations = self.facet_translations.repeat_interleave( + active_heliostats_mask, dim=0 + ) self.active_nurbs_control_points = self.nurbs_control_points.repeat_interleave( active_heliostats_mask, dim=0 ) diff --git a/artist/field/heliostat_group_rigid_body.py b/artist/field/heliostat_group_rigid_body.py index db3f237d9..b48a7fe77 100644 --- a/artist/field/heliostat_group_rigid_body.py +++ b/artist/field/heliostat_group_rigid_body.py @@ -87,6 +87,8 @@ def 
__init__(
         positions: torch.Tensor,
         surface_points: torch.Tensor,
         surface_normals: torch.Tensor,
+        canting: torch.Tensor,
+        facet_translations: torch.Tensor,
         initial_orientations: torch.Tensor,
         nurbs_control_points: torch.Tensor,
         nurbs_degrees: torch.Tensor,
@@ -143,6 +145,8 @@
             positions=positions,
             surface_points=surface_points,
             surface_normals=surface_normals,
+            canting=canting,
+            facet_translations=facet_translations,
             initial_orientations=initial_orientations,
             nurbs_control_points=nurbs_control_points,
             nurbs_degrees=nurbs_degrees,
diff --git a/artist/field/kinematic_rigid_body.py b/artist/field/kinematic_rigid_body.py
index acfc03554..dd4063d04 100644
--- a/artist/field/kinematic_rigid_body.py
+++ b/artist/field/kinematic_rigid_body.py
@@ -311,8 +311,14 @@
             The orientation matrices with the initial orientation offset.
             Tensor of shape [number_of_active_heliostats, 4, 4].
         """
+        # The surface points and normals are always sampled from a model (converted nurbs from deflectometry or ideal nurbs) that lies
+        # flat on the ground, i.e., the surface normals are pointing upwards [0.0, 0.0, 1.0]. Since ARTIST expects the points and normals
+        # to be initially oriented to the south, an extra rotation needs to be applied.
+        sampled_surface_model_orientation = torch.tensor(
+            [[0.0, 0.0, 1.0, 0.0]], device=device
+        ).expand(self.number_of_active_heliostats, 4)
         east_angles, north_angles, up_angles = utils.decompose_rotations(
-            initial_vector=self.active_initial_orientations,
+            initial_vector=sampled_surface_model_orientation,
             target_vector=self.artist_standard_orientation,
             device=device,
         )
diff --git a/artist/field/surface.py b/artist/field/surface.py
index 31268a23c..3a42c97d0 100644
--- a/artist/field/surface.py
+++ b/artist/field/surface.py
@@ -46,22 +46,25 @@
         """
         device = get_device(device=device)
 
-        self.nurbs_facets = []
+        degrees = surface_config.facet_list[index_mapping.first_facet].degrees
+        control_points = []
         for facet_config in surface_config.facet_list:
-            self.nurbs_facets.append(
-                NURBSSurfaces(
-                    degrees=facet_config.degrees,
-                    control_points=facet_config.control_points.unsqueeze(0).unsqueeze(
-                        0
-                    ),
-                    device=device,
-                )
-            )
+            control_points.append(facet_config.control_points)
+
+        control_points = torch.stack(control_points)
+
+        self.nurbs_surface = NURBSSurfaces(
+            degrees=degrees,
+            control_points=control_points.unsqueeze(index_mapping.heliostat_dimension),
+            device=device,
+        )
 
     def get_surface_points_and_normals(
         self,
         number_of_points_per_facet: torch.Tensor,
+        canting: torch.Tensor,
+        facet_translations: torch.Tensor,
         device: torch.device | None = None,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """
@@ -86,35 +89,35 @@
         """
         device = get_device(device=device)
 
-        evaluation_points = utils.create_nurbs_evaluation_grid(
-            number_of_evaluation_points=number_of_points_per_facet, device=device
+        evaluation_points = (
+            utils.create_nurbs_evaluation_grid(
+                number_of_evaluation_points=number_of_points_per_facet, device=device
+            )
+            .unsqueeze(index_mapping.heliostat_dimension)
+            .unsqueeze(index_mapping.facet_index_unbatched)
+            .expand(1, self.nurbs_surface.number_of_facets_per_surface, -1, -1)
        )
 
-        # The surface points and surface normals will be returned as tensors of shape:
-        # [number_of_facets, number_of_surface_points_per_facet, 4] and
-        # [number_of_facets, number_of_surface_normals_per_facet, 4].
- surface_points = torch.empty( - len(self.nurbs_facets), - evaluation_points.shape[ - index_mapping.number_of_points_or_normals_per_facet - ], - 4, - device=device, - ) - surface_normals = torch.empty( - len(self.nurbs_facets), - evaluation_points.shape[ - index_mapping.number_of_points_or_normals_per_facet - ], - 4, - device=device, - ) - for i, nurbs_facet in enumerate(self.nurbs_facets): + if torch.all(self.nurbs_surface.control_points[..., 2] == 0): + ( + surface_points, + surface_normals, + ) = self.nurbs_surface.calculate_surface_points_and_normals( + evaluation_points=evaluation_points, + canting=canting.unsqueeze(index_mapping.heliostat_dimension), + facet_translations=facet_translations.unsqueeze( + index_mapping.heliostat_dimension + ), + device=device, + ) + else: ( - surface_points[i], - surface_normals[i], - ) = nurbs_facet.calculate_surface_points_and_normals( - evaluation_points=evaluation_points.unsqueeze(0).unsqueeze(0), + surface_points, + surface_normals, + ) = self.nurbs_surface.calculate_surface_points_and_normals( + evaluation_points=evaluation_points, + canting=None, + facet_translations=None, device=device, ) return surface_points, surface_normals diff --git a/artist/scenario/scenario.py b/artist/scenario/scenario.py index 81f2e2901..2f1599587 100644 --- a/artist/scenario/scenario.py +++ b/artist/scenario/scenario.py @@ -36,6 +36,8 @@ class Scenario: Methods ------- + get_number_of_heliostat_groups_from_hdf5() + Get the number of heliostat groups to initiate distributed setup from the HDF5 scenario file. load_scenario_from_hdf5() Class method to load the scenario from an HDF5 file. index_mapping() diff --git a/artist/scenario/surface_generator.py b/artist/scenario/surface_generator.py index ca31d1e0a..fe6388dae 100644 --- a/artist/scenario/surface_generator.py +++ b/artist/scenario/surface_generator.py @@ -22,7 +22,7 @@ class SurfaceGenerator: Tensor of shape [2]. degrees : torch.Tensor Degree of the NURBS along each direction of each 2D facet. - Tensor of shape [2]. + Tensor of shape [2]. Methods ------- @@ -193,7 +193,10 @@ def fit_nurbs( epoch = 0 while loss > tolerance and epoch <= max_epoch: points, normals = nurbs_surface.calculate_surface_points_and_normals( - evaluation_points=evaluation_points, device=device + evaluation_points=evaluation_points, + canting=None, + facet_translations=None, + device=device, ) optimizer.zero_grad() diff --git a/artist/scene/light_source.py b/artist/scene/light_source.py index 3cefd1e0d..f2fc5e5d5 100644 --- a/artist/scene/light_source.py +++ b/artist/scene/light_source.py @@ -69,7 +69,7 @@ def from_hdf5( def get_distortions( self, number_of_points: int, - number_of_heliostats: int, + number_of_active_heliostats: int, random_seed: int = 7, ) -> tuple[torch.Tensor, torch.Tensor]: """ @@ -84,7 +84,7 @@ def get_distortions( The number of points on the heliostat from which rays are reflected. number_of_facets : int The number of facets for each heliostat (default: 4). - number_of_heliostats : int + number_of_active_heliostats : int The number of heliostats in the scenario (default: 1). random_seed : int The random seed to enable result replication (default: 7). 
diff --git a/artist/scene/sun.py b/artist/scene/sun.py
index 0ba882d29..a99b231a2 100644
--- a/artist/scene/sun.py
+++ b/artist/scene/sun.py
@@ -213,7 +213,7 @@ def from_hdf5(
     def get_distortions(
         self,
         number_of_points: int,
-        number_of_heliostats: int,
+        number_of_active_heliostats: int,
         random_seed: int = 7,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """
@@ -225,7 +225,7 @@
             The number of points on the heliostat from which rays are reflected.
         number_of_facets : int
             The number of facets for each heliostat (default: 4).
-        number_of_heliostats : int
+        number_of_active_heliostats : int
             The number of heliostats in the scenario (default: 1).
         random_seed : int
             The random seed to enable result replication (default: 7).
@@ -240,7 +240,7 @@
 
         distortions_u, distortions_e = self.distribution.sample(
             (
-                number_of_heliostats,
+                number_of_active_heliostats,
                 self.number_of_rays,
                 number_of_points,
             ),
diff --git a/artist/util/__init__.py b/artist/util/__init__.py
index 269eabbe0..6942a6e1c 100644
--- a/artist/util/__init__.py
+++ b/artist/util/__init__.py
@@ -2,6 +2,8 @@
 import logging
 import sys
+import time
+from functools import wraps
 from pathlib import Path
 
 import colorlog
@@ -63,3 +65,76 @@
         file_handler.setFormatter(simple_formatter)
         base_logger.addHandler(file_handler)
     base_logger.setLevel(level)
+
+
+def set_runtime_logger(
+    log_file: str | Path = "runtime_log.txt",
+    level: int = logging.INFO,
+) -> logging.Logger:
+    """
+    Configure and return a shared runtime logger that logs execution times of functions.
+
+    Parameters
+    ----------
+    log_file : str | Path
+        The file path to write runtime logs (default is "runtime_log.txt").
+    level : int
+        The logging level (default is logging.INFO).
+
+    Returns
+    -------
+    logging.Logger
+        The configured runtime logger.
+    """
+    logger_name = "artist.runtime"
+    logger = logging.getLogger(logger_name)
+    logger.setLevel(level)
+
+    if not logger.handlers:
+        log_file = Path(log_file)
+        log_file.parent.mkdir(parents=True, exist_ok=True)
+
+        file_handler = logging.FileHandler(log_file)
+        file_handler.setFormatter(
+            logging.Formatter(
+                "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s",
+                datefmt="%Y-%m-%d %H:%M:%S",
+            )
+        )
+        logger.addHandler(file_handler)
+
+    return logger
+
+
+def track_runtime(logger: logging.Logger):
+    """
+    Track and log start, finish, and duration of function execution.
+
+    Parameters
+    ----------
+    logger : logging.Logger
+        The runtime logger.
+
+    Returns
+    -------
+    Callable
+        The decorated function with runtime tracking.
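+
+    Examples
+    --------
+    A minimal, illustrative sketch using the module-level runtime logger:
+
+    >>> @track_runtime(runtime_log)
+    ... def compute():
+    ...     return 2 + 2
+    >>> compute()
+    4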
+ """ + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + func_name = func.__name__ + logger.info(f"{func_name} started") + start_time = time.perf_counter() + result = func(*args, **kwargs) + duration = time.perf_counter() - start_time + logger.info(f"{func_name} finished in {duration:.3f}s") + return result + + return wrapper + + return decorator + + +runtime_log = set_runtime_logger("./runtime_log.txt") diff --git a/artist/util/config_dictionary.py b/artist/util/config_dictionary.py index 6afdfb706..efe3c7b8c 100644 --- a/artist/util/config_dictionary.py +++ b/artist/util/config_dictionary.py @@ -49,6 +49,8 @@ """Key to access the translation vector indicating the position of the facet relative to the center of the heliostat.""" facets_canting = "canting" """Key to access the facet canting in the east and north direction.""" +facet_translations = "facet_translations" +"""Key to access the facet translation vectors.""" first_joint_translation_e = "first_joint_translation_e" """Key to access the kinematic first joint translation in the east direction.""" @@ -123,7 +125,6 @@ """Int defining that the actuator used is a linear actuator.""" linear_actuator_int = 0 - surface_prototype_key = "surface" """Key to access the surface prototype.""" kinematic_prototype_key = "kinematic" @@ -225,22 +226,28 @@ heliostat_data_mapping = "heliostat_data_mapping" """Key to access a heliostat data mapping.""" +optimization = "optimization" +"""Key to access the dict for the optimization.""" initial_learning_rate = "initial_learning_rate" """Key to access the initial learning rate of an optimizer.""" tolerance = "tolerance" """Key to access the tolerance of an optimization.""" max_epoch = "max_epoch" """Key to access the maximum epoch number of an optimization.""" +batch_size = "batch_size" +"""Key to access the batch size for a ray tracer.""" log_step = "log_step" """Key to access the step for log statements of an optimization.""" early_stopping_delta = "early_stopping_delta" """Key to access the early stopping delta of an optimization.""" early_stopping_patience = "early_stopping_patience" """Key to access the early stopping patience of an optimization.""" +early_stopping_window = "early_stopping_window" +"""Key to access the early stopping window.""" scheduler = "scheduler" -"""Key to access the name of a learning rate scheduler.""" -scheduler_parameters = "scheduler_parameters" -"""Key to access the learning rate scheduler parameters.""" +"""Key to access the learning rate scheduler.""" +scheduler_type = "scheduler_type" +"""Key to access the scheduler type.""" exponential = "exponential" """Key to access an exponential learning rate scheduler.""" cyclic = "cyclic" @@ -263,28 +270,27 @@ """Key to access the threshold of a reduce on plateau learning rate scheduler.""" cooldown = "cooldown" """Key to access the cooldown parameter of a reduce on plateau learning rate scheduler.""" +constraints = "constraints" +"""Key to access the constraints dict.""" regularizers = "regularizers" """Key to access regularizers.""" -regularization_callable = "regularization_callable" -"""Key to access the callable name of a regularizer.""" -weight = "weight" -"""Key to access the weight of a regularizer.""" -regularizers_parameters = "regularizers_parameters" -"""Key to access the parameters of a regularizer.""" -vector_loss = "vector_loss" -"""Key to access the vector loss function.""" -total_variation_loss = "total_variation_loss" -"""Key to access the total variation loss function.""" 
-ideal_surface_loss = "ideal_surface_loss" -"""Key to access the ideal surface loss regularizer.""" -total_variation_loss_points = "total_variation_loss_points" -"""Key to access the total variation loss regularizer for surface points.""" -total_variation_loss_normals = "total_variation_loss_normals" -"""Key to access the total variation loss regularizer for surface normals.""" -number_of_neighbors = "number_of_neighbors" -"""Key to access the number of neighbors parameter of a regularizer.""" -sigma = "sigma" -"""Key to access the sigma parameter of a regularizer.""" +weight_smoothness = "weight_smoothness" +"""Key to access the weight for the smoothness regularizer.""" +weight_ideal_surface = "weight_ideal_surface" +"""Key to access the weight for the ideal surface regularizer.""" +initial_lambda_energy = "initial_lambda_energy" +"""Key to access the initial lambda for the energy constraint.""" +rho_energy = "rho_energy" +"""Key to access rho for the energy constraint.""" +energy_tolerance = "energy_tolerance" +"""Key to access the tolerance for the energy constraint.""" +max_flux_density = "max_flux_density" +"""Key to access the maximum allowed flux density.""" +rho_pixel = "rho_pixel" +"""Key to access rho for the energy per pixel constraint.""" +lambda_lr = "lambda_lr" +"""Key to access the learning rate for the lambda in the constraint.""" + device = "device" """Key to access the device.""" @@ -306,3 +312,16 @@ """Key to access the world size within a process subgroups.""" ranks_to_groups_mapping = "ranks_to_groups_mapping" """Key to access the mapping from ranks to heliostat groups.""" + +left_node = "left" +"""Key to access the left nodes in a binary radix tree used for blocking.""" +right_node = "right" +"""Key to access the right nodes in a binary radix tree used for blocking.""" +aabb_min = "aabb_min" +"""Key to access the minimum of the axis aligned bounding boxes.""" +aabb_max = "aabb_max" +"""Key to access the maximum of the axis aligned bounding boxes.""" +is_leaf = "is_leaf" +"""Key to access the leaf property of the binary radix trees used in blocking.""" +primitive_index = "primitive_index" +"""Key to access the blocking primitives indices.""" diff --git a/artist/util/nurbs.py b/artist/util/nurbs.py index 956199da9..1138e9795 100644 --- a/artist/util/nurbs.py +++ b/artist/util/nurbs.py @@ -1,6 +1,6 @@ import torch -from artist.util import index_mapping +from artist.util import index_mapping, utils from artist.util.environment_setup import get_device @@ -34,9 +34,9 @@ class NURBSSurfaces(torch.nn.Module): Methods ------- - calculate_knot_vector() + calculate_uniform_knot_vectors() Calculate the knot vectors for all surfaces in one direction. - find_span() + find_spans() Determine the knot spans in one direction. basis_functions_and_derivatives() Compute the nonzero derivatives of the basis functions up to the nth-derivative. 
@@ -488,6 +488,8 @@ def _batched_gather_control_points(
     def calculate_surface_points_and_normals(
         self,
         evaluation_points: torch.Tensor,
+        canting: torch.Tensor | None,
+        facet_translations: torch.Tensor | None,
         device: torch.device | None = None,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """
@@ -623,7 +625,7 @@
                         basis_values_derivatives_v[t][s].unsqueeze(-1) * temp[s]
                     )
 
-        normals = torch.linalg.cross(
+        surface_normals = torch.linalg.cross(
            derivatives[
                :,
                :,
                :,
                :3,
            ],
            derivatives[
                :,
                :,
                :,
                :3,
            ],
        )
-        normals = torch.nn.functional.normalize(
-            normals, dim=index_mapping.nurbs_normals
+
+        surface_points_homogeneous = derivatives[
+            :,
+            :,
+            :,
+            index_mapping.nurbs_derivative_order_0,
+            index_mapping.nurbs_derivative_order_0,
+        ]
+
+        homogeneous_weights = surface_points_homogeneous[:, :, :, 3:4]
+        surface_points = (
+            surface_points_homogeneous[:, :, :, : index_mapping.slice_fourth_dimension]
+            / homogeneous_weights
+        )
+
+        surface_points = torch.cat(
+            (
+                surface_points,
+                torch.ones(
+                    tuple(surface_points.shape[:3]) + (1,),
+                    device=device,
+                ),
+            ),
+            dim=index_mapping.nurbs_normals,
+        )
+
+        surface_normals = torch.nn.functional.normalize(
+            surface_normals, dim=index_mapping.nurbs_normals
         )
-        normals = torch.cat(
+        surface_normals = torch.cat(
             (
-                normals,
+                surface_normals,
                 torch.zeros(
-                    tuple(normals.shape[: index_mapping.nurbs_normals]) + (1,),
+                    tuple(surface_normals.shape[: index_mapping.nurbs_normals]) + (1,),
                     device=device,
                 ),
             ),
             dim=index_mapping.nurbs_normals,
         )
 
-        return derivatives[
-            :,
-            :,
-            :,
-            index_mapping.nurbs_derivative_order_0,
-            index_mapping.nurbs_derivative_order_0,
-        ], normals
+        if canting is not None:
+            canted_surface_points = utils.perform_canting(
+                canting_angles=canting, data=surface_points, device=device
+            )
+            transformed_surface_points = (
+                canted_surface_points
+                + facet_translations.reshape(
+                    self.number_of_surfaces, self.number_of_facets_per_surface, 1, 4
+                )
+            )
+            transformed_surface_normals = utils.perform_canting(
+                canting_angles=canting, data=surface_normals, device=device
+            )
+            return transformed_surface_points, transformed_surface_normals
+
+        return surface_points, surface_normals
 
     def forward(
-        self, evaluation_points: torch.Tensor, device: torch.device | None = None
+        self,
+        evaluation_points: torch.Tensor,
+        canting: torch.Tensor | None,
+        facet_translations: torch.Tensor | None,
+        device: torch.device | None = None,
     ) -> tuple[torch.Tensor, torch.Tensor]:
         """
         Specify the forward operation of the NURBS, i.e., calculate the surface points and normals.
@@ -692,5 +733,8 @@
         device = get_device(device=device)
 
         return self.calculate_surface_points_and_normals(
-            evaluation_points=evaluation_points, device=device
+            evaluation_points=evaluation_points,
+            canting=canting,
+            facet_translations=facet_translations,
+            device=device,
         )
diff --git a/artist/util/raytracing_utils.py b/artist/util/raytracing_utils.py
index a866e334b..25978f813 100644
--- a/artist/util/raytracing_utils.py
+++ b/artist/util/raytracing_utils.py
@@ -71,11 +71,6 @@
         If None, ``ARTIST`` will automatically select the most appropriate
         device (CUDA or CPU) based on availability and OS.
 
-    Raises
-    ------
-    ValueError
-        If there are no intersections on the front of the target plane.
- Returns ------- torch.Tensor @@ -93,55 +88,41 @@ def line_plane_intersections( dtype=torch.int32, device=device, ) + plane_normals = target_areas.normal_vectors[target_area_mask] + plane_centers = target_areas.centers[target_area_mask] # Use Lambert’s Cosine Law to calculate the relative intensities of the reflected rays on the planes. # The relative intensities are calculated by taking the dot product (matrix multiplication) of the planes' # unit normal vectors and the normalized ray-direction vectors, pointing from the planes to the sources. # This determines how much the rays align with the plane normals. - relative_intensities = ( - -rays.ray_directions - * target_areas.normal_vectors[target_area_mask][:, None, None, :] - ).sum(dim=index_mapping.ray_intensities) + relative_intensities = (-rays.ray_directions * plane_normals[:, None, None, :]).sum( + dim=-1 + ) - if (relative_intensities <= epsilon).all(): - raise ValueError("No ray intersections on the front of the target area planes.") + front_facing_mask = relative_intensities > epsilon # Calculate the intersections on the plane of each ray. # First, calculate the projections of the ray origins onto the planes' normals. # This indicates how far the ray origins are from the planes (along the normal directions of the planes). # Next, calculate the scalar distances along the ray directions from the ray origins to the intersection points on the planes. # This indicates how far the intersection points are along the rays' directions. - intersection_distances = ( - ( - (points_at_ray_origins - target_areas.centers[target_area_mask][:, None, :]) - * target_areas.normal_vectors[target_area_mask][:, None, :] - ).sum(dim=index_mapping.intersection_distances) - ).unsqueeze(index_mapping.number_rays_per_point) / relative_intensities - - # Combine to get the intersections - intersections = points_at_ray_origins.unsqueeze( - index_mapping.number_rays_per_point - ) + rays.ray_directions * intersection_distances.unsqueeze( - index_mapping.intersection_distances_batched + numerator = ( + (points_at_ray_origins - plane_centers[:, None, :]) * plane_normals[:, None, :] + ).sum(dim=-1)[:, None, :] + + intersection_distances = torch.where( + front_facing_mask, + numerator / torch.clamp(relative_intensities, min=epsilon), + torch.zeros_like(relative_intensities), ) - # Calculate the absolute intensities of the rays hitting the target planes. - # Use the inverse-square law for distance attenuations from the heliostats to target planes. 
-    distance_attenuations = (
-        1
-        / (
-            torch.norm(
-                (
-                    points_at_ray_origins
-                    - target_areas.centers[target_area_mask][:, None, :]
-                ),
-                dim=index_mapping.points_dimension,
-            )
-            ** 2
-        )
-    ).unsqueeze(index_mapping.number_rays_per_point)
-    absolute_intensities = (
-        rays.ray_magnitudes * relative_intensities * distance_attenuations
+    intersections = (
+        points_at_ray_origins[:, None, :, :]
+        + rays.ray_directions * intersection_distances[:, :, :, None]
     )
 
+    absolute_intensities = rays.ray_magnitudes * relative_intensities
+
+    absolute_intensities = absolute_intensities * front_facing_mask
+
     return intersections, absolute_intensities
diff --git a/artist/util/utils.py b/artist/util/utils.py
index 8047eb4f7..9a110ed21 100644
--- a/artist/util/utils.py
+++ b/artist/util/utils.py
@@ -442,59 +442,22 @@
     return theta_components[:, 0], theta_components[:, 1], theta_components[:, 2]
 
 
-def angle_between_vectors(
-    vector_1: torch.Tensor, vector_2: torch.Tensor
-) -> torch.Tensor:
-    """
-    Calculate the angle between two vectors.
-
-    Parameters
-    ----------
-    vector_1 : torch.Tensor
-        The first vector.
-    vector_2 : torch.Tensor
-        The second vector.
-
-    Return
-    ------
-    torch.Tensor
-        The angle between the input vectors.
-    """
-    dot_product = torch.dot(vector_1, vector_2)
-
-    norm_u = torch.norm(vector_1)
-    norm_v = torch.norm(vector_2)
-
-    angle = dot_product / (norm_u * norm_v)
-
-    angle = torch.clamp(angle, -1.0, 1.0)
-
-    angle = torch.acos(angle)
-
-    return angle
-
-
-def transform_initial_angle(
-    initial_angle: torch.Tensor,
-    initial_orientation: torch.Tensor,
+def rotation_angle_and_axis(
+    from_orientation: torch.Tensor,
+    to_orientation: torch.Tensor,
     device: torch.device | None = None,
-) -> torch.Tensor:
+) -> tuple[torch.Tensor, torch.Tensor]:
     """
-    Compute the transformed angle of an initial angle in a rotated coordinate system.
-
-    This function accounts for a known offset, the initial angle, in the
-    initial orientation vector. The offset represents a rotation around the
-    east-axis. When the coordinate system is rotated to align
-    the initial orientation with the ``ARTIST`` standard orientation, the axis for
-    the offset rotation also changes. This function calculates the equivalent
-    transformed angle for the offset in the rotated coordinate system.
+    Compute the rotation axis and angle between two orientations.
 
     Parameters
     ----------
-    initial_angle : torch.Tensor
-        The initial angle, or offset along the east-axis.
-    initial_orientation : torch.Tensor
-        The initial orientation of the coordinate system.
+    from_orientation : torch.Tensor
+        The original orientation.
+        Tensor of shape [4].
+    to_orientation : torch.Tensor
+        The rotated orientation.
+        Tensor of shape [4].
     device : torch.device | None
         The device on which to perform computations or load tensors and models (default is None).
         If None, ``ARTIST`` will automatically select the most appropriate
@@ -503,29 +466,28 @@
 
     Returns
     -------
     torch.Tensor
-        The transformed angle in the rotated coordinate system.
+        The rotation axis.
+        Tensor of shape [3].
+    torch.Tensor
+        The angle of the rotation.
+        Scalar tensor.
     """
     device = get_device(device=device)
 
-    # ARTIST is oriented towards the south ([0.0, -1.0, 0.0]) ENU.
-    artist_standard_orientation = torch.tensor([0.0, -1.0, 0.0, 0.0], device=device)
+    from_orientation = from_orientation[:3] / torch.norm(from_orientation[:3])
+    to_orientation = to_orientation[:3] / torch.norm(to_orientation[:3])
 
-    # Apply the rotation by the initial angle to the initial orientation.
-    initial_orientation_with_offset = initial_orientation @ rotate_e(
-        e=initial_angle,
-        device=device,
-    ).squeeze(index_mapping.unbatched_tensor_values)
-
-    # Compute the transformed angle relative to the reference orientation.
-    transformed_initial_angle = angle_between_vectors(
-        initial_orientation[: index_mapping.slice_fourth_dimension],
-        initial_orientation_with_offset[: index_mapping.slice_fourth_dimension],
-    ) - angle_between_vectors(
-        initial_orientation[: index_mapping.slice_fourth_dimension],
-        artist_standard_orientation[: index_mapping.slice_fourth_dimension],
+    axis = torch.linalg.cross(from_orientation, to_orientation)
+    axis_norm = torch.norm(axis)
+    if axis_norm < 1e-6:
+        # Parallel orientations need no rotation; antiparallel orientations are
+        # rotated by pi around an arbitrary axis perpendicular to from_orientation.
+        if torch.dot(from_orientation, to_orientation) > 0:
+            return torch.tensor([1.0, 0.0, 0.0], device=device), torch.tensor(
+                0.0, device=device
+            )
+        perpendicular = torch.linalg.cross(
+            from_orientation, torch.tensor([1.0, 0.0, 0.0], device=device)
+        )
+        if torch.norm(perpendicular) < 1e-6:
+            perpendicular = torch.tensor([0.0, 0.0, 1.0], device=device)
+        return perpendicular / torch.norm(perpendicular), torch.tensor(
+            torch.pi, device=device
+        )
+    axis = axis / axis_norm
+    angle = torch.acos(
+        torch.clamp(torch.dot(from_orientation, to_orientation), -1.0, 1.0)
     )
-
-    return transformed_initial_angle
+    return axis, angle
 
 
 def get_center_of_mass(
@@ -750,121 +712,69 @@
     control_points[:, :, :, index_mapping.n] = control_points_n
     control_points[:, :, :, index_mapping.u] = 0
 
-    # The control points for each facet are initialized as a flat equidistant grid centered around the origin.
-    # Each facet needs to be canted according to the provided angles and translated to the actual facet position.
-    rotation_matrix = torch.zeros((number_of_facets, 4, 4), device=device)
+    return control_points
 
-    rotation_matrix[:, :, index_mapping.e] = torch.nn.functional.normalize(
-        canting[:, index_mapping.e]
-    )
-    rotation_matrix[:, :, index_mapping.n] = torch.nn.functional.normalize(
-        canting[:, index_mapping.n]
-    )
-    rotation_matrix[:, : index_mapping.slice_fourth_dimension, index_mapping.u] = (
-        torch.nn.functional.normalize(
-            torch.linalg.cross(
-                rotation_matrix[
-                    :, : index_mapping.slice_fourth_dimension, index_mapping.e
-                ],
-                rotation_matrix[
-                    :, : index_mapping.slice_fourth_dimension, index_mapping.n
-                ],
-            ),
-            dim=0,
-        )
-    )
-
-    rotation_matrix[
-        :, index_mapping.transform_homogenous, index_mapping.transform_homogenous
-    ] = 1.0
 
-    canted_points = (
-        convert_3d_points_to_4d_format(points=control_points, device=device).reshape(
-            number_of_facets, -1, 4
-        )
-        @ rotation_matrix.mT
-    ).reshape(
-        number_of_facets,
-        control_points.shape[index_mapping.control_points_u_facet_batched],
-        control_points.shape[index_mapping.control_points_v_facet_batched],
-        4,
-    )
-
-    canted_with_translation = (
-        canted_points + facet_translation_vectors[:, None, None, :]
-    )
-
-    return canted_with_translation[:, :, :, : index_mapping.slice_fourth_dimension]
-
-
-def normalize_bitmaps(
-    flux_distributions: torch.Tensor,
-    target_area_widths: torch.Tensor,
-    target_area_heights: torch.Tensor,
-    number_of_rays: torch.Tensor | int,
+def perform_canting(
+    canting_angles: torch.Tensor,
+    data: torch.Tensor,
+    inverse: bool = False,
+    device: torch.device | None = None,
 ) -> torch.Tensor:
     """
-    Normalize a bitmap.
+    Perform canting (rotation) on data like surface points or surface normals.
 
     Parameters
     ----------
-    flux_distributions : torch.Tensor
-        The flux distributions to be normalized.
-        Tensor of shape [number_of_bitmaps, bitmap_resolution_e, bitmap_resolution_u].
-    target_area_widths : torch.Tensor
-        The target area widths.
-        Tensor of shape [number_of_bitmaps].
-    target_area_heights : torch.Tensor
-        The target area heights.
-        Tensor of shape [number_of_bitmaps].
-    number_of_rays : torch.Tensor | int
-        The number of rays used to generate the flux.
-        Tensor of shape [number_of_bitmaps].
+    canting_angles : torch.Tensor
+        The canting angles.
+        Tensor of shape [number_of_surfaces, number_of_facets, 2, 4].
+    data : torch.Tensor
+        Data to be canted.
+        Tensor of shape [number_of_surfaces, number_of_facets, number_of_points_per_facet, 4].
+    inverse : bool
+        Indicates the direction of the rotation: use ``inverse=False`` for canting and ``inverse=True`` for decanting (default is False).
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
 
     Returns
     -------
     torch.Tensor
-        The normalized and scaled flux density distributions.
-        Tensor of shape [number_of_bitmaps, bitmap_resolution_e, bitmap_resolution_u].
+        The (de-)canted data.
+        Tensor of shape [number_of_surfaces, number_of_facets, number_of_points_per_facet, 4].
     """
-    plane_areas = target_area_widths * target_area_heights
-    num_pixels = (
-        flux_distributions.shape[index_mapping.batched_bitmap_e]
-        * flux_distributions.shape[index_mapping.batched_bitmap_u]
+    number_of_surfaces = data.shape[index_mapping.heliostat_dimension]
+    number_of_facets_per_surface = data.shape[index_mapping.facet_dimension]
+    rotation_matrix = torch.zeros(
+        (number_of_surfaces, number_of_facets_per_surface, 4, 4), device=device
     )
-    plane_area_per_pixel = plane_areas / num_pixels
 
-    normalized_fluxes = flux_distributions / (
-        number_of_rays * plane_area_per_pixel
-    ).unsqueeze(-1).unsqueeze(-1)
+    e = canting_angles[:, :, index_mapping.e, : index_mapping.slice_fourth_dimension]
+    n = canting_angles[:, :, index_mapping.n, : index_mapping.slice_fourth_dimension]
+    u = torch.linalg.cross(e, n, dim=2)
 
-    std = torch.std(
-        normalized_fluxes,
-        dim=(index_mapping.batched_bitmap_e, index_mapping.batched_bitmap_u),
-        keepdim=True,
+    rotation_matrix[:, :, : index_mapping.slice_fourth_dimension, index_mapping.e] = (
+        torch.nn.functional.normalize(e, dim=-1)
+    )
+    rotation_matrix[:, :, : index_mapping.slice_fourth_dimension, index_mapping.n] = (
+        torch.nn.functional.normalize(n, dim=-1)
+    )
+    rotation_matrix[:, :, : index_mapping.slice_fourth_dimension, index_mapping.u] = (
+        torch.nn.functional.normalize(u, dim=-1)
     )
-    std = std + 1e-6
-
-    standardized = (
-        normalized_fluxes
-        - torch.mean(
-            normalized_fluxes,
-            dim=(index_mapping.batched_bitmap_e, index_mapping.batched_bitmap_u),
-            keepdim=True,
-        )
-    ) / std
 
-    valid_mask = (
-        flux_distributions.sum(
-            dim=(index_mapping.batched_bitmap_e, index_mapping.batched_bitmap_u),
-            keepdim=True,
-        )
-        != 0
-    ).float()
+    rotation_matrix[
+        :, :, index_mapping.transform_homogenous, index_mapping.transform_homogenous
+    ] = 1.0
 
-    result = standardized * valid_mask
+    if inverse:
+        canted_data = data @ rotation_matrix
+    else:
+        canted_data = data @ rotation_matrix.mT
 
-    return result
+    return canted_data
 
 
 def trapezoid_distribution(
diff --git a/docs/tutorial_distributed_raytracing.rst b/docs/tutorial_distributed_raytracing.rst
index c3f15b5a7..76c0f1f53 100644
--- a/docs/tutorial_distributed_raytracing.rst
+++
b/docs/tutorial_distributed_raytracing.rst @@ -73,7 +73,7 @@ To map each heliostat with its designated target area and incident ray direction .. code-block:: - # heliostat_target_light_source_mapping = [ + heliostat_target_light_source_mapping = [ ("heliostat_1", "target_name_2", incident_ray_direction_tensor_1), ("heliostat_2", "target_name_2", incident_ray_direction_tensor_2), (...) diff --git a/docs/tutorial_kinematic_reconstruction.rst b/docs/tutorial_kinematic_reconstruction.rst index 904ac7f0d..ecf232574 100644 --- a/docs/tutorial_kinematic_reconstruction.rst +++ b/docs/tutorial_kinematic_reconstruction.rst @@ -92,7 +92,7 @@ AA31, AA39, and AC43. Next, you can load the scenario and set up the distributed environment as in previous tutorials. -Configuring Scheduler and Optimizer +Configuring Optimizer and Scheduler ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ As in the :ref:`surface reconstruction` tutorial, the kinematic reconstructor also uses the @@ -101,30 +101,30 @@ and the optimization configuration: .. code-block:: - scheduler = ( - config_dictionary.exponential - ) # exponential, cyclic or reduce_on_plateau - scheduler_parameters = { + optimizer_dict = { + config_dictionary.initial_learning_rate: 0.0005, + config_dictionary.tolerance: 0.0005, + config_dictionary.max_epoch: 100, + config_dictionary.batch_size: 50, + config_dictionary.log_step: 3, + config_dictionary.early_stopping_delta: 1e-4, + config_dictionary.early_stopping_patience: 300, + config_dictionary.early_stopping_window: 300, + } + scheduler_dict = { + config_dictionary.scheduler_type: config_dictionary.reduce_on_plateau, config_dictionary.gamma: 0.9, config_dictionary.min: 1e-6, config_dictionary.max: 1e-3, config_dictionary.step_size_up: 500, - config_dictionary.reduce_factor: 0.3, - config_dictionary.patience: 10, + config_dictionary.reduce_factor: 0.0001, + config_dictionary.patience: 50, config_dictionary.threshold: 1e-3, config_dictionary.cooldown: 10, } - - # Set optimization parameters. optimization_configuration = { - config_dictionary.initial_learning_rate: 0.0005, - config_dictionary.tolerance: 0.0005, - config_dictionary.max_epoch: 1000, - config_dictionary.log_step: 100, - config_dictionary.early_stopping_delta: 1e-4, - config_dictionary.early_stopping_patience: 10, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, } Now we are ready to set up the kinematic reconstructor. diff --git a/docs/tutorial_motor_position_optimization.rst b/docs/tutorial_motor_position_optimization.rst index 9b792d037..3c40edf95 100644 --- a/docs/tutorial_motor_position_optimization.rst +++ b/docs/tutorial_motor_position_optimization.rst @@ -19,64 +19,100 @@ The tutorial will walk you through the key concepts needed to perform this optim Before starting this scenario make sure you already know how to :ref:`load a scenario`, run ``ARTIST`` in a :ref:`distributed environment`, and understand the structure of a :ref:`scenario`. If you are not using your own scenario, we recommend using one of the -"test_scenario_paint_multiple_heliostat_groups_deflectometry.h5" or "test_scenario_paint_multiple_heliostat_groups_ideal.h5" -scenarios provided in the "scenarios" folder. +``test_scenario_paint_multiple_heliostat_groups_deflectometry.h5`` or ``test_scenario_paint_multiple_heliostat_groups_ideal.h5`` +scenarios provided in the ``/scenarios`` folder. 
 Before getting started, you need to load the scenario and set up the distributed environment, as in previous tutorials.
 
 Motor position optimization aims to optimize the motor positions of multiple heliostats to achieve a desired flux density
 on the solar tower. In this case, we focus on achieving a trapezoid distribution on the receiver, which is equivalent to
 all areas of the receiver receiving the same amount of sunlight. This would lead to an optimal flux distribution and
-improve operation of the power plant. Therefore, we have to define the ground truth with a trapezoid distribution and
-also set the loss function as the ``KLDivergenceLoss``:
+improve operation of the power plant. Therefore, we have to define the ground truth with a trapezoid distribution.
 
 .. code-block::
 
     e_trapezoid = utils.trapezoid_distribution(
-        total_width=256, slope_width=30, plateau_width=180, device=device
+        total_width=256, slope_width=30, plateau_width=110, device=device
     )
     u_trapezoid = utils.trapezoid_distribution(
-        total_width=256, slope_width=30, plateau_width=180, device=device
+        total_width=256, slope_width=30, plateau_width=110, device=device
     )
-    ground_truth = u_trapezoid.unsqueeze(index_mapping.unbatched_bitmap_u) * e_trapezoid.unsqueeze(index_mapping.unbatched_bitmap_e)
+    ground_truth = u_trapezoid.unsqueeze(
+        index_mapping.unbatched_bitmap_u
+    ) * e_trapezoid.unsqueeze(index_mapping.unbatched_bitmap_e)
+
+For the motor position optimization the flux integral is essential, as we usually want to maximize the energy on the receiver while optimally distributing the individual heliostat fluxes.
+To simulate the energy on the receiver, we need to assign a meaningful magnitude to each individual ray. This is done by providing the ``dni`` parameter.
+The DNI is the insolation measured at a given location on Earth with a surface element perpendicular to the Sun's rays, excluding diffuse insolation.
+The DNI needs to be provided in W/m^2 and is then automatically converted to ray magnitudes.
+The DNI is a parameter in the ``MotorPositionsOptimizer``, as we will see later.
+You can also pass a DNI directly into a ``HeliostatRayTracer`` anywhere else in ``ARTIST``, but this was not necessary in the previous two reconstructions.
+The ``ground_truth`` distribution we aim for now needs to be scaled by a scalar ``target_flux_integral`` value.
+
+.. code-block::
+
+    target_flux_integral = 10000
+    ground_truth = (ground_truth / ground_truth.sum()) * target_flux_integral
+
+Next, we set the loss function as the ``KLDivergenceLoss``:
+
+.. code-block::
+
+    loss_definition = KLDivergenceLoss()
 
 The ``KLDivergenceLoss`` measures how one probability distribution is different from a second, reference distribution. In
 this case the reference distribution is the trapezoid distribution, which we compare to the collective distribution
 generated by all heliostats in the scenario.
 
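+As a quick illustration (a minimal sketch in plain ``torch`` with hypothetical tensor names, not the actual
+``KLDivergenceLoss`` implementation), the KL divergence between the normalized ground truth and a normalized
+flux prediction amounts to the following:
+
+.. code-block::
+
+    import torch
+
+    # Hypothetical 256 x 256 flux maps for illustration.
+    predicted_flux = torch.rand(256, 256)
+    ground_truth_flux = torch.rand(256, 256)
+
+    # Normalize both bitmaps so they can be treated as probability distributions.
+    p = ground_truth_flux / ground_truth_flux.sum()
+    q = predicted_flux / predicted_flux.sum()
+
+    # KL(p || q); a small epsilon guards against division by and log of zero.
+    epsilon = 1e-12
+    kl_divergence = (p * torch.log((p + epsilon) / (q + epsilon))).sum()
+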
-Before we can perform the optimization, we also need to define the learning rate scheduler and the optimizer configuration.
+Before we can perform the optimization, we also need to define the optimization configuration.
 Internally, the ``torch.optim.Adam`` optimizer is used, but the optimal parameters may differ depending on the data or
 specific use case. In this tutorial we define the following scheduler and optimization configuration:
 
 .. code-block::
 
-    scheduler = (
-        config_dictionary.exponential
-    )
-    scheduler_parameters = {
+    # Set optimizer parameters.
+    optimizer_dict = {
+        config_dictionary.initial_learning_rate: 3e-4,
+        config_dictionary.tolerance: 0.0005,
+        config_dictionary.max_epoch: 30,
+        config_dictionary.batch_size: 50,
+        config_dictionary.log_step: 3,
+        config_dictionary.early_stopping_delta: 1e-4,
+        config_dictionary.early_stopping_patience: 100,
+        config_dictionary.early_stopping_window: 100,
+    }
+    # Configure the learning rate scheduler.
+    scheduler_dict = {
+        config_dictionary.scheduler_type: config_dictionary.reduce_on_plateau,
         config_dictionary.gamma: 0.9,
         config_dictionary.min: 1e-6,
         config_dictionary.max: 1e-3,
         config_dictionary.step_size_up: 500,
         config_dictionary.reduce_factor: 0.3,
-        config_dictionary.patience: 10,
+        config_dictionary.patience: 100,
         config_dictionary.threshold: 1e-3,
         config_dictionary.cooldown: 10,
     }
-
-    # Set optimizer parameters.
+    # Configure the regularizers and constraints.
+    constraint_dict = {
+        config_dictionary.rho_energy: 1.0,
+        config_dictionary.lambda_lr: 0.1,
+        config_dictionary.max_flux_density: 1e10,
+        config_dictionary.rho_pixel: 1.0,
+    }
+    # Combine configurations.
     optimization_configuration = {
-        config_dictionary.initial_learning_rate: 1e-3,
-        config_dictionary.tolerance: 0.0005,
-        config_dictionary.max_epoch: 50,
-        config_dictionary.log_step: 10,
-        config_dictionary.early_stopping_delta: 1e-4,
-        config_dictionary.early_stopping_patience: 10,
-        config_dictionary.scheduler: scheduler,
-        config_dictionary.scheduler_parameters: scheduler_parameters,
+        config_dictionary.optimization: optimizer_dict,
+        config_dictionary.scheduler: scheduler_dict,
+        config_dictionary.constraints: constraint_dict,
     }
 
+The optimization configuration is a combination of the optimizer parameters, the scheduler parameters, and the optimization constraints.
+For the motor position optimization we have two constraints. With ``rho_energy`` and ``lambda_lr`` we define the Augmented Lagrangian coefficients.
+They drive the flux integral toward its maximum during the optimization.
+Furthermore, there are the ``max_flux_density`` and ``rho_pixel`` parameters.
+They constrain the flux at the pixel level. The parameter ``max_flux_density`` defines the maximum allowable flux density per pixel.
+The parameter ``rho_pixel`` controls the strength of the penalty applied when this limit is exceeded.
+This is particularly important because, in a real power plant, the receiver is subject to strict safety limits on the allowable flux density. Exceeding this limit could lead to material damage.
+
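+The following minimal sketch (plain ``torch``, hypothetical values, not the actual ``ARTIST`` implementation)
+illustrates how such a pixel-level limit can enter the loss as a penalty term:
+
+.. code-block::
+
+    import torch
+
+    # Hypothetical flux density per pixel in W/m^2.
+    flux = torch.rand(256, 256) * 2e10
+    max_flux_density = 1e10
+    rho_pixel = 1.0
+
+    # Only pixels above the safety limit contribute; the penalty grows
+    # quadratically with the size of the violation.
+    violation = torch.clamp(flux - max_flux_density, min=0.0)
+    pixel_penalty = rho_pixel * (violation**2).sum()
+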
 Now we are finally done, the final step is to create a ``MotorPositionsOptimizer`` object and to run the ``optimize()``
 method to perform the actual optimization.
@@ -90,7 +126,7 @@ method to perform the actual optimization.
         incident_ray_direction=torch.tensor([0.0, 1.0, 0.0, 0.0], device=device),
         target_area_index=1,
         ground_truth=ground_truth,
-        bitmap_resolution=torch.tensor([256, 256], device=device),
+        dni=dni,
         device=device,
     )
diff --git a/docs/tutorial_surface_reconstruction.rst b/docs/tutorial_surface_reconstruction.rst
index bc9a1dc28..74a1bf283 100644
--- a/docs/tutorial_surface_reconstruction.rst
+++ b/docs/tutorial_surface_reconstruction.rst
@@ -102,84 +102,79 @@ in the generated image individually.
     loss_definition = KLDivergenceLoss()
 
-Regularizers
-^^^^^^^^^^^^
+Optimizer, Scheduler, Regularizer and Constraints Configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Regularizers are used to prevent overfitting and ensure that the reconstructed surface is smooth and similar to an ideal
-surface. In this tutorial we consider two regularizers:
-
-- ``IdealSurfaceRegularizer``: Pushes the reconstructed surface towards the shape of an ideal, perfectly flat or canted surface. The idea here, is that we know the general canting and shape of a flat surface and what is unknown is the minute deformations. Therefore, any dramatic changes should be avoided and in general the learnt surface should be similar to the ideal surface, apart from these minute deviations.
-- ``TotalVariationRegularizer``: This regularizer promotes smoothness by penalizing large gradients. The idea behind this regularize is that neighboring points on the surface should be similar, therefore very large differences between points is unrealistic. We apply this regularize to both the surface normals and the surface points.
+The surface reconstruction internally uses the ``torch.optim.Adam`` optimizer. Depending on the data you use, different
+parameters may perform better for the optimizer - including a different learning rate scheduler. Therefore, we first have
+to define our learning rate scheduler; here we use an exponential scheduler, but good results have also been achieved with
+a cyclic or reduce-on-plateau scheduler:
 
 .. code-block::
 
-    # Configure regularizers and their weights.
-    ideal_surface_regularizer = IdealSurfaceRegularizer(
-        weight=0.4, reduction_dimensions=(index_mapping.facet_dimension, index_mapping.points_dimension, index_mapping.coordinates_dimension)
-    )
-    total_variation_regularizer_points = TotalVariationRegularizer(
-        weight=0.3,
-        reduction_dimensions=(index_mapping.facet_dimension,),
-        surface=config_dictionary.surface_points,
-        number_of_neighbors=1000,
-        sigma=1e-3,
-    )
-    total_variation_regularizer_normals = TotalVariationRegularizer(
-        weight=0.8,
-        reduction_dimensions=(index_mapping.facet_dimension,),
-        surface=config_dictionary.surface_points,
-        number_of_neighbors=1000,
-        sigma=1e-3,
-    )
+    # Configure the optimization.
+    optimizer_dict = {
+        config_dictionary.initial_learning_rate: 1e-4,
+        config_dictionary.tolerance: 1e-5,
+        config_dictionary.max_epoch: 30,
+        config_dictionary.batch_size: 30,
+        config_dictionary.log_step: 1,
+        config_dictionary.early_stopping_delta: 1e-4,
+        config_dictionary.early_stopping_patience: 100,
+        config_dictionary.early_stopping_window: 100,
+    }
+    # Configure the learning rate scheduler.
+    scheduler_dict = {
+        config_dictionary.scheduler_type: config_dictionary.exponential,
+        config_dictionary.gamma: 0.99,
+        config_dictionary.min: 1e-6,
+        config_dictionary.max: 1e-2,
+        config_dictionary.step_size_up: 100,
+        config_dictionary.reduce_factor: 0.5,
+        config_dictionary.patience: 10,
+        config_dictionary.threshold: 1e-4,
+        config_dictionary.cooldown: 5,
+    }
 
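+As an aside, the sketch below (plain ``torch``, hypothetical parameter values; the internal wiring of ``ARTIST`` may
+differ) shows what such an exponential scheduler configuration typically corresponds to in ``torch.optim.lr_scheduler``:
+
+.. code-block::
+
+    import torch
+
+    # Hypothetical parameters for illustration.
+    parameters = [torch.nn.Parameter(torch.zeros(10))]
+    optimizer = torch.optim.Adam(parameters, lr=1e-4)
+
+    # An exponential scheduler multiplies the learning rate by gamma once per epoch.
+    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
+
+    for epoch in range(30):
+        # ... forward pass, loss.backward(), optimizer.step(), optimizer.zero_grad() ...
+        scheduler.step()
+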
-Finally, these regularizers are added into a list which we will later use in the surface reconstruction:
+Regularizers are used to prevent overfitting and ensure that the reconstructed surface is smooth and similar to an ideal
+surface. In this tutorial we consider two regularizers:
+
+- ``IdealSurfaceRegularizer``: Pushes the reconstructed surface towards the shape of an ideal, perfectly flat or canted surface. The idea here is that we know the general canting and shape of a flat surface, and what is unknown are the minute deformations. Therefore, any dramatic changes should be avoided and in general the learnt surface should be similar to the ideal surface, apart from these minute deviations.
+- ``SmoothnessRegularizer``: This regularizer promotes smoothness by penalizing large gradients. The idea behind this regularizer is that neighboring points on the surface should be similar, therefore very large differences between points are unrealistic. We apply this regularizer to the NURBS control points.
 
 .. code-block::
 
+    # Configure regularizers.
+    ideal_surface_regularizer = IdealSurfaceRegularizer(reduction_dimensions=(1,))
+    smoothness_regularizer = SmoothnessRegularizer(reduction_dimensions=(1,))
     regularizers = [
         ideal_surface_regularizer,
-        total_variation_regularizer_points,
-        total_variation_regularizer_normals,
+        smoothness_regularizer,
     ]
-
-Scheduler and Optimizer Configuration
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The surface reconstruction internally uses the ``torch.optim.Adam`` optimizer. Depending on the data you use, different
-parameters may perform better for the optimizer - including a different learning rate scheduler. Therefore, we first have
-to define our learning rate schedular, here we use an exponential schedular, but good results have also been achieved with
-an cyclic or reduce on plateau scheduler:
-
-.. code-block::
-
-    scheduler = (
-        config_dictionary.exponential
-    )
-    scheduler_parameters = {
-        config_dictionary.gamma: 0.9,
-        config_dictionary.min: 1e-6,
-        config_dictionary.max: 1e-3,
-        config_dictionary.step_size_up: 500,
-        config_dictionary.reduce_factor: 0.3,
-        config_dictionary.patience: 10,
-        config_dictionary.threshold: 1e-3,
-        config_dictionary.cooldown: 10,
+    constraint_dict = {
+        config_dictionary.regularizers: regularizers,
+        config_dictionary.weight_smoothness: 0.005,
+        config_dictionary.weight_ideal_surface: 0.005,
+        config_dictionary.initial_lambda_energy: 0.1,
+        config_dictionary.rho_energy: 1.0,
+        config_dictionary.energy_tolerance: 0.01,
     }
 
-Given the scheduler we can now define the optimization parameters in the ``optimization_configuration`` dictionary:
+As you can see, the ``constraints`` dictionary contains more parameters than are necessary for the two regularizers mentioned before. To further stabilize the reconstruction there is one additional constraint.
+This constraint considers the flux integral of the raytraced flux images from the predicted surfaces. During reconstruction the flux integral must not change significantly.
+The parameters ``initial_lambda_energy`` and ``rho_energy`` are the Augmented Lagrangian coefficients used to enforce this energy conservation constraint.
+The multiplier ``lambda_energy`` represents the Lagrange multiplier associated with the energy integral constraint. It linearly penalizes violations and is updated iteratively during optimization based on the current constraint violation.
+If the predicted energy deviates from the reference energy, ``lambda_energy`` increases, thereby strengthening the enforcement of the constraint in the next iteration.
+The parameter ``rho_energy`` is the quadratic penalty weight. It controls how strongly deviations from the reference energy are penalized through the squared constraint term.
+The ``energy_tolerance`` describes how much the flux integral may vary relative to the initial surface.
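+
+As a rough sketch (plain ``torch`` with hypothetical values; the actual ``ARTIST`` implementation may differ in
+details), one step of this Augmented Lagrangian treatment of the energy constraint looks as follows:
+
+.. code-block::
+
+    import torch
+
+    # Hypothetical quantities for illustration.
+    predicted_energy = torch.tensor(9800.0, requires_grad=True)
+    reference_energy = torch.tensor(10000.0)
+    lambda_energy, rho_energy, energy_tolerance = 0.1, 1.0, 0.01
+
+    # Constraint violation: deviation of the flux integral beyond the allowed tolerance.
+    deviation = (predicted_energy - reference_energy).abs()
+    violation = torch.clamp(deviation - energy_tolerance * reference_energy, min=0.0)
+
+    # Augmented Lagrangian term: linear multiplier plus quadratic penalty.
+    energy_term = lambda_energy * violation + 0.5 * rho_energy * violation**2
+
+    # After each optimization step, the multiplier is updated with the current
+    # violation, strengthening the constraint whenever it is violated.
+    lambda_energy = lambda_energy + rho_energy * float(violation)
+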
+We can now define the combined optimization parameters in the ``optimization_configuration`` dictionary: .. code-block:: optimization_configuration = { - config_dictionary.initial_learning_rate: 1e-4, - config_dictionary.tolerance: 0.00005, - config_dictionary.max_epoch: 500, - config_dictionary.log_step: 10, - config_dictionary.early_stopping_delta: 1e-4, - config_dictionary.early_stopping_patience: 10, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, - config_dictionary.regularizers: regularizers, + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, + config_dictionary.constraints: constraint_dict, } **Note:** These parameters have performed well on our data and in our tests, however we cannot guarantee that they will diff --git a/examples/hyperparameter_optimization/INSTRUCTIONS.md b/examples/hyperparameter_optimization/INSTRUCTIONS.md deleted file mode 100644 index 4febc741b..000000000 --- a/examples/hyperparameter_optimization/INSTRUCTIONS.md +++ /dev/null @@ -1,50 +0,0 @@ -# Example Code for a Hyperparameter Optimization for the Surface Reconstruction - -This examples folder contains code to replicate the hyperparameter optimization in our CVPR paper. - -## Configuration YAML - -To make the execution of this code easier, the main configuration parameters are included in a ``hpo_config.yaml`` -file. It is also possible to provide all these arguments as command line arguments when executing the scripts. -Additionally, if no arguments and no configuration file is provided, default values will be used -- which will probably -lead to the scripts failing. - -To make sure the configuration is successfully loaded, please provide the path to the configuration file via the ``--config`` -command line argument when executing the script. If no argument is provided the script will look for the ``hpo_config.yaml`` -located in the ``hyperparameter_optimization`` working directory, however this option is not failsafe, and we always suggest providing the command -line argument. - -Here is an overview of the configuration parameters contained within the configuration file and what they mean: - -- `metadata_root`: The root directory in which the metadata will be saved, i.e., a folder with the name "metadata" will be saved within this directory. -- `metadata_file_name`: The file name for the metadata downloaded, if you do not change anything the STAC client from ``PAINT`` will automatically download the metadata and save it to "calibration_metadata_all_heliostats.csv" in the "metadata" folder. -- `data_dir`: The directory in which all ``PAINT`` data will be saved. This data is required for the plots. -- `tower_file_name`: The name of the file containing the tower measurements. If you do not change anything, the STAC client from ``PAINT`` will automatically download this data to the file "WRI1030197-tower-measurements.json" saved within the data directory. -- `scenarios_dir`: The name of the directory to save the ``ARTIST`` scenarios required for generating results. -- `results_dir`: The name of the directory to save the results from the hyperparameter optimization and surface reconstruction before plotting. -- `plots_dir`: The name of the directory to save the plots. -- `propulate_logs_dir`: The name of the directory where the ``propulate``-hpo will save all logs. -- `heliostat_for_reconstruction`: A dictionary containing a mapping from a "heliostat ID" to a "calibration measurement ID". 
The provided heliostat will be reconstructed from the provided calibration measurements. -- `reconstruction_parameter_ranges`: The hyperparameter ranges handed to ``propulate`` from which to find optimal parameters. -- `device`: The device used for the computation. - -## How to Use: - -In order to replicate these hpo results presented in our paper it is important to execute the code in the correct order. - -### Run First - -You must first run the code to download the data, this consists of two scripts: - -1. ``download_metadata.py``: This script will download all the metadata associated with the ``PAINT`` database. It will take a while to run. -2. ``download_data.py``: Based on the metadata, this script will now download all the required calibration, deflectometry, and tower data from the ``PAINT`` database required for the plots. It will also take a while to run. - -### Hyperparameter Optimization with ``propulate`` - -The hyperparameter search is done using ``propulate``. Afterwards it is possible to visualize the results. Please run the following scripts in the correct order: - -1. ``surface_reconstruction_viable_heliostats_list.py``: This script will iterate through the downloaded data and populate a list with file names that contain the measurements required for the hpo. -2. ``surface_reconstruction_generate_scenario.py``: This script will generate the ``ARTIST`` scenarios required for the hpo, the surface reconstruction and the plots. -3. ``surface_reconstruction_hyperparameter_search.py``: This script will perform the hpo with ``propulate`` and save the results for plotting. -4. ``surface_reconstruction_results.py``: This script will perform surface reconstruction once with the optimal hyperparameters and save the surface and flux results. -5. ``surface_reconstruction_plot.py``: This script will generate the flux prediction plots and reconstructed surface plots and save them. 
diff --git a/examples/hyperparameter_optimization/config.yaml b/examples/hyperparameter_optimization/config.yaml
new file mode 100644
index 000000000..c2da7dca6
--- /dev/null
+++ b/examples/hyperparameter_optimization/config.yaml
@@ -0,0 +1,54 @@
+metadata_root: "./"
+metadata_file_name: "calibration_metadata_all_heliostats.csv"
+data_dir: "/base/path/data"
+tower_file_name: "WRI1030197-tower-measurements.json"
+scenarios_dir: "./examples/hyperparameter_optimization/scenarios"
+results_dir: "./examples/hyperparameter_optimization/results"
+plots_dir: "./examples/hyperparameter_optimization/plots"
+propulate_logs_dir: "./examples/hyperparameter_optimization/logs"
+minimum_number_of_measurements: 2
+number_of_points_to_plot: 200
+kinematic_reconstruction_image_type: "flux"
+surface_reconstruction_image_type: "flux-centered"
+excluded_heliostats_for_reconstruction: ["BE20", "AP14"]
+parameter_ranges_kinematic:
+  initial_learning_rate: [1e-7, 1e-2]
+  scheduler: ["exponential", "reduce_on_plateau", "cyclic"]
+  min_learning_rate: [1e-9, 1e-6]
+  max_learning_rate: [1e-4, 1e-2]
+  step_size_up: [100, 500]
+  reduce_factor: [0.05, 0.5]
+  patience: [3, 50]
+  threshold: [1e-6, 1e-3]
+  cooldown: [2, 20]
+  gamma: [0.85, 0.999]
+parameter_ranges_surface:
+  number_of_surface_points: [30, 90]
+  number_of_rays: [50, 120]
+  number_of_control_points: [4, 12]
+  nurbs_degree: [2, 3]
+  initial_learning_rate: [1e-7, 1e-3]
+  scheduler: ["exponential", "reduce_on_plateau", "cyclic"]
+  min_learning_rate: [1e-9, 1e-7]
+  max_learning_rate: [1e-4, 1e-2]
+  sample_limit: [2, 6]
+  step_size_up: [100, 500]
+  reduce_factor: [0.05, 0.5]
+  patience: [3, 50]
+  threshold: [1e-6, 1e-3]
+  cooldown: [2, 20]
+  gamma: [0.85, 0.999]
+  ideal_regularizer_weight: [0.0, 1.0]
+  smoothness_regularizer_weight: [0.0, 1.0]
+parameter_ranges_motor_positions:
+  initial_learning_rate: [1e-7, 1e-3]
+  scheduler: ["exponential", "reduce_on_plateau", "cyclic"]
+  min_learning_rate: [1e-9, 1e-6]
+  max_learning_rate: [1e-4, 1e-2]
+  step_size_up: [100, 500]
+  reduce_factor: [0.05, 0.5]
+  patience: [3, 50]
+  threshold: [1e-6, 1e-3]
+  cooldown: [2, 20]
+  gamma: [0.85, 0.999]
+device: "cuda"
diff --git a/examples/hyperparameter_optimization/generate_plots.py b/examples/hyperparameter_optimization/generate_plots.py
new file mode 100644
index 000000000..bcfa9770f
--- /dev/null
+++ b/examples/hyperparameter_optimization/generate_plots.py
@@ -0,0 +1,789 @@
+import argparse
+import pathlib
+import warnings
+from typing import Any
+
+import numpy as np
+import torch
+import yaml
+from matplotlib import pyplot as plt
+from matplotlib.gridspec import GridSpec
+from scipy.stats import gaussian_kde
+
+from artist.util import utils
+from artist.util.environment_setup import get_device
+
+plot_colors = {
+    "darkblue": "#002864",
+    "lightblue": "#14c8ff",
+    "darkred": "#cd5c5c",
+    "darkgray": "#686868",
+}
+
+
+def plot_kinematic_reconstruction_fluxes(
+    reconstruction_results: dict[str, dict[str, Any]], save_dir: pathlib.Path
+) -> None:
+    """
+    Plot flux images from the kinematic reconstruction.
+
+    This function plots the measured calibration flux next to the fluxes simulated with the default and the
+    reconstructed kinematic parameters for a selected heliostat.
+
+    Parameters
+    ----------
+    reconstruction_results : dict[str, dict[str, Any]]
+        A dictionary containing the reconstruction results.
+    save_dir : pathlib.Path
+        Directory used for saving the plot.
+ """ + plt.rcParams["text.usetex"] = True + plt.rcParams["text.latex.preamble"] = r"\usepackage{cmbright}" + plt.rcParams["text.latex.preamble"] = r"\setlength{\parindent}{0pt}" + + cmap = "inferno" + + results = reconstruction_results["flux"] + + n_rows = 1 + n_cols = 3 + + fig = plt.figure(figsize=(6, 4)) + gs = GridSpec( + n_rows, + n_cols, + figure=fig, + left=0.02, + right=0.98, + top=0.99, + bottom=0.02, + wspace=0.01, + hspace=0.01, + width_ratios=[1, 1, 1], + ) + + axes = np.empty((n_rows, n_cols), dtype=object) + + for i in range(n_rows): + for j in range(n_cols): + axes[i, j] = fig.add_subplot(gs[i, j]) + axes[i, j].axis("off") + + col_labels = [ + "Calibration Flux", + "Default\\\\Kinematic", + "Reconstructed\\\\Kinematic", + ] + heliostat_names = [list(results.keys())[-1]] + positions = [ + reconstruction_results["loss"][heliostat]["position"] + for heliostat in heliostat_names + ] + + for col_index in range(n_cols): + axes[0, col_index].set_title( + rf"\textbf{{{col_labels[col_index]}}}", fontsize=18, ha="center" + ) + + for row_index in range(n_rows): + flux_data = results[heliostat_names[row_index]]["fluxes"].cpu().detach() + for col_index in range(n_cols): + position = positions[row_index] + position_str = ", ".join(f"{x:.2f}" for x in position[:3]) + + axes[row_index, col_index].imshow(flux_data[col_index], cmap=cmap) + axes[row_index, 0].text( + -0.05, + 0.5, + rf"\textbf{{Heliostat: {heliostat_names[row_index]}}}", + transform=axes[row_index, 0].transAxes, + fontsize=18, + ha="right", + va="center", + ) + axes[row_index, 0].text( + -0.05, + 0.4, + r"\textit{ENU Position:}", + transform=axes[row_index, 0].transAxes, + fontsize=12, + color=plot_colors["darkgray"], + ha="right", + va="center", + ) + axes[row_index, 0].text( + -0.05, + 0.30, + rf"\textit{{{position_str}}}", + transform=axes[row_index, 0].transAxes, + fontsize=12, + color=plot_colors["darkgray"], + ha="right", + va="center", + ) + + if not save_dir.is_dir(): + save_dir.mkdir(parents=True, exist_ok=True) + filename = save_dir / "reconstruction_kinematic_fluxes.pdf" + fig.savefig(filename, dpi=300, bbox_inches="tight") + plt.close(fig) + + print(f"Saved reconstruction flux plot at: {filename}.") + + +def plot_error_distribution( + reconstruction_results: dict[str, dict[str, Any]], save_dir: pathlib.Path +) -> None: + """ + Plot the distribution of reconstruction errors. + + This function plots histograms and kernel density estimations of the pointing errors in reconstruction when comparing + HeliOS and UTIS as methods for focal spot centroid extraction. + + Parameters + ---------- + reconstruction_results : dict[str, dict[str, Any]] + A dictionary containing the reconstruction results. + save_dir : pathlib.Path + Directory used for saving the plot. + """ + # Set Plot style. + plt.rcParams["text.usetex"] = True + plt.rcParams["text.latex.preamble"] = r"\usepackage{cmbright}" + plt.rcParams["text.latex.preamble"] = r"\setlength{\parindent}{0pt}" + + # Convert losses to list. 
+ errors_in_meters = [ + data["loss"] for data in reconstruction_results["loss"].values() + ] + + # Convert to angular error in mrad + positions = np.array( + [data["position"] for data in reconstruction_results["loss"].values()], + dtype=float, + ) + distances = np.linalg.norm(positions[:, :2], axis=1) + errors_in_mrad = (errors_in_meters / distances) * 1000 + + for errors, name, color in zip( + [errors_in_meters, errors_in_mrad], ["meters", "mrad"], ["lightblue", "darkred"] + ): + x_max = max(errors) + x_vals = np.linspace(0, x_max, 100) + kde = gaussian_kde(errors, bw_method="scott") + kde_values = kde(x_vals) + mean = np.mean(errors) + + fig, ax = plt.subplots(figsize=(6, 4)) + + ax.hist( + errors, + bins=25, + range=(0, x_max), + density=True, + alpha=0.3, + label="Loss Histogram", + color=plot_colors[color], + ) + ax.plot( + x_vals, + kde_values, + label="KDE", + color=plot_colors[color], + ) + ax.axvline( + mean, + color=plot_colors[color], + linestyle="--", + label=f"Mean: {mean:.2f} {name}", + ) + + ax.set_xlabel(f"\\textbf{{Pointing Error}} \n{{\\small {name}}}") + ax.set_ylabel("\\textbf{Density}") + ax.legend(fontsize=8) + ax.grid(True) + + if not save_dir.is_dir(): + save_dir.mkdir(parents=True, exist_ok=True) + filename = save_dir / f"error_distribution_{name}.pdf" + fig.savefig(filename, dpi=300, bbox_inches="tight") + + print(f"Saved reconstruction error distribution plot at: {filename}.") + + +def plot_linear_and_angular_error_against_distance( + reconstruction_results: dict[str, dict[str, Any]], + number_of_points_to_plot: int, + save_dir: pathlib.Path, + random_seed: int, +) -> None: + """ + Plot both reconstruction error in meters (left y-axis) and mrad (right y-axis) against the distance from the tower. + + Parameters + ---------- + reconstruction_results : dict[str, dict[str, Any]] + A dictionary containing the reconstruction results. + number_of_points_to_plot : int + Number of points to randomly select and plot. + save_dir : pathlib.Path + Directory used for saving the plot. + random_seed : int + Random seed for reproducibility. 
+    """
+    plt.rcParams["text.usetex"] = True
+    plt.rcParams["text.latex.preamble"] = r"\usepackage{cmbright}"
+    plt.rcParams["text.latex.preamble"] = r"\setlength{\parindent}{0pt}"
+
+    positions_list = [
+        data["position"] for data in reconstruction_results["loss"].values()
+    ]
+    error_list_in_meters = [
+        data["loss"] for data in reconstruction_results["loss"].values()
+    ]
+
+    positions = np.array(positions_list, dtype=float)
+    errors_in_meters = np.array(error_list_in_meters, dtype=float)
+
+    distances = np.linalg.norm(positions[:, :2], axis=1)
+
+    np.random.seed(random_seed)
+    total_data_points = len(distances)
+    if number_of_points_to_plot >= total_data_points:
+        selected_indices = np.arange(total_data_points)
+    else:
+        selected_indices = np.random.choice(
+            total_data_points, number_of_points_to_plot, replace=False
+        )
+
+    distances = distances[selected_indices]
+    errors_in_meters = errors_in_meters[selected_indices]
+    errors_in_mrad = (errors_in_meters / distances) * 1000
+
+    fig, ax_m = plt.subplots(figsize=(7, 4))
+    ax_m.scatter(
+        distances,
+        errors_in_meters,
+        color=plot_colors["lightblue"],
+        marker="o",
+        label="Error (m)",
+        alpha=0.7,
+    )
+
+    fit_meters = np.poly1d(np.polyfit(distances, errors_in_meters, 1))
+    x_vals = np.linspace(distances.min(), distances.max(), 200)
+    ax_m.plot(
+        x_vals, fit_meters(x_vals), color=plot_colors["lightblue"], linestyle="--"
+    )
+    ax_m.set_xlabel("\\textbf{Heliostat Distance from Tower [m]}")
+    ax_m.set_ylabel(
+        "\\textbf{Mean Pointing Error [m]}",
+        color=plot_colors["lightblue"],
+    )
+    ax_m.grid(True)
+
+    ax_a = ax_m.twinx()
+    ax_a.scatter(
+        distances,
+        errors_in_mrad,
+        color=plot_colors["darkred"],
+        marker="^",
+        label="Error (mrad)",
+        alpha=0.7,
+    )
+
+    fit_a = np.poly1d(np.polyfit(distances, errors_in_mrad, 1))
+    ax_a.plot(x_vals, fit_a(x_vals), color="darkred", linestyle="--")
+    ax_a.set_ylabel("\\textbf{Mean Pointing Error [mrad]}", color="darkred")
+    ax_a.tick_params(axis="y", labelcolor="black")
+
+    handles_m, labels_m = ax_m.get_legend_handles_labels()
+    handles_a, labels_a = ax_a.get_legend_handles_labels()
+    ax_m.legend(
+        handles_m + handles_a,
+        labels_m + labels_a,
+        fontsize=8,
+        loc="upper right",
+        ncol=2,
+    )
+
+    save_path = save_dir / "reconstruction_error_distance_dual_axis.pdf"
+    fig.savefig(save_path, dpi=300, bbox_inches="tight")
+    print(f"Saved dual-axis plot at: {save_path}")
+
+
+def plot_motor_pos_fluxes(
+    reconstruction_results: dict[str, Any], save_dir: pathlib.Path
+) -> None:
+    """
+    Plot the fluxes before and after the motor position optimization.
+
+    This function plots the flux on the receiver with centered aim points, the flux with optimized aim points,
+    and the target distribution on a shared color scale.
+
+    Parameters
+    ----------
+    reconstruction_results : dict[str, Any]
+        A dictionary containing the reconstruction results.
+    save_dir : pathlib.Path
+        Directory used for saving the plot.
+    """
+    plt.rcParams["text.usetex"] = True
+    plt.rcParams["text.latex.preamble"] = r"\usepackage{cmbright}"
+
+    cmap = "inferno"
+    n_cols = 3
+
+    fig = plt.figure(figsize=(9.5, 8))
+    gs = GridSpec(
+        2,
+        n_cols,
+        figure=fig,
+        left=0.05,
+        right=0.95,
+        top=0.95,
+        bottom=0.15,
+        height_ratios=[1, 0.05],
+        wspace=0.01,
+        hspace=0.01,
+    )
+    axes = []
+
+    # Compute global min and max for shared color scale
+    all_flux_data = [
+        reconstruction_results[key].cpu().detach()
+        for key in ["flux_before", "flux_after", "target_distribution"]
+    ]
+    vmin = min([data.min() for data in all_flux_data])
+    vmax = max([data.max() for data in all_flux_data])
+
+    for i, key in enumerate(["flux_before", "flux_after", "target_distribution"]):
+        ax = fig.add_subplot(gs[0, i])
+        ax.axis("off")
+        flux_data = reconstruction_results[key].cpu().detach()
+        im = ax.imshow(flux_data, cmap=cmap, vmin=vmin, vmax=vmax)  # Shared color scale
+        axes.append(ax)
+
+        pos = ax.get_position()
+        fig.text(
+            x=pos.x0 + pos.width / 2,
+            y=pos.y0 - 0.03,
+            s=str(flux_data.sum()),
+            ha="center",
+            va="top",
+            fontsize=18,
+        )
+
+    axes[0].set_title(r"\textbf{Aim Points Centered}", fontsize=18, ha="center")
+    axes[1].set_title(r"\textbf{Aim Points Optimized}", fontsize=18, ha="center")
+    axes[2].set_title(r"\textbf{Target Distribution}", fontsize=18, ha="center")
+
+    # Add a single horizontal colorbar beneath all subplots
+    cbar_ax = fig.add_subplot(gs[1, :])  # spans all columns
+    cbar = fig.colorbar(im, cax=cbar_ax, orientation="horizontal")
+    cbar.ax.tick_params(labelsize=14)
+
+    if not save_dir.is_dir():
+        save_dir.mkdir(parents=True, exist_ok=True)
+    filename = save_dir / "motor_pos_plots.pdf"
+    fig.savefig(filename, dpi=300, bbox_inches="tight")
+    plt.close(fig)
+    print(f"Saved motor position flux plot at: {filename}.")
+
+
+def plot_surface_reconstruction(
+    reconstruction_results: dict[str, Any],
+    save_dir: pathlib.Path,
+) -> None:
+    """
+    Plot the surface reconstruction results.
+
+    Parameters
+    ----------
+    reconstruction_results : dict[str, Any]
+        Results of the surface reconstruction.
+    save_dir : pathlib.Path
+        Path to the location where the plots are saved.
+    """
+    fig, axes = plt.subplots(3, 7, figsize=(35, 15))
+    for index, heliostat_name in enumerate(list(reconstruction_results["flux"])[:3]):
+        heliostat_data = reconstruction_results["flux"][heliostat_name]
+        axes[index, 0].imshow(
+            heliostat_data["fluxes"][0].cpu().detach(), cmap="inferno"
+        )
+        axes[index, 0].set_title("Calibration Flux")
+        axes[index, 0].axis("off")
+
+        axes[index, 1].imshow(
+            heliostat_data["fluxes"][1].cpu().detach(), cmap="inferno"
+        )
+        axes[index, 1].set_title("Surface not reconstructed")
+        axes[index, 1].axis("off")
+
+        axes[index, 2].imshow(
+            heliostat_data["fluxes"][2].cpu().detach(), cmap="inferno"
+        )
+        axes[index, 2].set_title("Surface reconstructed")
+        axes[index, 2].axis("off")
+
+        reference_direction = torch.tensor([0.0, 0.0, 1.0], device=torch.device("cpu"))
+        canting = heliostat_data["canting"].cpu().detach()
+
+        # Process original deflectometry data.
+ deflectometry = torch.stack( + ( + reconstruction_results["deflectometry"][heliostat_name][ + "surface_points" + ] + .cpu() + .detach(), + reconstruction_results["deflectometry"][heliostat_name][ + "surface_normals" + ] + .cpu() + .detach(), + ) + ).reshape(2, 4, -1, 4) + deflectometry_uncanted = utils.perform_canting( + canting_angles=canting.expand(2, -1, -1, -1), + data=deflectometry, + inverse=True, + device=torch.device("cpu"), + ) + deflectometry_points_original = deflectometry_uncanted[0, :, :, :3].reshape( + -1, 3 + ) + deflectometry_normals_original = torch.nn.functional.normalize( + deflectometry_uncanted[1, :, :, :3], dim=-1 + ).reshape(-1, 3) + cos_theta_deflectometry_original = ( + deflectometry_normals_original @ reference_direction + ) + angles_deflectometry_original = torch.clip( + torch.arccos(torch.clip(cos_theta_deflectometry_original, -1.0, 1.0)), + -0.1, + 0.1, + ) + sc3 = axes[index, 3].scatter( + x=deflectometry_points_original[:, 0], + y=deflectometry_points_original[:, 1], + c=deflectometry_points_original[:, 2], + cmap="inferno", + vmin=0.0345, + vmax=0.036, + ) + axes[index, 3].set_title("Deflectometry Points original") + axes[index, 3].axis("off") + axes[index, 3].set_aspect("equal", adjustable="box") + cbar3 = fig.colorbar( + sc3, ax=axes[index, 3], orientation="horizontal", fraction=0.046, pad=0.1 + ) + cbar3.set_label("m") + + sc4 = axes[index, 4].scatter( + x=deflectometry_points_original[:, 0], + y=deflectometry_points_original[:, 1], + c=angles_deflectometry_original, + cmap="inferno", + vmin=0.0, + vmax=0.005, + ) + axes[index, 4].set_title("Deflectometry normals") + axes[index, 4].axis("off") + axes[index, 4].set_aspect("equal", adjustable="box") + cbar4 = fig.colorbar( + sc4, ax=axes[index, 4], orientation="horizontal", fraction=0.046, pad=0.1 + ) + cbar4.set_label("Angle (rad)") + + # Process reconstructed data. 
+ points_uncanted = utils.perform_canting( + canting_angles=canting.expand(2, -1, -1, -1), + data=heliostat_data["surface_points"].cpu().detach().reshape(2, 4, -1, 4), + inverse=True, + device=torch.device("cpu"), + ) + normals_uncanted = utils.perform_canting( + canting_angles=canting.expand(2, -1, -1, -1), + data=heliostat_data["surface_normals"].cpu().detach().reshape(2, 4, -1, 4), + inverse=True, + device=torch.device("cpu"), + ) + reconstructed_points = points_uncanted[1, :, :, :3].reshape(-1, 3) + reconstructed_normals = torch.nn.functional.normalize( + normals_uncanted[1, :, :, :3], dim=-1 + ).reshape(-1, 3) + cos_theta_reconstructed = reconstructed_normals @ reference_direction + angles_reconstructed = torch.clip( + torch.arccos(torch.clip(cos_theta_reconstructed, -1.0, 1.0)), -0.1, 0.1 + ) + sc5 = axes[index, 5].scatter( + x=reconstructed_points[:, 0], + y=reconstructed_points[:, 1], + c=reconstructed_points[:, 2], + cmap="inferno", + vmin=0.0345, + vmax=0.036, + ) + axes[index, 5].set_title("Reconstructed Surface (Points)") + axes[index, 5].axis("off") + axes[index, 5].set_aspect("equal", adjustable="box") + cbar5 = fig.colorbar( + sc5, ax=axes[index, 5], orientation="horizontal", fraction=0.046, pad=0.1 + ) + cbar5.set_label("m") + + sc6 = axes[index, 6].scatter( + x=reconstructed_points[:, 0], + y=reconstructed_points[:, 1], + c=angles_reconstructed, + cmap="inferno", + vmin=0.0, + vmax=0.005, + ) + axes[index, 6].set_title("Reconstructed normals") + axes[index, 6].axis("off") + axes[index, 6].set_aspect("equal", adjustable="box") + cbar6 = fig.colorbar( + sc6, ax=axes[index, 6], orientation="horizontal", fraction=0.046, pad=0.1 + ) + cbar6.set_label("Angle (rad)") + + plt.tight_layout() + plt.savefig( + save_dir / "results_surface_reconstruction.png", + bbox_inches="tight", + pad_inches=1, + ) + + +def plot_heliostat_positions( + surface_scenario: dict[str, Any], + kinematic_scenario: dict[str, Any], + save_dir: pathlib.Path, +) -> None: + """ + Plot heliostat positions. + + Parameters + ---------- + surface_scenario : dict[str, Any] + Results of surface reconstruction. + kinematic_scenario : dict[str, Any] + Results of kinematic reconstruction. + save_dir : pathlib.Path + Directory to save the plots. 
+    """
+    plt.rcParams["text.usetex"] = True
+    plt.rcParams["text.latex.preamble"] = r"\usepackage{cmbright}"
+    plt.rcParams["text.latex.preamble"] = r"\setlength{\parindent}{0pt}"
+
+    for scenario in [surface_scenario, kinematic_scenario]:
+        positions_list = [data["position"] for data in scenario["loss"].values()]
+
+        index = [i for i, d in enumerate(scenario["loss"].keys()) if "BD32" in d]
+
+        fig, ax = plt.subplots(figsize=(6, 4))
+
+        x = [row[0] for row in positions_list]
+        y = [row[1] for row in positions_list]
+
+        ax.scatter(
+            x=x,
+            y=y,
+            c=plot_colors["lightblue"],
+            s=2,
+        )
+
+        ax.scatter(
+            [x[index[0]]],
+            [y[index[0]]],
+            facecolors="none",
+            edgecolors="red",
+            s=2,
+            linewidths=2,
+            label="BD32",
+        )
+
+        ax.plot([-2 / 2, 2 / 2], [0, 0], color="red", linewidth=2)
+        ax.grid(True)
+
+        ax.set_xlabel("\\textbf{East-West distance to tower [m]}")
+        ax.set_ylabel("\\textbf{North-South distance to tower [m]}")
+        ax.legend(fontsize=8)
+        ax.grid(True)
+
+        if not save_dir.is_dir():
+            save_dir.mkdir(parents=True, exist_ok=True)
+        filename = save_dir / f"heliostat_positions_{len(positions_list)}.pdf"
+        fig.savefig(filename, dpi=300, bbox_inches="tight")
+
+        print(f"Saved position plot at: {filename}.")
+
+
+if __name__ == "__main__":
+    """
+    Generate plots based on the reconstruction and optimization results.
+
+    This script loads the results from the ``ARTIST`` kinematic reconstruction, surface reconstruction, and motor
+    position optimization and generates plots of the error distributions, the errors as a function of distance from
+    the tower, the flux images, the reconstructed surfaces, and the heliostat positions.
+
+    Parameters
+    ----------
+    config : str
+        Path to the configuration file.
+    device : str
+        Device to use for the computation.
+    results_dir : str
+        Path to directory where the results are saved.
+    plots_dir : str
+        Path to the directory where the plots are saved.
+    number_of_points_to_plot : int
+        Number of data points to plot in the distance error plot.
+    random_seed : int
+        Random seed for the selection of points to plot.
+    """
+
+    # Set default location for configuration file.
+    script_dir = pathlib.Path(__file__).resolve().parent
+    default_config_path = script_dir / "config.yaml"
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--config",
+        type=str,
+        help="Path to the YAML configuration file.",
+        default=default_config_path,
+    )
+
+    # Parse the config argument first to load the configuration.
+    args, unknown = parser.parse_known_args()
+    config_path = pathlib.Path(args.config)
+    config = {}
+    if config_path.exists():
+        try:
+            with open(config_path, "r") as f:
+                config = yaml.safe_load(f)
+        except yaml.YAMLError as exc:
+            warnings.warn(f"Error parsing YAML file: {exc}.")
+    else:
+        warnings.warn(
+            f"Warning: Configuration file not found at {config_path}. Using defaults."
+        )
+
+    # Add remaining arguments to the parser with defaults loaded from the config.
+    device_default = config.get("device", "cuda")
+    results_dir_default = config.get("results_dir", "./examples/paint_plots/results")
+    plots_dir_default = config.get("plots_dir", "./examples/paint_plots/plots")
+    number_of_points_to_plot_default = config.get("number_of_points_to_plot", 100)
+    random_seed_default = config.get("random_seed", 7)
+
+    parser.add_argument(
+        "--device",
+        type=str,
+        help="Device to use.",
+        default=device_default,
+    )
+    parser.add_argument(
+        "--results_dir",
+        type=str,
+        help="Path to load the results.",
+        default=results_dir_default,
+    )
+    parser.add_argument(
+        "--plots_dir",
+        type=str,
+        help="Path to save the plots.",
+        default=plots_dir_default,
+    )
+    parser.add_argument(
+        "--number_of_points_to_plot",
+        type=int,
+        help="Number of data points to plot in the distance error plot.",
+        default=number_of_points_to_plot_default,
+    )
+    parser.add_argument(
+        "--random_seed",
+        type=int,
+        help="Random seed for the selection of points to plot.",
+        default=random_seed_default,
+    )
+
+    # Re-parse the full set of arguments.
+    args = parser.parse_args(args=unknown)
+
+    device = get_device(torch.device(args.device))
+
+    results_path = (
+        pathlib.Path(args.results_dir) / "kinematic_reconstruction_results.pt"
+    )
+    if not results_path.exists():
+        raise FileNotFoundError(
+            f"Results file not found: {results_path}. Please run ``reconstruction_generate_results.py`` "
+            f"or adjust the location of the results file and try again!"
+        )
+
+    reconstruction_results = torch.load(
+        results_path,
+        weights_only=False,
+        map_location=device,
+    )
+
+    results_path_motor_pos = (
+        pathlib.Path(args.results_dir) / "motor_position_optimization_results.pt"
+    )
+    if not results_path_motor_pos.exists():
+        raise FileNotFoundError(
+            f"Results file not found: {results_path_motor_pos}. Please run ``reconstruction_generate_results.py`` "
+            f"or adjust the location of the results file and try again!"
+        )
+
+    results_motor_pos = torch.load(
+        results_path_motor_pos,
+        weights_only=False,
+        map_location=device,
+    )
+
+    results_path_surface = (
+        pathlib.Path(args.results_dir) / "surface_reconstruction_results.pt"
+    )
+    if not results_path_surface.exists():
+        raise FileNotFoundError(
+            f"Results file not found: {results_path_surface}. Please run ``reconstruction_generate_results.py`` "
+            f"or adjust the location of the results file and try again!"
+ ) + + results_surface = torch.load( + results_path_surface, + weights_only=False, + map_location=device, + ) + + plots_path = pathlib.Path(args.plots_dir) + + plot_error_distribution( + reconstruction_results=reconstruction_results, save_dir=plots_path + ) + + plot_linear_and_angular_error_against_distance( + reconstruction_results=reconstruction_results, + number_of_points_to_plot=args.number_of_points_to_plot, + save_dir=plots_path, + random_seed=args.random_seed, + ) + + plot_kinematic_reconstruction_fluxes( + reconstruction_results=reconstruction_results, save_dir=plots_path + ) + + plot_surface_reconstruction( + reconstruction_results=results_surface, save_dir=plots_path + ) + + plot_motor_pos_fluxes(reconstruction_results=results_motor_pos, save_dir=plots_path) + + plot_heliostat_positions( + surface_scenario=results_surface, + kinematic_scenario=reconstruction_results, + save_dir=plots_path, + ) diff --git a/examples/hyperparameter_optimization/generate_results_kinematic.py b/examples/hyperparameter_optimization/generate_results_kinematic.py new file mode 100644 index 000000000..5ea3861f3 --- /dev/null +++ b/examples/hyperparameter_optimization/generate_results_kinematic.py @@ -0,0 +1,482 @@ +import argparse +import json +import pathlib +import warnings +from typing import Any, cast + +import h5py +import torch +import yaml + +from artist.core import loss_functions +from artist.core.heliostat_ray_tracer import HeliostatRayTracer +from artist.core.kinematic_reconstructor import KinematicReconstructor +from artist.data_parser.calibration_data_parser import CalibrationDataParser +from artist.data_parser.paint_calibration_parser import PaintCalibrationDataParser +from artist.field.heliostat_group import HeliostatGroup +from artist.scenario.scenario import Scenario +from artist.util import config_dictionary, set_logger_config +from artist.util.environment_setup import get_device, setup_distributed_environment + +set_logger_config() +torch.manual_seed(7) +torch.cuda.manual_seed(7) + + +def merge_data( + unoptimized_data: dict[str, dict[str, torch.Tensor]], + optimized_data: dict[str, dict[str, torch.Tensor]], +) -> dict[str, dict[str, torch.Tensor]]: + """ + Merge data dictionaries. + + Parameters + ---------- + unoptimized_data : dict[str, dict[str, torch.Tensor]] + Data dictionary containing unoptimized data. + optimized_data : dict[str, dict[str, torch.Tensor]] + Data dictionary containing optimized data. + + Returns + ------- + dict[str, dict[str, torch.Tensor]] + The combined data dictionary. 
+    """
+    merged = {}
+
+    for heliostat in unoptimized_data.keys():
+        fluxes = torch.stack(
+            (
+                unoptimized_data[heliostat]["measured_flux"],
+                unoptimized_data[heliostat]["artist_flux"],
+                optimized_data[heliostat]["artist_flux"],
+            )
+        )
+
+        merged[heliostat] = {
+            "fluxes": fluxes,
+        }
+
+        if len(unoptimized_data[heliostat]) > 2:
+            surface_points = torch.stack(
+                (
+                    unoptimized_data[heliostat]["surface_points"],
+                    optimized_data[heliostat]["surface_points"],
+                )
+            )
+            surface_normals = torch.stack(
+                (
+                    unoptimized_data[heliostat]["surface_normals"],
+                    optimized_data[heliostat]["surface_normals"],
+                )
+            )
+            canting = optimized_data[heliostat]["canting"]
+            facet_translations = optimized_data[heliostat]["facet_translations"]
+
+            merged[heliostat] = {
+                "fluxes": fluxes,
+                "surface_points": surface_points,
+                "surface_normals": surface_normals,
+                "canting": canting,
+                "facet_translations": facet_translations,
+            }
+
+    return merged
+
+
+def data_for_flux_plots(
+    scenario: Scenario,
+    ddp_setup: dict[str, Any],
+    heliostat_data: dict[
+        str,
+        CalibrationDataParser
+        | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]],
+    ],
+    device: torch.device | None = None,
+) -> dict[str, dict[str, torch.Tensor]]:
+    """
+    Collect measured and ray-traced flux images for plotting.
+
+    Parameters
+    ----------
+    scenario : Scenario
+        The scenario.
+    ddp_setup : dict[str, Any]
+        Information about the distributed environment, process_groups, devices, ranks, world_size, heliostat group to ranks mapping.
+    heliostat_data : dict[str, CalibrationDataParser | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]]]
+        Heliostat and calibration measurement data.
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+
+    Returns
+    -------
+    dict[str, dict[str, torch.Tensor]]
+        Dictionary containing the measured and the simulated flux per heliostat.
+ """ + device = get_device(device) + + bitmaps_for_plots = {} + + for heliostat_group_index in ddp_setup[config_dictionary.groups_to_ranks_mapping][ + ddp_setup[config_dictionary.rank] + ]: + heliostat_group: HeliostatGroup = scenario.heliostat_field.heliostat_groups[ + heliostat_group_index + ] + + parser = cast( + CalibrationDataParser, heliostat_data[config_dictionary.data_parser] + ) + heliostat_mapping = cast( + list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], + heliostat_data[config_dictionary.heliostat_data_mapping], + ) + ( + measured_fluxes, + _, + incident_ray_directions, + _, + active_heliostats_mask, + target_area_mask, + ) = parser.parse_data_for_reconstruction( + heliostat_data_mapping=heliostat_mapping, + heliostat_group=heliostat_group, + scenario=scenario, + device=device, + ) + + heliostat_group.activate_heliostats( + active_heliostats_mask=active_heliostats_mask, device=device + ) + + heliostat_group.align_surfaces_with_incident_ray_directions( + aim_points=scenario.target_areas.centers[target_area_mask], + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + device=device, + ) + + scenario.set_number_of_rays(number_of_rays=300) + + ray_tracer = HeliostatRayTracer( + scenario=scenario, + heliostat_group=heliostat_group, + blocking_active=False, + world_size=ddp_setup[config_dictionary.heliostat_group_world_size], + rank=ddp_setup[config_dictionary.heliostat_group_rank], + batch_size=heliostat_group.number_of_active_heliostats, + random_seed=ddp_setup[config_dictionary.heliostat_group_rank], + ) + + bitmaps_per_heliostat = ray_tracer.trace_rays( + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + target_area_mask=target_area_mask, + device=device, + ) + + names = [ + heliostat_group.names[i] + for i in torch.nonzero(active_heliostats_mask).squeeze() + ] + + for i, heliostat in enumerate(names): + bitmaps_for_plots[heliostat] = { + "artist_flux": bitmaps_per_heliostat[i], + "measured_flux": measured_fluxes[i], + } + + return bitmaps_for_plots + + +def generate_reconstruction_results( + scenario_path: pathlib.Path, + heliostat_data_mapping: list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], + hyperparameters: dict[str, Any], + device: torch.device, +) -> dict[str, dict[str, Any]]: + """ + Perform kinematic reconstruction in ``ARTIST`` and save results. + + Parameters + ---------- + scenario_path : pathlib.Path + Path to reconstruction scenario. + heliostat_data_mapping : list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] + Data mapping for each heliostat, containing a list of tuples with the heliostat name, the path to the calibration + properties file, and the path to the flux images. + hyperparameters : dict[str, Any] + Optimized hyperparameters. + device : torch.device | None + Device used for optimization and tensor allocations. + + Returns + ------- + dict[str, dict[str, Any]] + Mapping from heliostat name to per-centroid loss arrays and, later, positions. 
+ """ + device = get_device(device=device) + + loss_dict: dict = {} + + number_of_heliostat_groups = Scenario.get_number_of_heliostat_groups_from_hdf5( + scenario_path=scenario_path + ) + + with setup_distributed_environment( + number_of_heliostat_groups=number_of_heliostat_groups, + device=device, + ) as ddp_setup: + with h5py.File(scenario_path, "r") as scenario_file: + scenario = Scenario.load_scenario_from_hdf5( + scenario_file=scenario_file, + number_of_surface_points_per_facet=torch.tensor([5, 5], device=device), + device=device, + ) + + positions = scenario.heliostat_field.heliostat_groups[0].positions + names = scenario.heliostat_field.heliostat_groups[0].names + heliostats: list[str] = [] + distances = torch.linalg.norm(positions, dim=1) + + for target in [50, 100, 150, 200]: + closest = torch.abs(distances - target) + _, indices = torch.topk(closest, k=1, largest=False) + heliostats.extend(names[i] for i in indices.tolist()) + + kinematic_reconstruction_method = ( + config_dictionary.kinematic_reconstruction_raytracing + ) + + optimizer_dict = { + config_dictionary.initial_learning_rate: hyperparameters[ + "initial_learning_rate" + ], + config_dictionary.tolerance: 0, + config_dictionary.max_epoch: 10, + config_dictionary.batch_size: 500, + config_dictionary.log_step: 1, + config_dictionary.early_stopping_delta: 1e-6, + config_dictionary.early_stopping_patience: 4000, + config_dictionary.early_stopping_window: 1000, + } + scheduler_dict = { + config_dictionary.scheduler_type: hyperparameters["scheduler"], + config_dictionary.gamma: hyperparameters["gamma"], + config_dictionary.min: hyperparameters["min_learning_rate"], + config_dictionary.max: hyperparameters["max_learning_rate"], + config_dictionary.step_size_up: hyperparameters["step_size_up"], + config_dictionary.reduce_factor: hyperparameters["reduce_factor"], + config_dictionary.patience: hyperparameters["patience"], + config_dictionary.threshold: hyperparameters["threshold"], + config_dictionary.cooldown: hyperparameters["cooldown"], + } + optimization_configuration = { + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, + } + + data: dict[ + str, + CalibrationDataParser + | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], + ] = { + config_dictionary.data_parser: PaintCalibrationDataParser( + sample_limit=2, centroid_extraction_method="UTIS" + ), + config_dictionary.heliostat_data_mapping: heliostat_data_mapping, + } + + data_plot: dict[ + str, + CalibrationDataParser + | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], + ] = { + config_dictionary.data_parser: PaintCalibrationDataParser( + sample_limit=1, centroid_extraction_method="UTIS" + ), + config_dictionary.heliostat_data_mapping: [ + entry for entry in heliostat_data_mapping if entry[0] in heliostats + ], + } + + loss_definition = loss_functions.FocalSpotLoss(scenario=scenario) + + kinematic_reconstructor = KinematicReconstructor( + ddp_setup=ddp_setup, + scenario=scenario, + data=data, + optimization_configuration=optimization_configuration, + reconstruction_method=kinematic_reconstruction_method, + ) + + flux_plot_data_before = data_for_flux_plots( + scenario=scenario, + ddp_setup=ddp_setup, + heliostat_data=data_plot, + device=device, + ) + + per_heliostat_losses = kinematic_reconstructor.reconstruct_kinematic( + loss_definition=loss_definition, device=device + ) + + flux_plot_data_after = data_for_flux_plots( + scenario=scenario, + ddp_setup=ddp_setup, + heliostat_data=data_plot, + device=device, + ) 
+
+        for heliostat_group in scenario.heliostat_field.heliostat_groups:
+            for index, name in enumerate(heliostat_group.names):
+                loss_dict.setdefault(name, {})
+                loss_dict[name]["loss"] = per_heliostat_losses[index].detach().item()
+
+        flux_data = merge_data(flux_plot_data_before, flux_plot_data_after)
+
+        # Include heliostat position.
+        for group in scenario.heliostat_field.heliostat_groups:
+            for name, position in zip(group.names, group.positions):
+                loss_dict[name]["position"] = position.clone().detach().cpu().tolist()
+
+        results = {"loss": loss_dict, "flux": flux_data}
+
+    return results
+
+
+if __name__ == "__main__":
+    """
+    Generate results with the optimized parameters.
+
+    Parameters
+    ----------
+    config : str
+        Path to the configuration file.
+    device : str
+        Device to use for the computation.
+    data_dir : str
+        Path to the data directory.
+    results_dir : str
+        Path to where the results will be saved.
+    scenarios_dir : str
+        Path to the directory containing the scenarios.
+    """
+    # Set default location for configuration file.
+    script_dir = pathlib.Path(__file__).resolve().parent
+    default_config_path = script_dir / "config.yaml"
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--config",
+        type=str,
+        help="Path to the YAML configuration file.",
+        default=default_config_path,
+    )
+
+    # Parse the config argument first to load the configuration.
+    args, unknown = parser.parse_known_args()
+    config_path = pathlib.Path(args.config)
+    config = {}
+    if config_path.exists():
+        try:
+            with open(config_path, "r") as f:
+                config = yaml.safe_load(f)
+        except yaml.YAMLError as exc:
+            warnings.warn(f"Error parsing YAML file: {exc}")
+    else:
+        warnings.warn(
+            f"Warning: Configuration file not found at {config_path}. Using defaults."
+        )
+
+    # Add remaining arguments to the parser with defaults loaded from the config.
+    data_dir_default = config.get("data_dir", "./paint_data")
+    device_default = config.get("device", "cuda")
+    scenarios_dir_default = config.get(
+        "scenarios_dir", "./examples/hyperparameter_optimization/scenarios"
+    )
+    results_dir_default = config.get(
+        "results_dir", "./examples/hyperparameter_optimization/results"
+    )
+
+    parser.add_argument(
+        "--device",
+        type=str,
+        help="Device to use.",
+        default=device_default,
+    )
+    parser.add_argument(
+        "--data_dir",
+        type=str,
+        help="Path to downloaded paint data.",
+        default=data_dir_default,
+    )
+    parser.add_argument(
+        "--scenarios_dir",
+        type=str,
+        help="Path to directory containing the generated scenarios.",
+        default=scenarios_dir_default,
+    )
+    parser.add_argument(
+        "--results_dir",
+        type=str,
+        help="Path to save the results.",
+        default=results_dir_default,
+    )
+
+    # Re-parse the full set of arguments.
+    args = parser.parse_args(args=unknown)
+    device = get_device(torch.device(args.device))
+    data_dir = pathlib.Path(args.data_dir)
+    results_dir = pathlib.Path(args.results_dir)
+
+    # Define scenario path.
+    scenario_file = pathlib.Path(args.scenarios_dir) / "ideal_scenario_kinematic.h5"
+    if not scenario_file.exists():
+        raise FileNotFoundError(
+            f"The reconstruction scenario located at {scenario_file} could not be found! Please run the ``generate_scenarios.py`` script to generate this scenario, or adjust the file path and try again."
+        )
+
+    viable_heliostats_data = (
+        pathlib.Path(args.results_dir) / "viable_heliostats_kinematic.json"
+    )
+    if not viable_heliostats_data.exists():
+        raise FileNotFoundError(
+            f"The viable heliostat list located at {viable_heliostats_data} could not be found! Please run the ``generate_viable_heliostats_list.py`` script to generate this list, or adjust the file path and try again."
+        )
+
+    # Load viable heliostats data.
+    with open(viable_heliostats_data, "r") as f:
+        viable_heliostats = json.load(f)
+
+    heliostat_data_mapping: list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] = [
+        (
+            item["name"],
+            [pathlib.Path(p) for p in item["calibrations"]],
+            [pathlib.Path(p) for p in item["kinematic_reconstruction_flux_images"]],
+        )
+        for item in viable_heliostats
+    ]
+
+    with open(results_dir / "hpo_results_kinematic.json", "r") as file:
+        hyperparameters = json.load(file)
+
+    reconstruction_results = generate_reconstruction_results(
+        scenario_path=scenario_file,
+        heliostat_data_mapping=heliostat_data_mapping,
+        hyperparameters=hyperparameters,
+        device=device,
+    )
+
+    results_path = (
+        pathlib.Path(args.results_dir) / "kinematic_reconstruction_results.pt"
+    )
+    if not results_path.parent.is_dir():
+        results_path.parent.mkdir(parents=True, exist_ok=True)
+
+    torch.save(reconstruction_results, results_path)
+    print(f"Reconstruction results saved to {results_path}")
diff --git a/examples/hyperparameter_optimization/generate_results_motor_position.py b/examples/hyperparameter_optimization/generate_results_motor_position.py
new file mode 100644
index 000000000..e6bb37d65
--- /dev/null
+++ b/examples/hyperparameter_optimization/generate_results_motor_position.py
@@ -0,0 +1,416 @@
+import argparse
+import json
+import pathlib
+import warnings
+from typing import Any
+
+import h5py
+import torch
+import yaml
+
+from artist.core import loss_functions
+from artist.core.heliostat_ray_tracer import HeliostatRayTracer
+from artist.core.motor_position_optimizer import MotorPositionsOptimizer
+from artist.scenario.scenario import Scenario
+from artist.util import config_dictionary, index_mapping, set_logger_config, utils
+from artist.util.environment_setup import get_device, setup_distributed_environment
+
+set_logger_config()
+torch.manual_seed(7)
+torch.cuda.manual_seed(7)
+
+
+def data_for_flux_plots(
+    scenario: Scenario,
+    incident_ray_direction: torch.Tensor,
+    target_area_index: int,
+    dni: float,
+    id: str,
+    device: torch.device | None = None,
+) -> torch.Tensor:
+    """
+    Compute the combined flux distribution on the target for plotting.
+
+    Parameters
+    ----------
+    scenario : Scenario
+        The scenario.
+    incident_ray_direction : torch.Tensor
+        The incident ray direction during the optimization.
+        Tensor of shape [4].
+    target_area_index : int
+        The index of the target used for the optimization.
+    dni : float
+        Direct normal irradiance in W/m^2.
+    id : str
+        Identifier for the fluxes, either "before" or "after" the optimization.
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+
+    Returns
+    -------
+    torch.Tensor
+        The total flux distribution on the target area.
+        Tensor of shape [bitmap_resolution_e, bitmap_resolution_u].
+    """
+    device = get_device(device)
+
+    bitmap_resolution = torch.tensor([256, 256], device=device)
+
+    total_flux = torch.zeros(
+        (
+            bitmap_resolution[index_mapping.unbatched_bitmap_e],
+            bitmap_resolution[index_mapping.unbatched_bitmap_u],
+        ),
+        device=device,
+    )
+
+    for heliostat_group in scenario.heliostat_field.heliostat_groups:
+        (active_heliostats_mask, target_area_mask, incident_ray_directions) = (
+            scenario.index_mapping(
+                heliostat_group=heliostat_group,
+                single_incident_ray_direction=incident_ray_direction,
+                single_target_area_index=target_area_index,
+                device=device,
+            )
+        )
+
+        # Activate heliostats.
+        heliostat_group.activate_heliostats(
+            active_heliostats_mask=active_heliostats_mask,
+            device=device,
+        )
+
+        # Align heliostats.
+        if id == "before":
+            heliostat_group.align_surfaces_with_incident_ray_directions(
+                aim_points=scenario.target_areas.centers[target_area_mask],
+                incident_ray_directions=incident_ray_directions,
+                active_heliostats_mask=active_heliostats_mask,
+                device=device,
+            )
+        elif id == "after":
+            heliostat_group.align_surfaces_with_motor_positions(
+                motor_positions=heliostat_group.kinematic.active_motor_positions,
+                active_heliostats_mask=active_heliostats_mask,
+                device=device,
+            )
+
+    for heliostat_group in scenario.heliostat_field.heliostat_groups:
+        (active_heliostats_mask, target_area_mask, incident_ray_directions) = (
+            scenario.index_mapping(
+                heliostat_group=heliostat_group,
+                single_incident_ray_direction=incident_ray_direction,
+                single_target_area_index=target_area_index,
+                device=device,
+            )
+        )
+
+        # Create a ray tracer.
+        ray_tracer = HeliostatRayTracer(
+            scenario=scenario,
+            heliostat_group=heliostat_group,
+            blocking_active=True,
+            batch_size=100,
+            bitmap_resolution=bitmap_resolution,
+            dni=dni,
+        )
+
+        # Perform heliostat-based ray tracing.
+        bitmaps_per_heliostat = ray_tracer.trace_rays(
+            incident_ray_directions=incident_ray_directions,
+            active_heliostats_mask=active_heliostats_mask,
+            target_area_mask=target_area_mask,
+            device=device,
+        )
+
+        flux_distribution_on_target = ray_tracer.get_bitmaps_per_target(
+            bitmaps_per_heliostat=bitmaps_per_heliostat,
+            target_area_mask=target_area_mask,
+            device=device,
+        )[target_area_index]
+
+        total_flux += flux_distribution_on_target
+
+    return total_flux
+
+
+def generate_reconstruction_results(
+    scenario_path: pathlib.Path,
+    incident_ray_direction: torch.Tensor,
+    target_area_index: int,
+    target_distribution: torch.Tensor,
+    dni: float,
+    hyperparameters: dict[str, Any],
+    device: torch.device,
+) -> dict[str, dict[str, Any]]:
+    """
+    Perform motor position optimization in ``ARTIST`` and save results.
+
+    This function optimizes the motor positions of all heliostats so that the combined flux on the
+    target matches a desired distribution. The flux distributions before and after the optimization
+    are saved for plotting later.
+
+    Parameters
+    ----------
+    scenario_path : pathlib.Path
+        Path to the optimization scenario.
+    incident_ray_direction : torch.Tensor
+        The incident ray direction during the optimization.
+        Tensor of shape [4].
+    target_area_index : int
+        The index of the target used for the optimization.
+    target_distribution : torch.Tensor
+        The desired focal spot or distribution.
+        Tensor of shape [4] or tensor of shape [bitmap_resolution_e, bitmap_resolution_u].
+    dni : float
+        Direct normal irradiance in W/m^2.
+    hyperparameters : dict[str, Any]
+        Optimized hyperparameters.
+    device : torch.device
+        Device used for optimization and tensor allocations.
+
+    Returns
+    -------
+    dict[str, dict[str, Any]]
+        The flux distributions before and after the optimization, the target distribution, and the final loss.
+    """
+    device = get_device(device=device)
+
+    number_of_heliostat_groups = Scenario.get_number_of_heliostat_groups_from_hdf5(
+        scenario_path=scenario_path
+    )
+
+    with setup_distributed_environment(
+        number_of_heliostat_groups=number_of_heliostat_groups,
+        device=device,
+    ) as ddp_setup:
+        with h5py.File(scenario_path, "r") as scenario_file:
+            scenario = Scenario.load_scenario_from_hdf5(
+                scenario_file=scenario_file,
+                device=device,
+            )
+
+        scenario.set_number_of_rays(number_of_rays=3)
+        optimizer_dict = {
+            config_dictionary.initial_learning_rate: hyperparameters[
+                "initial_learning_rate"
+            ],
+            config_dictionary.tolerance: 0,
+            config_dictionary.max_epoch: 3,
+            config_dictionary.batch_size: 100,
+            config_dictionary.log_step: 1,
+            config_dictionary.early_stopping_delta: 1e-4,
+            config_dictionary.early_stopping_patience: 150,
+            config_dictionary.early_stopping_window: 150,
+        }
+        scheduler_dict = {
+            config_dictionary.scheduler_type: hyperparameters["scheduler"],
+            config_dictionary.gamma: hyperparameters["gamma"],
+            config_dictionary.min: hyperparameters["min_learning_rate"],
+            config_dictionary.max: hyperparameters["max_learning_rate"],
+            config_dictionary.step_size_up: hyperparameters["step_size_up"],
+            config_dictionary.reduce_factor: hyperparameters["reduce_factor"],
+            config_dictionary.patience: hyperparameters["patience"],
+            config_dictionary.threshold: hyperparameters["threshold"],
+            config_dictionary.cooldown: hyperparameters["cooldown"],
+        }
+        constraint_dict = {
+            config_dictionary.rho_energy: 1.0,
+            config_dictionary.max_flux_density: 300,
+            config_dictionary.rho_pixel: 1.0,
+            config_dictionary.lambda_lr: 0.1,
+        }
+        optimization_configuration = {
+            config_dictionary.optimization: optimizer_dict,
+            config_dictionary.scheduler: scheduler_dict,
+            config_dictionary.constraints: constraint_dict,
+        }
+
+        motor_positions_optimizer = MotorPositionsOptimizer(
+            ddp_setup=ddp_setup,
+            scenario=scenario,
+            optimization_configuration=optimization_configuration,
+            incident_ray_direction=incident_ray_direction,
+            target_area_index=target_area_index,
+            ground_truth=target_distribution,
+            bitmap_resolution=torch.tensor([256, 256]),
+            dni=dni,
+            device=device,
+        )
+
+        flux_before = data_for_flux_plots(
+            scenario=scenario,
+            incident_ray_direction=incident_ray_direction,
+            target_area_index=target_area_index,
+            dni=dni,
+            id="before",
+            device=device,
+        )
+
+        loss = motor_positions_optimizer.optimize(
+            loss_definition=loss_functions.KLDivergenceLoss(), device=device
+        )
+
+        flux_after = data_for_flux_plots(
+            scenario=scenario,
+            incident_ray_direction=incident_ray_direction,
+            target_area_index=target_area_index,
+            dni=dni,
+            id="after",
+            device=device,
+        )
+
+        results = {
+            "flux_before": flux_before,
+            "flux_after": flux_after,
+            "target_distribution": target_distribution,
+            "loss": loss,
+        }
+
+    return results
+
+
+if __name__ == "__main__":
+    """
+    Generate results with the optimized parameters.
+
+    Parameters
+    ----------
+    config : str
+        Path to the configuration file.
+    device : str
+        Device to use for the computation.
+    data_dir : str
+        Path to the data directory.
+    results_dir : str
+        Path to where the results will be saved.
+    scenarios_dir : str
+        Path to the directory containing the scenarios.
+    """
+    # Set default location for configuration file.
+    script_dir = pathlib.Path(__file__).resolve().parent
+    default_config_path = script_dir / "config.yaml"
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--config",
+        type=str,
+        help="Path to the YAML configuration file.",
+        default=default_config_path,
+    )
+
+    # Parse the config argument first to load the configuration.
+    args, unknown = parser.parse_known_args()
+    config_path = pathlib.Path(args.config)
+    config = {}
+    if config_path.exists():
+        try:
+            with open(config_path, "r") as f:
+                config = yaml.safe_load(f)
+        except yaml.YAMLError as exc:
+            warnings.warn(f"Error parsing YAML file: {exc}")
+    else:
+        warnings.warn(
+            f"Warning: Configuration file not found at {config_path}. Using defaults."
+        )
+
+    # Add remaining arguments to the parser with defaults loaded from the config.
+    data_dir_default = config.get("data_dir", "./paint_data")
+    device_default = config.get("device", "cuda")
+    scenarios_dir_default = config.get(
+        "scenarios_dir", "./examples/hyperparameter_optimization/scenarios"
+    )
+    results_dir_default = config.get(
+        "results_dir", "./examples/hyperparameter_optimization/results"
+    )
+
+    parser.add_argument(
+        "--device",
+        type=str,
+        help="Device to use.",
+        default=device_default,
+    )
+    parser.add_argument(
+        "--data_dir",
+        type=str,
+        help="Path to downloaded paint data.",
+        default=data_dir_default,
+    )
+    parser.add_argument(
+        "--scenarios_dir",
+        type=str,
+        help="Path to directory containing the generated scenarios.",
+        default=scenarios_dir_default,
+    )
+    parser.add_argument(
+        "--results_dir",
+        type=str,
+        help="Path to save the results.",
+        default=results_dir_default,
+    )
+
+    # Re-parse the full set of arguments.
+    args = parser.parse_args(args=unknown)
+    device = get_device(torch.device(args.device))
+    data_dir = pathlib.Path(args.data_dir)
+    results_dir = pathlib.Path(args.results_dir)
+
+    # Define scenario path.
+    scenario_file = (
+        pathlib.Path(args.scenarios_dir) / "deflectometry_scenario_surface.h5"
+    )
+    if not scenario_file.exists():
+        raise FileNotFoundError(
+            f"The optimization scenario located at {scenario_file} could not be found! Please run the ``generate_scenarios.py`` script to generate this scenario, or adjust the file path and try again."
+        )
+
+    # DNI W/m^2.
+    dni = 850
+    # Incident ray direction.
+    incident_ray_direction = torch.nn.functional.normalize(
+        torch.tensor([0.0, 0.0, 0.0, 1.0], device=device)
+        - torch.tensor([0.0, 0.0, 1.0, 1.0], device=device),
+        dim=0,
+    )
+    # Receiver.
+    target_area_index = 1
+    # Target distribution.
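+    # The 2D target distribution below is built as the outer product of two 1D
+    # trapezoid profiles (a flat plateau with linear ramps on both sides), then
+    # normalized to sum to one and scaled to the desired total power on the
+    # receiver. Schematically, a 1D profile with total_width=8, slope_width=2
+    # and plateau_width=4 could look like
+    # [0.0, 0.5, 1.0, 1.0, 1.0, 1.0, 0.5, 0.0]
+    # (the exact ramp values depend on the implementation of
+    # utils.trapezoid_distribution).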
+ e_trapezoid = utils.trapezoid_distribution( + total_width=256, slope_width=30, plateau_width=120, device=device + ) + u_trapezoid = utils.trapezoid_distribution( + total_width=256, slope_width=30, plateau_width=120, device=device + ) + eu_trapezoid = u_trapezoid.unsqueeze(1) * e_trapezoid.unsqueeze(0) + + target_distribution = (eu_trapezoid / eu_trapezoid.sum()) * 2810000.00 + + with open(results_dir / "hpo_results_motor_positions.json", "r") as file: + hyperparameters = json.load(file) + + optimization_results = generate_reconstruction_results( + scenario_path=scenario_file, + incident_ray_direction=incident_ray_direction, + target_area_index=target_area_index, + target_distribution=target_distribution, + dni=dni, + hyperparameters=hyperparameters, + device=device, + ) + + results_path = ( + pathlib.Path(args.results_dir) / "motor_position_optimization_results.pt" + ) + if not results_path.parent.is_dir(): + results_path.parent.mkdir(parents=True, exist_ok=True) + + torch.save(optimization_results, results_path) + print(f"Reconstruction results saved to {results_path}") diff --git a/examples/hyperparameter_optimization/generate_results_surface.py b/examples/hyperparameter_optimization/generate_results_surface.py new file mode 100644 index 000000000..540602058 --- /dev/null +++ b/examples/hyperparameter_optimization/generate_results_surface.py @@ -0,0 +1,566 @@ +import argparse +import json +import pathlib +import warnings +from typing import Any, cast + +import h5py +import torch +import yaml + +from artist.core import loss_functions +from artist.core.heliostat_ray_tracer import HeliostatRayTracer +from artist.core.regularizers import IdealSurfaceRegularizer, SmoothnessRegularizer +from artist.core.surface_reconstructor import SurfaceReconstructor +from artist.data_parser.calibration_data_parser import CalibrationDataParser +from artist.data_parser.paint_calibration_parser import PaintCalibrationDataParser +from artist.field.heliostat_group import HeliostatGroup +from artist.scenario.scenario import Scenario +from artist.util import config_dictionary, index_mapping, set_logger_config, utils +from artist.util.environment_setup import get_device, setup_distributed_environment + +set_logger_config() +torch.manual_seed(7) +torch.cuda.manual_seed(7) + + +def merge_data( + unoptimized_data: dict[str, dict[str, torch.Tensor]], + optimized_data: dict[str, dict[str, torch.Tensor]], +) -> dict[str, dict[str, torch.Tensor]]: + """ + Merge data dictionaries. + + Parameters + ---------- + unoptimized_data : dict[str, dict[str, torch.Tensor]] + Data dictionary containing unoptimized data. + optimized_data : dict[str, dict[str, torch.Tensor]] + Data dictionary containing optimized data. + + Returns + ------- + dict[str, dict[str, torch.Tensor]] + The combined data dictionary. 
+    """
+    merged = {}
+
+    for heliostat in unoptimized_data.keys():
+        fluxes = torch.stack(
+            (
+                unoptimized_data[heliostat]["measured_flux"],
+                unoptimized_data[heliostat]["artist_flux"],
+                optimized_data[heliostat]["artist_flux"],
+            )
+        )
+
+        merged[heliostat] = {
+            "fluxes": fluxes,
+        }
+
+        if len(unoptimized_data[heliostat]) > 2:
+            surface_points = torch.stack(
+                (
+                    unoptimized_data[heliostat]["surface_points"],
+                    optimized_data[heliostat]["surface_points"],
+                )
+            )
+            surface_normals = torch.stack(
+                (
+                    unoptimized_data[heliostat]["surface_normals"],
+                    optimized_data[heliostat]["surface_normals"],
+                )
+            )
+            canting = optimized_data[heliostat]["canting"]
+            facet_translations = optimized_data[heliostat]["facet_translations"]
+
+            merged[heliostat] = {
+                "fluxes": fluxes,
+                "surface_points": surface_points,
+                "surface_normals": surface_normals,
+                "canting": canting,
+                "facet_translations": facet_translations,
+            }
+
+    return merged
+
+
+def data_for_flux_plots(
+    scenario: Scenario,
+    ddp_setup: dict[str, Any],
+    heliostat_data: dict[
+        str,
+        CalibrationDataParser
+        | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]],
+    ],
+    device: torch.device | None = None,
+) -> dict[str, dict[str, torch.Tensor]]:
+    """
+    Ray trace the active heliostats and collect flux and surface data for plotting.
+
+    Parameters
+    ----------
+    scenario : Scenario
+        The scenario.
+    ddp_setup : dict[str, Any]
+        Information about the distributed environment: process groups, devices, ranks, world size, and the heliostat group to ranks mapping.
+    heliostat_data : dict[str, CalibrationDataParser | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]]]
+        Heliostat and calibration measurement data.
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+
+    Returns
+    -------
+    dict[str, dict[str, torch.Tensor]]
+        Dictionary containing the measured and ray-traced fluxes as well as surface data per heliostat.
+ """ + device = get_device(device) + + bitmaps_for_plots = {} + + for heliostat_group_index in ddp_setup[config_dictionary.groups_to_ranks_mapping][ + ddp_setup[config_dictionary.rank] + ]: + heliostat_group: HeliostatGroup = scenario.heliostat_field.heliostat_groups[ + heliostat_group_index + ] + + parser = cast( + CalibrationDataParser, heliostat_data[config_dictionary.data_parser] + ) + heliostat_mapping = cast( + list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], + heliostat_data[config_dictionary.heliostat_data_mapping], + ) + ( + measured_fluxes, + _, + incident_ray_directions, + _, + active_heliostats_mask, + target_area_mask, + ) = parser.parse_data_for_reconstruction( + heliostat_data_mapping=heliostat_mapping, + heliostat_group=heliostat_group, + scenario=scenario, + device=device, + ) + + heliostat_group.activate_heliostats( + active_heliostats_mask=active_heliostats_mask, device=device + ) + + heliostat_group.align_surfaces_with_incident_ray_directions( + aim_points=scenario.target_areas.centers[target_area_mask], + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + device=device, + ) + + scenario.set_number_of_rays(number_of_rays=30) + + ray_tracer = HeliostatRayTracer( + scenario=scenario, + heliostat_group=heliostat_group, + blocking_active=False, + world_size=ddp_setup[config_dictionary.heliostat_group_world_size], + rank=ddp_setup[config_dictionary.heliostat_group_rank], + batch_size=heliostat_group.number_of_active_heliostats, + random_seed=ddp_setup[config_dictionary.heliostat_group_rank], + ) + + bitmaps_per_heliostat = ray_tracer.trace_rays( + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + target_area_mask=target_area_mask, + device=device, + ) + + cropped_flux_distributions = utils.crop_flux_distributions_around_center( + flux_distributions=bitmaps_per_heliostat, + crop_width=config_dictionary.utis_crop_width, + crop_height=config_dictionary.utis_crop_height, + target_plane_widths=scenario.target_areas.dimensions[target_area_mask][ + :, index_mapping.target_area_width + ], + target_plane_heights=scenario.target_areas.dimensions[target_area_mask][ + :, index_mapping.target_area_height + ], + device=device, + ) + + names = [ + heliostat_group.names[i] + for i in torch.nonzero(active_heliostats_mask).squeeze() + ] + + for index, heliostat in enumerate(names): + bitmaps_for_plots[heliostat] = { + "measured_flux": measured_fluxes[index], + "artist_flux": cropped_flux_distributions[index], + "surface_points": heliostat_group.surface_points[index], + "surface_normals": heliostat_group.surface_normals[index], + "canting": heliostat_group.active_canting[index], + "facet_translations": heliostat_group.active_facet_translations[index], + } + + return bitmaps_for_plots + + +def generate_reconstruction_results( + scenario_path: pathlib.Path, + heliostat_data_mapping: list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], + hyperparameters: dict[str, Any], + device: torch.device, +) -> dict[str, dict[str, Any]]: + """ + Perform surface reconstruction in ``ARTIST`` and save results. + + Parameters + ---------- + scenario_path : pathlib.Path + Path to reconstruction scenario. + heliostat_data_mapping : list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] + Data mapping for each heliostat, containing a list of tuples with the heliostat name, the path to the calibration + properties file, and the path to the flux images. 
+    hyperparameters : dict[str, Any]
+        Optimized hyperparameters.
+    device : torch.device
+        Device used for optimization and tensor allocations.
+
+    Returns
+    -------
+    dict[str, dict[str, Any]]
+        Dictionary containing the per-heliostat losses and positions, the flux
+        data before and after the reconstruction, and the deflectometry
+        reference data.
+    """
+    device = get_device(device=device)
+
+    loss_dict: dict = {}
+
+    number_of_heliostat_groups = Scenario.get_number_of_heliostat_groups_from_hdf5(
+        scenario_path=scenario_path
+    )
+
+    with setup_distributed_environment(
+        number_of_heliostat_groups=number_of_heliostat_groups,
+        device=device,
+    ) as ddp_setup:
+        number_of_surface_points_per_facet = torch.tensor(
+            [
+                hyperparameters["number_of_surface_points"],
+                hyperparameters["number_of_surface_points"],
+            ],
+            device=device,
+        )
+
+        number_of_control_points_per_facet = torch.tensor(
+            [
+                hyperparameters["number_of_control_points"],
+                hyperparameters["number_of_control_points"],
+            ],
+            device=device,
+        )
+
+        with h5py.File(
+            pathlib.Path(scenario_path.parent) / "deflectometry_scenario_surface.h5",
+            "r",
+        ) as deflectometry_scenario_file:
+            scenario_deflectometry = Scenario.load_scenario_from_hdf5(
+                scenario_file=deflectometry_scenario_file,
+                number_of_surface_points_per_facet=number_of_surface_points_per_facet,
+                device=device,
+            )
+
+        with h5py.File(scenario_path, "r") as scenario_file:
+            scenario = Scenario.load_scenario_from_hdf5(
+                scenario_file=scenario_file,
+                number_of_surface_points_per_facet=number_of_surface_points_per_facet,
+                change_number_of_control_points_per_facet=number_of_control_points_per_facet,
+                device=device,
+            )
+
+        selected_heliostats = [
+            "AC38",
+            "BD38",
+            "AE34",
+            "BG65",
+            "AK26",
+            "AK17",
+            "BA43",
+            "AZ28",
+            "AP51",
+            "AP35",
+        ]
+
+        scenario.set_number_of_rays(
+            number_of_rays=int(hyperparameters["number_of_rays"])
+        )
+
+        for heliostat_group in scenario.heliostat_field.heliostat_groups:
+            heliostat_group.nurbs_degrees = torch.tensor(
+                [hyperparameters["nurbs_degree"], hyperparameters["nurbs_degree"]],
+                device=device,
+            )
+
+        optimizer_dict = {
+            config_dictionary.initial_learning_rate: hyperparameters[
+                "initial_learning_rate"
+            ],
+            config_dictionary.tolerance: 0,
+            config_dictionary.max_epoch: 3,
+            config_dictionary.batch_size: hyperparameters["sample_limit"] * 2,
+            config_dictionary.log_step: 1,
+            config_dictionary.early_stopping_delta: 1e-4,
+            config_dictionary.early_stopping_patience: 150,
+            config_dictionary.early_stopping_window: 150,
+        }
+        scheduler_dict = {
+            config_dictionary.scheduler_type: hyperparameters["scheduler"],
+            config_dictionary.gamma: hyperparameters["gamma"],
+            config_dictionary.min: hyperparameters["min_learning_rate"],
+            config_dictionary.max: hyperparameters["max_learning_rate"],
+            config_dictionary.step_size_up: hyperparameters["step_size_up"],
+            config_dictionary.reduce_factor: hyperparameters["reduce_factor"],
+            config_dictionary.patience: hyperparameters["patience"],
+            config_dictionary.threshold: hyperparameters["threshold"],
+            config_dictionary.cooldown: hyperparameters["cooldown"],
+        }
+        ideal_surface_regularizer = IdealSurfaceRegularizer(reduction_dimensions=(1,))
+        smoothness_regularizer = SmoothnessRegularizer(reduction_dimensions=(1,))
+        regularizers = [
+            ideal_surface_regularizer,
+            smoothness_regularizer,
+        ]
+        constraint_dict = {
+            config_dictionary.regularizers: regularizers,
+            config_dictionary.initial_lambda_energy: 0.1,
+            config_dictionary.rho_energy: 1.0,
+            config_dictionary.energy_tolerance: 0.01,
+            config_dictionary.weight_smoothness: 0.005,
+            config_dictionary.weight_ideal_surface: 0.005,
+        }
+        optimization_configuration = {
+            config_dictionary.optimization: optimizer_dict,
+            config_dictionary.scheduler: scheduler_dict,
+            config_dictionary.constraints: constraint_dict,
+        }
+        data: dict[
+            str,
+            CalibrationDataParser
+            | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]],
+        ] = {
+            config_dictionary.data_parser: PaintCalibrationDataParser(
+                sample_limit=hyperparameters["sample_limit"],
+            ),
+            config_dictionary.heliostat_data_mapping: [
+                mapping
+                for mapping in heliostat_data_mapping
+                if mapping[0] in selected_heliostats
+            ],
+        }
+
+        data_plot: dict[
+            str,
+            CalibrationDataParser
+            | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]],
+        ] = {
+            config_dictionary.data_parser: PaintCalibrationDataParser(
+                sample_limit=1,
+            ),
+            config_dictionary.heliostat_data_mapping: [
+                mapping
+                for mapping in heliostat_data_mapping
+                if mapping[0] in selected_heliostats
+            ],
+        }
+
+        deflectometry_data = data_for_flux_plots(
+            scenario=scenario_deflectometry,
+            ddp_setup=ddp_setup,
+            heliostat_data=data_plot,
+            device=device,
+        )
+
+        surface_reconstructor = SurfaceReconstructor(
+            ddp_setup=ddp_setup,
+            scenario=scenario,
+            data=data,
+            optimization_configuration=optimization_configuration,
+            number_of_surface_points=number_of_surface_points_per_facet,
+            bitmap_resolution=torch.tensor([256, 256], device=device),
+            device=device,
+        )
+
+        loss_definition = loss_functions.KLDivergenceLoss()
+
+        plot_data_before = data_for_flux_plots(
+            scenario=scenario,
+            ddp_setup=ddp_setup,
+            heliostat_data=data_plot,
+            device=device,
+        )
+
+        per_heliostat_losses = surface_reconstructor.reconstruct_surfaces(
+            loss_definition=loss_definition, device=device
+        )
+
+        plot_data_after = data_for_flux_plots(
+            scenario=scenario,
+            ddp_setup=ddp_setup,
+            heliostat_data=data_plot,
+            device=device,
+        )
+
+        for heliostat_group in scenario.heliostat_field.heliostat_groups:
+            for index, name in enumerate(heliostat_group.names):
+                loss_dict.setdefault(name, {})
+                loss_dict[name]["loss"] = per_heliostat_losses[index].detach().item()
+
+        flux_data = merge_data(plot_data_before, plot_data_after)
+
+        for group in scenario.heliostat_field.heliostat_groups:
+            for name, position in zip(group.names, group.positions):
+                loss_dict[name]["position"] = position.clone().detach().cpu().tolist()
+
+        results = {
+            "loss": loss_dict,
+            "flux": flux_data,
+            "deflectometry": deflectometry_data,
+        }
+
+    return results
+
+
+if __name__ == "__main__":
+    """
+    Generate results with the optimized parameters.
+
+    Parameters
+    ----------
+    config : str
+        Path to the configuration file.
+    device : str
+        Device to use for the computation.
+    data_dir : str
+        Path to the data directory.
+    results_dir : str
+        Path to where the results will be saved.
+    scenarios_dir : str
+        Path to the directory containing the scenarios.
+    """
+    # Set default location for configuration file.
+    script_dir = pathlib.Path(__file__).resolve().parent
+    default_config_path = script_dir / "config.yaml"
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--config",
+        type=str,
+        help="Path to the YAML configuration file.",
+        default=default_config_path,
+    )
+
+    # Parse the config argument first to load the configuration.
+    args, unknown = parser.parse_known_args()
+    config_path = pathlib.Path(args.config)
+    config = {}
+    if config_path.exists():
+        try:
+            with open(config_path, "r") as f:
+                config = yaml.safe_load(f)
+        except yaml.YAMLError as exc:
+            warnings.warn(f"Error parsing YAML file: {exc}")
+    else:
+        warnings.warn(
+            f"Warning: Configuration file not found at {config_path}. Using defaults."
+        )
+
+    # Add remaining arguments to the parser with defaults loaded from the config.
+    data_dir_default = config.get("data_dir", "./paint_data")
+    device_default = config.get("device", "cuda")
+    scenarios_dir_default = config.get(
+        "scenarios_dir", "./examples/hyperparameter_optimization/scenarios"
+    )
+    results_dir_default = config.get(
+        "results_dir", "./examples/hyperparameter_optimization/results"
+    )
+
+    parser.add_argument(
+        "--device",
+        type=str,
+        help="Device to use.",
+        default=device_default,
+    )
+    parser.add_argument(
+        "--data_dir",
+        type=str,
+        help="Path to downloaded paint data.",
+        default=data_dir_default,
+    )
+    parser.add_argument(
+        "--scenarios_dir",
+        type=str,
+        help="Path to directory containing the generated scenarios.",
+        default=scenarios_dir_default,
+    )
+    parser.add_argument(
+        "--results_dir",
+        type=str,
+        help="Path to save the results.",
+        default=results_dir_default,
+    )
+
+    # Re-parse the full set of arguments.
+    args = parser.parse_args(args=unknown)
+    device = get_device(torch.device(args.device))
+    data_dir = pathlib.Path(args.data_dir)
+    results_dir = pathlib.Path(args.results_dir)
+
+    # Define scenario path.
+    scenario_file = pathlib.Path(args.scenarios_dir) / "ideal_scenario_surface.h5"
+    if not scenario_file.exists():
+        raise FileNotFoundError(
+            f"The reconstruction scenario located at {scenario_file} could not be found! Please run the ``generate_scenarios.py`` script to generate this scenario, or adjust the file path and try again."
+        )
+
+    viable_heliostats_data = (
+        pathlib.Path(args.results_dir) / "viable_heliostats_surface.json"
+    )
+    if not viable_heliostats_data.exists():
+        raise FileNotFoundError(
+            f"The viable heliostat list located at {viable_heliostats_data} could not be found! Please run the ``generate_viable_heliostats_list.py`` script to generate this list, or adjust the file path and try again."
+        )
+
+    # Load viable heliostats data.
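+    # The JSON file is a list of objects as written by
+    # ``generate_viable_heliostats_list.py``, for example:
+    # [
+    #     {
+    #         "name": "AB12",
+    #         "calibrations": ["..."],
+    #         "kinematic_reconstruction_flux_images": ["..."],
+    #         "surface_reconstruction_flux_images": ["..."],
+    #         "properties": "..."
+    #     }
+    # ]
+    # Only the name, the calibration paths, and the surface reconstruction flux
+    # images are used here.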
+ with open(viable_heliostats_data, "r") as f: + viable_heliostats = json.load(f) + + heliostat_data_mapping: list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] = [ + ( + item["name"], + [pathlib.Path(p) for p in item["calibrations"]], + [pathlib.Path(p) for p in item["surface_reconstruction_flux_images"]], + ) + for item in viable_heliostats + ] + + with open(results_dir / "hpo_results_surface.json", "r") as file: + hyperparameters = json.load(file) + + reconstruction_results = generate_reconstruction_results( + scenario_path=scenario_file, + heliostat_data_mapping=heliostat_data_mapping, + hyperparameters=hyperparameters, + device=device, + ) + + results_path = pathlib.Path(args.results_dir) / "surface_reconstruction_results.pt" + if not results_path.parent.is_dir(): + results_path.parent.mkdir(parents=True, exist_ok=True) + + torch.save(reconstruction_results, results_path) + print(f"Reconstruction results saved to {results_path}") diff --git a/examples/hyperparameter_optimization/generate_scenarios.py b/examples/hyperparameter_optimization/generate_scenarios.py new file mode 100644 index 000000000..514ce1629 --- /dev/null +++ b/examples/hyperparameter_optimization/generate_scenarios.py @@ -0,0 +1,482 @@ +import argparse +import json +import pathlib +import random +import warnings + +import paint.util.paint_mappings as paint_mappings +import torch +import yaml +from sklearn.cluster import KMeans + +from artist.data_parser import paint_scenario_parser +from artist.scenario.configuration_classes import ( + LightSourceConfig, + LightSourceListConfig, +) +from artist.scenario.h5_scenario_generator import H5ScenarioGenerator +from artist.util import config_dictionary, set_logger_config +from artist.util.environment_setup import get_device + +set_logger_config() + +torch.manual_seed(7) +torch.cuda.manual_seed(7) + + +def find_latest_deflectometry_file( + heliostat_name: str, data_directory: pathlib.Path +) -> pathlib.Path | None: + """ + Find the latest deflectometry HDF5 file for a given heliostat. + + Parameters + ---------- + heliostat_name : str + Heliostat name being considered. + data_directory : Path + Data directory containing ``PAINT`` data. + + Returns + ------- + pathlib.Path | None + Path to the latest deflectometry file or None. + """ + search_path = ( + pathlib.Path(data_directory) + / heliostat_name + / paint_mappings.SAVE_DEFLECTOMETRY + ) + pattern = f"{heliostat_name}-filled*.h5" + files = sorted(search_path.glob(pattern)) + if not files: + return None + return files[-1] + + +def find_heliostats( + heliostat_properties_list: list[tuple[str, pathlib.Path]], + power_plant_position: torch.Tensor, + number_of_heliostats: int, + random_seed: int = 7, +) -> list[tuple[str, pathlib.Path]]: + """ + Select heliostats evenly but randomly distributed around the tower. + + Parameters + ---------- + heliostat_properties_list : list[tuple[str, pathlib.Path]] + List of heliostat names and paths. + power_plant_position : torch.Tensor + Tower position in WGS84. + Tensor of shape [3]. + number_of_heliostats : int + Number of heliostats to select. + random_seed : int + Random seed for reproducibility (default is 7). + + Returns + ------- + list[tuple[str, pathlib.Path]] + Selected heliostats. 
+ """ + random.seed(random_seed) + + if len(heliostat_properties_list) < number_of_heliostats: + raise ValueError("Not enough heliostats available.") + + tower_lat, tower_lon, _ = power_plant_position + + positions = [] + heliostats = [] + + for name, path in heliostat_properties_list: + with open(path, "r") as f: + data = json.load(f) + lat, lon, _ = data["heliostat_position"] + + positions.append([lat - tower_lat, lon - tower_lon]) + heliostats.append((name, path)) + + features = torch.tensor(positions, dtype=torch.float32) + + kmeans = KMeans( + n_clusters=number_of_heliostats, + random_state=random_seed, + n_init="auto", + ) + labels = kmeans.fit_predict(features.numpy()) + + selected_indices = [] + for cluster_id in range(number_of_heliostats): + cluster_members = torch.where(torch.tensor(labels) == cluster_id)[0].tolist() + if cluster_members: + selected_indices.append(random.choice(cluster_members)) + + if len(selected_indices) < number_of_heliostats: + all_indices = set(range(len(heliostats))) + used = set(selected_indices) + remaining = list(all_indices - used) + random.shuffle(remaining) + selected_indices.extend( + remaining[: number_of_heliostats - len(selected_indices)] + ) + + selected_heliostats = [heliostats[i] for i in selected_indices] + + return selected_heliostats + + +def generate_ideal_scenario( + scenario_path: pathlib.Path, + tower_file_path: pathlib.Path, + heliostat_properties_list: list[tuple[str, pathlib.Path]], + number_of_heliostats: int, + device: torch.device | None = None, +) -> list[tuple[str, pathlib.Path]]: + """ + Generate an ideal HDF5 scenario for the field optimizations. + + Parameters + ---------- + scenario_path : pathlib.Path + Path to save the generated HDF5 scenario. + tower_file_path : pathlib.Path + Path to the tower measurements file. + heliostat_properties_list : list[tuple[str, pathlib.Path]] + List of heliostat names and their property files to include in the scenario. + number_of_heliostats : int + Number of heliostats to select. + device : torch.device | None + The device on which to perform computations or load tensors and models (default is None). + If None, ``ARTIST`` will automatically select the most appropriate + device (CUDA or CPU) based on availability and OS. + + Returns + ------- + list[tuple[str, pathlib.Path]] + List of selected heliostats. + """ + device = get_device(device=device) + + # Generate power plant configuration and target area list. + power_plant_config, target_area_list_config = ( + paint_scenario_parser.extract_paint_tower_measurements( + tower_measurements_path=tower_file_path, device=device + ) + ) + + selected_heliostats_list = find_heliostats( + number_of_heliostats=number_of_heliostats, + heliostat_properties_list=heliostat_properties_list, + power_plant_position=power_plant_config.power_plant_position, + ) + + # Set up light source configuration. + light_source1_config = LightSourceConfig( + light_source_key="sun_1", + light_source_type=config_dictionary.sun_key, + number_of_rays=10, + distribution_type=config_dictionary.light_source_distribution_is_normal, + mean=0.0, + covariance=4.3681e-06, + ) + light_source_list = [light_source1_config] + light_source_list_config = LightSourceListConfig( + light_source_list=light_source_list + ) + + # Generate heliostat list configuration. 
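+    # The heliostats used here were chosen by find_heliostats above, which
+    # clusters the heliostat positions (relative to the tower) into
+    # number_of_heliostats KMeans clusters and picks one random member per
+    # cluster, so the selection is spread evenly across the field.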
+    heliostat_list_config, prototype_config = (
+        paint_scenario_parser.extract_paint_heliostats_ideal_surface(
+            paths=selected_heliostats_list,
+            power_plant_position=power_plant_config.power_plant_position,
+            device=device,
+        )
+    )
+
+    # Generate scenario.
+    scenario_generator = H5ScenarioGenerator(
+        file_path=scenario_path,
+        power_plant_config=power_plant_config,
+        target_area_list_config=target_area_list_config,
+        light_source_list_config=light_source_list_config,
+        prototype_config=prototype_config,
+        heliostat_list_config=heliostat_list_config,
+    )
+    scenario_generator.generate_scenario()
+
+    return selected_heliostats_list
+
+
+def generate_fitted_scenario(
+    data_directory: pathlib.Path,
+    scenario_path: pathlib.Path,
+    tower_file_path: pathlib.Path,
+    selected_heliostats_list: list[tuple[str, pathlib.Path]],
+    device: torch.device | None = None,
+) -> None:
+    """
+    Generate a deflectometry HDF5 scenario for the evaluation of the field optimizations.
+
+    Parameters
+    ----------
+    data_directory : pathlib.Path
+        Path to the data directory.
+    scenario_path : pathlib.Path
+        Path to where the scenarios will be saved.
+    tower_file_path : pathlib.Path
+        Path to the tower data file.
+    selected_heliostats_list : list[tuple[str, pathlib.Path]]
+        List of heliostat names and property file paths to include in the scenario.
+    device : torch.device | None
+        The device on which to perform computations or load tensors and models (default is None).
+        If None, ``ARTIST`` will automatically select the most appropriate
+        device (CUDA or CPU) based on availability and OS.
+    """
+    device = get_device(device=device)
+
+    # Include the power plant configuration.
+    power_plant_config, target_area_list_config = (
+        paint_scenario_parser.extract_paint_tower_measurements(
+            tower_measurements_path=tower_file_path,
+            device=device,
+        )
+    )
+
+    # Include the light source configuration.
+    light_source1_config = LightSourceConfig(
+        light_source_key="sun_1",
+        light_source_type=config_dictionary.sun_key,
+        number_of_rays=10,
+        distribution_type=config_dictionary.light_source_distribution_is_normal,
+        mean=0.0,
+        covariance=4.3681e-06,
+    )
+
+    # Create a list of light source configs.
+    light_source_list = [light_source1_config]
+
+    # Include the configuration for the list of light sources.
+    light_source_list_config = LightSourceListConfig(
+        light_source_list=light_source_list
+    )
+
+    heliostat_files_list = [
+        (
+            name,
+            pathlib.Path(
+                f"{data_directory}/{name}/{paint_mappings.SAVE_PROPERTIES}/{name}-{paint_mappings.HELIOSTAT_PROPERTIES_KEY}.json"
+            ),
+            deflectometry_file,
+        )
+        for name, _ in selected_heliostats_list
+        if (
+            deflectometry_file := find_latest_deflectometry_file(name, data_directory)
+        )
+        is not None
+    ]
+
+    # Fit the NURBS.
+    nurbs_fit_optimizer = torch.optim.Adam(
+        [torch.empty(1, requires_grad=True)], lr=1e-3
+    )
+    nurbs_fit_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
+        nurbs_fit_optimizer,
+        mode="min",
+        factor=0.2,
+        patience=50,
+        threshold=1e-7,
+        threshold_mode="abs",
+    )
+
+    # Create the list of heliostats.
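+    # Note: the Adam optimizer above is constructed with a single dummy
+    # parameter; it presumably only serves as a template from which the parser
+    # below derives the optimizer and scheduler settings for the per-facet
+    # NURBS fits.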
+ heliostat_list_config, prototype_config = ( + paint_scenario_parser.extract_paint_heliostats_fitted_surface( + paths=heliostat_files_list, + power_plant_position=power_plant_config.power_plant_position, + number_of_nurbs_control_points=torch.tensor([20, 20], device=device), + deflectometry_step_size=100, + nurbs_fit_method=config_dictionary.fit_nurbs_from_normals, + nurbs_fit_tolerance=1e-10, + nurbs_fit_max_epoch=400, + nurbs_fit_optimizer=nurbs_fit_optimizer, + nurbs_fit_scheduler=nurbs_fit_scheduler, + device=device, + ) + ) + + # Generate the scenario given the defined parameters. + scenario_generator = H5ScenarioGenerator( + file_path=scenario_path, + power_plant_config=power_plant_config, + target_area_list_config=target_area_list_config, + light_source_list_config=light_source_list_config, + prototype_config=prototype_config, + heliostat_list_config=heliostat_list_config, + ) + scenario_generator.generate_scenario() + + +if __name__ == "__main__": + """ + Generate scenarios for the hyperparameter optimizations. + + This will generate ideal surface scenarios for the hyperparameter searches. + For the surface evaluation a deflectometry scenario will also be created. + + Parameters + ---------- + config : str + Path to the configuration file. + device : str + Device to use for the computation. + data_dir : str + Path to the data directory. + tower_file_name : str + Name of the file containing the tower measurements. + results_dir : str + Path to the results directory containing the viable heliostats list. + scenarios_dir : str + Path to the directory for saving the generated scenarios. + """ + # Set default location for configuration file. + script_dir = pathlib.Path(__file__).resolve().parent + default_config_path = script_dir / "config.yaml" + + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + type=str, + help="Path to the YAML configuration file.", + default=default_config_path, + ) + + # Parse the config argument first to load the configuration. + args, unknown = parser.parse_known_args() + config_path = pathlib.Path(args.config) + config = {} + if config_path.exists(): + try: + with open(config_path, "r") as f: + config = yaml.safe_load(f) + except yaml.YAMLError as exc: + warnings.warn(f"Error parsing YAML file: {exc}") + else: + warnings.warn( + f"Warning: Configuration file not found at {config_path}. Using defaults." + ) + + # Add remaining arguments to the parser with defaults loaded from the config. 
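+    # Precedence of configuration values: explicit command line arguments
+    # override values from the YAML configuration file, which in turn override
+    # the hardcoded defaults passed to config.get() below.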
+    device_default = config.get("device", "cuda")
+    data_dir_default = config.get("data_dir", "./paint_data")
+    tower_file_name_default = config.get(
+        "tower_file_name", "WRI1030197-tower-measurements.json"
+    )
+    results_dir_default = config.get(
+        "results_dir", "./examples/hyperparameter_optimization/results"
+    )
+    scenarios_dir_default = config.get(
+        "scenarios_dir", "./examples/hyperparameter_optimization/scenarios"
+    )
+
+    parser.add_argument(
+        "--device",
+        type=str,
+        help="Device to use.",
+        default=device_default,
+    )
+    parser.add_argument(
+        "--data_dir",
+        type=str,
+        help="Path to the data directory.",
+        default=data_dir_default,
+    )
+    parser.add_argument(
+        "--tower_file_name",
+        type=str,
+        help="Name of the file containing the tower measurements.",
+        default=tower_file_name_default,
+    )
+    parser.add_argument(
+        "--results_dir",
+        type=str,
+        help="Path to the results directory containing the viable heliostats list.",
+        default=results_dir_default,
+    )
+    parser.add_argument(
+        "--scenarios_dir",
+        type=str,
+        help="Path to the directory for saving the generated scenarios.",
+        default=scenarios_dir_default,
+    )
+
+    # Re-parse the full set of arguments.
+    args = parser.parse_args(args=unknown)
+
+    device = get_device(torch.device(args.device))
+    data_dir = pathlib.Path(args.data_dir)
+    tower_file = data_dir / args.tower_file_name
+
+    for case in ["kinematic", "surface"]:
+        viable_heliostats_data = (
+            pathlib.Path(args.results_dir) / f"viable_heliostats_{case}.json"
+        )
+        if not viable_heliostats_data.exists():
+            raise FileNotFoundError(
+                f"The viable heliostat list located at {viable_heliostats_data} could not be found! Please run the ``generate_viable_heliostats_list.py`` script to generate this list, or adjust the file path and try again."
+            )
+
+        scenario_path = pathlib.Path(args.scenarios_dir) / f"ideal_scenario_{case}.h5"
+        if not scenario_path.parent.exists():
+            scenario_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Load viable heliostats data.
+        with open(viable_heliostats_data, "r") as f:
+            viable_heliostats = json.load(f)
+
+        heliostat_properties_list: list[tuple[str, pathlib.Path]] = [
+            (
+                item["name"],
+                pathlib.Path(item["properties"]),
+            )
+            for item in viable_heliostats
+        ]
+
+        if scenario_path.exists():
+            print(
+                f"Scenario found at {scenario_path}... continue without generating scenario."
+            )
+            # Fall back to all viable heliostats so that the deflectometry
+            # scenario below can still be generated when only the ideal scenario
+            # already exists. Since generate_ideal_scenario() selects
+            # len(viable_heliostats) heliostats from this same list, the
+            # selection is equivalent.
+            selected_heliostats_list = heliostat_properties_list
+        else:
+            print(f"Scenario not found. Generating a new one at {scenario_path}...")
+            number_of_heliostats = len(viable_heliostats)
+            selected_heliostats_list = generate_ideal_scenario(
+                scenario_path=scenario_path,
+                tower_file_path=tower_file,
+                heliostat_properties_list=heliostat_properties_list,
+                number_of_heliostats=number_of_heliostats,
+                device=device,
+            )
+
+        if case == "surface":
+            scenario_path = (
+                pathlib.Path(args.scenarios_dir) / f"deflectometry_scenario_{case}.h5"
+            )
+            if not scenario_path.parent.exists():
+                scenario_path.parent.mkdir(parents=True, exist_ok=True)
+
+            if scenario_path.exists():
+                print(
+                    f"Scenario found at {scenario_path}... continue without generating scenario."
+                )
+            else:
+                print(f"Scenario not found. Generating a new one at {scenario_path}...")
+                generate_fitted_scenario(
+                    data_directory=data_dir,
+                    scenario_path=scenario_path,
+                    tower_file_path=tower_file,
+                    selected_heliostats_list=selected_heliostats_list,
+                    device=device,
+                )
diff --git a/examples/hyperparameter_optimization/generate_viable_heliostats_list.py b/examples/hyperparameter_optimization/generate_viable_heliostats_list.py
new file mode 100644
index 000000000..a64b6cedb
--- /dev/null
+++ b/examples/hyperparameter_optimization/generate_viable_heliostats_list.py
@@ -0,0 +1,319 @@
+import argparse
+import json
+import pathlib
+import re
+import warnings
+
+import paint.util.paint_mappings as paint_mappings
+import torch
+import yaml
+
+from artist.util.environment_setup import get_device
+
+
+def find_viable_heliostats(
+    data_directory: pathlib.Path,
+    minimum_number_of_measurements: int,
+    kinematic_reconstruction_image_type: str,
+    surface_reconstruction_image_type: str,
+    excluded_heliostats: set[str],
+    require_deflectometry_surface: bool = False,
+) -> list[
+    tuple[str, list[pathlib.Path], list[pathlib.Path], list[pathlib.Path], pathlib.Path]
+]:
+    """
+    Find heliostats that have at least a minimum number of valid calibration files.
+
+    This function iterates through a data directory to find all heliostats that have at least the minimum number
+    of valid calibration files. All paths are collected, and a sorted list of heliostats is returned, containing tuples
+    with the heliostat name, the paths to the calibration files, and the paths to the flux images for kinematic and
+    surface reconstruction.
+
+    Parameters
+    ----------
+    data_directory : pathlib.Path
+        The path to the data directory.
+    minimum_number_of_measurements : int
+        The minimum number of calibration files required.
+    kinematic_reconstruction_image_type : str
+        The type of calibration image to use for the kinematic reconstruction, i.e., 'flux' or 'flux-centered'.
+    surface_reconstruction_image_type : str
+        The type of calibration image to use for the surface reconstruction, i.e., 'flux' or 'flux-centered'.
+    excluded_heliostats : set[str]
+        Excluded heliostats.
+    require_deflectometry_surface : bool
+        Flag indicating whether deflectometry files need to be available (default is False).
+
+    Returns
+    -------
+    list[tuple[str, list[pathlib.Path], list[pathlib.Path], list[pathlib.Path], pathlib.Path]]
+        A list of tuples containing:
+        - The heliostat name.
+        - A list of valid calibration file paths.
+        - A list of flux image file paths for kinematic reconstruction.
+        - A list of flux image file paths for surface reconstruction.
+        - The associated heliostat properties path.
+ """ + heliostat_name_pattern = re.compile(r"^[A-Z]{2}[0-9]{2}$") + found_heliostats = [] + + json_suffix_to_remove = ( + paint_mappings.CALIBRATION_PROPERTIES_IDENTIFIER.removesuffix(".json") + ) + + all_heliostats = ( + d + for d in data_directory.iterdir() + if d.is_dir() and heliostat_name_pattern.match(d.name) + ) + + for heliostat_directory in sorted(all_heliostats): + heliostat_name = heliostat_directory.name + + if heliostat_name in excluded_heliostats: + continue + + if require_deflectometry_surface: + deflectometry_dir = heliostat_directory / paint_mappings.SAVE_DEFLECTOMETRY + pattern = f"{heliostat_name}-filled*.h5" + deflectometry_files = sorted(deflectometry_dir.glob(pattern)) + + if not deflectometry_files: + continue + + properties_path = ( + heliostat_directory + / paint_mappings.SAVE_PROPERTIES + / f"{paint_mappings.HELIOSTAT_PROPERTIES_SAVE_NAME % heliostat_name}" + ) + calibration_dir = heliostat_directory / paint_mappings.SAVE_CALIBRATION + + if not calibration_dir.exists(): + continue + + valid_calibration_files = [] + flux_images_kinematic_reconstruction = [] + flux_images_surface_reconstruction = [] + + for calibration_file_path in sorted( + calibration_dir.glob(f"*{paint_mappings.CALIBRATION_PROPERTIES_IDENTIFIER}") + ): + try: + with calibration_file_path.open("r") as f: + calibration_data = json.load(f) + focal_spot_data = calibration_data.get( + paint_mappings.FOCAL_SPOT_KEY, {} + ) + + if paint_mappings.UTIS_KEY in focal_spot_data: + # Check for the existence of the corresponding flux image. + file_stem = calibration_file_path.stem.removesuffix( + json_suffix_to_remove + ) + kinematic_reconstruction_flux_image_path = ( + calibration_dir + / f"{file_stem}-{kinematic_reconstruction_image_type}.png" + ) + surface_reconstruction_flux_image_path = ( + calibration_dir + / f"{file_stem}-{surface_reconstruction_image_type}.png" + ) + + if ( + kinematic_reconstruction_flux_image_path.exists() + and surface_reconstruction_flux_image_path.exists() + ): + valid_calibration_files.append(calibration_file_path) + flux_images_kinematic_reconstruction.append( + kinematic_reconstruction_flux_image_path + ) + flux_images_surface_reconstruction.append( + surface_reconstruction_flux_image_path + ) + except Exception as e: + print(f"Warning: Skipping {calibration_file_path} due to error: {e}") + + if len(valid_calibration_files) >= minimum_number_of_measurements: + found_heliostats.append( + ( + heliostat_name, + valid_calibration_files[-minimum_number_of_measurements:], + flux_images_kinematic_reconstruction[ + -minimum_number_of_measurements: + ], + flux_images_surface_reconstruction[ + -minimum_number_of_measurements: + ], + properties_path, + ) + ) + + return sorted(found_heliostats, key=lambda x: x[0]) + + +if __name__ == "__main__": + """ + Generate list of viable heliostats for the hyperparameter optimizations. + + This script identifies a list of viable heliostats, i.e., containing a minimum number of valid measurements, for + the optimization process. + + Parameters + ---------- + config : str + Path to the configuration file. + device : str + Device to use for the computation. + data_dir : str + Path to the data directory. + results_dir : str + Path to where the results will be saved. + minimum_number_of_measurements : int + Minimum number of calibration measurements per heliostat required. + kinematic_reconstruction_image_type : str + Type of calibration image to use for the kinematic reconstruction, i.e., flux or flux-centered. 
+ surface_reconstruction_image_type : str + Type of calibration image to use for the surface reconstruction, i.e., flux or flux-centered. + """ + # Set default location for configuration file. + script_dir = pathlib.Path(__file__).resolve().parent + default_config_path = script_dir / "config.yaml" + + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + type=str, + help="Path to the YAML configuration file.", + default=default_config_path, + ) + + # Parse the config argument first to load the configuration. + args, unknown = parser.parse_known_args() + config_path = pathlib.Path(args.config) + config = {} + if config_path.exists(): + try: + with open(config_path, "r") as f: + config = yaml.safe_load(f) + except yaml.YAMLError as exc: + warnings.warn(f"Error parsing YAML file: {exc}") + else: + warnings.warn( + f"Warning: Configuration file not found at {config_path}. Using defaults." + ) + + # Add remaining arguments to the parser with defaults loaded from the config. + device_default = config.get("device", "cuda") + data_dir_default = config.get("data_dir", "./paint_data") + results_dir_default = config.get( + "results_dir", "./examples/hyperparameter_optimization/results" + ) + minimum_number_of_measurements_default = config.get( + "minimum_number_of_measurements", 8 + ) + kinematic_reconstruction_image_type_default = config.get( + "kinematic_reconstruction_image_type", "flux" + ) + surface_reconstruction_image_type_default = config.get( + "surface_reconstruction_image_type", "flux-centered" + ) + excluded_heliostats_default = config.get( + "excluded_heliostats_for_reconstruction", ["AA39"] + ) + + parser.add_argument( + "--device", + type=str, + help="Device to use.", + default=device_default, + ) + parser.add_argument( + "--data_dir", + type=str, + help="Path to the data directory.", + default=data_dir_default, + ) + parser.add_argument( + "--results_dir", + type=str, + help="Path to where the results will be saved.", + default=results_dir_default, + ) + parser.add_argument( + "--minimum_number_of_measurements", + type=int, + help="Minimum number of calibration measurements per heliostat required.", + default=minimum_number_of_measurements_default, + ) + parser.add_argument( + "--kinematic_reconstruction_image_type", + type=str, + help="Type of calibration image to use for the kinematic reconstruction, i.e., flux or flux-centered.", + choices=["flux", "flux-centered"], + default=kinematic_reconstruction_image_type_default, + ) + parser.add_argument( + "--surface_reconstruction_image_type", + type=str, + help="Type of calibration image to use for the surface reconstruction, i.e., flux or flux-centered.", + choices=["flux", "flux-centered"], + default=surface_reconstruction_image_type_default, + ) + parser.add_argument( + "--excluded_heliostats_for_reconstruction", + type=str, + help="Heliostat names to exclude.", + nargs="+", + default=excluded_heliostats_default, + ) + + # Re-parse the full set of arguments. 
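+    # Only the tokens left over from the first parse_known_args() call are
+    # parsed here, so the already consumed --config argument is not parsed
+    # twice.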
+    args = parser.parse_args(args=unknown)
+    device = get_device(torch.device(args.device))
+    data_dir = pathlib.Path(args.data_dir)
+    number_measurements = args.minimum_number_of_measurements
+    excluded_heliostats: set[str] = set(args.excluded_heliostats_for_reconstruction)
+
+    for case in ["kinematic", "surface"]:
+        if case == "kinematic":
+            require_deflectometry_surface = False
+        else:
+            require_deflectometry_surface = True
+
+        heliostat_data_list = find_viable_heliostats(
+            data_directory=data_dir,
+            minimum_number_of_measurements=number_measurements,
+            kinematic_reconstruction_image_type=args.kinematic_reconstruction_image_type,
+            surface_reconstruction_image_type=args.surface_reconstruction_image_type,
+            excluded_heliostats=excluded_heliostats,
+            require_deflectometry_surface=require_deflectometry_surface,
+        )
+
+        print(f"Selected {len(heliostat_data_list)} heliostats.")
+
+        serializable_data = [
+            {
+                "name": heliostat_name,
+                "calibrations": [
+                    str(calibration_path) for calibration_path in calibration_paths
+                ],
+                "kinematic_reconstruction_flux_images": [
+                    str(flux_path) for flux_path in kinematic_reconstruction_flux_paths
+                ],
+                "surface_reconstruction_flux_images": [
+                    str(flux_path)
+                    for flux_path in surface_reconstruction_flux_paths
+                ],
+                "properties": str(properties_path),
+            }
+            for heliostat_name, calibration_paths, kinematic_reconstruction_flux_paths, surface_reconstruction_flux_paths, properties_path in heliostat_data_list
+        ]
+
+        results_path = pathlib.Path(args.results_dir) / f"viable_heliostats_{case}.json"
+
+        if not results_path.parent.is_dir():
+            results_path.parent.mkdir(parents=True, exist_ok=True)
+
+        with open(results_path, "w") as output_file:
+            json.dump(serializable_data, output_file, indent=2)
+
+        print(f"Saved {len(serializable_data)} heliostat entries to {results_path}")
diff --git a/examples/hyperparameter_optimization/hpo_config.yaml b/examples/hyperparameter_optimization/hpo_config.yaml
deleted file mode 100644
index cb0fea4e0..000000000
--- a/examples/hyperparameter_optimization/hpo_config.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
-metadata_root: "./"
-metadata_file_name: "calibration_metadata_all_heliostats.csv"
-data_dir: "base/path/data"
-tower_file_name: "WRI1030197-tower-measurements.json"
-scenarios_dir: "./scenarios"
-results_dir: "./results"
-plots_dir: "./plots"
-propulate_logs_dir: "./logs"
-calibration_image_type: "flux-centered"
-heliostat_for_reconstruction:
-  AA39: [244862, 270398, 246213, 258959]
-reconstruction_parameter_ranges:
-  number_of_surface_points: [30, 110]
-  number_of_control_points: [4, 20]
-  number_of_rays: [50, 200]
-  nurbs_degree: [2, 3]
-  ideal_surface_loss_weight: [0.0, 2.0]
-  initial_learning_rate: [1e-7, 1e-3]
-  reduce_factor: [0.05, 0.5]
-  patience: [5, 25]
-  threshold: [1e-6, 1e-3]
-device: "cuda"
diff --git a/examples/hyperparameter_optimization/search_kinematic_reconstruction.py b/examples/hyperparameter_optimization/search_kinematic_reconstruction.py
new file mode 100644
index 000000000..524e1c9c6
--- /dev/null
+++ b/examples/hyperparameter_optimization/search_kinematic_reconstruction.py
@@ -0,0 +1,406 @@
+import argparse
+import json
+import logging
+import pathlib
+import pickle
+import random
+import re
+import warnings
+from functools import partial
+
+import h5py
+import torch
+import yaml
+from mpi4py import MPI
+from propulate import Propulator
+from propulate.utils import get_default_propagator, set_logger_config
+
+from artist.core import loss_functions
+from artist.core.kinematic_reconstructor import KinematicReconstructor
+from artist.data_parser.calibration_data_parser import CalibrationDataParser
+from artist.data_parser.paint_calibration_parser import PaintCalibrationDataParser
+from artist.scenario.scenario import Scenario
+from artist.util import config_dictionary
+from artist.util.environment_setup import get_device
+
+log = logging.getLogger(__name__)
+"""A logger for the hyperparameter search."""
+
+
+def kinematic_reconstructor_for_hpo(
+    params: dict[str, float],
+    scenario_path: pathlib.Path,
+    heliostat_data_mapping: list[tuple[str, list[pathlib.Path], list[pathlib.Path]]],
+) -> float:
+    """
+    Set up a kinematic reconstructor used in a hyperparameter search.
+
+    Parameters
+    ----------
+    params : dict[str, float]
+        Combination of reconstruction parameters.
+    scenario_path : pathlib.Path
+        Path to the kinematic reconstruction scenario.
+    heliostat_data_mapping : list[tuple[str, list[pathlib.Path], list[pathlib.Path]]]
+        Data mapping from heliostats to the calibration files used to reconstruct the kinematic.
+
+    Returns
+    -------
+    float
+        The loss for a specific parameter configuration.
+    """
+    torch.manual_seed(7)
+    torch.cuda.manual_seed(7)
+
+    # Get device.
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+    device = torch.device(f"cuda:{rank % torch.cuda.device_count()}")
+    device = get_device(device)
+
+    # Set up ARTIST to run in single device mode.
+    ddp_setup = {
+        config_dictionary.device: device,
+        config_dictionary.is_distributed: False,
+        config_dictionary.is_nested: False,
+        config_dictionary.rank: 0,
+        config_dictionary.world_size: 1,
+        config_dictionary.process_subgroup: None,
+        config_dictionary.groups_to_ranks_mapping: {0: [0]},
+        config_dictionary.heliostat_group_rank: 0,
+        config_dictionary.heliostat_group_world_size: 1,
+        config_dictionary.ranks_to_groups_mapping: {0: [0]},
+    }
+
+    # Load a scenario from an .h5 file.
+    # The scenario .h5 file should contain a setup with at least one heliostat (with the same name(s)
+    # as the heliostat(s) for which reconstruction data is provided).
+    with h5py.File(scenario_path, "r") as scenario_file:
+        scenario = Scenario.load_scenario_from_hdf5(
+            scenario_file=scenario_file,
+            number_of_surface_points_per_facet=torch.tensor([5, 5], device=device),
+            device=device,
+        )
+
+    # Set number of rays.
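+    # A small, fixed ray count keeps each HPO trial cheap; the rendered flux
+    # images are correspondingly coarser than in a full reconstruction run.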
+ scenario.set_number_of_rays(number_of_rays=4) + + data_parser = PaintCalibrationDataParser( + sample_limit=2, + centroid_extraction_method="UTIS", + ) + data: dict[ + str, + CalibrationDataParser + | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], + ] = { + config_dictionary.data_parser: data_parser, + config_dictionary.heliostat_data_mapping: heliostat_data_mapping, + } + optimizer_dict = { + config_dictionary.initial_learning_rate: params["initial_learning_rate"], + config_dictionary.tolerance: 0.0005, + config_dictionary.max_epoch: 60, + config_dictionary.batch_size: 945, + config_dictionary.log_step: 0, + config_dictionary.early_stopping_delta: 1e-4, + config_dictionary.early_stopping_patience: 15, + config_dictionary.early_stopping_window: 10, + } + scheduler_dict = { + config_dictionary.scheduler_type: params["scheduler"], + config_dictionary.min: params["min_learning_rate"], + config_dictionary.max: params["max_learning_rate"], + config_dictionary.step_size_up: params["step_size_up"], + config_dictionary.reduce_factor: params["reduce_factor"], + config_dictionary.patience: params["patience"], + config_dictionary.threshold: params["threshold"], + config_dictionary.cooldown: params["cooldown"], + config_dictionary.gamma: params["gamma"], + } + optimization_configuration = { + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, + } + + # Create the kinematic reconstructor. + kinematic_reconstructor = KinematicReconstructor( + ddp_setup=ddp_setup, + scenario=scenario, + data=data, + optimization_configuration=optimization_configuration, + reconstruction_method=config_dictionary.kinematic_reconstruction_raytracing, + ) + + loss_definition = loss_functions.FocalSpotLoss(scenario=scenario) + + # Reconstruct the kinematic. + final_loss_per_heliostat = kinematic_reconstructor.reconstruct_kinematic( + loss_definition=loss_definition, device=device + ) + + return ( + final_loss_per_heliostat[torch.isfinite(final_loss_per_heliostat)].mean().item() + ) + + +if __name__ == "__main__": + """ + Perform the hyperparameter search for the kinematic reconstruction and save the results. + + This script executes the hyperparameter search with ``propulate`` and saves the result for + further inspection. + + Parameters + ---------- + config : str + Path to the configuration file. + device : str + Device to use for the computation. + data_dir : str + Path to the data directory. + heliostat_for_reconstruction : dict[str, list[int]] + The heliostat and its calibration numbers. + results_dir : str + Path to where the results will be saved. + scenarios_dir : str + Path to the directory containing the scenarios. + propulate_logs_dir : str + Path to the directory where propulate will write log messages. + parameter_ranges_kinematic : dict[str, int | float] + The reconstruction parameters. + """ + comm = MPI.COMM_WORLD + + rank = comm.Get_rank() + + # Set default location for configuration file. + script_dir = pathlib.Path(__file__).resolve().parent + default_config_path = script_dir / "config.yaml" + + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + type=str, + help="Path to the YAML configuration file.", + default=default_config_path, + ) + + # Parse the config argument first to load the configuration. 
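+    # parse_known_args() consumes only --config here and returns the remaining
+    # CLI tokens untouched, so they can be re-parsed once the YAML-derived
+    # defaults are registered below.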
+    args, unknown = parser.parse_known_args()
+    config_path = pathlib.Path(args.config)
+    config = {}
+    if config_path.exists():
+        try:
+            with open(config_path, "r") as f:
+                config = yaml.safe_load(f)
+        except yaml.YAMLError as exc:
+            warnings.warn(f"Error parsing YAML file: {exc}")
+    else:
+        warnings.warn(
+            f"Warning: Configuration file not found at {config_path}. Using defaults."
+        )
+
+    # Add remaining arguments to the parser with defaults loaded from the config.
+    data_dir_default = config.get("data_dir", "./paint_data")
+    device_default = config.get("device", "cuda")
+    scenarios_dir_default = config.get(
+        "scenarios_dir", "./examples/hyperparameter_optimization/scenarios"
+    )
+    results_dir_default = config.get(
+        "results_dir", "./examples/hyperparameter_optimization/results"
+    )
+    propulate_logs_dir_default = config.get(
+        "propulate_logs_dir", "./examples/hyperparameter_optimization/logs"
+    )
+    parameter_ranges_default = config.get(
+        "parameter_ranges_kinematic",
+        {
+            "initial_learning_rate": [1e-9, 1e-2],
+            "scheduler": ["exponential", "reduce_on_plateau", "cyclic"],
+            "min_learning_rate": [1e-12, 1e-6],
+            "max_learning_rate": [1e-4, 1e-2],
+            "step_size_up": [100, 500],
+            "reduce_factor": [0.05, 0.5],
+            "patience": [3, 50],
+            "threshold": [1e-6, 1e-2],
+            "cooldown": [2, 20],
+            "gamma": [0.85, 0.999],
+        },
+    )
+
+    parser.add_argument(
+        "--device",
+        type=str,
+        help="Device to use.",
+        default=device_default,
+    )
+    parser.add_argument(
+        "--data_dir",
+        type=str,
+        help="Path to downloaded paint data.",
+        default=data_dir_default,
+    )
+    parser.add_argument(
+        "--scenarios_dir",
+        type=str,
+        help="Path to directory containing the generated scenarios.",
+        default=scenarios_dir_default,
+    )
+    parser.add_argument(
+        "--results_dir",
+        type=str,
+        help="Path to save the results.",
+        default=results_dir_default,
+    )
+    parser.add_argument(
+        "--propulate_logs_dir",
+        type=str,
+        help="Path to save propulate log messages.",
+        default=propulate_logs_dir_default,
+    )
+    parser.add_argument(
+        "--parameter_ranges_kinematic",
+        type=eval,
+        help="Parameter ranges used for the reconstruction.",
+        default=parameter_ranges_default,
+    )
+
+    # Re-parse the full set of arguments.
+    args = parser.parse_args(args=unknown)
+
+    device = get_device(torch.device(args.device))
+    data_dir = pathlib.Path(args.data_dir)
+    propulate_logs_dir = pathlib.Path(args.propulate_logs_dir) / "kinematic"
+    results_dir = pathlib.Path(args.results_dir)
+
+    # Define scenario path.
+    scenario_file = pathlib.Path(args.scenarios_dir) / "ideal_scenario_kinematic.h5"
+    if not scenario_file.exists():
+        raise FileNotFoundError(
+            f"The reconstruction scenario located at {scenario_file} could not be found! Please run the ``generate_scenario.py`` script to generate this scenario, or adjust the file path and try again."
+        )
+
+    # Set up separate logger for Propulate optimization.
+    set_logger_config(
+        level=logging.INFO,
+        log_file=f"{propulate_logs_dir}/{pathlib.Path(__file__).stem}.log",
+        log_to_stdout=False,
+        log_rank=True,
+        colors=True,
+    )
+
+    log = logging.getLogger(__name__)
+    rank = comm.Get_rank()
+    log.info(rank)
+
+    seed = 7
+    rng = random.Random(seed + comm.rank)
+
+    viable_heliostats_data = (
+        pathlib.Path(args.results_dir) / "viable_heliostats_kinematic.json"
+    )
+    if not viable_heliostats_data.exists():
+        raise FileNotFoundError(
+            f"The viable heliostat list located at {viable_heliostats_data} could not be found! Please run the ``generate_viable_heliostat_list.py`` script to generate this list, or adjust the file path and try again."
+        )
+
+    # Load viable heliostats data.
+    with open(viable_heliostats_data, "r") as f:
+        viable_heliostats = json.load(f)
+
+    heliostat_data_mapping: list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] = [
+        (
+            item["name"],
+            [pathlib.Path(p) for p in item["calibrations"]],
+            [pathlib.Path(p) for p in item["kinematic_reconstruction_flux_images"]],
+        )
+        for item in viable_heliostats
+    ]
+
+    reconstruction_parameter_ranges: dict[
+        str, tuple[int, ...] | tuple[float, ...] | tuple[str, ...]
+    ] = {}
+
+    for key, value in args.parameter_ranges_kinematic.items():
+        if all(isinstance(x, (int, float)) for x in value):
+            if all(
+                isinstance(x, int) or (isinstance(x, float) and x.is_integer())
+                for x in value
+            ):
+                int_tuple: tuple[int, ...] = tuple(int(x) for x in value)
+                reconstruction_parameter_ranges[key] = int_tuple
+            else:
+                float_tuple: tuple[float, ...] = tuple(float(x) for x in value)
+                reconstruction_parameter_ranges[key] = float_tuple
+        else:
+            str_tuple: tuple[str, ...] = tuple(value)
+            reconstruction_parameter_ranges[key] = str_tuple
+
+    # Set up evolutionary operator.
+    num_generations = 200
+    pop_size = 2 * comm.size
+    propagator = get_default_propagator(
+        pop_size=pop_size,
+        limits=reconstruction_parameter_ranges,
+        crossover_prob=0.7,
+        mutation_prob=0.4,
+        random_init_prob=0.1,
+        rng=rng,
+    )
+
+    loss_fn = partial(
+        kinematic_reconstructor_for_hpo,
+        scenario_path=scenario_file,
+        heliostat_data_mapping=heliostat_data_mapping,
+    )
+
+    # Set up propulator performing actual optimization.
+    propulator = Propulator(
+        loss_fn=loss_fn,
+        propagator=propagator,
+        rng=rng,
+        island_comm=comm,
+        generations=num_generations,
+        checkpoint_path=propulate_logs_dir,
+    )
+
+    # Run optimization and print summary of results.
+    propulator.propulate(
+        logging_interval=1,
+        debug=2,
+    )
+    propulator.summarize(
+        top_n=20,
+        debug=2,
+    )
+
+    hpo_result_file = propulate_logs_dir / "island_0_ckpt.pickle"
+    optimized_parameters_file = results_dir / "hpo_results_kinematic.json"
+
+    # Save hpo results in format to be used by plots.
+    if not hpo_result_file.exists():
+        raise FileNotFoundError(
+            f"The hpo results located at {hpo_result_file} could not be found! Please run the hpo script again to generate the results."
+        )
+
+    with open(hpo_result_file, "rb") as results:
+        data = pickle.load(results)
+
+    data_dict = data[-1]
+    parameters_dict = {}
+
+    for key, value in data_dict.items():
+        if (
+            isinstance(value, str)
+            and re.fullmatch(r"[+-]?\d+(\.\d+)?[eE][+-]?\d+", value) is not None
+        ):
+            parameters_dict[key] = float(value)
+        else:
+            parameters_dict[key] = value
+
+    if not results_dir.is_dir():
+        results_dir.mkdir(parents=True, exist_ok=True)
+
+    with open(optimized_parameters_file, "w") as output_file:
+        json.dump(parameters_dict, output_file, indent=2)
diff --git a/examples/hyperparameter_optimization/search_motor_position_optimization.py b/examples/hyperparameter_optimization/search_motor_position_optimization.py
new file mode 100644
index 000000000..77c1df8bc
--- /dev/null
+++ b/examples/hyperparameter_optimization/search_motor_position_optimization.py
@@ -0,0 +1,392 @@
+import argparse
+import json
+import logging
+import pathlib
+import pickle
+import random
+import re
+import warnings
+from functools import partial
+
+import h5py
+import torch
+import yaml
+from mpi4py import MPI
+from propulate import Propulator
+from propulate.utils import get_default_propagator, set_logger_config
+
+from artist.core import loss_functions
+from artist.core.motor_position_optimizer import MotorPositionsOptimizer
+from artist.scenario.scenario import Scenario
+from artist.util import config_dictionary, utils
+from artist.util.environment_setup import get_device
+
+log = logging.getLogger(__name__)
+"""A logger for the hyperparameter search."""
+
+
+def motor_position_optimizer_for_hpo(
+    params: dict[str, float],
+    scenario_path: pathlib.Path,
+) -> float:
+    """
+    Set up a motor position optimizer used in a hyperparameter search.
+
+    Parameters
+    ----------
+    params : dict[str, float]
+        Combination of optimization parameters.
+    scenario_path : pathlib.Path
+        Path to the scenario.
+
+    Returns
+    -------
+    float
+        The loss for a specific parameter configuration.
+    """
+    torch.manual_seed(7)
+    torch.cuda.manual_seed(7)
+
+    # Get device.
+    comm = MPI.COMM_WORLD
+    rank = comm.Get_rank()
+    device = torch.device(f"cuda:{rank % torch.cuda.device_count()}")
+    device = get_device(device)
+
+    # Set up ARTIST to run in single device mode.
+    ddp_setup = {
+        config_dictionary.device: device,
+        config_dictionary.is_distributed: False,
+        config_dictionary.is_nested: False,
+        config_dictionary.rank: 0,
+        config_dictionary.world_size: 1,
+        config_dictionary.process_subgroup: None,
+        config_dictionary.groups_to_ranks_mapping: {0: [0]},
+        config_dictionary.heliostat_group_rank: 0,
+        config_dictionary.heliostat_group_world_size: 1,
+        config_dictionary.ranks_to_groups_mapping: {0: [0]},
+    }
+
+    # Load a scenario from an .h5 file.
+    with h5py.File(scenario_path, "r") as scenario_file:
+        scenario = Scenario.load_scenario_from_hdf5(
+            scenario_file=scenario_file,
+            change_number_of_control_points_per_facet=torch.tensor(
+                [7, 7], device=device
+            ),
+            device=device,
+        )
+
+    # Set number of rays.
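+    # As in the kinematic search, a deliberately low ray count bounds the cost
+    # of a single trial; production runs would typically use more rays.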
+    scenario.set_number_of_rays(number_of_rays=5)
+
+    optimizer_dict = {
+        config_dictionary.initial_learning_rate: params["initial_learning_rate"],
+        config_dictionary.tolerance: 0.0005,
+        config_dictionary.max_epoch: 100,
+        config_dictionary.batch_size: 250,
+        config_dictionary.log_step: 0,
+        config_dictionary.early_stopping_delta: 1e-4,
+        config_dictionary.early_stopping_patience: 15,
+        config_dictionary.early_stopping_window: 10,
+    }
+    scheduler_dict = {
+        config_dictionary.scheduler_type: params["scheduler"],
+        config_dictionary.min: params["min_learning_rate"],
+        config_dictionary.max: params["max_learning_rate"],
+        config_dictionary.step_size_up: params["step_size_up"],
+        config_dictionary.reduce_factor: params["reduce_factor"],
+        config_dictionary.patience: params["patience"],
+        config_dictionary.threshold: params["threshold"],
+        config_dictionary.cooldown: params["cooldown"],
+        config_dictionary.gamma: params["gamma"],
+    }
+    constraint_dict = {
+        config_dictionary.rho_energy: 1.0,
+        config_dictionary.max_flux_density: 3,
+        config_dictionary.rho_pixel: 1.0,
+        config_dictionary.lambda_lr: 0.1,
+    }
+    optimization_configuration = {
+        config_dictionary.optimization: optimizer_dict,
+        config_dictionary.scheduler: scheduler_dict,
+        config_dictionary.constraints: constraint_dict,
+    }
+
+    # An arbitrary baseline sun position, somewhere in the south-west.
+    baseline_incident_ray_direction = torch.nn.functional.normalize(
+        torch.tensor([0.0, 0.0, 0.0, 1.0], device=device)
+        - torch.tensor([-0.411, -0.706, 0.576, 1.0], device=device),
+        dim=0,
+    )
+
+    # Receiver.
+    target_area_index = 1
+
+    # Target distribution.
+    e_trapezoid = utils.trapezoid_distribution(
+        total_width=256, slope_width=30, plateau_width=180, device=device
+    )
+    u_trapezoid = utils.trapezoid_distribution(
+        total_width=256, slope_width=30, plateau_width=180, device=device
+    )
+    eu_trapezoid = u_trapezoid.unsqueeze(1) * e_trapezoid.unsqueeze(0)
+
+    target_distribution = (eu_trapezoid / eu_trapezoid.sum()) * 10000000
+
+    # Create the motor positions optimizer.
+    motor_positions_optimizer = MotorPositionsOptimizer(
+        ddp_setup=ddp_setup,
+        scenario=scenario,
+        optimization_configuration=optimization_configuration,
+        incident_ray_direction=baseline_incident_ray_direction,
+        target_area_index=target_area_index,
+        ground_truth=target_distribution,
+        dni=500,
+        device=device,
+    )
+    loss = motor_positions_optimizer.optimize(
+        loss_definition=loss_functions.KLDivergenceLoss(), device=device
+    )
+
+    return loss[torch.isfinite(loss)].mean().item()
+
+
+if __name__ == "__main__":
+    """
+    Perform the hyperparameter search for the motor position optimization and save the results.
+
+    This script executes the hyperparameter search with ``propulate`` and saves the result for
+    further inspection.
+
+    Parameters
+    ----------
+    config : str
+        Path to the configuration file.
+    device : str
+        Device to use for the computation.
+    data_dir : str
+        Path to the data directory.
+    results_dir : str
+        Path to where the results will be saved.
+    scenarios_dir : str
+        Path to the directory containing the scenarios.
+    propulate_logs_dir : str
+        Path to the directory where propulate will write log messages.
+    parameter_ranges_motor_positions : dict[str, str | int | float]
+        The optimization parameters.
+    """
+    comm = MPI.COMM_WORLD
+
+    rank = comm.Get_rank()
+
+    # Set default location for configuration file.
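+    # Resolving the default config relative to this script makes the defaults
+    # independent of the current working directory.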
+    script_dir = pathlib.Path(__file__).resolve().parent
+    default_config_path = script_dir / "config.yaml"
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--config",
+        type=str,
+        help="Path to the YAML configuration file.",
+        default=default_config_path,
+    )
+
+    # Parse the config argument first to load the configuration.
+    args, unknown = parser.parse_known_args()
+    config_path = pathlib.Path(args.config)
+    config = {}
+    if config_path.exists():
+        try:
+            with open(config_path, "r") as f:
+                config = yaml.safe_load(f)
+        except yaml.YAMLError as exc:
+            warnings.warn(f"Error parsing YAML file: {exc}")
+    else:
+        warnings.warn(
+            f"Warning: Configuration file not found at {config_path}. Using defaults."
+        )
+
+    # Add remaining arguments to the parser with defaults loaded from the config.
+    data_dir_default = config.get("data_dir", "./paint_data")
+    device_default = config.get("device", "cuda")
+    scenarios_dir_default = config.get(
+        "scenarios_dir", "./examples/hyperparameter_optimization/scenarios"
+    )
+    results_dir_default = config.get(
+        "results_dir", "./examples/hyperparameter_optimization/results"
+    )
+    propulate_logs_dir_default = config.get(
+        "propulate_logs_dir", "./examples/hyperparameter_optimization/logs"
+    )
+    parameter_ranges_default = config.get(
+        "parameter_ranges_motor_positions",
+        {
+            "initial_learning_rate": [1e-7, 1e-3],
+            "scheduler": ["exponential", "reduce_on_plateau", "cyclic"],
+            "min_learning_rate": [1e-9, 1e-6],
+            "max_learning_rate": [1e-4, 1e-2],
+            "step_size_up": [100, 500],
+            "reduce_factor": [0.05, 0.5],
+            "patience": [3, 50],
+            "threshold": [1e-6, 1e-3],
+            "cooldown": [2, 20],
+            "gamma": [0.85, 0.999],
+        },
+    )
+
+    parser.add_argument(
+        "--device",
+        type=str,
+        help="Device to use.",
+        default=device_default,
+    )
+    parser.add_argument(
+        "--data_dir",
+        type=str,
+        help="Path to downloaded paint data.",
+        default=data_dir_default,
+    )
+    parser.add_argument(
+        "--scenarios_dir",
+        type=str,
+        help="Path to directory containing the generated scenarios.",
+        default=scenarios_dir_default,
+    )
+    parser.add_argument(
+        "--results_dir",
+        type=str,
+        help="Path to save the results.",
+        default=results_dir_default,
+    )
+    parser.add_argument(
+        "--propulate_logs_dir",
+        type=str,
+        help="Path to save propulate log messages.",
+        default=propulate_logs_dir_default,
+    )
+    parser.add_argument(
+        "--parameter_ranges_motor_positions",
+        type=eval,
+        help="Parameter ranges used for the optimization.",
+        default=parameter_ranges_default,
+    )
+
+    # Re-parse the full set of arguments.
+    args = parser.parse_args(args=unknown)
+
+    device = get_device(torch.device(args.device))
+    data_dir = pathlib.Path(args.data_dir)
+    propulate_logs_dir = pathlib.Path(args.propulate_logs_dir) / "motor_positions"
+    results_dir = pathlib.Path(args.results_dir)
+
+    # Define scenario path.
+    scenario_file = (
+        pathlib.Path(args.scenarios_dir) / "deflectometry_scenario_surface.h5"
+    )
+    if not scenario_file.exists():
+        raise FileNotFoundError(
+            f"The reconstruction scenario located at {scenario_file} could not be found! Please run the ``generate_scenarios.py`` script to generate this scenario, or adjust the file path and try again."
+        )
+
+    # Set up separate logger for Propulate optimization.
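+    # With log_rank=True each record is tagged with its MPI rank, so the
+    # interleaved messages of parallel workers remain attributable.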
+    set_logger_config(
+        level=logging.INFO,
+        log_file=f"{propulate_logs_dir}/{pathlib.Path(__file__).stem}.log",
+        log_to_stdout=False,
+        log_rank=True,
+        colors=True,
+    )
+
+    log = logging.getLogger(__name__)
+    rank = comm.Get_rank()
+    log.info(rank)
+
+    seed = 7
+    rng = random.Random(seed + comm.rank)
+
+    reconstruction_parameter_ranges: dict[
+        str, tuple[int, ...] | tuple[float, ...] | tuple[str, ...]
+    ] = {}
+
+    for key, value in args.parameter_ranges_motor_positions.items():
+        if all(isinstance(x, (int, float)) for x in value):
+            if all(
+                isinstance(x, int) or (isinstance(x, float) and x.is_integer())
+                for x in value
+            ):
+                int_tuple: tuple[int, ...] = tuple(int(x) for x in value)
+                reconstruction_parameter_ranges[key] = int_tuple
+            else:
+                float_tuple: tuple[float, ...] = tuple(float(x) for x in value)
+                reconstruction_parameter_ranges[key] = float_tuple
+        else:
+            str_tuple: tuple[str, ...] = tuple(value)
+            reconstruction_parameter_ranges[key] = str_tuple
+
+    # Set up evolutionary operator.
+    num_generations = 400
+    pop_size = 2 * comm.size
+    propagator = get_default_propagator(
+        pop_size=pop_size,
+        limits=reconstruction_parameter_ranges,
+        crossover_prob=0.7,
+        mutation_prob=0.4,
+        random_init_prob=0.1,
+        rng=rng,
+    )
+
+    loss_fn = partial(
+        motor_position_optimizer_for_hpo,
+        scenario_path=scenario_file,
+    )
+
+    # Set up propulator performing actual optimization.
+    propulator = Propulator(
+        loss_fn=loss_fn,
+        propagator=propagator,
+        rng=rng,
+        island_comm=comm,
+        generations=num_generations,
+        checkpoint_path=propulate_logs_dir,
+    )
+
+    # Run optimization and print summary of results.
+    propulator.propulate(
+        logging_interval=1,
+        debug=2,
+    )
+    propulator.summarize(
+        top_n=10,
+        debug=50,
+    )
+
+    hpo_result_file = propulate_logs_dir / "island_0_ckpt.pickle"
+    optimized_parameters_file = results_dir / "hpo_results_motor_positions.json"
+
+    # Save hpo results in format to be used by plots.
+    if not hpo_result_file.exists():
+        raise FileNotFoundError(
+            f"The hpo results located at {hpo_result_file} could not be found! Please run the hpo script again to generate the results."
+        )
+
+    with open(hpo_result_file, "rb") as results:
+        data = pickle.load(results)
+
+    data_dict = data[-1]
+    parameters_dict = {}
+
+    for key, value in data_dict.items():
+        if (
+            isinstance(value, str)
+            and re.fullmatch(r"[+-]?\d+(\.\d+)?[eE][+-]?\d+", value) is not None
+        ):
+            parameters_dict[key] = float(value)
+        else:
+            parameters_dict[key] = value
+
+    if not results_dir.is_dir():
+        results_dir.mkdir(parents=True, exist_ok=True)
+
+    with open(optimized_parameters_file, "w") as output_file:
+        json.dump(parameters_dict, output_file, indent=2)
diff --git a/examples/hyperparameter_optimization/surface_reconstruction_hyperparameter_search.py b/examples/hyperparameter_optimization/search_surface_reconstruction.py
similarity index 68%
rename from examples/hyperparameter_optimization/surface_reconstruction_hyperparameter_search.py
rename to examples/hyperparameter_optimization/search_surface_reconstruction.py
index 06be0aec8..e6b076658 100644
--- a/examples/hyperparameter_optimization/surface_reconstruction_hyperparameter_search.py
+++ b/examples/hyperparameter_optimization/search_surface_reconstruction.py
@@ -4,6 +4,7 @@
 import pathlib
 import pickle
 import random
+import re
 import warnings
 from functools import partial
@@ -15,7 +16,7 @@
 from propulate.utils import get_default_propagator, set_logger_config
 
 from artist.core import loss_functions
-from artist.core.regularizers import IdealSurfaceRegularizer
+from artist.core.regularizers import IdealSurfaceRegularizer, SmoothnessRegularizer
 from artist.core.surface_reconstructor import SurfaceReconstructor
 from artist.data_parser.calibration_data_parser import CalibrationDataParser
 from artist.data_parser.paint_calibration_parser import PaintCalibrationDataParser
@@ -75,7 +76,11 @@ def surface_reconstructor_for_hpo(
     # For parameter combinations with too many rays directly return a default loss,
     # to avoid running such combination as they cause "out of memory" errors.
total_number_of_rays = ( - params["number_of_surface_points"] * 2 * 4 * params["number_of_rays"] * 4 + params["number_of_surface_points"] + * 2 + * 4 + * params["number_of_rays"] + * params["sample_limit"] ) if total_number_of_rays >= 1500000: loss = 987987 @@ -112,44 +117,57 @@ def surface_reconstructor_for_hpo( [params["nurbs_degree"], params["nurbs_degree"]], device=device ) + data_parser = PaintCalibrationDataParser( + sample_limit=int(params["sample_limit"]), + ) data: dict[ str, CalibrationDataParser | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], ] = { - config_dictionary.data_parser: PaintCalibrationDataParser(), + config_dictionary.data_parser: data_parser, config_dictionary.heliostat_data_mapping: heliostat_data_mapping, } - scheduler = config_dictionary.reduce_on_plateau - scheduler_parameters = { - config_dictionary.min: 1e-7, + optimizer_dict = { + config_dictionary.initial_learning_rate: params["initial_learning_rate"], + config_dictionary.tolerance: 0.0005, + config_dictionary.max_epoch: 150, + config_dictionary.batch_size: params["sample_limit"], + config_dictionary.log_step: 0, + config_dictionary.early_stopping_delta: 1e-4, + config_dictionary.early_stopping_patience: 15, + config_dictionary.early_stopping_window: 10, + } + scheduler_dict = { + config_dictionary.scheduler_type: params["scheduler"], + config_dictionary.min: params["min_learning_rate"], + config_dictionary.max: params["max_learning_rate"], + config_dictionary.step_size_up: params["step_size_up"], config_dictionary.reduce_factor: params["reduce_factor"], config_dictionary.patience: params["patience"], config_dictionary.threshold: params["threshold"], - config_dictionary.cooldown: 2, + config_dictionary.cooldown: params["cooldown"], + config_dictionary.gamma: params["gamma"], } - - # Configure regularizers and their weights. - ideal_surface_regularizer = IdealSurfaceRegularizer( - weight=params["ideal_surface_loss_weight"], reduction_dimensions=(1, 2, 3) - ) - + ideal_surface_regularizer = IdealSurfaceRegularizer(reduction_dimensions=(1,)) + smoothness_regularizer = SmoothnessRegularizer(reduction_dimensions=(1,)) regularizers = [ ideal_surface_regularizer, + smoothness_regularizer, ] - - # Set optimizer parameters. - optimization_configuration = { - config_dictionary.initial_learning_rate: params["initial_learning_rate"], - config_dictionary.tolerance: 0.00005, - config_dictionary.max_epoch: 4500, - config_dictionary.log_step: 0, - config_dictionary.early_stopping_delta: 1e-4, - config_dictionary.early_stopping_patience: 5000, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, + constraint_dict = { config_dictionary.regularizers: regularizers, + config_dictionary.initial_lambda_energy: 0.1, + config_dictionary.rho_energy: 1.0, + config_dictionary.energy_tolerance: 0.01, + config_dictionary.weight_smoothness: 0.005, + config_dictionary.weight_ideal_surface: 0.005, + } + optimization_configuration = { + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, + config_dictionary.constraints: constraint_dict, } # Create the surface reconstructor. 
@@ -159,7 +177,6 @@ def surface_reconstructor_for_hpo( data=data, optimization_configuration=optimization_configuration, number_of_surface_points=number_of_surface_points_per_facet, - bitmap_resolution=torch.tensor([256, 256], device=device), device=device, ) @@ -172,12 +189,12 @@ def surface_reconstructor_for_hpo( device=device, ) - return loss[torch.isfinite(loss)].sum().item() + return loss[torch.isfinite(loss)].mean().item() if __name__ == "__main__": """ - Perform the hyperparameter search and save the results. + Perform the hyperparameter search for the surface reconstruction and save the results. This script executes the hyperparameter search with ``propulate`` and saves the result for further inspection. @@ -190,15 +207,13 @@ def surface_reconstructor_for_hpo( Device to use for the computation. data_dir : str Path to the data directory. - heliostat_for_reconstruction : dict[str, list[int]] - The heliostat and its calibration numbers. results_dir : str Path to where the results will be saved. scenarios_dir : str Path to the directory containing the scenarios. propulate_logs_dir : str Path to the directory where propulate will write log messages. - reconstruction_parameter_ranges : dict[str, int | float] + parameter_ranges_surface : dict[str, int | float] The reconstruction parameters. """ comm = MPI.COMM_WORLD @@ -207,7 +222,7 @@ def surface_reconstructor_for_hpo( # Set default location for configuration file. script_dir = pathlib.Path(__file__).resolve().parent - default_config_path = script_dir / "hpo_config.yaml" + default_config_path = script_dir / "config.yaml" parser = argparse.ArgumentParser() parser.add_argument( @@ -235,24 +250,33 @@ def surface_reconstructor_for_hpo( # Add remaining arguments to the parser with defaults loaded from the config. 
data_dir_default = config.get("data_dir", "./paint_data") device_default = config.get("device", "cuda") - heliostat_for_reconstruction_default = config.get( - "heliostat_for_reconstruction", {"AA39": [244862, 270398, 246213, 258959]} + scenarios_dir_default = config.get( + "scenarios_dir", "./examples/hyperparameter_optimization/scenarios" ) - scenarios_dir_default = config.get("scenarios_dir", "./scenarios") - results_dir_default = config.get("results_dir", "./results") - propulate_logs_dir_default = config.get("propulate_logs_dir", "./logs") - reconstruction_parameter_ranges_default = config.get( - "reconstruction_parameter_ranges", + results_dir_default = config.get( + "results_dir", "./examples/hyperparameter_optimization/results" + ) + propulate_logs_dir_default = config.get( + "propulate_logs_dir", "./examples/hyperparameter_optimization/logs" + ) + parameter_ranges_default = config.get( + "parameter_ranges_surface", { - "number_of_surface_points": [30, 110], - "number_of_control_points": [4, 20], - "number_of_rays": [50, 200], + "number_of_surface_points": [30, 90], + "number_of_rays": [50, 120], + "number_of_control_points": [4, 12], "nurbs_degree": [2, 3], - "ideal_surface_loss_weight": [0.0, 2.0], + "sample_limit": [2, 8], "initial_learning_rate": [1e-7, 1e-3], + "scheduler": ["exponential", "reduce_on_plateau", "cyclic"], + "min_learning_rate": [1e-9, 1e-6], + "max_learning_rate": [1e-4, 1e-2], + "step_size_up": [100, 500], "reduce_factor": [0.05, 0.5], - "patience": [5, 50], + "patience": [3, 50], "threshold": [1e-6, 1e-3], + "cooldown": [2, 20], + "gamma": [0.85, 0.999], }, ) @@ -268,13 +292,6 @@ def surface_reconstructor_for_hpo( help="Path to downloaded paint data.", default=data_dir_default, ) - parser.add_argument( - "--heliostat_for_reconstruction", - type=str, - help="The heliostat and its calibration numbers to be reconstructed.", - nargs="+", - default=heliostat_for_reconstruction_default, - ) parser.add_argument( "--scenarios_dir", type=str, @@ -294,10 +311,10 @@ def surface_reconstructor_for_hpo( default=propulate_logs_dir_default, ) parser.add_argument( - "--reconstruction_parameter_ranges", + "--parameter_ranges_surface", type=eval, help="Parameters used for the reconstruction.", - default=reconstruction_parameter_ranges_default, + default=parameter_ranges_default, ) # Re-parse the full set of arguments. @@ -305,16 +322,14 @@ def surface_reconstructor_for_hpo( device = get_device(torch.device(args.device)) data_dir = pathlib.Path(args.data_dir) - propulate_logs_dir = pathlib.Path(args.propulate_logs_dir) + propulate_logs_dir = pathlib.Path(args.propulate_logs_dir) / "surface" results_dir = pathlib.Path(args.results_dir) # Define scenario path. - ideal_scenario_file = ( - pathlib.Path(args.scenarios_dir) / "surface_reconstruction_ideal.h5" - ) - if not ideal_scenario_file.exists(): + scenario_file = pathlib.Path(args.scenarios_dir) / "ideal_scenario_surface.h5" + if not scenario_file.exists(): raise FileNotFoundError( - f"The reconstruction scenario located at {ideal_scenario_file} could not be found! Please run the ``surface_reconstruction_generate_scenario.py`` to generate this scenario, or adjust the file path and try again." + f"The reconstruction scenario located at {scenario_file} could not be found! Please run the ``generate_scenarios.py`` to generate this scenario, or adjust the file path and try again." ) # Set up separate logger for Propulate optimization. 
@@ -334,11 +349,11 @@ def surface_reconstructor_for_hpo(
     rng = random.Random(seed + comm.rank)
 
     viable_heliostats_data = (
-        pathlib.Path(args.results_dir) / "surface_reconstruction_viable_heliostats.json"
+        pathlib.Path(args.results_dir) / "viable_heliostats_surface.json"
     )
     if not viable_heliostats_data.exists():
         raise FileNotFoundError(
-            f"The viable heliostat list located at {viable_heliostats_data} could not be not found! Please run the ``surface_reconstruction_viable_heliostat_list.py`` script to generate this list, or adjust the file path and try again."
+            f"The viable heliostat list located at {viable_heliostats_data} could not be found! Please run the ``generate_viable_heliostats_list.py`` script to generate this list, or adjust the file path and try again."
         )
 
     # Load viable heliostats data.
@@ -349,18 +364,45 @@ def surface_reconstructor_for_hpo(
         (
             item["name"],
             [pathlib.Path(p) for p in item["calibrations"]],
-            [pathlib.Path(p) for p in item["flux_images"]],
+            [pathlib.Path(p) for p in item["surface_reconstruction_flux_images"]],
         )
         for item in viable_heliostats
+        if item["name"]
+        in [
+            "AC38",
+            "BD38",
+            "AE34",
+            "BG65",
+            "AK26",
+            "AK17",
+            "BA43",
+            "AZ28",
+            "AP51",
+            "AP35",
+        ]
     ]
 
-    reconstruction_parameter_ranges = {}
-    for key, value in args.reconstruction_parameter_ranges.items():
-        tuple_range = tuple(float(x) if isinstance(x, str) else x for x in value)
-        reconstruction_parameter_ranges[key] = tuple_range
+    reconstruction_parameter_ranges: dict[
+        str, tuple[int, ...] | tuple[float, ...] | tuple[str, ...]
+    ] = {}
+
+    for key, value in args.parameter_ranges_surface.items():
+        if all(isinstance(x, (int, float)) for x in value):
+            if all(
+                isinstance(x, int) or (isinstance(x, float) and x.is_integer())
+                for x in value
+            ):
+                int_tuple: tuple[int, ...] = tuple(int(x) for x in value)
+                reconstruction_parameter_ranges[key] = int_tuple
+            else:
+                float_tuple: tuple[float, ...] = tuple(float(x) for x in value)
+                reconstruction_parameter_ranges[key] = float_tuple
+        else:
+            str_tuple: tuple[str, ...] = tuple(value)
+            reconstruction_parameter_ranges[key] = str_tuple
 
     # Set up evolutionary operator.
-    num_generations = 500
+    num_generations = 100
     pop_size = 2 * comm.size
     propagator = get_default_propagator(
         pop_size=pop_size,
@@ -373,7 +415,7 @@ def surface_reconstructor_for_hpo(
 
     loss_fn = partial(
         surface_reconstructor_for_hpo,
-        scenario_path=ideal_scenario_file,
+        scenario_path=scenario_file,
         heliostat_data_mapping=heliostat_data_mapping,
     )
 
@@ -393,12 +435,12 @@ def surface_reconstructor_for_hpo(
         debug=2,
     )
     propulator.summarize(
-        top_n=10,
-        debug=2,
+        top_n=2,
+        debug=50,
     )
 
     hpo_result_file = propulate_logs_dir / "island_0_ckpt.pickle"
-    optimized_parameters_file = results_dir / "hpo_results.json"
+    optimized_parameters_file = results_dir / "hpo_results_surface.json"
 
     # Save hpo results in format to be used by plots.
if not hpo_result_file.exists(): @@ -413,7 +455,10 @@ def surface_reconstructor_for_hpo( parameters_dict = {} for key, value in data_dict.items(): - if isinstance(value, str) and "E" in value.upper(): + if ( + isinstance(value, str) + and re.fullmatch(r"[+-]?\d+(\.\d+)?[eE][+-]?\d+", value) is not None + ): parameters_dict[key] = float(value) else: parameters_dict[key] = value diff --git a/examples/hyperparameter_optimization/surface_reconstruction_generate_scenario.py b/examples/hyperparameter_optimization/surface_reconstruction_generate_scenario.py deleted file mode 100644 index 698f71ca1..000000000 --- a/examples/hyperparameter_optimization/surface_reconstruction_generate_scenario.py +++ /dev/null @@ -1,341 +0,0 @@ -import argparse -import json -import pathlib -import warnings -from pathlib import Path - -import paint.util.paint_mappings as paint_mappings -import torch -import yaml - -from artist.data_parser import paint_scenario_parser -from artist.scenario.configuration_classes import ( - LightSourceConfig, - LightSourceListConfig, -) -from artist.scenario.h5_scenario_generator import H5ScenarioGenerator -from artist.util import config_dictionary, set_logger_config -from artist.util.environment_setup import get_device - -set_logger_config() - - -def find_latest_deflectometry_file(heliostat_name: str, data_directory: Path) -> Path: - """ - Find the latest deflectometry HDF5 file for a given heliostat. - - Parameters - ---------- - heliostat_name : str - Heliostat name being considered. - data_directory : Path - Data directory containing ``PAINT`` data. - - Returns - ------- - pathlib.Path - Path to the latest deflectometry file. - - Raises - ------ - FileNotFoundError - If no matching file is found. - """ - search_path = ( - pathlib.Path(data_directory) - / heliostat_name - / paint_mappings.SAVE_DEFLECTOMETRY - ) - pattern = f"{heliostat_name}-filled*.h5" - files = sorted(search_path.glob(pattern)) - if not files: - raise FileNotFoundError( - f"No deflectometry file found for {heliostat_name} in {search_path}." - ) - return files[-1] - - -def generate_flux_prediction_scenario( - data_directory: Path, - scenario_path: Path, - tower_file_path: Path, - heliostat_names: list[str], - device: torch.device | None = None, - use_deflectometry: bool = True, -) -> None: - """ - Generate an HDF5 scenario for the flux prediction plots using ``PAINT`` data. - - Parameters - ---------- - data_directory : pathlib.Path - Directory where the ``PAINT`` data is stored. - scenario_path : pathlib.Path - Path to save the generated HDF5 scenario. - tower_file_path : pathlib.Path - Path to the tower measurements file. - heliostat_names : list[str] - Names of the heliostats to include in the scenario. - device : torch.device | None - The device on which to perform computations or load tensors and models (default is None). - If None, ``ARTIST`` will automatically select the most appropriate - device (CUDA or CPU) based on availability and OS. - use_deflectometry : bool, optional - Whether to use deflectometry data for surface fitting (default is True). - """ - device = get_device(device=device) - - # Make sure the parent folder is available for saving the scenario. - if not scenario_path.exists(): - scenario_path.parent.mkdir(parents=True, exist_ok=True) - - # Include the power plant configuration. 
- power_plant_config, target_area_list_config = ( - paint_scenario_parser.extract_paint_tower_measurements( - tower_measurements_path=tower_file_path, - device=device, - ) - ) - - # Include the light source configuration. - light_source1_config = LightSourceConfig( - light_source_key="sun_1", - light_source_type=config_dictionary.sun_key, - number_of_rays=10, - distribution_type=config_dictionary.light_source_distribution_is_normal, - mean=0.0, - covariance=4.3681e-06, - ) - - # Create a list of light source configs. - light_source_list = [light_source1_config] - - # Include the configuration for the list of light sources. - light_source_list_config = LightSourceListConfig( - light_source_list=light_source_list - ) - - heliostat_files_list: ( - list[tuple[str, pathlib.Path]] | list[tuple[str, pathlib.Path, pathlib.Path]] - ) = [] - - if use_deflectometry: - heliostat_files_list = [ - ( - heliostat_name, - pathlib.Path( - f"{data_directory}/{heliostat_name}/{paint_mappings.SAVE_PROPERTIES}/{heliostat_name}-{paint_mappings.HELIOSTAT_PROPERTIES_KEY}.json" - ), - find_latest_deflectometry_file(heliostat_name, data_directory), - ) - for heliostat_name in heliostat_names - ] - - nurbs_fit_optimizer = torch.optim.Adam( - [torch.empty(1, requires_grad=True)], lr=1e-3 - ) - nurbs_fit_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - nurbs_fit_optimizer, - mode="min", - factor=0.2, - patience=50, - threshold=1e-7, - threshold_mode="abs", - ) - - heliostat_list_config, prototype_config = ( - paint_scenario_parser.extract_paint_heliostats_fitted_surface( - paths=heliostat_files_list, - power_plant_position=power_plant_config.power_plant_position, - number_of_nurbs_control_points=torch.tensor([20, 20], device=device), - deflectometry_step_size=100, - nurbs_fit_method=config_dictionary.fit_nurbs_from_normals, - nurbs_fit_tolerance=1e-10, - nurbs_fit_max_epoch=400, - nurbs_fit_optimizer=nurbs_fit_optimizer, - nurbs_fit_scheduler=nurbs_fit_scheduler, - device=device, - ) - ) - else: - heliostat_files_list = [ - ( - heliostat_name, - pathlib.Path( - f"{data_directory}/{heliostat_name}/{paint_mappings.SAVE_PROPERTIES}/{heliostat_name}-{paint_mappings.HELIOSTAT_PROPERTIES_KEY}.json" - ), - ) - for heliostat_name in heliostat_names - ] - heliostat_list_config, prototype_config = ( - paint_scenario_parser.extract_paint_heliostats_ideal_surface( - paths=heliostat_files_list, - power_plant_position=power_plant_config.power_plant_position, - number_of_nurbs_control_points=torch.tensor([20, 20], device=device), - device=device, - ) - ) - - # Generate the scenario given the defined parameters. - scenario_generator = H5ScenarioGenerator( - file_path=scenario_path, - power_plant_config=power_plant_config, - target_area_list_config=target_area_list_config, - light_source_list_config=light_source_list_config, - prototype_config=prototype_config, - heliostat_list_config=heliostat_list_config, - ) - scenario_generator.generate_scenario() - - -if __name__ == "__main__": - """ - Generate two scenarios for the plots. - - One of these scenarios uses ideal surfaces whilst one includes surfaces fitted with deflectometry data. - If a configuration file is provided the values will be loaded from this file. It is also possible to override - the configuration file using command line arguments. If no command line arguments and no configuration file - is provided, default values will be used which may fail. - - Parameters - ---------- - config : str - Path to the configuration file. 
- device : str - Device to use for the computation. - data_dir : str - Path to the data directory. - tower_file_name : str - Name of the file containing the tower measurements. - heliostat_for_reconstruction : dict[str, list[int]] - The heliostat and its calibration numbers. - scenarios_dir : str - Path to the directory for saving the generated scenarios. - """ - # Set default location for configuration file. - script_dir = pathlib.Path(__file__).resolve().parent - default_config_path = script_dir / "hpo_config.yaml" - - parser = argparse.ArgumentParser() - parser.add_argument( - "--config", - type=str, - help="Path to the YAML configuration file.", - default=default_config_path, - ) - - # Parse the config argument first to load the configuration. - args, unknown = parser.parse_known_args() - config_path = pathlib.Path(args.config) - config = {} - if config_path.exists(): - try: - with open(config_path, "r") as f: - config = yaml.safe_load(f) - except yaml.YAMLError as exc: - warnings.warn(f"Error parsing YAML file: {exc}") - else: - warnings.warn( - f"Warning: Configuration file not found at {config_path}. Using defaults." - ) - - # Add remaining arguments to the parser with defaults loaded from the config. - data_dir_default = config.get("data_dir", "./paint_data") - device_default = config.get("device", "cuda") - tower_file_name_default = config.get( - "tower_file_name", "WRI1030197-tower-measurements.json" - ) - results_dir_default = config.get("results_dir", "./results") - heliostat_for_reconstruction_default = config.get( - "heliostat_for_reconstruction", {"AA39": [244862, 270398, 246213, 258959]} - ) - scenarios_dir_default = config.get("scenarios_dir", "./scenarios") - - parser.add_argument( - "--device", - type=str, - help="Device to use.", - default=device_default, - ) - parser.add_argument( - "--data_dir", - type=str, - help="Path to downloaded paint data.", - default=data_dir_default, - ) - parser.add_argument( - "--tower_file_name", - type=str, - help="File name containing the tower data.", - default=tower_file_name_default, - ) - parser.add_argument( - "--results_dir", - type=str, - help="Path to the results containing the viable heliostats list.", - default=results_dir_default, - ) - parser.add_argument( - "--heliostat_for_reconstruction", - type=str, - help="Heliostats and calibration measurement required in the scenario.", - nargs="+", - default=heliostat_for_reconstruction_default, - ) - parser.add_argument( - "--scenarios_dir", - type=str, - help="Path to save the generated scenario.", - default=scenarios_dir_default, - ) - - # Re-parse the full set of arguments. - args = parser.parse_args(args=unknown) - - device = get_device(torch.device(args.device)) - - data_dir = pathlib.Path(args.data_dir) - tower_file = data_dir / args.tower_file_name - - viable_heliostats_data = ( - pathlib.Path(args.results_dir) / "surface_reconstruction_viable_heliostats.json" - ) - if not viable_heliostats_data.exists(): - raise FileNotFoundError( - f"The viable heliostat list located at {viable_heliostats_data} could not be not found! Please run the ``surface_reconstruction_viable_heliostats_list.py`` script to generate this list, or adjust the file path and try again." - ) - - # Load viable heliostats data. - with open(viable_heliostats_data, "r") as f: - viable_heliostats = json.load(f) - - viable_heliostat_names = [str(item["name"]) for item in viable_heliostats] - - # Generate two scenarios: deflectometry and ideal (no deflectometry). 
- deflectometry_scenario_file = ( - pathlib.Path(args.scenarios_dir) / "surface_comparison_deflectometry.h5" - ) - ideal_scenario_file = ( - pathlib.Path(args.scenarios_dir) / "surface_reconstruction_ideal.h5" - ) - - for scenario_path, use_deflectometry in [ - (deflectometry_scenario_file, True), - (ideal_scenario_file, False), - ]: - if scenario_path.exists(): - print( - f"Scenario found at {scenario_path}... continue without generating scenario." - ) - else: - print( - f"Scenario not found. Generating a new one at {scenario_path} (use_deflectometry={use_deflectometry})..." - ) - generate_flux_prediction_scenario( - data_directory=data_dir, - scenario_path=scenario_path, - tower_file_path=tower_file, - heliostat_names=viable_heliostat_names, - device=device, - use_deflectometry=use_deflectometry, - ) diff --git a/examples/hyperparameter_optimization/surface_reconstruction_plot.py b/examples/hyperparameter_optimization/surface_reconstruction_plot.py deleted file mode 100644 index 75f0800e8..000000000 --- a/examples/hyperparameter_optimization/surface_reconstruction_plot.py +++ /dev/null @@ -1,278 +0,0 @@ -import argparse -import pathlib -import warnings - -import numpy as np -import torch -import yaml -from matplotlib import gridspec -from matplotlib import pyplot as plt - -from artist.util.environment_setup import get_device - - -def plot_reconstruction_results( - results_file: pathlib.Path, plots_path: pathlib.Path, device: torch.device -) -> None: - """ - Plot the flux prediction results. - - Parameters - ---------- - results_file : pathlib.Path - Path to the results file. - plots_path : pathlib.Path - Path to save the plot to. - device : torch.device - Device to use. - """ - device = get_device(device) - - # Load results. - results_dict: dict[str, dict[str, np.ndarray]] = torch.load( - results_file, - weights_only=False, - map_location=device, - ) - - fig = plt.figure(figsize=(26, 6)) - gs = gridspec.GridSpec(1, 5, width_ratios=[1, 1, 1, 1, 1]) - gs.update(left=0.03, right=0.97, wspace=0.005) - axes = [fig.add_subplot(gs[i]) for i in range(5)] - - # Reference flux. - reference_flux = results_dict["reconstructed"]["measured_flux"][0] - reference_flux_normalized = (reference_flux - reference_flux.min()) / ( - reference_flux.max() - reference_flux.min() - ) - axes[0].imshow(reference_flux.cpu().detach(), cmap="gray") - axes[0].set_title("Reference", fontsize=28) - axes[0].axis("off") - - # Ideal flux. - ideal_flux = results_dict["ideal"]["ideal_flux"][0] - ideal_flux_normalized = (ideal_flux - ideal_flux.min()) / ( - ideal_flux.max() - ideal_flux.min() - ) - rmse_ideal = torch.sqrt( - torch.mean((reference_flux_normalized - ideal_flux_normalized) ** 2) - ) - axes[1].imshow(ideal_flux.cpu().detach(), cmap="gray") - axes[1].set_title("Ideal", fontsize=28) - axes[1].axis("off") - axes[1].text( - 0.5, - -0.05, - f"RMSE(Ref, Ideal)={rmse_ideal:.4f}", - ha="center", - va="top", - transform=axes[1].transAxes, - fontsize=26, - ) - - # Reconstructed flux. 
- reconstructed_flux = results_dict["reconstructed"]["reconstructed_flux"][0] - reconstructed_flux_normalized = (reconstructed_flux - reconstructed_flux.min()) / ( - reconstructed_flux.max() - reconstructed_flux.min() - ) - rmse_reconstructed = torch.sqrt( - torch.mean((reference_flux_normalized - reconstructed_flux_normalized) ** 2) - ) - axes[2].imshow(reconstructed_flux.cpu().detach(), cmap="gray") - axes[2].set_title("Reconstructed", fontsize=28) - axes[2].axis("off") - axes[2].text( - 0.5, - -0.05, - f"RMSE(Ref, Recon)={rmse_reconstructed:.4f}", - ha="center", - va="top", - transform=axes[2].transAxes, - fontsize=26, - ) - - # Angle maps. - reference_direction = torch.tensor([0.0, 0.0, 1.0, 0.0], device=device) - normals_r = ( - ( - results_dict["reconstructed"]["normals_reconstructed"][..., :3] - / torch.linalg.norm( - results_dict["reconstructed"]["normals_reconstructed"][..., :3], - axis=-1, - keepdims=True, - ) - ) - .cpu() - .detach() - ) - normals_d = ( - ( - results_dict["deflectometry"]["normals_deflectometry"][..., :3] - / torch.linalg.norm( - results_dict["deflectometry"]["normals_deflectometry"][..., :3], - axis=-1, - keepdims=True, - ) - ) - .cpu() - .detach() - ) - ref = ( - (reference_direction[..., :3] / torch.linalg.norm(reference_direction[..., :3])) - .cpu() - .detach() - ) - - all_x_r, all_y_r, all_angles_r = [], [], [] - all_x_d, all_y_d, all_angles_d = [], [], [] - - for facet_points_r, facet_normals_r, facet_points_d, facet_normals_d in zip( - results_dict["reconstructed"]["points_reconstructed"][0].cpu().detach(), - normals_r[0], - results_dict["deflectometry"]["points_deflectometry"][0].cpu().detach(), - normals_d[0], - ): - # Reconstructed. - x_r, y_r = facet_points_r[:, 0], facet_points_r[:, 1] - cos_theta_r = facet_normals_r @ ref - angles_r = torch.arccos(torch.clip(cos_theta_r, -1.0, 1.0)) - angles_r = torch.clip(angles_r, -0.1, 0.1) - all_x_r.append(x_r) - all_y_r.append(y_r) - all_angles_r.append(angles_r) - - # Deflectometry. - x_d, y_d = facet_points_d[:, 0], facet_points_d[:, 1] - cos_theta_d = facet_normals_d @ ref - angles_d = torch.arccos(torch.clip(cos_theta_d, -1.0, 1.0)) - angles_d = torch.clip(angles_d, -0.1, 0.1) - all_x_d.append(x_d) - all_y_d.append(y_d) - all_angles_d.append(angles_d) - - all_x_r = torch.cat(all_x_r) - all_y_r = torch.cat(all_y_r) - all_angles_r = torch.cat(all_angles_r) - all_x_d = torch.cat(all_x_d) - all_y_d = torch.cat(all_y_d) - all_angles_d = torch.cat(all_angles_d) - - sc3 = axes[3].scatter(all_x_d, all_y_d, c=all_angles_d, cmap="viridis", s=20) - axes[3].set_title("Angle Map (Measured Normals)", fontsize=16) - axes[3].set_aspect("equal", adjustable="box") - axes[3].axis("off") - cbar3 = fig.colorbar( - sc3, ax=axes[3], orientation="horizontal", fraction=0.046, pad=0.1 - ) - cbar3.set_label("Angle (rad)") - - sc4 = axes[4].scatter(all_x_r, all_y_r, c=all_angles_r, cmap="viridis", s=20) - axes[4].set_title("Angle Map\n(Reconstructed Normals)", fontsize=28) - axes[4].set_aspect("equal", adjustable="box") - axes[4].axis("off") - cbar4 = fig.colorbar( - sc4, ax=axes[4], orientation="horizontal", fraction=0.046, pad=0.01 - ) - cbar4.set_ticks([0.000, 0.016]) - cbar4.set_label("Angle (rad)", fontsize=26) - cbar4.ax.tick_params(labelsize=24) - - plt.tight_layout() - plt.savefig(plots_path, dpi=300, bbox_inches="tight") - - print(f"Saved flux comparison to {plots_path}.") - - plt.close("all") - - -if __name__ == "__main__": - """ - Generate plots based on the reconstruction results. 
-
-    This script loads the results from the ``ARTIST`` reconstruction and generates a plot comparing the fluxes
-    from the ideal, reconstructed, and measured images, as well as the measured surface with the reconstructed
-    surface.
-
-    Parameters
-    ----------
-    config : str
-        Path to the configuration file.
-    device : str
-        Device to use for the computation.
-    results_dir : str
-        Path to the directory where the results are saved.
-    plots_dir : str
-        Path to the directory where the plots are saved.
-    """
-    # Set default location for configuration file.
-    script_dir = pathlib.Path(__file__).resolve().parent
-    default_config_path = script_dir / "hpo_config.yaml"
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--config",
-        type=str,
-        help="Path to the YAML configuration file.",
-        default=default_config_path,
-    )
-
-    # Parse the config argument first to load the configuration.
-    args, unknown = parser.parse_known_args()
-    config_path = pathlib.Path(args.config)
-    config = {}
-    if config_path.exists():
-        try:
-            with open(config_path, "r") as f:
-                config = yaml.safe_load(f)
-        except yaml.YAMLError as exc:
-            warnings.warn(f"Error parsing YAML file: {exc}")
-    else:
-        warnings.warn(
-            f"Warning: Configuration file not found at {config_path}. Using defaults."
-        )
-
-    # Add remaining arguments to the parser with defaults loaded from the config.
-    device_default = config.get("device", "cuda")
-    results_dir_default = config.get("results_dir", "./results")
-    plots_dir_default = config.get("plots_dir", "./plots")
-
-    parser.add_argument(
-        "--device",
-        type=str,
-        help="Device to use.",
-        default=device_default,
-    )
-    parser.add_argument(
-        "--results_dir",
-        type=str,
-        help="Path to load the results.",
-        default=results_dir_default,
-    )
-    parser.add_argument(
-        "--plots_dir",
-        type=str,
-        help="Path to save the plots.",
-        default=plots_dir_default,
-    )
-
-    # Re-parse the full set of arguments.
-    args = parser.parse_args(args=unknown)
-
-    device = get_device(torch.device(args.device))
-
-    results_path = pathlib.Path(args.results_dir) / "surface_reconstruction_results.pt"
-
-    if not results_path.exists():
-        raise FileNotFoundError(
-            f"Results file not found: {results_path}. Please run ``surface_reconstruction_results.py`` "
-            "or adjust the location of the results file and try again!"
-        )
-
-    plots_path = pathlib.Path(args.plots_dir) / "surface_reconstruction.pdf"
-    if not plots_path.parent.is_dir():
-        plots_path.parent.mkdir(parents=True, exist_ok=True)
-
-    # Generate and save plots.
- plot_reconstruction_results( - results_file=results_path, plots_path=plots_path, device=device - ) diff --git a/examples/hyperparameter_optimization/surface_reconstruction_results.py b/examples/hyperparameter_optimization/surface_reconstruction_results.py deleted file mode 100644 index 024852f1c..000000000 --- a/examples/hyperparameter_optimization/surface_reconstruction_results.py +++ /dev/null @@ -1,927 +0,0 @@ -import argparse -import json -import pathlib -import warnings -from typing import cast - -import h5py -import numpy as np -import paint.util.paint_mappings as paint_mappings -import torch -import yaml - -from artist.core import loss_functions -from artist.core.heliostat_ray_tracer import HeliostatRayTracer -from artist.core.regularizers import IdealSurfaceRegularizer -from artist.core.surface_reconstructor import SurfaceReconstructor -from artist.data_parser.calibration_data_parser import CalibrationDataParser -from artist.data_parser.paint_calibration_parser import PaintCalibrationDataParser -from artist.data_parser.paint_scenario_parser import extract_paint_heliostat_properties -from artist.scenario.scenario import Scenario -from artist.util import config_dictionary, set_logger_config, utils -from artist.util.environment_setup import get_device, setup_distributed_environment -from artist.util.nurbs import NURBSSurfaces - -torch.manual_seed(7) -torch.cuda.manual_seed(7) - -# Set up logger. -set_logger_config() - - -def perform_inverse_canting_and_translation( - canted_points: torch.Tensor, - translation: torch.Tensor, - canting: torch.Tensor, - device: torch.device | None = None, -) -> torch.Tensor: - """ - Invert the canting rotation and translation on a batch of facets. - - Parameters - ---------- - canted_points : torch.Tensor - Homogeneous points after the forward transform. - Tensor of shape [number_of_facets, number_of_points, 4]. - translation : torch.Tensor - Batch of facet translations. - Tensor of shape [number_of_facets, 4]. - canting : torch.Tensor - Batch of canting vectors (east, north). - Tensor of shape [number_of_facets, 2, 4]. - device : torch.device | None - The device on which to perform computations or load tensors and models (default is None). - If None, ``ARTIST`` will automatically select the most appropriate - device (CUDA or CPU) based on availability and OS. - - Returns - ------- - torch.Tensor - Original 3D points. - Tensor of shape [number_of_facets, number_of_points, 3]. - """ - device = get_device(device=device) - number_of_facets, _, _ = canted_points.shape - - # Build forward transform per facet (use only ENU 3D coordinates for rotation). - forward_transform = torch.zeros((number_of_facets, 4, 4), device=device) - - east_unit_vector = torch.nn.functional.normalize(canting[:, 0, :3], dim=1) - north_unit_vector = torch.nn.functional.normalize(canting[:, 1, :3], dim=1) - up_unit_vector = torch.nn.functional.normalize( - torch.linalg.cross(east_unit_vector, north_unit_vector, dim=1), dim=1 - ) - - forward_transform[:, :3, 0] = east_unit_vector - forward_transform[:, :3, 1] = north_unit_vector - forward_transform[:, :3, 2] = up_unit_vector - forward_transform[:, :3, 3] = translation[:, :3] - forward_transform[:, 3, 3] = 1.0 - - # Extract rotation and translation. - rotation_matrix = forward_transform[:, :3, :3] - translation_vector = forward_transform[:, :3, 3] - - # Compute inverse transform. 
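-    # For a rigid transform [R | t], the inverse is [R^T | -R^T @ t]; transposing
-    # the rotation is sufficient because its columns are orthonormal.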
-    rotation_matrix_inverse = rotation_matrix.transpose(1, 2)
-    translation_inverse = -torch.bmm(
-        rotation_matrix_inverse, translation_vector.unsqueeze(-1)
-    ).squeeze(-1)
-
-    inverse_transform = torch.zeros((number_of_facets, 4, 4), device=device)
-    inverse_transform[:, :3, :3] = rotation_matrix_inverse
-    inverse_transform[:, :3, 3] = translation_inverse
-    inverse_transform[:, 3, 3] = 1.0
-
-    # Apply inverse transform.
-    restored_points = torch.bmm(canted_points, inverse_transform.transpose(1, 2))
-    return restored_points[..., :3]
-
-
-def extract_canting_and_translation_from_properties(
-    heliostat_list: list[tuple[str, pathlib.Path]],
-    convert_to_4d: bool = False,
-    device: torch.device | None = None,
-) -> list[tuple[str, torch.Tensor, torch.Tensor]]:
-    """
-    Extract facet translation and canting vectors per heliostat from ``PAINT`` properties files.
-
-    Parameters
-    ----------
-    heliostat_list : list[tuple[str, pathlib.Path]]
-        A list where each entry is a tuple containing the heliostat name and the path to the heliostat properties data.
-    convert_to_4d : bool
-        Whether tensors should be converted to 4D format (default is False).
-    device : torch.device | None
-        The device on which to perform computations or load tensors and models (default is None).
-        If None, ``ARTIST`` will automatically select the most appropriate
-        device (CUDA or CPU) based on availability and OS.
-
-    Returns
-    -------
-    list[tuple[str, torch.Tensor, torch.Tensor]]
-        A list containing one tuple per heliostat, consisting of the heliostat name, the facet translation tensor of shape
-        [number_of_facets, dimension], and the facet canting tensor of shape [number_of_facets, 2, dimension], where
-        dimension is three or four depending on whether the conversion via the convert_to_4d parameter is applied.
-    """
-    device = get_device(device=device)
-    facet_transforms_per_heliostat: list[tuple[str, torch.Tensor, torch.Tensor]] = []
-
-    for heliostat_name, properties_path in heliostat_list:
-        try:
-            (
-                _,
-                facet_translation_vectors,
-                canting,
-                _,
-                _,
-                _,
-            ) = extract_paint_heliostat_properties(
-                heliostat_properties_path=properties_path,
-                power_plant_position=torch.tensor(
-                    [
-                        paint_mappings.POWER_PLANT_LAT,
-                        paint_mappings.POWER_PLANT_LON,
-                        paint_mappings.POWER_PLANT_ALT,
-                    ]
-                ),
-                device=device,
-            )
-
-            if not convert_to_4d:
-                facet_translation_vectors = facet_translation_vectors[:, :3]
-                canting = canting[..., :3]
-
-            facet_transforms_per_heliostat.append(
-                (heliostat_name, facet_translation_vectors, canting)
-            )
-
-        except Exception as ex:
-            warnings.warn(
-                f"Failed to extract canting/translation for '{heliostat_name}' "
-                f"from properties '{properties_path}': {ex}."
-            )
-            continue
-
-    return facet_transforms_per_heliostat
-
-
-def reconstruct_and_create_flux_image(
-    data_directory: pathlib.Path,
-    scenario_path: pathlib.Path,
-    heliostat_data_mapping: list[tuple[str, list[pathlib.Path], list[pathlib.Path]]],
-    validation_heliostat_data_mapping: list[
-        tuple[str, list[pathlib.Path], list[pathlib.Path]]
-    ],
-    reconstruction_parameters: dict[str, float | int],
-    results_file: pathlib.Path,
-    result_key: str,
-    device: torch.device | None,
-) -> None:
-    """
-    Reconstruct the heliostat surface with ``ARTIST`` and save the bitmaps and surface to a results file.
-
-    Parameters
-    ----------
-    data_directory : pathlib.Path
-        Path to the data directory.
-    scenario_path : pathlib.Path
-        Path to the scenario being used.
- heliostat_data_mapping : list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] - Data mapping for the reconstruction. - validation_heliostat_data_mapping : list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] - Data mapping for the plot. - reconstruction_parameters : dict[str, float | int] - Parameters for the reconstruction. - results_file : pathlib.Path - Path to the unified results file, saved as a torch checkpoint. - result_key : str - Key under which to store the result. - device : torch.device | None - The device on which to perform computations or load tensors and models (default is None). - If None, ``ARTIST`` will automatically select the most appropriate - device (CUDA or CPU) based on availability and OS. - """ - device = get_device(device) - - results_dict: dict[str, dict[str, np.ndarray | torch.Tensor]] = {} - - try: - loaded = torch.load(results_file, weights_only=False) - results_dict = cast(dict[str, torch.Tensor], loaded) - except FileNotFoundError: - print(f"File not found: {results_file}. Initializing with an empty dictionary.") - except Exception as e: - print(f"An error occurred while loading the file: {e}") - - number_of_heliostat_groups = Scenario.get_number_of_heliostat_groups_from_hdf5( - scenario_path=scenario_path - ) - with setup_distributed_environment( - number_of_heliostat_groups=number_of_heliostat_groups, - device=device, - ) as ddp_setup: - device = ddp_setup[config_dictionary.device] - - number_of_surface_points_per_facet = torch.tensor( - [ - reconstruction_parameters["number_of_surface_points"], - reconstruction_parameters["number_of_surface_points"], - ], - device=device, - ) - - number_of_control_points_per_facet = torch.tensor( - [ - reconstruction_parameters["number_of_control_points"], - reconstruction_parameters["number_of_control_points"], - ], - device=device, - ) - - with h5py.File(scenario_path, "r") as scenario_file: - scenario = Scenario.load_scenario_from_hdf5( - scenario_file=scenario_file, - number_of_surface_points_per_facet=number_of_surface_points_per_facet, - change_number_of_control_points_per_facet=number_of_control_points_per_facet, - device=device, - ) - - scenario.set_number_of_rays( - number_of_rays=int(reconstruction_parameters["number_of_rays"]) - ) - - for heliostat_group in scenario.heliostat_field.heliostat_groups: - heliostat_group.nurbs_degrees = torch.tensor( - [ - reconstruction_parameters["nurbs_degree"], - reconstruction_parameters["nurbs_degree"], - ], - device=device, - ) - - data: dict[ - str, - CalibrationDataParser - | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], - ] = { - config_dictionary.data_parser: PaintCalibrationDataParser(), - config_dictionary.heliostat_data_mapping: heliostat_data_mapping, - } - - heliostat_for_reconstruction_name = heliostat_data_mapping[0][0] - heliostat_group_for_reconstruction = [ - group - for group in scenario.heliostat_field.heliostat_groups - if heliostat_for_reconstruction_name in group.names - ][0] - - heliostat_properties_tuples: list[tuple[str, pathlib.Path]] = [ - ( - heliostat_for_reconstruction_name, - pathlib.Path( - f"{data_directory}/{heliostat_for_reconstruction_name}/{paint_mappings.SAVE_PROPERTIES}/{heliostat_for_reconstruction_name}-{paint_mappings.HELIOSTAT_PROPERTIES_KEY}.json" - ), - ) - ] - - # Extract facet translations and canting vectors. 
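-        # The translations and canting vectors are reused after the reconstruction
-        # to map the reconstructed normals back into the facet frame, so they can
-        # be compared with the de-canted deflectometry normals.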
- facet_transforms = extract_canting_and_translation_from_properties( - heliostat_list=heliostat_properties_tuples, - convert_to_4d=True, - device=device, - ) - facet_transforms_by_name = { - heliostat_name: (facet_translations, facet_canting_vectors) - for heliostat_name, facet_translations, facet_canting_vectors in facet_transforms - } - - # Configure regularizers and their weights. - ideal_surface_regularizer = IdealSurfaceRegularizer( - weight=reconstruction_parameters["ideal_surface_loss_weight"], - reduction_dimensions=(1, 2, 3), - ) - - regularizers = [ - ideal_surface_regularizer, - ] - - scheduler = config_dictionary.reduce_on_plateau - scheduler_parameters = { - config_dictionary.min: 1e-7, - config_dictionary.reduce_factor: reconstruction_parameters["reduce_factor"], - config_dictionary.patience: reconstruction_parameters["patience"], - config_dictionary.threshold: reconstruction_parameters["threshold"], - config_dictionary.cooldown: 2, - } - - optimization_configuration = { - config_dictionary.initial_learning_rate: reconstruction_parameters[ - "initial_learning_rate" - ], - config_dictionary.tolerance: 0.00005, - config_dictionary.max_epoch: 4500, - config_dictionary.log_step: 10, - config_dictionary.early_stopping_delta: 1e-4, - config_dictionary.early_stopping_patience: 5000, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, - config_dictionary.regularizers: regularizers, - } - - surface_reconstructor = SurfaceReconstructor( - ddp_setup=ddp_setup, - scenario=scenario, - data=data, - optimization_configuration=optimization_configuration, - number_of_surface_points=number_of_surface_points_per_facet, - bitmap_resolution=torch.tensor([256, 256], device=device), - device=device, - ) - - # Define loss. 
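-        # The KL divergence compares the predicted and measured flux bitmaps as
-        # distributions; the same loss definition is reused below as the
-        # validation metric.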
- loss_definition = loss_functions.KLDivergenceLoss() - - _ = surface_reconstructor.reconstruct_surfaces( - loss_definition=loss_definition, - device=device, - ) - - evaluation_points = ( - utils.create_nurbs_evaluation_grid( - number_of_evaluation_points=number_of_surface_points_per_facet, - device=device, - ) - .unsqueeze(0) - .unsqueeze(0) - .expand( - 1, - heliostat_group_for_reconstruction.number_of_facets_per_heliostat, - -1, - -1, - ) - ) - - reconstructed_nurbs = NURBSSurfaces( - degrees=heliostat_group_for_reconstruction.nurbs_degrees, - control_points=heliostat_group_for_reconstruction.nurbs_control_points[ - 0 - ].unsqueeze(0), - device=device, - ) - - reconstructed_points, reconstructed_normals = ( - reconstructed_nurbs.calculate_surface_points_and_normals( - evaluation_points=evaluation_points[0].unsqueeze(0), - device=device, - ) - ) - - parser = PaintCalibrationDataParser(sample_limit=1) - - ( - validation_measured_flux_distributions, - _, - validation_incident_ray_directions, - _, - validation_active_heliostats_mask, - validation_target_area_mask, - ) = parser.parse_data_for_reconstruction( - heliostat_data_mapping=validation_heliostat_data_mapping, - heliostat_group=heliostat_group_for_reconstruction, - scenario=scenario, - device=device, - ) - - heliostat_group_for_reconstruction.activate_heliostats( - active_heliostats_mask=validation_active_heliostats_mask, - device=device, - ) - - heliostat_group_for_reconstruction.active_surface_points = ( - reconstructed_points.reshape(validation_active_heliostats_mask.sum(), -1, 4) - ) - heliostat_group_for_reconstruction.active_surface_normals = ( - reconstructed_normals.reshape( - validation_active_heliostats_mask.sum(), -1, 4 - ) - ) - - heliostat_group_for_reconstruction.align_surfaces_with_incident_ray_directions( - aim_points=scenario.target_areas.centers[validation_target_area_mask], - incident_ray_directions=validation_incident_ray_directions, - active_heliostats_mask=validation_active_heliostats_mask, - device=device, - ) - - scenario.set_number_of_rays(number_of_rays=100) - - validation_ray_tracer = HeliostatRayTracer( - scenario=scenario, - heliostat_group=heliostat_group_for_reconstruction, - world_size=ddp_setup["heliostat_group_world_size"], - rank=ddp_setup["heliostat_group_rank"], - batch_size=heliostat_group_for_reconstruction.number_of_active_heliostats, - random_seed=ddp_setup["heliostat_group_rank"], - bitmap_resolution=torch.tensor([256, 256], device=device), - ) - - validation_bitmaps_per_heliostat_reconstructed = ( - validation_ray_tracer.trace_rays( - incident_ray_directions=validation_incident_ray_directions, - active_heliostats_mask=validation_active_heliostats_mask, - target_area_mask=validation_target_area_mask, - device=device, - ) - ) - - kl_div_r = loss_definition( - prediction=validation_bitmaps_per_heliostat_reconstructed, - ground_truth=validation_measured_flux_distributions, - target_area_mask=validation_target_area_mask, - reduction_dimensions=(1, 2), - device=device, - )[0].item() - - # Apply inverse canting and translation. 
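-        # Only the normals are de-canted; the surface points are stored unchanged
-        # for the angle-map plots.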
- facet_translations, facet_canting_vectors = facet_transforms_by_name[ - heliostat_for_reconstruction_name - ] - reconstructed_normals_decanted = perform_inverse_canting_and_translation( - canted_points=reconstructed_normals[0], - translation=facet_translations, - canting=facet_canting_vectors, - device=device, - ) - - results = { - "measured_flux": validation_measured_flux_distributions, - "reconstructed_flux": validation_bitmaps_per_heliostat_reconstructed, - "kl_div_reconstructed": kl_div_r, - "points_reconstructed": reconstructed_points, - "normals_reconstructed": reconstructed_normals_decanted.unsqueeze(0), - } - - results_dict[result_key] = results - - if not results_file.parent.exists(): - results_file.parent.mkdir(parents=True, exist_ok=True) - - torch.save(results_dict, results_file) - - -def create_ideal_flux_image( - scenario_path: pathlib.Path, - validation_heliostat_data_mapping: list[ - tuple[str, list[pathlib.Path], list[pathlib.Path]] - ], - reconstruction_parameters: dict[str, float | int], - results_file: pathlib.Path, - result_key: str, - device: torch.device | None, -) -> None: - """ - Create the flux from the ideal heliostat surface. - - Parameters - ---------- - scenario_path : pathlib.Path - Path to the scenario being used. - validation_heliostat_data_mapping : list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] - Data mapping for the plot. - reconstruction_parameters : dict[str, float | int] - Parameters for the reconstruction. - results_file : pathlib.Path - Path to the unified results file, saved as a torch checkpoint. - result_key : str - Key under which to store the result. - device : torch.device | None - The device on which to perform computations or load tensors and models (default is None). - If None, ``ARTIST`` will automatically select the most appropriate - device (CUDA or CPU) based on availability and OS. - """ - device = get_device(device) - - results_dict: dict[str, dict[str, np.ndarray | torch.Tensor]] = {} - - try: - loaded = torch.load(results_file, weights_only=False) - results_dict = cast(dict[str, torch.Tensor], loaded) - except FileNotFoundError: - print(f"File not found: {results_file}. 
Initializing with an empty dictionary.") - except Exception as e: - print(f"An error occurred while loading the file: {e}") - - number_of_heliostat_groups = Scenario.get_number_of_heliostat_groups_from_hdf5( - scenario_path=scenario_path - ) - - with setup_distributed_environment( - number_of_heliostat_groups=number_of_heliostat_groups, - device=device, - ) as ddp_setup: - device = ddp_setup[config_dictionary.device] - - number_of_surface_points_per_facet = torch.tensor( - [ - reconstruction_parameters["number_of_surface_points"], - reconstruction_parameters["number_of_surface_points"], - ], - device=device, - ) - - with h5py.File(scenario_path, "r") as scenario_file: - scenario = Scenario.load_scenario_from_hdf5( - scenario_file=scenario_file, - number_of_surface_points_per_facet=number_of_surface_points_per_facet, - device=device, - ) - scenario.set_number_of_rays(number_of_rays=100) - - heliostat_for_reconstruction_name = validation_heliostat_data_mapping[0][0] - heliostat_group_for_reconstruction = [ - group - for group in scenario.heliostat_field.heliostat_groups - if heliostat_for_reconstruction_name in group.names - ][0] - - parser = PaintCalibrationDataParser(sample_limit=1) - - ( - validation_measured_flux_distributions, - _, - validation_incident_ray_directions, - _, - validation_active_heliostats_mask, - validation_target_area_mask, - ) = parser.parse_data_for_reconstruction( - heliostat_data_mapping=validation_heliostat_data_mapping, - heliostat_group=heliostat_group_for_reconstruction, - scenario=scenario, - device=device, - ) - - heliostat_group_for_reconstruction.activate_heliostats( - active_heliostats_mask=validation_active_heliostats_mask, - device=device, - ) - heliostat_group_for_reconstruction.align_surfaces_with_incident_ray_directions( - aim_points=scenario.target_areas.centers[validation_target_area_mask], - incident_ray_directions=validation_incident_ray_directions, - active_heliostats_mask=validation_active_heliostats_mask, - device=device, - ) - ray_tracer_ideal = HeliostatRayTracer( - scenario=scenario, - heliostat_group=heliostat_group_for_reconstruction, - world_size=ddp_setup["heliostat_group_world_size"], - rank=ddp_setup["heliostat_group_rank"], - batch_size=heliostat_group_for_reconstruction.number_of_active_heliostats, - random_seed=ddp_setup["heliostat_group_rank"], - bitmap_resolution=torch.tensor([256, 256], device=device), - ) - validation_bitmaps_per_heliostat_ideal = ray_tracer_ideal.trace_rays( - incident_ray_directions=validation_incident_ray_directions, - active_heliostats_mask=validation_active_heliostats_mask, - target_area_mask=validation_target_area_mask, - device=device, - ) - - loss_definition = loss_functions.KLDivergenceLoss() - - kl_div_ideal = loss_definition( - prediction=validation_bitmaps_per_heliostat_ideal, - ground_truth=validation_measured_flux_distributions, - target_area_mask=validation_target_area_mask, - reduction_dimensions=(1, 2), - device=device, - )[0].item() - - results = { - "ideal_flux": validation_bitmaps_per_heliostat_ideal, - "kl_div_ideal": kl_div_ideal, - } - results_dict[result_key] = results - - if not results_file.parent.exists(): - results_file.parent.mkdir(parents=True, exist_ok=True) - - torch.save(results_dict, results_file) - - -def create_deflectometry_surface( - data_directory: pathlib.Path, - scenario_path: pathlib.Path, - validation_heliostat_data_mapping: list[ - tuple[str, list[pathlib.Path], list[pathlib.Path]] - ], - reconstruction_parameters: dict[str, float | int], - results_file: pathlib.Path, - 
result_key: str, - device: torch.device | None, -) -> None: - """ - Create the surface from the measured deflectometry as comparison. - - Parameters - ---------- - data_directory : pathlib.Path - Path to the data directory. - scenario_path : pathlib.Path - Path to the scenario being used. - validation_heliostat_data_mapping : list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] - Data mapping for the plot. - reconstruction_parameters : dict[str, float | int] - Parameters for the reconstruction. - results_file : pathlib.Path - Path to the unified results file, saved as a torch checkpoint. - result_key : str - Key under which to store the result. - device : torch.device | None - The device on which to perform computations or load tensors and models (default is None). - If None, ``ARTIST`` will automatically select the most appropriate - device (CUDA or CPU) based on availability and OS. - """ - device = get_device(device) - - results_dict: dict[str, dict[str, np.ndarray | torch.Tensor]] = {} - - try: - loaded = torch.load(results_file, weights_only=False) - results_dict = cast(dict[str, torch.Tensor], loaded) - except FileNotFoundError: - print(f"File not found: {results_file}. Initializing with an empty dictionary.") - except Exception as e: - print(f"An error occurred while loading the file: {e}") - number_of_heliostat_groups = Scenario.get_number_of_heliostat_groups_from_hdf5( - scenario_path=scenario_path - ) - - with setup_distributed_environment( - number_of_heliostat_groups=number_of_heliostat_groups, - device=device, - ) as ddp_setup: - device = ddp_setup[config_dictionary.device] - - number_of_surface_points_per_facet = torch.tensor( - [ - reconstruction_parameters["number_of_surface_points"], - reconstruction_parameters["number_of_surface_points"], - ], - device=device, - ) - - with h5py.File(scenario_path, "r") as scenario_file: - scenario = Scenario.load_scenario_from_hdf5( - scenario_file=scenario_file, - number_of_surface_points_per_facet=number_of_surface_points_per_facet, - device=device, - ) - - heliostat_for_reconstruction_name = validation_heliostat_data_mapping[0][0] - heliostat_group_for_reconstruction = [ - group - for group in scenario.heliostat_field.heliostat_groups - if heliostat_for_reconstruction_name in group.names - ][0] - - heliostat_properties_tuples: list[tuple[str, pathlib.Path]] = [ - ( - heliostat_for_reconstruction_name, - pathlib.Path( - f"{data_directory}/{heliostat_for_reconstruction_name}/{paint_mappings.SAVE_PROPERTIES}/{heliostat_for_reconstruction_name}-{paint_mappings.HELIOSTAT_PROPERTIES_KEY}.json" - ), - ) - ] - - # Extract facet translations and canting vectors. 
- facet_transforms = extract_canting_and_translation_from_properties( - heliostat_list=heliostat_properties_tuples, - convert_to_4d=True, - device=device, - ) - facet_transforms_by_name = { - heliostat_name: (facet_translations, facet_canting_vectors) - for heliostat_name, facet_translations, facet_canting_vectors in facet_transforms - } - - evaluation_points = ( - utils.create_nurbs_evaluation_grid( - number_of_evaluation_points=number_of_surface_points_per_facet, - device=device, - ) - .unsqueeze(0) - .unsqueeze(0) - .expand( - 1, - heliostat_group_for_reconstruction.number_of_facets_per_heliostat, - -1, - -1, - ) - ) - - nurbs = NURBSSurfaces( - degrees=heliostat_group_for_reconstruction.nurbs_degrees, - control_points=heliostat_group_for_reconstruction.nurbs_control_points[ - 0 - ].unsqueeze(0), - device=device, - ) - - points_deflectometry, normals_deflectometry = ( - nurbs.calculate_surface_points_and_normals( - evaluation_points=evaluation_points[0].unsqueeze(0), - device=device, - ) - ) - # Apply inverse canting and translation. - facet_translations, facet_canting_vectors = facet_transforms_by_name[ - heliostat_for_reconstruction_name - ] - normals_decanted = perform_inverse_canting_and_translation( - canted_points=normals_deflectometry[0], - translation=facet_translations, - canting=facet_canting_vectors, - device=device, - ) - - results = { - "points_deflectometry": points_deflectometry, - "normals_deflectometry": normals_decanted.unsqueeze(0), - } - - results_dict[result_key] = results - - if not results_file.parent.exists(): - results_file.parent.mkdir(parents=True, exist_ok=True) - - torch.save(results_dict, results_file) - - -if __name__ == "__main__": - """ - Perform raytracing and save the results. - - This script executes the raytracing in ``ARTIST`` for the two previously generated scenarios. The resulting bitmaps - representing flux images are saved for plotting later. - - Parameters - ---------- - config : str - Path to the configuration file. - device : str - Device to use for the computation. - data_dir : str - Path to the data directory. - heliostat_for_reconstruction : dict[str, list[int]] - The heliostat and its calibration numbers. - results_dir : str - Path to where the results will be saved. - scenarios_dir : str - Path to the directory containing the scenarios. - reconstruction_parameters : dict[str, int | float] - The reconstruction parameters. - """ - # Set default location for configuration file. - script_dir = pathlib.Path(__file__).resolve().parent - default_config_path = script_dir / "hpo_config.yaml" - - parser = argparse.ArgumentParser() - parser.add_argument( - "--config", - type=str, - help="Path to the YAML configuration file.", - default=default_config_path, - ) - - # Parse the config argument first to load the configuration. - args, unknown = parser.parse_known_args() - config_path = pathlib.Path(args.config) - config = {} - if config_path.exists(): - try: - with open(config_path, "r") as f: - config = yaml.safe_load(f) - except yaml.YAMLError as exc: - warnings.warn(f"Error parsing YAML file: {exc}") - else: - warnings.warn( - f"Warning: Configuration file not found at {config_path}. Using defaults." - ) - - # Add remaining arguments to the parser with defaults loaded from the config. 
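-    # Two-stage parsing: the YAML config supplies the defaults, and explicit
-    # command-line flags override them when the full argument set is re-parsed.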
-    data_dir_default = config.get("data_dir", "./paint_data")
-    device_default = config.get("device", "cuda")
-    heliostat_for_reconstruction_default = config.get(
-        "heliostat_for_reconstruction", {"AA39": [244862, 270398, 246213, 258959]}
-    )
-    scenarios_dir_default = config.get("scenarios_dir", "./scenarios")
-    results_dir_default = config.get("results_dir", "./results")
-
-    parser.add_argument(
-        "--device",
-        type=str,
-        help="Device to use.",
-        default=device_default,
-    )
-    parser.add_argument(
-        "--data_dir",
-        type=str,
-        help="Path to downloaded paint data.",
-        default=data_dir_default,
-    )
-    parser.add_argument(
-        "--heliostat_for_reconstruction",
-        type=str,
-        help="The heliostat and its calibration numbers to be reconstructed.",
-        nargs="+",
-        default=heliostat_for_reconstruction_default,
-    )
-    parser.add_argument(
-        "--scenarios_dir",
-        type=str,
-        help="Path to directory containing the generated scenarios.",
-        default=scenarios_dir_default,
-    )
-    parser.add_argument(
-        "--results_dir",
-        type=str,
-        help="Path to save the results.",
-        default=results_dir_default,
-    )
-
-    # Re-parse the full set of arguments.
-    args = parser.parse_args(args=unknown)
-
-    device = get_device(torch.device(args.device))
-    data_dir = pathlib.Path(args.data_dir)
-    optimized_parameter_file = pathlib.Path(args.results_dir) / "hpo_results.json"
-    results_path = pathlib.Path(args.results_dir) / "surface_reconstruction_results.pt"
-    deflectometry_scenario_file = (
-        pathlib.Path(args.scenarios_dir) / "surface_comparison_deflectometry.h5"
-    )
-    ideal_scenario_file = (
-        pathlib.Path(args.scenarios_dir) / "surface_reconstruction_ideal.h5"
-    )
-
-    viable_heliostats_data = (
-        pathlib.Path(args.results_dir) / "surface_reconstruction_viable_heliostats.json"
-    )
-    if not viable_heliostats_data.exists():
-        raise FileNotFoundError(
-            f"The viable heliostat list located at {viable_heliostats_data} could not be found! Please run the ``surface_reconstruction_viable_heliostat_list.py`` script to generate this list, or adjust the file path and try again."
-        )
-
-    # Load viable heliostats data.
-    with open(viable_heliostats_data, "r") as f:
-        viable_heliostats = json.load(f)
-
-    heliostat_data_mapping: list[tuple[str, list[pathlib.Path], list[pathlib.Path]]] = [
-        (
-            item["name"],
-            [pathlib.Path(p) for p in item["calibrations"]],
-            [pathlib.Path(p) for p in item["flux_images"]],
-        )
-        for item in viable_heliostats
-    ]
-
-    validation_heliostat_data_mapping = [
-        (
-            heliostat_data_mapping[0][0],
-            [heliostat_data_mapping[0][1][0]],
-            [heliostat_data_mapping[0][2][0]],
-        )
-    ]
-
-    with open(optimized_parameter_file, "r") as file:
-        reconstruction_parameters = json.load(file)
-
-    # Generate and merge flux images and surfaces.
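-    # All three runs write to the same checkpoint under separate keys
-    # ("reconstructed", "ideal", and "deflectometry"), which the plotting
-    # script loads as one results dictionary.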
-    reconstruct_and_create_flux_image(
-        data_directory=data_dir,
-        scenario_path=ideal_scenario_file,
-        heliostat_data_mapping=heliostat_data_mapping,
-        validation_heliostat_data_mapping=validation_heliostat_data_mapping,
-        reconstruction_parameters=reconstruction_parameters,
-        results_file=results_path,
-        result_key="reconstructed",
-        device=device,
-    )
-    create_ideal_flux_image(
-        scenario_path=ideal_scenario_file,
-        reconstruction_parameters=reconstruction_parameters,
-        validation_heliostat_data_mapping=validation_heliostat_data_mapping,
-        results_file=results_path,
-        result_key="ideal",
-        device=device,
-    )
-
-    create_deflectometry_surface(
-        data_directory=data_dir,
-        scenario_path=ideal_scenario_file,
-        reconstruction_parameters=reconstruction_parameters,
-        validation_heliostat_data_mapping=validation_heliostat_data_mapping,
-        results_file=results_path,
-        result_key="deflectometry",
-        device=device,
-    )
diff --git a/examples/hyperparameter_optimization/surface_reconstruction_viable_heliostat_list.py b/examples/hyperparameter_optimization/surface_reconstruction_viable_heliostat_list.py
deleted file mode 100644
index 0fb133046..000000000
--- a/examples/hyperparameter_optimization/surface_reconstruction_viable_heliostat_list.py
+++ /dev/null
@@ -1,250 +0,0 @@
-import argparse
-import json
-import pathlib
-import warnings
-
-import paint.util.paint_mappings as paint_mappings
-import torch
-import yaml
-
-from artist.util.environment_setup import get_device
-
-
-def find_heliostat_files(
-    data_directory: pathlib.Path,
-    heliostat_calibrations: dict[str, list[int]],
-    calibration_image_type: str,
-) -> list[tuple[str, list[pathlib.Path], list[pathlib.Path], pathlib.Path]]:
-    """
-    Find the heliostat and its calibration data.
-
-    This function searches for the requested heliostat; if it is found, the relevant paths are collected.
-    The result contains a tuple including the heliostat name, the paths to the calibration files, and the paths to the flux images.
-    A calibration JSON is considered valid when its focal-spot section contains the centroids extracted by both HeliOS and
-    UTIS.
-
-    Parameters
-    ----------
-    data_directory : pathlib.Path
-        The path to the data directory.
-    heliostat_calibrations : dict[str, list[int]]
-        The selected heliostat and its calibration numbers.
-    calibration_image_type : str
-        The type of calibration image to use, i.e., ``flux`` or ``flux-centered``.
-
-    Returns
-    -------
-    list[tuple[str, list[pathlib.Path], list[pathlib.Path], pathlib.Path]]
-        A list of tuples containing:
-        - The heliostat name.
-        - A list of valid calibration file paths.
-        - A list of flux image file paths.
-        - The associated heliostat properties path.
- """ - found_heliostat = [] - - json_suffix_to_remove = ( - paint_mappings.CALIBRATION_PROPERTIES_IDENTIFIER.removesuffix(".json") - ) - - heliostat_name = list(heliostat_calibrations.keys())[0] - - heliostat_dir = data_directory / heliostat_name - - if not heliostat_dir.is_dir(): - heliostat_dir = data_directory / "AA39" - raise ValueError(f"No data found for {heliostat_name}.") - - properties_path = ( - heliostat_dir - / paint_mappings.SAVE_PROPERTIES - / f"{paint_mappings.HELIOSTAT_PROPERTIES_SAVE_NAME % heliostat_name}" - ) - calibration_dir = heliostat_dir / paint_mappings.SAVE_CALIBRATION - - valid_calibration_files = [] - flux_images = [] - - calibration_numbers = set(map(str, heliostat_calibrations[heliostat_name])) - matching_files = [ - file - for file in calibration_dir.iterdir() - if file.is_file() - and any(number in file.name for number in calibration_numbers) - and paint_mappings.CALIBRATION_PROPERTIES_IDENTIFIER in file.name - ] - - for calibration_file_path in matching_files: - try: - with calibration_file_path.open("r") as f: - calibration_data = json.load(f) - focal_spot_data = calibration_data.get( - paint_mappings.FOCAL_SPOT_KEY, {} - ) - - if ( - paint_mappings.HELIOS_KEY in focal_spot_data - and paint_mappings.UTIS_KEY in focal_spot_data - ): - # Check for the existence of the corresponding flux image. - file_stem = calibration_file_path.stem.removesuffix( - json_suffix_to_remove - ) - flux_image_path = ( - calibration_dir / f"{file_stem}-{calibration_image_type}.png" - ) - - if flux_image_path.exists(): - valid_calibration_files.append(calibration_file_path) - flux_images.append(flux_image_path) - except Exception as e: - print(f"Warning: Skipping {calibration_file_path} due to error: {e}") - - found_heliostat.append( - ( - heliostat_name, - valid_calibration_files, - flux_images, - properties_path, - ) - ) - - return sorted(found_heliostat, key=lambda x: x[0]) - - -if __name__ == "__main__": - """ - Generate list of viable heliostats for the reconstruction. - - This script searches for the selected heliostat and its calibration data. - - Parameters - ---------- - config : str - Path to the configuration file. - device : str - Device to use for the computation. - data_dir : str - Path to the data directory. - results_dir : str - Path to where the results will be saved. - heliostat_for_reconstruction : dict[str, list[int]] - The heliostat and its calibration numbers. - calibration_image_type : str - Type of calibration image to use, either flux or flux-centered. - """ - # Set default location for configuration file. - script_dir = pathlib.Path(__file__).resolve().parent - default_config_path = script_dir / "hpo_config.yaml" - - parser = argparse.ArgumentParser() - parser.add_argument( - "--config", - type=str, - help="Path to the YAML configuration file.", - default=default_config_path, - ) - - # Parse the config argument first to load the configuration. - args, unknown = parser.parse_known_args() - config_path = pathlib.Path(args.config) - config = {} - if config_path.exists(): - try: - with open(config_path, "r") as f: - config = yaml.safe_load(f) - except yaml.YAMLError as exc: - warnings.warn(f"Error parsing YAML file: {exc}") - else: - warnings.warn( - f"Warning: Configuration file not found at {config_path}. Using defaults." - ) - - # Add remaining arguments to the parser with defaults loaded from the config. 
-    data_dir_default = config.get("data_dir", "./paint_data")
-    device_default = config.get("device", "cuda")
-    results_dir_default = config.get("results_dir", "./results")
-    heliostat_for_reconstruction_default = config.get(
-        "heliostat_for_reconstruction", {"AA39": [244862, 270398, 246213, 258959]}
-    )
-    calibration_image_type_default = config.get(
-        "calibration_image_type", "flux-centered"
-    )
-
-    parser.add_argument(
-        "--device",
-        type=str,
-        help="Device to use.",
-        default=device_default,
-    )
-    parser.add_argument(
-        "--data_dir",
-        type=str,
-        help="Path to downloaded paint data.",
-        default=data_dir_default,
-    )
-    parser.add_argument(
-        "--results_dir",
-        type=str,
-        help="Path to save the results.",
-        default=results_dir_default,
-    )
-    parser.add_argument(
-        "--heliostat_for_reconstruction",
-        type=str,
-        help="The heliostat and its calibration numbers to be reconstructed.",
-        nargs="+",
-        default=heliostat_for_reconstruction_default,
-    )
-    parser.add_argument(
-        "--calibration_image_type",
-        type=str,
-        help="Type of calibration image to use, i.e., flux or flux-centered.",
-        choices=["flux", "flux-centered"],
-        default=calibration_image_type_default,
-    )
-
-    # Re-parse the full set of arguments.
-    args = parser.parse_args(args=unknown)
-
-    device = get_device(torch.device(args.device))
-    data_dir = pathlib.Path(args.data_dir)
-
-    heliostat_data_list = find_heliostat_files(
-        data_directory=data_dir,
-        heliostat_calibrations=args.heliostat_for_reconstruction,
-        calibration_image_type=args.calibration_image_type,
-    )
-
-    print(f"Selected {len(heliostat_data_list)} heliostat(s):")
-    for (
-        heliostat_name,
-        calibration_paths,
-        flux_paths,
-        _,
-    ) in heliostat_data_list:
-        print(
-            f"- {heliostat_name}: {len(calibration_paths)} calibrations, {len(flux_paths)} flux images ({args.calibration_image_type})"
-        )
-
-    serializable_data = [
-        {
-            "name": heliostat_name,
-            "calibrations": [
-                str(calibration_path) for calibration_path in calibration_paths
-            ],
-            "flux_images": [str(flux_path) for flux_path in flux_paths],
-            "properties": str(properties_path),
-        }
-        for heliostat_name, calibration_paths, flux_paths, properties_path in heliostat_data_list
-    ]
-
-    results_path = (
-        pathlib.Path(args.results_dir) / "surface_reconstruction_viable_heliostats.json"
-    )
-    if not results_path.parent.is_dir():
-        results_path.parent.mkdir(parents=True, exist_ok=True)
-
-    with open(results_path, "w") as output_file:
-        json.dump(serializable_data, output_file, indent=2)
-    print(f"Saved {len(serializable_data)} heliostat entries to {results_path}")
diff --git a/examples/paint_plots/flux_prediction_plot.py b/examples/paint_plots/flux_prediction_plot.py
index cd609084a..840c993c5 100644
--- a/examples/paint_plots/flux_prediction_plot.py
+++ b/examples/paint_plots/flux_prediction_plot.py
@@ -173,8 +173,8 @@ def plot_flux_prediction(
     # Add remaining arguments to the parser with defaults loaded from the config.
device_default = config.get("device", "cuda") - results_dir_default = config.get("results_dir", "./results") - plots_dir_default = config.get("plots_dir", "./plots") + results_dir_default = config.get("results_dir", "./examples/paint_plots/results") + plots_dir_default = config.get("plots_dir", "./examples/paint_plots/plots") parser.add_argument( "--device", diff --git a/examples/paint_plots/flux_prediction_raytracing.py b/examples/paint_plots/flux_prediction_raytracing.py index e840277b9..c921aeae3 100644 --- a/examples/paint_plots/flux_prediction_raytracing.py +++ b/examples/paint_plots/flux_prediction_raytracing.py @@ -511,8 +511,10 @@ def generate_flux_images( heliostats_default = config.get( "heliostats_for_raytracing", {"AA39": 149576, "AY26": 247613, "BC34": 82084} ) - scenarios_dir_default = config.get("scenarios_dir", "./scenarios") - results_dir_default = config.get("results_dir", "./results") + scenarios_dir_default = config.get( + "scenarios_dir", "./examples/paint_plots/scenarios" + ) + results_dir_default = config.get("results_dir", "./examples/paint_plots/results") parser.add_argument( "--device", diff --git a/examples/paint_plots/flux_prediction_scenario.py b/examples/paint_plots/flux_prediction_scenario.py index 720ffd832..825292523 100644 --- a/examples/paint_plots/flux_prediction_scenario.py +++ b/examples/paint_plots/flux_prediction_scenario.py @@ -247,7 +247,9 @@ def generate_flux_prediction_scenario( heliostats_default = config.get( "heliostats_for_raytracing", {"AA39": 149576, "AY26": 247613, "BC34": 82084} ) - scenarios_dir_default = config.get("scenarios_dir", "./scenarios") + scenarios_dir_default = config.get( + "scenarios_dir", "./examples/paint_plots/scenarios" + ) parser.add_argument( "--device", diff --git a/examples/paint_plots/paint_plot_config.yaml b/examples/paint_plots/paint_plot_config.yaml index 3c8383950..c3ca9fc0a 100644 --- a/examples/paint_plots/paint_plot_config.yaml +++ b/examples/paint_plots/paint_plot_config.yaml @@ -2,9 +2,9 @@ metadata_root: "./" metadata_file_name: "calibration_metadata_all_heliostats.csv" data_dir: "base/path/data" tower_file_name: "WRI1030197-tower-measurements.json" -scenarios_dir: "./scenarios" -results_dir: "./results" -plots_dir: "./plots" +scenarios_dir: "./examples/paint_plots/scenarios" +results_dir: "./examples/paint_plots/results" +plots_dir: "./examples/paint_plots/plots" minimum_number_of_measurements: 10 maximum_number_of_heliostats_for_reconstruction: 2200 excluded_heliostats_for_reconstruction: ["BE20", "AP14"] diff --git a/examples/paint_plots/reconstruction_generate_results.py b/examples/paint_plots/reconstruction_generate_results.py index fc36dc7f2..49c3724b2 100644 --- a/examples/paint_plots/reconstruction_generate_results.py +++ b/examples/paint_plots/reconstruction_generate_results.py @@ -74,11 +74,22 @@ def generate_reconstruction_results( config_dictionary.kinematic_reconstruction_raytracing ) + # Configure the optimization. + optimizer_dict = { + config_dictionary.initial_learning_rate: 1e-3, + config_dictionary.tolerance: 0, + config_dictionary.max_epoch: 1000, + config_dictionary.batch_size: 500, + config_dictionary.log_step: 50, + config_dictionary.early_stopping_delta: 1e-6, + config_dictionary.early_stopping_patience: 4000, + config_dictionary.early_stopping_window: 1000, + } # Configure the learning rate scheduler. 
- scheduler = config_dictionary.exponential - scheduler_parameters = { + scheduler_dict = { + config_dictionary.scheduler_type: config_dictionary.exponential, config_dictionary.gamma: 0.999, - config_dictionary.min: 1e-6, + config_dictionary.min: 1e-5, config_dictionary.max: 1e-2, config_dictionary.step_size_up: 500, config_dictionary.reduce_factor: 0.3, @@ -86,17 +97,10 @@ def generate_reconstruction_results( config_dictionary.threshold: 1e-3, config_dictionary.cooldown: 10, } - - # Set optimization parameters. + # Combine configurations. optimization_configuration = { - config_dictionary.initial_learning_rate: 0.0001, - config_dictionary.tolerance: 0, - config_dictionary.max_epoch: 1000, - config_dictionary.log_step: 50, - config_dictionary.early_stopping_delta: 1e-6, - config_dictionary.early_stopping_patience: 4000, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, } for centroid in [paint_mappings.UTIS_KEY, paint_mappings.HELIOS_KEY]: @@ -110,7 +114,7 @@ def generate_reconstruction_results( | list[tuple[str, list[pathlib.Path], list[pathlib.Path]]], ] = { config_dictionary.data_parser: PaintCalibrationDataParser( - centroid_extraction_method=centroid + sample_limit=3, centroid_extraction_method=centroid ), config_dictionary.heliostat_data_mapping: heliostat_data_mapping, } @@ -192,8 +196,10 @@ def generate_reconstruction_results( # Add remaining arguments to the parser with defaults loaded from the config. device_default = config.get("device", "cuda") - results_dir_default = config.get("results_dir", "./results") - scenarios_dir_default = config.get("scenarios_dir", "./scenarios") + results_dir_default = config.get("results_dir", "./examples/paint_plots/results") + scenarios_dir_default = config.get( + "scenarios_dir", "./examples/paint_plots/scenarios" + ) parser.add_argument( "--device", diff --git a/examples/paint_plots/reconstruction_generate_viable_heliostats_list.py b/examples/paint_plots/reconstruction_generate_viable_heliostats_list.py index 05ddce5cd..3e2e4726e 100644 --- a/examples/paint_plots/reconstruction_generate_viable_heliostats_list.py +++ b/examples/paint_plots/reconstruction_generate_viable_heliostats_list.py @@ -187,7 +187,7 @@ def find_viable_heliostats( # Add remaining arguments to the parser with defaults loaded from the config. data_dir_default = config.get("data_dir", "./paint_data") device_default = config.get("device", "cuda") - results_dir_default = config.get("results_dir", "./results") + results_dir_default = config.get("results_dir", "./examples/paint_plots/results") minimum_number_of_measurements_default = config.get( "minimum_number_of_measurements", 80 ) diff --git a/examples/paint_plots/reconstruction_plot.py b/examples/paint_plots/reconstruction_plot.py index f20e3b3a8..97b03deb4 100644 --- a/examples/paint_plots/reconstruction_plot.py +++ b/examples/paint_plots/reconstruction_plot.py @@ -1,5 +1,4 @@ import argparse -import math import pathlib import warnings from typing import Any @@ -41,12 +40,10 @@ def plot_error_distribution( # Convert losses to list. 
helios_losses_in_meters = [ - math.sqrt(data[paint_mappings.HELIOS_KEY]) - for data in reconstruction_results.values() + data[paint_mappings.HELIOS_KEY] for data in reconstruction_results.values() ] utis_losses_in_meters = [ - math.sqrt(data[paint_mappings.UTIS_KEY]) - for data in reconstruction_results.values() + data[paint_mappings.UTIS_KEY] for data in reconstruction_results.values() ] x_max = max(utis_losses_in_meters + helios_losses_in_meters) x_vals = np.linspace(0, x_max, 100) @@ -168,12 +165,10 @@ def plot_error_against_distance( # Load as lists. positions_list = [data["Position"] for data in reconstruction_results.values()] helios_loss_list_in_meters = [ - math.sqrt(data[paint_mappings.HELIOS_KEY]) - for data in reconstruction_results.values() + data[paint_mappings.HELIOS_KEY] for data in reconstruction_results.values() ] utis_loss_list_in_meters = [ - math.sqrt(data[paint_mappings.UTIS_KEY]) - for data in reconstruction_results.values() + data[paint_mappings.UTIS_KEY] for data in reconstruction_results.values() ] # Convert to arrays for plotting. @@ -239,7 +234,7 @@ def plot_error_against_distance( label="UTIS Trend", ) - ax.set_xlabel("\\textbf{Heliostat Distance from Tower} \n{m}") + ax.set_xlabel("\\textbf{Heliostat Distance from Tower} \n{meter}") ax.set_ylabel("\\textbf{Mean Pointing Error} \n{meter}") ax.grid(True) ax.legend(fontsize=8, loc="upper right", ncol=2) @@ -303,8 +298,8 @@ def plot_error_against_distance( # Add remaining arguments to the parser with defaults loaded from the config. device_default = config.get("device", "cuda") - results_dir_default = config.get("results_dir", "./results") - plots_dir_default = config.get("plots_dir", "./plots") + results_dir_default = config.get("results_dir", "./examples/paint_plots/results") + plots_dir_default = config.get("plots_dir", "./examples/paint_plots/plots") number_of_points_to_plot_default = config.get("number_of_points_to_plot", 100) random_seed_default = config.get("random_seed", 7) diff --git a/examples/paint_plots/reconstruction_scenario.py b/examples/paint_plots/reconstruction_scenario.py index c0021d2cc..6984e2cdb 100644 --- a/examples/paint_plots/reconstruction_scenario.py +++ b/examples/paint_plots/reconstruction_scenario.py @@ -138,8 +138,10 @@ def generate_reconstruction_scenario( tower_file_name_default = config.get( "tower_file_name", "WRI1030197-tower-measurements.json" ) - results_dir_default = config.get("results_dir", "./results") - scenarios_dir_default = config.get("scenarios_dir", "./scenarios") + results_dir_default = config.get("results_dir", "./examples/paint_plots/results") + scenarios_dir_default = config.get( + "scenarios_dir", "./examples/paint_plots/scenarios" + ) parser.add_argument( "--device", diff --git a/pyproject.toml b/pyproject.toml index 56d11ca4a..94b101de2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ requires-python = ">=3.10" classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", - "Development Status :: 1 - Planning", + "Development Status :: 5 - Production/Stable", ] dependencies = [ "numpy", diff --git a/tests/core/test_blocking.py b/tests/core/test_blocking.py new file mode 100644 index 000000000..7f3b35710 --- /dev/null +++ b/tests/core/test_blocking.py @@ -0,0 +1,530 @@ +import pathlib + +import h5py +import pytest +import torch + +from artist import ARTIST_ROOT +from artist.core import blocking +from artist.core.heliostat_ray_tracer import HeliostatRayTracer +from artist.scenario.scenario import Scenario +from 
artist.util import utils
+
+
+@pytest.fixture
+def surface_at_origin() -> torch.Tensor:
+    """
+    Define a surface at the origin.
+
+    Returns
+    -------
+    torch.Tensor
+        The surface.
+    """
+    corner_points = torch.tensor(
+        [
+            [-1.0, -0.5, 0.0, 1.0],
+            [1.0, -0.5, 0.0, 1.0],
+            [1.0, 0.5, 0.0, 1.0],
+            [-1.0, 0.5, 0.0, 1.0],
+        ],
+    )
+
+    interior_points = torch.tensor(
+        [
+            [0.0, 0.0, 0.0, 1.0],
+            [0.5, 0.0, 0.0, 1.0],
+        ],
+    )
+
+    surface_points = torch.cat([corner_points, interior_points], dim=0)[None, :, :]
+
+    return surface_points
+
+
+@pytest.fixture
+def surface_rotated_and_translated(surface_at_origin: torch.Tensor) -> torch.Tensor:
+    """
+    Define a rotated and translated surface.
+
+    Parameters
+    ----------
+    surface_at_origin : torch.Tensor
+        A surface at the origin.
+
+    Returns
+    -------
+    torch.Tensor
+        The surface.
+    """
+    device = surface_at_origin.device
+    rotation_e = utils.rotate_e(e=torch.tensor([0.5]), device=device)
+    rotation_n = utils.rotate_n(n=torch.tensor([0.2]), device=device)
+
+    translation = torch.tensor(
+        [
+            [1.0, 0.0, 0.0, 2.0],
+            [0.0, 1.0, 0.0, 3.0],
+            [0.0, 0.0, 1.0, 1.5],
+            [0.0, 0.0, 0.0, 1.0],
+        ],
+        device=device,
+    )
+
+    transform = translation.T @ rotation_n @ rotation_e
+
+    transformed_surface = surface_at_origin @ transform
+
+    return transformed_surface
+
+
+@pytest.mark.parametrize(
+    "surface, transformed_surface, expected",
+    [
+        (
+            "surface_at_origin",
+            "surface_at_origin",
+            [
+                torch.tensor(
+                    [
+                        [
+                            [-1.0, -0.5, 0.0, 1.0],
+                            [1.0, -0.5, 0.0, 1.0],
+                            [1.0, 0.5, 0.0, 1.0],
+                            [-1.0, 0.5, 0.0, 1.0],
+                        ]
+                    ],
+                ),
+                torch.tensor([[[2.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]]),
+                torch.tensor([[0.0, 0.0, 1.0]]),
+            ],
+        ),
+        (
+            "surface_at_origin",
+            "surface_rotated_and_translated",
+            [
+                torch.tensor(
+                    [
+                        [
+                            [1.2781, 2.8035, -0.0828, 1.0000],
+                            [3.2382, 2.6130, -0.4315, 1.0000],
+                            [3.2382, 3.4906, -0.9109, 1.0000],
+                            [1.2781, 3.6811, -0.5622, 1.0000],
+                        ]
+                    ]
+                ),
+                torch.tensor(
+                    [
+                        [
+                            [1.9601, -0.1905, -0.3487, 0.0000],
+                            [0.0000, 0.8776, -0.4794, 0.0000],
+                        ]
+                    ]
+                ),
+                torch.tensor([[0.1987, 0.4699, 0.8601]]),
+            ],
+        ),
+    ],
+)
+def test_create_blocking_primitives_rectangle(
+    surface: str,
+    transformed_surface: str,
+    expected: list[torch.Tensor],
+    request: pytest.FixtureRequest,
+    device: torch.device,
+) -> None:
+    """
+    Test the creation of rectangular blocking primitives via corner interpolation.
+
+    Parameters
+    ----------
+    surface : str
+        Name of the fixture providing the unaligned surface points.
+    transformed_surface : str
+        Name of the fixture providing the aligned (transformed) surface points.
+    expected : list[torch.Tensor]
+        The expected corner, span, and normal tensors.
+    request : pytest.FixtureRequest
+        The pytest fixture used to consider different test cases.
+    device : torch.device
+        The device on which to initialize tensors.
+
+    Raises
+    ------
+    AssertionError
+        If test does not complete as expected.
+ """ + blocking_surface_points = request.getfixturevalue(surface) + active_surface_points = request.getfixturevalue(transformed_surface) + + corners, spans, normals = blocking.create_blocking_primitives_rectangle( + blocking_heliostats_surface_points=blocking_surface_points.to(device), + blocking_heliostats_active_surface_points=active_surface_points.to(device), + epsilon=0.1, + device=device, + ) + + torch.testing.assert_close(corners, expected[0].to(device), atol=5e-4, rtol=5e-4) + torch.testing.assert_close(spans, expected[1].to(device), atol=5e-4, rtol=5e-4) + torch.testing.assert_close(normals, expected[2].to(device), atol=5e-4, rtol=5e-4) + + +@pytest.fixture +def surface_for_index_test(device: torch.device) -> torch.Tensor: + """ + Define a rotated and translated surface. + + Parameters + ---------- + device : torch.device + The device on which to initialize tensors. + + Returns + ------- + torch.Tensor + The surface. + """ + points_per_axis = 5 + + facet_coordinates: list[torch.Tensor] = [] + facet_origins = [ + (0.0, 0.0), + (2.0, 0.0), + (2.0, 1.0), + (0.0, 2.0), + ] + + facet_size = (2.0, 2.0) + + for x, y in facet_origins: + xs = torch.linspace(x, x + facet_size[0], points_per_axis, device=device) + ys = torch.linspace(y, y + facet_size[1], points_per_axis, device=device) + grid_x, grid_y = torch.meshgrid(xs, ys, indexing="ij") + grid_z = torch.zeros_like(grid_x) + points = torch.stack( + [grid_x.flatten(), grid_y.flatten(), grid_z.flatten()], dim=-1 + ) + facet_coordinates.append(points) + + all_points = torch.cat(facet_coordinates, dim=0) + all_points = torch.cat( + [all_points, torch.ones(all_points.shape[0], 1, device=device)], dim=-1 + ) + all_points = all_points[None, :, :] + + return all_points + + +@pytest.fixture +def surface_for_index_test_rotated_and_translated( + surface_for_index_test, +) -> torch.Tensor: + """ + Define a rotated and translated surface. + + Parameters + ---------- + surface_for_index_test : torch.Tensor + Surface at the origin, not rotated. + + Returns + ------- + torch.Tensor + The surface. 
+ """ + device = surface_for_index_test.device + rotation_e = utils.rotate_e(e=torch.tensor([0.5]), device=device) + rotation_n = utils.rotate_n(n=torch.tensor([0.2]), device=device) + + translation = torch.tensor( + [ + [1.0, 0.0, 0.0, 2.0], + [0.0, 1.0, 0.0, 3.0], + [0.0, 0.0, 1.0, 1.5], + [0.0, 0.0, 0.0, 1.0], + ], + device=device, + ) + + transform = translation.T @ rotation_n @ rotation_e + + active_surface_points = surface_for_index_test @ transform + return active_surface_points + + +@pytest.mark.parametrize( + "surface, expected", + [ + ( + "surface_for_index_test", + [ + torch.tensor( + [ + [ + [2.0, 1.0, 0.0, 1.0], + [0.0, 2.0, 0.0, 1.0], + [4.0, 2.0, 0.0, 1.0], + [2.0, 2.0, 0.0, 1.0], + ] + ] + ), + torch.tensor([[[-2.0, 1.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]]), + torch.tensor([[0.0, 0.0, -1.0]]), + ], + ), + ( + "surface_for_index_test_rotated_and_translated", + [ + torch.tensor( + [ + [ + [4.2183, 3.8341, -1.3250, 1.0000], + [2.2581, 4.9022, -1.4557, 1.0000], + [6.1784, 4.5212, -2.1531, 1.0000], + [4.2183, 4.7117, -1.8044, 1.0000], + ] + ] + ), + torch.tensor( + [ + [ + [-1.9601, 1.0681, -0.1307, 0.0000], + [0.0000, 0.8776, -0.4794, 0.0000], + ] + ] + ), + torch.tensor([[-0.1987, -0.4699, -0.8601]]), + ], + ), + ], +) +def test_create_blocking_primitives_rectangles_by_index( + surface: torch.Tensor, + expected: list[torch.Tensor], + request: pytest.FixtureRequest, + device: torch.device, +) -> None: + """ + Test that the creation of blocking primitives works as desired. + + Parameter + --------- + surface : torch.Tensor + Surface randomly transformed. + expected : torch.Tensor + The expected tensors. + request : pytest.FixtureRequest + The pytest fixture used to consider different test cases. + device : torch.device + The device on which to initialize tensors. + + Raises + ------ + AssertionError + If test does not complete as expected. + """ + active_surface_points = request.getfixturevalue(surface) + + corners, spans, normals = blocking.create_blocking_primitives_rectangles_by_index( + blocking_heliostats_active_surface_points=active_surface_points.to(device), + device=device, + ) + + torch.testing.assert_close(corners, expected[0].to(device), atol=5e-4, rtol=5e-4) + torch.testing.assert_close(spans, expected[1].to(device), atol=5e-4, rtol=5e-4) + torch.testing.assert_close(normals, expected[2].to(device), atol=5e-4, rtol=5e-4) + + +def test_blocking_integration(device: torch.device) -> None: + """ + Test all blocking methods in an integration test. + + Parameters + ---------- + device : torch.device + The device on which to initialize tensors. + + Raises + ------ + AssertionError + If test does not complete as expected. + """ + torch.manual_seed(7) + torch.cuda.manual_seed(7) + + # Load the scenario. 
+ with h5py.File( + pathlib.Path(ARTIST_ROOT) / "tests/data/scenarios/test_blocking.h5", + "r", + ) as scenario_file: + scenario = Scenario.load_scenario_from_hdf5( + scenario_file=scenario_file, + device=device, + ) + + incident_ray_direction = torch.nn.functional.normalize( + torch.tensor([0.0, 1.0, 0.0, 0.0], device=device), dim=-1 + ) + + heliostat_group = scenario.heliostat_field.heliostat_groups[0] + heliostat_target_light_source_mapping = [ + ("heliostat_0", "target_0", incident_ray_direction), + ("heliostat_1", "target_0", incident_ray_direction), + ("heliostat_2", "target_0", incident_ray_direction), + ("heliostat_3", "target_0", incident_ray_direction), + ("heliostat_4", "target_0", incident_ray_direction), + ("heliostat_5", "target_0", incident_ray_direction), + ] + + ( + active_heliostats_mask, + target_area_mask, + incident_ray_directions, + ) = scenario.index_mapping( + heliostat_group=heliostat_group, + string_mapping=heliostat_target_light_source_mapping, + device=device, + ) + + heliostat_group.activate_heliostats( + active_heliostats_mask=active_heliostats_mask, device=device + ) + + heliostat_group.align_surfaces_with_incident_ray_directions( + aim_points=scenario.target_areas.centers[target_area_mask], + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + device=device, + ) + + scenario.set_number_of_rays(number_of_rays=200) + + ray_tracer = HeliostatRayTracer( + scenario=scenario, + heliostat_group=heliostat_group, + blocking_active=True, + batch_size=10, + ) + + bitmaps_per_heliostat = ray_tracer.trace_rays( + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + target_area_mask=target_area_mask, + device=device, + ) + + # Scale bitmap for testing precision. + bitmaps_per_heliostat = ( + bitmaps_per_heliostat + / (bitmaps_per_heliostat.sum(dim=(1, 2), keepdim=True) + 1e-8) + ) * 100 + + expected_path = ( + pathlib.Path(ARTIST_ROOT) + / "tests/data/expected_bitmaps_blocking" + / f"bitmaps_{device.type}.pt" + ) + + expected = torch.load(expected_path, map_location=device, weights_only=True) + + torch.testing.assert_close(bitmaps_per_heliostat, expected, atol=5e-4, rtol=5e-4) + + +def test_ray_extinction(device: torch.device) -> None: + """ + Test the ray extinction. + + Parameters + ---------- + device : torch.device + The device on which to initialize tensors. + + Raises + ------ + AssertionError + If test does not complete as expected. 
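+
+    Notes
+    -----
+    Rough intuition behind the final assertion below: with an extinction
+    factor f, each traced ray's contribution is attenuated, so the total
+    flux on the target with extinction should be approximately (1 - f)
+    times the total flux without extinction.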
+ """ + torch.manual_seed(7) + torch.cuda.manual_seed(7) + + with h5py.File( + pathlib.Path(ARTIST_ROOT) + / "tests/data/scenarios/test_scenario_paint_single_heliostat.h5", + "r", + ) as scenario_file: + scenario = Scenario.load_scenario_from_hdf5( + scenario_file=scenario_file, + device=device, + ) + + heliostat_group = scenario.heliostat_field.heliostat_groups[0] + + ( + active_heliostats_mask, + target_area_mask, + incident_ray_directions, + ) = scenario.index_mapping( + heliostat_group=heliostat_group, + device=device, + ) + + heliostat_group.activate_heliostats( + active_heliostats_mask=active_heliostats_mask, device=device + ) + + heliostat_group.align_surfaces_with_incident_ray_directions( + aim_points=scenario.target_areas.centers[target_area_mask], + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + device=device, + ) + + scenario.set_number_of_rays(number_of_rays=200) + + ray_tracer = HeliostatRayTracer( + scenario=scenario, + heliostat_group=heliostat_group, + blocking_active=True, + batch_size=10, + ) + + ray_extinction_factor = 0.9 + + bitmaps_per_heliostat_no_extinction = ray_tracer.trace_rays( + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + target_area_mask=target_area_mask, + ray_extinction_factor=0.0, + device=device, + ) + + bitmaps_per_heliostat_extinction = ray_tracer.trace_rays( + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + target_area_mask=target_area_mask, + ray_extinction_factor=ray_extinction_factor, + device=device, + ) + + bitmaps = torch.cat( + (bitmaps_per_heliostat_no_extinction, bitmaps_per_heliostat_extinction) + ) + + # Scale bitmap for testing precision. + bitmaps = (bitmaps / (bitmaps.sum(dim=(1, 2), keepdim=True) + 1e-8)) * 100 + + expected_path = ( + pathlib.Path(ARTIST_ROOT) + / "tests/data/expected_bitmaps_ray_extinction" + / f"bitmaps_{device.type}.pt" + ) + + expected = torch.load(expected_path, map_location=device, weights_only=True) + + torch.testing.assert_close(bitmaps, expected, atol=5e-4, rtol=5e-4) + torch.testing.assert_close( + bitmaps_per_heliostat_no_extinction[0].sum() * (1 - ray_extinction_factor), + bitmaps_per_heliostat_extinction[0].sum(), + atol=5e-1, + rtol=5e-1, + ) diff --git a/tests/core/test_core_utils.py b/tests/core/test_core_utils.py deleted file mode 100644 index 92d4828fb..000000000 --- a/tests/core/test_core_utils.py +++ /dev/null @@ -1,113 +0,0 @@ -import pytest -import torch - -from artist.core.core_utils import per_heliostat_reduction, scale_loss - - -@pytest.mark.parametrize( - "per_sample_values, active_heliostats_mask, expected", - [ - ( - torch.tensor([2.0, 5.0, 6.0, 10.0]), - torch.tensor([1, 3]), - torch.tensor([2.0, 7.0]), - ), - ( - torch.tensor([1.0, 3.0, 5.0, 10.0]), - torch.tensor([2, 0, 2]), - torch.tensor([2.0, torch.inf, 7.5]), - ), - ( - torch.tensor([2.0, 4.0]), - torch.tensor([2, 0]), - torch.tensor( - [ - 3.0, - torch.inf, - ] - ), - ), - ], -) -def test_per_heliostat_reduction( - per_sample_values: torch.Tensor, - active_heliostats_mask: torch.Tensor, - expected: torch.Tensor, - device: torch.device, -) -> None: - """ - Test the per heliostat reduction. - - Parameters - ---------- - per_sample_values : torch.Tensor - The loss per sample to be reduced. - Tensor of shape [number_of_samples]. - active_heliostats_mask : torch.Tensor - A mask defining which heliostats are activated. - Tensor of shape [number_of_heliostats]. 
-    expected : torch.Tensor
-        The expected reduced loss.
-        Tensor of shape [number_of_heliostats].
-    device : torch.device
-        The device on which to initialize tensors.
-
-    Raises
-    ------
-    AssertionError
-        If test does not complete as expected.
-    """
-    result = per_heliostat_reduction(
-        per_sample_values=per_sample_values.to(device),
-        active_heliostats_mask=active_heliostats_mask.to(device),
-        device=device,
-    )
-
-    assert torch.allclose(result, expected.to(device))
-
-
-@pytest.mark.parametrize(
-    "loss, reference, weight, expected",
-    [
-        (torch.tensor([2.0]), torch.tensor([2.0]), 1.0, torch.tensor([2.0])),
-        (torch.tensor([2.0]), torch.tensor([2.0]), 0.5, torch.tensor([1.0])),
-        (torch.tensor([4.0]), torch.tensor([2.0]), 1.0, torch.tensor([2.0])),
-        (torch.tensor([0.0]), torch.tensor([2.0]), 1.0, torch.tensor([0.0])),
-    ],
-)
-def test_scale_loss(
-    loss: torch.Tensor,
-    reference: torch.Tensor,
-    weight: float,
-    expected: torch.Tensor,
-    device: torch.device,
-) -> None:
-    """
-    Test the scale loss function.
-
-    Parameters
-    ----------
-    loss : torch.Tensor
-        The loss to be scaled.
-        Tensor of shape [1].
-    reference : torch.Tensor
-        The reference loss.
-        Tensor of shape [1].
-    weight : float
-        The weight or ratio used for the scaling.
-    expected : torch.Tensor
-        The expected scaled loss.
-        Tensor of shape [1].
-    device : torch.device
-        The device on which to initialize tensors.
-
-    Raises
-    ------
-    AssertionError
-        If test does not complete as expected.
-    """
-    scaled = scale_loss(
-        loss=loss.to(device), reference=reference.to(device), weight=weight
-    )
-
-    assert scaled == expected.to(device)
diff --git a/tests/core/test_heliostat_ray_tracer.py b/tests/core/test_heliostat_ray_tracer.py
index 256cebe33..b5f81299e 100644
--- a/tests/core/test_heliostat_ray_tracer.py
+++ b/tests/core/test_heliostat_ray_tracer.py
@@ -3,12 +3,61 @@
 import pytest
 import torch
 
-from artist.core.heliostat_ray_tracer import HeliostatRayTracer
+from artist.core.heliostat_ray_tracer import (
+    HeliostatRayTracer,
+    RestrictedDistributedSampler,
+)
 from artist.field.heliostat_field import HeliostatField
 from artist.field.heliostat_group_rigid_body import HeliostatGroupRigidBody
 from artist.scenario.scenario import Scenario
 
 
+@pytest.mark.parametrize(
+    "number_of_samples, number_of_heliostats, world_size, indices_per_rank",
+    [
+        (12, 4, 1, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]),
+        (12, 4, 2, [[0, 1, 2, 6, 7, 8], [3, 4, 5, 9, 10, 11]]),
+        (12, 4, 3, [[0, 1, 2, 9, 10, 11], [3, 4, 5], [6, 7, 8]]),
+        (12, 4, 4, [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]),
+        (4, 1, 3, [[0, 1, 2, 3], [], []]),
+        (4, 2, 3, [[0, 1], [2, 3], []]),
+    ],
+)
+def test_distributed_sampler(
+    number_of_samples: int,
+    number_of_heliostats: int,
+    world_size: int,
+    indices_per_rank: list[list[int]],
+) -> None:
+    """
+    Test the distributed sampler.
+
+    Parameters
+    ----------
+    number_of_samples : int
+        Number of samples to distribute among ranks.
+    number_of_heliostats : int
+        Number of heliostats.
+    world_size : int
+        Total number of processes.
+    indices_per_rank : list[list[int]]
+        Expected indices for each available rank.
+
+    Raises
+    ------
+    AssertionError
+        If test does not complete as expected.
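+
+    Notes
+    -----
+    A rough reading of the expected indices (an interpretation of the
+    parametrized cases above, not a statement of the implementation):
+    samples are grouped contiguously per heliostat, and whole per-heliostat
+    blocks are dealt out to the ranks round-robin; ranks left without a
+    block stay empty.
+
+    Examples
+    --------
+    The second parametrized case, written out: 12 samples from 4 heliostats
+    split across 2 ranks give each heliostat 3 consecutive samples, with
+    rank 0 receiving blocks 0 and 2.
+
+    >>> sampler = RestrictedDistributedSampler(
+    ...     number_of_samples=12,
+    ...     number_of_active_heliostats=4,
+    ...     world_size=2,
+    ...     rank=0,
+    ... )
+    >>> list(sampler)
+    [0, 1, 2, 6, 7, 8]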
+ """ + for rank in range(world_size): + sampler = RestrictedDistributedSampler( + number_of_samples=number_of_samples, + number_of_active_heliostats=number_of_heliostats, + world_size=world_size, + rank=rank, + ) + indices = list(sampler) + + assert indices == indices_per_rank[rank] + + @pytest.fixture() def mock_scenario() -> Scenario: """ @@ -58,7 +107,7 @@ def test_trace_rays_unaligned_heliostats_error( A mocked scenario. active_heliostats_mask_scenario : torch.Tensor The active heliostats mask defined in the scenario. - active_heliostat_mask : torch.Tensor + active_heliostats_mask : torch.Tensor The active heliostats mask given to the trace rays method. expected : str The expected error message. diff --git a/tests/core/test_kinematic_reconstructor.py b/tests/core/test_kinematic_reconstructor.py index f09b898f8..cf6341a90 100644 --- a/tests/core/test_kinematic_reconstructor.py +++ b/tests/core/test_kinematic_reconstructor.py @@ -8,55 +8,63 @@ from artist import ARTIST_ROOT from artist.core.kinematic_reconstructor import KinematicReconstructor -from artist.core.loss_functions import FocalSpotLoss, Loss, VectorLoss +from artist.core.loss_functions import FocalSpotLoss from artist.data_parser.calibration_data_parser import CalibrationDataParser from artist.data_parser.paint_calibration_parser import PaintCalibrationDataParser from artist.scenario.scenario import Scenario -from artist.util import config_dictionary, set_logger_config - -# Set up logger. -set_logger_config() +from artist.util import config_dictionary @pytest.mark.parametrize( - "reconstruction_method, initial_learning_rate, loss_class, data_parser, early_stopping_delta, centroid_extraction_method, scheduler", + "reconstruction_method, data_parser, centroid_extraction_method, early_stopping_window, scheduler", [ + # Test normal behavior. ( config_dictionary.kinematic_reconstruction_raytracing, - 0.005, - FocalSpotLoss, PaintCalibrationDataParser(), - 1e-4, paint_mappings.UTIS_KEY, + 50, config_dictionary.exponential, ), + # Test early stopping. + ( + config_dictionary.kinematic_reconstruction_raytracing, + PaintCalibrationDataParser(), + paint_mappings.UTIS_KEY, + 10, + config_dictionary.reduce_on_plateau, + ), + # Test invalid centroid extraction. ( config_dictionary.kinematic_reconstruction_raytracing, - 0.005, - FocalSpotLoss, PaintCalibrationDataParser(), - 1.0, "invalid", + 10, config_dictionary.reduce_on_plateau, ), + # Test invalid reconstruction method. ( "invalid", - 0.005, - FocalSpotLoss, PaintCalibrationDataParser(), - 1.0, - "invalid", + paint_mappings.UTIS_KEY, + 10, + config_dictionary.reduce_on_plateau, + ), + # Test invalid parser. + ( + config_dictionary.kinematic_reconstruction_raytracing, + CalibrationDataParser(), + paint_mappings.UTIS_KEY, + 10, config_dictionary.reduce_on_plateau, ), ], ) def test_kinematic_reconstructor( reconstruction_method: str, - initial_learning_rate: float, - loss_class: Loss, data_parser: CalibrationDataParser, - early_stopping_delta: float, centroid_extraction_method: str, + early_stopping_window: int, scheduler: str, ddp_setup_for_testing: dict[str, Any], device: torch.device, @@ -68,16 +76,12 @@ def test_kinematic_reconstructor( ---------- reconstruction_method : str The name of the reconstruction method. - initial_learning_rate : float - The initial learning rate. - loss_class : Loss - The loss class. data_parser : CalibrationDataParser The data parser used to load calibration data from files. 
- early_stopping_delta : float - The minimum required improvement to prevent early stopping. centroid_extraction_method : str The method used to extract the focal spot centroids. + early_stopping_window : int + Early stopping window size. scheduler : str The scheduler to be used. ddp_setup_for_testing : dict[str, Any] @@ -93,24 +97,28 @@ def test_kinematic_reconstructor( torch.manual_seed(7) torch.cuda.manual_seed(7) - scheduler_parameters = { - config_dictionary.gamma: 0.9, + scheduler_dict = { + config_dictionary.scheduler_type: scheduler, + config_dictionary.gamma: 0.99, config_dictionary.min: 1e-4, config_dictionary.reduce_factor: 0.9, config_dictionary.patience: 100, config_dictionary.threshold: 1e-3, config_dictionary.cooldown: 20, } - - optimization_configuration = { - config_dictionary.initial_learning_rate: initial_learning_rate, + optimizer_dict = { + config_dictionary.initial_learning_rate: 1e-3, config_dictionary.tolerance: 0.0005, - config_dictionary.max_epoch: 100, + config_dictionary.max_epoch: 50, + config_dictionary.batch_size: 50, config_dictionary.log_step: 1, - config_dictionary.early_stopping_delta: early_stopping_delta, - config_dictionary.early_stopping_patience: 80, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, + config_dictionary.early_stopping_delta: 1.0, + config_dictionary.early_stopping_patience: 2, + config_dictionary.early_stopping_window: early_stopping_window, + } + optimization_configuration = { + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, } scenario_path = ( @@ -136,9 +144,14 @@ def test_kinematic_reconstructor( "AA31", [ pathlib.Path(ARTIST_ROOT) - / "tests/data/field_data/AA31-calibration-properties_1.json" + / "tests/data/field_data/AA31-calibration-properties_1.json", + pathlib.Path(ARTIST_ROOT) + / "tests/data/field_data/AA31-calibration-properties_2.json", + ], + [ + pathlib.Path(ARTIST_ROOT) / "tests/data/field_data/AA31-flux_1.png", + pathlib.Path(ARTIST_ROOT) / "tests/data/field_data/AA31-flux_2.png", ], - [pathlib.Path(ARTIST_ROOT) / "tests/data/field_data/AA31-flux_1.png"], ), ] @@ -172,6 +185,10 @@ def test_kinematic_reconstructor( ddp_setup_for_testing[config_dictionary.device] = device ddp_setup_for_testing[config_dictionary.groups_to_ranks_mapping] = {0: [0, 1]} + ddp_setup_for_testing[config_dictionary.ranks_to_groups_mapping] = { + 0: [0], + 1: [0], + } if reconstruction_method == "invalid": with pytest.raises(ValueError) as exc_info: @@ -195,11 +212,7 @@ def test_kinematic_reconstructor( reconstruction_method=reconstruction_method, ) - loss_definition = ( - FocalSpotLoss(scenario=scenario) - if loss_class is FocalSpotLoss - else VectorLoss() - ) + loss_definition = FocalSpotLoss(scenario=scenario) # Reconstruct the kinematic. if not isinstance(data_parser, PaintCalibrationDataParser): @@ -220,7 +233,7 @@ def test_kinematic_reconstructor( expected_path = ( pathlib.Path(ARTIST_ROOT) / "tests/data/expected_reconstructed_kinematic_parameters" - / f"{reconstruction_method}_{str(early_stopping_delta).replace('.', '')}_group_{index}_{device.type}.pt" + / f"group_{index}_{early_stopping_window}_{device.type}.pt" ) expected = torch.load( diff --git a/tests/core/test_loss_functions.py b/tests/core/test_loss_functions.py index e9bf5104e..0d222b3c6 100644 --- a/tests/core/test_loss_functions.py +++ b/tests/core/test_loss_functions.py @@ -94,8 +94,6 @@ def test_vector_loss( Tensor of shape [number_of_samples, 4]. 
reduction_dimensions : tuple[int] | None The dimensions along which to reduce the final loss. - reduction_dimensions : tuple[int] | None - The dimensions to reduce over. expected : torch.Tensor The expected loss. Tensor of shape [number_of_samples]. @@ -143,7 +141,7 @@ def test_vector_loss( ( torch.ones((1, 2, 2)), torch.tensor([[0.0, 0.0, 0.0, 0.0]]), - torch.tensor([3.0]), + torch.tensor([1.732050776482]), True, ), ( @@ -236,7 +234,7 @@ def test_focal_spot_loss( torch.tensor([[[1.0, 2.0], [8.0, 6.0]]]), torch.tensor([[2.0, 2.0]]), 100, - torch.tensor([0.761904418468]), + torch.tensor([39.0]), True, ), ( @@ -244,7 +242,7 @@ def test_focal_spot_loss( torch.tensor([[[1.0, 2.0], [8.0, 6.0]]]), torch.tensor([[2.0, 2.0]]), 100, - torch.tensor([0.761904418468]), + torch.tensor([1.0]), False, ), ], @@ -376,6 +374,12 @@ def test_pixel_loss( torch.tensor([0.0]), False, ), + ( + torch.tensor([[[0.5, 0.5]]]), + torch.tensor([[[-0.5, -0.5]]]), + torch.tensor([0.0]), + True, + ), ], ) def test_kl_divergence( @@ -476,8 +480,6 @@ def test_angle_loss( Tensor of variable shape. reduction_dimensions : tuple[int] The dimensions along which to reduce the final loss. - reduction_dimensions : tuple[int] | None - The dimensions to reduce over. expected : torch.Tensor The expected loss. Tensor of shape [number_of_samples]. diff --git a/tests/core/test_motor_position_optimizer.py b/tests/core/test_motor_position_optimizer.py index 74c53c36b..f5c3eabbd 100644 --- a/tests/core/test_motor_position_optimizer.py +++ b/tests/core/test_motor_position_optimizer.py @@ -23,46 +23,50 @@ def focal_spot() -> torch.Tensor: The desired focal spot. Tensor of shape [4]. """ - ground_truth = torch.tensor([1.1493, -0.5030, 57.0474, 1.0000]) + ground_truth = torch.tensor([1.0, -0.5030, 56.0, 1.0000]) return ground_truth @pytest.fixture -def distribution(device) -> torch.Tensor: +def distribution(device: torch.device) -> torch.Tensor: """ Use a distribution as target in the loss function. + Parameters + ---------- + device : torch.device + The device on which to initialize tensors. + Returns ------- torch.Tensor The desired distribution. Tensor of shape [bitmap_resolution_e, bitmap_resolution_u]. """ - distribution_path_group_1 = ( + path = ( pathlib.Path(ARTIST_ROOT) / "tests/data/expected_optimized_motor_positions" / "distribution.pt" ) - ground_truth = torch.load( - distribution_path_group_1, map_location=device, weights_only=True - ) + ground_truth = torch.load(path, map_location=device, weights_only=True) - return ground_truth + return ground_truth * 19400 @pytest.mark.parametrize( - "loss_class, ground_truth_fixture_name, early_stopping_delta, scheduler", + "loss_class, ground_truth_fixture_name, early_stopping_window, scheduler", [ - (FocalSpotLoss, "focal_spot", 1e-4, config_dictionary.cyclic), - (KLDivergenceLoss, "distribution", 1.0, config_dictionary.reduce_on_plateau), + (FocalSpotLoss, "focal_spot", 50, config_dictionary.cyclic), + (KLDivergenceLoss, "distribution", 50, config_dictionary.reduce_on_plateau), + (KLDivergenceLoss, "distribution", 10, config_dictionary.reduce_on_plateau), ], ) def test_motor_positions_optimizer( loss_class: Loss, - ground_truth_fixture_name: torch.Tensor, - early_stopping_delta: float, + ground_truth_fixture_name: str, + early_stopping_window: int, scheduler: str, request: pytest.FixtureRequest, ddp_setup_for_testing: dict[str, Any], @@ -77,8 +81,8 @@ def test_motor_positions_optimizer( The loss class. ground_truth_fixture_name : str A fixture to retrieve the ground truth. 
- early_stopping_delta : float - The minimum required improvement to prevent early stopping. + early_stopping_window : int + Number of epochs used to estimate loss trend. scheduler : str The scheduler to be used. request : pytest.FixtureRequest @@ -96,7 +100,8 @@ def test_motor_positions_optimizer( torch.manual_seed(7) torch.cuda.manual_seed(7) - scheduler_parameters = { + scheduler_dict = { + config_dictionary.scheduler_type: scheduler, config_dictionary.min: 1e-3, config_dictionary.max: 2e-3, config_dictionary.step_size_up: 100, @@ -105,18 +110,28 @@ def test_motor_positions_optimizer( config_dictionary.threshold: 1e-3, config_dictionary.cooldown: 20, } - - optimization_configuration = { + optimizer_dict = { config_dictionary.initial_learning_rate: 1e-3, config_dictionary.tolerance: 0.0005, - config_dictionary.max_epoch: 30, - config_dictionary.log_step: 0, - config_dictionary.early_stopping_delta: early_stopping_delta, - config_dictionary.early_stopping_patience: 4, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, + config_dictionary.max_epoch: 50, + config_dictionary.batch_size: 50, + config_dictionary.log_step: 1, + config_dictionary.early_stopping_delta: 1.0, + config_dictionary.early_stopping_patience: 2, + config_dictionary.early_stopping_window: early_stopping_window, + } + constraint_dict = { + config_dictionary.rho_energy: 1.0, + config_dictionary.max_flux_density: 3, + config_dictionary.rho_pixel: 1.0, + config_dictionary.lambda_lr: 0.1, + } + # Combine configurations. + optimization_configuration = { + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, + config_dictionary.constraints: constraint_dict, } - scenario_path = ( pathlib.Path(ARTIST_ROOT) / "tests/data/scenarios/test_scenario_paint_four_heliostats.h5" @@ -138,6 +153,7 @@ def test_motor_positions_optimizer( incident_ray_direction=torch.tensor([0.0, 1.0, 0.0, 0.0], device=device), target_area_index=1, ground_truth=request.getfixturevalue(ground_truth_fixture_name).to(device), + dni=800, bitmap_resolution=torch.tensor([256, 256], device=device), device=device, ) @@ -157,7 +173,7 @@ def test_motor_positions_optimizer( expected_path = ( pathlib.Path(ARTIST_ROOT) / "tests/data/expected_optimized_motor_positions" - / f"{ground_truth_fixture_name}_group_{index}_{device.type}.pt" + / f"{ground_truth_fixture_name}_group_{index}_{early_stopping_window}_{device.type}.pt" ) expected = torch.load(expected_path, map_location=device, weights_only=True) @@ -165,6 +181,6 @@ def test_motor_positions_optimizer( torch.testing.assert_close( heliostat_group.kinematic.motor_positions, expected, - atol=5e-4, - rtol=5e-4, + atol=5e-3, + rtol=5e-2, ) diff --git a/tests/core/test_regularizers.py b/tests/core/test_regularizers.py index 52bf2b8be..732ffceb5 100644 --- a/tests/core/test_regularizers.py +++ b/tests/core/test_regularizers.py @@ -4,9 +4,12 @@ from artist.core.regularizers import ( IdealSurfaceRegularizer, Regularizer, - TotalVariationRegularizer, + SmoothnessRegularizer, ) +torch.manual_seed(7) +torch.cuda.manual_seed(7) + def test_base_regularizer( device: torch.device, @@ -24,26 +27,74 @@ def test_base_regularizer( AssertionError If test does not complete as expected. 
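+
+    Notes
+    -----
+    The base ``Regularizer`` only defines the interface: concrete
+    regularizers such as ``SmoothnessRegularizer`` and
+    ``IdealSurfaceRegularizer`` override ``__call__``, so calling the base
+    class directly is expected to raise ``NotImplementedError``.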
""" - base_regularizer = Regularizer(weight=1.0, reduction_dimensions=(1,)) + base_regularizer = Regularizer(reduction_dimensions=(1,)) with pytest.raises(NotImplementedError) as exc_info: base_regularizer( - original_surface_points=torch.empty((2, 4), device=device), - surface_points=torch.tensor([0, 1], device=device), - surface_normals=(1,), + current_control_points=torch.empty((1, 1, 6, 6, 4), device=device), + original_control_points=torch.empty((1, 1, 6, 6, 4), device=device), device=device, ) assert "Must be overridden!" in str(exc_info.value) -def test_total_variation_regularizer( +@pytest.fixture +def control_points( + device: torch.device, +) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Generate flat, smooth, and irregular control point tensors. + + Parameters + ---------- + device : torch.device + The device on which to initialize tensors. + + Returns + ------- + torch.Tensor + Flat control points. + Tensor of shape [number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3]. + torch.Tensor + Smooth control points. + Tensor of shape [number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3]. + torch.Tensor + Irregular control points. + Tensor of shape [number_of_facets_per_surface, number_of_control_points_u_direction, number_of_control_points_v_direction, 3]. + """ + x = torch.linspace(0, 4 * 3.1415, 6, device=device) + y = torch.linspace(0, 4 * 3.1415, 6, device=device) + x_grid, y_grid = torch.meshgrid(x, y, indexing="ij") + + x_expanded = x_grid.unsqueeze(0).expand(4, -1, -1) + y_expanded = y_grid.unsqueeze(0).expand(4, -1, -1) + + # Flat surface + z_flat = torch.zeros_like(x_expanded, device=device) + flat_points = torch.stack([x_expanded, y_expanded, z_flat], dim=-1) + + # Smooth surface + z_smooth = 0.2 * torch.sin(x_expanded) + 0.2 * torch.cos(y_expanded) + smooth_points = torch.stack([x_expanded, y_expanded, z_smooth], dim=-1) + + # Irregular surface + z_irregular = z_smooth * 5.0 + irregular_points = torch.stack([x_expanded, y_expanded, z_irregular], dim=-1) + + return flat_points, smooth_points, irregular_points + + +def test_smoothness_regularizer( + control_points: tuple[torch.Tensor, torch.Tensor, torch.Tensor], device: torch.device, ) -> None: """ - Test the total variation regularizer. + Test the smoothness regularizer. Parameters ---------- + control_points : tuple[torch.Tensor, torch.Tensor, torch.Tensor] + A fixture to retrieve the control points. device : torch.device The device on which to initialize tensors. @@ -52,82 +103,32 @@ def test_total_variation_regularizer( AssertionError If test does not complete as expected. """ - rows = torch.linspace(0, 4 * 3.1415, 120, device=device) - columns = torch.linspace(0, 4 * 3.1415, 120, device=device) - x, y = torch.meshgrid(rows, columns, indexing="ij") - - # Smooth surface with waves. - z_values_smooth = 0.5 * torch.sin(x) + 0.5 * torch.cos(y) - - # Irregular surface = smooth surface with waves and random noise. 
-    noise = torch.randn_like(z_values_smooth, device=device) * 0.5
-    z_irregular = z_values_smooth + noise
-
-    coordinates_smooth = torch.stack(
-        [x.flatten(), y.flatten(), z_values_smooth.flatten()], dim=1
-    ).unsqueeze(0)
-    coordinates_irregular = torch.stack(
-        [x.flatten(), y.flatten(), z_irregular.flatten()], dim=1
-    ).unsqueeze(0)
-
-    surfaces = (
-        torch.cat([coordinates_smooth, coordinates_irregular], dim=0)
-        .unsqueeze(1)
-        .expand(2, 4, -1, 3)
-    )
+    flat_points, smooth_points, irregular_points = control_points
 
-    total_variation = TotalVariationRegularizer(
-        weight=1.0,
+    smoothness_regularizer = SmoothnessRegularizer(
         reduction_dimensions=(1,),
-        surface="surface_points",
-        number_of_neighbors=10,
-        sigma=1.0,
     )
 
-    loss = total_variation(
-        original_surface_points=torch.empty(1, device=device),
-        surface_points=surfaces,
-        surface_normals=torch.empty(1, device=device),
-        device=device,
+    loss = smoothness_regularizer(
+        current_control_points=torch.stack([smooth_points, irregular_points]),
+        original_control_points=flat_points.expand(2, 4, 6, 6, 3),
     )
 
     torch.testing.assert_close(
         loss,
-        torch.tensor([0.174590915442, 2.252339363098], device=device),
-        atol=5e-2,
-        rtol=5e-2,
+        torch.tensor([0.529724955559, 13.243123054504], device=device),
+        atol=5e-4,
+        rtol=5e-4,
     )
 
 
-@pytest.mark.parametrize(
-    "original_surface_points, new_surface_points, expected",
-    [
-        (
-            torch.tensor([[[[1.0, 2.0, 3.0], [2.0, 2.0, 2.0]]]]),
-            torch.tensor([[[[1.0, 2.0, 3.0], [2.0, 1.0, 3.0]]]]),
-            torch.tensor([2.0]),
-        ),
-    ],
-)
-def test_ideal_surface_regularizer(
-    original_surface_points: torch.Tensor,
-    new_surface_points: torch.Tensor,
-    expected: torch.Tensor,
-    device: torch.device,
-) -> None:
+def test_ideal_surface_regularizer(
+    control_points: tuple[torch.Tensor, torch.Tensor, torch.Tensor],
+    device: torch.device,
+) -> None:
     """
     Test the ideal surface regularizer.
 
     Parameters
     ----------
-    original_surface_points : torch.Tensor
-        The original surface points.
-        Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_points, 3].
-    new_surface_points : torch.Tensor
-        The new surface points.
-        Tensor of shape [number_of_surfaces, number_of_facets_per_surface, number_of_surface_points, 3].
-    expected : torch.Tensor
-        The expected loss.
-        Tensor of shape [number_of_surfaces].
+    control_points : tuple[torch.Tensor, torch.Tensor, torch.Tensor]
+        A fixture to retrieve the control points.
     device : torch.device
         The device on which to initialize tensors.
 
@@ -136,14 +137,19 @@
     AssertionError
         If test does not complete as expected.
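+
+    Notes
+    -----
+    A plausibility check on the expected values (an observation about the
+    test data, not about the implementation): the irregular surface's
+    z-values are five times those of the smooth surface, and its expected
+    loss is 25 times larger, which is consistent with a penalty that is
+    quadratic in the deviation of the current control points from the
+    original ones.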
""" + flat_points, smooth_points, irregular_points = control_points + ideal_surface_regularizer = IdealSurfaceRegularizer( - weight=1.0, reduction_dimensions=(1, 2, 3) + reduction_dimensions=(1,), ) loss = ideal_surface_regularizer( - original_surface_points=original_surface_points.to(device), - surface_points=new_surface_points.to(device), - surface_normals=torch.empty(1, device=device), - device=device, + current_control_points=torch.stack([smooth_points, irregular_points]), + original_control_points=flat_points.expand(2, 4, 6, 6, 3), ) - torch.testing.assert_close(loss, expected.to(device), atol=5e-2, rtol=5e-2) + torch.testing.assert_close( + loss, + torch.tensor([0.053332783282, 1.333319664001], device=device), + atol=5e-4, + rtol=5e-4, + ) diff --git a/tests/core/test_surface_reconstructor.py b/tests/core/test_surface_reconstructor.py index 50e962301..a2962dceb 100644 --- a/tests/core/test_surface_reconstructor.py +++ b/tests/core/test_surface_reconstructor.py @@ -7,7 +7,7 @@ from artist import ARTIST_ROOT from artist.core.loss_functions import KLDivergenceLoss, Loss, PixelLoss -from artist.core.regularizers import IdealSurfaceRegularizer, TotalVariationRegularizer +from artist.core.regularizers import IdealSurfaceRegularizer, SmoothnessRegularizer from artist.core.surface_reconstructor import SurfaceReconstructor from artist.data_parser.calibration_data_parser import CalibrationDataParser from artist.data_parser.paint_calibration_parser import PaintCalibrationDataParser @@ -16,17 +16,23 @@ @pytest.mark.parametrize( - "loss_class, data_parser, early_stopping_delta", + "loss_class, early_stopping_window, data_parser, scheduler", [ - (KLDivergenceLoss, PaintCalibrationDataParser(), 1e-4), - (PixelLoss, PaintCalibrationDataParser(), 1e-4), - (PixelLoss, CalibrationDataParser(), 1e-4), + ( + KLDivergenceLoss, + 40, + PaintCalibrationDataParser(), + config_dictionary.reduce_on_plateau, + ), + (PixelLoss, 20, PaintCalibrationDataParser(), config_dictionary.cyclic), + (PixelLoss, 10, CalibrationDataParser(), config_dictionary.cyclic), ], ) def test_surface_reconstructor( loss_class: Loss, + early_stopping_window: int, data_parser: CalibrationDataParser | PaintCalibrationDataParser, - early_stopping_delta: float, + scheduler: str, ddp_setup_for_testing: dict[str, Any], device: torch.device, ) -> None: @@ -37,10 +43,12 @@ def test_surface_reconstructor( ---------- loss_class : Loss The loss class. + early_stopping_window : int + Number of epochs used to estimate loss trend. data_parser : CalibrationDataParser The data parser used to load calibration data from files. - early_stopping_delta : float - The minimum required improvement to prevent early stopping. + scheduler : str + Scheduler name. ddp_setup_for_testing : dict[str, Any] Information about the distributed environment, process_groups, devices, ranks, world_Size, heliostat group to ranks mapping. 
device : torch.device @@ -54,49 +62,44 @@ def test_surface_reconstructor( torch.manual_seed(7) torch.cuda.manual_seed(7) - scheduler_parameters = { - config_dictionary.min: 1e-4, - config_dictionary.reduce_factor: 0.9, - config_dictionary.patience: 100, - config_dictionary.threshold: 1e-3, - config_dictionary.cooldown: 20, + optimizer_dict = { + config_dictionary.initial_learning_rate: 1e-4, + config_dictionary.tolerance: 5e-4, + config_dictionary.max_epoch: 50, + config_dictionary.batch_size: 30, + config_dictionary.log_step: 0, + config_dictionary.early_stopping_delta: 1.0, + config_dictionary.early_stopping_patience: 2, + config_dictionary.early_stopping_window: early_stopping_window, } - - # Configure regularizers and their weights. - ideal_surface_regularizer = IdealSurfaceRegularizer( - weight=0.5, reduction_dimensions=(1, 2, 3) - ) - total_variation_regularizer_points = TotalVariationRegularizer( - weight=0.5, - reduction_dimensions=(1,), - surface=config_dictionary.surface_points, - number_of_neighbors=64, - sigma=1e-3, - ) - total_variation_regularizer_normals = TotalVariationRegularizer( - weight=0.5, - reduction_dimensions=(1,), - surface=config_dictionary.surface_normals, - number_of_neighbors=64, - sigma=1e-3, - ) - + scheduler_dict = { + config_dictionary.scheduler_type: scheduler, + config_dictionary.min: 1e-6, + config_dictionary.max: 1e-3, + config_dictionary.step_size_up: 500, + config_dictionary.reduce_factor: 0.8, + config_dictionary.patience: 10, + config_dictionary.threshold: 1e-4, + config_dictionary.cooldown: 5, + } + ideal_surface_regularizer = IdealSurfaceRegularizer(reduction_dimensions=(1,)) + smoothness_regularizer = SmoothnessRegularizer(reduction_dimensions=(1,)) regularizers = [ ideal_surface_regularizer, - total_variation_regularizer_points, - total_variation_regularizer_normals, + smoothness_regularizer, ] - - optimization_configuration = { - config_dictionary.initial_learning_rate: 1e-4, - config_dictionary.tolerance: 5e-4, - config_dictionary.max_epoch: 15, - config_dictionary.log_step: 0, - config_dictionary.early_stopping_delta: early_stopping_delta, - config_dictionary.early_stopping_patience: 13, - config_dictionary.scheduler: config_dictionary.reduce_on_plateau, - config_dictionary.scheduler_parameters: scheduler_parameters, + constraint_dict = { config_dictionary.regularizers: regularizers, + config_dictionary.initial_lambda_energy: 0.1, + config_dictionary.rho_energy: 1.0, + config_dictionary.energy_tolerance: 0.01, + config_dictionary.weight_smoothness: 0.005, + config_dictionary.weight_ideal_surface: 0.005, + } + optimization_configuration = { + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, + config_dictionary.constraints: constraint_dict, } scenario_path = ( @@ -124,11 +127,15 @@ def test_surface_reconstructor( "AA31", [ pathlib.Path(ARTIST_ROOT) - / "tests/data/field_data/AA31-calibration-properties_1.json" + / "tests/data/field_data/AA31-calibration-properties_1.json", + pathlib.Path(ARTIST_ROOT) + / "tests/data/field_data/AA31-calibration-properties_2.json", ], [ pathlib.Path(ARTIST_ROOT) - / "tests/data/field_data/AA31-flux-centered_1.png" + / "tests/data/field_data/AA31-flux-centered_1.png", + pathlib.Path(ARTIST_ROOT) + / "tests/data/field_data/AA31-flux-centered_2.png", ], ), ] @@ -144,11 +151,19 @@ def test_surface_reconstructor( with h5py.File(scenario_path, "r") as scenario_file: scenario = Scenario.load_scenario_from_hdf5( - scenario_file=scenario_file, device=device + 
scenario_file=scenario_file, + change_number_of_control_points_per_facet=torch.tensor( + [7, 7], device=device + ), + device=device, ) ddp_setup_for_testing[config_dictionary.device] = device ddp_setup_for_testing[config_dictionary.groups_to_ranks_mapping] = {0: [0, 1]} + ddp_setup_for_testing[config_dictionary.ranks_to_groups_mapping] = { + 0: [0], + 1: [0], + } # Create the surface reconstructor. surface_reconstructor = SurfaceReconstructor( @@ -187,7 +202,7 @@ def test_surface_reconstructor( expected_path = ( pathlib.Path(ARTIST_ROOT) / "tests/data/expected_reconstructed_surfaces" - / f"{loss_name}_group_{index}_{device.type}.pt" + / f"{loss_name}_group_{index}_{early_stopping_window}_{device.type}.pt" ) expected = torch.load(expected_path, map_location=device, weights_only=True) diff --git a/tests/data/expected_bitmaps_blocking/bitmaps_cpu.pt b/tests/data/expected_bitmaps_blocking/bitmaps_cpu.pt new file mode 100644 index 000000000..2fec08a4c Binary files /dev/null and b/tests/data/expected_bitmaps_blocking/bitmaps_cpu.pt differ diff --git a/tests/data/expected_bitmaps_blocking/bitmaps_cuda.pt b/tests/data/expected_bitmaps_blocking/bitmaps_cuda.pt new file mode 100644 index 000000000..74247305c Binary files /dev/null and b/tests/data/expected_bitmaps_blocking/bitmaps_cuda.pt differ diff --git a/tests/data/expected_bitmaps_integration/test_scenario_paint_mix_ideal_prototype_deflectometry_cpu.pt b/tests/data/expected_bitmaps_integration/test_scenario_paint_mix_ideal_prototype_deflectometry_cpu.pt index 651672c7d..b6f1288d7 100644 Binary files a/tests/data/expected_bitmaps_integration/test_scenario_paint_mix_ideal_prototype_deflectometry_cpu.pt and b/tests/data/expected_bitmaps_integration/test_scenario_paint_mix_ideal_prototype_deflectometry_cpu.pt differ diff --git a/tests/data/expected_bitmaps_integration/test_scenario_paint_mix_ideal_prototype_deflectometry_cuda.pt b/tests/data/expected_bitmaps_integration/test_scenario_paint_mix_ideal_prototype_deflectometry_cuda.pt index 24a036d9c..02079113f 100644 Binary files a/tests/data/expected_bitmaps_integration/test_scenario_paint_mix_ideal_prototype_deflectometry_cuda.pt and b/tests/data/expected_bitmaps_integration/test_scenario_paint_mix_ideal_prototype_deflectometry_cuda.pt differ diff --git a/tests/data/expected_bitmaps_integration/test_scenario_paint_single_heliostat_cpu.pt b/tests/data/expected_bitmaps_integration/test_scenario_paint_single_heliostat_cpu.pt index a2119c255..5c6424271 100644 Binary files a/tests/data/expected_bitmaps_integration/test_scenario_paint_single_heliostat_cpu.pt and b/tests/data/expected_bitmaps_integration/test_scenario_paint_single_heliostat_cpu.pt differ diff --git a/tests/data/expected_bitmaps_integration/test_scenario_paint_single_heliostat_cuda.pt b/tests/data/expected_bitmaps_integration/test_scenario_paint_single_heliostat_cuda.pt index 202650135..f3ae871b6 100644 Binary files a/tests/data/expected_bitmaps_integration/test_scenario_paint_single_heliostat_cuda.pt and b/tests/data/expected_bitmaps_integration/test_scenario_paint_single_heliostat_cuda.pt differ diff --git a/tests/data/expected_bitmaps_integration/test_scenario_stral_single_heliostat_prototype_cpu.pt b/tests/data/expected_bitmaps_integration/test_scenario_stral_single_heliostat_prototype_cpu.pt index 57d415ce8..e568c7511 100644 Binary files a/tests/data/expected_bitmaps_integration/test_scenario_stral_single_heliostat_prototype_cpu.pt and b/tests/data/expected_bitmaps_integration/test_scenario_stral_single_heliostat_prototype_cpu.pt 
differ diff --git a/tests/data/expected_bitmaps_integration/test_scenario_stral_single_heliostat_prototype_cuda.pt b/tests/data/expected_bitmaps_integration/test_scenario_stral_single_heliostat_prototype_cuda.pt index 7d5d38be4..0c5eaa677 100644 Binary files a/tests/data/expected_bitmaps_integration/test_scenario_stral_single_heliostat_prototype_cuda.pt and b/tests/data/expected_bitmaps_integration/test_scenario_stral_single_heliostat_prototype_cuda.pt differ diff --git a/tests/data/expected_bitmaps_ray_extinction/bitmaps_cpu.pt b/tests/data/expected_bitmaps_ray_extinction/bitmaps_cpu.pt new file mode 100644 index 000000000..0a66ef103 Binary files /dev/null and b/tests/data/expected_bitmaps_ray_extinction/bitmaps_cpu.pt differ diff --git a/tests/data/expected_bitmaps_ray_extinction/bitmaps_cuda.pt b/tests/data/expected_bitmaps_ray_extinction/bitmaps_cuda.pt new file mode 100644 index 000000000..498e021dc Binary files /dev/null and b/tests/data/expected_bitmaps_ray_extinction/bitmaps_cuda.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution.pt b/tests/data/expected_optimized_motor_positions/distribution.pt index 3b94e0fe2..304121de6 100644 Binary files a/tests/data/expected_optimized_motor_positions/distribution.pt and b/tests/data/expected_optimized_motor_positions/distribution.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_0_10_cpu.pt b/tests/data/expected_optimized_motor_positions/distribution_group_0_10_cpu.pt new file mode 100644 index 000000000..b97e9fece Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/distribution_group_0_10_cpu.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_0_10_cuda.pt b/tests/data/expected_optimized_motor_positions/distribution_group_0_10_cuda.pt new file mode 100644 index 000000000..8d7da8fd3 Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/distribution_group_0_10_cuda.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_0_50_cpu.pt b/tests/data/expected_optimized_motor_positions/distribution_group_0_50_cpu.pt new file mode 100644 index 000000000..cb77a86bb Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/distribution_group_0_50_cpu.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_0_50_cuda.pt b/tests/data/expected_optimized_motor_positions/distribution_group_0_50_cuda.pt new file mode 100644 index 000000000..76e1a45a7 Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/distribution_group_0_50_cuda.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_0_cpu.pt b/tests/data/expected_optimized_motor_positions/distribution_group_0_cpu.pt deleted file mode 100644 index 373997488..000000000 Binary files a/tests/data/expected_optimized_motor_positions/distribution_group_0_cpu.pt and /dev/null differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_0_cuda.pt b/tests/data/expected_optimized_motor_positions/distribution_group_0_cuda.pt deleted file mode 100644 index f4c9bea21..000000000 Binary files a/tests/data/expected_optimized_motor_positions/distribution_group_0_cuda.pt and /dev/null differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_1_10_cpu.pt b/tests/data/expected_optimized_motor_positions/distribution_group_1_10_cpu.pt new file mode 100644 index 000000000..e8ae95178 Binary files 
/dev/null and b/tests/data/expected_optimized_motor_positions/distribution_group_1_10_cpu.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_1_10_cuda.pt b/tests/data/expected_optimized_motor_positions/distribution_group_1_10_cuda.pt new file mode 100644 index 000000000..eebcec54f Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/distribution_group_1_10_cuda.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_1_50_cpu.pt b/tests/data/expected_optimized_motor_positions/distribution_group_1_50_cpu.pt new file mode 100644 index 000000000..b5b1f09d1 Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/distribution_group_1_50_cpu.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_1_50_cuda.pt b/tests/data/expected_optimized_motor_positions/distribution_group_1_50_cuda.pt new file mode 100644 index 000000000..3c11ba442 Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/distribution_group_1_50_cuda.pt differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_1_cpu.pt b/tests/data/expected_optimized_motor_positions/distribution_group_1_cpu.pt deleted file mode 100644 index 4654bad90..000000000 Binary files a/tests/data/expected_optimized_motor_positions/distribution_group_1_cpu.pt and /dev/null differ diff --git a/tests/data/expected_optimized_motor_positions/distribution_group_1_cuda.pt b/tests/data/expected_optimized_motor_positions/distribution_group_1_cuda.pt deleted file mode 100644 index 2f162b117..000000000 Binary files a/tests/data/expected_optimized_motor_positions/distribution_group_1_cuda.pt and /dev/null differ diff --git a/tests/data/expected_optimized_motor_positions/focal_spot_group_0_50_cpu.pt b/tests/data/expected_optimized_motor_positions/focal_spot_group_0_50_cpu.pt new file mode 100644 index 000000000..43fcd0877 Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/focal_spot_group_0_50_cpu.pt differ diff --git a/tests/data/expected_optimized_motor_positions/focal_spot_group_0_50_cuda.pt b/tests/data/expected_optimized_motor_positions/focal_spot_group_0_50_cuda.pt new file mode 100644 index 000000000..9d0fe9810 Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/focal_spot_group_0_50_cuda.pt differ diff --git a/tests/data/expected_optimized_motor_positions/focal_spot_group_0_cpu.pt b/tests/data/expected_optimized_motor_positions/focal_spot_group_0_cpu.pt deleted file mode 100644 index 19eee9c56..000000000 Binary files a/tests/data/expected_optimized_motor_positions/focal_spot_group_0_cpu.pt and /dev/null differ diff --git a/tests/data/expected_optimized_motor_positions/focal_spot_group_0_cuda.pt b/tests/data/expected_optimized_motor_positions/focal_spot_group_0_cuda.pt deleted file mode 100644 index 6eef71988..000000000 Binary files a/tests/data/expected_optimized_motor_positions/focal_spot_group_0_cuda.pt and /dev/null differ diff --git a/tests/data/expected_optimized_motor_positions/focal_spot_group_1_50_cpu.pt b/tests/data/expected_optimized_motor_positions/focal_spot_group_1_50_cpu.pt new file mode 100644 index 000000000..48ad9cd9d Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/focal_spot_group_1_50_cpu.pt differ diff --git a/tests/data/expected_optimized_motor_positions/focal_spot_group_1_50_cuda.pt b/tests/data/expected_optimized_motor_positions/focal_spot_group_1_50_cuda.pt new file mode 100644 
index 000000000..504793634 Binary files /dev/null and b/tests/data/expected_optimized_motor_positions/focal_spot_group_1_50_cuda.pt differ diff --git a/tests/data/expected_optimized_motor_positions/focal_spot_group_1_cpu.pt b/tests/data/expected_optimized_motor_positions/focal_spot_group_1_cpu.pt deleted file mode 100644 index d30f0c192..000000000 Binary files a/tests/data/expected_optimized_motor_positions/focal_spot_group_1_cpu.pt and /dev/null differ diff --git a/tests/data/expected_optimized_motor_positions/focal_spot_group_1_cuda.pt b/tests/data/expected_optimized_motor_positions/focal_spot_group_1_cuda.pt deleted file mode 100644 index 110d79255..000000000 Binary files a/tests/data/expected_optimized_motor_positions/focal_spot_group_1_cuda.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/group_0_10_cpu.pt b/tests/data/expected_reconstructed_kinematic_parameters/group_0_10_cpu.pt new file mode 100644 index 000000000..22b51df9c Binary files /dev/null and b/tests/data/expected_reconstructed_kinematic_parameters/group_0_10_cpu.pt differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/group_0_10_cuda.pt b/tests/data/expected_reconstructed_kinematic_parameters/group_0_10_cuda.pt new file mode 100644 index 000000000..46cfaf65e Binary files /dev/null and b/tests/data/expected_reconstructed_kinematic_parameters/group_0_10_cuda.pt differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/group_0_50_cpu.pt b/tests/data/expected_reconstructed_kinematic_parameters/group_0_50_cpu.pt new file mode 100644 index 000000000..347812962 Binary files /dev/null and b/tests/data/expected_reconstructed_kinematic_parameters/group_0_50_cpu.pt differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/group_0_50_cuda.pt b/tests/data/expected_reconstructed_kinematic_parameters/group_0_50_cuda.pt new file mode 100644 index 000000000..333e0f182 Binary files /dev/null and b/tests/data/expected_reconstructed_kinematic_parameters/group_0_50_cuda.pt differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/group_1_10_cpu.pt b/tests/data/expected_reconstructed_kinematic_parameters/group_1_10_cpu.pt new file mode 100644 index 000000000..ee4365e0a Binary files /dev/null and b/tests/data/expected_reconstructed_kinematic_parameters/group_1_10_cpu.pt differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/group_1_10_cuda.pt b/tests/data/expected_reconstructed_kinematic_parameters/group_1_10_cuda.pt new file mode 100644 index 000000000..dd3934069 Binary files /dev/null and b/tests/data/expected_reconstructed_kinematic_parameters/group_1_10_cuda.pt differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/group_1_50_cpu.pt b/tests/data/expected_reconstructed_kinematic_parameters/group_1_50_cpu.pt new file mode 100644 index 000000000..212fb1324 Binary files /dev/null and b/tests/data/expected_reconstructed_kinematic_parameters/group_1_50_cpu.pt differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/group_1_50_cuda.pt b/tests/data/expected_reconstructed_kinematic_parameters/group_1_50_cuda.pt new file mode 100644 index 000000000..3c6892613 Binary files /dev/null and b/tests/data/expected_reconstructed_kinematic_parameters/group_1_50_cuda.pt differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_0_cpu.pt b/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_0_cpu.pt deleted file mode 100644 
index 856e1da82..000000000 Binary files a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_0_cpu.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_0_cuda.pt b/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_0_cuda.pt deleted file mode 100644 index d9bd2cd44..000000000 Binary files a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_0_cuda.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_1_cpu.pt b/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_1_cpu.pt deleted file mode 100644 index 757169fc7..000000000 Binary files a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_1_cpu.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_1_cuda.pt b/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_1_cuda.pt deleted file mode 100644 index 6f05c522b..000000000 Binary files a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_00001_group_1_cuda.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_0_cpu.pt b/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_0_cpu.pt deleted file mode 100644 index 686816499..000000000 Binary files a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_0_cpu.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_0_cuda.pt b/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_0_cuda.pt deleted file mode 100644 index a22061fe3..000000000 Binary files a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_0_cuda.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_1_cpu.pt b/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_1_cpu.pt deleted file mode 100644 index b9c4370fe..000000000 Binary files a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_1_cpu.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_1_cuda.pt b/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_1_cuda.pt deleted file mode 100644 index c24bfef08..000000000 Binary files a/tests/data/expected_reconstructed_kinematic_parameters/raytracing_10_group_1_cuda.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_40_cpu.pt b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_40_cpu.pt new file mode 100644 index 000000000..03e76bf21 Binary files /dev/null and b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_40_cpu.pt differ diff --git a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_40_cuda.pt b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_40_cuda.pt new file mode 100644 index 000000000..b172d99df Binary files /dev/null and b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_40_cuda.pt differ diff --git a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_cpu.pt b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_cpu.pt deleted file mode 100644 index e85f4b9ea..000000000 Binary files 
a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_cpu.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_cuda.pt b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_cuda.pt deleted file mode 100644 index 11767818b..000000000 Binary files a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_0_cuda.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_40_cpu.pt b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_40_cpu.pt new file mode 100644 index 000000000..cc7a3a3ad Binary files /dev/null and b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_40_cpu.pt differ diff --git a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_40_cuda.pt b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_40_cuda.pt new file mode 100644 index 000000000..ed4ae29c9 Binary files /dev/null and b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_40_cuda.pt differ diff --git a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_cpu.pt b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_cpu.pt deleted file mode 100644 index 2c1ecb8ff..000000000 Binary files a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_cpu.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_cuda.pt b/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_cuda.pt deleted file mode 100644 index 36161f42c..000000000 Binary files a/tests/data/expected_reconstructed_surfaces/kl_divergence_group_1_cuda.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_20_cpu.pt b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_20_cpu.pt new file mode 100644 index 000000000..55d192edd Binary files /dev/null and b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_20_cpu.pt differ diff --git a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_20_cuda.pt b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_20_cuda.pt new file mode 100644 index 000000000..32a2d8c39 Binary files /dev/null and b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_20_cuda.pt differ diff --git a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_cpu.pt b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_cpu.pt deleted file mode 100644 index 414df9fcf..000000000 Binary files a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_cpu.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_cuda.pt b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_cuda.pt deleted file mode 100644 index a995b1a7f..000000000 Binary files a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_0_cuda.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_20_cpu.pt b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_20_cpu.pt new file mode 100644 index 000000000..fec0d2b68 Binary files /dev/null and b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_20_cpu.pt differ diff --git a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_20_cuda.pt b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_20_cuda.pt new file mode 100644 index 000000000..63c3024c2 Binary files /dev/null and 
b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_20_cuda.pt differ diff --git a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_cpu.pt b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_cpu.pt deleted file mode 100644 index fac916112..000000000 Binary files a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_cpu.pt and /dev/null differ diff --git a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_cuda.pt b/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_cuda.pt deleted file mode 100644 index 61c56a48c..000000000 Binary files a/tests/data/expected_reconstructed_surfaces/pixel_loss_group_1_cuda.pt and /dev/null differ diff --git a/tests/data/field_data/AA31-calibration-properties_1.json b/tests/data/field_data/AA31-calibration-properties_1.json index 720219a44..925f3c1e1 100644 --- a/tests/data/field_data/AA31-calibration-properties_1.json +++ b/tests/data/field_data/AA31-calibration-properties_1.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 28736, - "axis_2_motor_position": 24545 - }, - "target_name": "solar_tower_juelich_lower", - "focal_spot": { - "HeliOS": [ - 50.9133920341492, - 6.387840023646867, - 123.21949486882116 - ], - "UTIS": [ - 50.91339204305797, - 6.38783967089144, - 123.05599975585938 - ] - }, - "sun_elevation": 55.66332095516, - "sun_azimuth": 45.39696843947765 + "motor_position": { + "axis_1_motor_position": 28736, + "axis_2_motor_position": 24545 + }, + "target_name": "solar_tower_juelich_lower", + "sun_elevation": 55.66332095516, + "sun_azimuth": 45.39696843947765, + "focal_spot": { + "HeliOS": [ + 50.91339198404406, + 6.387839781130179, + 123.21949486882116 + ], + "UTIS": [ + 50.91339199412624, + 6.387839428457502, + 123.05599975585938 + ] + } } diff --git a/tests/data/field_data/AA31-calibration-properties_2.json b/tests/data/field_data/AA31-calibration-properties_2.json new file mode 100644 index 000000000..c235c8c96 --- /dev/null +++ b/tests/data/field_data/AA31-calibration-properties_2.json @@ -0,0 +1 @@ +{"motor_position": {"axis_1_motor_position": 43348, "axis_2_motor_position": 52369}, "target_name": "multi_focus_tower", "sun_elevation": 12.745538922150077, "sun_azimuth": -25.580985774980476, "focal_spot": {"HeliOS": [50.91339645343472, 6.387573686825056, 138.89030690302178], "UTIS": [50.913396454013466, 6.387574172568588, 138.80173110961914]}} diff --git a/tests/data/field_data/AA31-flux-centered_2.png b/tests/data/field_data/AA31-flux-centered_2.png new file mode 100644 index 000000000..bceba4443 Binary files /dev/null and b/tests/data/field_data/AA31-flux-centered_2.png differ diff --git a/tests/data/field_data/AA31-flux_2.png b/tests/data/field_data/AA31-flux_2.png new file mode 100644 index 000000000..5d20b74c8 Binary files /dev/null and b/tests/data/field_data/AA31-flux_2.png differ diff --git a/tests/data/field_data/AA39-calibration-properties_1.json b/tests/data/field_data/AA39-calibration-properties_1.json index e70399173..4fcf750b0 100644 --- a/tests/data/field_data/AA39-calibration-properties_1.json +++ b/tests/data/field_data/AA39-calibration-properties_1.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 16963, - "axis_2_motor_position": 72374 - }, - "target_name": "multi_focus_tower", - "focal_spot": { - "HeliOS": [ - 50.91339561753624, - 6.3875745058729905, - 137.70818056409652 - ], - "UTIS": [ - 50.913395620694814, - 6.387574139476353, - 137.70894622802734 - ] - }, - "sun_elevation": 27.809486551769538, - 
"sun_azimuth": -85.31171110078206 + "motor_position": { + "axis_1_motor_position": 16963, + "axis_2_motor_position": 72374 + }, + "target_name": "multi_focus_tower", + "sun_elevation": 27.809486551769538, + "sun_azimuth": -85.31171110078206, + "focal_spot": { + "HeliOS": [ + 50.91339645043245, + 6.387574299623125, + 137.70818056409652 + ], + "UTIS": [ + 50.91339645480913, + 6.387573933261633, + 137.70894622802734 + ] + } } diff --git a/tests/data/field_data/AA39-calibration-properties_2.json b/tests/data/field_data/AA39-calibration-properties_2.json index bc1f21e11..d771372bf 100644 --- a/tests/data/field_data/AA39-calibration-properties_2.json +++ b/tests/data/field_data/AA39-calibration-properties_2.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 20634, - "axis_2_motor_position": 40816 - }, - "target_name": "multi_focus_tower", - "focal_spot": { - "HeliOS": [ - 50.91339561786706, - 6.387580250206895, - 138.3399014968552 - ], - "UTIS": [ - 50.913395620694814, - 6.387581271641756, - 138.3357048034668 - ] - }, - "sun_elevation": 48.68939631461994, - "sun_azimuth": 62.865328091974916 + "motor_position": { + "axis_1_motor_position": 20634, + "axis_2_motor_position": 40816 + }, + "target_name": "multi_focus_tower", + "sun_elevation": 48.68939631461994, + "sun_azimuth": 62.865328091974916, + "focal_spot": { + "HeliOS": [ + 50.91339643166456, + 6.387580043820685, + 138.3399014968552 + ], + "UTIS": [ + 50.91339643109615, + 6.387581065254336, + 138.3357048034668 + ] + } } diff --git a/tests/data/field_data/AA39-heliostat-properties.json b/tests/data/field_data/AA39-heliostat-properties.json index 9b6d71a57..0f5abc3a4 100644 --- a/tests/data/field_data/AA39-heliostat-properties.json +++ b/tests/data/field_data/AA39-heliostat-properties.json @@ -1,8 +1,16 @@ { - "heliostat_position": [50.9136428083779, 6.38799014568948, 88.68894196], - "height": 2.55999994277954, - "width": 3.22000002861023, - "initial_orientation": [0, 0, 1], + "heliostat_position": [ + 50.913643297719794, + 6.3880132701249455, + 88.68894196 + ], + "height": 2.559999942779541, + "width": 3.2200000286102295, + "initial_orientation": [ + 0.0, + -1.0, + 0.0 + ], "kinematic_properties": { "actuators": [ { @@ -38,39 +46,87 @@ "movement_speed": 0 } ], - "joint_translation_e_1": 0, - "joint_translation_n_1": 0, - "joint_translation_u_1": 0, - "joint_translation_e_2": 0, - "joint_translation_n_2": 0, - "joint_translation_u_2": 0.314999997615814, - "concentrator_translation_e": 0, - "concentrator_translation_n": -0.177550002932549, - "concentrator_translation_u": -0.404500007629395 + "joint_translation_e_1": 0.0, + "joint_translation_n_1": 0.0, + "joint_translation_u_1": 0.0, + "joint_translation_e_2": 0.0, + "joint_translation_n_2": 0.0, + "joint_translation_u_2": 0.0, + "concentrator_translation_e": 0.0, + "concentrator_translation_n": 0.175, + "concentrator_translation_u": 0.0 }, "facet_properties": { "canting_type": "receiver canting", "number_of_facets": 4, "facets": [ { - "translation_vector": [-0.8075, 0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, -0.00498456787317991], - "canting_n": [0.000019569211872294, 0.637492179870606, 0.00315052270889282] + "translation_vector": [ + -0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + -0.004984567873179913 + ], + "canting_n": [ + 1.9569211872294545e-05, + 0.6374921798706055, + 0.0031505227088928223 + ] }, { - "translation_vector": [0.8075, 0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, 0.00498456787317991], - "canting_n": 
[-0.000019569211872294, 0.637492179870606, 0.00315052270889282] + "translation_vector": [ + 0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + 0.004984567873179913 + ], + "canting_n": [ + -1.9569211872294545e-05, + 0.6374921798706055, + 0.0031505227088928223 + ] }, { - "translation_vector": [-0.8075, -0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, -0.00498456787317991], - "canting_n": [-0.000019569211872294, 0.637492179870606, -0.00315052270889282] + "translation_vector": [ + -0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + -0.004984567873179913 + ], + "canting_n": [ + -1.9569211872294545e-05, + 0.6374921798706055, + -0.0031505227088928223 + ] }, { - "translation_vector": [0.8075, -0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, 0.00498456787317991], - "canting_n": [0.000019569211872294, 0.637492179870606, -0.00315052270889282] + "translation_vector": [ + 0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + 0.004984567873179913 + ], + "canting_n": [ + 1.9569211872294545e-05, + 0.6374921798706055, + -0.0031505227088928223 + ] } ] }, diff --git a/tests/data/scenarios/test_blocking.h5 b/tests/data/scenarios/test_blocking.h5 new file mode 100644 index 000000000..ca207df20 Binary files /dev/null and b/tests/data/scenarios/test_blocking.h5 differ diff --git a/tests/data_parser/test_paint_calibration_parser.py b/tests/data_parser/test_paint_calibration_parser.py index 381075a67..fa2a2f67f 100644 --- a/tests/data_parser/test_paint_calibration_parser.py +++ b/tests/data_parser/test_paint_calibration_parser.py @@ -30,18 +30,18 @@ torch.tensor( [ [ - -17.593019485474, + -17.623041152954, -3.039341926575, 50.708953857422, 1.000000000000, - ], + ] ] ), torch.tensor( [[0.881544291973, 0.072294861078, -0.466533094645, 0.000000000000]] ), torch.tensor([[16963.0, 72374.0]]), - torch.tensor([0, 1]), + torch.tensor([0, 1], dtype=torch.int32), torch.tensor([0]), ], ), @@ -64,13 +64,13 @@ torch.tensor( [ [ - -17.562997817993, + -17.593019485474, -3.039341926575, 50.708175659180, 1.000000000000, ], [ - -17.142686843872, + -17.202730178833, -3.039341926575, 51.339904785156, 1.000000000000, @@ -94,7 +94,7 @@ ] ), torch.tensor([[16963.0, 72374.0], [20634.0, 40816.0]]), - torch.tensor([0, 2]), + torch.tensor([0, 2], dtype=torch.int32), torch.tensor([0, 0]), ], ), @@ -127,7 +127,7 @@ ] ), torch.tensor([[28061.0, 47874.0], [22585.0, 48224.0]]), - torch.tensor([0, 2]), + torch.tensor([0, 2], dtype=torch.int32), torch.tensor([3, 0]), ], ), diff --git a/tests/data_parser/test_paint_scenario_parser.py b/tests/data_parser/test_paint_scenario_parser.py index a55d53d43..5d5f917f6 100644 --- a/tests/data_parser/test_paint_scenario_parser.py +++ b/tests/data_parser/test_paint_scenario_parser.py @@ -4,7 +4,6 @@ import paint.util.paint_mappings as paint_mappings import pytest import torch -from _pytest.monkeypatch import MonkeyPatch from artist import ARTIST_ROOT from artist.data_parser import paint_scenario_parser @@ -129,12 +128,12 @@ def test_extract_paint_tower_measurements( [HeliostatListConfig, PrototypeConfig], [ torch.tensor( - [11.664672851562, 24.570718765259, 1.688941955566, 1.000000000000] + [13.290966033936, 24.625156402588, 1.688941955566, 1.000000000000] ), 2, "linear", torch.tensor(154166.671875000000), - torch.tensor([0, 0, 1, 0]), + torch.tensor([0.0, -1.0, 0.0, 0.0]), torch.tensor( [ [ @@ -151,7 +150,7 @@ def test_extract_paint_tower_measurements( ], ] ), - torch.tensor([-1.609997987747, 0.206321120262, 
0.043028946966]), + torch.tensor([-0.802500069141, -0.436184167862, 0.000000000000]), ], ), ], @@ -168,13 +167,13 @@ def test_extract_paint_heliostats_ideal_surface( Parameters ---------- - heliostat_paths : tuple[str, pathlib.Path] + heliostat_paths : list[tuple[str, pathlib.Path]] Name of the heliostat and a heliostat properties file path. power_plant_position : torch.Tensor The position of the power plant in latitude, longitude and elevation. expected_types : list[Any] The expected extracted data types. - expected_heliostat : list[Union[torch.Tensor, int, str]], + expected_heliostat : list[Any] The expected extracted heliostat data. device : torch.device The device on which to initialize tensors. @@ -250,12 +249,12 @@ def test_extract_paint_heliostats_ideal_surface( [HeliostatListConfig, PrototypeConfig], [ torch.tensor( - [11.664672851562, 24.570718765259, 1.688941955566, 1.000000000000] + [13.290966033936, 24.625156402588, 1.688941955566, 1.000000000000] ), 2, "linear", torch.tensor(154166.671875000000), - torch.tensor([0, 0, 1, 0]), + torch.tensor([0.0, -1.0, 0.0, 0.0]), torch.tensor( [ [ @@ -290,7 +289,7 @@ def test_extract_paint_heliostats_fitted_surface( Parameters ---------- - heliostat_and_deflectometry_paths : tuple[str, pathlib.Path, pathlib.Path] + heliostat_and_deflectometry_paths : list[tuple[str, pathlib.Path, pathlib.Path]] Name of the heliostat and a pair of heliostat properties and deflectometry file paths. power_plant_position : torch.Tensor The position of the power plant in latitude, longitude and elevation. @@ -298,7 +297,7 @@ def test_extract_paint_heliostats_fitted_surface( The maximum amount of epochs for fitting the NURBS. expected_types : list[Any] The expected extracted data types. - expected_heliostat : list[torch.Tensor| int| str], + expected_heliostat : list[Any] The expected extracted heliostat data. device : torch.device The device on which to initialize tensors. @@ -397,12 +396,12 @@ def test_extract_paint_heliostats_fitted_surface( [HeliostatListConfig, PrototypeConfig], [ torch.tensor( - [11.664672851562, 24.570718765259, 1.688941955566, 1.000000000000] + [13.290966033936, 24.625156402588, 1.688941955566, 1.000000000000] ), 2, "linear", torch.tensor(154166.671875000000), - torch.tensor([0, 0, 1, 0]), + torch.tensor([0.0, -1.0, 0.0, 0.0]), torch.tensor( [ [ @@ -419,16 +418,16 @@ def test_extract_paint_heliostats_fitted_surface( ], ] ), - torch.tensor([-1.609997987747, 0.206321120262, 0.043028946966]), + torch.tensor([-0.802500069141, -0.436184167862, 0.000000000000]), ], [ torch.tensor( - [11.664672851562, 24.570718765259, 1.688941955566, 1.000000000000] + [13.290966033936, 24.625156402588, 1.688941955566, 1.000000000000] ), 2, "linear", torch.tensor(154166.671875000000), - torch.tensor([0, 0, 1, 0]), + torch.tensor([0.0, -1.0, 0.0, 0.0]), torch.tensor( [ [ @@ -475,9 +474,9 @@ def test_extract_paint_heliostats_mixed_surface( The maximum number of epochs for fitting the NURBS. expected_types : list[Any] The expected extracted data types. - expected_heliostat_ideal : list[Union[torch.Tensor, int, str]] + expected_heliostat_ideal : list[Any] The expected data for the ideal heliostat. - expected_heliostat_fitted : list[Union[torch.Tensor, int, str]] + expected_heliostat_fitted : list[Any] The expected data for the fitted heliostat. device : torch.device The device on which to initialize tensors. @@ -512,11 +511,11 @@ def test_extract_paint_heliostats_mixed_surface( ) ) - # Assert overall return types + # Assert overall return types. 
assert isinstance(extracted_list[0], expected_types[0]) assert isinstance(extracted_list[1], expected_types[1]) - # Find the ideal and fitted heliostat in the returned list + # Find the ideal and fitted heliostat in the returned list. ideal_heliostat = next( h for h in extracted_list[0].heliostat_list if h.name == "ideal_heliostat" ) @@ -621,18 +620,19 @@ def _make_fake_calibration_data( @pytest.mark.parametrize( - "randomize_selection_flag, random_seed_value, number_of_measurements, image_variant_name", + "randomize_selection_flag, random_seed_value, number_of_measurements, count_per_heliostat, image_variant_name", [ - (False, 0, 2, "flux"), - (True, 123, 2, "flux"), + (False, 0, 2, 2, "flux"), + (True, 123, 2, 2, "flux"), + (True, 123, 6, 5, "flux"), ], ) def test_build_heliostat_data_mapping_shape_parametrized( tmp_path: pathlib.Path, - monkeypatch: MonkeyPatch, randomize_selection_flag: bool, random_seed_value: int, number_of_measurements: int, + count_per_heliostat: int, image_variant_name: str, ) -> None: """ @@ -657,14 +657,14 @@ def test_build_heliostat_data_mapping_shape_parametrized( ---------- tmp_path : pathlib.Path Temporary directory provided by pytest for creating fake calibration data. - monkeypatch : MonkeyPatch - Pytest fixture to dynamically replace module attributes for testing. randomize_selection_flag : bool Flag to randomize selection of measurement files when building the mapping. random_seed_value : int Random seed to use when `randomize_selection_flag` is `True` for reproducibility. number_of_measurements : int Number of measurement files to select per heliostat. + count_per_heliostat : int + Number of files actually loaded. image_variant_name : str Identifier for the variant of image data to use (e.g., ``raw``, ``processed``). @@ -684,7 +684,7 @@ def test_build_heliostat_data_mapping_shape_parametrized( result_mapping_list = paint_scenario_parser.build_heliostat_data_mapping( base_path=str(tmp_path), - heliostat_names=heliostat_name_list, + heliostat_names=["heliostat_1", "heliostat_2", "heliostat_3"], number_of_measurements=number_of_measurements, image_variant=image_variant_name, randomize=randomize_selection_flag, @@ -710,8 +710,8 @@ def test_build_heliostat_data_mapping_shape_parametrized( isinstance(image_path, pathlib.Path) for image_path in image_file_paths ) - assert len(property_file_paths) == number_of_measurements - assert len(image_file_paths) == number_of_measurements + assert len(property_file_paths) == count_per_heliostat + assert len(image_file_paths) == count_per_heliostat # Correspondence by ID and directory. for property_file_path, image_file_path in zip( @@ -727,7 +727,6 @@ def test_build_heliostat_data_mapping_shape_parametrized( @pytest.mark.parametrize("random_seed_value", [7, 11, 123, 2024]) def test_build_heliostat_data_mapping_randomization_changes_order( tmp_path: pathlib.Path, - monkeypatch: MonkeyPatch, random_seed_value: int, ) -> None: """ @@ -741,8 +740,6 @@ def test_build_heliostat_data_mapping_randomization_changes_order( ---------- tmp_path : pathlib.Path Temporary directory provided by pytest for creating fake calibration data. - monkeypatch : MonkeyPatch - Pytest fixture to dynamically replace module attributes for testing. random_seed_value : int Random seed to use for reproducibility in randomized selection. 
diff --git a/tests/field/test_heliostat_group.py b/tests/field/test_heliostat_group.py index 9cbc92cde..c9095a7a8 100644 --- a/tests/field/test_heliostat_group.py +++ b/tests/field/test_heliostat_group.py @@ -32,6 +32,8 @@ def heliostat_group(device: torch.device) -> HeliostatGroupRigidBody: ), surface_points=torch.rand((3, 100, 4), device=device), surface_normals=torch.rand((3, 100, 4), device=device), + canting=torch.rand((3, 4, 2, 4), device=device), + facet_translations=torch.rand((3, 4, 4), device=device), initial_orientations=torch.tensor( [[0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0]], device=device, @@ -135,6 +137,8 @@ def test_abstract_align( ), surface_points=torch.rand((3, 100, 4), device=device), surface_normals=torch.rand((3, 100, 4), device=device), + canting=torch.rand((3, 4, 2, 4), device=device), + facet_translations=torch.rand((3, 4, 4), device=device), initial_orientations=torch.tensor( [[0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0]], device=device, diff --git a/tests/field/test_integration_alignment.py b/tests/field/test_integration_alignment.py index a28b2c510..97c2e8057 100644 --- a/tests/field/test_integration_alignment.py +++ b/tests/field/test_integration_alignment.py @@ -133,6 +133,7 @@ def test_integration_alignment( ray_tracer = HeliostatRayTracer( scenario=scenario, heliostat_group=heliostat_group, + blocking_active=False, bitmap_resolution=bitmap_resolution, batch_size=10, ) @@ -153,6 +154,11 @@ def test_integration_alignment( flux_distributions = flux_distributions + group_bitmaps_per_target + # Scale bitmap for testing precision. + flux_distributions = ( + flux_distributions / (flux_distributions.sum(dim=(1, 2), keepdim=True) + 1e-8) + ) * 100 + expected_path = ( pathlib.Path(ARTIST_ROOT) / "tests/data/expected_bitmaps_integration" diff --git a/tests/field/test_kinematic.py b/tests/field/test_kinematic.py index a82ac21fc..1fe34e706 100644 --- a/tests/field/test_kinematic.py +++ b/tests/field/test_kinematic.py @@ -11,10 +11,30 @@ torch.tensor( [ [ - [0.9999, 0.0104, 0.0000, -0.0019], - [-0.0074, 0.7107, 0.7035, -0.1892], - [0.0073, -0.7035, 0.7107, 0.0613], - [0.0000, 0.0000, 0.0000, 1.0000], + [ + 9.999455809593e-01, + -4.559969624118e-10, + -1.043199468404e-02, + -1.852722256444e-03, + ], + [ + -7.413947489113e-03, + 7.035020589828e-01, + -7.106545567513e-01, + -1.891756802797e-01, + ], + [ + 7.338930387050e-03, + 7.106932401657e-01, + 7.034637928009e-01, + 6.132812798023e-02, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ] ] ) diff --git a/tests/field/test_kinematic_rigid_body.py b/tests/field/test_kinematic_rigid_body.py index b6ec82ff4..080e3aa8d 100644 --- a/tests/field/test_kinematic_rigid_body.py +++ b/tests/field/test_kinematic_rigid_body.py @@ -233,40 +233,160 @@ def kinematic_model_ideal_2( torch.tensor( [ [ - [0.9999, 0.0104, 0.0000, -0.0019], - [-0.0074, 0.7107, 0.7035, -0.1891], - [0.0073, -0.7035, 0.7107, 0.0613], - [0.0000, 0.0000, 0.0000, 1.0000], + [ + 9.999456405640e-01, + -4.558640964714e-10, + -1.042895484716e-02, + -1.851661014371e-03, + ], + [ + -7.411775179207e-03, + 7.035031914711e-01, + -7.106534838676e-01, + -1.891400665045e-01, + ], + [ + 7.336803711951e-03, + 7.106921076775e-01, + 7.034649252892e-01, + 6.129327416420e-02, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [0.7123, -0.7019, 0.0000, 0.1246], - [0.7019, 0.7122, -0.0103, -0.1255], - [0.0072, 0.0073, 0.9999, -0.0908], - 
[0.0000, 0.0000, 0.0000, 1.0000], + [ + 7.122755050659e-01, + 3.068102216730e-08, + 7.019000053406e-01, + 1.246223449707e-01, + ], + [ + 7.018629312515e-01, + -1.027521397918e-02, + -7.122378945351e-01, + -1.255382150412e-01, + ], + [ + 7.212151307613e-03, + 9.999471902847e-01, + -7.318804971874e-03, + -9.079471230507e-02, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [9.9997e-01, 7.3368e-03, 0.0000e00, -1.3026e-03], - [-7.3367e-03, 9.9996e-01, -5.1375e-03, -1.7708e-01], - [-3.7693e-05, 5.1374e-03, 9.9999e-01, -9.0411e-02], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + 9.999730587006e-01, + -3.207012433393e-10, + -7.336789276451e-03, + -1.302646938711e-03, + ], + [ + -7.336692418903e-03, + -5.136868450791e-03, + -9.999598860741e-01, + -1.770831346512e-01, + ], + [ + -3.768780152313e-05, + 9.999868273735e-01, + -5.136730149388e-03, + -9.041082859039e-02, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [0.7019, 0.7123, 0.0000, -0.1265], - [-0.7122, 0.7019, -0.0103, -0.1237], - [-0.0073, 0.0072, 0.9999, -0.0908], - [0.0000, 0.0000, 0.0000, 1.0000], + [ + 7.018998265266e-01, + -3.113456159554e-08, + -7.122757434845e-01, + -1.264645606279e-01, + ], + [ + -7.122381329536e-01, + -1.027521397918e-02, + -7.018627524376e-01, + -1.236961036921e-01, + ], + [ + -7.318763993680e-03, + 9.999471902847e-01, + -7.212193217129e-03, + -9.077578783035e-02, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [1.0000, 0.0080, 0.0000, -0.0014], - [-0.0074, 0.9258, 0.3780, -0.1982], - [0.0030, -0.3779, 0.9258, -0.0158], - [0.0000, 0.0000, 0.0000, 1.0000], + [ + 9.999683499336e-01, + -3.478643761934e-10, + -7.958209142089e-03, + -1.412980025634e-03, + ], + [ + -7.367914076895e-03, + 3.779509067535e-01, + -9.257963299751e-01, + -1.982017606497e-01, + ], + [ + 3.007812658325e-03, + 9.258256554604e-01, + 3.779389560223e-01, + -1.575836539268e-02, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [0.9999, 0.0104, 0.0000, -0.0019], - [-0.0074, 0.7107, 0.7035, 0.8109], - [0.0073, -0.7035, 0.7107, 0.0613], - [0.0000, 0.0000, 0.0000, 1.0000], + [ + 9.999456405640e-01, + -4.558640964714e-10, + -1.042895484716e-02, + -1.851661014371e-03, + ], + [ + -7.411775179207e-03, + 7.035031914711e-01, + -7.106534838676e-01, + 8.108599185944e-01, + ], + [ + 7.336803711951e-03, + 7.106921076775e-01, + 7.034649252892e-01, + 6.129327416420e-02, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], ] ), @@ -304,90 +424,160 @@ def kinematic_model_ideal_2( torch.tensor( [ [ - [1, 0, 0, 0], [ - 0, - torch.cos(torch.tensor(-torch.pi / 4)), - -torch.sin(torch.tensor(-torch.pi / 4)), - 0, + 1.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, ], [ - 0, - torch.sin(torch.tensor(-torch.pi / 4)), - torch.cos(torch.tensor(-torch.pi / 4)), - 0, + 0.000000000000e00, + 7.071067690849e-01, + -7.071067690849e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 7.071067690849e-01, + 7.071067690849e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, ], - [0, 0, 0, 1], ], [ [ - torch.cos(torch.tensor(torch.pi / 4)), - -torch.sin(torch.tensor(torch.pi / 4)), - 0.0, - 0.0, + 7.071067690849e-01, + 3.090861966371e-08, + 7.071068286896e-01, + 0.000000000000e00, ], [ - 
torch.sin(torch.tensor(torch.pi / 4)), - torch.cos(torch.tensor(torch.pi / 4)), - 0.0, - 0.0, + 7.071068286896e-01, + -3.090861966371e-08, + -7.071067690849e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 1.000000000000e00, + -4.371138828674e-08, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, ], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0], ], [ - [1.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0], + [ + 1.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + -4.371138828674e-08, + -1.000000000000e00, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 1.000000000000e00, + -4.371138828674e-08, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ [ - torch.cos(torch.tensor(-torch.pi / 4)), - -torch.sin(torch.tensor(-torch.pi / 4)), - 0.0, - 0.0, + 7.071067690849e-01, + -3.090861966371e-08, + -7.071068286896e-01, + 0.000000000000e00, ], [ - torch.sin(torch.tensor(-torch.pi / 4)), - torch.cos(torch.tensor(-torch.pi / 4)), - 0.0, - 0.0, + -7.071068286896e-01, + -3.090861966371e-08, + -7.071067690849e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 1.000000000000e00, + -4.371138828674e-08, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, ], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0], ], [ - [1.0, 0.0, 0.0, 0.0], [ - 0.0, - torch.cos(torch.tensor(-torch.pi / 8)), - -torch.sin(torch.tensor(-torch.pi / 8)), - 0.0, + 1.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, ], [ - 0.0, - torch.sin(torch.tensor(-torch.pi / 8)), - torch.cos(torch.tensor(-torch.pi / 8)), - 0.0, + 0.000000000000e00, + 3.826815783978e-01, + -9.238802790642e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 9.238802790642e-01, + 3.826815783978e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, ], - [0.0, 0.0, 0.0, 1.0], ], [ - [1.0, 0.0, 0.0, 0.0], [ - 0.0, - torch.cos(torch.tensor(-torch.pi / 4)), - -torch.sin(torch.tensor(-torch.pi / 4)), - 1.0, + 1.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, ], [ - 0.0, - torch.sin(torch.tensor(-torch.pi / 4)), - torch.cos(torch.tensor(-torch.pi / 4)), - 0.0, + 0.000000000000e00, + 7.071067690849e-01, + -7.071067690849e-01, + 1.000000000000e00, + ], + [ + 0.000000000000e00, + 7.071067690849e-01, + 7.071067690849e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, ], - [0.0, 0.0, 0.0, 1.0], ], [ [1.0, 0.0, 0.0, 0.0], @@ -556,42 +746,162 @@ def test_incident_ray_direction_to_orientation( torch.tensor( [ [ - [5.7358e-01, -8.1915e-01, 0.0000e00, 1.4544e-01], - [2.5715e-07, 1.8006e-07, 1.0000e00, -8.9500e-02], - [-8.1915e-01, -5.7358e-01, 3.1392e-07, 1.0184e-01], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + 5.735765099525e-01, + 3.580627350175e-08, + 8.191520571709e-01, + 1.454404443502e-01, + ], + [ + 2.571453308065e-07, + 1.000000000000e00, + -2.237665057692e-07, + -8.950003981590e-02, + ], + [ + -8.191520571709e-01, + 3.389882863303e-07, + 5.735765099525e-01, + 1.018384844065e-01, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [5.9221e-01, -8.0578e-01, 0.0000e00, 1.4307e-01], - [8.2862e-05, 
6.0899e-05, 1.0000e00, -8.9511e-02], - [-8.0578e-01, -5.9221e-01, 1.0283e-04, 1.0514e-01], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + 5.922092795372e-01, + 3.522194802486e-08, + 8.057842254639e-01, + 1.430669873953e-01, + ], + [ + 8.305405208375e-05, + 1.000000000000e00, + -6.108410161687e-05, + -8.951085805893e-02, + ], + [ + -8.057842254639e-01, + 1.030982093653e-04, + 5.922092795372e-01, + 1.051375344396e-01, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [5.7434e-01, -8.1862e-01, 0.0000e00, 1.4535e-01], - [1.6830e-04, 1.1808e-04, 1.0000e00, -8.9521e-02], - [-8.1862e-01, -5.7434e-01, 2.0559e-04, 1.0196e-01], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + 5.743377208710e-01, + 3.578294993645e-08, + 8.186184763908e-01, + 1.453457176685e-01, + ], + [ + 1.681064895820e-04, + 1.000000000000e00, + -1.179862010758e-04, + -8.952096104622e-02, + ], + [ + -8.186184763908e-01, + 2.053789939964e-04, + 5.743377208710e-01, + 1.019552871585e-01, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [5.7358e-01, -8.1915e-01, 0.0000e00, 1.4544e-01], - [2.5715e-07, 1.8006e-07, 1.0000e00, -8.9500e-02], - [-8.1915e-01, -5.7358e-01, 3.1392e-07, 1.0184e-01], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + 5.735765099525e-01, + 3.580627350175e-08, + 8.191520571709e-01, + 1.454404443502e-01, + ], + [ + 2.571453308065e-07, + 1.000000000000e00, + -2.237665057692e-07, + -8.950003981590e-02, + ], + [ + -8.191520571709e-01, + 3.389882863303e-07, + 5.735765099525e-01, + 1.018384844065e-01, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [5.9221e-01, -8.0578e-01, 0.0000e00, 1.4307e-01], - [8.2862e-05, 6.0899e-05, 1.0000e00, -8.9511e-02], - [-8.0578e-01, -5.9221e-01, 1.0283e-04, 1.0514e-01], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + 5.922092795372e-01, + 3.522194802486e-08, + 8.057842254639e-01, + 1.430669873953e-01, + ], + [ + 8.305405208375e-05, + 1.000000000000e00, + -6.108410161687e-05, + -8.951085805893e-02, + ], + [ + -8.057842254639e-01, + 1.030982093653e-04, + 5.922092795372e-01, + 1.051375344396e-01, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [5.7434e-01, -8.1862e-01, 0.0000e00, 1.4535e-01], - [1.6830e-04, 1.1808e-04, 1.0000e00, 9.1048e-01], - [-8.1862e-01, -5.7434e-01, 2.0559e-04, 1.0196e-01], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + 5.743377208710e-01, + 3.578294993645e-08, + 8.186184763908e-01, + 1.453457176685e-01, + ], + [ + 1.681064895820e-04, + 1.000000000000e00, + -1.179862010758e-04, + 9.104790687561e-01, + ], + [ + -8.186184763908e-01, + 2.053789939964e-04, + 5.743377208710e-01, + 1.019552871585e-01, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], - ], + ] ), ), ( @@ -600,40 +910,160 @@ def test_incident_ray_direction_to_orientation( torch.tensor( [ [ - [-8.6163e-01, 5.0753e-01, 0.0000e00, 0.0000e00], - [3.2746e-01, 5.5592e-01, 7.6402e-01, 0.0000e00], - [3.8777e-01, 6.5830e-01, -6.4519e-01, 0.0000e00], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + -8.616312146187e-01, + -2.218505557039e-08, + -5.075349211693e-01, + 0.000000000000e00, + ], + [ + 3.274583220482e-01, + 7.640190720558e-01, + -5.559191107750e-01, + 0.000000000000e00, + ], + [ + 3.877663612366e-01, + -6.451936960220e-01, + -6.583026647568e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 
0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [-8.6163e-01, 5.0753e-01, 0.0000e00, 0.0000e00], - [3.2746e-01, 5.5592e-01, 7.6402e-01, 0.0000e00], - [3.8777e-01, 6.5830e-01, -6.4519e-01, 0.0000e00], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + -8.616312146187e-01, + -2.218505557039e-08, + -5.075349211693e-01, + 0.000000000000e00, + ], + [ + 3.274583220482e-01, + 7.640190720558e-01, + -5.559191107750e-01, + 0.000000000000e00, + ], + [ + 3.877663612366e-01, + -6.451936960220e-01, + -6.583026647568e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [-8.6163e-01, 5.0753e-01, 0.0000e00, 0.0000e00], - [3.2746e-01, 5.5592e-01, 7.6402e-01, 0.0000e00], - [3.8777e-01, 6.5830e-01, -6.4519e-01, 0.0000e00], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + -8.616312146187e-01, + -2.218505557039e-08, + -5.075349211693e-01, + 0.000000000000e00, + ], + [ + 3.274583220482e-01, + 7.640190720558e-01, + -5.559191107750e-01, + 0.000000000000e00, + ], + [ + 3.877663612366e-01, + -6.451936960220e-01, + -6.583026647568e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [-8.6163e-01, 5.0753e-01, 0.0000e00, 0.0000e00], - [3.2746e-01, 5.5592e-01, 7.6402e-01, 0.0000e00], - [3.8777e-01, 6.5830e-01, -6.4519e-01, 0.0000e00], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + -8.616312146187e-01, + -2.218505557039e-08, + -5.075349211693e-01, + 0.000000000000e00, + ], + [ + 3.274583220482e-01, + 7.640190720558e-01, + -5.559191107750e-01, + 0.000000000000e00, + ], + [ + 3.877663612366e-01, + -6.451936960220e-01, + -6.583026647568e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [-8.6163e-01, 5.0753e-01, 0.0000e00, 0.0000e00], - [3.2746e-01, 5.5592e-01, 7.6402e-01, 0.0000e00], - [3.8777e-01, 6.5830e-01, -6.4519e-01, 0.0000e00], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + -8.616312146187e-01, + -2.218505557039e-08, + -5.075349211693e-01, + 0.000000000000e00, + ], + [ + 3.274583220482e-01, + 7.640190720558e-01, + -5.559191107750e-01, + 0.000000000000e00, + ], + [ + 3.877663612366e-01, + -6.451936960220e-01, + -6.583026647568e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ - [-8.6163e-01, 5.0753e-01, 0.0000e00, 0.0000e00], - [3.2746e-01, 5.5592e-01, 7.6402e-01, 1.0000e00], - [3.8777e-01, 6.5830e-01, -6.4519e-01, 0.0000e00], - [0.0000e00, 0.0000e00, 0.0000e00, 1.0000e00], + [ + -8.616312146187e-01, + -2.218505557039e-08, + -5.075349211693e-01, + 0.000000000000e00, + ], + [ + 3.274583220482e-01, + 7.640190720558e-01, + -5.559191107750e-01, + 1.000000000000e00, + ], + [ + 3.877663612366e-01, + -6.451936960220e-01, + -6.583026647568e-01, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ [-8.6163e-01, -2.2185e-08, -5.0753e-01, 0.0000e00], @@ -668,35 +1098,55 @@ def test_incident_ray_direction_to_orientation( torch.tensor( [ [ - [1.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0], + [ + 1.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + -4.371138828674e-08, + -1.000000000000e00, + 0.000000000000e00, + ], + [ + 0.000000000000e00, + 1.000000000000e00, + -4.371138828674e-08, + 
0.000000000000e00, + ], + [ + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, + ], ], [ [ - 0.562379062176, - -0.826879560947, - 0.000000000000, - 0.000000000000, + 5.623790621758e-01, + 3.614405486019e-08, + 8.268795609474e-01, + 0.000000000000e00, ], [ - 0.234554469585, - 0.159525677562, - 0.958924293518, - 1.000000000000, + 2.345544546843e-01, + 9.589242935181e-01, + -1.595257073641e-01, + 1.000000000000e00, ], [ - -0.792914927006, - -0.539278924465, - 0.283662199974, - 0.000000000000, + -7.929149270058e-01, + 2.836621999741e-01, + 5.392789244652e-01, + 0.000000000000e00, ], [ - 0.000000000000, - 0.000000000000, - 0.000000000000, - 1.000000000000, + 0.000000000000e00, + 0.000000000000e00, + 0.000000000000e00, + 1.000000000000e00, ], ], [ diff --git a/tests/scenario/test_h5_scenario_generator.py b/tests/scenario/test_h5_scenario_generator.py index 93f7586e7..3f7d83dda 100644 --- a/tests/scenario/test_h5_scenario_generator.py +++ b/tests/scenario/test_h5_scenario_generator.py @@ -84,12 +84,13 @@ def scenario_generator(mocker: MockerFixture) -> H5ScenarioGenerator: (pathlib.Path("scenario.h5")), (pathlib.Path("scenario")), (pathlib.Path("scenario.txt")), + ("invalid"), ], ) def test_generate_scenario( scenario_generator: H5ScenarioGenerator, tmp_path: pathlib.Path, - filename: pathlib.Path, + filename: pathlib.Path | str, ) -> None: """ Test the h5 scenario generator. @@ -100,7 +101,7 @@ def test_generate_scenario( The h5 scenario generator. tmp_path : pathlib.Path Pytest temporary directory fixture. - filename : pathlib.Path + filename : pathlib.Path | str File name to test. Raises @@ -119,20 +120,28 @@ def test_generate_scenario( ) assert save_name.exists() - with h5py.File(save_name, "r") as f: - assert f.attrs["version"] == 1.0 - assert config_dictionary.number_of_heliostat_groups in f - assert f[config_dictionary.number_of_heliostat_groups][()] == 3 - - expected_datasets = { - config_dictionary.power_plant_key: ["param1"], - config_dictionary.target_area_key: ["param2"], - config_dictionary.light_source_key: ["param3"], - config_dictionary.prototype_key: ["param4"], - config_dictionary.heliostat_key: ["param5"], - } - - for prefix, keys in expected_datasets.items(): - for key in keys: - dataset_path = f"{prefix}/{key}" - assert dataset_path in f + if filename == "invalid": + save_name = pathlib.Path("test_invalid") + with pytest.raises(FileNotFoundError) as exc_info: + with h5py.File(save_name, "r") as f: + pass + assert "No such file or directory" in str(exc_info.value) + + else: + with h5py.File(save_name, "r") as f: + assert f.attrs["version"] == 1.0 + assert config_dictionary.number_of_heliostat_groups in f + assert f[config_dictionary.number_of_heliostat_groups][()] == 3 + + expected_datasets = { + config_dictionary.power_plant_key: ["param1"], + config_dictionary.target_area_key: ["param2"], + config_dictionary.light_source_key: ["param3"], + config_dictionary.prototype_key: ["param4"], + config_dictionary.heliostat_key: ["param5"], + } + + for prefix, keys in expected_datasets.items(): + for key in keys: + dataset_path = f"{prefix}/{key}" + assert dataset_path in f diff --git a/tests/scenario/test_surface_generator.py b/tests/scenario/test_surface_generator.py index a2ce20305..a6b65fc9b 100644 --- a/tests/scenario/test_surface_generator.py +++ b/tests/scenario/test_surface_generator.py @@ -162,7 +162,7 @@ def test_surface_generator(device: torch.device) -> None: ) torch.testing.assert_close( surface_config_ideal.facet_list[2].control_points[3, 
2], - torch.tensor([-0.406257748604, -0.642499983311, 0.037706092000], device=device), + torch.tensor([0.401250004768, 0.000000000000, 0.000000000000], device=device), ) diff --git a/tests/scene/test_light_source.py b/tests/scene/test_light_source.py new file mode 100644 index 000000000..78017be69 --- /dev/null +++ b/tests/scene/test_light_source.py @@ -0,0 +1,54 @@ +import h5py +import pytest +import torch +from pytest_mock import MockerFixture + +from artist.scene.light_source import LightSource + +torch.manual_seed(7) +torch.cuda.manual_seed(7) + + +def test_load_light_source_from_hdf5( + mocker: MockerFixture, + device: torch.device, +) -> None: + """ + Test abstract light source load from hdf5 file. + + Parameters + ---------- + mocker : MockerFixture + A pytest-mocker fixture used to create mock objects. + device : torch.device + The device on which to initialize tensors. + + Raises + ------ + AssertionError + If test does not complete as expected. + """ + mock_h5_file = mocker.MagicMock(spec=h5py.File) + with pytest.raises(NotImplementedError) as exc_info: + LightSource.from_hdf5( + config_file=mock_h5_file, + light_source_name="Sun", + device=device, + ) + assert "Must be overridden!" in str(exc_info.value) + + +def test_light_source_distortions() -> None: + """ + Test the abstract light source distortions. + + Raises + ------ + AssertionError + If test does not complete as expected. + """ + light_source = LightSource(number_of_rays=4) + + with pytest.raises(NotImplementedError) as exc_info: + light_source.get_distortions(number_of_points=40, number_of_active_heliostats=5) + assert "Must be overridden!" in str(exc_info.value) diff --git a/tests/scene/test_sun.py b/tests/scene/test_sun.py index f0fbbd68d..429a4b062 100644 --- a/tests/scene/test_sun.py +++ b/tests/scene/test_sun.py @@ -251,7 +251,7 @@ def test_load_sun_and_get_distortions( ) distortions_u, distortions_e = sun.get_distortions( number_of_points=further_params_dict["num_points"], - number_of_heliostats=further_params_dict["num_heliostats"], + number_of_active_heliostats=further_params_dict["num_heliostats"], random_seed=further_params_dict["random_seed"], ) expected_u, expected_e = calculate_expected( diff --git a/tests/util/test_environment_setup.py b/tests/util/test_environment_setup.py index 93186b0ac..7727fa1e4 100644 --- a/tests/util/test_environment_setup.py +++ b/tests/util/test_environment_setup.py @@ -42,7 +42,6 @@ def test_initialize_ddp_environment( AssertionError If test does not complete as expected. """ - # Set up mock environment variables mock_env = { "WORLD_SIZE": str(world_size), "RANK": str(rank), @@ -106,7 +105,7 @@ def test_initialize_ddp_environment( ) def test_create_subgroups_for_nested_ddp( rank: int, groups_to_ranks_mapping: dict[int, list[int]], expected: tuple[int, int] -): +) -> None: """ Test the creation of process subgroups. @@ -147,7 +146,7 @@ def test_distribute_groups_among_ranks( number_of_heliostat_groups: int, expected_mapping: dict[int, list[int]], expected_is_nested: bool, -): +) -> None: """ Test the distribution of groups among ranks. 
diff --git a/tests/util/test_nurbs.py b/tests/util/test_nurbs.py index 5a95045c0..47c31220a 100644 --- a/tests/util/test_nurbs.py +++ b/tests/util/test_nurbs.py @@ -83,6 +83,19 @@ def test_nurbs(device: torch.device) -> None: ones = torch.ones(surface_points.shape[0], 1, device=device) surface_points = torch.cat((surface_points, ones), dim=1).unsqueeze(0).unsqueeze(0) + canting = torch.tensor( + [[[[0.3, 0.0, 0.0, 0.0], [0.0, 0.3, 0.0, 0.0]]]], device=device + ) + facet_translation = torch.tensor([[[[0.5, 0.0, 0.0, 0.0]]]], device=device) + + canted_surface_points = utils.perform_canting( + canting_angles=canting, data=surface_points, device=device + ) + + canted_and_translated = canted_surface_points + facet_translation.reshape( + 1, 1, 1, 4 + ) + evaluation_points = utils.create_nurbs_evaluation_grid( number_of_evaluation_points=torch.tensor([40, 40], device=device), device=device ) @@ -111,20 +124,23 @@ def test_nurbs(device: torch.device) -> None: for epoch in range(100): points, normals = nurbs.calculate_surface_points_and_normals( - evaluation_points=evaluation_points.unsqueeze(0).unsqueeze(0), device=device + evaluation_points=evaluation_points.unsqueeze(0).unsqueeze(0), + canting=canting, + facet_translations=facet_translation, + device=device, ) optimizer.zero_grad() - loss = points - surface_points + loss = points - canted_and_translated loss.abs().mean().backward() optimizer.step() - torch.testing.assert_close(points, surface_points, atol=1e-2, rtol=1e-2) + torch.testing.assert_close(points, canted_and_translated, atol=1e-2, rtol=1e-2) -def test_find_span(device: torch.device): +def test_find_span(device: torch.device) -> None: """ Test the find span method for non uniform knot vectors. @@ -239,16 +255,43 @@ def test_nurbs_forward(device: torch.device) -> None: device=device, ) - surface_points, surface_normals = nurbs(evaluation_points, device) + canting = torch.tensor( + [[[[0.7071, 0.7071, 0.0, 0.0], [0.7071, 0.7071, 0.0, 0.0]]]], device=device + ) + facet_translation = torch.tensor([[[[0.5, 0.0, 0.0, 0.0]]]], device=device) + + surface_points, surface_normals = nurbs( + evaluation_points, canting, facet_translation, device + ) expected_points = torch.tensor( [ [ [ - [-4.999866008759, -4.999866008759, 0.000000000000, 0.999999880791], - [-4.999866485596, 4.999866008759, 0.000000000000, 0.999999940395], - [4.999866008759, -4.999866485596, 0.000000000000, 0.999999940395], - [4.999866485596, 4.999866485596, 0.000000000000, 1.000000000000], + [ + -6.570879459381e00, + -7.070879459381e00, + 0.000000000000e00, + 1.000000000000e00, + ], + [ + 4.999997615814e-01, + -2.384185791016e-07, + 0.000000000000e00, + 1.000000000000e00, + ], + [ + 4.999997615814e-01, + -2.384185791016e-07, + 0.000000000000e00, + 1.000000000000e00, + ], + [ + 7.570879459381e00, + 7.070879459381e00, + 0.000000000000e00, + 1.000000000000e00, + ], ] ] ], @@ -258,10 +301,10 @@ def test_nurbs_forward(device: torch.device) -> None: [ [ [ - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, -1.0, 0.0], + [0.0, 0.0, -1.0, 0.0], + [0.0, 0.0, -1.0, 0.0], + [0.0, 0.0, -1.0, 0.0], ] ] ], diff --git a/tests/util/test_raytracing_utils.py b/tests/util/test_raytracing_utils.py index ec29de241..57c0bcc5e 100644 --- a/tests/util/test_raytracing_utils.py +++ b/tests/util/test_raytracing_utils.py @@ -222,7 +222,7 @@ def target_area_mask( "target_area_2", torch.tensor([[[2.0, 2.0, 2.0, 1.0]]]), torch.tensor([[[[0.7273, -0.5455, 0.7273, 1.0]]]]), - 
torch.tensor([[[0.458333343267]]]), + torch.tensor([[[5.500000000000]]]), ), ( # Multiple intersections with multiple rays. ( @@ -246,21 +246,7 @@ def target_area_mask( torch.tensor( [[[[0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 1.0], [0.0, -2.0, 0.0, 1.0]]]] ), - torch.tensor([[[1.0000, 1.0000, 0.0833]]]), - ), - ( # ValueError - no intersection since ray is parallel to plane. - (torch.tensor([[[[1.0, 0.0, 0.0, 0.0]]]]), torch.tensor([[[1.0]]])), - "target_area_1", - torch.tensor([[[0.0, 0.0, 1.0, 1.0]]]), - None, - None, - ), - ( # ValueError - no intersection since ray is within the plane. - (torch.tensor([[[[1.0, 0.0, 0.0, 0.0]]]]), torch.tensor([[[1.0]]])), - "target_area_1", - torch.tensor([[[0.0, 0.0, 0.0, 1.0]]]), - None, - None, + torch.tensor([[[1.0000, 1.0000, 1.0000]]]), ), ], indirect=["rays"], @@ -271,8 +257,8 @@ def test_line_plane_intersection( rays: Rays, target_areas_fixture: str, points_at_ray_origins: torch.Tensor, - expected_intersections: torch.Tensor | None, - expected_absolute_intensities: torch.Tensor | None, + expected_intersections: torch.Tensor, + expected_absolute_intensities: torch.Tensor, device: torch.device, ) -> None: """ @@ -286,16 +272,14 @@ def test_line_plane_intersection( The target area mask. rays : Rays The rays with directions and magnitudes. - plane_normal_vectors : torch.Tensor - The normal vectors of the plane being considered for the intersection. - plane_center : torch.Tensor - The center of the plane being considered for the intersection. - points_at_ray_origin : torch.Tensor + target_areas_fixture : str + Name of fixture to get target areas. + points_at_ray_origins : torch.Tensor The surface points of the ray origin. - expected_intersections : torch.Tensor | None - The expected intersections between the rays and the plane, or ``None`` if no intersections are expected. - expected_absolute_intensities : torch.Tensor | None - The expected absolute intensities of the ray intersections, or ``None`` if no intersections are expected. + expected_intersections : torch.Tensor + The expected intersections between the rays and the plane. + expected_absolute_intensities : torch.Tensor + The expected absolute intensities of the ray intersections. device : torch.device The device on which to initialize tensors. @@ -304,34 +288,19 @@ def test_line_plane_intersection( AssertionError If test does not complete as expected. """ - # Check if the ValueError is thrown as expected. - if expected_intersections is None or expected_absolute_intensities is None: - with pytest.raises(ValueError) as exc_info: - raytracing_utils.line_plane_intersections( - rays=rays, - points_at_ray_origins=points_at_ray_origins.to(device), - target_areas=request.getfixturevalue(target_areas_fixture), - target_area_mask=target_area_mask, - device=device, - ) - assert "No ray intersections on the front of the target area planes." in str( - exc_info.value - ) - else: - # Check if the intersections match the expected intersections. 
- intersections, absolute_intensities = raytracing_utils.line_plane_intersections( - rays=rays, - points_at_ray_origins=points_at_ray_origins.to(device), - target_areas=request.getfixturevalue(target_areas_fixture), - target_area_mask=target_area_mask, - device=device, - ) - torch.testing.assert_close( - intersections, expected_intersections.to(device), rtol=1e-4, atol=1e-4 - ) - torch.testing.assert_close( - absolute_intensities, - expected_absolute_intensities.to(device), - rtol=1e-4, - atol=1e-4, - ) + intersections, absolute_intensities = raytracing_utils.line_plane_intersections( + rays=rays, + points_at_ray_origins=points_at_ray_origins.to(device), + target_areas=request.getfixturevalue(target_areas_fixture), + target_area_mask=target_area_mask, + device=device, + ) + torch.testing.assert_close( + intersections, expected_intersections.to(device), rtol=1e-4, atol=1e-4 + ) + torch.testing.assert_close( + absolute_intensities, + expected_absolute_intensities.to(device), + rtol=1e-4, + atol=1e-4, + ) diff --git a/tests/util/test_runtime_tracker.py b/tests/util/test_runtime_tracker.py new file mode 100644 index 000000000..cbacf7924 --- /dev/null +++ b/tests/util/test_runtime_tracker.py @@ -0,0 +1,61 @@ +import logging +import pathlib +import re +import time +from typing import cast + +import pytest + +from artist.util import set_runtime_logger, track_runtime + + +@pytest.fixture +def runtime_logger(tmp_path: pathlib.Path) -> logging.Logger: + """ + Pytest fixture that returns a runtime logger writing to a temporary file. + + Parameters + ---------- + tmp_path : pathlib.Path + Temporary path to the logs. + + Returns + ------- + logging.Logger + A temporary logger. + """ + log_file = tmp_path / "runtime_test.log" + logger = set_runtime_logger(log_file=log_file, level=logging.INFO) + return logger + + +def test_track_runtime_with_fixture(runtime_logger: logging.Logger) -> None: + """ + Test the runtime tracker decorator. + + Parameters + ---------- + runtime_logger : logging.Logger + Temporary runtime logger. + + Raises + ------ + AssertionError + If test does not complete as expected. + """ + + @track_runtime(runtime_logger) + def _dummy_function(x, y): + time.sleep(0.05) + return x + y + + result = _dummy_function(2, 3) + assert result == 5 + + file_handler = cast(logging.FileHandler, runtime_logger.handlers[0]).baseFilename + log_contents = pathlib.Path(file_handler).read_text() + + assert "dummy_function started" in log_contents + assert "dummy_function finished in" in log_contents + match = re.search(r"finished in (\d+\.\d+)s", log_contents) + assert match is not None diff --git a/tests/util/test_utils.py b/tests/util/test_utils.py index aaf1ac5b4..66f861b45 100644 --- a/tests/util/test_utils.py +++ b/tests/util/test_utils.py @@ -1,11 +1,9 @@ import math -import pathlib import pytest import torch -from artist import ARTIST_ROOT -from artist.util import config_dictionary, utils +from artist.util import utils @pytest.mark.parametrize( @@ -529,55 +527,6 @@ def test_distortion_rotations( torch.testing.assert_close(distorted_rays, expected_distorted_rays.to(device)) -def test_normalize_bitmaps(device: torch.device) -> None: - """ - Test the normalization for bitmaps. - - Parameters - ---------- - device : torch.device - The device on which to initialize tensors. - - Raises - ------ - AssertionError - If test does not complete as expected. 
- """ - bitmap_path = ( - pathlib.Path(ARTIST_ROOT) - / "tests/data/expected_optimized_motor_positions/distribution.pt" - ) - - bitmap = torch.load(bitmap_path, map_location=device, weights_only=True).unsqueeze( - 0 - ) - - normalized_bitmaps = utils.normalize_bitmaps( - flux_distributions=bitmap, - target_area_widths=torch.full( - (bitmap.shape[0],), - config_dictionary.utis_crop_width, - device=device, - ), - target_area_heights=torch.full( - (bitmap.shape[0],), - config_dictionary.utis_crop_height, - device=device, - ), - number_of_rays=bitmap.sum(dim=[1, 2]), - ) - - expected_path = ( - pathlib.Path(ARTIST_ROOT) - / "tests/data/expected_normalized_bitmaps" - / f"bitmaps_{device.type}.pt" - ) - - expected = torch.load(expected_path, map_location=device, weights_only=True) - - torch.testing.assert_close(normalized_bitmaps, expected, atol=5e-4, rtol=5e-4) - - @pytest.mark.parametrize( "total_width, slope_width, plateau_width, expected", [ diff --git a/tutorials/00_generate_scenario_from_paint_tutorial.py b/tutorials/00_generate_scenario_from_paint_tutorial.py index 2d8c57731..7a104fe41 100644 --- a/tutorials/00_generate_scenario_from_paint_tutorial.py +++ b/tutorials/00_generate_scenario_from_paint_tutorial.py @@ -101,6 +101,7 @@ threshold_mode="abs", ) +# Use this configuration for deflectometry surfaces. heliostat_list_config, prototype_config = ( paint_scenario_parser.extract_paint_heliostats_fitted_surface( paths=heliostat_files_list, @@ -116,6 +117,16 @@ ) ) +# Use this configuration for ideal surfaces. +# heliostat_list_config, prototype_config = ( +# paint_scenario_parser.extract_paint_heliostats_ideal_surface( +# paths=heliostat_files_list, +# power_plant_position=power_plant_config.power_plant_position, +# number_of_nurbs_control_points=number_of_nurbs_control_points, +# device=device, +# ) +# ) + if __name__ == "__main__": """Generate the scenario given the defined parameters.""" scenario_generator = H5ScenarioGenerator( diff --git a/tutorials/01_single_heliostat_raytracing_tutorial.py b/tutorials/01_single_heliostat_raytracing_tutorial.py index d3b19e1a6..9d8bd691c 100644 --- a/tutorials/01_single_heliostat_raytracing_tutorial.py +++ b/tutorials/01_single_heliostat_raytracing_tutorial.py @@ -242,7 +242,7 @@ def align_and_trace_rays( torch.Tensor A tensor containing the distribution strengths used to generate the image on the receiver. """ - # Activate heliostats + # Activate heliostats. scenario.heliostat_field.heliostat_groups[ index_mapping.first_heliostat_group ].activate_heliostats( diff --git a/tutorials/02_heliostat_raytracing_distributed_tutorial.py b/tutorials/02_heliostat_raytracing_distributed_tutorial.py index 1d2c51d02..e03404e17 100644 --- a/tutorials/02_heliostat_raytracing_distributed_tutorial.py +++ b/tutorials/02_heliostat_raytracing_distributed_tutorial.py @@ -5,6 +5,7 @@ from matplotlib import pyplot as plt from artist.core.heliostat_ray_tracer import HeliostatRayTracer +from artist.field.heliostat_group import HeliostatGroup from artist.scenario.scenario import Scenario from artist.util import config_dictionary, index_mapping, set_logger_config from artist.util.environment_setup import get_device, setup_distributed_environment @@ -38,6 +39,8 @@ scenario_file=scenario_file, device=device, ) + # Set a ray extinction factor responsible for global shading of rays (0.0 -> no global shading, 1.0 -> full global shading). 
+ ray_extinction_factor = 0.0 # Use a heliostat target light source mapping to specify which heliostat in your scenario should be activated, # which heliostat will receive which incident ray direction for alignment and on which target it will be raytraced. @@ -65,12 +68,8 @@ # Since each individual heliostat group has individual kinematic and actuator types, they must be # processed separately. If a distributed environment exists, they can be processed in parallel, # otherwise each heliostat group results will be computed sequentially. - for heliostat_group_index in ddp_setup[config_dictionary.groups_to_ranks_mapping][ - ddp_setup[config_dictionary.rank] - ]: - heliostat_group = scenario.heliostat_field.heliostat_groups[ - heliostat_group_index - ] + # For blocking to work correctly, all heliostat groups have to be aligned before any group can be raytraced. + for heliostat_group_alignment in scenario.heliostat_field.heliostat_groups: # If no mapping from heliostats to target areas to incident ray direction is provided, the scenario.index_mapping() method # activates all heliostats. It is possible to then provide a default target area index and a default incident ray direction # if those are not specified either all heliostats are assigned to the first target area found in the scenario with an @@ -80,7 +79,7 @@ target_area_mask, incident_ray_directions, ) = scenario.index_mapping( - heliostat_group=heliostat_group, + heliostat_group=heliostat_group_alignment, string_mapping=heliostat_target_light_source_mapping, device=device, ) @@ -89,22 +88,40 @@ # For each index 0 indicates a deactivated heliostat and 1 an activated one. # An integer greater than 1 indicates that the heliostat in this index is regarded multiple times. # It is a tensor of shape [number_of_heliostats_in_group]. - heliostat_group.activate_heliostats( + heliostat_group_alignment.activate_heliostats( active_heliostats_mask=active_heliostats_mask, device=device ) # Align heliostats. - heliostat_group.align_surfaces_with_incident_ray_directions( + heliostat_group_alignment.align_surfaces_with_incident_ray_directions( aim_points=scenario.target_areas.centers[target_area_mask], incident_ray_directions=incident_ray_directions, active_heliostats_mask=active_heliostats_mask, device=device, ) + # Raytracing happens only on one device for each group. + for heliostat_group_index in ddp_setup[config_dictionary.groups_to_ranks_mapping][ + ddp_setup[config_dictionary.rank] + ]: + heliostat_group: HeliostatGroup = scenario.heliostat_field.heliostat_groups[ + heliostat_group_index + ] + ( + active_heliostats_mask, + target_area_mask, + incident_ray_directions, + ) = scenario.index_mapping( + heliostat_group=heliostat_group, + string_mapping=heliostat_target_light_source_mapping, + device=device, + ) + # Create a parallelized ray tracer. ray_tracer = HeliostatRayTracer( scenario=scenario, heliostat_group=heliostat_group, + blocking_active=False, world_size=ddp_setup[config_dictionary.heliostat_group_world_size], rank=ddp_setup[config_dictionary.heliostat_group_rank], batch_size=heliostat_group.number_of_active_heliostats, @@ -120,41 +137,32 @@ device=device, ) + sample_indices_for_local_rank = ray_tracer.get_sampler_indices() # Plot the bitmaps of each single heliostat. 
- for heliostat_index in range(heliostat_group.number_of_heliostats): - plt.imshow( - bitmaps_per_heliostat[heliostat_index].cpu().detach(), cmap="gray" - ) + for i in range(bitmaps_per_heliostat.shape[0]): + expanded_names = [ + name + for name, m in zip(heliostat_group.names, active_heliostats_mask) + for _ in range(m) + ] + plt.imshow(bitmaps_per_heliostat[i].cpu().detach(), cmap="gray") plt.axis("off") plt.title( - f"Heliostat: {heliostat_group.names[heliostat_index]}, Group: {heliostat_group_index}, Rank: {ddp_setup['rank']}" + f"Heliostat: {expanded_names[sample_indices_for_local_rank[i]]}, Group: {heliostat_group_index}, Rank: {ddp_setup['rank']} Target: {scenario.target_areas.names[target_area_mask[i]]}" ) plt.savefig( - f"bitmap_of_heliostat_{heliostat_group.names[heliostat_index]}_in_group_{heliostat_group_index}_on_rank_{ddp_setup['rank']}.png" + f"bitmap_group_{heliostat_group_index}_on_rank_{ddp_setup['rank']}_sample_{i}_heliostat_{expanded_names[sample_indices_for_local_rank[i]]}.png" ) # Get the flux distributions per target. bitmaps_per_target = ray_tracer.get_bitmaps_per_target( bitmaps_per_heliostat=bitmaps_per_heliostat, - target_area_mask=target_area_mask, + target_area_mask=target_area_mask[sample_indices_for_local_rank], device=device, ) combined_bitmaps_per_target = combined_bitmaps_per_target + bitmaps_per_target - # Plot the combined bitmaps of heliostats on the same target. - for target_area_index in range(scenario.target_areas.number_of_target_areas): - plt.imshow( - bitmaps_per_target[target_area_index].cpu().detach(), cmap="gray" - ) - plt.axis("off") - plt.title( - f"Target area: {scenario.target_areas.names[target_area_index]}, Group: {heliostat_group_index}, Rank: {ddp_setup['rank']}" - ) - plt.savefig( - f"combined_bitmap_on_{scenario.target_areas.names[target_area_index]}_from_group_{heliostat_group_index}_on_rank_{ddp_setup['rank']}.png" - ) - # It is possible to skip this nested reduction step. The reduction within the outer process group would take # care of it but to see how the nested process group it is nice to look at the intermediate reduction results. if ddp_setup[config_dictionary.is_nested]: @@ -175,7 +183,7 @@ f"Reduced within group, Target area: {scenario.target_areas.names[target_area_index]}, Rank: {ddp_setup['rank']}" ) plt.savefig( - f"reduced_bitmap_on_{scenario.target_areas.names[target_area_index]}_on_rank_{ddp_setup['rank']}.png" + f"reduced_bitmap_on_rank_{ddp_setup['rank']}_on_{scenario.target_areas.names[target_area_index]}.png" ) if ddp_setup[config_dictionary.is_distributed]: @@ -183,16 +191,16 @@ combined_bitmaps_per_target, op=torch.distributed.ReduceOp.SUM ) - # Plot the final combined bitmaps of heliostats on the same target fully reduced. - for target_area_index in range(scenario.target_areas.number_of_target_areas): - plt.imshow( - combined_bitmaps_per_target[target_area_index].cpu().detach(), - cmap="gray", - ) - plt.axis("off") - plt.title( - f"Final bitmap, Target area: {scenario.target_areas.names[target_area_index]}, Rank: {ddp_setup['rank']}" - ) - plt.savefig( - f"final_reduced_bitmap_on_{scenario.target_areas.names[target_area_index]}_on_rank_{ddp_setup['rank']}.png" - ) + # Plot the final combined bitmaps of heliostats on the same target fully reduced. 
+ for target_area_index in range(scenario.target_areas.number_of_target_areas): + plt.imshow( + combined_bitmaps_per_target[target_area_index].cpu().detach(), + cmap="gray", + ) + plt.axis("off") + plt.title( + f"Final bitmap, Target area: {scenario.target_areas.names[target_area_index]}, Rank: {ddp_setup['rank']}" + ) + plt.savefig( + f"final_reduced_bitmap_on_rank_{ddp_setup['rank']}_on_{scenario.target_areas.names[target_area_index]}.png" + ) diff --git a/tutorials/03_nurbs_surface_reconstruction_tutorial.py b/tutorials/03_nurbs_surface_reconstruction_tutorial.py index 0d55ac20f..e10a8e1dd 100644 --- a/tutorials/03_nurbs_surface_reconstruction_tutorial.py +++ b/tutorials/03_nurbs_surface_reconstruction_tutorial.py @@ -7,7 +7,7 @@ from artist.core.heliostat_ray_tracer import HeliostatRayTracer from artist.core.loss_functions import KLDivergenceLoss -from artist.core.regularizers import IdealSurfaceRegularizer, TotalVariationRegularizer +from artist.core.regularizers import IdealSurfaceRegularizer, SmoothnessRegularizer from artist.core.surface_reconstructor import SurfaceReconstructor from artist.data_parser import paint_scenario_parser from artist.data_parser.calibration_data_parser import CalibrationDataParser @@ -23,7 +23,7 @@ ############################################################################################################# # Define helper functions for the plots. -# Skip to line 324 for the tutorial code. +# Skip to line 343 for the tutorial code. ############################################################################################################# @@ -160,7 +160,7 @@ def create_surface_plots(name: str) -> None: # Create evaluation points. evaluation_points = ( utils.create_nurbs_evaluation_grid( - number_of_evaluation_points=number_of_surface_points, + number_of_evaluation_points=torch.tensor([50, 50], device=device), device=device, ) .unsqueeze(index_mapping.heliostat_dimension) @@ -186,6 +186,12 @@ def create_surface_plots(name: str) -> None: temporary_points, temporary_normals = ( temporary_nurbs.calculate_surface_points_and_normals( evaluation_points=evaluation_points, + canting=heliostat_group.canting[heliostat_index].unsqueeze( + index_mapping.heliostat_dimension + ), + facet_translations=heliostat_group.facet_translations[ + heliostat_index + ].unsqueeze(index_mapping.heliostat_dimension), device=device, ) ) @@ -195,7 +201,7 @@ def create_surface_plots(name: str) -> None: surface_points=temporary_points[index_mapping.first_heliostat], surface_normals=temporary_normals[index_mapping.first_heliostat], reference_direction=torch.tensor([0.0, 0.0, 1.0, 0.0], device=device), - name=f"{name}_rank_{ddp_setup['rank']}_heliostat_group_{heliostat_group_index}_heliostat_{heliostat_index}", + name=f"{name}_rank_{ddp_setup['rank']}_heliostat_group_{heliostat_group_index}_heliostat_{heliostat_group.names[heliostat_index]}", ) @@ -251,82 +257,86 @@ def create_flux_plots( device=device, ) - # Activate heliostats. - heliostat_group.activate_heliostats( - active_heliostats_mask=validation_active_heliostats_mask, - device=device, - ) - - # Create surfaces for all samples. - validation_nurbs = NURBSSurfaces( - degrees=heliostat_group.nurbs_degrees, - control_points=heliostat_group.active_nurbs_control_points, - uniform=True, - device=device, - ) + if validation_active_heliostats_mask.sum() > 0: + # Activate heliostats. 
+ heliostat_group.activate_heliostats( + active_heliostats_mask=validation_active_heliostats_mask, + device=device, + ) - # Create evaluation points for all samples. - validation_evaluation_points = ( - utils.create_nurbs_evaluation_grid( - number_of_evaluation_points=number_of_surface_points, + # Create surfaces for all samples. + validation_nurbs = NURBSSurfaces( + degrees=heliostat_group.nurbs_degrees, + control_points=heliostat_group.active_nurbs_control_points, + uniform=True, device=device, ) - .unsqueeze(index_mapping.heliostat_dimension) - .unsqueeze(index_mapping.facet_index_unbatched) - .expand( - validation_active_heliostats_mask.sum(), - heliostat_group.number_of_facets_per_heliostat, - -1, - -1, + + # Create evaluation points for all samples. + validation_evaluation_points = ( + utils.create_nurbs_evaluation_grid( + number_of_evaluation_points=torch.tensor([50, 50], device=device), + device=device, + ) + .unsqueeze(index_mapping.heliostat_dimension) + .unsqueeze(index_mapping.facet_index_unbatched) + .expand( + validation_active_heliostats_mask.sum(), + heliostat_group.number_of_facets_per_heliostat, + -1, + -1, + ) ) - ) - # Calculate new surface points and normals for all samples. - validation_surface_points, validation_surface_normals = ( - validation_nurbs.calculate_surface_points_and_normals( - evaluation_points=validation_evaluation_points, - device=device, + # Calculate new surface points and normals for all samples. + validation_surface_points, validation_surface_normals = ( + validation_nurbs.calculate_surface_points_and_normals( + evaluation_points=validation_evaluation_points, + canting=heliostat_group.active_canting, + facet_translations=heliostat_group.active_facet_translations, + device=device, + ) ) - ) - heliostat_group.active_surface_points = validation_surface_points.reshape( - validation_active_heliostats_mask.sum(), -1, 4 - ) - heliostat_group.active_surface_normals = validation_surface_normals.reshape( - validation_active_heliostats_mask.sum(), -1, 4 - ) + heliostat_group.active_surface_points = validation_surface_points.reshape( + validation_active_heliostats_mask.sum(), -1, 4 + ) + heliostat_group.active_surface_normals = validation_surface_normals.reshape( + validation_active_heliostats_mask.sum(), -1, 4 + ) - # Align heliostats. - heliostat_group.align_surfaces_with_incident_ray_directions( - aim_points=scenario.target_areas.centers[validation_target_area_mask], - incident_ray_directions=validation_incident_ray_directions, - active_heliostats_mask=validation_active_heliostats_mask, - device=device, - ) + # Align heliostats. + heliostat_group.align_surfaces_with_incident_ray_directions( + aim_points=scenario.target_areas.centers[validation_target_area_mask], + incident_ray_directions=validation_incident_ray_directions, + active_heliostats_mask=validation_active_heliostats_mask, + device=device, + ) - # Create a ray tracer and reduce number of rays in scenario light source. - scenario.set_number_of_rays(number_of_rays=10) - validation_ray_tracer = HeliostatRayTracer( - scenario=scenario, - heliostat_group=heliostat_group, - batch_size=heliostat_group.number_of_active_heliostats, - bitmap_resolution=torch.tensor([256, 256], device=device), - ) + # Create a ray tracer and reduce number of rays in scenario light source. 
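The evaluation-point construction above broadcasts a single parameter grid to every active heliostat and facet. A shape-only sketch of the same broadcasting pattern, using plain tensors instead of ARTIST's utils.create_nurbs_evaluation_grid (the [50, 50] grid size and the four-facet layout come from this diff; the [0, 1] parameter range and everything else are illustrative assumptions):

    import torch

    number_of_heliostats = 3   # e.g. validation_active_heliostats_mask.sum()
    facets_per_heliostat = 4   # heliostats in these tutorials have four facets

    # Stand-in for the evaluation grid: 50 x 50 = 2500 points, each a 2D (u, v)
    # coordinate in the NURBS parameter domain.
    u = torch.linspace(0.0, 1.0, 50)
    v = torch.linspace(0.0, 1.0, 50)
    grid = torch.stack(torch.meshgrid(u, v, indexing="ij"), dim=-1).reshape(-1, 2)

    # Broadcast the same grid to every heliostat and facet, as the tutorial does.
    evaluation_points = (
        grid.unsqueeze(0)  # add heliostat dimension
        .unsqueeze(1)      # add facet dimension
        .expand(number_of_heliostats, facets_per_heliostat, -1, -1)
    )
    print(evaluation_points.shape)  # torch.Size([3, 4, 2500, 2])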
+ scenario.set_number_of_rays(number_of_rays=10) + validation_ray_tracer = HeliostatRayTracer( + scenario=scenario, + heliostat_group=heliostat_group, + blocking_active=False, + batch_size=heliostat_group.number_of_active_heliostats, + bitmap_resolution=torch.tensor([256, 256], device=device), + ) - # Perform heliostat-based ray tracing. - validation_bitmaps_per_heliostat = validation_ray_tracer.trace_rays( - incident_ray_directions=validation_incident_ray_directions, - active_heliostats_mask=validation_active_heliostats_mask, - target_area_mask=validation_target_area_mask, - device=device, - ) + # Perform heliostat-based ray tracing. + validation_bitmaps_per_heliostat = validation_ray_tracer.trace_rays( + incident_ray_directions=validation_incident_ray_directions, + active_heliostats_mask=validation_active_heliostats_mask, + target_area_mask=validation_target_area_mask, + device=device, + ) - # Create the plots. - plot_multiple_fluxes( - validation_bitmaps_per_heliostat, - validation_measured_flux_distributions, - name=f"{plot_name}_rank_{ddp_setup['rank']}_heliostat_group_{heliostat_group_index}", - ) + # Create the plots. + plot_multiple_fluxes( + validation_bitmaps_per_heliostat, + validation_measured_flux_distributions, + name=f"{plot_name}_rank_{ddp_setup['rank']}_heliostat_group_{heliostat_group_index}", + ) ############################################################################################################# @@ -387,6 +397,52 @@ def create_flux_plots( # randomize=True, # ) +# Configure the optimization. +optimizer_dict = { + config_dictionary.initial_learning_rate: 1e-4, + config_dictionary.tolerance: 1e-5, + config_dictionary.max_epoch: 30, + config_dictionary.batch_size: 30, + config_dictionary.log_step: 1, + config_dictionary.early_stopping_delta: 1e-4, + config_dictionary.early_stopping_patience: 100, + config_dictionary.early_stopping_window: 100, +} +# Configure the learning rate scheduler. +scheduler_dict = { + config_dictionary.scheduler_type: config_dictionary.exponential, + config_dictionary.gamma: 0.99, + config_dictionary.min: 1e-6, + config_dictionary.max: 1e-2, + config_dictionary.step_size_up: 100, + config_dictionary.reduce_factor: 0.5, + config_dictionary.patience: 10, + config_dictionary.threshold: 1e-4, + config_dictionary.cooldown: 5, +} +# Configure the regularizers. +ideal_surface_regularizer = IdealSurfaceRegularizer(reduction_dimensions=(1,)) +smoothness_regularizer = SmoothnessRegularizer(reduction_dimensions=(1,)) +regularizers = [ + ideal_surface_regularizer, + smoothness_regularizer, +] +# Configure the regularizers and constraints. +constraint_dict = { + config_dictionary.regularizers: regularizers, + config_dictionary.weight_smoothness: 0.005, + config_dictionary.weight_ideal_surface: 0.005, + config_dictionary.initial_lambda_energy: 0.1, + config_dictionary.rho_energy: 1.0, + config_dictionary.energy_tolerance: 0.01, +} +# Combine configurations. +optimization_configuration = { + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, + config_dictionary.constraints: constraint_dict, +} + # Create dict for the data parser and the heliostat_data_mapping. data: dict[ str, @@ -410,10 +466,10 @@ def create_flux_plots( with h5py.File(scenario_path, "r") as scenario_file: scenario = Scenario.load_scenario_from_hdf5( scenario_file=scenario_file, - device=device, change_number_of_control_points_per_facet=torch.tensor( - [17, 17], device=device + [7, 7], device=device ), + device=device, ) # Set loss function. 
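The tutorial selects KLDivergenceLoss and mentions PixelLoss as an alternative. ARTIST's implementation is not shown in this diff, but the core quantity a KL-divergence flux loss compares is the predicted versus measured flux bitmaps interpreted as probability distributions. A minimal standalone sketch of that idea (not ARTIST's actual code; the epsilon handling is an assumption):

    import torch

    def kl_divergence_flux_loss(
        predicted: torch.Tensor, measured: torch.Tensor, eps: float = 1e-12
    ) -> torch.Tensor:
        # Normalize both bitmaps so they sum to one, i.e. treat them as distributions.
        p = measured / (measured.sum() + eps)
        q = predicted / (predicted.sum() + eps)
        # KL(p || q); eps keeps the logarithm finite for empty pixels.
        return (p * ((p + eps) / (q + eps)).log()).sum()

    prediction = torch.rand(256, 256)
    measurement = torch.rand(256, 256)
    print(kl_divergence_flux_loss(prediction, measurement))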
@@ -421,69 +477,10 @@ def create_flux_plots( # Another possibility would be the pixel loss: # loss_definition = PixelLoss(scenario=scenario) - # Configure regularizers and their weights. - ideal_surface_regularizer = IdealSurfaceRegularizer( - weight=0.4, - reduction_dimensions=( - index_mapping.facet_dimension, - index_mapping.points_dimension, - index_mapping.coordinates_dimension, - ), - ) - total_variation_regularizer_points = TotalVariationRegularizer( - weight=0.3, - reduction_dimensions=(index_mapping.facet_dimension,), - surface=config_dictionary.surface_points, - number_of_neighbors=1000, - sigma=1e-3, - ) - total_variation_regularizer_normals = TotalVariationRegularizer( - weight=0.8, - reduction_dimensions=(index_mapping.facet_dimension,), - surface=config_dictionary.surface_points, - number_of_neighbors=1000, - sigma=1e-3, - ) - - regularizers = [ - ideal_surface_regularizer, - ] - - # Configure the learning rate scheduler. The example scheduler parameter dict includes - # example parameters for all three possible schedulers. - scheduler = ( - config_dictionary.reduce_on_plateau - ) # exponential, cyclic or reduce_on_plateau - scheduler_parameters = { - config_dictionary.min: 1e-4, - config_dictionary.min: 5e-6, - config_dictionary.max: 8e-5, - config_dictionary.step_size_up: 50, - config_dictionary.reduce_factor: 0.9, - config_dictionary.patience: 100, - config_dictionary.threshold: 1e-3, - config_dictionary.cooldown: 20, - } - - # Set optimizer parameters. - optimization_configuration = { - config_dictionary.initial_learning_rate: 1e-5, - config_dictionary.tolerance: 1e-5, - config_dictionary.max_epoch: 200, - config_dictionary.log_step: 3, - config_dictionary.early_stopping_delta: 5e-5, - config_dictionary.early_stopping_patience: 200, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, - config_dictionary.regularizers: regularizers, - } - scenario.set_number_of_rays(number_of_rays=170) - number_of_surface_points = torch.tensor([60, 60], device=device) resolution = torch.tensor([256, 256], device=device) - # Visualize the ideal surfaces and flux distributions from ideal heliostats. - # Please adapt the heliostat names according to the ones to be plotted. + # Visualize the surfaces and flux distributions from the initial heliostats. number_of_plots_per_heliostat = 2 create_surface_plots(name="ideal") create_flux_plots( @@ -502,8 +499,6 @@ def create_flux_plots( scenario=scenario, data=data, optimization_configuration=optimization_configuration, - number_of_surface_points=number_of_surface_points, - bitmap_resolution=resolution, device=device, ) @@ -515,7 +510,7 @@ def create_flux_plots( # Inspect the synchronized loss per heliostat. Heliostats that have not been optimized have an infinite loss. print(f"rank {ddp_setup['rank']}, final loss per heliostat {final_loss_per_heliostat}") -# Visualize the results (reconstructed surfaces and flux distributions from reconstructed heliostats). +# Visualize the surfaces and flux distributions from the reconstructed heliostats. 
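The print statement above notes that heliostats which were never optimized report an infinite loss. If one wants to summarize only the optimized heliostats, a small post-processing step like the following would do, assuming final_loss_per_heliostat is a 1D tensor as the print suggests (the values here are made up):

    import torch

    # Toy stand-in for the reconstructor output: two optimized heliostats, one untouched.
    final_loss_per_heliostat = torch.tensor([0.031, float("inf"), 0.045])

    optimized = torch.isfinite(final_loss_per_heliostat)
    print(f"optimized heliostats: {int(optimized.sum())}")
    print(f"mean loss: {final_loss_per_heliostat[optimized].mean():.4f}")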
create_surface_plots(name="reconstructed") create_flux_plots( heliostat_names=heliostat_names_plots, diff --git a/tutorials/04_kinematic_reconstruction_tutorial.py b/tutorials/04_kinematic_reconstruction_tutorial.py index 99d2b94f0..3aef8e393 100644 --- a/tutorials/04_kinematic_reconstruction_tutorial.py +++ b/tutorials/04_kinematic_reconstruction_tutorial.py @@ -20,7 +20,7 @@ ############################################################################################################# # Define helper functions for the plots. -# Skip to line 143 for the tutorial code. +# Skip to line 145 for the tutorial code. ############################################################################################################# @@ -61,38 +61,40 @@ def create_fluxes( device=device, ) - measured_bitmaps.append(measured_flux) - - # Activate heliostats. - heliostat_group.activate_heliostats( - active_heliostats_mask=active_heliostats_mask, - device=device, - ) - - # Align heliostats. - heliostat_group.align_surfaces_with_incident_ray_directions( - aim_points=scenario.target_areas.centers[target_area_mask], - incident_ray_directions=incident_ray_directions, - active_heliostats_mask=active_heliostats_mask, - device=device, - ) - - # Create a ray tracer. - ray_tracer = HeliostatRayTracer( - scenario=scenario, - heliostat_group=heliostat_group, - batch_size=heliostat_group.number_of_active_heliostats, - bitmap_resolution=torch.tensor([256, 256], device=device), - ) - - # Perform heliostat-based ray tracing. - bitmaps_per_heliostat = ray_tracer.trace_rays( - incident_ray_directions=incident_ray_directions, - active_heliostats_mask=active_heliostats_mask, - target_area_mask=target_area_mask, - device=device, - ) - bitmaps.append(bitmaps_per_heliostat) + if active_heliostats_mask.sum() > 0: + measured_bitmaps.append(measured_flux) + + # Activate heliostats. + heliostat_group.activate_heliostats( + active_heliostats_mask=active_heliostats_mask, + device=device, + ) + + # Align heliostats. + heliostat_group.align_surfaces_with_incident_ray_directions( + aim_points=scenario.target_areas.centers[target_area_mask], + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + device=device, + ) + + # Create a ray tracer. + ray_tracer = HeliostatRayTracer( + scenario=scenario, + heliostat_group=heliostat_group, + blocking_active=False, + batch_size=heliostat_group.number_of_active_heliostats, + bitmap_resolution=torch.tensor([256, 256], device=device), + ) + + # Perform heliostat-based ray tracing. + bitmaps_per_heliostat = ray_tracer.trace_rays( + incident_ray_directions=incident_ray_directions, + active_heliostats_mask=active_heliostats_mask, + target_area_mask=target_area_mask, + device=device, + ) + bitmaps.append(bitmaps_per_heliostat) scenario.set_number_of_rays(number_of_rays=4) @@ -188,12 +190,41 @@ def create_plots( # Or if you have a directory with downloaded data use this code to create a mapping. # heliostat_data_mapping = paint_scenario_parser.build_heliostat_data_mapping( # base_path="base/path/data", -# heliostat_names=["heliostat_1", "..."], +# heliostat_names=["heliostat_1"], # number_of_measurements=5, # image_variant="flux", # randomize=True, # ) +# Configure the optimization. 
+optimizer_dict = {
+    config_dictionary.initial_learning_rate: 0.0005,
+    config_dictionary.tolerance: 0.0005,
+    config_dictionary.max_epoch: 100,
+    config_dictionary.batch_size: 50,
+    config_dictionary.log_step: 3,
+    config_dictionary.early_stopping_delta: 1e-4,
+    config_dictionary.early_stopping_patience: 300,
+    config_dictionary.early_stopping_window: 300,
+}
+# Configure the learning rate scheduler.
+scheduler_dict = {
+    config_dictionary.scheduler_type: config_dictionary.reduce_on_plateau,
+    config_dictionary.gamma: 0.9,
+    config_dictionary.min: 1e-6,
+    config_dictionary.max: 1e-3,
+    config_dictionary.step_size_up: 500,
+    config_dictionary.reduce_factor: 0.0001,
+    config_dictionary.patience: 50,
+    config_dictionary.threshold: 1e-3,
+    config_dictionary.cooldown: 10,
+}
+# Combine configurations.
+optimization_configuration = {
+    config_dictionary.optimization: optimizer_dict,
+    config_dictionary.scheduler: scheduler_dict,
+}
+
 data_parser = PaintCalibrationDataParser(
     sample_limit=50, centroid_extraction_method=paint_mappings.UTIS_KEY
 )
@@ -226,34 +257,6 @@
         scenario_file=scenario_file, device=device
     )

-    # Configure the learning rate scheduler. The example scheduler parameter dict includes
-    # example parameters for all three possible schedulers.
-    scheduler = (
-        config_dictionary.reduce_on_plateau
-    )  # exponential, cyclic or reduce_on_plateau
-    scheduler_parameters = {
-        config_dictionary.gamma: 0.9,
-        config_dictionary.min: 1e-6,
-        config_dictionary.max: 1e-3,
-        config_dictionary.step_size_up: 500,
-        config_dictionary.reduce_factor: 0.0001,
-        config_dictionary.patience: 50,
-        config_dictionary.threshold: 1e-3,
-        config_dictionary.cooldown: 10,
-    }
-
-    # Set optimization parameters.
-    optimization_configuration = {
-        config_dictionary.initial_learning_rate: 0.0005,
-        config_dictionary.tolerance: 0.0005,
-        config_dictionary.max_epoch: 500,
-        config_dictionary.log_step: 3,
-        config_dictionary.early_stopping_delta: 1e-4,
-        config_dictionary.early_stopping_patience: 300,
-        config_dictionary.scheduler: scheduler,
-        config_dictionary.scheduler_parameters: scheduler_parameters,
-    }
-
     bitmaps_before, _ = create_fluxes(
         data_parser=data_parser_plots,
     )
diff --git a/tutorials/05_motor_positions_optimizer_tutorial.py b/tutorials/05_motor_positions_optimizer_tutorial.py
index 7c5708713..5c2dc7943 100644
--- a/tutorials/05_motor_positions_optimizer_tutorial.py
+++ b/tutorials/05_motor_positions_optimizer_tutorial.py
@@ -16,19 +16,20 @@

 #############################################################################################################
 # Define helper functions for the plots.
-# Skip to line 149 for the tutorial code.
+# Skip to line 110 for the tutorial code.
 #############################################################################################################


-def create_flux_plot_before_optimization() -> None:
-    """Create data to plot the heliostat fluxes."""
-    total_flux = torch.zeros(
-        (
-            bitmap_resolution[index_mapping.unbatched_bitmap_e],
-            bitmap_resolution[index_mapping.unbatched_bitmap_u],
-        ),
-        device=device,
-    )
+def create_flux_plot(id: str) -> None:
+    """
+    Create flux plots.
+
+    Parameters
+    ----------
+    id : str
+        Identifier of the flux plot, either "before" or "after" the optimization.
+    """
+    total_flux = torch.zeros((256, 256), device=device)

     for heliostat_group_index, heliostat_group in enumerate(
         scenario.heliostat_field.heliostat_groups
@@ -48,54 +49,20 @@
             device=device,
         )

-        # Align heliostats.
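The reduce_on_plateau settings in the scheduler_dict above mirror the parameters of PyTorch's built-in scheduler. If ARTIST forwards them to torch.optim.lr_scheduler.ReduceLROnPlateau (an assumption; the config keys merely suggest it), the correspondence would look roughly like this:

    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0005)

    # Plausible mapping of the config keys above onto the PyTorch scheduler.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        factor=0.0001,   # config_dictionary.reduce_factor
        patience=50,     # config_dictionary.patience
        threshold=1e-3,  # config_dictionary.threshold
        cooldown=10,     # config_dictionary.cooldown
        min_lr=1e-6,     # config_dictionary.min
    )

    # ReduceLROnPlateau steps on a monitored metric, e.g. the epoch loss.
    scheduler.step(0.5)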
-        heliostat_group.align_surfaces_with_incident_ray_directions(
-            aim_points=scenario.target_areas.centers[target_area_mask],
-            incident_ray_directions=incident_ray_directions,
-            active_heliostats_mask=active_heliostats_mask,
-            device=device,
-        )
-
-        # Create a ray tracer.
-        ray_tracer = HeliostatRayTracer(
-            scenario=scenario,
-            heliostat_group=heliostat_group,
-            batch_size=heliostat_group.number_of_active_heliostats,
-            bitmap_resolution=torch.tensor([256, 256], device=device),
-        )
-
-        # Perform heliostat-based ray tracing.
-        bitmaps_per_heliostat = ray_tracer.trace_rays(
-            incident_ray_directions=incident_ray_directions,
-            active_heliostats_mask=active_heliostats_mask,
-            target_area_mask=target_area_mask,
-            device=device,
-        )
-
-        flux_distribution_on_target = ray_tracer.get_bitmaps_per_target(
-            bitmaps_per_heliostat=bitmaps_per_heliostat,
-            target_area_mask=target_area_mask,
-            device=device,
-        )[target_area_index]
-
-        total_flux += flux_distribution_on_target
-
-    # Create the plot.
-    plt.imshow(flux_distribution_on_target.cpu().detach(), cmap="gray")
-    plt.axis("off")
-    plt.title("Flux before aimpoint optimization")
-    plt.savefig("flux_before_aimpoint_optimization.png")
-
-
-def create_flux_plot_after_optimization() -> None:
-    """Create data to plot the heliostat fluxes."""
-    total_flux = torch.zeros(
-        (
-            bitmap_resolution[index_mapping.unbatched_bitmap_e],
-            bitmap_resolution[index_mapping.unbatched_bitmap_u],
-        ),
-        device=device,
-    )
+        # Align heliostats.
+        if id == "before":
+            heliostat_group.align_surfaces_with_incident_ray_directions(
+                aim_points=scenario.target_areas.centers[target_area_mask],
+                incident_ray_directions=incident_ray_directions,
+                active_heliostats_mask=active_heliostats_mask,
+                device=device,
+            )
+        elif id == "after":
+            heliostat_group.align_surfaces_with_motor_positions(
+                motor_positions=heliostat_group.kinematic.active_motor_positions,
+                active_heliostats_mask=active_heliostats_mask,
+                device=device,
+            )

     for heliostat_group_index, heliostat_group in enumerate(
         scenario.heliostat_field.heliostat_groups
@@ -108,26 +75,13 @@
             device=device,
         )
     )
-
-        # Activate heliostats.
-        heliostat_group.activate_heliostats(
-            active_heliostats_mask=active_heliostats_mask,
-            device=device,
-        )
-
-        # Align heliostats.
-        heliostat_group.align_surfaces_with_motor_positions(
-            motor_positions=heliostat_group.kinematic.active_motor_positions,
-            active_heliostats_mask=active_heliostats_mask,
-            device=device,
-        )
-
         # Create a ray tracer.
         ray_tracer = HeliostatRayTracer(
             scenario=scenario,
             heliostat_group=heliostat_group,
             batch_size=heliostat_group.number_of_active_heliostats,
             bitmap_resolution=torch.tensor([256, 256], device=device),
+            dni=dni,
         )

         # Perform heliostat-based ray tracing.
@@ -147,10 +101,10 @@
         total_flux += flux_distribution_on_target

     # Create the plot.
-    plt.imshow(flux_distribution_on_target.cpu().detach(), cmap="gray")
+    plt.imshow(total_flux.cpu().detach(), cmap="gray")
     plt.axis("off")
-    plt.title("Flux after aimpoint optimization")
-    plt.savefig("flux_after_aimpoint_optimization.png")
+    plt.title(f"Flux {id} aimpoint optimization, total flux: {total_flux.sum():.3f}")
+    plt.savefig(f"flux_{id}_aimpoint_optimization.png")


#############################################################################################################
@@ -166,6 +120,43 @@

 # Specify the path to your scenario.h5 file.
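The branch in the hunk above switches between aligning from incident ray directions (ideal aim-point tracking, used before optimization) and aligning from the kinematic's motor positions (the quantities the optimizer actually updates, used after). As a conceptual sketch of the mirror law behind the first mode, for a single flat mirror and idealized geometry, not ARTIST's kinematic model:

    import torch

    def ideal_mirror_normal(incident: torch.Tensor, aim_direction: torch.Tensor) -> torch.Tensor:
        # For a flat mirror, the normal bisects the (reversed) incident ray and
        # the direction toward the aim point: n ~ (aim_direction - incident).
        return torch.nn.functional.normalize(aim_direction - incident, dim=-1)

    # Light travels north and downward; the aim point lies north and upward.
    incident = torch.nn.functional.normalize(torch.tensor([0.0, 1.0, -1.0]), dim=-1)
    aim = torch.nn.functional.normalize(torch.tensor([0.0, 1.0, 1.0]), dim=-1)
    print(ideal_mirror_normal(incident, aim))  # [0, 0, 1]: straight up for this geometry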
scenario_path = pathlib.Path("please/insert/the/path/to/the/scenario/here/scenario.h5") +# Set optimizer parameters. +optimizer_dict = { + config_dictionary.initial_learning_rate: 3e-4, + config_dictionary.tolerance: 0.0005, + config_dictionary.max_epoch: 30, + config_dictionary.batch_size: 50, + config_dictionary.log_step: 3, + config_dictionary.early_stopping_delta: 1e-4, + config_dictionary.early_stopping_patience: 100, + config_dictionary.early_stopping_window: 100, +} +# Configure the learning rate scheduler. +scheduler_dict = { + config_dictionary.scheduler_type: config_dictionary.reduce_on_plateau, + config_dictionary.gamma: 0.9, + config_dictionary.min: 1e-6, + config_dictionary.max: 1e-3, + config_dictionary.step_size_up: 500, + config_dictionary.reduce_factor: 0.3, + config_dictionary.patience: 100, + config_dictionary.threshold: 1e-3, + config_dictionary.cooldown: 10, +} +# Configure the regularizers and constraints. +constraint_dict = { + config_dictionary.rho_energy: 1.0, + config_dictionary.max_flux_density: 1e10, + config_dictionary.rho_pixel: 1.0, + config_dictionary.lambda_lr: 0.1, +} +# Combine configurations. +optimization_configuration = { + config_dictionary.optimization: optimizer_dict, + config_dictionary.scheduler: scheduler_dict, + config_dictionary.constraints: constraint_dict, +} + number_of_heliostat_groups = Scenario.get_number_of_heliostat_groups_from_hdf5( scenario_path=scenario_path ) @@ -182,8 +173,16 @@ def create_flux_plot_after_optimization() -> None: scenario_file=scenario_file, device=device, ) - + # Set DNI W/m^2. + dni = 800 + # Set number of rays per surface point. scenario.set_number_of_rays(number_of_rays=4) + # Set incident ray direction. + incident_ray_direction = torch.tensor([0.0, 1.0, 0.0, 0.0], device=device) + # Set target area. + target_area_index = 1 + # Set target flux integral. + target_flux_integral = 10000 # Set loss function and define the ground truth. # For an optimization using a focal spot as ground truth use this loss definition: @@ -191,52 +190,21 @@ def create_flux_plot_after_optimization() -> None: # [1.1493, -0.5030, 57.0474, 1.0000], device=device # ) # loss_definition = FocalSpotLoss(scenario=scenario) - # For an optimization using a distribution as target use this loss function definition: e_trapezoid = utils.trapezoid_distribution( - total_width=256, slope_width=30, plateau_width=180, device=device + total_width=256, slope_width=30, plateau_width=110, device=device ) u_trapezoid = utils.trapezoid_distribution( - total_width=256, slope_width=30, plateau_width=180, device=device + total_width=256, slope_width=30, plateau_width=110, device=device ) ground_truth = u_trapezoid.unsqueeze( index_mapping.unbatched_bitmap_u ) * e_trapezoid.unsqueeze(index_mapping.unbatched_bitmap_e) - loss_definition = KLDivergenceLoss() - - # Configure the learning rate scheduler. The example scheduler parameter dict includes - # example parameters for all three possible schedulers. - scheduler = ( - config_dictionary.reduce_on_plateau - ) # Choose from: exponential, cyclic or reduce_on_plateau - scheduler_parameters = { - config_dictionary.gamma: 0.9, - config_dictionary.min: 1e-6, - config_dictionary.max: 1e-3, - config_dictionary.step_size_up: 500, - config_dictionary.reduce_factor: 0.3, - config_dictionary.patience: 100, - config_dictionary.threshold: 1e-3, - config_dictionary.cooldown: 10, - } + ground_truth = (ground_truth / ground_truth.sum()) * target_flux_integral - # Set optimizer parameters. 
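The dni = 800 setting introduced in the hunk above feeds the direct normal irradiance into the ray tracer. As a back-of-the-envelope check of the scale involved, using the heliostat dimensions from the property files in this patch (about 3.22 m x 2.56 m) and assuming, for illustration only, perfect reflectance and normal incidence:

    dni = 800.0    # direct normal irradiance in W/m^2
    width = 3.22   # heliostat width in m (from heliostat-properties.json)
    height = 2.56  # heliostat height in m (from heliostat-properties.json)

    # Idealized upper bound on the power one heliostat redirects to the target.
    power = dni * width * height
    print(f"{power:.0f} W")  # roughly 6.6 kW per heliostat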
- optimization_configuration = { - config_dictionary.initial_learning_rate: 1e-3, - config_dictionary.tolerance: 0.0005, - config_dictionary.max_epoch: 30, - config_dictionary.log_step: 3, - config_dictionary.early_stopping_delta: 1e-4, - config_dictionary.early_stopping_patience: 100, - config_dictionary.scheduler: scheduler, - config_dictionary.scheduler_parameters: scheduler_parameters, - } - - incident_ray_direction = torch.tensor([0.0, 1.0, 0.0, 0.0], device=device) - target_area_index = 1 - bitmap_resolution = torch.tensor([256, 256], device=device) + loss_definition = KLDivergenceLoss() - create_flux_plot_before_optimization() + create_flux_plot(id="before") # Create the motor positions optimizer. motor_positions_optimizer = MotorPositionsOptimizer( @@ -246,7 +214,7 @@ def create_flux_plot_after_optimization() -> None: incident_ray_direction=incident_ray_direction, target_area_index=target_area_index, ground_truth=ground_truth, - bitmap_resolution=bitmap_resolution, + dni=dni, device=device, ) @@ -258,4 +226,4 @@ def create_flux_plot_after_optimization() -> None: # Inspect the synchronized loss per heliostat. Heliostats that have not been optimized have an infinite loss. print(f"rank {ddp_setup['rank']}, final loss {final_loss_per_heliostat}") -create_flux_plot_after_optimization() +create_flux_plot(id="after") diff --git a/tutorials/data/paint/AA28/heliostat-properties.json b/tutorials/data/paint/AA28/heliostat-properties.json index 8d9bf17b6..4b3a14d05 100644 --- a/tutorials/data/paint/AA28/heliostat-properties.json +++ b/tutorials/data/paint/AA28/heliostat-properties.json @@ -1,78 +1,134 @@ { - "heliostat_position": [50.9136452423776, 6.38730246837658, 88.62004852], - "height": 2.55999994277954, - "width": 3.22000002861023, - "initial_orientation": [0, 0, 1], - "kinematic_properties": { - "actuators": [ - { - "type_axis": "ideal", - "min_increment": 0, - "max_increment": 68618, - "increment": 154166.6667, - "offset_shift": 0, - "initial_stroke_length": 0.07586774, - "offset": 0.335308, - "pivot_radius": 0.338095, - "radius_shift": 0, - "clockwise_axis_movement": 0, - "initial_angle": 0.02415194, - "min_movement_angle": 0.017594663, - "max_movement_angle": 1.570796327, - "movement_speed": 0 - }, - { - "type_axis": "ideal", - "min_increment": 0, - "max_increment": 76074, - "increment": 154166.6667, - "offset_shift": 0, - "initial_stroke_length": 0.076957598, - "offset": 0.340771, - "pivot_radius": 0.3191, - "radius_shift": 0, - "clockwise_axis_movement": 1, - "initial_angle": 0.946485181, - "min_movement_angle": -0.95993, - "max_movement_angle": 0.943837714, - "movement_speed": 0 - } - ], - "joint_translation_e_1": 0, - "joint_translation_n_1": 0, - "joint_translation_u_1": 0, - "joint_translation_e_2": 0, - "joint_translation_n_2": 0, - "joint_translation_u_2": 0.314999997615814, - "concentrator_translation_e": 0, - "concentrator_translation_n": -0.177550002932549, - "concentrator_translation_u": -0.404500007629395 - }, - "facet_properties": { - "canting_type": "receiver canting", - "number_of_facets": 4, - "facets": [ - { - "translation_vector": [-0.8075, 0.6425, 0.0402], - "canting_e": [0.802485806709588, 0, -0.00477361718903754], - "canting_n": [0.000017949241937134, 0.63749285710955, 0.00301719668132688] - }, - { - "translation_vector": [0.8075, 0.6425, 0.0402], - "canting_e": [0.802485806709588, 0, 0.00477361718903754], - "canting_n": [-0.000017949241937134, 0.63749285710955, 0.00301719668132688] - }, - { - "translation_vector": [-0.8075, -0.6425, 0.0402], - "canting_e": 
[0.802485806709588, 0, -0.00477361718903754], - "canting_n": [-0.000017949241937134, 0.63749285710955, -0.00301719668132688] - }, - { - "translation_vector": [0.8075, -0.6425, 0.0402], - "canting_e": [0.802485806709588, 0, 0.00477361718903754], - "canting_n": [0.000017949241937134, 0.63749285710955, -0.00301719668132688] - } - ] - }, - "renovation": "2021-04-15" - } + "heliostat_position": [ + 50.9136457318557, + 6.3873255928067865, + 88.62004852 + ], + "height": 2.559999942779541, + "width": 3.2200000286102295, + "initial_orientation": [ + 0.0, + 0.0, + 1.0 + ], + "kinematic_properties": { + "actuators": [ + { + "type_axis": "ideal", + "min_increment": 0, + "max_increment": 68618, + "increment": 154166.6667, + "offset_shift": 0, + "initial_stroke_length": 0.07586774, + "offset": 0.335308, + "pivot_radius": 0.338095, + "radius_shift": 0, + "clockwise_axis_movement": 0, + "initial_angle": 0.02415194, + "min_movement_angle": 0.017594663, + "max_movement_angle": 1.570796327, + "movement_speed": 0 + }, + { + "type_axis": "ideal", + "min_increment": 0, + "max_increment": 76074, + "increment": 154166.6667, + "offset_shift": 0, + "initial_stroke_length": 0.076957598, + "offset": 0.340771, + "pivot_radius": 0.3191, + "radius_shift": 0, + "clockwise_axis_movement": 1, + "initial_angle": 0.946485181, + "min_movement_angle": -0.95993, + "max_movement_angle": 0.943837714, + "movement_speed": 0 + } + ], + "joint_translation_e_1": 0.0, + "joint_translation_n_1": 0.0, + "joint_translation_u_1": 0.0, + "joint_translation_e_2": 0.0, + "joint_translation_n_2": 0.0, + "joint_translation_u_2": 0.0, + "concentrator_translation_e": 0.0, + "concentrator_translation_n": 0.175, + "concentrator_translation_u": 0.0 + }, + "facet_properties": { + "canting_type": "receiver canting", + "number_of_facets": 4, + "facets": [ + { + "translation_vector": [ + -0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024858067095879, + -0.0, + -0.0047736171890375425 + ], + "canting_n": [ + 1.794924193713494e-05, + 0.6374928571095503, + 0.003017196681326875 + ] + }, + { + "translation_vector": [ + 0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024858067095879, + -0.0, + 0.0047736171890375425 + ], + "canting_n": [ + -1.794924193713494e-05, + 0.6374928571095503, + 0.003017196681326875 + ] + }, + { + "translation_vector": [ + -0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024858067095879, + -0.0, + -0.0047736171890375425 + ], + "canting_n": [ + -1.794924193713494e-05, + 0.6374928571095503, + -0.003017196681326875 + ] + }, + { + "translation_vector": [ + 0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024858067095879, + -0.0, + 0.0047736171890375425 + ], + "canting_n": [ + 1.794924193713494e-05, + 0.6374928571095503, + -0.003017196681326875 + ] + } + ] + }, + "renovation": "2021-04-15" +} diff --git a/tutorials/data/paint/AA31/125284-calibration-properties.json b/tutorials/data/paint/AA31/125284-calibration-properties.json index 8d448a53a..753a86fd6 100644 --- a/tutorials/data/paint/AA31/125284-calibration-properties.json +++ b/tutorials/data/paint/AA31/125284-calibration-properties.json @@ -1,21 +1,21 @@ { "motor_position": { - "axis_1_motor_position": 24560, - "axis_2_motor_position": 49189 + "axis_1_motor_position": 24560, + "axis_2_motor_position": 49189 }, "target_name": "solar_tower_juelich_upper", - "focal_spot": { - "HeliOS": [ - 50.913392027527664, - 6.387825919937372, - 130.16671568075242 - ], - "UTIS": [ - 50.91339204305797, - 6.38782614314434, - 130.15333938598633 - ] - }, "sun_elevation": 
47.872624059100566, - "sun_azimuth": -65.08347815150802 + "sun_azimuth": -65.08347815150802, + "focal_spot": { + "HeliOS": [ + 50.91339202434541, + 6.387825677708846, + 130.16671568075242 + ], + "UTIS": [ + 50.91339203913275, + 6.387825901039664, + 130.15333938598633 + ] + } } diff --git a/tutorials/data/paint/AA31/126372-calibration-properties.json b/tutorials/data/paint/AA31/126372-calibration-properties.json index 500bc0c29..45203d672 100644 --- a/tutorials/data/paint/AA31/126372-calibration-properties.json +++ b/tutorials/data/paint/AA31/126372-calibration-properties.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 27834, - "axis_2_motor_position": 20458 - }, - "target_name": "solar_tower_juelich_lower", - "focal_spot": { - "HeliOS": [ - 50.9133920280681, - 6.3878252006769705, - 122.9162076007083 - ], - "UTIS": [ - 50.91339204305797, - 6.387825133340274, - 122.78834533691406 - ] - }, - "sun_elevation": 50.5553624952223, - "sun_azimuth": 59.79079145762157 + "motor_position": { + "axis_1_motor_position": 27834, + "axis_2_motor_position": 20458 + }, + "target_name": "solar_tower_juelich_lower", + "sun_elevation": 50.5553624952223, + "sun_azimuth": 59.79079145762157, + "focal_spot": { + "HeliOS": [ + 50.91339202727877, + 6.3878249584704525, + 122.9162076007083 + ], + "UTIS": [ + 50.9133920424923, + 6.387824891260173, + 122.78834533691406 + ] + } } diff --git a/tutorials/data/paint/AA31/heliostat-properties.json b/tutorials/data/paint/AA31/heliostat-properties.json index 754a8facd..ea053b2f7 100644 --- a/tutorials/data/paint/AA31/heliostat-properties.json +++ b/tutorials/data/paint/AA31/heliostat-properties.json @@ -1,8 +1,16 @@ { - "heliostat_position": [50.9136444630788, 6.38749024195609, 88.67166138], - "height": 2.55999994277954, - "width": 3.22000002861023, - "initial_orientation": [0, 0, 1], + "heliostat_position": [ + 50.91364495251966, + 6.387513366387674, + 88.67166138 + ], + "height": 2.559999942779541, + "width": 3.2200000286102295, + "initial_orientation": [ + 0.0, + 0.0, + 1.0 + ], "kinematic_properties": { "actuators": [ { @@ -38,39 +46,87 @@ "movement_speed": 0 } ], - "joint_translation_e_1": 0, - "joint_translation_n_1": 0, - "joint_translation_u_1": 0, - "joint_translation_e_2": 0, - "joint_translation_n_2": 0, - "joint_translation_u_2": 0.314999997615814, - "concentrator_translation_e": 0, - "concentrator_translation_n": -0.177550002932549, - "concentrator_translation_u": -0.404500007629395 + "joint_translation_e_1": 0.0, + "joint_translation_n_1": 0.0, + "joint_translation_u_1": 0.0, + "joint_translation_e_2": 0.0, + "joint_translation_n_2": 0.0, + "joint_translation_u_2": 0.0, + "concentrator_translation_e": 0.0, + "concentrator_translation_n": -0.175, + "concentrator_translation_u": 0.0 }, "facet_properties": { "canting_type": "receiver canting", "number_of_facets": 4, "facets": [ { - "translation_vector": [-0.8075, 0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, -0.00498456787317991], - "canting_n": [0.000019569211872294, 0.637492179870606, 0.00315052270889282] + "translation_vector": [ + -0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + -0.004984567873179913 + ], + "canting_n": [ + 1.9569211872294545e-05, + 0.6374921798706055, + 0.0031505227088928223 + ] }, { - "translation_vector": [0.8075, 0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, 0.00498456787317991], - "canting_n": [-0.000019569211872294, 0.637492179870606, 0.00315052270889282] + "translation_vector": [ + 0.8075, + 0.6425, + 0.0402 + ], + 
"canting_e": [ + 0.8024845719337463, + -0.0, + 0.004984567873179913 + ], + "canting_n": [ + -1.9569211872294545e-05, + 0.6374921798706055, + 0.0031505227088928223 + ] }, { - "translation_vector": [-0.8075, -0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, -0.00498456787317991], - "canting_n": [-0.000019569211872294, 0.637492179870606, -0.00315052270889282] + "translation_vector": [ + -0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + -0.004984567873179913 + ], + "canting_n": [ + -1.9569211872294545e-05, + 0.6374921798706055, + -0.0031505227088928223 + ] }, { - "translation_vector": [0.8075, -0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, 0.00498456787317991], - "canting_n": [0.000019569211872294, 0.637492179870606, -0.00315052270889282] + "translation_vector": [ + 0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + 0.004984567873179913 + ], + "canting_n": [ + 1.9569211872294545e-05, + 0.6374921798706055, + -0.0031505227088928223 + ] } ] }, diff --git a/tutorials/data/paint/AA39/270398-calibration-properties.json b/tutorials/data/paint/AA39/270398-calibration-properties.json index 0425c9816..4c7bf8abd 100644 --- a/tutorials/data/paint/AA39/270398-calibration-properties.json +++ b/tutorials/data/paint/AA39/270398-calibration-properties.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 28837, - "axis_2_motor_position": 70357 - }, - "target_name": "multi_focus_tower", - "focal_spot": { - "HeliOS": [ - 50.91339561809233, - 6.3875752345482715, - 138.5341255112699 - ], - "UTIS": [ - 50.913395620694814, - 6.387576142174752, - 138.54986953735352 - ] - }, - "sun_elevation": 23.375303561551995, - "sun_azimuth": -62.23899565972679 + "motor_position": { + "axis_1_motor_position": 28837, + "axis_2_motor_position": 70357 + }, + "target_name": "multi_focus_tower", + "sun_elevation": 23.375303561551995, + "sun_azimuth": -62.23899565972679, + "focal_spot": { + "HeliOS": [ + 50.91339644856586, + 6.387575028285388, + 138.5341255112699 + ], + "UTIS": [ + 50.91339644815061, + 6.387575935911539, + 138.54986953735352 + ] + } } diff --git a/tutorials/data/paint/AA39/271633-calibration-properties.json b/tutorials/data/paint/AA39/271633-calibration-properties.json index b0bc0865e..5b07ac31c 100644 --- a/tutorials/data/paint/AA39/271633-calibration-properties.json +++ b/tutorials/data/paint/AA39/271633-calibration-properties.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 30094, - "axis_2_motor_position": 33406 - }, - "target_name": "solar_tower_juelich_lower", - "focal_spot": { - "HeliOS": [ - 50.91339203075979, - 6.3878288087819435, - 122.6312633480574 - ], - "UTIS": [ - 50.91339204305797, - 6.387830331032702, - 122.61065673828124 - ] - }, - "sun_elevation": 37.324952494177886, - "sun_azimuth": 60.120563555225715 + "motor_position": { + "axis_1_motor_position": 30094, + "axis_2_motor_position": 33406 + }, + "target_name": "solar_tower_juelich_lower", + "sun_elevation": 37.324952494177886, + "sun_azimuth": 60.120563555225715, + "focal_spot": { + "HeliOS": [ + 50.913392017966494, + 6.387828566510014, + 122.6312633480574 + ], + "UTIS": [ + 50.913392025199904, + 6.387830088826097, + 122.61065673828124 + ] + } } diff --git a/tutorials/data/paint/AA39/275564-calibration-properties.json b/tutorials/data/paint/AA39/275564-calibration-properties.json index 81e7ed397..c35e90390 100644 --- a/tutorials/data/paint/AA39/275564-calibration-properties.json +++ 
b/tutorials/data/paint/AA39/275564-calibration-properties.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 22585, - "axis_2_motor_position": 48224 - }, - "target_name": "multi_focus_tower", - "focal_spot": { - "HeliOS": [ - 50.913395618056676, - 6.387575976613831, - 138.57460135946872 - ], - "UTIS": [ - 50.913395620694814, - 6.387576747263912, - 138.61198806762695 - ] - }, - "sun_elevation": 58.78088789763654, - "sun_azimuth": 31.923206270952843 + "motor_position": { + "axis_1_motor_position": 22585, + "axis_2_motor_position": 48224 + }, + "target_name": "multi_focus_tower", + "sun_elevation": 58.78088789763654, + "sun_azimuth": 31.923206270952843, + "focal_spot": { + "HeliOS": [ + 50.91339644606301, + 6.3875757703326785, + 138.57460135946872 + ], + "UTIS": [ + 50.913396446138826, + 6.387576540986046, + 138.61198806762695 + ] + } } diff --git a/tutorials/data/paint/AA39/heliostat-properties.json b/tutorials/data/paint/AA39/heliostat-properties.json index 9b6d71a57..3862f8684 100644 --- a/tutorials/data/paint/AA39/heliostat-properties.json +++ b/tutorials/data/paint/AA39/heliostat-properties.json @@ -1,8 +1,16 @@ { - "heliostat_position": [50.9136428083779, 6.38799014568948, 88.68894196], - "height": 2.55999994277954, - "width": 3.22000002861023, - "initial_orientation": [0, 0, 1], + "heliostat_position": [ + 50.913643297719794, + 6.3880132701249455, + 88.68894196 + ], + "height": 2.559999942779541, + "width": 3.2200000286102295, + "initial_orientation": [ + 0.0, + 0.0, + 1.0 + ], "kinematic_properties": { "actuators": [ { @@ -38,39 +46,87 @@ "movement_speed": 0 } ], - "joint_translation_e_1": 0, - "joint_translation_n_1": 0, - "joint_translation_u_1": 0, - "joint_translation_e_2": 0, - "joint_translation_n_2": 0, - "joint_translation_u_2": 0.314999997615814, - "concentrator_translation_e": 0, - "concentrator_translation_n": -0.177550002932549, - "concentrator_translation_u": -0.404500007629395 + "joint_translation_e_1": 0.0, + "joint_translation_n_1": 0.0, + "joint_translation_u_1": 0.0, + "joint_translation_e_2": 0.0, + "joint_translation_n_2": 0.0, + "joint_translation_u_2": 0.0, + "concentrator_translation_e": 0.0, + "concentrator_translation_n": 0.175, + "concentrator_translation_u": 0.0 }, "facet_properties": { "canting_type": "receiver canting", "number_of_facets": 4, "facets": [ { - "translation_vector": [-0.8075, 0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, -0.00498456787317991], - "canting_n": [0.000019569211872294, 0.637492179870606, 0.00315052270889282] + "translation_vector": [ + -0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + -0.004984567873179913 + ], + "canting_n": [ + 1.9569211872294545e-05, + 0.6374921798706055, + 0.0031505227088928223 + ] }, { - "translation_vector": [0.8075, 0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, 0.00498456787317991], - "canting_n": [-0.000019569211872294, 0.637492179870606, 0.00315052270889282] + "translation_vector": [ + 0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + 0.004984567873179913 + ], + "canting_n": [ + -1.9569211872294545e-05, + 0.6374921798706055, + 0.0031505227088928223 + ] }, { - "translation_vector": [-0.8075, -0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, -0.00498456787317991], - "canting_n": [-0.000019569211872294, 0.637492179870606, -0.00315052270889282] + "translation_vector": [ + -0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + -0.004984567873179913 + ], + "canting_n": [ + 
-1.9569211872294545e-05, + 0.6374921798706055, + -0.0031505227088928223 + ] }, { - "translation_vector": [0.8075, -0.6425, 0.0402], - "canting_e": [0.802484571933746, 0, 0.00498456787317991], - "canting_n": [0.000019569211872294, 0.637492179870606, -0.00315052270889282] + "translation_vector": [ + 0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024845719337463, + -0.0, + 0.004984567873179913 + ], + "canting_n": [ + 1.9569211872294545e-05, + 0.6374921798706055, + -0.0031505227088928223 + ] } ] }, diff --git a/tutorials/data/paint/AC43/62900-calibration-properties.json b/tutorials/data/paint/AC43/62900-calibration-properties.json index 0da31ace1..3aad9db0e 100644 --- a/tutorials/data/paint/AC43/62900-calibration-properties.json +++ b/tutorials/data/paint/AC43/62900-calibration-properties.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 45850, - "axis_2_motor_position": 56159 - }, - "target_name": "multi_focus_tower", - "focal_spot": { - "HeliOS": [ - 50.913395618489496, - 6.387575066266025, - 139.16720665427795 - ], - "UTIS": [ - 50.913395620694814, - 6.387575567894667, - 139.07592010498047 - ] - }, - "sun_elevation": 19.89165825200497, - "sun_azimuth": 4.039972592503291 + "motor_position": { + "axis_1_motor_position": 45850, + "axis_2_motor_position": 56159 + }, + "target_name": "multi_focus_tower", + "sun_elevation": 19.89165825200497, + "sun_azimuth": 4.039972592503291, + "focal_spot": { + "HeliOS": [ + 50.9133964495225, + 6.387574860010517, + 139.16720665427795 + ], + "UTIS": [ + 50.91339645005998, + 6.3875753616453625, + 139.07592010498047 + ] + } } diff --git a/tutorials/data/paint/AC43/72752-calibration-properties.json b/tutorials/data/paint/AC43/72752-calibration-properties.json index 86e57981d..5fad41bc1 100644 --- a/tutorials/data/paint/AC43/72752-calibration-properties.json +++ b/tutorials/data/paint/AC43/72752-calibration-properties.json @@ -1,21 +1,21 @@ { - "motor_position": { - "axis_1_motor_position": 44706, - "axis_2_motor_position": 73338 - }, - "target_name": "solar_tower_juelich_lower", - "focal_spot": { - "HeliOS": [ - 50.91339203148444, - 6.3878315470020155, - 122.75970234459815 - ], - "UTIS": [ - 50.91339204305797, - 6.387832143045703, - 122.70735168457033 - ] - }, - "sun_elevation": 9.583570217402297, - "sun_azimuth": -64.89874544268952 + "motor_position": { + "axis_1_motor_position": 44706, + "axis_2_motor_position": 73338 + }, + "target_name": "solar_tower_juelich_lower", + "sun_elevation": 9.583570217402297, + "sun_azimuth": -64.89874544268952, + "focal_spot": { + "HeliOS": [ + 50.91339200958119, + 6.387831304669475, + 122.75970234459815 + ], + "UTIS": [ + 50.91339201917141, + 6.387831900794994, + 122.70735168457033 + ] + } } diff --git a/tutorials/data/paint/AC43/heliostat-properties.json b/tutorials/data/paint/AC43/heliostat-properties.json index b5ac4f4be..44dfd6a7e 100644 --- a/tutorials/data/paint/AC43/heliostat-properties.json +++ b/tutorials/data/paint/AC43/heliostat-properties.json @@ -1,78 +1,134 @@ { - "heliostat_position": [50.9137246599368, 6.38824162208941, 88.73834991], - "height": 2.55999994277954, - "width": 3.22000002861023, - "initial_orientation": [0, 0, 1], - "kinematic_properties": { - "actuators": [ - { - "type_axis": "ideal", - "min_increment": 0, - "max_increment": 69525, - "increment": 154166.6667, - "offset_shift": 0, - "initial_stroke_length": 0.071051608, - "offset": 0.335308, - "pivot_radius": 0.338095, - "radius_shift": 0, - "clockwise_axis_movement": 0, - "initial_angle": 0.007796821, - 
"min_movement_angle": -0.009363691, - "max_movement_angle": 1.570796327, - "movement_speed": 0 - }, - { - "type_axis": "ideal", - "min_increment": 0, - "max_increment": 74920, - "increment": 154166.6667, - "offset_shift": 0, - "initial_stroke_length": 0.076357226, - "offset": 0.340771, - "pivot_radius": 0.3191, - "radius_shift": 0, - "clockwise_axis_movement": 1, - "initial_angle": 0.933083607, - "min_movement_angle": -0.95993, - "max_movement_angle": 0.9164343, - "movement_speed": 0 - } - ], - "joint_translation_e_1": 0, - "joint_translation_n_1": 0, - "joint_translation_u_1": 0, - "joint_translation_e_2": 0, - "joint_translation_n_2": 0, - "joint_translation_u_2": 0.314999997615814, - "concentrator_translation_e": 0, - "concentrator_translation_n": -0.177550002932549, - "concentrator_translation_u": -0.404500007629395 - }, - "facet_properties": { - "canting_type": "receiver canting", - "number_of_facets": 4, - "facets": [ - { - "translation_vector": [-0.8075, 0.6425, 0.0402], - "canting_e": [0.802486548614803, 0, -0.00464349363949021], - "canting_n": [0.000017008391871989, 0.637493232598497, 0.00293495464922505] - }, - { - "translation_vector": [0.8075, 0.6425, 0.0402], - "canting_e": [0.802486548614803, 0, 0.00464349363949021], - "canting_n": [-0.000017008391871989, 0.637493232598497, 0.00293495464922505] - }, - { - "translation_vector": [-0.8075, -0.6425, 0.0402], - "canting_e": [0.802486548614803, 0, -0.00464349363949021], - "canting_n": [-0.000017008391871989, 0.637493232598497, -0.00293495464922505] - }, - { - "translation_vector": [0.8075, -0.6425, 0.0402], - "canting_e": [0.802486548614803, 0, 0.00464349363949021], - "canting_n": [0.000017008391871989, 0.637493232598497, -0.00293495464922505] - } - ] - }, - "renovation": "2021-03-04" - } + "heliostat_position": [ + 50.91372514922872, + 6.38826474656781, + 88.73834991 + ], + "height": 2.559999942779541, + "width": 3.2200000286102295, + "initial_orientation": [ + 0.0, + 0.0, + 1.0 + ], + "kinematic_properties": { + "actuators": [ + { + "type_axis": "ideal", + "min_increment": 0, + "max_increment": 69525, + "increment": 154166.6667, + "offset_shift": 0, + "initial_stroke_length": 0.071051608, + "offset": 0.335308, + "pivot_radius": 0.338095, + "radius_shift": 0, + "clockwise_axis_movement": 0, + "initial_angle": 0.007796821, + "min_movement_angle": -0.009363691, + "max_movement_angle": 1.570796327, + "movement_speed": 0 + }, + { + "type_axis": "ideal", + "min_increment": 0, + "max_increment": 74920, + "increment": 154166.6667, + "offset_shift": 0, + "initial_stroke_length": 0.076357226, + "offset": 0.340771, + "pivot_radius": 0.3191, + "radius_shift": 0, + "clockwise_axis_movement": 1, + "initial_angle": 0.933083607, + "min_movement_angle": -0.95993, + "max_movement_angle": 0.9164343, + "movement_speed": 0 + } + ], + "joint_translation_e_1": 0.0, + "joint_translation_n_1": 0.0, + "joint_translation_u_1": 0.0, + "joint_translation_e_2": 0.0, + "joint_translation_n_2": 0.0, + "joint_translation_u_2": 0.0, + "concentrator_translation_e": 0.0, + "concentrator_translation_n": 0.175, + "concentrator_translation_u": 0.0 + }, + "facet_properties": { + "canting_type": "receiver canting", + "number_of_facets": 4, + "facets": [ + { + "translation_vector": [ + -0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024865486148033, + -0.0, + -0.004643493639490212 + ], + "canting_n": [ + 1.7008391871989296e-05, + 0.6374932325984969, + 0.0029349546492250466 + ] + }, + { + "translation_vector": [ + 0.8075, + 0.6425, + 0.0402 + ], + "canting_e": [ + 
0.8024865486148033, + -0.0, + 0.004643493639490212 + ], + "canting_n": [ + -1.7008391871989296e-05, + 0.6374932325984969, + 0.0029349546492250466 + ] + }, + { + "translation_vector": [ + -0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024865486148033, + -0.0, + -0.004643493639490212 + ], + "canting_n": [ + -1.7008391871989296e-05, + 0.6374932325984969, + -0.0029349546492250466 + ] + }, + { + "translation_vector": [ + 0.8075, + -0.6425, + 0.0402 + ], + "canting_e": [ + 0.8024865486148033, + -0.0, + 0.004643493639490212 + ], + "canting_n": [ + 1.7008391871989296e-05, + 0.6374932325984969, + -0.0029349546492250466 + ] + } + ] + }, + "renovation": "2021-03-04" +} diff --git a/tutorials/data/paint/tower-measurements.json b/tutorials/data/paint/tower-measurements.json index 1072b3958..2bc4fbfab 100644 --- a/tutorials/data/paint/tower-measurements.json +++ b/tutorials/data/paint/tower-measurements.json @@ -1,56 +1,180 @@ { "power_plant_properties": { "ID": "WRI1030197", - "coordinates": [50.9134211225926, 6.38782475587486, 87] + "coordinates": [ + 50.913421122592574, + 6.387824755874856, + 87.0 + ] }, "solar_tower_juelich_upper": { "type": "planar", - "normal_vector": [0, 1, 0], + "normal_vector": [ + 0, + 1, + 0 + ], "coordinates": { - "center": [50.91339203684, 6.38782456351324, 130.097666666667], - "upper_left": [50.9133919650731, 6.38788598226217, 133.684], - "upper_middle": [50.9133919086783, 6.38782458377497, 133.71], - "upper_right": [50.913392112596, 6.38776328698828, 133.719], - "lower_left": [50.9133918659594, 6.38788605253239, 126.476], - "lower_right": [50.9133921569252, 6.38776347220538, 126.506] + "center": [ + 50.91339203683997, + 6.387824563513243, + 130.09766666666667 + ], + "upper_left": [ + 50.91339196507306, + 6.387885982262168, + 133.684 + ], + "upper_middle": [ + 50.91339190867827, + 6.387824583774971, + 133.71 + ], + "upper_right": [ + 50.91339211259599, + 6.387763286988281, + 133.719 + ], + "lower_left": [ + 50.913391865959426, + 6.387886052532387, + 126.476 + ], + "lower_right": [ + 50.91339215692524, + 6.387763472205384, + 126.506 + ] } }, "solar_tower_juelich_lower": { "type": "planar", - "normal_vector": [0, 1, 0], + "normal_vector": [ + 0, + 1, + 0 + ], "coordinates": { - "center": [50.91339203684, 6.38782456351324, 122.8815], - "upper_left": [50.9133918659594, 6.38788605253239, 126.476], - "upper_right": [50.9133921569252, 6.38776347220538, 126.506], - "lower_left": [50.9133918390403, 6.38788603808917, 119.268], - "lower_middle": [50.9133921065743, 6.38782454276512, 119.269], - "lower_right": [50.9133923375531, 6.38776321776524, 119.279] + "center": [ + 50.91339203683997, + 6.387824563513243, + 122.8815 + ], + "upper_left": [ + 50.913391865959426, + 6.387886052532387, + 126.476 + ], + "upper_right": [ + 50.91339215692524, + 6.387763472205384, + 126.506 + ], + "lower_left": [ + 50.913391839040266, + 6.387886038089168, + 119.268 + ], + "lower_middle": [ + 50.913392106574314, + 6.387824542765121, + 119.269 + ], + "lower_right": [ + 50.9133923375531, + 6.387763217765236, + 119.279 + ] } }, "multi_focus_tower": { "type": "planar", - "normal_vector": [0, 1, 0], + "normal_vector": [ + 0, + 1, + 0 + ], "coordinates": { - "center": [50.913396450887, 6.38757443672805, 138.97975], - "upper_left": [50.91339628901, 6.38761298332958, 142.175], - "upper_right": [50.9133966167729, 6.38753603235053, 142.172], - "lower_left": [50.9133963434157, 6.38761284159136, 135.789], - "lower_right": [50.9133965543239, 6.38753588964017, 135.783] + "center": [ + 50.91339645088695, + 
6.387574436728054, + 138.97975 + ], + "upper_left": [ + 50.91339628900999, + 6.387612983329586, + 142.175 + ], + "upper_right": [ + 50.91339661677292, + 6.387536032350528, + 142.172 + ], + "lower_left": [ + 50.913396343415734, + 6.387612841591359, + 135.789 + ], + "lower_right": [ + 50.91339655432385, + 6.3875358896401675, + 135.783 + ] } }, "receiver": { "type": "convex_cylinder", - "normal_vector": [0, 0.90630779, -0.42261826], + "normal_vector": [ + 0.0, + 0.90630779, + -0.42261826 + ], "coordinates": { - "center": [50.91341660151, 6.3878253047761, 142.22675], - "receiver_outer_upper_left": [50.913427272183, 6.3878568569144, 144.805], - "receiver_outer_upper_right": [50.9134277392519, 6.38779212125015, 144.82], - "receiver_outer_lower_left": [50.9134054755624, 6.38785629153485, 139.596], - "receiver_outer_lower_right": [50.9134057066037, 6.38779225067161, 139.592], - "receiver_inner_lower_left": [50.9134065441443, 6.38785392584286, 139.86], - "receiver_inner_lower_right": [50.9134066492965, 6.38779530140411, 139.862], - "receiver_inner_upper_left": [50.9134264540107, 6.38785420535071, 144.592], - "receiver_inner_upper_right": [50.9134267664737, 6.38779541198343, 144.593] + "center": [ + 50.91341660151, + 6.387825304776098, + 142.22674999999998 + ], + "receiver_outer_upper_left": [ + 50.91342727218299, + 6.387856856914401, + 144.805 + ], + "receiver_outer_upper_right": [ + 50.91342773925188, + 6.387792121250146, + 144.82 + ], + "receiver_outer_lower_left": [ + 50.913405475562435, + 6.387856291534852, + 139.596 + ], + "receiver_outer_lower_right": [ + 50.91340570660374, + 6.3877922506716125, + 139.592 + ], + "receiver_inner_lower_left": [ + 50.913406544144294, + 6.387853925842859, + 139.86 + ], + "receiver_inner_lower_right": [ + 50.91340664929648, + 6.387795301404112, + 139.862 + ], + "receiver_inner_upper_left": [ + 50.91342645401072, + 6.387854205350705, + 144.592 + ], + "receiver_inner_upper_right": [ + 50.913426766473705, + 6.3877954119834275, + 144.593 + ] } } } diff --git a/tutorials/data/scenarios/test_scenario_paint_multiple_heliostat_groups_deflectometry.h5 b/tutorials/data/scenarios/test_scenario_paint_multiple_heliostat_groups_deflectometry.h5 index 5962c986f..03c38b73c 100644 Binary files a/tutorials/data/scenarios/test_scenario_paint_multiple_heliostat_groups_deflectometry.h5 and b/tutorials/data/scenarios/test_scenario_paint_multiple_heliostat_groups_deflectometry.h5 differ diff --git a/tutorials/data/scenarios/test_scenario_paint_multiple_heliostat_groups_ideal.h5 b/tutorials/data/scenarios/test_scenario_paint_multiple_heliostat_groups_ideal.h5 index 01a9291bd..092eaaa86 100644 Binary files a/tutorials/data/scenarios/test_scenario_paint_multiple_heliostat_groups_ideal.h5 and b/tutorials/data/scenarios/test_scenario_paint_multiple_heliostat_groups_ideal.h5 differ diff --git a/tutorials/data/stral/test_stral_data.binp b/tutorials/data/stral/stral_data.binp similarity index 100% rename from tutorials/data/stral/test_stral_data.binp rename to tutorials/data/stral/stral_data.binp