From d3e4fa7d17ab1817e70b2f289527c1570f027705 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Thu, 12 Feb 2026 06:01:14 -0800 Subject: [PATCH 01/50] start moving gpu program building to the correct earlier step --- mcdc/code_factory/gpu/program_builder.py | 27 ++++++++++++- mcdc/code_factory/numba_objects_generator.py | 40 ++++++++------------ 2 files changed, 42 insertions(+), 25 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index d6d0e4ff..0dbf8c7d 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -19,7 +19,7 @@ free_state = lambda pointer: None -def build_gpu_program(mcdc_container, data): +def build_gpu_program(simulation, size): global src_free_program, free_state # Compilation check @@ -209,6 +209,31 @@ def teardown_gpu_program(mcdc): free_state(cast_uintp_to_voidptr(mcdc["gpu_meta"]["state_pointer"])) +# ====================================================================================== +# Simulation structure and data creators +# ====================================================================================== + + +def create_data_array(size, dtype): + if config.gpu_state_storage == "managed": + data_tally_ptr = harmonize.alloc_managed_bytes(size) + else: + data_tally_ptr = harmonize.alloc_device_bytes(size) + data_tally_uint = cast_voidptr_to_uintp(data_tally_ptr) + data_tally = nb.carray(data_tally_ptr, (size,), dtype) + return data_tally, data_tally_uint + + +def create_mcdc_container(dtype): + if config.gpu_state_storage == "managed": + mcdc_ptr = harmonize.alloc_managed_bytes(dtype.itemsize) + else: + mcdc_ptr = harmonize.alloc_device_bytes(dtype.itemsize) + mcdc_uint = cast_voidptr_to_uintp(mcdc_ptr) + mcdc_container = nb.carray(mcdc_ptr, (1,), dtype) + return mcdc_container, mcdc_uint + + # ====================================================================================== # Type casters # 
====================================================================================== diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index d4ed0321..c93ae769 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -253,6 +253,12 @@ def generate_numba_objects(simulation): set_object(object_, annotations, structures, records, data) set_object(simulation, annotations, structures, records, data) + # Build GPU program if needed + if config.target == "gpu": + from mcdc.code_factory.gpu.program_builder import build_gpu_program + + build_gpu_program(simulation, data["size"]) + # Allocate the flattened data and re-set the objects data["array"], data["pointer"] = create_data_array(data["size"], type_map[float]) @@ -344,10 +350,10 @@ def generate_numba_objects(simulation): # ================================================================================== # The global structure/variable container - mcdc_simulation_arr, mcdc_simulation_pointer = create_mcdc_array( + mcdc_simulation_container, mcdc_simulation_pointer = create_mcdc_container( into_dtype(structures["simulation"]) ) - mcdc_simulation = mcdc_simulation_arr[0] + mcdc_simulation = mcdc_simulation_container[0] record = records["simulation"] structure = structures["simulation"] @@ -385,7 +391,7 @@ def generate_numba_objects(simulation): for name in bank_names: mcdc_simulation[name]["tag"] = getattr(simulation, name).tag - return mcdc_simulation_arr, data["array"] + return mcdc_simulation_container, data["array"] def set_structure(label, structures, accessor_targets, annotations): @@ -644,36 +650,22 @@ def set_object( def create_data_array(size, dtype): if config.target == "gpu": - import mcdc.code_factory.gpu.adapt as adapt - import harmonize, numba + import mcdc.code_factory.gpu.program_builder as gpu_builder - if config.gpu_state_storage == "managed": - data_tally_ptr = 
harmonize.alloc_managed_bytes(size) - else: - data_tally_ptr = harmonize.alloc_device_bytes(size) - data_tally_uint = adapt.voidptr_to_uintp(data_tally_ptr) - data_tally = numba.carray(data_tally_ptr, (size,), dtype) - return data_tally, data_tally_uint + return gpu_builder.create_data_array(size, dtype) else: data_tally = np.zeros(size, dtype=dtype) return data_tally, 0 -def create_mcdc_array(dtype): +def create_mcdc_container(dtype): if config.target == "gpu": - import mcdc.code_factory.gpu.adapt as adapt - import harmonize, numba + import mcdc.code_factory.gpu.program_builder as gpu_builder - if config.gpu_state_storage == "managed": - mcdc_ptr = harmonize.alloc_managed_bytes(dtype.itemsize) - else: - mcdc_ptr = harmonize.alloc_device_bytes(dtype.itemsize) - mcdc_uint = adapt.voidptr_to_uintp(mcdc_ptr) - mcdc_array = numba.carray(mcdc_ptr, (1,), dtype) - return mcdc_array, mcdc_uint + return gpu_builder.create_mcdc_container(dtype) else: - mcdc_array = np.zeros((1,), dtype=dtype) - return mcdc_array, 0 + mcdc_container = np.zeros((1,), dtype=dtype) + return mcdc_container, 0 # ====================================================================================== From 28bbd2548aa2b55e2792d464aa891974b9e2825c Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Fri, 13 Feb 2026 08:56:30 +0700 Subject: [PATCH 02/50] Add global module parameter in GPU prgram builder --- mcdc/code_factory/gpu/program_builder.py | 38 ++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 3 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 0dbf8c7d..595cbf68 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -14,12 +14,38 @@ # Build GPU program # ====================================================================================== -# For teardown +# Main types +none_type = None +simulation_type = None +data_type = None + +# Access functions +state_spec = None 
+simulation_gpu = None +data_gpu = None +group_gpu = None +thread_gpu = None +particle_gpu = None +particle_record_gpu = None + +# Asynchronous transport kernels +step_async = None +find_cell_async = None + +# Memory allocations +alloc_managed_bytes = None +alloc_device_bytes = None + +# For teardown functions src_free_program = lambda pointer: None free_state = lambda pointer: None def build_gpu_program(simulation, size): + global none_type, simulation_type, data_type + global state_spec, simulation_gpu, data_gpu, group_gpu, thread_gpu, particle_gpu, particle_record_gpu + global step_async, find_cell_async + global alloc_managed_bytes, alloc_device_bytes global src_free_program, free_state # Compilation check @@ -62,10 +88,10 @@ def build_gpu_program(simulation, size): particle_record_gpu = nb.from_dtype(type_.particle_data) # Functions, and their signatures - def step(prog: nb.uintp, P: particle_gpu): + def step(program: nb.uintp, particle: particle_gpu): pass - def find_cell(prog: nb.uintp, P: particle_gpu): + def find_cell(program: nb.uintp, particle: particle_gpu): pass # Asynchronous versions @@ -75,6 +101,12 @@ def find_cell(prog: nb.uintp, P: particle_gpu): interface = harmonize.RuntimeSpec.program_interface() halt_early = interface["halt_early"] + # Byte allocators + alloc_managed_bytes = harmonize.alloc_managed_bytes + alloc_device_bytes = harmomize.alloc_device_bytes + + return + # ================================================================================== # TODO: "gpu_sources_spec" # ================================================================================== From b76beb2ef5a3670fc7da3a9c5e7b5b74f773d253 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Thu, 12 Feb 2026 18:02:22 -0800 Subject: [PATCH 03/50] fix typo --- mcdc/code_factory/gpu/program_builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 595cbf68..027ead21 100644 --- 
a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -103,7 +103,7 @@ def find_cell(program: nb.uintp, particle: particle_gpu): # Byte allocators alloc_managed_bytes = harmonize.alloc_managed_bytes - alloc_device_bytes = harmomize.alloc_device_bytes + alloc_device_bytes = harmonize.alloc_device_bytes return From 6dda91764d421704db881240565f09df5d9ade47 Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Fri, 13 Feb 2026 18:56:25 +0700 Subject: [PATCH 04/50] minor comments --- mcdc/code_factory/gpu/program_builder.py | 4 +--- mcdc/main.py | 6 +++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 027ead21..f9fb45fb 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -105,10 +105,8 @@ def find_cell(program: nb.uintp, particle: particle_gpu): alloc_managed_bytes = harmonize.alloc_managed_bytes alloc_device_bytes = harmonize.alloc_device_bytes - return - # ================================================================================== - # TODO: "gpu_sources_spec" + # "gpu_sources_spec" # ================================================================================== # ============== diff --git a/mcdc/main.py b/mcdc/main.py index f39761d1..f5ab2bee 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -283,11 +283,11 @@ def preparation(): # Platform targeting, adapters, and toggles for portability # ================================================================================== - # Build GPU program if desired + # Set up GPU if needed if config.target == "gpu": - from mcdc.code_factory.gpu.program_builder import build_gpu_program + from mcdc.code_factory.gpu.program_builder import setup_gpu - build_gpu_program(mcdc_container, data) + setup_gpu(mcdc_container, data) # ================================================================================== # Finalize From 
792ced58055329ed6e918f90fc073db903c9b6a0 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Sat, 21 Feb 2026 16:52:17 -0800 Subject: [PATCH 05/50] fix data shape and gpu particle signature in program builder --- mcdc/code_factory/gpu/program_builder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index f9fb45fb..306ffcac 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -136,9 +136,9 @@ def finalize(prog: nb.uintp): # Async. functions # ================ - shape = eval(f"{adapt.tally_shape_literal}") + shape = (size,) - def step(prog: nb.uintp, P_input: adapt.particle_gpu): + def step(prog: nb.uintp, P_input: particle_gpu): mcdc = adapt.mcdc_global(prog) data_ptr = adapt.mcdc_data(prog) data = adapt.harm.array_from_ptr(data_ptr, shape, nb.float64) From 2447d829a70b8b847b0546626364c0578d5dbc96 Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Wed, 25 Feb 2026 09:10:31 +0700 Subject: [PATCH 06/50] fix source isotropy flagging --- mcdc/object_/source.py | 2 +- mcdc/object_/surface.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mcdc/object_/source.py b/mcdc/object_/source.py index 99072dbb..2cb63e90 100644 --- a/mcdc/object_/source.py +++ b/mcdc/object_/source.py @@ -192,7 +192,7 @@ def __init__( self.z = np.array(z) # Direction - if isotropic is not None: + if isotropic is not None and isotropic: pass elif direction is not None: self.isotropic_direction = False diff --git a/mcdc/object_/surface.py b/mcdc/object_/surface.py index 80c34508..2e9d99eb 100644 --- a/mcdc/object_/surface.py +++ b/mcdc/object_/surface.py @@ -42,7 +42,7 @@ class Surface(ObjectNonSingleton): Parameters ---------- - type\_ : int + type\\_ : int One of ``SURFACE_*`` constants (e.g., ``SURFACE_PLANE_X``). name : str Optional label for reporting. 
@@ -53,7 +53,7 @@ class Surface(ObjectNonSingleton): ---------- ID : int Index in the global registry (assigned on construction). - type\_ : int + type\\_ : int Surface type code (``SURFACE_*``). name : str User label. From f8f095135bc47a5a46394844001d5b19bda3a454 Mon Sep 17 00:00:00 2001 From: Melek Derman <48313913+melekderman@users.noreply.github.com> Date: Mon, 16 Feb 2026 23:01:53 -0800 Subject: [PATCH 07/50] add new surface - general cylinder --- mcdc/constant.py | 1 + mcdc/object_/surface.py | 50 ++ mcdc/transport/geometry/surface/cylinder.py | 172 +++++++ mcdc/transport/geometry/surface/interface.py | 10 + .../transport/geometry/surface/cylinder.py | 459 ++++++++++++++++++ 5 files changed, 692 insertions(+) create mode 100644 mcdc/transport/geometry/surface/cylinder.py create mode 100644 test/unit/transport/geometry/surface/cylinder.py diff --git a/mcdc/constant.py b/mcdc/constant.py index 695ee4a5..9215a30b 100644 --- a/mcdc/constant.py +++ b/mcdc/constant.py @@ -59,6 +59,7 @@ SURFACE_CYLINDER_Z = 7 SURFACE_SPHERE = 8 SURFACE_QUADRIC = 9 +SURFACE_CYLINDER = 10 # Boolean operator BOOL_AND = -1 diff --git a/mcdc/object_/surface.py b/mcdc/object_/surface.py index 2e9d99eb..7cb06b84 100644 --- a/mcdc/object_/surface.py +++ b/mcdc/object_/surface.py @@ -18,6 +18,7 @@ SURFACE_PLANE_X, SURFACE_PLANE_Y, SURFACE_PLANE_Z, + SURFACE_CYLINDER, SURFACE_QUADRIC, SURFACE_SPHERE, ) @@ -219,6 +220,9 @@ def __repr__(self): r = (x**2 + y**2 + z**2 - self.J) ** 0.5 text += f" - Center (x, y, z): ({x}, {y}, {z}) cm\n" text += f" - Radius: {r} cm\n" + elif self.type == SURFACE_CYLINDER: + text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" + text += f" {self.G}, {self.H}, {self.I}, {self.J}\n" elif self.type == SURFACE_QUADRIC: text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" text += f" {self.D}, {self.E}, {self.F},\n" @@ -505,6 +509,50 @@ def CylinderZ( return surface + @classmethod + def Cylinder( + cls, + name: str = "", + A: float = 0.0, + B: float = 0.0, + C: 
float = 0.0, + G: float = 0.0, + H: float = 0.0, + I: float = 0.0, + J: float = 0.0, + boundary_condition: str = "none", + ): + """ + Create a general infinite cylinder (diagonal quadric without cross terms): + A x^2 + B y^2 + C z^2 + G x + H y + I z + J = 0 + + Parameters + ---------- + name : str, optional + A, B, C, G, H, I, J : float + Cylinder coefficients. + boundary_condition : {"none","vacuum","reflective"}, optional + + Returns + ------- + Surface + General cylinder surface. + """ + type_ = SURFACE_CYLINDER + surface = cls(type_, name, boundary_condition) + + surface.linear = False + + # Coefficients + surface.A = A + surface.B = B + surface.C = C + surface.G = G + surface.H = H + surface.I = I + surface.J = J + return surface + @classmethod def Sphere( cls, @@ -685,6 +733,8 @@ def decode_type(type_): return "Infinite cylinder-Z surface" elif type_ == SURFACE_SPHERE: return "Sphere surface" + elif type_ == SURFACE_CYLINDER: + return "General cylinder surface" elif type_ == SURFACE_QUADRIC: return "Quadric surface" diff --git a/mcdc/transport/geometry/surface/cylinder.py b/mcdc/transport/geometry/surface/cylinder.py new file mode 100644 index 00000000..f49a609d --- /dev/null +++ b/mcdc/transport/geometry/surface/cylinder.py @@ -0,0 +1,172 @@ +""" +Cylinder: General infinite cylinder + +f(x, y, z) = Axx + Byy + Czz + Gx + Hy + Iz + J +""" + +import math + +from numba import njit + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) + + +@njit +def evaluate(particle_container, surface): + particle = particle_container[0] + # Particle parameters + x = particle["x"] + y = particle["y"] + z = particle["z"] + + # Surface parameters + A = surface["A"] + B = surface["B"] + C = surface["C"] + G = surface["G"] + H = surface["H"] + I = surface["I"] + J = surface["J"] + + return A * x**2 + B * y**2 + C * z**2 + G * x + H * y + I * z + J + + +@njit +def reflect(particle_container, surface): + particle = particle_container[0] + # Particle coordinate + x = 
particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + A = surface["A"] + B = surface["B"] + C = surface["C"] + G = surface["G"] + H = surface["H"] + I = surface["I"] + + # Surface normal + dx = 2 * A * x + G + dy = 2 * B * y + H + dz = 2 * C * z + I + norm = (dx**2 + dy**2 + dz**2) ** 0.5 + nx = dx / norm + ny = dy / norm + nz = dz / norm + + # Reflect + c = 2.0 * (nx * ux + ny * uy + nz * uz) + particle["ux"] -= c * nx + particle["uy"] -= c * ny + particle["uz"] -= c * nz + + +@njit +def get_normal_component(particle_container, surface): + particle = particle_container[0] + # Particle coordinate + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + A = surface["A"] + B = surface["B"] + C = surface["C"] + G = surface["G"] + H = surface["H"] + I = surface["I"] + + # Surface normal + dx = 2 * A * x + G + dy = 2 * B * y + H + dz = 2 * C * z + I + norm = (dx**2 + dy**2 + dz**2) ** 0.5 + nx = dx / norm + ny = dy / norm + nz = dz / norm + + return nx * ux + ny * uy + nz * uz + + +@njit +def get_distance(particle_container, surface): + particle = particle_container[0] + # Particle coordinate + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + A = surface["A"] + B = surface["B"] + C = surface["C"] + G = surface["G"] + H = surface["H"] + I = surface["I"] + + # Coincident? + f = evaluate(particle_container, surface) + coincident = abs(f) < COINCIDENCE_TOLERANCE + if coincident: + # Moving away or tangent? 
+ if ( + get_normal_component(particle_container, surface) + >= 0.0 - COINCIDENCE_TOLERANCE + ): + return INF + + # Quadratic equation constants + a = ( + A * ux * ux + + B * uy * uy + + C * uz * uz + ) + b = ( + 2 * (A * x * ux + B * y * uy + C * z * uz) + + G * ux + + H * uy + + I * uz + ) + c = f + + determinant = b * b - 4.0 * a * c + + # Roots are complex : no intersection + # Roots are identical: tangent + # ==> return huge number + if determinant <= 0.0: + return INF + else: + # Get the roots + denom = 2.0 * a + sqrt = math.sqrt(determinant) + root_1 = (-b + sqrt) / denom + root_2 = (-b - sqrt) / denom + + # Coincident? + if coincident: + return max(root_1, root_2) + + # Negative roots, moving away from the surface + if root_1 < 0.0: + root_1 = INF + if root_2 < 0.0: + root_2 = INF + + # Return the smaller root + return min(root_1, root_2) diff --git a/mcdc/transport/geometry/surface/interface.py b/mcdc/transport/geometry/surface/interface.py index 91d59b9d..56159e69 100644 --- a/mcdc/transport/geometry/surface/interface.py +++ b/mcdc/transport/geometry/surface/interface.py @@ -16,6 +16,7 @@ import mcdc.transport.geometry.surface.cylinder_y as cylinder_y import mcdc.transport.geometry.surface.cylinder_z as cylinder_z import mcdc.transport.geometry.surface.sphere as sphere +import mcdc.transport.geometry.surface.cylinder as cylinder import mcdc.transport.geometry.surface.quadric as quadric from mcdc.constant import ( @@ -30,6 +31,7 @@ SURFACE_CYLINDER_Y, SURFACE_CYLINDER_Z, SURFACE_SPHERE, + SURFACE_CYLINDER, ) from mcdc.transport.util import find_bin @@ -88,6 +90,8 @@ def evaluate(particle_container, surface, data): result = cylinder_z.evaluate(particle_container, surface) elif surface["type"] == SURFACE_SPHERE: result = sphere.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER: + result = cylinder.evaluate(particle_container, surface) else: result = quadric.evaluate(particle_container, surface) @@ -138,6 +142,8 @@ def 
get_normal_component(particle_container, speed, surface, data): result = cylinder_z.get_normal_component(particle_container, surface) elif surface["type"] == SURFACE_SPHERE: result = sphere.get_normal_component(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER: + result = cylinder.get_normal_component(particle_container, surface) else: result = quadric.get_normal_component(particle_container, surface) @@ -177,6 +183,8 @@ def reflect(particle_container, surface): return cylinder_z.reflect(particle_container, surface) elif surface["type"] == SURFACE_SPHERE: return sphere.reflect(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER: + return cylinder.reflect(particle_container, surface) else: return quadric.reflect(particle_container, surface) @@ -221,6 +229,8 @@ def _get_distance_static(particle_container, surface): return cylinder_z.get_distance(particle_container, surface) elif surface["type"] == SURFACE_SPHERE: return sphere.get_distance(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER: + return cylinder.get_distance(particle_container, surface) else: return quadric.get_distance(particle_container, surface) diff --git a/test/unit/transport/geometry/surface/cylinder.py b/test/unit/transport/geometry/surface/cylinder.py new file mode 100644 index 00000000..2fc5b2fd --- /dev/null +++ b/test/unit/transport/geometry/surface/cylinder.py @@ -0,0 +1,459 @@ +import mcdc +import numpy as np + +#### + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) +from mcdc.main import preparation + +# ====================================================================================== +# Setup +# ====================================================================================== + +# Reference surface description +# General cylinder parallel to z-axis, centered at origin, radius R = 5.0 +# f(x, y, z) = x^2 + y^2 - R^2 +R = 5.0 +durations = np.array([5.0, 5.0, 5.0]) +velocities = np.zeros((3, 3)) 
+velocities[:, 0] = np.array([-1.0, 2.0, -3.0]) + +# Test object: static surface +static_surface = mcdc.Surface.Cylinder(A=1.0, B=1.0, J=-(R**2)) + +# Test object: moving surface +moving_surface = mcdc.Surface.Cylinder(A=1.0, B=1.0, J=-(R**2)) +moving_surface.move(velocities, durations) + +# Test object: static surface with general coefficients +general_surface = mcdc.Surface.Cylinder( + A=1.5, B=0.5, C=2.0, G=-1.0, H=3.0, I=-0.5, J=-4.0 +) + +# Create the dummy simulation structure and data +structure_container, data = preparation() +structure = structure_container[0] + +# Get the "compiled" test objects +static_surface = structure["surfaces"][0] +moving_surface = structure["surfaces"][1] +general_surface = structure["surfaces"][2] + +# Particle object for testing +import mcdc.numba_types as type_ + +particle_container = np.zeros(1, type_.particle_data) +particle = particle_container[0] + +# Miscellanies +# For quadratic surfaces, position offset delta gives f ~ 2*R*delta, +# so delta < COINCIDENCE_TOLERANCE / (2*R) is needed for coincidence. 
+TINY = COINCIDENCE_TOLERANCE / (2.0 * R) * 0.8 + +# Load modules to be tested +from mcdc.transport.geometry.surface import ( + interface, + cylinder, +) + + +# ===================================================================================== +# Cylinder core functions +# ===================================================================================== + + +def test_evaluate(): + def run(x, y, answer): + particle["x"] = x + particle["y"] = y + result = cylinder.evaluate(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive side (outside) + run(x=8.0, y=0.0, answer=39.0) + # Negative side (inside) + run(x=3.0, y=0.0, answer=-16.0) + + +def test_evaluate_general_coefficients(): + def run(x, y, z, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + result = cylinder.evaluate(particle_container, general_surface) + assert np.isclose(result, answer) + + run(x=1.0, y=-2.0, z=0.5, answer=-7.25) + run(x=2.0, y=1.0, z=-1.0, answer=6.0) + + +def test_reflect(): + def run(ux, answer): + particle["x"] = R + particle["y"] = 0.0 + particle["ux"] = ux + particle["uy"] = 0.0 + cylinder.reflect(particle_container, static_surface) + assert np.isclose(particle["ux"], answer) + + # From positive direction + run(ux=0.2, answer=-0.2) + # From negative direction + run(ux=-0.1, answer=0.1) + + +def test_get_normal_component(): + def run(ux, answer): + particle["x"] = R + particle["y"] = 0.0 + particle["ux"] = ux + particle["uy"] = 0.0 + result = cylinder.get_normal_component(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive direction + run(ux=0.4, answer=0.4) + # Negative direction + run(ux=-0.2, answer=-0.2) + # Parallel + run(ux=0.0, answer=0.0) + + +def test_get_normal_component_general_coefficients(): + particle["x"] = 1.0 + particle["y"] = -2.0 + particle["z"] = 0.5 + particle["ux"] = 0.4 + particle["uy"] = -0.2 + particle["uz"] = 0.1 + + result = 
cylinder.get_normal_component(particle_container, general_surface) + answer = (2.0 * 0.4 + 1.0 * (-0.2) + 1.5 * 0.1) / (2.0**2 + 1.0**2 + 1.5**2) ** 0.5 + assert np.isclose(result, answer) + + +def test_get_distance(): + def run(x, ux, answer): + particle["x"] = x + particle["y"] = 0.0 + particle["ux"] = ux + particle["uy"] = 0.0 + result = cylinder.get_distance(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive side (outside) + x = 8.0 + ## Moving closer + run(x, ux=-0.4, answer=7.5) + ## Moving away + run(x, ux=0.3, answer=INF) + ## Parallel + run(x, ux=0.0, answer=INF) + + # Negative side (inside) + x = 3.0 + ## Moving outward (toward near surface) + run(x, ux=0.4, answer=5.0) + ## Moving inward (toward far surface) + run(x, ux=-0.3, answer=80.0 / 3.0) + ## Parallel + run(x, ux=0.0, answer=INF) + + # At surface, within tolerance, on the positive side + x = R + TINY + ## Moving away + run(x, ux=0.4, answer=INF) + ## Moving closer (crosses to far side) + run(x, ux=-0.4, answer=2.0 * R / 0.4) + ## Parallel + run(x, ux=0.0, answer=INF) + + # At surface, within tolerance, on the negative side + x = R - TINY + ## Moving away (toward center, crosses to far side) + run(x, ux=-0.4, answer=2.0 * R / 0.4) + ## Moving closer + run(x, ux=0.4, answer=INF) + ## Parallel + run(x, ux=0.0, answer=INF) + + +# ===================================================================================== +# Cylinder integrated transport interface +# ===================================================================================== + + +def test_interface_reflect(): + def run(ux, answer): + particle["x"] = R + particle["y"] = 0.0 + particle["ux"] = ux + particle["uy"] = 0.0 + interface.reflect(particle_container, static_surface) + assert np.isclose(particle["ux"], answer) + + # From positive direction + run(ux=0.2, answer=-0.2) + # From negative direction + run(ux=-0.1, answer=0.1) + + +def test_interface_evaluate(): + def run_static(x, y, answer): + 
particle["x"] = x + particle["y"] = y + result = interface.evaluate(particle_container, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(x, y, t, answer): + particle["x"] = x + particle["y"] = y + particle["t"] = t + result = interface.evaluate(particle_container, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive side (outside) + run_static(x=8.0, y=0.0, answer=39.0) + # Negative side (inside) + run_static(x=3.0, y=0.0, answer=-16.0) + + # ================================================================================= + # Moving + # ================================================================================= + + # First bin: center x = -3.0 + t = 3.0 + ## Positive side (outside) + run_moving(x=4.0, y=0.0, t=t, answer=24.0) + ## Negative side (inside) + run_moving(x=-4.0, y=0.0, t=t, answer=-24.0) + + # First bin, at grid: center x = -5.0 + t = 5.0 + ## Positive side (outside) + run_moving(x=4.0, y=0.0, t=t, answer=56.0) + ## Negative side (inside) + run_moving(x=-4.0, y=0.0, t=t, answer=-24.0) + + # Interior bin: center x = -1.0 + t = 12.0 + ## Positive side (outside) + run_moving(x=6.0, y=0.0, t=t, answer=24.0) + ## Negative side (inside) + run_moving(x=-4.0, y=0.0, t=t, answer=-16.0) + + # Final bin: center x = -10.0 + t = 100.0 + ## Positive side (outside) + run_moving(x=0.0, y=0.0, t=t, answer=75.0) + ## Negative side (inside) + run_moving(x=-8.0, y=0.0, t=t, answer=-21.0) + + +def test_interface_get_normal_component(): + def run_static(ux, answer): + particle["x"] = R + particle["y"] = 0.0 + particle["ux"] = ux + particle["uy"] = 0.0 + speed = 2.0 # Arbitrary + result = interface.get_normal_component( + particle_container, speed, static_surface, data + ) + assert np.isclose(result, answer) + + def run_moving(x, ux, t, 
speed, answer): + particle["x"] = x + particle["y"] = 0.0 + particle["ux"] = ux + particle["uy"] = 0.0 + particle["t"] = t + result = interface.get_normal_component( + particle_container, speed, moving_surface, data + ) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive direction + run_static(ux=0.4, answer=0.4) + # Negative direction + run_static(ux=-0.2, answer=-0.2) + # Parallel + run_static(ux=0.0, answer=0.0) + + # ================================================================================= + # Moving + # ================================================================================= + + # First bin: center x = -3.0, velocity = -1.0 + t = 3.0 + x = -3.0 + R + run_moving(x, ux=0.4, t=t, speed=2.0, answer=0.9) + run_moving(x, ux=-0.6, t=t, speed=2.0, answer=-0.1) + run_moving(x, ux=-0.5, t=t, speed=2.0, answer=0.0) + + # Interior bin: center x = 1.0, velocity = 2.0 + t = 8.0 + x = 1.0 + R + run_moving(x, ux=0.4, t=t, speed=2.0, answer=-0.6) + run_moving(x, ux=1.0, t=t, speed=2.0, answer=0.0) + run_moving(x, ux=0.0, t=t, speed=4.0, answer=-0.5) + + # Interior bin: center x = -1.0, velocity = -3.0 + t = 12.0 + x = -1.0 + R + run_moving(x, ux=-0.2, t=t, speed=10.0, answer=0.1) + + +def test_interface_check_sense(): + def run_static(x, y, ux, answer): + particle["x"] = x + particle["y"] = y + particle["ux"] = ux + particle["uy"] = 0.0 + speed = 2.0 # Arbitrary + result = interface.check_sense(particle_container, speed, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(x, y, ux, t, speed, answer): + particle["x"] = x + particle["y"] = y + particle["ux"] = ux + particle["uy"] = 0.0 + particle["t"] = t + result = interface.check_sense(particle_container, speed, moving_surface, data) + assert np.isclose(result, answer) + + # 
================================================================================= + # Static + # ================================================================================= + + # Not at surface + ux = 0.3 # Arbitrary + ## Positive side (outside) + run_static(x=8.0, y=0.0, ux=ux, answer=True) + ## Negative side (inside) + run_static(x=3.0, y=0.0, ux=ux, answer=False) + + # At surface, positive side + x = R + TINY + ## Positive direction (outward) + run_static(x, y=0.0, ux=0.4, answer=True) + ## Negative direction (inward) + run_static(x, y=0.0, ux=-0.4, answer=False) + + # At surface, negative side + x = R - TINY + ## Positive direction (outward) + run_static(x, y=0.0, ux=0.2, answer=True) + ## Negative direction (inward) + run_static(x, y=0.0, ux=-0.2, answer=False) + + # ================================================================================= + # Moving + # ================================================================================= + + # First bin: center x = -3.0 + t = 3.0 + speed = 2.0 + ## Not at surface + run_moving(x=4.0, y=0.0, ux=0.2, t=t, speed=speed, answer=True) + run_moving(x=-4.0, y=0.0, ux=0.2, t=t, speed=speed, answer=False) + ## At surface + x = -3.0 + R + run_moving(x, y=0.0, ux=0.4, t=t, speed=speed, answer=True) + run_moving(x, y=0.0, ux=-0.6, t=t, speed=speed, answer=False) + run_moving(x, y=0.0, ux=-0.5, t=t, speed=speed, answer=False) + + # Interior bin: center x = 1.0 + t = 8.0 + speed = 4.0 + x = 1.0 + R + run_moving(x, y=0.0, ux=0.8, t=t, speed=speed, answer=True) + run_moving(x, y=0.0, ux=0.5, t=t, speed=speed, answer=False) + + +def test_interface_get_distance(): + def run_static(x, ux, answer): + particle["x"] = x + particle["y"] = 0.0 + particle["ux"] = ux + particle["uy"] = 0.0 + speed = 2.0 # Arbitrary + result = interface.get_distance(particle_container, speed, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(x, ux, t, speed, answer): + particle["x"] = x + particle["y"] = 0.0 + 
particle["ux"] = ux + particle["uy"] = 0.0 + particle["t"] = t + result = interface.get_distance(particle_container, speed, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive side (outside) + x = 8.0 + ## Moving closer + run_static(x, ux=-0.4, answer=7.5) + ## Moving away + run_static(x, ux=0.3, answer=INF) + ## Parallel + run_static(x, ux=0.0, answer=INF) + + # Negative side (inside) + x = 3.0 + ## Moving outward (toward near surface) + run_static(x, ux=0.4, answer=5.0) + ## Moving inward (toward far surface) + run_static(x, ux=-0.3, answer=80.0 / 3.0) + ## Parallel + run_static(x, ux=0.0, answer=INF) + + # At surface, on the positive side + x = R + TINY + ## Moving away + run_static(x, ux=0.4, answer=INF) + ## Moving closer (crosses to far side) + run_static(x, ux=-0.4, answer=2.0 * R / 0.4) + ## Parallel + run_static(x, ux=0.0, answer=INF) + + # At surface, on the negative side + x = R - TINY + ## Moving away (toward center, crosses to far side) + run_static(x, ux=-0.4, answer=2.0 * R / 0.4) + ## Moving closer + run_static(x, ux=0.4, answer=INF) + ## Parallel + run_static(x, ux=0.0, answer=INF) + + # ================================================================================= + # Moving + # ================================================================================= + + # First bin intersection + run_moving(x=6.0, ux=-1.0, t=1.0, speed=2.0, answer=4.0) + + # Crossing after entering the second bin + run_moving(x=10.0, ux=-1.0, t=2.0, speed=2.0, answer=8.0) + + # Moving away from the surface + run_moving(x=10.0, ux=1.0, t=6.0, speed=2.0, answer=INF) + + # Starting inside and moving outward + run_moving(x=-2.0, ux=1.0, t=2.0, speed=2.0, answer=10.0 / 3.0) From bbf1e6b864189b63778ccbf68ddef37503109e5e Mon Sep 17 00:00:00 2001 From: Melek Derman 
<48313913+melekderman@users.noreply.github.com> Date: Mon, 16 Feb 2026 23:32:06 -0800 Subject: [PATCH 08/50] back in black --- mcdc/transport/geometry/surface/cylinder.py | 13 ++----------- test/unit/transport/geometry/surface/cylinder.py | 1 - 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/mcdc/transport/geometry/surface/cylinder.py b/mcdc/transport/geometry/surface/cylinder.py index f49a609d..5e59ff58 100644 --- a/mcdc/transport/geometry/surface/cylinder.py +++ b/mcdc/transport/geometry/surface/cylinder.py @@ -131,17 +131,8 @@ def get_distance(particle_container, surface): return INF # Quadratic equation constants - a = ( - A * ux * ux - + B * uy * uy - + C * uz * uz - ) - b = ( - 2 * (A * x * ux + B * y * uy + C * z * uz) - + G * ux - + H * uy - + I * uz - ) + a = A * ux * ux + B * uy * uy + C * uz * uz + b = 2 * (A * x * ux + B * y * uy + C * z * uz) + G * ux + H * uy + I * uz c = f determinant = b * b - 4.0 * a * c diff --git a/test/unit/transport/geometry/surface/cylinder.py b/test/unit/transport/geometry/surface/cylinder.py index 2fc5b2fd..3252ff6c 100644 --- a/test/unit/transport/geometry/surface/cylinder.py +++ b/test/unit/transport/geometry/surface/cylinder.py @@ -59,7 +59,6 @@ cylinder, ) - # ===================================================================================== # Cylinder core functions # ===================================================================================== From 85cd32d2bd67eff3608f645b4dc14eb30bcffc2e Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Thu, 19 Feb 2026 08:16:24 +0700 Subject: [PATCH 09/50] minor edits --- mcdc/constant.py | 8 ++++---- mcdc/object_/surface.py | 16 ++++++++-------- mcdc/transport/geometry/surface/interface.py | 18 +++++++++--------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/mcdc/constant.py b/mcdc/constant.py index 9215a30b..b0a17e64 100644 --- a/mcdc/constant.py +++ b/mcdc/constant.py @@ -50,16 +50,16 @@ REGION_ALL = 4 # Surface type -SURFACE_PLANE_X 
= 0 +SURFACE_PLANE_X = 1 SURFACE_PLANE_Y = 2 SURFACE_PLANE_Z = 3 SURFACE_PLANE = 4 SURFACE_CYLINDER_X = 5 SURFACE_CYLINDER_Y = 6 SURFACE_CYLINDER_Z = 7 -SURFACE_SPHERE = 8 -SURFACE_QUADRIC = 9 -SURFACE_CYLINDER = 10 +SURFACE_CYLINDER = 8 +SURFACE_SPHERE = 9 +SURFACE_QUADRIC = 10 # Boolean operator BOOL_AND = -1 diff --git a/mcdc/object_/surface.py b/mcdc/object_/surface.py index 7cb06b84..98c51f12 100644 --- a/mcdc/object_/surface.py +++ b/mcdc/object_/surface.py @@ -14,13 +14,13 @@ SURFACE_CYLINDER_X, SURFACE_CYLINDER_Y, SURFACE_CYLINDER_Z, - SURFACE_PLANE, + SURFACE_CYLINDER, SURFACE_PLANE_X, SURFACE_PLANE_Y, SURFACE_PLANE_Z, - SURFACE_CYLINDER, - SURFACE_QUADRIC, + SURFACE_PLANE, SURFACE_SPHERE, + SURFACE_QUADRIC, ) from mcdc.object_.base import ObjectNonSingleton from mcdc.object_.cell import Region @@ -213,6 +213,9 @@ def __repr__(self): r = (x**2 + y**2 - self.J) ** 0.5 text += f" - Center (x, y): ({x}, {y}) cm\n" text += f" - Radius: {r} cm\n" + elif self.type == SURFACE_CYLINDER: + text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" + text += f" {self.G}, {self.H}, {self.I}, {self.J}\n" elif self.type == SURFACE_SPHERE: x = -0.5 * self.G y = -0.5 * self.H @@ -220,9 +223,6 @@ def __repr__(self): r = (x**2 + y**2 + z**2 - self.J) ** 0.5 text += f" - Center (x, y, z): ({x}, {y}, {z}) cm\n" text += f" - Radius: {r} cm\n" - elif self.type == SURFACE_CYLINDER: - text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" - text += f" {self.G}, {self.H}, {self.I}, {self.J}\n" elif self.type == SURFACE_QUADRIC: text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" text += f" {self.D}, {self.E}, {self.F},\n" @@ -731,10 +731,10 @@ def decode_type(type_): return "Infinite cylinder-Y surface" elif type_ == SURFACE_CYLINDER_Z: return "Infinite cylinder-Z surface" - elif type_ == SURFACE_SPHERE: - return "Sphere surface" elif type_ == SURFACE_CYLINDER: return "General cylinder surface" + elif type_ == SURFACE_SPHERE: + return "Sphere surface" elif type_ == 
SURFACE_QUADRIC: return "Quadric surface" diff --git a/mcdc/transport/geometry/surface/interface.py b/mcdc/transport/geometry/surface/interface.py index 56159e69..8ba7ac88 100644 --- a/mcdc/transport/geometry/surface/interface.py +++ b/mcdc/transport/geometry/surface/interface.py @@ -30,8 +30,8 @@ SURFACE_CYLINDER_X, SURFACE_CYLINDER_Y, SURFACE_CYLINDER_Z, - SURFACE_SPHERE, SURFACE_CYLINDER, + SURFACE_SPHERE, ) from mcdc.transport.util import find_bin @@ -88,10 +88,10 @@ def evaluate(particle_container, surface, data): result = cylinder_y.evaluate(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER_Z: result = cylinder_z.evaluate(particle_container, surface) - elif surface["type"] == SURFACE_SPHERE: - result = sphere.evaluate(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER: result = cylinder.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_SPHERE: + result = sphere.evaluate(particle_container, surface) else: result = quadric.evaluate(particle_container, surface) @@ -140,10 +140,10 @@ def get_normal_component(particle_container, speed, surface, data): result = cylinder_y.get_normal_component(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER_Z: result = cylinder_z.get_normal_component(particle_container, surface) - elif surface["type"] == SURFACE_SPHERE: - result = sphere.get_normal_component(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER: result = cylinder.get_normal_component(particle_container, surface) + elif surface["type"] == SURFACE_SPHERE: + result = sphere.get_normal_component(particle_container, surface) else: result = quadric.get_normal_component(particle_container, surface) @@ -181,10 +181,10 @@ def reflect(particle_container, surface): return cylinder_y.reflect(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER_Z: return cylinder_z.reflect(particle_container, surface) - elif surface["type"] == SURFACE_SPHERE: - return 
sphere.reflect(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER: return cylinder.reflect(particle_container, surface) + elif surface["type"] == SURFACE_SPHERE: + return sphere.reflect(particle_container, surface) else: return quadric.reflect(particle_container, surface) @@ -227,10 +227,10 @@ def _get_distance_static(particle_container, surface): return cylinder_y.get_distance(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER_Z: return cylinder_z.get_distance(particle_container, surface) - elif surface["type"] == SURFACE_SPHERE: - return sphere.get_distance(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER: return cylinder.get_distance(particle_container, surface) + elif surface["type"] == SURFACE_SPHERE: + return sphere.get_distance(particle_container, surface) else: return quadric.get_distance(particle_container, surface) From 842f88b11d978c269f792f059f23f6b5106a7789 Mon Sep 17 00:00:00 2001 From: Melek Derman <48313913+melekderman@users.noreply.github.com> Date: Thu, 19 Feb 2026 01:03:30 -0800 Subject: [PATCH 10/50] define ConeX, ConeY, ConeZ and general cylinder using quadric --- mcdc/constant.py | 3 + mcdc/object_/surface.py | 216 ++++++++- mcdc/transport/geometry/surface/cylinder.py | 163 ------- mcdc/transport/geometry/surface/interface.py | 41 +- .../transport/geometry/surface/cylinder.py | 458 ------------------ 5 files changed, 236 insertions(+), 645 deletions(-) delete mode 100644 mcdc/transport/geometry/surface/cylinder.py delete mode 100644 test/unit/transport/geometry/surface/cylinder.py diff --git a/mcdc/constant.py b/mcdc/constant.py index b0a17e64..5e3d02fa 100644 --- a/mcdc/constant.py +++ b/mcdc/constant.py @@ -60,6 +60,9 @@ SURFACE_CYLINDER = 8 SURFACE_SPHERE = 9 SURFACE_QUADRIC = 10 +SURFACE_CONE_X = 11 +SURFACE_CONE_Y = 12 +SURFACE_CONE_Z = 13 # Boolean operator BOOL_AND = -1 diff --git a/mcdc/object_/surface.py b/mcdc/object_/surface.py index 98c51f12..6d8007a7 100644 --- 
a/mcdc/object_/surface.py +++ b/mcdc/object_/surface.py @@ -21,6 +21,10 @@ SURFACE_PLANE, SURFACE_SPHERE, SURFACE_QUADRIC, + SURFACE_SPHERE, + SURFACE_CONE_X, + SURFACE_CONE_Y, + SURFACE_CONE_Z, ) from mcdc.object_.base import ObjectNonSingleton from mcdc.object_.cell import Region @@ -223,6 +227,31 @@ def __repr__(self): r = (x**2 + y**2 + z**2 - self.J) ** 0.5 text += f" - Center (x, y, z): ({x}, {y}, {z}) cm\n" text += f" - Radius: {r} cm\n" + elif self.type == SURFACE_CYLINDER: + text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" + text += f" {self.D}, {self.E}, {self.F},\n" + text += f" {self.G}, {self.H}, {self.I}, {self.J}\n" + elif self.type == SURFACE_CONE_X: + t_sq = -self.A + y0 = -0.5 * self.H + z0 = -0.5 * self.I + x0 = 0.0 if t_sq == 0.0 else 0.5 * self.G / t_sq + text += f" - Apex (x, y, z): ({x0}, {y0}, {z0}) cm\n" + text += f" - tan^2(theta): {t_sq}\n" + elif self.type == SURFACE_CONE_Y: + t_sq = -self.B + x0 = -0.5 * self.G + z0 = -0.5 * self.I + y0 = 0.0 if t_sq == 0.0 else 0.5 * self.H / t_sq + text += f" - Apex (x, y, z): ({x0}, {y0}, {z0}) cm\n" + text += f" - tan^2(theta): {t_sq}\n" + elif self.type == SURFACE_CONE_Z: + t_sq = -self.C + x0 = -0.5 * self.G + y0 = -0.5 * self.H + z0 = 0.0 if t_sq == 0.0 else 0.5 * self.I / t_sq + text += f" - Apex (x, y, z): ({x0}, {y0}, {z0}) cm\n" + text += f" - tan^2(theta): {t_sq}\n" elif self.type == SURFACE_QUADRIC: text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" text += f" {self.D}, {self.E}, {self.F},\n" @@ -513,24 +542,23 @@ def CylinderZ( def Cylinder( cls, name: str = "", - A: float = 0.0, - B: float = 0.0, - C: float = 0.0, - G: float = 0.0, - H: float = 0.0, - I: float = 0.0, - J: float = 0.0, + radius: float = 0.0, + axis: Iterable[float] = [0.0, 0.0, 1.0], + point: Iterable[float] = [0.0, 0.0, 0.0], boundary_condition: str = "none", ): """ - Create a general infinite cylinder (diagonal quadric without cross terms): - A x^2 + B y^2 + C z^2 + G x + H y + I z + J = 0 + Create a general 
infinite cylinder with an arbitrary axis. Parameters ---------- name : str, optional - A, B, C, G, H, I, J : float - Cylinder coefficients. + radius : float + Cylinder radius (cm). + axis : (3,) array_like of float + Direction vector of the cylinder axis (normalized automatically). + point : (3,) array_like of float + A point on the cylinder axis (cm). boundary_condition : {"none","vacuum","reflective"}, optional Returns @@ -540,17 +568,31 @@ def Cylinder( """ type_ = SURFACE_CYLINDER surface = cls(type_, name, boundary_condition) - surface.linear = False + # Axis and point + ax, ay, az = axis + norm = (ax**2 + ay**2 + az**2) ** 0.5 + dx, dy, dz = ax / norm, ay / norm, az / norm + px, py, pz = point + r = radius + # Coefficients - surface.A = A - surface.B = B - surface.C = C - surface.G = G - surface.H = H - surface.I = I - surface.J = J + surface.A = 1.0 - dx**2 + surface.B = 1.0 - dy**2 + surface.C = 1.0 - dz**2 + surface.D = -2.0 * dx * dy + surface.E = -2.0 * dx * dz + surface.F = -2.0 * dy * dz + Qpx = (1.0 - dx**2) * px - dx * dy * py - dx * dz * pz + Qpy = -dx * dy * px + (1.0 - dy**2) * py - dy * dz * pz + Qpz = -dx * dz * px - dy * dz * py + (1.0 - dz**2) * pz + surface.G = -2.0 * Qpx + surface.H = -2.0 * Qpy + surface.I = -2.0 * Qpz + pdotd = px * dx + py * dy + pz * dz + surface.J = px**2 + py**2 + pz**2 - pdotd**2 - r**2 + return surface @classmethod @@ -598,6 +640,136 @@ def Sphere( surface.I = -2.0 * z surface.J = x**2 + y**2 + z**2 - r**2 return surface + + @classmethod + def ConeX( + cls, + name: str = "", + apex: Iterable[float] = [0.0, 0.0, 0.0], + t_sq: float = 1.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cone with axis along the x-axis. + + Equation: (y - y0)^2 + (z - z0)^2 - t_sq * (x - x0)^2 = 0 + + Parameters + ---------- + name : str, optional + apex : (3,) array_like of float + Cone apex (x0, y0, z0) in cm. + t_sq : float + Squared tangent of the half-angle: t_sq = tan^2(theta). 
+ For a 45-degree half-angle use t_sq = 1.0. + boundary_condition : {"none","vacuum","reflective"}, optional + + Returns + ------- + Surface + Cone-X surface. + """ + type_ = SURFACE_CONE_X + surface = cls(type_, name, boundary_condition) + surface.linear = False + + x0, y0, z0 = apex + + surface.A = -t_sq + surface.B = 1.0 + surface.C = 1.0 + surface.G = 2.0 * t_sq * x0 + surface.H = -2.0 * y0 + surface.I = -2.0 * z0 + surface.J = y0**2 + z0**2 - t_sq * x0**2 + + return surface + + @classmethod + def ConeY( + cls, + name: str = "", + apex: Iterable[float] = [0.0, 0.0, 0.0], + t_sq: float = 1.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cone with axis along the y-axis. + + Equation: (x - x0)^2 + (z - z0)^2 - t_sq * (y - y0)^2 = 0 + + Parameters + ---------- + name : str, optional + apex : (3,) array_like of float + Cone apex (x0, y0, z0) in cm. + t_sq : float + Squared tangent of the half-angle: t_sq = tan^2(theta). + boundary_condition : {"none","vacuum","reflective"}, optional + + Returns + ------- + Surface + Cone-Y surface. + """ + type_ = SURFACE_CONE_Y + surface = cls(type_, name, boundary_condition) + surface.linear = False + + x0, y0, z0 = apex + + surface.A = 1.0 + surface.B = -t_sq + surface.C = 1.0 + surface.G = -2.0 * x0 + surface.H = 2.0 * t_sq * y0 + surface.I = -2.0 * z0 + surface.J = x0**2 + z0**2 - t_sq * y0**2 + + return surface + + @classmethod + def ConeZ( + cls, + name: str = "", + apex: Iterable[float] = [0.0, 0.0, 0.0], + t_sq: float = 1.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cone with axis along the z-axis. + + Equation: (x - x0)^2 + (y - y0)^2 - t_sq * (z - z0)^2 = 0 + + Parameters + ---------- + name : str, optional + apex : (3,) array_like of float + Cone apex (x0, y0, z0) in cm. + t_sq : float + Squared tangent of the half-angle: t_sq = tan^2(theta). + boundary_condition : {"none","vacuum","reflective"}, optional + + Returns + ------- + Surface + Cone surface. 
+ """ + type_ = SURFACE_CONE_Z + surface = cls(type_, name, boundary_condition) + surface.linear = False + + x0, y0, z0 = apex + + surface.A = 1.0 + surface.B = 1.0 + surface.C = -t_sq + surface.G = -2.0 * x0 + surface.H = -2.0 * y0 + surface.I = 2.0 * t_sq * z0 + surface.J = x0**2 + y0**2 - t_sq * z0**2 + + return surface @classmethod def Quadric( @@ -733,6 +905,12 @@ def decode_type(type_): return "Infinite cylinder-Z surface" elif type_ == SURFACE_CYLINDER: return "General cylinder surface" + elif type_ == SURFACE_CONE_X: + return "Infinite cone-X surface" + elif type_ == SURFACE_CONE_Y: + return "Infinite cone-Y surface" + elif type_ == SURFACE_CONE_Z: + return "Infinite cone-Z surface" elif type_ == SURFACE_SPHERE: return "Sphere surface" elif type_ == SURFACE_QUADRIC: diff --git a/mcdc/transport/geometry/surface/cylinder.py b/mcdc/transport/geometry/surface/cylinder.py deleted file mode 100644 index 5e59ff58..00000000 --- a/mcdc/transport/geometry/surface/cylinder.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -Cylinder: General infinite cylinder - -f(x, y, z) = Axx + Byy + Czz + Gx + Hy + Iz + J -""" - -import math - -from numba import njit - -from mcdc.constant import ( - COINCIDENCE_TOLERANCE, - INF, -) - - -@njit -def evaluate(particle_container, surface): - particle = particle_container[0] - # Particle parameters - x = particle["x"] - y = particle["y"] - z = particle["z"] - - # Surface parameters - A = surface["A"] - B = surface["B"] - C = surface["C"] - G = surface["G"] - H = surface["H"] - I = surface["I"] - J = surface["J"] - - return A * x**2 + B * y**2 + C * z**2 + G * x + H * y + I * z + J - - -@njit -def reflect(particle_container, surface): - particle = particle_container[0] - # Particle coordinate - x = particle["x"] - y = particle["y"] - z = particle["z"] - ux = particle["ux"] - uy = particle["uy"] - uz = particle["uz"] - - # Surface coefficients - A = surface["A"] - B = surface["B"] - C = surface["C"] - G = surface["G"] - H = surface["H"] - I = 
surface["I"] - - # Surface normal - dx = 2 * A * x + G - dy = 2 * B * y + H - dz = 2 * C * z + I - norm = (dx**2 + dy**2 + dz**2) ** 0.5 - nx = dx / norm - ny = dy / norm - nz = dz / norm - - # Reflect - c = 2.0 * (nx * ux + ny * uy + nz * uz) - particle["ux"] -= c * nx - particle["uy"] -= c * ny - particle["uz"] -= c * nz - - -@njit -def get_normal_component(particle_container, surface): - particle = particle_container[0] - # Particle coordinate - x = particle["x"] - y = particle["y"] - z = particle["z"] - ux = particle["ux"] - uy = particle["uy"] - uz = particle["uz"] - - # Surface coefficients - A = surface["A"] - B = surface["B"] - C = surface["C"] - G = surface["G"] - H = surface["H"] - I = surface["I"] - - # Surface normal - dx = 2 * A * x + G - dy = 2 * B * y + H - dz = 2 * C * z + I - norm = (dx**2 + dy**2 + dz**2) ** 0.5 - nx = dx / norm - ny = dy / norm - nz = dz / norm - - return nx * ux + ny * uy + nz * uz - - -@njit -def get_distance(particle_container, surface): - particle = particle_container[0] - # Particle coordinate - x = particle["x"] - y = particle["y"] - z = particle["z"] - ux = particle["ux"] - uy = particle["uy"] - uz = particle["uz"] - - # Surface coefficients - A = surface["A"] - B = surface["B"] - C = surface["C"] - G = surface["G"] - H = surface["H"] - I = surface["I"] - - # Coincident? - f = evaluate(particle_container, surface) - coincident = abs(f) < COINCIDENCE_TOLERANCE - if coincident: - # Moving away or tangent? 
- if ( - get_normal_component(particle_container, surface) - >= 0.0 - COINCIDENCE_TOLERANCE - ): - return INF - - # Quadratic equation constants - a = A * ux * ux + B * uy * uy + C * uz * uz - b = 2 * (A * x * ux + B * y * uy + C * z * uz) + G * ux + H * uy + I * uz - c = f - - determinant = b * b - 4.0 * a * c - - # Roots are complex : no intersection - # Roots are identical: tangent - # ==> return huge number - if determinant <= 0.0: - return INF - else: - # Get the roots - denom = 2.0 * a - sqrt = math.sqrt(determinant) - root_1 = (-b + sqrt) / denom - root_2 = (-b - sqrt) / denom - - # Coincident? - if coincident: - return max(root_1, root_2) - - # Negative roots, moving away from the surface - if root_1 < 0.0: - root_1 = INF - if root_2 < 0.0: - root_2 = INF - - # Return the smaller root - return min(root_1, root_2) diff --git a/mcdc/transport/geometry/surface/interface.py b/mcdc/transport/geometry/surface/interface.py index 8ba7ac88..9e3427b9 100644 --- a/mcdc/transport/geometry/surface/interface.py +++ b/mcdc/transport/geometry/surface/interface.py @@ -16,7 +16,6 @@ import mcdc.transport.geometry.surface.cylinder_y as cylinder_y import mcdc.transport.geometry.surface.cylinder_z as cylinder_z import mcdc.transport.geometry.surface.sphere as sphere -import mcdc.transport.geometry.surface.cylinder as cylinder import mcdc.transport.geometry.surface.quadric as quadric from mcdc.constant import ( @@ -32,6 +31,10 @@ SURFACE_CYLINDER_Z, SURFACE_CYLINDER, SURFACE_SPHERE, + SURFACE_QUADRIC, + SURFACE_CONE_X, + SURFACE_CONE_Y, + SURFACE_CONE_Z, ) from mcdc.transport.util import find_bin @@ -89,7 +92,14 @@ def evaluate(particle_container, surface, data): elif surface["type"] == SURFACE_CYLINDER_Z: result = cylinder_z.evaluate(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER: - result = cylinder.evaluate(particle_container, surface) + result = quadric.evaluate(particle_container, surface) + elif ( + surface["type"] == SURFACE_QUADRIC + or 
surface["type"] == SURFACE_CONE_X + or surface["type"] == SURFACE_CONE_Y + or surface["type"] == SURFACE_CONE_Z + ): + result = quadric.evaluate(particle_container, surface) elif surface["type"] == SURFACE_SPHERE: result = sphere.evaluate(particle_container, surface) else: @@ -141,7 +151,14 @@ def get_normal_component(particle_container, speed, surface, data): elif surface["type"] == SURFACE_CYLINDER_Z: result = cylinder_z.get_normal_component(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER: - result = cylinder.get_normal_component(particle_container, surface) + result = quadric.get_normal_component(particle_container, surface) + elif ( + surface["type"] == SURFACE_QUADRIC + or surface["type"] == SURFACE_CONE_X + or surface["type"] == SURFACE_CONE_Y + or surface["type"] == SURFACE_CONE_Z + ): + result = quadric.get_normal_component(particle_container, surface) elif surface["type"] == SURFACE_SPHERE: result = sphere.get_normal_component(particle_container, surface) else: @@ -182,7 +199,14 @@ def reflect(particle_container, surface): elif surface["type"] == SURFACE_CYLINDER_Z: return cylinder_z.reflect(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER: - return cylinder.reflect(particle_container, surface) + return quadric.reflect(particle_container, surface) + elif ( + surface["type"] == SURFACE_QUADRIC + or surface["type"] == SURFACE_CONE_X + or surface["type"] == SURFACE_CONE_Y + or surface["type"] == SURFACE_CONE_Z + ): + return quadric.reflect(particle_container, surface) elif surface["type"] == SURFACE_SPHERE: return sphere.reflect(particle_container, surface) else: @@ -228,7 +252,14 @@ def _get_distance_static(particle_container, surface): elif surface["type"] == SURFACE_CYLINDER_Z: return cylinder_z.get_distance(particle_container, surface) elif surface["type"] == SURFACE_CYLINDER: - return cylinder.get_distance(particle_container, surface) + return quadric.get_distance(particle_container, surface) + elif ( + 
surface["type"] == SURFACE_QUADRIC + or surface["type"] == SURFACE_CONE_X + or surface["type"] == SURFACE_CONE_Y + or surface["type"] == SURFACE_CONE_Z + ): + return quadric.get_distance(particle_container, surface) elif surface["type"] == SURFACE_SPHERE: return sphere.get_distance(particle_container, surface) else: diff --git a/test/unit/transport/geometry/surface/cylinder.py b/test/unit/transport/geometry/surface/cylinder.py deleted file mode 100644 index 3252ff6c..00000000 --- a/test/unit/transport/geometry/surface/cylinder.py +++ /dev/null @@ -1,458 +0,0 @@ -import mcdc -import numpy as np - -#### - -from mcdc.constant import ( - COINCIDENCE_TOLERANCE, - INF, -) -from mcdc.main import preparation - -# ====================================================================================== -# Setup -# ====================================================================================== - -# Reference surface description -# General cylinder parallel to z-axis, centered at origin, radius R = 5.0 -# f(x, y, z) = x^2 + y^2 - R^2 -R = 5.0 -durations = np.array([5.0, 5.0, 5.0]) -velocities = np.zeros((3, 3)) -velocities[:, 0] = np.array([-1.0, 2.0, -3.0]) - -# Test object: static surface -static_surface = mcdc.Surface.Cylinder(A=1.0, B=1.0, J=-(R**2)) - -# Test object: moving surface -moving_surface = mcdc.Surface.Cylinder(A=1.0, B=1.0, J=-(R**2)) -moving_surface.move(velocities, durations) - -# Test object: static surface with general coefficients -general_surface = mcdc.Surface.Cylinder( - A=1.5, B=0.5, C=2.0, G=-1.0, H=3.0, I=-0.5, J=-4.0 -) - -# Create the dummy simulation structure and data -structure_container, data = preparation() -structure = structure_container[0] - -# Get the "compiled" test objects -static_surface = structure["surfaces"][0] -moving_surface = structure["surfaces"][1] -general_surface = structure["surfaces"][2] - -# Particle object for testing -import mcdc.numba_types as type_ - -particle_container = np.zeros(1, type_.particle_data) -particle 
= particle_container[0] - -# Miscellanies -# For quadratic surfaces, position offset delta gives f ~ 2*R*delta, -# so delta < COINCIDENCE_TOLERANCE / (2*R) is needed for coincidence. -TINY = COINCIDENCE_TOLERANCE / (2.0 * R) * 0.8 - -# Load modules to be tested -from mcdc.transport.geometry.surface import ( - interface, - cylinder, -) - -# ===================================================================================== -# Cylinder core functions -# ===================================================================================== - - -def test_evaluate(): - def run(x, y, answer): - particle["x"] = x - particle["y"] = y - result = cylinder.evaluate(particle_container, static_surface) - assert np.isclose(result, answer) - - # Positive side (outside) - run(x=8.0, y=0.0, answer=39.0) - # Negative side (inside) - run(x=3.0, y=0.0, answer=-16.0) - - -def test_evaluate_general_coefficients(): - def run(x, y, z, answer): - particle["x"] = x - particle["y"] = y - particle["z"] = z - result = cylinder.evaluate(particle_container, general_surface) - assert np.isclose(result, answer) - - run(x=1.0, y=-2.0, z=0.5, answer=-7.25) - run(x=2.0, y=1.0, z=-1.0, answer=6.0) - - -def test_reflect(): - def run(ux, answer): - particle["x"] = R - particle["y"] = 0.0 - particle["ux"] = ux - particle["uy"] = 0.0 - cylinder.reflect(particle_container, static_surface) - assert np.isclose(particle["ux"], answer) - - # From positive direction - run(ux=0.2, answer=-0.2) - # From negative direction - run(ux=-0.1, answer=0.1) - - -def test_get_normal_component(): - def run(ux, answer): - particle["x"] = R - particle["y"] = 0.0 - particle["ux"] = ux - particle["uy"] = 0.0 - result = cylinder.get_normal_component(particle_container, static_surface) - assert np.isclose(result, answer) - - # Positive direction - run(ux=0.4, answer=0.4) - # Negative direction - run(ux=-0.2, answer=-0.2) - # Parallel - run(ux=0.0, answer=0.0) - - -def test_get_normal_component_general_coefficients(): - 
particle["x"] = 1.0 - particle["y"] = -2.0 - particle["z"] = 0.5 - particle["ux"] = 0.4 - particle["uy"] = -0.2 - particle["uz"] = 0.1 - - result = cylinder.get_normal_component(particle_container, general_surface) - answer = (2.0 * 0.4 + 1.0 * (-0.2) + 1.5 * 0.1) / (2.0**2 + 1.0**2 + 1.5**2) ** 0.5 - assert np.isclose(result, answer) - - -def test_get_distance(): - def run(x, ux, answer): - particle["x"] = x - particle["y"] = 0.0 - particle["ux"] = ux - particle["uy"] = 0.0 - result = cylinder.get_distance(particle_container, static_surface) - assert np.isclose(result, answer) - - # Positive side (outside) - x = 8.0 - ## Moving closer - run(x, ux=-0.4, answer=7.5) - ## Moving away - run(x, ux=0.3, answer=INF) - ## Parallel - run(x, ux=0.0, answer=INF) - - # Negative side (inside) - x = 3.0 - ## Moving outward (toward near surface) - run(x, ux=0.4, answer=5.0) - ## Moving inward (toward far surface) - run(x, ux=-0.3, answer=80.0 / 3.0) - ## Parallel - run(x, ux=0.0, answer=INF) - - # At surface, within tolerance, on the positive side - x = R + TINY - ## Moving away - run(x, ux=0.4, answer=INF) - ## Moving closer (crosses to far side) - run(x, ux=-0.4, answer=2.0 * R / 0.4) - ## Parallel - run(x, ux=0.0, answer=INF) - - # At surface, within tolerance, on the negative side - x = R - TINY - ## Moving away (toward center, crosses to far side) - run(x, ux=-0.4, answer=2.0 * R / 0.4) - ## Moving closer - run(x, ux=0.4, answer=INF) - ## Parallel - run(x, ux=0.0, answer=INF) - - -# ===================================================================================== -# Cylinder integrated transport interface -# ===================================================================================== - - -def test_interface_reflect(): - def run(ux, answer): - particle["x"] = R - particle["y"] = 0.0 - particle["ux"] = ux - particle["uy"] = 0.0 - interface.reflect(particle_container, static_surface) - assert np.isclose(particle["ux"], answer) - - # From positive direction - 
run(ux=0.2, answer=-0.2) - # From negative direction - run(ux=-0.1, answer=0.1) - - -def test_interface_evaluate(): - def run_static(x, y, answer): - particle["x"] = x - particle["y"] = y - result = interface.evaluate(particle_container, static_surface, data) - assert np.isclose(result, answer) - - def run_moving(x, y, t, answer): - particle["x"] = x - particle["y"] = y - particle["t"] = t - result = interface.evaluate(particle_container, moving_surface, data) - assert np.isclose(result, answer) - - # ================================================================================= - # Static - # ================================================================================= - - # Positive side (outside) - run_static(x=8.0, y=0.0, answer=39.0) - # Negative side (inside) - run_static(x=3.0, y=0.0, answer=-16.0) - - # ================================================================================= - # Moving - # ================================================================================= - - # First bin: center x = -3.0 - t = 3.0 - ## Positive side (outside) - run_moving(x=4.0, y=0.0, t=t, answer=24.0) - ## Negative side (inside) - run_moving(x=-4.0, y=0.0, t=t, answer=-24.0) - - # First bin, at grid: center x = -5.0 - t = 5.0 - ## Positive side (outside) - run_moving(x=4.0, y=0.0, t=t, answer=56.0) - ## Negative side (inside) - run_moving(x=-4.0, y=0.0, t=t, answer=-24.0) - - # Interior bin: center x = -1.0 - t = 12.0 - ## Positive side (outside) - run_moving(x=6.0, y=0.0, t=t, answer=24.0) - ## Negative side (inside) - run_moving(x=-4.0, y=0.0, t=t, answer=-16.0) - - # Final bin: center x = -10.0 - t = 100.0 - ## Positive side (outside) - run_moving(x=0.0, y=0.0, t=t, answer=75.0) - ## Negative side (inside) - run_moving(x=-8.0, y=0.0, t=t, answer=-21.0) - - -def test_interface_get_normal_component(): - def run_static(ux, answer): - particle["x"] = R - particle["y"] = 0.0 - particle["ux"] = ux - particle["uy"] = 0.0 - speed = 2.0 # Arbitrary - result = 
interface.get_normal_component( - particle_container, speed, static_surface, data - ) - assert np.isclose(result, answer) - - def run_moving(x, ux, t, speed, answer): - particle["x"] = x - particle["y"] = 0.0 - particle["ux"] = ux - particle["uy"] = 0.0 - particle["t"] = t - result = interface.get_normal_component( - particle_container, speed, moving_surface, data - ) - assert np.isclose(result, answer) - - # ================================================================================= - # Static - # ================================================================================= - - # Positive direction - run_static(ux=0.4, answer=0.4) - # Negative direction - run_static(ux=-0.2, answer=-0.2) - # Parallel - run_static(ux=0.0, answer=0.0) - - # ================================================================================= - # Moving - # ================================================================================= - - # First bin: center x = -3.0, velocity = -1.0 - t = 3.0 - x = -3.0 + R - run_moving(x, ux=0.4, t=t, speed=2.0, answer=0.9) - run_moving(x, ux=-0.6, t=t, speed=2.0, answer=-0.1) - run_moving(x, ux=-0.5, t=t, speed=2.0, answer=0.0) - - # Interior bin: center x = 1.0, velocity = 2.0 - t = 8.0 - x = 1.0 + R - run_moving(x, ux=0.4, t=t, speed=2.0, answer=-0.6) - run_moving(x, ux=1.0, t=t, speed=2.0, answer=0.0) - run_moving(x, ux=0.0, t=t, speed=4.0, answer=-0.5) - - # Interior bin: center x = -1.0, velocity = -3.0 - t = 12.0 - x = -1.0 + R - run_moving(x, ux=-0.2, t=t, speed=10.0, answer=0.1) - - -def test_interface_check_sense(): - def run_static(x, y, ux, answer): - particle["x"] = x - particle["y"] = y - particle["ux"] = ux - particle["uy"] = 0.0 - speed = 2.0 # Arbitrary - result = interface.check_sense(particle_container, speed, static_surface, data) - assert np.isclose(result, answer) - - def run_moving(x, y, ux, t, speed, answer): - particle["x"] = x - particle["y"] = y - particle["ux"] = ux - particle["uy"] = 0.0 - particle["t"] = t - 
result = interface.check_sense(particle_container, speed, moving_surface, data) - assert np.isclose(result, answer) - - # ================================================================================= - # Static - # ================================================================================= - - # Not at surface - ux = 0.3 # Arbitrary - ## Positive side (outside) - run_static(x=8.0, y=0.0, ux=ux, answer=True) - ## Negative side (inside) - run_static(x=3.0, y=0.0, ux=ux, answer=False) - - # At surface, positive side - x = R + TINY - ## Positive direction (outward) - run_static(x, y=0.0, ux=0.4, answer=True) - ## Negative direction (inward) - run_static(x, y=0.0, ux=-0.4, answer=False) - - # At surface, negative side - x = R - TINY - ## Positive direction (outward) - run_static(x, y=0.0, ux=0.2, answer=True) - ## Negative direction (inward) - run_static(x, y=0.0, ux=-0.2, answer=False) - - # ================================================================================= - # Moving - # ================================================================================= - - # First bin: center x = -3.0 - t = 3.0 - speed = 2.0 - ## Not at surface - run_moving(x=4.0, y=0.0, ux=0.2, t=t, speed=speed, answer=True) - run_moving(x=-4.0, y=0.0, ux=0.2, t=t, speed=speed, answer=False) - ## At surface - x = -3.0 + R - run_moving(x, y=0.0, ux=0.4, t=t, speed=speed, answer=True) - run_moving(x, y=0.0, ux=-0.6, t=t, speed=speed, answer=False) - run_moving(x, y=0.0, ux=-0.5, t=t, speed=speed, answer=False) - - # Interior bin: center x = 1.0 - t = 8.0 - speed = 4.0 - x = 1.0 + R - run_moving(x, y=0.0, ux=0.8, t=t, speed=speed, answer=True) - run_moving(x, y=0.0, ux=0.5, t=t, speed=speed, answer=False) - - -def test_interface_get_distance(): - def run_static(x, ux, answer): - particle["x"] = x - particle["y"] = 0.0 - particle["ux"] = ux - particle["uy"] = 0.0 - speed = 2.0 # Arbitrary - result = interface.get_distance(particle_container, speed, static_surface, data) - assert 
np.isclose(result, answer) - - def run_moving(x, ux, t, speed, answer): - particle["x"] = x - particle["y"] = 0.0 - particle["ux"] = ux - particle["uy"] = 0.0 - particle["t"] = t - result = interface.get_distance(particle_container, speed, moving_surface, data) - assert np.isclose(result, answer) - - # ================================================================================= - # Static - # ================================================================================= - - # Positive side (outside) - x = 8.0 - ## Moving closer - run_static(x, ux=-0.4, answer=7.5) - ## Moving away - run_static(x, ux=0.3, answer=INF) - ## Parallel - run_static(x, ux=0.0, answer=INF) - - # Negative side (inside) - x = 3.0 - ## Moving outward (toward near surface) - run_static(x, ux=0.4, answer=5.0) - ## Moving inward (toward far surface) - run_static(x, ux=-0.3, answer=80.0 / 3.0) - ## Parallel - run_static(x, ux=0.0, answer=INF) - - # At surface, on the positive side - x = R + TINY - ## Moving away - run_static(x, ux=0.4, answer=INF) - ## Moving closer (crosses to far side) - run_static(x, ux=-0.4, answer=2.0 * R / 0.4) - ## Parallel - run_static(x, ux=0.0, answer=INF) - - # At surface, on the negative side - x = R - TINY - ## Moving away (toward center, crosses to far side) - run_static(x, ux=-0.4, answer=2.0 * R / 0.4) - ## Moving closer - run_static(x, ux=0.4, answer=INF) - ## Parallel - run_static(x, ux=0.0, answer=INF) - - # ================================================================================= - # Moving - # ================================================================================= - - # First bin intersection - run_moving(x=6.0, ux=-1.0, t=1.0, speed=2.0, answer=4.0) - - # Crossing after entering the second bin - run_moving(x=10.0, ux=-1.0, t=2.0, speed=2.0, answer=8.0) - - # Moving away from the surface - run_moving(x=10.0, ux=1.0, t=6.0, speed=2.0, answer=INF) - - # Starting inside and moving outward - run_moving(x=-2.0, ux=1.0, t=2.0, speed=2.0, 
answer=10.0 / 3.0) From d7322dca4aa2ad5092e606ce722fc80da4bbce25 Mon Sep 17 00:00:00 2001 From: Melek Derman <48313913+melekderman@users.noreply.github.com> Date: Thu, 19 Feb 2026 01:41:42 -0800 Subject: [PATCH 11/50] back in black --- mcdc/object_/surface.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/mcdc/object_/surface.py b/mcdc/object_/surface.py index 6d8007a7..2e27bfc1 100644 --- a/mcdc/object_/surface.py +++ b/mcdc/object_/surface.py @@ -21,7 +21,6 @@ SURFACE_PLANE, SURFACE_SPHERE, SURFACE_QUADRIC, - SURFACE_SPHERE, SURFACE_CONE_X, SURFACE_CONE_Y, SURFACE_CONE_Z, @@ -217,9 +216,6 @@ def __repr__(self): r = (x**2 + y**2 - self.J) ** 0.5 text += f" - Center (x, y): ({x}, {y}) cm\n" text += f" - Radius: {r} cm\n" - elif self.type == SURFACE_CYLINDER: - text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" - text += f" {self.G}, {self.H}, {self.I}, {self.J}\n" elif self.type == SURFACE_SPHERE: x = -0.5 * self.G y = -0.5 * self.H @@ -640,7 +636,7 @@ def Sphere( surface.I = -2.0 * z surface.J = x**2 + y**2 + z**2 - r**2 return surface - + @classmethod def ConeX( cls, From abbdec70316cd13861f0d78a1599be551acf4de9 Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Thu, 26 Feb 2026 23:50:34 +0700 Subject: [PATCH 12/50] rename mcdc -> simulation --- mcdc/main.py | 93 +++++---- mcdc/transport/data.py | 6 +- mcdc/transport/distribution.py | 34 +-- mcdc/transport/geometry/interface.py | 72 ++++--- mcdc/transport/mesh/interface.py | 24 +-- mcdc/transport/mpi.py | 12 +- mcdc/transport/particle.py | 4 +- mcdc/transport/particle_bank.py | 90 ++++---- mcdc/transport/physics/interface.py | 20 +- mcdc/transport/physics/neutron/interface.py | 16 +- mcdc/transport/physics/neutron/multigroup.py | 76 ++++--- mcdc/transport/physics/neutron/native.py | 174 ++++++++------- mcdc/transport/simulation.py | 209 ++++++++++--------- mcdc/transport/source.py | 10 +- mcdc/transport/tally/closeout.py | 140 +++++++------ mcdc/transport/tally/filter.py | 8 +- 
mcdc/transport/tally/score.py | 102 ++++----- mcdc/transport/technique.py | 18 +- 18 files changed, 591 insertions(+), 517 deletions(-) diff --git a/mcdc/main.py b/mcdc/main.py index e005f989..b0a9166a 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -21,9 +21,9 @@ def run(): time_total_start = MPI.Wtime() # Get settings and MPI master status - from mcdc.object_.simulation import simulation + from mcdc.object_.simulation import simulation as simulationPy - settings = simulation.settings + settings = simulationPy.settings master = MPI.COMM_WORLD.Get_rank() == 0 # Override settings with command-line arguments @@ -46,14 +46,15 @@ def run(): time_prep_start = MPI.Wtime() # Generate the program state: - # - `mcdc`: the simulation structure, storing fixed side data and meta data + # - `simulation`: the simulation, storing fixed side data and meta data that + # describes arbitrarily-sized data # - `data`: a long 1D array storing arbitrarily-sized data of the simulation - # NOTE: The simulation structure needs to be generated in a container, which is a + # NOTE: The simulation structure to be generated in a container, which is a # a one-sized array that stores the structure. The container is needed to # ensure proper mutability and tracking of the structure when running in - # different kinds of machines supported. - mcdc_container, data = preparation() - mcdc = mcdc_container[0] + # different kinds of machines supported by the Numba compilation framework. 
+ simulation_container, data = preparation() + simulation = simulation_container[0] # Print headers if master: @@ -61,7 +62,7 @@ def run(): print_module.print_configuration() print(" Now running the particle transport...") if settings.eigenvalue_mode: - print_module.print_eigenvalue_header(mcdc) + print_module.print_eigenvalue_header(simulation) # TIMER: preparation time_prep_end = MPI.Wtime() @@ -77,9 +78,9 @@ def run(): import mcdc.transport.simulation as simulation_module if settings.eigenvalue_mode: - simulation_module.eigenvalue_simulation(mcdc_container, data) + simulation_module.eigenvalue_simulation(simulation_container, data) else: - simulation_module.fixed_source_simulation(mcdc_container, data) + simulation_module.fixed_source_simulation(simulation_container, data) # TIMER: simulation time_simulation_end = MPI.Wtime() @@ -94,7 +95,7 @@ def run(): time_output_start = MPI.Wtime() # Generate hdf5 output file - output_module.generate_output(mcdc, data) + output_module.generate_output(simulation, data) # TIMER: output time_output_end = MPI.Wtime() @@ -106,13 +107,13 @@ def run(): time_total_end = MPI.Wtime() # Manage timers - mcdc["runtime_total"] = time_total_end - time_total_start - mcdc["runtime_preparation"] = time_prep_end - time_prep_start - mcdc["runtime_simulation"] = time_simulation_end - time_simulation_start - mcdc["runtime_output"] = time_output_end - time_output_start - output_module.create_runtime_datasets(mcdc) + simulation["runtime_total"] = time_total_end - time_total_start + simulation["runtime_preparation"] = time_prep_end - time_prep_start + simulation["runtime_simulation"] = time_simulation_end - time_simulation_start + simulation["runtime_output"] = time_output_end - time_output_start + output_module.create_runtime_datasets(simulation) if master: - print_module.print_runtime(mcdc) + print_module.print_runtime(simulation) # ================================================================================== # Finalizing @@ -122,7 +123,7 @@ 
def run(): if config.target == "gpu": from mcdc.code_factory.gpu.program_builder import teardown_gpu_program - teardown_gpu_program(mcdc) + teardown_gpu_program(simulation) # ====================================================================================== @@ -135,7 +136,7 @@ def preparation(): from mpi4py import MPI - from mcdc.object_.simulation import simulation + from mcdc.object_.simulation import simulation as simulationPy from mcdc.object_.material import MaterialMG # ================================================================================== @@ -143,18 +144,18 @@ def preparation(): # ================================================================================== # Get settings - settings = simulation.settings + settings = simulationPy.settings # Set physics mode - if len(simulation.materials) == 0: + if len(simulationPy.materials) == 0: # Default physics in dummy mode settings.multigroup_mode = True else: - settings.multigroup_mode = isinstance(simulation.materials[0], MaterialMG) + settings.multigroup_mode = isinstance(simulationPy.materials[0], MaterialMG) # Set appropriate time boundary settings.time_boundary = min( - [settings.time_boundary] + [tally.time[-1] for tally in simulation.tallies] + [settings.time_boundary] + [tally.time[-1] for tally in simulationPy.tallies] ) # ================================================================================== @@ -164,29 +165,29 @@ def preparation(): # Reset time grid size of all tallies if census-based tally is desired if settings.use_census_based_tally: N_bin = settings.census_tally_frequency - for tally in simulation.tallies: + for tally in simulationPy.tallies: tally._use_census_based_tally(N_bin) # Normalize source probability norm = 0.0 - for source in simulation.sources: + for source in simulationPy.sources: norm += source.probability - for source in simulation.sources: + for source in simulationPy.sources: source.probability /= norm # Create root universe if not defined - if 
len(simulation.universes[0].cells) == 0: - simulation.universes[0].cells = simulation.cells + if len(simulationPy.universes[0].cells) == 0: + simulationPy.universes[0].cells = simulationPy.cells # Initial guess - simulation.k_eff = settings.k_init + simulationPy.k_eff = settings.k_init # Activate tally scoring for fixed-source if not settings.eigenvalue_mode: - simulation.cycle_active = True + simulationPy.cycle_active = True # All active eigenvalue cycle? elif settings.N_inactive == 0: - simulation.cycle_active = True + simulationPy.cycle_active = True # ================================================================================== # Set particle bank sizes @@ -209,10 +210,10 @@ def preparation(): size_future = int((settings.future_bank_buffer_ratio) * N_work) # Set bank size - simulation.bank_active.size[0] = size_active - simulation.bank_census.size[0] = size_census - simulation.bank_source.size[0] = size_source - simulation.bank_future.size[0] = size_future + simulationPy.bank_active.size[0] = size_active + simulationPy.bank_census.size[0] = size_census + simulationPy.bank_source.size[0] = size_source + simulationPy.bank_future.size[0] = size_future # ================================================================================== # Generate Numba-supported "Objects" @@ -223,9 +224,9 @@ def preparation(): if MPI.COMM_WORLD.Get_rank() == 0: from mcdc.code_factory.numba_objects_generator import make_literals - make_literals(simulation) - mcdc_container, data = generate_numba_objects(simulation) - mcdc = mcdc_container[0] + make_literals(simulationPy) + simulation_container, data = generate_numba_objects(simulationPy) + simulation = simulation_container[0] # Reload mcdc getters and setters import importlib @@ -269,24 +270,24 @@ def preparation(): import h5py # All ranks, take turn - for i in range(mcdc["mpi_size"]): - if mcdc["mpi_rank"] == i: + for i in range(simulation["mpi_size"]): + if simulation["mpi_rank"] == i: if settings.use_source_file: with 
h5py.File(settings.source_file_name, "r") as f: # Get source particle size N_particle = f["particles_size"][()] # Redistribute work - mpi.distribute_work(N_particle, mcdc) - N_local = mcdc["mpi_work_size"] - start = mcdc["mpi_work_start"] + mpi.distribute_work(N_particle, simulation) + N_local = simulation["mpi_work_size"] + start = simulation["mpi_work_start"] end = start + N_local # Add particles to source bank - mcdc["bank_source"]["particles"][:N_local] = f["particles"][ + simulation["bank_source"]["particles"][:N_local] = f["particles"][ start:end ] - mcdc["bank_source"]["size"] = N_local + simulation["bank_source"]["size"] = N_local MPI.COMM_WORLD.Barrier() # ================================================================================== @@ -303,4 +304,4 @@ def preparation(): # Finalize # ================================================================================== - return mcdc_container, data + return simulation_container, data diff --git a/mcdc/transport/data.py b/mcdc/transport/data.py index 51e8b9b3..c8e7a575 100644 --- a/mcdc/transport/data.py +++ b/mcdc/transport/data.py @@ -14,14 +14,14 @@ @njit -def evaluate_data(x, data_base, mcdc, data): +def evaluate_data(x, data_base, simulation, data): data_type = data_base["child_type"] ID = data_base["child_ID"] if data_type == DATA_TABLE: - table = mcdc["table_data"][ID] + table = simulation["table_data"][ID] return evaluate_table(x, table, data) elif data_type == DATA_POLYNOMIAL: - polynomial = mcdc["polynomial_data"][ID] + polynomial = simulation["polynomial_data"][ID] return evaluate_polynomial(x, polynomial, data) else: return 0.0 diff --git a/mcdc/transport/distribution.py b/mcdc/transport/distribution.py index cbd82941..3024e350 100644 --- a/mcdc/transport/distribution.py +++ b/mcdc/transport/distribution.py @@ -27,29 +27,29 @@ @njit -def sample_distribution(E, distribution, rng_state, mcdc, data, scale=False): +def sample_distribution(E, distribution, rng_state, simulation, data, scale=False): 
distribution_type = distribution["child_type"] ID = distribution["child_ID"] if distribution_type == DISTRIBUTION_TABULATED: - table = mcdc["tabulated_distributions"][ID] + table = simulation["tabulated_distributions"][ID] return sample_tabulated(table, rng_state, data) elif distribution_type == DISTRIBUTION_MULTITABLE: - multi_table = mcdc["multi_table_distributions"][ID] + multi_table = simulation["multi_table_distributions"][ID] return sample_multi_table(E, rng_state, multi_table, data, scale) elif distribution_type == DISTRIBUTION_LEVEL_SCATTERING: - level_scattering = mcdc["level_scattering_distributions"][ID] + level_scattering = simulation["level_scattering_distributions"][ID] return sample_level_scattering(E, level_scattering) elif distribution_type == DISTRIBUTION_EVAPORATION: - evaporation = mcdc["evaporation_distributions"][ID] - return sample_evaporation(E, rng_state, evaporation, mcdc, data) + evaporation = simulation["evaporation_distributions"][ID] + return sample_evaporation(E, rng_state, evaporation, simulation, data) elif distribution_type == DISTRIBUTION_MAXWELLIAN: - maxwellian = mcdc["maxwellian_distributions"][ID] - return sample_maxwellian(E, rng_state, maxwellian, mcdc, data) + maxwellian = simulation["maxwellian_distributions"][ID] + return sample_maxwellian(E, rng_state, maxwellian, simulation, data) # TODO: Should not get here else: @@ -57,20 +57,22 @@ def sample_distribution(E, distribution, rng_state, mcdc, data, scale=False): @njit -def sample_correlated_distribution(E, distribution, rng_state, mcdc, data, scale=False): +def sample_correlated_distribution( + E, distribution, rng_state, simulation, data, scale=False +): distribution_type = distribution["child_type"] ID = distribution["child_ID"] if distribution_type == DISTRIBUTION_KALBACH_MANN: - kalbach_mann = mcdc["kalbach_mann_distributions"][ID] + kalbach_mann = simulation["kalbach_mann_distributions"][ID] return sample_kalbach_mann(E, rng_state, kalbach_mann, data) elif 
distribution_type == DISTRIBUTION_TABULATED_ENERGY_ANGLE: - table = mcdc["tabulated_energy_angle_distributions"][ID] + table = simulation["tabulated_energy_angle_distributions"][ID] return sample_tabulated_energy_angle(E, rng_state, table, data) elif distribution_type == DISTRIBUTION_N_BODY: - nbody = mcdc["nbody_distributions"][ID] + nbody = simulation["nbody_distributions"][ID] E_out = sample_tabulated(nbody, rng_state, data) mu = sample_isotropic_cosine(rng_state) return E_out, mu @@ -289,9 +291,9 @@ def sample_multi_table(E, rng_state, multi_table, data, scale=False): @njit -def sample_maxwellian(E, rng_state, maxwellian, mcdc, data): +def sample_maxwellian(E, rng_state, maxwellian, simulation, data): # Get nuclear temperature - table = mcdc["table_data"][maxwellian["nuclear_temperature_ID"]] + table = simulation["table_data"][maxwellian["nuclear_temperature_ID"]] nuclear_temperature = evaluate_table(E, table, data) restriction_energy = maxwellian["restriction_energy"] @@ -319,9 +321,9 @@ def sample_level_scattering(E, level_scattering): @njit -def sample_evaporation(E, rng_state, evaporation, mcdc, data): +def sample_evaporation(E, rng_state, evaporation, simulation, data): # Get nuclear temperature - table = mcdc["table_data"][evaporation["nuclear_temperature_ID"]] + table = simulation["table_data"][evaporation["nuclear_temperature_ID"]] nuclear_temperature = evaluate_table(E, table, data) restriction_energy = evaporation["restriction_energy"] diff --git a/mcdc/transport/geometry/interface.py b/mcdc/transport/geometry/interface.py index fae78e40..23c50b72 100644 --- a/mcdc/transport/geometry/interface.py +++ b/mcdc/transport/geometry/interface.py @@ -20,7 +20,7 @@ @njit -def inspect_geometry(particle_container, mcdc, data): +def inspect_geometry(particle_container, simulation, data): """ Full geometry inspection of the particle: - Set particle top cell and material IDs (if not lost) @@ -39,7 +39,7 @@ def inspect_geometry(particle_container, mcdc, data): 
ux_global = particle["ux"] uy_global = particle["uy"] uz_global = particle["uz"] - speed = physics.particle_speed(particle_container, mcdc, data) + speed = physics.particle_speed(particle_container, simulation, data) # Default returns distance = INF @@ -47,20 +47,22 @@ def inspect_geometry(particle_container, mcdc, data): # Find top cell from root universe if unknown if particle["cell_ID"] == -1: - particle["cell_ID"] = get_cell(particle_container, UNIVERSE_ROOT, mcdc, data) + particle["cell_ID"] = get_cell( + particle_container, UNIVERSE_ROOT, simulation, data + ) # Particle is lost? if particle["cell_ID"] == -1: event = EVENT_LOST # The top cell - cell = mcdc["cells"][particle["cell_ID"]] + cell = simulation["cells"][particle["cell_ID"]] # Recursively check cells until material cell is found (or the particle is lost) while event != EVENT_LOST: # Distance to nearest surface d_surface, surface_ID = distance_to_nearest_surface( - particle_container, cell, mcdc, data + particle_container, cell, simulation, data ) # Check if smaller @@ -103,7 +105,7 @@ def inspect_geometry(particle_container, mcdc, data): # Lattice cell? 
elif cell["fill_type"] == FILL_LATTICE: # Get lattice - lattice = mcdc["lattices"][cell["fill_ID"]] + lattice = simulation["lattices"][cell["fill_ID"]] # Distance to lattice grid d_lattice = mesh.uniform.get_crossing_distance( @@ -137,9 +139,9 @@ def inspect_geometry(particle_container, mcdc, data): particle["z"] -= lattice["z0"] + (iz + 0.5) * lattice["dz"] # Get inner cell - cell_ID = get_cell(particle_container, universe_ID, mcdc, data) + cell_ID = get_cell(particle_container, universe_ID, simulation, data) if cell_ID > -1: - cell = mcdc["cells"][cell_ID] + cell = simulation["cells"][cell_ID] else: event = EVENT_LOST @@ -154,7 +156,7 @@ def inspect_geometry(particle_container, mcdc, data): # Report lost particle if event == EVENT_LOST: - report_lost_particle(particle_container, mcdc) + report_lost_particle(particle_container, simulation) # Assign particle event particle["event"] = event @@ -163,7 +165,7 @@ def inspect_geometry(particle_container, mcdc, data): @njit -def locate_particle(particle_container, mcdc, data): +def locate_particle(particle_container, simulation, data): """ Set particle cell and material IDs Return False if particle is lost @@ -187,14 +189,16 @@ def locate_particle(particle_container, mcdc, data): # Find top cell from root universe if unknown if particle["cell_ID"] == -1: - particle["cell_ID"] = get_cell(particle_container, UNIVERSE_ROOT, mcdc, data) + particle["cell_ID"] = get_cell( + particle_container, UNIVERSE_ROOT, simulation, data + ) # Particle is lost? if particle["cell_ID"] == -1: particle_is_lost = True # The top cell - cell = mcdc["cells"][particle["cell_ID"]] + cell = simulation["cells"][particle["cell_ID"]] # Recursively check cells until material cell is found (or the particle is lost) while not particle_is_lost: @@ -224,7 +228,7 @@ def locate_particle(particle_container, mcdc, data): # Lattice cell? 
elif cell["fill_type"] == FILL_LATTICE: # Get lattice - lattice = mcdc["lattices"][cell["fill_ID"]] + lattice = simulation["lattices"][cell["fill_ID"]] # Get universe ix, iy, iz = mesh.uniform.get_indices(particle_container, lattice) @@ -241,9 +245,9 @@ def locate_particle(particle_container, mcdc, data): particle["z"] -= lattice["z0"] + (iz + 0.5) * lattice["dz"] # Get inner cell - cell_ID = get_cell(particle_container, universe_ID, mcdc, data) + cell_ID = get_cell(particle_container, universe_ID, simulation, data) if cell_ID > -1: - cell = mcdc["cells"][cell_ID] + cell = simulation["cells"][cell_ID] else: particle_is_lost = True @@ -258,7 +262,7 @@ def locate_particle(particle_container, mcdc, data): # Report lost particle if particle_is_lost: - report_lost_particle(particle_container, mcdc) + report_lost_particle(particle_container, simulation) return not particle_is_lost @@ -325,19 +329,19 @@ def _rotation_matrix(rotation): @njit -def get_cell(particle_container, universe_ID, mcdc, data): +def get_cell(particle_container, universe_ID, simulation, data): """ Find and return particle cell ID in the given universe Return -1 if particle is lost """ particle = particle_container[0] - universe = mcdc["universes"][universe_ID] + universe = simulation["universes"][universe_ID] # Check over all cells in the universe for i in range(universe["N_cell"]): cell_ID = int(mcdc_get.universe.cell_IDs(i, universe, data)) - cell = mcdc["cells"][cell_ID] - if check_cell(particle_container, cell, mcdc, data): + cell = simulation["cells"][cell_ID] + if check_cell(particle_container, cell, simulation, data): return cell_ID # Particle is not found @@ -345,7 +349,7 @@ def get_cell(particle_container, universe_ID, mcdc, data): @njit -def check_cell(particle_container, cell, mcdc, data): +def check_cell(particle_container, cell, simulation, data): """ Check if the particle is inside the cell """ @@ -361,14 +365,14 @@ def check_cell(particle_container, cell, mcdc, data): N_value = 0 # 
Particle parameters - speed = physics.particle_speed(particle_container, mcdc, data) + speed = physics.particle_speed(particle_container, simulation, data) # March forward through RPN tokens for idx in range(N_token): token = int(mcdc_get.cell.region_RPN_tokens(idx, cell, data)) if token >= 0: - surface = mcdc["surfaces"][token] + surface = simulation["surfaces"][token] value[N_value] = check_sense(particle_container, speed, surface, data) N_value += 1 @@ -387,7 +391,7 @@ def check_cell(particle_container, cell, mcdc, data): @njit -def report_lost_particle(particle_container, mcdc): +def report_lost_particle(particle_container, simulation): """ Report lost particle and terminate it """ @@ -397,9 +401,9 @@ def report_lost_particle(particle_container, mcdc): y = particle["y"] z = particle["z"] t = particle["t"] - idx_batch = mcdc["idx_batch"] - idx_census = mcdc["idx_census"] - idx_work = mcdc["idx_work"] + idx_batch = simulation["idx_batch"] + idx_census = simulation["idx_census"] + idx_work = simulation["idx_work"] print("A particle is lost at (", x, y, z, t, ")") print(" (batch/census/work) indices: (", idx_batch, idx_census, idx_work, ")") particle["alive"] = False @@ -411,7 +415,7 @@ def report_lost_particle(particle_container, mcdc): @njit -def distance_to_nearest_surface(particle_container, cell, mcdc, data): +def distance_to_nearest_surface(particle_container, cell, simulation, data): """ Determine the nearest cell surface and the distance to it """ @@ -419,12 +423,12 @@ def distance_to_nearest_surface(particle_container, cell, mcdc, data): surface_ID = -1 # Particle parameters - speed = physics.particle_speed(particle_container, mcdc, data) + speed = physics.particle_speed(particle_container, simulation, data) # Iterate over all surfaces and find the minimum distance for i in range(cell["N_surface"]): candidate_surface_ID = int(mcdc_get.cell.surface_IDs(i, cell, data)) - surface = mcdc["surfaces"][candidate_surface_ID] + surface = 
simulation["surfaces"][candidate_surface_ID] d = get_distance(particle_container, speed, surface, data) if d < distance: distance = d @@ -434,11 +438,11 @@ def distance_to_nearest_surface(particle_container, cell, mcdc, data): @njit -def surface_crossing(P_arr, mcdc, data): +def surface_crossing(P_arr, simulation, data): P = P_arr[0] # Apply BC - surface = mcdc["surfaces"][P["surface_ID"]] + surface = simulation["surfaces"][P["surface_ID"]] BC = surface["boundary_condition"] if BC == BC_VACUUM: P["alive"] = False @@ -448,8 +452,8 @@ def surface_crossing(P_arr, mcdc, data): # Score tally for i in range(surface["N_tally"]): tally_ID = int(mcdc_get.surface.tally_IDs(i, surface, data)) - tally = mcdc["surface_tallies"][tally_ID] - tally_module.score.surface_tally(P_arr, surface, tally, mcdc, data) + tally = simulation["surface_tallies"][tally_ID] + tally_module.score.surface_tally(P_arr, surface, tally, simulation, data) # Need to check new cell later? if P["alive"] and not BC == BC_REFLECTIVE: diff --git a/mcdc/transport/mesh/interface.py b/mcdc/transport/mesh/interface.py index 97cdc416..2e4fda69 100644 --- a/mcdc/transport/mesh/interface.py +++ b/mcdc/transport/mesh/interface.py @@ -7,57 +7,57 @@ @njit -def get_indices(particle_container, mesh_base, mcdc, data): +def get_indices(particle_container, mesh_base, simulation, data): mesh_type = mesh_base["child_type"] mesh_ID = mesh_base["child_ID"] if mesh_type == MESH_UNIFORM: - mesh = mcdc["uniform_meshes"][mesh_ID] + mesh = simulation["uniform_meshes"][mesh_ID] return uniform.get_indices(particle_container, mesh) elif mesh_type == MESH_STRUCTURED: - mesh = mcdc["structured_meshes"][mesh_ID] + mesh = simulation["structured_meshes"][mesh_ID] return structured.get_indices(particle_container, mesh, data) return -1, -1, -1 @njit -def get_x(index, mesh_base, mcdc, data): +def get_x(index, mesh_base, simulation, data): mesh_type = mesh_base["child_type"] mesh_ID = mesh_base["child_ID"] if mesh_type == MESH_UNIFORM: - mesh = 
mcdc["uniform_meshes"][mesh_ID] + mesh = simulation["uniform_meshes"][mesh_ID] return mesh["x0"] + mesh["dx"] * index elif mesh_type == MESH_STRUCTURED: - mesh = mcdc["structured_meshes"][mesh_ID] + mesh = simulation["structured_meshes"][mesh_ID] return mcdc_get.structured_mesh.x(index, mesh, data) return 0.0 @njit -def get_y(index, mesh_base, mcdc, data): +def get_y(index, mesh_base, simulation, data): mesh_type = mesh_base["child_type"] mesh_ID = mesh_base["child_ID"] if mesh_type == MESH_UNIFORM: - mesh = mcdc["uniform_meshes"][mesh_ID] + mesh = simulation["uniform_meshes"][mesh_ID] return mesh["y0"] + mesh["dy"] * index elif mesh_type == MESH_STRUCTURED: - mesh = mcdc["structured_meshes"][mesh_ID] + mesh = simulation["structured_meshes"][mesh_ID] return mcdc_get.structured_mesh.y(index, mesh, data) return 0.0 @njit -def get_z(index, mesh_base, mcdc, data): +def get_z(index, mesh_base, simulation, data): mesh_type = mesh_base["child_type"] mesh_ID = mesh_base["child_ID"] if mesh_type == MESH_UNIFORM: - mesh = mcdc["uniform_meshes"][mesh_ID] + mesh = simulation["uniform_meshes"][mesh_ID] return mesh["z0"] + mesh["dz"] * index elif mesh_type == MESH_STRUCTURED: - mesh = mcdc["structured_meshes"][mesh_ID] + mesh = simulation["structured_meshes"][mesh_ID] return mcdc_get.structured_mesh.z(index, mesh, data) return 0.0 diff --git a/mcdc/transport/mpi.py b/mcdc/transport/mpi.py index 395957f6..35b38c41 100644 --- a/mcdc/transport/mpi.py +++ b/mcdc/transport/mpi.py @@ -4,9 +4,9 @@ @njit -def distribute_work(N_work, mcdc): - size = mcdc["mpi_size"] - rank = mcdc["mpi_rank"] +def distribute_work(N_work, simulation): + size = simulation["mpi_size"] + rank = simulation["mpi_rank"] # Total number of work work_size_total = N_work @@ -28,6 +28,6 @@ def distribute_work(N_work, mcdc): work_start += rem # Store the workload specification - mcdc["mpi_work_start"] = work_start - mcdc["mpi_work_size"] = work_size - mcdc["mpi_work_size_total"] = work_size_total + 
simulation["mpi_work_start"] = work_start + simulation["mpi_work_size"] = work_size + simulation["mpi_work_size_total"] = work_size_total diff --git a/mcdc/transport/particle.py b/mcdc/transport/particle.py index 5a01b02b..e524e1c2 100644 --- a/mcdc/transport/particle.py +++ b/mcdc/transport/particle.py @@ -7,9 +7,9 @@ @njit -def move(particle_container, distance, mcdc, data): +def move(particle_container, distance, simulation, data): particle = particle_container[0] - ut = 1.0 / physics.particle_speed(particle_container, mcdc, data) + ut = 1.0 / physics.particle_speed(particle_container, simulation, data) particle["x"] += particle["ux"] * distance particle["y"] += particle["uy"] * distance diff --git a/mcdc/transport/particle_bank.py b/mcdc/transport/particle_bank.py index 13f0b550..29deeb6a 100644 --- a/mcdc/transport/particle_bank.py +++ b/mcdc/transport/particle_bank.py @@ -58,23 +58,23 @@ def _bank_particle(particle_container, bank): @njit -def bank_active_particle(particle_container, mcdc): - _bank_particle(particle_container, mcdc["bank_active"]) +def bank_active_particle(particle_container, simulation): + _bank_particle(particle_container, simulation["bank_active"]) @njit -def bank_source_particle(particle_container, mcdc): - _bank_particle(particle_container, mcdc["bank_source"]) +def bank_source_particle(particle_container, simulation): + _bank_particle(particle_container, simulation["bank_source"]) @njit -def bank_census_particle(particle_container, mcdc): - _bank_particle(particle_container, mcdc["bank_census"]) +def bank_census_particle(particle_container, simulation): + _bank_particle(particle_container, simulation["bank_census"]) @njit -def bank_future_particle(particle_container, mcdc): - _bank_particle(particle_container, mcdc["bank_future"]) +def bank_future_particle(particle_container, simulation): + _bank_particle(particle_container, simulation["bank_future"]) @njit @@ -117,13 +117,13 @@ def report_empty_bank(bank): @njit -def 
promote_future_particles(mcdc, data): +def promote_future_particles(simulation, data): # Get the banks - future_bank = mcdc["bank_future"] + future_bank = simulation["bank_future"] # Get the next census time - idx = mcdc["idx_census"] + 1 - next_census_time = mcdc_get.settings.census_time(idx, mcdc["settings"], data) + idx = simulation["idx_census"] + 1 + next_census_time = mcdc_get.settings.census_time(idx, simulation["settings"], data) # Particle container particle_container = np.zeros(1, type_.particle_data) @@ -141,7 +141,7 @@ def promote_future_particles(mcdc, data): # Promote the future particle to census bank if particle["t"] < next_census_time: - bank_census_particle(particle_container, mcdc) + bank_census_particle(particle_container, simulation) add_bank_size(future_bank, -1) # Consolidate the emptied space in the future bank @@ -158,9 +158,9 @@ def promote_future_particles(mcdc, data): @njit -def manage_particle_banks(mcdc): - master = mcdc["mpi_master"] - serial = mcdc["mpi_size"] == 1 +def manage_particle_banks(simulation): + master = simulation["mpi_master"] + serial = simulation["mpi_size"] == 1 # TIMER: bank management if master: @@ -168,19 +168,21 @@ def manage_particle_banks(mcdc): time_start = MPI.Wtime() # Reset source bank - set_bank_size(mcdc["bank_source"], 0) + set_bank_size(simulation["bank_source"], 0) # Normalize weight - if mcdc["settings"]["eigenvalue_mode"]: - normalize_weight(mcdc["bank_census"], mcdc["settings"]["N_particle"]) + if simulation["settings"]["eigenvalue_mode"]: + normalize_weight( + simulation["bank_census"], simulation["settings"]["N_particle"] + ) # Population control - if mcdc["population_control"]["active"]: - technique.population_control(mcdc) + if simulation["population_control"]["active"]: + technique.population_control(simulation) else: # Swap census and source bank - source_bank = mcdc["bank_source"] - census_bank = mcdc["bank_census"] + source_bank = simulation["bank_source"] + census_bank = 
simulation["bank_census"] size = get_bank_size(census_bank) if size >= source_bank["particles"].shape[0]: @@ -192,18 +194,18 @@ def manage_particle_banks(mcdc): # Redistribute work and rebalance bank size across MPI ranks if serial: - mpi.distribute_work(get_bank_size(mcdc["bank_source"]), mcdc) + mpi.distribute_work(get_bank_size(simulation["bank_source"]), simulation) else: - bank_rebalance(mcdc) + bank_rebalance(simulation) # Reset census bank - set_bank_size(mcdc["bank_census"], 0) + set_bank_size(simulation["bank_census"], 0) # TIMER: bank management if master: with objmode(time_end="float64"): time_end = MPI.Wtime() - mcdc["runtime_bank_management"] += time_end - time_start + simulation["runtime_bank_management"] += time_end - time_start # ====================================================================================== @@ -212,25 +214,25 @@ def manage_particle_banks(mcdc): @njit -def bank_rebalance(mcdc): +def bank_rebalance(simulation): # Scan the bank - idx_start, N_local, N = bank_scanning(mcdc["bank_source"], mcdc) + idx_start, N_local, N = bank_scanning(simulation["bank_source"], simulation) idx_end = idx_start + N_local - mpi.distribute_work(N, mcdc) + mpi.distribute_work(N, simulation) # Abort if source bank is empty if N == 0: return # Rebalance not needed if there is only one rank - if mcdc["mpi_size"] <= 1: + if simulation["mpi_size"] <= 1: return # Some constants - work_start = mcdc["mpi_work_start"] - work_end = work_start + mcdc["mpi_work_size"] - left = mcdc["mpi_rank"] - 1 - right = mcdc["mpi_rank"] + 1 + work_start = simulation["mpi_work_start"] + work_end = work_start + simulation["mpi_work_size"] + left = simulation["mpi_rank"] - 1 + right = simulation["mpi_rank"] + 1 # Flags if need to receive from or sent to the neighbors send_to_left = idx_start < work_start @@ -247,13 +249,13 @@ def bank_rebalance(mcdc): # MPI nearest-neighbor send/receive buff = np.zeros( - mcdc["bank_source"]["particles"].shape[0], dtype=type_.particle_data + 
simulation["bank_source"]["particles"].shape[0], dtype=type_.particle_data ) with objmode(size="int64"): # Create MPI-supported numpy object - size = get_bank_size(mcdc["bank_source"]) - bank = np.array(mcdc["bank_source"]["particles"][:size]) + size = get_bank_size(simulation["bank_source"]) + bank = np.array(simulation["bank_source"]["particles"][:size]) if receive_first: if receive_from_left: @@ -289,9 +291,9 @@ def bank_rebalance(mcdc): buff[i] = bank[i] # Set source bank from buffer - set_bank_size(mcdc["bank_source"], size) + set_bank_size(simulation["bank_source"], size) for i in range(size): - mcdc["bank_source"]["particles"][i] = buff[i] + simulation["bank_source"]["particles"][i] = buff[i] # ====================================================================================== @@ -300,7 +302,7 @@ def bank_rebalance(mcdc): @njit -def bank_scanning(bank, mcdc): +def bank_scanning(bank, simulation): N_local = get_bank_size(bank) # Starting index @@ -312,14 +314,14 @@ def bank_scanning(bank, mcdc): # Global size buff[0] += N_local with objmode(): - MPI.COMM_WORLD.Bcast(buff, mcdc["mpi_size"] - 1) + MPI.COMM_WORLD.Bcast(buff, simulation["mpi_size"] - 1) N_global = buff[0] return idx_start, N_local, N_global @njit -def bank_scanning_weight(bank, mcdc): +def bank_scanning_weight(bank, simulation): # Local weight CDF N_local = get_bank_size(bank) w_cdf = np.zeros(N_local + 1) @@ -337,7 +339,7 @@ def bank_scanning_weight(bank, mcdc): # Global weight buff[0] = w_cdf[-1] with objmode(): - MPI.COMM_WORLD.Bcast(buff, mcdc["mpi_size"] - 1) + MPI.COMM_WORLD.Bcast(buff, simulation["mpi_size"] - 1) W_global = buff[0] return w_start, w_cdf, W_global diff --git a/mcdc/transport/physics/interface.py b/mcdc/transport/physics/interface.py index 34da6f21..5cc9d7c4 100644 --- a/mcdc/transport/physics/interface.py +++ b/mcdc/transport/physics/interface.py @@ -15,10 +15,10 @@ @njit -def particle_speed(particle_container, mcdc, data): +def particle_speed(particle_container, 
simulation, data): particle = particle_container[0] if particle["particle_type"] == PARTICLE_NEUTRON: - return neutron.particle_speed(particle_container, mcdc, data) + return neutron.particle_speed(particle_container, simulation, data) return -1.0 @@ -28,19 +28,19 @@ def particle_speed(particle_container, mcdc, data): @njit -def macro_xs(reaction_type, particle_container, mcdc, data): +def macro_xs(reaction_type, particle_container, simulation, data): particle = particle_container[0] if particle["particle_type"] == PARTICLE_NEUTRON: - return neutron.macro_xs(reaction_type, particle_container, mcdc, data) + return neutron.macro_xs(reaction_type, particle_container, simulation, data) return -1.0 @njit -def neutron_production_xs(reaction_type, particle_container, mcdc, data): +def neutron_production_xs(reaction_type, particle_container, simulation, data): particle = particle_container[0] if particle["particle_type"] == PARTICLE_NEUTRON: return neutron.neutron_production_xs( - reaction_type, particle_container, mcdc, data + reaction_type, particle_container, simulation, data ) return -1.0 @@ -51,9 +51,9 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): @njit -def collision_distance(particle_container, mcdc, data): +def collision_distance(particle_container, simulation, data): # Get total cross-section - SigmaT = macro_xs(REACTION_TOTAL, particle_container, mcdc, data) + SigmaT = macro_xs(REACTION_TOTAL, particle_container, simulation, data) # Vacuum material? 
if SigmaT == 0.0: @@ -66,7 +66,7 @@ def collision_distance(particle_container, mcdc, data): @njit -def collision(particle_container, mcdc, data): +def collision(particle_container, simulation, data): particle = particle_container[0] if particle["particle_type"] == PARTICLE_NEUTRON: - neutron.collision(particle_container, mcdc, data) + neutron.collision(particle_container, simulation, data) diff --git a/mcdc/transport/physics/neutron/interface.py b/mcdc/transport/physics/neutron/interface.py index 102a4415..3309c087 100644 --- a/mcdc/transport/physics/neutron/interface.py +++ b/mcdc/transport/physics/neutron/interface.py @@ -10,7 +10,7 @@ @njit -def particle_speed(particle_container, mcdc, data): +def particle_speed(particle_container, simulation, data): return native.particle_speed(particle_container) @@ -20,13 +20,15 @@ def particle_speed(particle_container, mcdc, data): @njit -def macro_xs(reaction_type, particle_container, mcdc, data): - return native.macro_xs(reaction_type, particle_container, mcdc, data) +def macro_xs(reaction_type, particle_container, simulation, data): + return native.macro_xs(reaction_type, particle_container, simulation, data) @njit -def neutron_production_xs(reaction_type, particle_container, mcdc, data): - return native.neutron_production_xs(reaction_type, particle_container, mcdc, data) +def neutron_production_xs(reaction_type, particle_container, simulation, data): + return native.neutron_production_xs( + reaction_type, particle_container, simulation, data + ) # ====================================================================================== @@ -35,5 +37,5 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): @njit -def collision(particle_container, mcdc, data): - native.collision(particle_container, mcdc, data) +def collision(particle_container, simulation, data): + native.collision(particle_container, simulation, data) diff --git a/mcdc/transport/physics/neutron/multigroup.py 
b/mcdc/transport/physics/neutron/multigroup.py index 8bc47318..82c9ec41 100644 --- a/mcdc/transport/physics/neutron/multigroup.py +++ b/mcdc/transport/physics/neutron/multigroup.py @@ -29,9 +29,9 @@ @njit -def particle_speed(particle_container, mcdc, data): +def particle_speed(particle_container, simulation, data): particle = particle_container[0] - material = mcdc["multigroup_materials"][particle["material_ID"]] + material = simulation["multigroup_materials"][particle["material_ID"]] return mcdc_get.multigroup_material.mgxs_speed(particle["g"], material, data) @@ -41,9 +41,9 @@ def particle_speed(particle_container, mcdc, data): @njit -def macro_xs(reaction_type, particle_container, mcdc, data): +def macro_xs(reaction_type, particle_container, simulation, data): particle = particle_container[0] - material = mcdc["multigroup_materials"][particle["material_ID"]] + material = simulation["multigroup_materials"][particle["material_ID"]] g = particle["g"] if reaction_type == REACTION_TOTAL: @@ -58,9 +58,9 @@ def macro_xs(reaction_type, particle_container, mcdc, data): @njit -def neutron_production_xs(reaction_type, particle_container, mcdc, data): +def neutron_production_xs(reaction_type, particle_container, simulation, data): particle = particle_container[0] - material = mcdc["multigroup_materials"][particle["material_ID"]] + material = simulation["multigroup_materials"][particle["material_ID"]] g = particle["g"] if reaction_type == REACTION_TOTAL: @@ -68,11 +68,11 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): total += neutron_production_xs( REACTION_NEUTRON_ELASTIC_SCATTERING, particle_container, - mcdc, + simulation, data, ) total += neutron_production_xs( - REACTION_NEUTRON_FISSION, particle_container, mcdc, data + REACTION_NEUTRON_FISSION, particle_container, simulation, data ) return total elif reaction_type == REACTION_NEUTRON_CAPTURE: @@ -101,19 +101,19 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): 
@njit -def collision(particle_container, mcdc, data): +def collision(particle_container, simulation, data): particle = particle_container[0] # Get the reaction cross-sections - SigmaT = macro_xs(REACTION_TOTAL, particle_container, mcdc, data) + SigmaT = macro_xs(REACTION_TOTAL, particle_container, simulation, data) SigmaS = macro_xs( - REACTION_NEUTRON_ELASTIC_SCATTERING, particle_container, mcdc, data + REACTION_NEUTRON_ELASTIC_SCATTERING, particle_container, simulation, data ) - SigmaC = macro_xs(REACTION_NEUTRON_CAPTURE, particle_container, mcdc, data) - SigmaF = macro_xs(REACTION_NEUTRON_FISSION, particle_container, mcdc, data) + SigmaC = macro_xs(REACTION_NEUTRON_CAPTURE, particle_container, simulation, data) + SigmaF = macro_xs(REACTION_NEUTRON_FISSION, particle_container, simulation, data) # Implicit capture - if mcdc["implicit_capture"]["active"]: + if simulation["implicit_capture"]["active"]: particle["w"] *= (SigmaT - SigmaC) / SigmaT SigmaT -= SigmaC @@ -121,11 +121,11 @@ def collision(particle_container, mcdc, data): xi = rng.lcg(particle_container) * SigmaT total = SigmaS if total > xi: - scattering(particle_container, mcdc, data) + scattering(particle_container, simulation, data) else: total += SigmaF if total > xi: - fission(particle_container, mcdc, data) + fission(particle_container, simulation, data) else: particle["alive"] = False @@ -136,7 +136,7 @@ def collision(particle_container, mcdc, data): @njit -def scattering(particle_container, mcdc, data): +def scattering(particle_container, simulation, data): # Particle attributes particle = particle_container[0] g = particle["g"] @@ -145,7 +145,7 @@ def scattering(particle_container, mcdc, data): uz = particle["uz"] # Material attributes - material = mcdc["multigroup_materials"][particle["material_ID"]] + material = simulation["multigroup_materials"][particle["material_ID"]] G = material["G"] # Kill the current particle @@ -154,8 +154,8 @@ def scattering(particle_container, mcdc, data): # Adjust 
production and product weights if weighted emission weight_production = 1.0 weight_product = particle["w"] - if mcdc["weighted_emission"]["active"]: - weight_target = mcdc["weighted_emission"]["weight_target"] + if simulation["weighted_emission"]["active"]: + weight_target = simulation["weighted_emission"]["weight_target"] weight_production = particle["w"] / weight_target weight_product = weight_target @@ -207,19 +207,21 @@ def scattering(particle_container, mcdc, data): particle["E"] = particle_new["E"] particle["w"] = particle_new["w"] else: - particle_bank_module.bank_active_particle(particle_container_new, mcdc) + particle_bank_module.bank_active_particle( + particle_container_new, simulation + ) @njit -def fission(particle_container, mcdc, data): - settings = mcdc["settings"] +def fission(particle_container, simulation, data): + settings = simulation["settings"] # Particle properties particle = particle_container[0] g = particle["g"] # Material properties - material = mcdc["multigroup_materials"][particle["material_ID"]] + material = simulation["multigroup_materials"][particle["material_ID"]] G = material["G"] J = material["J"] @@ -229,8 +231,8 @@ def fission(particle_container, mcdc, data): # Adjust production and product weights if weighted emission weight_production = 1.0 weight_product = particle["w"] - if mcdc["weighted_emission"]["active"]: - weight_target = mcdc["weighted_emission"]["weight_target"] + if simulation["weighted_emission"]["active"]: + weight_target = simulation["weighted_emission"]["weight_target"] weight_production = particle["w"] / weight_target weight_product = weight_target @@ -242,7 +244,9 @@ def fission(particle_container, mcdc, data): # Get number of secondaries N = int( - math.floor(weight_production * nu / mcdc["k_eff"] + rng.lcg(particle_container)) + math.floor( + weight_production * nu / simulation["k_eff"] + rng.lcg(particle_container) + ) ) # Set up secondary partice container @@ -300,7 +304,9 @@ def 
fission(particle_container, mcdc, data): # Eigenvalue mode: bank right away if settings["eigenvalue_mode"]: - particle_bank_module.bank_census_particle(particle_container_new, mcdc) + particle_bank_module.bank_census_particle( + particle_container_new, simulation + ) continue # Below is only relevant for fixed-source problem @@ -311,7 +317,7 @@ def fission(particle_container, mcdc, data): # Check if it hits current or next census times hit_current_census = False hit_future_census = False - idx_census = mcdc["idx_census"] + idx_census = simulation["idx_census"] if settings["N_census"] > 1: if particle_new["t"] > mcdc_get.settings.census_time( idx_census, settings, data @@ -335,14 +341,20 @@ def fission(particle_container, mcdc, data): particle["E"] = particle_new["E"] particle["w"] = particle_new["w"] else: - particle_bank_module.bank_active_particle(particle_container_new, mcdc) + particle_bank_module.bank_active_particle( + particle_container_new, simulation + ) # Hit future census --> add to future bank elif hit_future_census: # Particle will participate in the future - particle_bank_module.bank_future_particle(particle_container_new, mcdc) + particle_bank_module.bank_future_particle( + particle_container_new, simulation + ) # Hit current census --> add to census bank else: # Particle will participate after the current census is completed - particle_bank_module.bank_census_particle(particle_container_new, mcdc) + particle_bank_module.bank_census_particle( + particle_container_new, simulation + ) diff --git a/mcdc/transport/physics/neutron/native.py b/mcdc/transport/physics/neutron/native.py index 241e60a6..daa3222b 100644 --- a/mcdc/transport/physics/neutron/native.py +++ b/mcdc/transport/physics/neutron/native.py @@ -67,16 +67,16 @@ def particle_energy_from_speed(speed): @njit -def macro_xs(reaction_type, particle_container, mcdc, data): +def macro_xs(reaction_type, particle_container, simulation, data): particle = particle_container[0] - material = 
mcdc["native_materials"][particle["material_ID"]] + material = simulation["native_materials"][particle["material_ID"]] E = particle["E"] total = 0.0 for i in range(material["N_nuclide"]): nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) - nuclide = mcdc["nuclides"][nuclide_ID] + nuclide = simulation["nuclides"][nuclide_ID] nuclide_density = mcdc_get.native_material.nuclide_densities(i, material, data) xs = total_micro_xs(reaction_type, E, nuclide, data) @@ -124,24 +124,28 @@ def reaction_micro_xs(E, reaction_base, nuclide, data): @njit -def neutron_production_xs(reaction_type, particle_container, mcdc, data): +def neutron_production_xs(reaction_type, particle_container, simulation, data): particle = particle_container[0] - material_base = mcdc["materials"][particle["material_ID"]] - material = mcdc["native_materials"][material_base["child_ID"]] + material_base = simulation["materials"][particle["material_ID"]] + material = simulation["native_materials"][material_base["child_ID"]] if reaction_type == REACTION_TOTAL: elastic_type = REACTION_NEUTRON_ELASTIC_SCATTERING inelastic_type = REACTION_NEUTRON_INELASTIC_SCATTERING fission_type = REACTION_NEUTRON_FISSION - elastic_xs = neutron_production_xs(elastic_type, particle_container, mcdc, data) + elastic_xs = neutron_production_xs( + elastic_type, particle_container, simulation, data + ) inelastic_xs = neutron_production_xs( - inelastic_type, particle_container, mcdc, data + inelastic_type, particle_container, simulation, data + ) + fission_xs = neutron_production_xs( + fission_type, particle_container, simulation, data ) - fission_xs = neutron_production_xs(fission_type, particle_container, mcdc, data) return elastic_xs + inelastic_xs + fission_xs elif reaction_type == REACTION_NEUTRON_ELASTIC_SCATTERING: - return macro_xs(reaction_type, particle_container, mcdc, data) + return macro_xs(reaction_type, particle_container, simulation, data) elif reaction_type == REACTION_NEUTRON_CAPTURE: return 
0.0 @@ -150,7 +154,7 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): total = 0.0 for i in range(material["N_nuclide"]): nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) - nuclide = mcdc["nuclides"][nuclide_ID] + nuclide = simulation["nuclides"][nuclide_ID] E = particle["E"] nuclide_density = mcdc_get.native_material.nuclide_densities( @@ -161,8 +165,8 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): reaction_ID = int( mcdc_get.nuclide.inelastic_scattering_reaction_IDs(j, nuclide, data) ) - reaction_base = mcdc["reactions"][reaction_ID] - reaction = mcdc["neutron_inelastic_scattering_reactions"][ + reaction_base = simulation["reactions"][reaction_ID] + reaction = simulation["neutron_inelastic_scattering_reactions"][ reaction_base["child_ID"] ] @@ -179,7 +183,7 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): total = 0.0 for i in range(material["N_nuclide"]): nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) - nuclide = mcdc["nuclides"][nuclide_ID] + nuclide = simulation["nuclides"][nuclide_ID] if not nuclide["fissionable"]: continue @@ -192,12 +196,14 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): reaction_ID = int( mcdc_get.nuclide.fission_reaction_IDs(j, nuclide, data) ) - reaction_base = mcdc["reactions"][reaction_ID] - reaction = mcdc["neutron_fission_reactions"][reaction_base["child_ID"]] + reaction_base = simulation["reactions"][reaction_ID] + reaction = simulation["neutron_fission_reactions"][ + reaction_base["child_ID"] + ] xs = reaction_micro_xs(E, reaction_base, nuclide, data) - nu_p = fission_prompt_multiplicity(E, nuclide, mcdc, data) - nu_d = fission_delayed_multiplicity(E, nuclide, mcdc, data) + nu_p = fission_prompt_multiplicity(E, nuclide, simulation, data) + nu_d = fission_delayed_multiplicity(E, nuclide, simulation, data) nu = nu_d + nu_p total += nuclide_density * nu * xs @@ -213,9 
+219,9 @@ def neutron_production_xs(reaction_type, particle_container, mcdc, data): @njit -def collision(particle_container, mcdc, data): +def collision(particle_container, simulation, data): particle = particle_container[0] - material = mcdc["native_materials"][particle["material_ID"]] + material = simulation["native_materials"][particle["material_ID"]] # Particle properties E = particle["E"] @@ -224,11 +230,13 @@ def collision(particle_container, mcdc, data): # Sample colliding nuclide # ================================================================================== - SigmaT = macro_xs(REACTION_TOTAL, particle_container, mcdc, data) + SigmaT = macro_xs(REACTION_TOTAL, particle_container, simulation, data) # Implicit capture - if mcdc["implicit_capture"]["active"]: - SigmaC = macro_xs(REACTION_NEUTRON_CAPTURE, particle_container, mcdc, data) + if simulation["implicit_capture"]["active"]: + SigmaC = macro_xs( + REACTION_NEUTRON_CAPTURE, particle_container, simulation, data + ) particle["w"] *= (SigmaT - SigmaC) / SigmaT SigmaT -= SigmaC @@ -236,12 +244,12 @@ def collision(particle_container, mcdc, data): total = 0.0 for i in range(material["N_nuclide"]): nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) - nuclide = mcdc["nuclides"][nuclide_ID] + nuclide = simulation["nuclides"][nuclide_ID] nuclide_density = mcdc_get.native_material.nuclide_densities(i, material, data) sigmaT = total_micro_xs(REACTION_TOTAL, E, nuclide, data) - if mcdc["implicit_capture"]["active"]: + if simulation["implicit_capture"]["active"]: sigmaC = total_micro_xs(REACTION_NEUTRON_CAPTURE, E, nuclide, data) particle["w"] *= (sigmaT - sigmaC) / sigmaT sigmaT -= sigmaC @@ -274,16 +282,18 @@ def collision(particle_container, mcdc, data): reaction_ID = int( mcdc_get.nuclide.elastic_scattering_reaction_IDs(i, nuclide, data) ) - reaction = mcdc["neutron_elastic_scattering_reactions"][reaction_ID] + reaction = simulation["neutron_elastic_scattering_reactions"][reaction_ID] 
reaction_base_ID = reaction["parent_ID"] - reaction_base = mcdc["reactions"][reaction_base_ID] + reaction_base = simulation["reactions"][reaction_base_ID] total += reaction_micro_xs(E, reaction_base, nuclide, data) if xi < total: - elastic_scattering(reaction, particle_container, nuclide, mcdc, data) + elastic_scattering( + reaction, particle_container, nuclide, simulation, data + ) return # Capture - if not mcdc["implicit_capture"]["active"]: + if not simulation["implicit_capture"]["active"]: sigma_capture = total_micro_xs(REACTION_NEUTRON_CAPTURE, E, nuclide, data) total += sigma_capture if xi < total: @@ -299,13 +309,15 @@ def collision(particle_container, mcdc, data): reaction_ID = int( mcdc_get.nuclide.inelastic_scattering_reaction_IDs(i, nuclide, data) ) - reaction = mcdc["neutron_inelastic_scattering_reactions"][reaction_ID] + reaction = simulation["neutron_inelastic_scattering_reactions"][reaction_ID] reaction_base_ID = reaction["parent_ID"] - reaction_base = mcdc["reactions"][reaction_base_ID] + reaction_base = simulation["reactions"][reaction_base_ID] xs = reaction_micro_xs(E, reaction_base, nuclide, data) total += xs if xi < total: - inelastic_scattering(reaction, particle_container, nuclide, mcdc, data) + inelastic_scattering( + reaction, particle_container, nuclide, simulation, data + ) return # Fission (arive here only if nuclide is fissionable) @@ -314,12 +326,12 @@ def collision(particle_container, mcdc, data): total -= sigma_fission for i in range(nuclide["N_fission_reaction"]): reaction_ID = int(mcdc_get.nuclide.fission_reaction_IDs(i, nuclide, data)) - reaction = mcdc["neutron_fission_reactions"][reaction_ID] + reaction = simulation["neutron_fission_reactions"][reaction_ID] reaction_base_ID = reaction["parent_ID"] - reaction_base = mcdc["reactions"][reaction_base_ID] + reaction_base = simulation["reactions"][reaction_base_ID] total += reaction_micro_xs(E, reaction_base, nuclide, data) if xi < total: - fission(reaction, particle_container, 
nuclide, mcdc, data) + fission(reaction, particle_container, nuclide, simulation, data) return @@ -329,7 +341,7 @@ def collision(particle_container, mcdc, data): @njit -def elastic_scattering(reaction, particle_container, nuclide, mcdc, data): +def elastic_scattering(reaction, particle_container, nuclide, simulation, data): # Particle attributes particle = particle_container[0] E = particle["E"] @@ -378,7 +390,7 @@ def elastic_scattering(reaction, particle_container, nuclide, mcdc, data): uz = vz / speed # Sample the scattering cosine from the multi-PDF distribution - multi_table = mcdc["multi_table_distributions"][reaction["mu_table_ID"]] + multi_table = simulation["multi_table_distributions"][reaction["mu_table_ID"]] mu0 = sample_multi_table(E, particle_container, multi_table, data) # Scatter the direction in COM @@ -463,7 +475,7 @@ def sample_nucleus_velocity(A, particle_container): @njit -def inelastic_scattering(reaction, particle_container, nuclide, mcdc, data): +def inelastic_scattering(reaction, particle_container, nuclide, simulation, data): # Particle attributes particle = particle_container[0] E = particle["E"] @@ -498,8 +510,8 @@ def inelastic_scattering(reaction, particle_container, nuclide, mcdc, data): elif angle_type == ANGLE_ISOTROPIC: mu = sample_isotropic_cosine(particle_container_new) elif angle_type == ANGLE_DISTRIBUTED: - distribution_base = mcdc["distributions"][reaction["mu_ID"]] - multi_table = mcdc["multi_table_distributions"][ + distribution_base = simulation["distributions"][reaction["mu_ID"]] + multi_table = simulation["multi_table_distributions"][ distribution_base["child_ID"] ] mu = sample_multi_table(E, particle_container_new, multi_table, data) @@ -515,7 +527,7 @@ def inelastic_scattering(reaction, particle_container, nuclide, mcdc, data): n, reaction, data ) ) - spectrum_base = mcdc["distributions"][ID] + spectrum_base = simulation["distributions"][ID] else: probability_grid = 
mcdc_get.neutron_inelastic_scattering_reaction.spectrum_probability_grid_all( reaction, data @@ -536,24 +548,24 @@ def inelastic_scattering(reaction, particle_container, nuclide, mcdc, data): j, reaction, data ) ) - spectrum_base = mcdc["distributions"][ID] + spectrum_base = simulation["distributions"][ID] break # Sample energy if not angle_type == ANGLE_ENERGY_CORRELATED: E_new = sample_distribution( - E, spectrum_base, particle_container_new, mcdc, data, scale=True + E, spectrum_base, particle_container_new, simulation, data, scale=True ) else: E_new, mu = sample_correlated_distribution( - E, spectrum_base, particle_container_new, mcdc, data, scale=True + E, spectrum_base, particle_container_new, simulation, data, scale=True ) # ============================================================================== # Frame transformation # ============================================================================== - reaction_base = mcdc["reactions"][int(reaction["parent_ID"])] + reaction_base = simulation["reactions"][int(reaction["parent_ID"])] reference_frame = reaction_base["reference_frame"] if reference_frame == REFERENCE_FRAME_COM: A = nuclide["atomic_weight_ratio"] @@ -586,7 +598,9 @@ def inelastic_scattering(reaction, particle_container, nuclide, mcdc, data): particle["uz"] = particle_new["uz"] particle["E"] = particle_new["E"] else: - particle_bank_module.bank_active_particle(particle_container_new, mcdc) + particle_bank_module.bank_active_particle( + particle_container_new, simulation + ) # ====================================================================================== @@ -595,8 +609,8 @@ def inelastic_scattering(reaction, particle_container, nuclide, mcdc, data): @njit -def fission(reaction, particle_container, nuclide, mcdc, data): - settings = mcdc["settings"] +def fission(reaction, particle_container, nuclide, simulation, data): + settings = simulation["settings"] # Particle properties particle = particle_container[0] @@ -611,20 +625,22 @@ def 
fission(reaction, particle_container, nuclide, mcdc, data): # Adjust production and product weights if weighted emission weight_production = 1.0 weight_product = particle["w"] - if mcdc["weighted_emission"]["active"]: - weight_target = mcdc["weighted_emission"]["weight_target"] + if simulation["weighted_emission"]["active"]: + weight_target = simulation["weighted_emission"]["weight_target"] weight_production = particle["w"] / weight_target weight_product = weight_target # Fission yields N_delayed = nuclide["N_fission_delayed_precursor"] - nu_p = fission_prompt_multiplicity(E, nuclide, mcdc, data) - nu_d = fission_delayed_multiplicity(E, nuclide, mcdc, data) + nu_p = fission_prompt_multiplicity(E, nuclide, simulation, data) + nu_d = fission_delayed_multiplicity(E, nuclide, simulation, data) nu = nu_p + nu_d # Get number of secondaries N = int( - math.floor(weight_production * nu / mcdc["k_eff"] + rng.lcg(particle_container)) + math.floor( + weight_production * nu / simulation["k_eff"] + rng.lcg(particle_container) + ) ) # Set up secondary partice container @@ -666,25 +682,35 @@ def fission(reaction, particle_container, nuclide, mcdc, data): elif angle_type == ANGLE_ISOTROPIC: mu = sample_isotropic_cosine(particle_container_new) elif angle_type == ANGLE_DISTRIBUTED: - distribution_base = mcdc["distributions"][reaction["mu_ID"]] - multi_table = mcdc["multi_table_distributions"][ + distribution_base = simulation["distributions"][reaction["mu_ID"]] + multi_table = simulation["multi_table_distributions"][ distribution_base["child_ID"] ] mu = sample_multi_table(E, particle_container_new, multi_table, data) # Sample energy (also angle if correlated) - spectrum_base = mcdc["distributions"][reaction["spectrum_ID"]] + spectrum_base = simulation["distributions"][reaction["spectrum_ID"]] if not angle_type == ANGLE_ENERGY_CORRELATED: E_new = sample_distribution( - E, spectrum_base, particle_container_new, mcdc, data, scale=True + E, + spectrum_base, + particle_container_new, + 
simulation, + data, + scale=True, ) else: E_new, mu = sample_correlated_distribution( - E, spectrum_base, particle_container_new, mcdc, data, scale=True + E, + spectrum_base, + particle_container_new, + simulation, + data, + scale=True, ) # Frame transformation - reaction_base = mcdc["reactions"][int(reaction["parent_ID"])] + reaction_base = simulation["reactions"][int(reaction["parent_ID"])] reference_frame = reaction_base["reference_frame"] if reference_frame == REFERENCE_FRAME_COM: A = nuclide["atomic_weight_ratio"] @@ -724,7 +750,9 @@ def fission(reaction, particle_container, nuclide, mcdc, data): # Eigenvalue mode: bank right away if settings["eigenvalue_mode"]: - particle_bank_module.bank_census_particle(particle_container_new, mcdc) + particle_bank_module.bank_census_particle( + particle_container_new, simulation + ) continue # Below is only relevant for fixed-source problem @@ -735,7 +763,7 @@ def fission(reaction, particle_container, nuclide, mcdc, data): # Check if it hits current or next census times hit_current_census = False hit_future_census = False - idx_census = mcdc["idx_census"] + idx_census = simulation["idx_census"] if settings["N_census"] > 1: if particle_new["t"] > mcdc_get.settings.census_time( idx_census, settings, data @@ -759,26 +787,32 @@ def fission(reaction, particle_container, nuclide, mcdc, data): particle["E"] = particle_new["E"] particle["w"] = particle_new["w"] else: - particle_bank_module.bank_active_particle(particle_container_new, mcdc) + particle_bank_module.bank_active_particle( + particle_container_new, simulation + ) # Hit future census --> add to future bank elif hit_future_census: # Particle will participate in the future - particle_bank_module.bank_future_particle(particle_container_new, mcdc) + particle_bank_module.bank_future_particle( + particle_container_new, simulation + ) # Hit current census --> add to census bank else: # Particle will participate after the current census is completed - 
particle_bank_module.bank_census_particle(particle_container_new, mcdc) + particle_bank_module.bank_census_particle( + particle_container_new, simulation + ) @njit -def fission_prompt_multiplicity(E, nuclide, mcdc, data): - data_base = mcdc["data"][nuclide["fission_prompt_multiplicity_ID"]] - return evaluate_data(E, data_base, mcdc, data) +def fission_prompt_multiplicity(E, nuclide, simulation, data): + data_base = simulation["data"][nuclide["fission_prompt_multiplicity_ID"]] + return evaluate_data(E, data_base, simulation, data) @njit -def fission_delayed_multiplicity(E, nuclide, mcdc, data): - data_base = mcdc["data"][nuclide["fission_delayed_multiplicity_ID"]] - return evaluate_data(E, data_base, mcdc, data) +def fission_delayed_multiplicity(E, nuclide, simulation, data): + data_base = simulation["data"][nuclide["fission_delayed_multiplicity_ID"]] + return evaluate_data(E, data_base, simulation, data) diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index fca470c5..24f77716 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -29,13 +29,13 @@ # ====================================================================================== -def fixed_source_simulation(mcdc_arr, data): +def fixed_source_simulation(simulation_container, data): # Ensure `mcdc` exist for the lifetime of the program by intentionally leaking their memory - # adapt.leak(mcdc_arr) - mcdc = mcdc_arr[0] + # adapt.leak(simulation_container) + simulation = simulation_container[0] # Get some settings - settings = mcdc["settings"] + settings = simulation["settings"] N_batch = settings["N_batch"] N_particle = settings["N_particle"] N_census = settings["N_census"] @@ -43,11 +43,11 @@ def fixed_source_simulation(mcdc_arr, data): # Loop over batches for idx_batch in range(N_batch): - mcdc["idx_batch"] = idx_batch + simulation["idx_batch"] = idx_batch seed_batch = rng.split_seed(uint64(idx_batch), settings["rng_seed"]) # Distribute work - 
mpi.distribute_work(N_particle, mcdc) + mpi.distribute_work(N_particle, simulation) # Print multi-batch header if N_batch > 1: @@ -56,104 +56,104 @@ def fixed_source_simulation(mcdc_arr, data): # Loop over time censuses for idx_census in range(N_census): - mcdc["idx_census"] = idx_census + simulation["idx_census"] = idx_census seed_census = rng.split_seed(uint64(seed_batch), rng.SEED_SPLIT_CENSUS) # Reset tally time filters if census-based tally is used if use_census_based_tally: - tally_module.filter.set_census_based_time_grid(mcdc, data) + tally_module.filter.set_census_based_time_grid(simulation, data) # Accordingly promote future particles to censused particles - if particle_bank_module.get_bank_size(mcdc["bank_future"]) > 0: - particle_bank_module.promote_future_particles(mcdc, data) + if particle_bank_module.get_bank_size(simulation["bank_future"]) > 0: + particle_bank_module.promote_future_particles(simulation, data) # Loop over source particles seed_source = rng.split_seed(uint64(seed_census), rng.SEED_SPLIT_SOURCE) - source_loop(uint64(seed_source), mcdc, data) + source_loop(uint64(seed_source), simulation, data) # Manage particle banks: population control and work rebalance - particle_bank_module.manage_particle_banks(mcdc) + particle_bank_module.manage_particle_banks(simulation) # Time census-based tally closeout if use_census_based_tally: - tally_module.closeout.reduce(mcdc, data) - tally_module.closeout.accumulate(mcdc, data) - if mcdc["mpi_master"]: + tally_module.closeout.reduce(simulation, data) + tally_module.closeout.accumulate(simulation, data) + if simulation["mpi_master"]: with objmode(): - output_module.generate_census_based_tally(mcdc, data) - tally_module.closeout.reset_sum_bins(mcdc, data) + output_module.generate_census_based_tally(simulation, data) + tally_module.closeout.reset_sum_bins(simulation, data) # Terminate census loop if all banks are empty if ( idx_census > 0 - and particle_bank_module.total_size(mcdc["bank_source"]) == 0 - and 
particle_bank_module.total_size(mcdc["bank_census"]) == 0 - and particle_bank_module.total_size(mcdc["bank_future"]) == 0 + and particle_bank_module.total_size(simulation["bank_source"]) == 0 + and particle_bank_module.total_size(simulation["bank_census"]) == 0 + and particle_bank_module.total_size(simulation["bank_future"]) == 0 ): break # Multi-batch closeout if N_batch > 1: # Reset banks - particle_bank_module.set_bank_size(mcdc["bank_active"], 0) - particle_bank_module.set_bank_size(mcdc["bank_census"], 0) - particle_bank_module.set_bank_size(mcdc["bank_source"], 0) - particle_bank_module.set_bank_size(mcdc["bank_future"], 0) + particle_bank_module.set_bank_size(simulation["bank_active"], 0) + particle_bank_module.set_bank_size(simulation["bank_census"], 0) + particle_bank_module.set_bank_size(simulation["bank_source"], 0) + particle_bank_module.set_bank_size(simulation["bank_future"], 0) if not use_census_based_tally: # Tally history closeout - tally_module.closeout.reduce(mcdc, data) - tally_module.closeout.accumulate(mcdc, data) + tally_module.closeout.reduce(simulation, data) + tally_module.closeout.accumulate(simulation, data) # Tally closeout if not use_census_based_tally: - tally_module.closeout.finalize(mcdc, data) + tally_module.closeout.finalize(simulation, data) -def eigenvalue_simulation(mcdc_arr, data): +def eigenvalue_simulation(simulation_container, data): # Ensure `mcdc` exist for the lifetime of the program # by intentionally leaking their memory - # adapt.leak(mcdc_arr) - mcdc = mcdc_arr[0] + # adapt.leak(simulation_container) + simulation = simulation_container[0] # Get some settings - settings = mcdc["settings"] + settings = simulation["settings"] N_inactive = settings["N_inactive"] N_cycle = settings["N_cycle"] N_particle = settings["N_particle"] # Distribute work - mpi.distribute_work(N_particle, mcdc) + mpi.distribute_work(N_particle, simulation) # Loop over power iteration cycles for idx_cycle in range(N_cycle): - mcdc["idx_cycle"] = 
idx_cycle + simulation["idx_cycle"] = idx_cycle seed_cycle = rng.split_seed(uint64(idx_cycle), settings["rng_seed"]) # Loop over source particles - source_loop(uint64(seed_cycle), mcdc, data) + source_loop(uint64(seed_cycle), simulation, data) # Tally "history" closeout - tally_module.closeout.eigenvalue_cycle(mcdc, data) - if mcdc["cycle_active"]: - tally_module.closeout.reduce(mcdc, data) - tally_module.closeout.accumulate(mcdc, data) + tally_module.closeout.eigenvalue_cycle(simulation, data) + if simulation["cycle_active"]: + tally_module.closeout.reduce(simulation, data) + tally_module.closeout.accumulate(simulation, data) # Manage particle banks: population control and work rebalance - particle_bank_module.manage_particle_banks(mcdc) + particle_bank_module.manage_particle_banks(simulation) # Print progress with objmode(): - print_progress_eigenvalue(mcdc, data) + print_progress_eigenvalue(simulation, data) # Entering active cycle? - mcdc["idx_cycle"] += 1 - if mcdc["idx_cycle"] >= N_inactive: - mcdc["cycle_active"] = True + simulation["idx_cycle"] += 1 + if simulation["idx_cycle"] >= N_inactive: + simulation["cycle_active"] = True # Tally closeout - tally_module.closeout.finalize(mcdc, data) - tally_module.closeout.eigenvalue_simulation(mcdc) + tally_module.closeout.finalize(simulation, data) + tally_module.closeout.eigenvalue_simulation(simulation) # ============================================================================= @@ -162,41 +162,43 @@ def eigenvalue_simulation(mcdc_arr, data): @njit -def source_loop(seed, mcdc, data): +def source_loop(seed, simulation, data): # Progress bar indicator N_prog = 0 # Loop over particle sources - work_start = mcdc["mpi_work_start"] - work_size = mcdc["mpi_work_size"] + work_start = simulation["mpi_work_start"] + work_size = simulation["mpi_work_size"] for idx_work in range(work_size): - mcdc["idx_work"] = work_start + idx_work - generate_source_particle(work_start, idx_work, seed, mcdc, data) + simulation["idx_work"] 
= work_start + idx_work + generate_source_particle(work_start, idx_work, seed, simulation, data) # Run the source particle and its secondaries - exhaust_active_bank(mcdc, data) + exhaust_active_bank(simulation, data) - source_closeout(mcdc, idx_work, N_prog, data) + source_closeout(simulation, idx_work, N_prog, data) @njit -def generate_source_particle(work_start, idx_work, seed, mcdc, data): +def generate_source_particle(work_start, idx_work, seed, simulation, data): """Get a source particle and put into one of the banks""" - settings = mcdc["settings"] + settings = simulation["settings"] particle_container = np.zeros(1, type_.particle_data) particle = particle_container[0] # Get from fixed-source? - if particle_bank_module.get_bank_size(mcdc["bank_source"]) == 0: + if particle_bank_module.get_bank_size(simulation["bank_source"]) == 0: # Sample source seed_work = rng.split_seed(work_start + idx_work, seed) - source_particle(particle_container, seed_work, mcdc, data) + source_particle(particle_container, seed_work, simulation, data) # Get from source bank else: - particle_container = mcdc["bank_source"]["particles"][idx_work : (idx_work + 1)] + particle_container = simulation["bank_source"]["particles"][ + idx_work : (idx_work + 1) + ] particle = particle_container[0] # Skip if beyond time boundary @@ -206,7 +208,7 @@ def generate_source_particle(work_start, idx_work, seed, mcdc, data): # Check if it is beyond current or next census times hit_census = False hit_next_census = False - idx_census = mcdc["idx_census"] + idx_census = simulation["idx_census"] if idx_census < settings["N_census"] - 1: if particle["t"] > mcdc_get.settings.census_time( @@ -219,49 +221,52 @@ def generate_source_particle(work_start, idx_work, seed, mcdc, data): # Put into the right bank if not hit_census: - particle_bank_module.bank_active_particle(particle_container, mcdc) + particle_bank_module.bank_active_particle(particle_container, simulation) elif not hit_next_census: # Particle will 
participate after the current census - particle_bank_module.bank_census_particle(particle_container, mcdc) + particle_bank_module.bank_census_particle(particle_container, simulation) else: # Particle will participate in the future - particle_bank_module.bank_future_particle(particle_container, mcdc) + particle_bank_module.bank_future_particle(particle_container, simulation) @njit -def exhaust_active_bank(mcdc, data): +def exhaust_active_bank(simulation, data): particle_container = np.zeros(1, type_.particle) particle = particle_container[0] # Loop until active bank is exhausted - while particle_bank_module.get_bank_size(mcdc["bank_active"]) > 0: + while particle_bank_module.get_bank_size(simulation["bank_active"]) > 0: # Get particle from active bank - particle_bank_module.pop_particle(particle_container, mcdc["bank_active"]) + particle_bank_module.pop_particle(particle_container, simulation["bank_active"]) - prep_particle(particle_container, mcdc) + prep_particle(particle_container, simulation) # Particle loop - particle_loop(particle_container, mcdc, data) + particle_loop(particle_container, simulation, data) @njit -def prep_particle(particle_container, mcdc): +def prep_particle(particle_container, simulation): particle = particle_container[0] @njit -def source_closeout(mcdc, idx_work, N_prog, data): +def source_closeout(simulation, idx_work, N_prog, data): # Tally history closeout for one-batch fixed-source simulation - if not mcdc["settings"]["eigenvalue_mode"] and mcdc["settings"]["N_batch"] == 1: - if not mcdc["settings"]["use_census_based_tally"]: - tally_module.closeout.accumulate(mcdc, data) + if ( + not simulation["settings"]["eigenvalue_mode"] + and simulation["settings"]["N_batch"] == 1 + ): + if not simulation["settings"]["use_census_based_tally"]: + tally_module.closeout.accumulate(simulation, data) # Progress printout - percent = (idx_work + 1.0) / mcdc["mpi_work_size"] - if mcdc["settings"]["use_progress_bar"] and int(percent * 100.0) > N_prog: + 
percent = (idx_work + 1.0) / simulation["mpi_work_size"] + if simulation["settings"]["use_progress_bar"] and int(percent * 100.0) > N_prog: N_prog += 1 with objmode(): - print_progress(percent, mcdc) + print_progress(percent, simulation) # ====================================================================================== @@ -270,19 +275,19 @@ def source_closeout(mcdc, idx_work, N_prog, data): @njit -def particle_loop(particle_container, mcdc, data): +def particle_loop(particle_container, simulation, data): particle = particle_container[0] while particle["alive"]: - step_particle(particle_container, mcdc, data) + step_particle(particle_container, simulation, data) @njit -def step_particle(particle_container, mcdc, data): +def step_particle(particle_container, simulation, data): particle = particle_container[0] # Determine and move to event - move_to_event(particle_container, mcdc, data) + move_to_event(particle_container, simulation, data) # Execute events if particle["event"] == EVENT_LOST: @@ -290,15 +295,15 @@ def step_particle(particle_container, mcdc, data): # Collision if particle["event"] & EVENT_COLLISION: - physics.collision(particle_container, mcdc, data) + physics.collision(particle_container, simulation, data) # Surface and domain crossing if particle["event"] & EVENT_SURFACE_CROSSING: - geometry.surface_crossing(particle_container, mcdc, data) + geometry.surface_crossing(particle_container, simulation, data) # Census time crossing if particle["event"] & EVENT_TIME_CENSUS: - particle_bank_module.bank_census_particle(particle_container, mcdc) + particle_bank_module.bank_census_particle(particle_container, simulation) particle["alive"] = False # Time boundary crossing @@ -307,12 +312,12 @@ def step_particle(particle_container, mcdc, data): # Weight roulette if particle["alive"]: - technique.weight_roulette(particle_container, mcdc) + technique.weight_roulette(particle_container, simulation) @njit -def move_to_event(particle_container, mcdc, data): - 
settings = mcdc["settings"] +def move_to_event(particle_container, simulation, data): + settings = simulation["settings"] # ================================================================================== # Preparation (as needed) @@ -324,7 +329,7 @@ def move_to_event(particle_container, mcdc, data): if settings["multigroup_mode"]: # If material is not identified yet, locate the particle if particle["material_ID"] == -1: - if not geometry.locate_particle(particle_container, mcdc, data): + if not geometry.locate_particle(particle_container, simulation, data): # Particle is lost particle["event"] = EVENT_LOST return @@ -337,7 +342,7 @@ def move_to_event(particle_container, mcdc, data): # - Set particle boundary event (surface or lattice crossing, or lost) # - Return distance to boundary (surface or lattice) - d_boundary = geometry.inspect_geometry(particle_container, mcdc, data) + d_boundary = geometry.inspect_geometry(particle_container, simulation, data) # Particle is lost? if particle["event"] == EVENT_LOST: @@ -348,19 +353,19 @@ def move_to_event(particle_container, mcdc, data): # ================================================================================== # Distance to domain - speed = physics.particle_speed(particle_container, mcdc, data) + speed = physics.particle_speed(particle_container, simulation, data) # Distance to time boundary d_time_boundary = speed * (settings["time_boundary"] - particle["t"]) # Distance to census time - idx = mcdc["idx_census"] + idx = simulation["idx_census"] d_time_census = speed * ( mcdc_get.settings.census_time(idx, settings, data) - particle["t"] ) # Distance to next collision - d_collision = physics.collision_distance(particle_container, mcdc, data) + d_collision = physics.collision_distance(particle_container, simulation, data) # ================================================================================== # Determine event(s) @@ -396,32 +401,34 @@ def move_to_event(particle_container, mcdc, data): # 
================================================================================== # Score tracklength tallies - if mcdc["cycle_active"]: + if simulation["cycle_active"]: # Cell tallies - cell = mcdc["cells"][particle["cell_ID"]] + cell = simulation["cells"][particle["cell_ID"]] for i in range(cell["N_tally"]): tally_ID = int(mcdc_get.cell.tally_IDs(i, cell, data)) - tally = mcdc["cell_tallies"][tally_ID] + tally = simulation["cell_tallies"][tally_ID] tally_module.score.tracklength_tally( - particle_container, distance, tally, mcdc, data + particle_container, distance, tally, simulation, data ) # Global tallies - for i in range(mcdc["N_global_tally"]): - tally = mcdc["global_tallies"][i] + for i in range(simulation["N_global_tally"]): + tally = simulation["global_tallies"][i] tally_module.score.tracklength_tally( - particle_container, distance, tally, mcdc, data + particle_container, distance, tally, simulation, data ) # Mesh tallies - for i in range(mcdc["N_mesh_tally"]): - tally = mcdc["mesh_tallies"][i] + for i in range(simulation["N_mesh_tally"]): + tally = simulation["mesh_tallies"][i] tally_module.score.mesh_tally( - particle_container, distance, tally, mcdc, data + particle_container, distance, tally, simulation, data ) if settings["eigenvalue_mode"]: - tally_module.score.eigenvalue_tally(particle_container, distance, mcdc, data) + tally_module.score.eigenvalue_tally( + particle_container, distance, simulation, data + ) # Move particle - particle_module.move(particle_container, distance, mcdc, data) + particle_module.move(particle_container, distance, simulation, data) diff --git a/mcdc/transport/source.py b/mcdc/transport/source.py index 776fc9f0..f8f57145 100644 --- a/mcdc/transport/source.py +++ b/mcdc/transport/source.py @@ -18,7 +18,7 @@ @njit -def source_particle(P_rec_arr, seed, mcdc, data): +def source_particle(P_rec_arr, seed, simulation, data): P_rec = P_rec_arr[0] P_rec["rng_seed"] = seed @@ -26,7 +26,7 @@ def source_particle(P_rec_arr, seed, 
mcdc, data): # TODO: use cdf and binary search instead xi = rng.lcg(P_rec_arr) tot = 0.0 - for source in mcdc["sources"]: + for source in simulation["sources"]: tot += source["probability"] if tot >= xi: break @@ -59,13 +59,13 @@ def source_particle(P_rec_arr, seed, mcdc, data): ) # Energy - if mcdc["settings"]["multigroup_mode"]: + if simulation["settings"]["multigroup_mode"]: E = 0.0 if source["mono_energetic"]: g = source["energy_group"] else: ID = source["energy_group_pmf_ID"] - pmf = mcdc["pmf_distributions"][ID] + pmf = simulation["pmf_distributions"][ID] g = sample_pmf(pmf, P_rec_arr, data) else: g = 0 @@ -73,7 +73,7 @@ def source_particle(P_rec_arr, seed, mcdc, data): E = source["energy"] else: ID = source["energy_pdf_ID"] - table = mcdc["tabulated_distributions"][ID] + table = simulation["tabulated_distributions"][ID] E = sample_tabulated(table, P_rec_arr, data) # Time diff --git a/mcdc/transport/tally/closeout.py b/mcdc/transport/tally/closeout.py index 70743b7b..aa60a7fe 100644 --- a/mcdc/transport/tally/closeout.py +++ b/mcdc/transport/tally/closeout.py @@ -26,19 +26,19 @@ @njit -def reduce(mcdc, data): - for tally in mcdc["tallies"]: - _reduce(tally, mcdc, data) +def reduce(simulation, data): + for tally in simulation["tallies"]: + _reduce(tally, simulation, data) @njit -def _reduce(tally, mcdc, data): +def _reduce(tally, simulation, data): N = tally["bin_length"] start = tally["bin_offset"] end = start + N # Normalize - N_particle = mcdc["settings"]["N_particle"] + N_particle = simulation["settings"]["N_particle"] for i in range(N): data[start + i] /= N_particle @@ -55,8 +55,8 @@ def _reduce(tally, mcdc, data): @njit -def accumulate(mcdc, data): - for tally in mcdc["tallies"]: +def accumulate(simulation, data): + for tally in simulation["tallies"]: _accumulate(tally, data) @@ -91,15 +91,15 @@ def _accumulate(tally, data): @njit -def finalize(mcdc, data): - for tally in mcdc["tallies"]: - _finalize(tally, mcdc, data) +def finalize(simulation, data): + 
for tally in simulation["tallies"]: + _finalize(tally, simulation, data) @njit -def _finalize(tally, mcdc, data): - N_history = mcdc["settings"]["N_particle"] - N_batch = mcdc["settings"]["N_batch"] +def _finalize(tally, simulation, data): + N_history = simulation["settings"]["N_particle"] + N_batch = simulation["settings"]["N_batch"] N_bin = tally["bin_length"] sum_start = tally["bin_sum_offset"] sum_sq_start = tally["bin_sum_square_offset"] @@ -109,8 +109,8 @@ def _finalize(tally, mcdc, data): if N_batch > 1: N_history = N_batch - elif mcdc["settings"]["eigenvalue_mode"]: - N_history = mcdc["settings"]["N_active"] + elif simulation["settings"]["eigenvalue_mode"]: + N_history = simulation["settings"]["N_active"] else: # MPI Reduce @@ -147,8 +147,8 @@ def _finalize(tally, mcdc, data): @njit -def reset_sum_bins(mcdc, data): - for tally in mcdc["tallies"]: +def reset_sum_bins(simulation, data): + for tally in simulation["tallies"]: _reset_sum_bins(tally, data) @@ -169,9 +169,9 @@ def _reset_sum_bins(tally, data): @njit -def eigenvalue_cycle(mcdc, data): - idx_cycle = mcdc["idx_cycle"] - N_particle = mcdc["settings"]["N_particle"] +def eigenvalue_cycle(simulation, data): + idx_cycle = simulation["idx_cycle"] + N_particle = simulation["settings"]["N_particle"] # MPI Allreduce buff_nuSigmaF = np.zeros(1, np.float64) @@ -181,64 +181,68 @@ def eigenvalue_cycle(mcdc, data): buff_Cmax = np.zeros(1, np.float64) with objmode(): MPI.COMM_WORLD.Allreduce( - np.array(mcdc["eigenvalue_tally_nuSigmaF"]), buff_nuSigmaF, MPI.SUM + np.array(simulation["eigenvalue_tally_nuSigmaF"]), buff_nuSigmaF, MPI.SUM ) - if mcdc["cycle_active"]: + if simulation["cycle_active"]: MPI.COMM_WORLD.Allreduce( - np.array(mcdc["eigenvalue_tally_n"]), buff_n, MPI.SUM + np.array(simulation["eigenvalue_tally_n"]), buff_n, MPI.SUM ) - MPI.COMM_WORLD.Allreduce(np.array([mcdc["n_max"]]), buff_nmax, MPI.MAX) MPI.COMM_WORLD.Allreduce( - np.array(mcdc["eigenvalue_tally_C"]), buff_C, MPI.SUM + 
np.array([simulation["n_max"]]), buff_nmax, MPI.MAX + ) + MPI.COMM_WORLD.Allreduce( + np.array(simulation["eigenvalue_tally_C"]), buff_C, MPI.SUM + ) + MPI.COMM_WORLD.Allreduce( + np.array([simulation["C_max"]]), buff_Cmax, MPI.MAX ) - MPI.COMM_WORLD.Allreduce(np.array([mcdc["C_max"]]), buff_Cmax, MPI.MAX) # Update and store k_eff - mcdc["k_eff"] = buff_nuSigmaF[0] / N_particle - mcdc_set.simulation.k_cycle(idx_cycle, mcdc, data, value=mcdc["k_eff"]) + simulation["k_eff"] = buff_nuSigmaF[0] / N_particle + mcdc_set.simulation.k_cycle(idx_cycle, simulation, data, value=simulation["k_eff"]) # Normalize other eigenvalue/global tallies tally_n = buff_n[0] / N_particle tally_C = buff_C[0] / N_particle # Maximum densities - mcdc["n_max"] = buff_nmax[0] - mcdc["C_max"] = buff_Cmax[0] + simulation["n_max"] = buff_nmax[0] + simulation["C_max"] = buff_Cmax[0] # Accumulate running average - if mcdc["cycle_active"]: - mcdc["k_avg"] += mcdc["k_eff"] - mcdc["k_sdv"] += mcdc["k_eff"] * mcdc["k_eff"] - mcdc["n_avg"] += tally_n - mcdc["n_sdv"] += tally_n * tally_n - mcdc["C_avg"] += tally_C - mcdc["C_sdv"] += tally_C * tally_C - - N = 1 + mcdc["idx_cycle"] - mcdc["settings"]["N_inactive"] - mcdc["k_avg_running"] = mcdc["k_avg"] / N + if simulation["cycle_active"]: + simulation["k_avg"] += simulation["k_eff"] + simulation["k_sdv"] += simulation["k_eff"] * simulation["k_eff"] + simulation["n_avg"] += tally_n + simulation["n_sdv"] += tally_n * tally_n + simulation["C_avg"] += tally_C + simulation["C_sdv"] += tally_C * tally_C + + N = 1 + simulation["idx_cycle"] - simulation["settings"]["N_inactive"] + simulation["k_avg_running"] = simulation["k_avg"] / N if N == 1: - mcdc["k_sdv_running"] = 0.0 + simulation["k_sdv_running"] = 0.0 else: - mcdc["k_sdv_running"] = math.sqrt( - (mcdc["k_sdv"] / N - mcdc["k_avg_running"] ** 2) / (N - 1) + simulation["k_sdv_running"] = math.sqrt( + (simulation["k_sdv"] / N - simulation["k_avg_running"] ** 2) / (N - 1) ) # Reset accumulators - 
mcdc["eigenvalue_tally_nuSigmaF"][0] = 0.0 - mcdc["eigenvalue_tally_n"][0] = 0.0 - mcdc["eigenvalue_tally_C"][0] = 0.0 + simulation["eigenvalue_tally_nuSigmaF"][0] = 0.0 + simulation["eigenvalue_tally_n"][0] = 0.0 + simulation["eigenvalue_tally_C"][0] = 0.0 # ===================================================================== # Gyration radius # ===================================================================== - if mcdc["settings"]["use_gyration_radius"]: + if simulation["settings"]["use_gyration_radius"]: # Center of mass - N_local = particle_bank_module.get_bank_size(mcdc["bank_census"]) + N_local = particle_bank_module.get_bank_size(simulation["bank_census"]) total_local = np.zeros(4, np.float64) # [x,y,z,W] total = np.zeros(4, np.float64) for i in range(N_local): - P = mcdc["bank_census"]["particles"][i] + P = simulation["bank_census"]["particles"][i] total_local[0] += P["x"] * P["w"] total_local[1] += P["y"] * P["w"] total_local[2] += P["z"] * P["w"] @@ -255,10 +259,10 @@ def eigenvalue_cycle(mcdc, data): # Distance RMS rms_local = np.zeros(1, np.float64) rms = np.zeros(1, np.float64) - gr_type = mcdc["settings"]["gyration_radius_type"] + gr_type = simulation["settings"]["gyration_radius_type"] if gr_type == GYRATION_RADIUS_ALL: for i in range(N_local): - P = mcdc["bank_census"]["particles"][i] + P = simulation["bank_census"]["particles"][i] rms_local[0] += ( (P["x"] - com_x) ** 2 + (P["y"] - com_y) ** 2 @@ -266,27 +270,27 @@ def eigenvalue_cycle(mcdc, data): ) * P["w"] elif gr_type == GYRATION_RADIUS_INFINITE_X: for i in range(N_local): - P = mcdc["bank_census"]["particles"][i] + P = simulation["bank_census"]["particles"][i] rms_local[0] += ((P["y"] - com_y) ** 2 + (P["z"] - com_z) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_INFINITE_Y: for i in range(N_local): - P = mcdc["bank_census"]["particles"][i] + P = simulation["bank_census"]["particles"][i] rms_local[0] += ((P["x"] - com_x) ** 2 + (P["z"] - com_z) ** 2) * P["w"] elif gr_type == 
GYRATION_RADIUS_INFINITE_Z: for i in range(N_local): - P = mcdc["bank_census"]["particles"][i] + P = simulation["bank_census"]["particles"][i] rms_local[0] += ((P["x"] - com_x) ** 2 + (P["y"] - com_y) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_ONLY_X: for i in range(N_local): - P = mcdc["bank_census"]["particles"][i] + P = simulation["bank_census"]["particles"][i] rms_local[0] += ((P["x"] - com_x) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_ONLY_Y: for i in range(N_local): - P = mcdc["bank_census"]["particles"][i] + P = simulation["bank_census"]["particles"][i] rms_local[0] += ((P["y"] - com_y) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_ONLY_Z: for i in range(N_local): - P = mcdc["bank_census"]["particles"][i] + P = simulation["bank_census"]["particles"][i] rms_local[0] += ((P["z"] - com_z) ** 2) * P["w"] # MPI Allreduce @@ -295,17 +299,21 @@ def eigenvalue_cycle(mcdc, data): rms = math.sqrt(rms[0] / W) # Gyration radius - mcdc_set.simulation.gyration_radius(idx_cycle, mcdc, data, value=rms) + mcdc_set.simulation.gyration_radius(idx_cycle, simulation, data, value=rms) @njit -def eigenvalue_simulation(mcdc): - N = mcdc["settings"]["N_active"] - mcdc["n_avg"] /= N - mcdc["C_avg"] /= N +def eigenvalue_simulation(simulation): + N = simulation["settings"]["N_active"] + simulation["n_avg"] /= N + simulation["C_avg"] /= N if N > 1: - mcdc["n_sdv"] = math.sqrt((mcdc["n_sdv"] / N - mcdc["n_avg"] ** 2) / (N - 1)) - mcdc["C_sdv"] = math.sqrt((mcdc["C_sdv"] / N - mcdc["C_avg"] ** 2) / (N - 1)) + simulation["n_sdv"] = math.sqrt( + (simulation["n_sdv"] / N - simulation["n_avg"] ** 2) / (N - 1) + ) + simulation["C_sdv"] = math.sqrt( + (simulation["C_sdv"] / N - simulation["C_avg"] ** 2) / (N - 1) + ) else: - mcdc["n_sdv"] = 0.0 - mcdc["C_sdv"] = 0.0 + simulation["n_sdv"] = 0.0 + simulation["C_sdv"] = 0.0 diff --git a/mcdc/transport/tally/filter.py b/mcdc/transport/tally/filter.py index 22856ba1..3fe90f5b 100644 --- a/mcdc/transport/tally/filter.py +++ 
b/mcdc/transport/tally/filter.py @@ -87,10 +87,10 @@ def get_time_index(particle_container, tally, data): @njit -def set_census_based_time_grid(mcdc, data): - settings = mcdc["settings"] +def set_census_based_time_grid(simulation, data): + settings = simulation["settings"] tally_frequency = settings["census_tally_frequency"] - idx_census = mcdc["idx_census"] + idx_census = simulation["idx_census"] # Starting time if idx_census == 0: @@ -105,7 +105,7 @@ def set_census_based_time_grid(mcdc, data): dt = (t_end - t_start) / tally_frequency # Set the time grid to all tallies - for tally in mcdc["tallies"]: + for tally in simulation["tallies"]: mcdc_set.tally.time(0, tally, data, t_start) for j in range(tally_frequency): t_next = mcdc_get.tally.time(j, tally, data) + dt diff --git a/mcdc/transport/tally/score.py b/mcdc/transport/tally/score.py index f4e20431..0bba4c51 100644 --- a/mcdc/transport/tally/score.py +++ b/mcdc/transport/tally/score.py @@ -31,9 +31,9 @@ @njit -def make_scores(particle_container, flux, tally, idx_base, mcdc, data): +def make_scores(particle_container, flux, tally, idx_base, simulation, data): particle = particle_container[0] - speed = physics.particle_speed(particle_container, mcdc, data) + speed = physics.particle_speed(particle_container, simulation, data) multiplier = 1.0 for i_multiplier in range(tally["multipliers_length"]): @@ -50,30 +50,30 @@ def make_scores(particle_container, flux, tally, idx_base, mcdc, data): score = flux / speed elif score_type == SCORE_COLLISION: score = flux * physics.macro_xs( - REACTION_TOTAL, particle_container, mcdc, data + REACTION_TOTAL, particle_container, simulation, data ) elif score_type == SCORE_CAPTURE: score = flux * physics.macro_xs( - REACTION_NEUTRON_CAPTURE, particle_container, mcdc, data + REACTION_NEUTRON_CAPTURE, particle_container, simulation, data ) elif score_type == SCORE_FISSION: score = flux * physics.macro_xs( - REACTION_NEUTRON_FISSION, particle_container, mcdc, data + 
REACTION_NEUTRON_FISSION, particle_container, simulation, data ) elif score_type == SCORE_NET_CURRENT: - surface = mcdc["surfaces"][particle["surface_ID"]] + surface = simulation["surfaces"][particle["surface_ID"]] mu = get_normal_component(particle_container, speed, surface, data) score = flux * mu atomic_add(data, idx_base + i_score, score * multiplier) @njit -def tracklength_tally(particle_container, distance, tally, mcdc, data): +def tracklength_tally(particle_container, distance, tally, simulation, data): particle = particle_container[0] - tally_base = mcdc["tallies"][tally["parent_ID"]] + tally_base = simulation["tallies"][tally["parent_ID"]] # Get filter indices - MG_mode = mcdc["settings"]["multigroup_mode"] + MG_mode = simulation["settings"]["multigroup_mode"] i_mu, i_azi, i_energy, i_time = get_filter_indices( particle_container, tally_base, data, MG_mode ) @@ -83,7 +83,7 @@ def tracklength_tally(particle_container, distance, tally, mcdc, data): return # Particle/track properties - ut = 1.0 / physics.particle_speed(particle_container, mcdc, data) + ut = 1.0 / physics.particle_speed(particle_container, simulation, data) t = particle["t"] t_final = t + ut * distance @@ -123,7 +123,7 @@ def tracklength_tally(particle_container, distance, tally, mcdc, data): # Score flux = distance_scored * particle["w"] - make_scores(particle_container, flux, tally_base, idx_base, mcdc, data) + make_scores(particle_container, flux, tally_base, idx_base, simulation, data) # Accumulate distance swept distance_swept += distance_scored @@ -142,12 +142,12 @@ def tracklength_tally(particle_container, distance, tally, mcdc, data): @njit -def surface_tally(particle_container, surface, tally, mcdc, data): +def surface_tally(particle_container, surface, tally, simulation, data): particle = particle_container[0] - tally_base = mcdc["tallies"][tally["parent_ID"]] + tally_base = simulation["tallies"][tally["parent_ID"]] # Get filter indices - MG_mode = mcdc["settings"]["multigroup_mode"] 
+ MG_mode = simulation["settings"]["multigroup_mode"] i_mu, i_azi, i_energy, i_time = get_filter_indices( particle_container, tally_base, data, MG_mode ) @@ -166,21 +166,21 @@ def surface_tally(particle_container, surface, tally, mcdc, data): ) # Flux - speed = physics.particle_speed(particle_container, mcdc, data) + speed = physics.particle_speed(particle_container, simulation, data) mu = get_normal_component(particle_container, speed, surface, data) flux = particle["w"] / abs(mu) # Score - make_scores(particle_container, flux, tally_base, idx_base, mcdc, data) + make_scores(particle_container, flux, tally_base, idx_base, simulation, data) @njit -def mesh_tally(particle_container, distance, tally, mcdc, data): +def mesh_tally(particle_container, distance, tally, simulation, data): particle = particle_container[0] - tally_base = mcdc["tallies"][tally["parent_ID"]] + tally_base = simulation["tallies"][tally["parent_ID"]] # Get filter indices - MG_mode = mcdc["settings"]["multigroup_mode"] + MG_mode = simulation["settings"]["multigroup_mode"] i_mu, i_azi, i_energy, i_time = get_filter_indices( particle_container, tally_base, data, MG_mode ) @@ -190,7 +190,7 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): return # Get the mesh - mesh = mcdc["meshes"][tally["mesh_ID"]] + mesh = simulation["meshes"][tally["mesh_ID"]] # Particle/track properties x = particle["x"] @@ -200,7 +200,7 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): ux = particle["ux"] uy = particle["uy"] uz = particle["uz"] - ut = 1.0 / physics.particle_speed(particle_container, mcdc, data) + ut = 1.0 / physics.particle_speed(particle_container, simulation, data) x_final = x + ux * distance y_final = y + uy * distance z_final = z + uz * distance @@ -220,12 +220,12 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): i_time = 0 # Get mesh bin indices - i_x, i_y, i_z = mesh_module.get_indices(particle_container, mesh, mcdc, data) + i_x, i_y, i_z = 
mesh_module.get_indices(particle_container, mesh, simulation, data) # No score if particle does not cross the mesh bins # Also get the appropriate index if needed - x_min = mesh_module.get_x(0, mesh, mcdc, data) - x_max = mesh_module.get_x(mesh["Nx"], mesh, mcdc, data) + x_min = mesh_module.get_x(0, mesh, simulation, data) + x_max = mesh_module.get_x(mesh["Nx"], mesh, simulation, data) if ux > 0.0: if x_final < x_min + COINCIDENCE_TOLERANCE or x > x_max - COINCIDENCE_TOLERANCE: return @@ -236,8 +236,8 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): return if x > x_max - COINCIDENCE_TOLERANCE: i_x = mesh["Nx"] - y_min = mesh_module.get_y(0, mesh, mcdc, data) - y_max = mesh_module.get_y(mesh["Ny"], mesh, mcdc, data) + y_min = mesh_module.get_y(0, mesh, simulation, data) + y_max = mesh_module.get_y(mesh["Ny"], mesh, simulation, data) if uy > 0.0: if y_final < y_min + COINCIDENCE_TOLERANCE or y > y_max - COINCIDENCE_TOLERANCE: return @@ -248,8 +248,8 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): return if y > y_max - COINCIDENCE_TOLERANCE: i_y = mesh["Ny"] - z_min = mesh_module.get_z(0, mesh, mcdc, data) - z_max = mesh_module.get_z(mesh["Nz"], mesh, mcdc, data) + z_min = mesh_module.get_z(0, mesh, simulation, data) + z_max = mesh_module.get_z(mesh["Nz"], mesh, simulation, data) if uz > 0.0: if z_final < z_min + COINCIDENCE_TOLERANCE or z > z_max - COINCIDENCE_TOLERANCE: return @@ -285,10 +285,10 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): dx = INF else: if ux > 0.0: - x_next = mesh_module.get_x(i_x + 1, mesh, mcdc, data) + x_next = mesh_module.get_x(i_x + 1, mesh, simulation, data) x_next = min(x_next, x_final) else: - x_next = mesh_module.get_x(i_x, mesh, mcdc, data) + x_next = mesh_module.get_x(i_x, mesh, simulation, data) x_next = max(x_next, x_final) dx = (x_next - x) / ux @@ -297,10 +297,10 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): dy = INF else: if uy > 0.0: - y_next = 
mesh_module.get_y(i_y + 1, mesh, mcdc, data) + y_next = mesh_module.get_y(i_y + 1, mesh, simulation, data) y_next = min(y_next, y_final) else: - y_next = mesh_module.get_y(i_y, mesh, mcdc, data) + y_next = mesh_module.get_y(i_y, mesh, simulation, data) y_next = max(y_next, y_final) dy = (y_next - y) / uy @@ -309,10 +309,10 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): dz = INF else: if uz > 0.0: - z_next = mesh_module.get_z(i_z + 1, mesh, mcdc, data) + z_next = mesh_module.get_z(i_z + 1, mesh, simulation, data) z_next = min(z_next, z_final) else: - z_next = mesh_module.get_z(i_z, mesh, mcdc, data) + z_next = mesh_module.get_z(i_z, mesh, simulation, data) z_next = max(z_next, z_final) dz = (z_next - z) / uz @@ -341,7 +341,7 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): # Score flux = distance_scored * particle["w"] - make_scores(particle_container, flux, tally_base, idx_base, mcdc, data) + make_scores(particle_container, flux, tally_base, idx_base, simulation, data) # Accumulate distance swept distance_swept += distance_scored @@ -399,33 +399,33 @@ def mesh_tally(particle_container, distance, tally, mcdc, data): @njit -def eigenvalue_tally(particle_container, distance, mcdc, data): +def eigenvalue_tally(particle_container, distance, simulation, data): particle = particle_container[0] flux = distance * particle["w"] # Get nu-fission nuSigmaF = physics.neutron_production_xs( - REACTION_NEUTRON_FISSION, particle_container, mcdc, data + REACTION_NEUTRON_FISSION, particle_container, simulation, data ) # Fission production (needed even during inactive cycle) - atomic_add(mcdc["eigenvalue_tally_nuSigmaF"], 0, flux * nuSigmaF) + atomic_add(simulation["eigenvalue_tally_nuSigmaF"], 0, flux * nuSigmaF) # Done, if inactive - if not mcdc["cycle_active"]: + if not simulation["cycle_active"]: return # ================================================================================== # Neutron density # 
================================================================================== - v = physics.particle_speed(particle_container, mcdc, data) + v = physics.particle_speed(particle_container, simulation, data) n_density = flux / v - atomic_add(mcdc["eigenvalue_tally_n"], 0, n_density) + atomic_add(simulation["eigenvalue_tally_n"], 0, n_density) # Maximum neutron density - if mcdc["n_max"] < n_density: - mcdc["n_max"] = n_density + if simulation["n_max"] < n_density: + simulation["n_max"] = n_density # ================================================================================== # TODO: Delayed neutron precursor density @@ -433,7 +433,7 @@ def eigenvalue_tally(particle_container, distance, mcdc, data): return # Get the decay-wighted multiplicity total = 0.0 - if mcdc["settings"]["multigroup_mode"]: + if simulation["settings"]["multigroup_mode"]: g = particle["g"] for j in range(J): nu_d = mcdc_get.material.mgxs_nu_d(g, j, material, data) @@ -443,7 +443,7 @@ def eigenvalue_tally(particle_container, distance, mcdc, data): E = P["E"] for i in range(material["N_nuclide"]): ID_nuclide = material["nuclide_IDs"][i] - nuclide = mcdc["nuclides"][ID_nuclide] + nuclide = simulation["nuclides"][ID_nuclide] if not nuclide["fissionable"]: continue for j in range(J): @@ -451,10 +451,12 @@ def eigenvalue_tally(particle_container, distance, mcdc, data): decay = nuclide["ce_decay"][j] total += nu_d / decay - SigmaF = physics.macro_xs(REACTION_NEUTRON_FISSION, particle_container, mcdc, data) - C_density = flux * total * SigmaF / mcdc["k_eff"] - atomic_add(mcdc["eigenvalue_tally_C"], 0, C_density) + SigmaF = physics.macro_xs( + REACTION_NEUTRON_FISSION, particle_container, simulation, data + ) + C_density = flux * total * SigmaF / simulation["k_eff"] + atomic_add(simulation["eigenvalue_tally_C"], 0, C_density) # Maximum precursor density - if mcdc["C_max"] < C_density: - mcdc["C_max"] = C_density + if simulation["C_max"] < C_density: + simulation["C_max"] = C_density diff --git 
a/mcdc/transport/technique.py b/mcdc/transport/technique.py index f8b15c05..f7b15875 100644 --- a/mcdc/transport/technique.py +++ b/mcdc/transport/technique.py @@ -16,10 +16,10 @@ @njit -def weight_roulette(particle_container, mcdc): +def weight_roulette(particle_container, simulation): particle = particle_container[0] - if particle["w"] < mcdc["weight_roulette"]["weight_threshold"]: - w_target = mcdc["weight_roulette"]["weight_target"] + if particle["w"] < simulation["weight_roulette"]["weight_threshold"]: + w_target = simulation["weight_roulette"]["weight_target"] survival_probability = particle["w"] / w_target if rng.lcg(particle_container) < survival_probability: particle["w"] = w_target @@ -33,15 +33,15 @@ def weight_roulette(particle_container, mcdc): @njit -def population_control(mcdc): +def population_control(simulation): """Uniform Splitting-Roulette technique""" - bank_census = mcdc["bank_census"] - M = mcdc["settings"]["N_particle"] - bank_source = mcdc["bank_source"] + bank_census = simulation["bank_census"] + M = simulation["settings"]["N_particle"] + bank_source = simulation["bank_source"] # Scan the bank - idx_start, N_local, N = particle_bank_module.bank_scanning(bank_census, mcdc) + idx_start, N_local, N = particle_bank_module.bank_scanning(bank_census, simulation) idx_end = idx_start + N_local # Abort if census bank is empty @@ -79,4 +79,4 @@ def population_control(mcdc): ) # Set weight P_rec["w"] = w_survive - particle_bank_module.bank_source_particle(P_rec_arr, mcdc) + particle_bank_module.bank_source_particle(P_rec_arr, simulation) From ed256624296701c19fc059bd02f04446258fbd9a Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Sat, 7 Mar 2026 07:36:35 +0700 Subject: [PATCH 13/50] fix typo on simulationPy --- mcdc/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mcdc/main.py b/mcdc/main.py index 01e3cdd0..d338b608 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -148,7 +148,7 @@ def preparation(): # Set nuclear and 
atomic data for transported particles if settings.neutron_transport: - for nuclide in simulation.nuclides: + for nuclide in simulationPy.nuclides: nuclide.set_neutron_data() # Set physics mode From c487803ee0d2f540e861a26df12d391ed1698918 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Sun, 8 Mar 2026 19:55:16 -0700 Subject: [PATCH 14/50] attempt to focus on alloc_device_bytes --- mcdc/code_factory/gpu/program_builder.py | 41 +++++++++++--------- mcdc/code_factory/numba_objects_generator.py | 22 +++++++---- 2 files changed, 37 insertions(+), 26 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 306ffcac..71f7ac87 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -113,23 +113,26 @@ def find_cell(program: nb.uintp, particle: particle_gpu): # Base functions # ============== - def make_work(prog: nb.uintp) -> nb.boolean: - mcdc = adapt.mcdc_global(prog) + def make_work(program: nb.uintp) -> nb.boolean: + simulation = simulation_gpu(program) - idx_work = adapt.global_add(mcdc["mpi_work_iter"], 0, 1) + idx_work = adapt.global_add(simulation["mpi_work_iter"], 0, 1) - if idx_work >= mcdc["mpi_work_size"]: + if idx_work >= simulation["mpi_work_size"]: return False generate_source_particle( - mcdc["mpi_work_start"], nb.uint64(idx_work), mcdc["source_seed"], prog + simulation["mpi_work_start"], + nb.uint64(idx_work), + simulation["source_seed"], + program, ) return True - def initialize(prog: nb.uintp): + def initialize(program: nb.uintp): pass - def finalize(prog: nb.uintp): + def finalize(program: nb.uintp): pass # ================ @@ -138,19 +141,19 @@ def finalize(prog: nb.uintp): shape = (size,) - def step(prog: nb.uintp, P_input: particle_gpu): - mcdc = adapt.mcdc_global(prog) - data_ptr = adapt.mcdc_data(prog) + def step(program: nb.uintp, particle_input: particle_gpu): + simulation = simulation_gpu(program) + data_ptr = adapt.mcdc_data(program) data = 
adapt.harm.array_from_ptr(data_ptr, shape, nb.float64) - P_arr = adapt.local_array(1, type_.particle) - P_arr[0] = P_input - P = P_arr[0] - if P["fresh"]: - prep_particle(P_arr, prog) - P["fresh"] = False - step_particle(P_arr, data, prog) - if P["alive"]: - adapt.step_async(prog, P) + particle_container = adapt.local_array(1, type_.particle) + particle_container[0] = particle_input + particle = particle_container[0] + if particle["fresh"]: + prep_particle(particle_container, program) + particle["fresh"] = False + step_particle(particle_container, data, program) + if particle["alive"]: + adapt.step_async(program, particle) # Bind them all base_fns = (initialize, finalize, make_work) diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index f153e010..652f5a94 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -255,11 +255,13 @@ def generate_numba_objects(simulation): set_object(object_, annotations, structures, records, data) set_object(simulation, annotations, structures, records, data) + """ # Build GPU program if needed if config.target == "gpu": from mcdc.code_factory.gpu.program_builder import build_gpu_program build_gpu_program(simulation, data["size"]) + """ # Allocate the flattened data and re-set the objects data["array"], data["pointer"] = create_data_array(data["size"], type_map[float]) @@ -646,18 +648,24 @@ def set_object( # ============================================================================= -# Global GPU/CPU Array Variable Constructors +# Global GPU/CPU variable array constructors # ============================================================================= def create_data_array(size, dtype): - if config.target == "gpu": - import mcdc.code_factory.gpu.program_builder as gpu_builder - - return gpu_builder.create_data_array(size, dtype) + if not config.target == "gpu": + data = np.zeros(size, dtype=dtype) + return data, 0 else: - 
data_tally = np.zeros(size, dtype=dtype) - return data_tally, 0 + import harmonize + + if config.gpu_state_storage == "managed": + data_ptr = harmonize.alloc_managed_bytes(size) + else: + data_ptr = harmonize.alloc_device_bytes(size) + data_uint = voidptr_to_uintp(data_ptr) + data = numba.carray(data_ptr, (size,), type_.float64) + return data, data_uint def create_mcdc_container(dtype): From c6a1ccb559b7d827b34de22eddb29e2f1d114148 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Sun, 8 Mar 2026 22:03:44 -0700 Subject: [PATCH 15/50] organize --- mcdc/code_factory/numba_objects_generator.py | 65 +++++++++++++++++--- 1 file changed, 55 insertions(+), 10 deletions(-) diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 652f5a94..63352b2b 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -2,10 +2,21 @@ #### +import importlib +import numba as nb import numpy as np -from pathlib import Path from mpi4py import MPI +from numba import njit +from numba.extending import intrinsic +from pathlib import Path + +if importlib.util.find_spec("harmonize") is None: + HAS_HARMONIZE = False +else: + import harmonize + + HAS_HARMONIZE = True #### @@ -657,18 +668,23 @@ def create_data_array(size, dtype): data = np.zeros(size, dtype=dtype) return data, 0 else: - import harmonize + create_data_array_on_gpu(size, dtype) - if config.gpu_state_storage == "managed": - data_ptr = harmonize.alloc_managed_bytes(size) - else: - data_ptr = harmonize.alloc_device_bytes(size) - data_uint = voidptr_to_uintp(data_ptr) - data = numba.carray(data_ptr, (size,), type_.float64) - return data, data_uint + +@njit +def create_data_array_on_gpu(size, dtype): + if config.gpu_state_storage == "managed": + pass + # data_ptr = harmonize.alloc_managed_bytes(size) + else: + data_ptr = harmonize.alloc_device_bytes(size) + data_uint = voidptr_to_uintp(data_ptr) + data = nb.carray(data_ptr, (size,), dtype) 
+ return data, data_uint -def create_mcdc_container(dtype): +@njit +def create_mcdc_container_on_gpu(dtype): if config.target == "gpu": import mcdc.code_factory.gpu.program_builder as gpu_builder @@ -678,6 +694,35 @@ def create_mcdc_container(dtype): return mcdc_container, 0 +# ============================================================================= +# Type casters +# ============================================================================= + + +@intrinsic +def cast_voidptr_to_uintp(typingctx, src): + # check for accepted types + if isinstance(src, nb.types.RawPointer): + # create the expected type signature + result_type = nb.types.uintp + sig = result_type(nb.types.voidptr) + + # defines the custom code generation + def codegen(context, builder, signature, args): + # llvm IRBuilder code here + [src] = args + rtype = signature.return_type + llrtype = context.get_value_type(rtype) + return builder.ptrtoint(src, llrtype) + + return sig, codegen + + +@njit() +def voidptr_to_uintp(value): + return cast_voidptr_to_uintp(value) + + # ====================================================================================== # Alignment Logic # ====================================================================================== From 79910cc3b6e934b2e9cf44b0c37d46eeb28d7b82 Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Mon, 9 Mar 2026 13:44:34 +0700 Subject: [PATCH 16/50] clearn up gpu forward declare --- mcdc/code_factory/gpu/program_builder.py | 14 ++++---------- mcdc/code_factory/numba_objects_generator.py | 18 +++++------------- 2 files changed, 9 insertions(+), 23 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 71f7ac87..d5d2e4ba 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -10,10 +10,6 @@ import mcdc.config as config import mcdc.numba_types as type_ -# 
====================================================================================== -# Build GPU program -# ====================================================================================== - # Main types none_type = None simulation_type = None @@ -41,7 +37,7 @@ free_state = lambda pointer: None -def build_gpu_program(simulation, size): +def forward_declare_gpu_program(simulation, data_size): global none_type, simulation_type, data_type global state_spec, simulation_gpu, data_gpu, group_gpu, thread_gpu, particle_gpu, particle_record_gpu global step_async, find_cell_async @@ -55,10 +51,6 @@ def build_gpu_program(simulation, size): else: harmonize.config.should_compile(harmonize.config.ShouldCompile.NEVER) - # ================================================================================== - # Forward declaration - # ================================================================================== - # ROCm and CUDA paths if config.args.gpu_cuda_path != None: harmonize.config.set_cuda_path(config.args.gpu_cuda_path) @@ -105,6 +97,8 @@ def find_cell(program: nb.uintp, particle: particle_gpu): alloc_managed_bytes = harmonize.alloc_managed_bytes alloc_device_bytes = harmonize.alloc_device_bytes + +''' # ================================================================================== # "gpu_sources_spec" # ================================================================================== @@ -240,7 +234,7 @@ def step(program: nb.uintp, particle_input: particle_gpu): def teardown_gpu_program(mcdc): src_free_program(cast_uintp_to_voidptr(mcdc["gpu_meta"]["source_program_pointer"])) free_state(cast_uintp_to_voidptr(mcdc["gpu_meta"]["state_pointer"])) - +''' # ====================================================================================== # Simulation structure and data creators diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 63352b2b..5159faad 100644 --- 
a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -11,19 +11,13 @@ from numba.extending import intrinsic from pathlib import Path -if importlib.util.find_spec("harmonize") is None: - HAS_HARMONIZE = False -else: - import harmonize - - HAS_HARMONIZE = True - #### import mcdc import mcdc.config as config import mcdc.object_ as object_module import mcdc.object_.base as base +import mcdc.code_factory.gpu.program_builder as gpu_builder from mcdc.object_.base import ( ObjectBase, @@ -266,13 +260,11 @@ def generate_numba_objects(simulation): set_object(object_, annotations, structures, records, data) set_object(simulation, annotations, structures, records, data) - """ - # Build GPU program if needed + # Forward declare GPU program, if needed if config.target == "gpu": - from mcdc.code_factory.gpu.program_builder import build_gpu_program + from mcdc.code_factory.gpu.program_builder import forward_declare_gpu_program - build_gpu_program(simulation, data["size"]) - """ + forward_declare_gpu_program(simulation, data["size"]) # Allocate the flattened data and re-set the objects data["array"], data["pointer"] = create_data_array(data["size"], type_map[float]) @@ -677,7 +669,7 @@ def create_data_array_on_gpu(size, dtype): pass # data_ptr = harmonize.alloc_managed_bytes(size) else: - data_ptr = harmonize.alloc_device_bytes(size) + data_ptr = gpu_builder.alloc_device_bytes(size) data_uint = voidptr_to_uintp(data_ptr) data = nb.carray(data_ptr, (size,), dtype) return data, data_uint From 18213f38a59f4810e07670e0a9393d6f2431db69 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Mon, 9 Mar 2026 01:12:27 -0700 Subject: [PATCH 17/50] clean up prints --- mcdc/code_factory/gpu/program_builder.py | 3 ++- mcdc/code_factory/numba_objects_generator.py | 7 +++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index d5d2e4ba..0f06c64b 100644 --- 
a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -8,7 +8,6 @@ #### import mcdc.config as config -import mcdc.numba_types as type_ # Main types none_type = None @@ -38,6 +37,8 @@ def forward_declare_gpu_program(simulation, data_size): + import mcdc.numba_types as type_ + global none_type, simulation_type, data_type global state_spec, simulation_gpu, data_gpu, group_gpu, thread_gpu, particle_gpu, particle_record_gpu global step_async, find_cell_async diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 5159faad..48f60e63 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -14,10 +14,10 @@ #### import mcdc +import mcdc.code_factory.gpu.program_builder as gpu_builder import mcdc.config as config import mcdc.object_ as object_module import mcdc.object_.base as base -import mcdc.code_factory.gpu.program_builder as gpu_builder from mcdc.object_.base import ( ObjectBase, @@ -663,11 +663,10 @@ def create_data_array(size, dtype): create_data_array_on_gpu(size, dtype) -@njit +# @njit def create_data_array_on_gpu(size, dtype): if config.gpu_state_storage == "managed": - pass - # data_ptr = harmonize.alloc_managed_bytes(size) + data_ptr = gpu_builder.alloc_managed_bytes(size) else: data_ptr = gpu_builder.alloc_device_bytes(size) data_uint = voidptr_to_uintp(data_ptr) From cc25d3df25916fd199515a42a56040aab813b078 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Mon, 9 Mar 2026 16:03:23 -0700 Subject: [PATCH 18/50] refactor gpu builder --- mcdc/code_factory/gpu/adapt.py | 613 ------------------- mcdc/code_factory/gpu/program_builder.py | 31 +- mcdc/code_factory/numba_objects_generator.py | 79 ++- 3 files changed, 72 insertions(+), 651 deletions(-) delete mode 100644 mcdc/code_factory/gpu/adapt.py diff --git a/mcdc/code_factory/gpu/adapt.py b/mcdc/code_factory/gpu/adapt.py deleted file mode 100644 index 4d504f2e..00000000 --- 
a/mcdc/code_factory/gpu/adapt.py +++ /dev/null @@ -1,613 +0,0 @@ -import importlib -import inspect -import numba -import numpy as np - -from numba import njit, jit, types -from numba.extending import intrinsic - -if importlib.util.find_spec("harmonize") is None: - HAS_HARMONIZE = False -else: - import harmonize as harm - - HAS_HARMONIZE = True - -#### - - -import mcdc.config as config - -from mcdc.print_ import print_error - -# ============================================================================= -# Error Messangers -# ============================================================================= - - -def unknown_target(target): - print_error(f"ERROR: Unrecognized target '{target}'") - - -# ============================================================================= -# uintp/voidptr casters -# ============================================================================= - - -@intrinsic -def cast_uintp_to_voidptr(typingctx, src): - # check for accepted types - if isinstance(src, types.Integer): - # create the expected type signature - result_type = types.voidptr - sig = result_type(types.uintp) - - # defines the custom code generation - def codegen(context, builder, signature, args): - # llvm IRBuilder code here - [src] = args - rtype = signature.return_type - llrtype = context.get_value_type(rtype) - return builder.inttoptr(src, llrtype) - - return sig, codegen - - -@intrinsic -def cast_voidptr_to_uintp(typingctx, src): - # check for accepted types - if isinstance(src, types.RawPointer): - # create the expected type signature - result_type = types.uintp - sig = result_type(types.voidptr) - - # defines the custom code generation - def codegen(context, builder, signature, args): - # llvm IRBuilder code here - [src] = args - rtype = signature.return_type - llrtype = context.get_value_type(rtype) - return builder.ptrtoint(src, llrtype) - - return sig, codegen - - -@njit() -def uintp_to_voidptr(value): - val = numba.uintp(value) - return 
cast_uintp_to_voidptr(val) - - -@njit() -def voidptr_to_uintp(value): - return cast_voidptr_to_uintp(value) - - -def leak(arg): - pass - - -@intrinsic -def leak_inner(typingctx, kind): - def codegen(context, builder, signature, args): - context.nrt.incref(builder, kind, args[0]) - - return numba.void(kind), codegen - - -@numba.extending.overload(leak) -def leak_overload(arg): - def impl(arg): - leak_inner(arg) - - return impl - - -# ============================================================================= -# Generic GPU/CPU Local Array Variable Constructors -# ============================================================================= - - -def local_array(shape, dtype): - return np.zeros(shape, dtype=dtype) - - -@numba.extending.type_callable(local_array) -def type_local_array(context): - - from numba.core.typing.npydecl import parse_dtype, parse_shape - - if isinstance(context, numba.core.typing.context.Context): - - # Function repurposed from Numba's ol_np_empty. - def typer(shape, dtype): - numba.np.arrayobj._check_const_str_dtype("empty", dtype) - - # Only integer literals and tuples of integer literals are valid - # shapes - if isinstance(shape, types.Integer): - if not isinstance(shape, types.IntegerLiteral): - raise numba.core.errors.UnsupportedError( - f"Integer shape type {shape} is not literal." - ) - elif isinstance(shape, (types.Tuple, types.UniTuple)): - if any([not isinstance(s, types.IntegerLiteral) for s in shape]): - raise numba.core.errors.UnsupportedError( - f"At least one element of shape tuple type{shape} is not an integer literal." - ) - else: - raise numba.core.errors.UnsupportedError( - f"Shape is of unsupported type {shape}." - ) - - # No default arguments. 
- nb_dtype = parse_dtype(dtype) - nb_shape = parse_shape(shape) - - if nb_dtype is not None and nb_shape is not None: - retty = types.Array(dtype=nb_dtype, ndim=nb_shape, layout="C") - # Inlining the signature construction from numpy_empty_nd - sig = retty(shape, dtype) - return sig - else: - msg = f"Cannot parse input types to function np.empty({shape}, {dtype})" - raise numba.errors.TypingError(msg) - - return typer - - elif isinstance(context, numba.cuda.target.CUDATypingContext): - - # Function repurposed from Numba's Cuda_array_decl. - def typer(shape, dtype): - - # Only integer literals and tuples of integer literals are valid - # shapes - if isinstance(shape, types.Integer): - if not isinstance(shape, types.IntegerLiteral): - return None - elif isinstance(shape, (types.Tuple, types.UniTuple)): - if any([not isinstance(s, types.IntegerLiteral) for s in shape]): - return None - else: - return None - - ndim = parse_shape(shape) - nb_dtype = parse_dtype(dtype) - if nb_dtype is not None and ndim is not None: - return types.Array(dtype=nb_dtype, ndim=ndim, layout="C") - - return typer - - elif isinstance(context, numba.hip.target.HIPTypingContext): - - def typer(shape, dtype): - # Only integer literals and tuples of integer literals are valid - # shapes - if isinstance(shape, types.Integer): - if not isinstance(shape, types.IntegerLiteral): - return None - elif isinstance(shape, (types.Tuple, types.UniTuple)): - if any([not isinstance(s, types.IntegerLiteral) for s in shape]): - return None - else: - return None - - ndim = parse_shape(shape) - nb_dtype = parse_dtype(dtype) - if nb_dtype is not None and ndim is not None: - result = types.Array(dtype=nb_dtype, ndim=ndim, layout="C") - return result - - return typer - - else: - raise numba.core.errors.UnsupportedError( - f"Unsupported target context {context}." 
- ) - - -@numba.extending.lower_builtin(local_array, types.IntegerLiteral, types.Any) -def builtin_local_array(context, builder, sig, args): - - shape, dtype = sig.args - - from numba.core.typing.npydecl import parse_dtype, parse_shape - import numba.np.arrayobj as arrayobj - - if isinstance(context, numba.core.cpu.CPUContext): - - # No default arguments. - nb_dtype = parse_dtype(dtype) - nb_shape = parse_shape(shape) - - retty = types.Array(dtype=nb_dtype, ndim=nb_shape, layout="C") - - # In ol_np_empty, the reference type of the array is fed into the - # signatrue as a third argument. This third argument is not used by - # _parse_empty_args. - sig = retty(shape, dtype) - - arrtype, shapes = arrayobj._parse_empty_args(context, builder, sig, args) - ary = arrayobj._empty_nd_impl(context, builder, arrtype, shapes) - - return ary._getvalue() - elif isinstance(context, numba.cuda.target.CUDATargetContext): - length = sig.args[0].literal_value - dtype = parse_dtype(sig.args[1]) - return numba.cuda.cudaimpl._generic_array( - context, - builder, - shape=(length,), - dtype=dtype, - symbol_name="_cudapy_harm_lmem", - addrspace=numba.cuda.cudadrv.nvvm.ADDRSPACE_LOCAL, - can_dynsized=False, - ) - elif isinstance(context, numba.hip.target.HIPTargetContext): - length = sig.args[0].literal_value - dtype = parse_dtype(sig.args[1]) - result = numba.hip.typing_lowering.hip.lowering._generic_array( - context, - builder, - shape=(length,), - dtype=dtype, - symbol_name="_HIPpy_lmem", - addrspace=numba.hip.amdgcn.ADDRSPACE_LOCAL, - can_dynsized=False, - ) - return result - else: - raise numba.core.errors.UnsupportedError( - f"Unsupported target context {context}." 
- ) - - -# ============================================================================= -# Decorators -# ============================================================================= - -target_rosters = {} - -late_jit_roster = set() - -do_nothing_id = 0 - - -do_nothing_id = 0 - - -def generate_do_nothing(arg_count, crash_on_call=None): - """ - Create a no-op function (or one that always asserts) that takes - `arg_count` positional arguments. - - On Python 3.13+, we can't rely on exec() mutating the current frame's - locals in a way eval() can see, so we execute into an explicit - namespace and pull the function out of that. - """ - global do_nothing_id - - name = f"do_nothing_{do_nothing_id}" - args = ", ".join([f"arg_{i}" for i in range(arg_count)]) - source = f"def {name}({args}):\n" - if crash_on_call is not None: - source += f" assert False, '{crash_on_call}'\n" - else: - source += " pass\n" - - ns = {} - exec(source, globals(), ns) - result = ns[name] - - do_nothing_id += 1 - return result - - -def overwrite_func(func, revised_func): - mod_name = func.__module__ - fn_name = func.__name__ - new_fn_name = revised_func.__name__ - module = __import__(mod_name, fromlist=[fn_name]) - setattr(module, fn_name, revised_func) - - -blankout_roster = {} - - -def blankout_fn(func): - global blankout_roster - - mod_name = func.__module__ - fn_name = func.__name__ - id = (mod_name, fn_name) - - if id not in blankout_roster: - global do_nothing_id - name = func.__name__ - arg_count = len(inspect.signature(func).parameters) - blankout_roster[id] = generate_do_nothing( - arg_count, crash_on_call=f"blankout fn for {name} should never be called" - ) - - blank = blankout_roster[id] - - return blank - - -def for_(target, on_target=[]): - def for_inner(func): - global target_rosters - mod_name = func.__module__ - fn_name = func.__name__ - params = inspect.signature(func).parameters - if target not in target_rosters: - target_rosters[target] = {} - target_rosters[target][(mod_name, 
fn_name)] = func - - param_str = ", ".join(p for p in params) - jit_str = ( - f"def jit_func({param_str}):\n" - f" global target_rosters\n" - f" return target_rosters['{target}'][('{mod_name}','{fn_name}')]" - ) - - # Execute into an explicit namespace so we can reliably retrieve jit_func - ns = {} - exec(jit_str, globals(), ns) - result = ns["jit_func"] - - blank = blankout_fn(func) - if target == "gpu": - numba.core.extending.overload(blank, target=target)(result) - else: - numba.core.extending.overload(blank, target=target)(result) - return blank - - return for_inner - - -def for_cpu(on_target=[]): - return for_("cpu", on_target=on_target) - - -def for_gpu(on_target=[]): - return for_("gpu", on_target=on_target) - - -def jit_on_target(): - def jit_on_target_inner(func): - late_jit_roster.add(func) - return func - - return jit_on_target_inner - - -def nopython_mode(is_on): - if is_on: - return - if not isinstance(target_rosters["cpu"], dict): - return - - for impl in target_rosters["cpu"].values(): - overwrite_func(impl, impl) - - -# ============================================================================= -# GPU Type / Extern Functions Forward Declarations -# ============================================================================= - - -SIMPLE_ASYNC = True - -none_type = None -mcdc_global_type = None -mcdc_data_type = None -mcdc_shared_type = None -state_spec = None -mcdc_global_gpu = None -mcdc_data_gpu = None -group_gpu = None -thread_gpu = None -particle_gpu = None -prep_gpu = None -step_async = None -halt_early = None -find_cell_async = None -tally_width = None -tally_length = None -tally_size = None -tally_shape_literal = None - - -def gpu_forward_declare( - args, data_shape, global_type, particle_type, particle_data_type -): - - if args.gpu_rocm_path != None: - harm.config.set_rocm_path(args.gpu_rocm_path) - - if args.gpu_cuda_path != None: - harm.config.set_cuda_path(args.gpu_cuda_path) - - global none_type, mcdc_global_type, mcdc_data_type, 
mcdc_shared_type - global state_spec - global mcdc_global_gpu, mcdc_data_gpu - global group_gpu, thread_gpu - global particle_gpu, particle_record_gpu - global step_async, find_cell_async, halt_early - global tally_width, tally_length, tally_size - - tally_size = data_shape[0] * 8 - - global tally_shape_literal - tally_shape_literal = data_shape - - none_type = numba.from_dtype(np.dtype([])) - mcdc_global_type = numba.types.Array(numba.from_dtype(global_type), (1,), "C") - # mcdc_global_type = numba.from_dtype(global_type) - - tally_dims = len(data_shape) - mcdc_data_type = numba.types.Array(numba.float64, tally_dims, "C") - state_spec = ( - { - "global": mcdc_global_type, - "data": mcdc_data_type, - }, - none_type, - none_type, - ) - access_fns = harm.RuntimeSpec.access_fns(state_spec) - mcdc_global_gpu = access_fns["device"]["global"]["indirect"] - mcdc_data_gpu = access_fns["device"]["data"]["direct"] - group_gpu = access_fns["group"] - thread_gpu = access_fns["thread"] - particle_gpu = numba.from_dtype(particle_type) - particle_record_gpu = numba.from_dtype(particle_data_type) - - def step(prog: numba.uintp, P: particle_gpu): - pass - - def find_cell(prog: numba.uintp, P: particle_gpu): - pass - - import harmonize - - step_async, find_cell_async = harmonize.RuntimeSpec.async_dispatch(step, find_cell) - interface = harmonize.RuntimeSpec.program_interface() - halt_early = interface["halt_early"] - - -# ============================================================================= -# Seperate GPU/CPU Functions to Target Different Platforms -# ============================================================================= - - -def mcdc_global(prog): - return prog - - -@for_cpu() -def mcdc_global(prog): - return prog - - -@for_gpu() -def mcdc_global(prog): - return mcdc_global_gpu(prog) - - -@for_cpu() -def mcdc_data(prog): - return None - - -@for_gpu() -def mcdc_data(prog): - return mcdc_data_gpu(prog) - - -@for_cpu() -def group(prog): - return prog - - -@for_gpu() 
-def group(prog): - return group_gpu(prog) - - -@for_cpu() -def thread(prog): - return prog - - -@for_gpu() -def thread(prog): - return thread_gpu(prog) - - -@for_cpu() -def global_add(ary, idx, val): - result = ary[idx] - ary[idx] += val - return result - - -@for_gpu() -def global_add(ary, idx, val): - return harm.array_atomic_add(ary, idx, val) - - -@for_cpu() -def global_max(ary, idx, val): - result = ary[idx] - if ary[idx] < val: - ary[idx] = val - return result - - -@for_gpu() -def global_max(ary, idx, val): - return harm.array_atomic_max(ary, idx, val) - - -# ========================================================================= -# Program Specifications -# ========================================================================= - -state_spec = None -one_event_fns = None -multi_event_fns = None - - -device_gpu, group_gpu, thread_gpu = None, None, None -iterate_async = None - - -def make_spec(target): - global state_spec, one_event_fns, multi_event_fns - global device_gpu, group_gpu, thread_gpu - global iterate_async - if target == "gpu": - state_spec = (dev_state_type, grp_state_type, thd_state_type) - one_event_fns = [iterate] - # multi_event_fns = [source,move,scattering,fission,leakage,bcollision] - device_gpu, group_gpu, thread_gpu = harm.RuntimeSpec.access_fns(state_spec) - (iterate_async,) = harm.RuntimeSpec.async_dispatch(iterate) - elif target != "cpu": - unknown_target(target) - - -@njit -def empty_base_func(prog): - pass - - -def make_gpu_loop( - state_spec, - work_make_fn, - step_fn, - check_fn, - arg_type, - initial_fn=empty_base_func, - final_fn=empty_base_func, -): - async_fn_list = [step_fn] - device_gpu, group_gpu, thread_gpu = harm.RuntimeSpec.access_fns(state_spec) - - def make_work(prog: numba.uintp) -> numba.boolean: - return work_make_fn(prog) - - def initialize(prog: numba.uintp): - initial_fn(prog) - - def finalize(prog: numba.uintp): - final_fn(prog) - - def step(prog: numba.uintp, arg: arg_type): - - step_async() - - (step_async,) 
= harm.RuntimeSpec.async_dispatch(step) - - pass diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 0f06c64b..37b47512 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -9,6 +9,26 @@ import mcdc.config as config + +def adapt_transport_functions(): + import ast, pathlib + + base_path = pathlib.Path(__file__).parent.resolve() / "transport" + print(base_path) + file_paths = [str(p) for p in base_path.rglob("*") if p.is_file()] + + collection = {} + for file_path in file_paths: + with open(file_path, "r", encoding="utf-8") as f: + tree = ast.parse(f.read()) + function_names = [ + node.name for node in ast.walk(tree) if isinstance(node, ast.FunctionDef) + ] + collection[file_path] = function_names + print(file_path, function_names) + exit() + + # Main types none_type = None simulation_type = None @@ -36,14 +56,13 @@ free_state = lambda pointer: None -def forward_declare_gpu_program(simulation, data_size): +def forward_declare_gpu_program(): import mcdc.numba_types as type_ global none_type, simulation_type, data_type global state_spec, simulation_gpu, data_gpu, group_gpu, thread_gpu, particle_gpu, particle_record_gpu global step_async, find_cell_async global alloc_managed_bytes, alloc_device_bytes - global src_free_program, free_state # Compilation check if MPI.COMM_WORLD.Get_rank() == 0: @@ -99,7 +118,9 @@ def find_cell(program: nb.uintp, particle: particle_gpu): alloc_device_bytes = harmonize.alloc_device_bytes -''' +def build_gpu_program(data_size): + global src_free_program, free_state + # ================================================================================== # "gpu_sources_spec" # ================================================================================== @@ -134,7 +155,7 @@ def finalize(program: nb.uintp): # Async. 
functions # ================ - shape = (size,) + shape = (data_size,) def step(program: nb.uintp, particle_input: particle_gpu): simulation = simulation_gpu(program) @@ -235,7 +256,7 @@ def step(program: nb.uintp, particle_input: particle_gpu): def teardown_gpu_program(mcdc): src_free_program(cast_uintp_to_voidptr(mcdc["gpu_meta"]["source_program_pointer"])) free_state(cast_uintp_to_voidptr(mcdc["gpu_meta"]["state_pointer"])) -''' + # ====================================================================================== # Simulation structure and data creators diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 48f60e63..a48d0cf2 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -227,7 +227,7 @@ def generate_numba_objects(simulation): ] # ================================================================================== - # Set records and data based on the simulation structures and objects + # Set records and data size based on the simulation structures and objects # ================================================================================== # Allocate object containers @@ -255,33 +255,11 @@ def generate_numba_objects(simulation): if type(item) in mcdc_classes: objects.append(item) - # Set the objects + # Set the records and the data size for object_ in objects: set_object(object_, annotations, structures, records, data) set_object(simulation, annotations, structures, records, data) - # Forward declare GPU program, if needed - if config.target == "gpu": - from mcdc.code_factory.gpu.program_builder import forward_declare_gpu_program - - forward_declare_gpu_program(simulation, data["size"]) - - # Allocate the flattened data and re-set the objects - data["array"], data["pointer"] = create_data_array(data["size"], type_map[float]) - - data["size"] = 0 - records = {} - for mcdc_class in mcdc_classes: - if issubclass(mcdc_class, 
ObjectNonSingleton): - records[mcdc_class.label] = [] - else: - records[mcdc_class.label] = {} - records["simulation"] = records.pop("simulation") - - for object_ in objects: - set_object(object_, annotations, structures, records, data, set_data=True) - set_object(simulation, annotations, structures, records, data, set_data=True) - # ================================================================================== # Finalize the simulation object structure and set record # ================================================================================== @@ -352,12 +330,38 @@ def generate_numba_objects(simulation): f.write(text) + # ================================================================================== + # GPU setup: Adapt transport functions, forward declare, and build program + # ================================================================================== + + if config.target == "gpu": + from mcdc.code_factory.gpu.program_builder import ( + adapt_transport_functions, + forward_declare_gpu_program, + build_gpu_program, + ) + + adapt_transport_functions() + forward_declare_gpu_program() + build_gpu_program(data["size"]) + + # ================================================================================== + # Allocate the flattened data and re-set the objects + # ================================================================================== + + data["array"], data["pointer"] = create_data_array(data["size"], type_map[float]) + + data["size"] = 0 + for object_ in objects: + set_object(object_, annotations, structures, records, data, set_data=True) + set_object(simulation, annotations, structures, records, data, set_data=True) + # ================================================================================== # Set with records # ================================================================================== # The global structure/variable container - mcdc_simulation_container, mcdc_simulation_pointer = create_mcdc_container( + 
mcdc_simulation_container, mcdc_simulation_pointer = create_simulation_container( into_dtype(structures["simulation"]) ) mcdc_simulation = mcdc_simulation_container[0] @@ -663,7 +667,7 @@ def create_data_array(size, dtype): create_data_array_on_gpu(size, dtype) -# @njit +@njit def create_data_array_on_gpu(size, dtype): if config.gpu_state_storage == "managed": data_ptr = gpu_builder.alloc_managed_bytes(size) @@ -674,15 +678,24 @@ def create_data_array_on_gpu(size, dtype): return data, data_uint -@njit -def create_mcdc_container_on_gpu(dtype): - if config.target == "gpu": - import mcdc.code_factory.gpu.program_builder as gpu_builder +def create_simulation_container(dtype): + if not config.target == "gpu": + simulation_container = np.zeros((1,), dtype=dtype) + return simulation_container, 0 + else: + create_simulation_container_on_gpu(dtype) + - return gpu_builder.create_mcdc_container(dtype) +@njit +def create_simulation_container_on_gpu(dtype): + size = dtype.itemsize + if config.gpu_state_storage == "managed": + simulation_ptr = gpu_builder.alloc_managed_bytes(size) else: - mcdc_container = np.zeros((1,), dtype=dtype) - return mcdc_container, 0 + simulation_ptr = gpu_builder.alloc_device_bytes(size) + simulation_uint = voidptr_to_uintp(simulation_ptr) + simulation = nb.carray(simulation_ptr, (size,), dtype) + return simulation, simulation_uint # ============================================================================= From 54570ae2b02ba070bc4641e2070260b9bad1961f Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Tue, 10 Mar 2026 07:02:50 +0700 Subject: [PATCH 19/50] implement gpu function adapter --- mcdc/code_factory/gpu/program_builder.py | 26 +++++++------------ mcdc/code_factory/gpu/transport/__init__.py | 5 ++++ .../gpu/transport/geometry/__init__.py | 1 + mcdc/code_factory/gpu/transport/util.py | 4 ++- mcdc/transport/__init__.py | 4 +++ mcdc/transport/geometry/__init__.py | 2 ++ 6 files changed, 25 insertions(+), 17 deletions(-) create mode 100644 
mcdc/code_factory/gpu/transport/__init__.py create mode 100644 mcdc/code_factory/gpu/transport/geometry/__init__.py create mode 100644 mcdc/transport/__init__.py diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 37b47512..336a33f1 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -1,4 +1,3 @@ -import harmonize import numba as nb import numba.extending as nbxt import numpy as np @@ -11,22 +10,14 @@ def adapt_transport_functions(): - import ast, pathlib + import mcdc.code_factory.gpu.transport as gpu_transport + import mcdc.transport as transport - base_path = pathlib.Path(__file__).parent.resolve() / "transport" - print(base_path) - file_paths = [str(p) for p in base_path.rglob("*") if p.is_file()] - - collection = {} - for file_path in file_paths: - with open(file_path, "r", encoding="utf-8") as f: - tree = ast.parse(f.read()) - function_names = [ - node.name for node in ast.walk(tree) if isinstance(node, ast.FunctionDef) - ] - collection[file_path] = function_names - print(file_path, function_names) - exit() + # TODO: Make the following automatic + transport.util.atomic_add = gpu_transport.util.atomic_add + transport.geometry.interface = gpu_transport.geometry.interface + transport.particle_bank = gpu_transport.particle_bank + # transport.simulation = gpu_transport.simulation # Main types @@ -57,6 +48,9 @@ def adapt_transport_functions(): def forward_declare_gpu_program(): + import harmonize + + ### import mcdc.numba_types as type_ global none_type, simulation_type, data_type diff --git a/mcdc/code_factory/gpu/transport/__init__.py b/mcdc/code_factory/gpu/transport/__init__.py new file mode 100644 index 00000000..97d7eb91 --- /dev/null +++ b/mcdc/code_factory/gpu/transport/__init__.py @@ -0,0 +1,5 @@ +import mcdc.code_factory.gpu.transport.geometry as geometry +import mcdc.code_factory.gpu.transport.particle_bank as particle_bank + +# import 
mcdc.code_factory.gpu.transport.simulation as simulation +import mcdc.code_factory.gpu.transport.util as util diff --git a/mcdc/code_factory/gpu/transport/geometry/__init__.py b/mcdc/code_factory/gpu/transport/geometry/__init__.py new file mode 100644 index 00000000..e10e84a5 --- /dev/null +++ b/mcdc/code_factory/gpu/transport/geometry/__init__.py @@ -0,0 +1 @@ +import mcdc.code_factory.gpu.transport.geometry.interface as interface diff --git a/mcdc/code_factory/gpu/transport/util.py b/mcdc/code_factory/gpu/transport/util.py index 85ff979c..a5f0ed7a 100644 --- a/mcdc/code_factory/gpu/transport/util.py +++ b/mcdc/code_factory/gpu/transport/util.py @@ -1,6 +1,8 @@ +# import harmonize + from numba import njit @njit def atomic_add(array, idx, value): - return harmonize.array_atomic_add(array, idx, value) + harmonize.array_atomic_add(array, idx, value) diff --git a/mcdc/transport/__init__.py b/mcdc/transport/__init__.py new file mode 100644 index 00000000..c2e7b125 --- /dev/null +++ b/mcdc/transport/__init__.py @@ -0,0 +1,4 @@ +import mcdc.transport.geometry as geometry +import mcdc.transport.particle_bank as particle_bank +import mcdc.transport.simulation as simulation +import mcdc.transport.util as util diff --git a/mcdc/transport/geometry/__init__.py b/mcdc/transport/geometry/__init__.py index 4153e038..a785177f 100644 --- a/mcdc/transport/geometry/__init__.py +++ b/mcdc/transport/geometry/__init__.py @@ -1,3 +1,5 @@ +import mcdc.transport.geometry.interface as interface + from .interface import ( inspect_geometry, locate_particle, From 34eac81ad94975ae50e30f329e6e787ec3933874 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Mon, 9 Mar 2026 18:41:47 -0700 Subject: [PATCH 20/50] minor update. 
implement local array --- mcdc/code_factory/gpu/program_builder.py | 66 +++++++--- mcdc/code_factory/gpu/transport/util.py | 161 ++++++++++++++++++++++- mcdc/object_/simulation.py | 2 + mcdc/transport/particle_bank.py | 4 +- mcdc/transport/simulation.py | 5 +- mcdc/transport/util.py | 6 + 6 files changed, 218 insertions(+), 26 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 336a33f1..a61a977c 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -8,18 +8,27 @@ import mcdc.config as config +# ====================================================================================== +# Transport function adapter +# ====================================================================================== + def adapt_transport_functions(): import mcdc.code_factory.gpu.transport as gpu_transport import mcdc.transport as transport # TODO: Make the following automatic - transport.util.atomic_add = gpu_transport.util.atomic_add transport.geometry.interface = gpu_transport.geometry.interface transport.particle_bank = gpu_transport.particle_bank # transport.simulation = gpu_transport.simulation + transport.util.atomic_add = gpu_transport.util.atomic_add + transport.util.local_array = gpu_transport.util.local_array +# ====================================================================================== +# Forward declaration +# ====================================================================================== + # Main types none_type = None simulation_type = None @@ -27,10 +36,10 @@ def adapt_transport_functions(): # Access functions state_spec = None -simulation_gpu = None -data_gpu = None -group_gpu = None -thread_gpu = None +access_simulation = None +access_data_ptr = None +access_group = None +access_thread = None particle_gpu = None particle_record_gpu = None @@ -42,19 +51,14 @@ def adapt_transport_functions(): alloc_managed_bytes = None alloc_device_bytes 
= None -# For teardown functions -src_free_program = lambda pointer: None -free_state = lambda pointer: None - def forward_declare_gpu_program(): import harmonize - - ### import mcdc.numba_types as type_ + # Get to set the globals global none_type, simulation_type, data_type - global state_spec, simulation_gpu, data_gpu, group_gpu, thread_gpu, particle_gpu, particle_record_gpu + global state_spec, access_simulation, access_data_ptr, access_group, access_thread, particle_gpu, particle_record_gpu global step_async, find_cell_async global alloc_managed_bytes, alloc_device_bytes @@ -79,17 +83,17 @@ def forward_declare_gpu_program(): # Set access functions state_spec = ( { - "global": simulation_type, + "simulation": simulation_type, "data": data_type, }, none_type, none_type, ) access_fns = harmonize.RuntimeSpec.access_fns(state_spec) - simulation_gpu = access_fns["device"]["global"]["indirect"] - data_gpu = access_fns["device"]["data"]["direct"] - group_gpu = access_fns["group"] - thread_gpu = access_fns["thread"] + access_simulation = access_fns["device"]["simulation"]["indirect"] + access_data_ptr = access_fns["device"]["data"]["direct"] + access_group = access_fns["group"] + access_thread = access_fns["thread"] particle_gpu = nb.from_dtype(type_.particle) particle_record_gpu = nb.from_dtype(type_.particle_data) @@ -112,7 +116,20 @@ def find_cell(program: nb.uintp, particle: particle_gpu): alloc_device_bytes = harmonize.alloc_device_bytes +# ====================================================================================== +# Program builder +# ====================================================================================== + +src_free_program = lambda pointer: None +free_state = lambda pointer: None + + def build_gpu_program(data_size): + import harmonize + + from mcdc.transport.util import atomic_add + from mcdc.transport.simulation import generate_source_particle + global src_free_program, free_state # 
================================================================================== @@ -124,18 +141,24 @@ def build_gpu_program(data_size): # ============== def make_work(program: nb.uintp) -> nb.boolean: - simulation = simulation_gpu(program) + simulation = access_simulation(program) + data_ptr = access_data_ptr(program) + data = adapt.harm.array_from_ptr(data_ptr, shape, nb.float64) - idx_work = adapt.global_add(simulation["mpi_work_iter"], 0, 1) + atomic_add(simulation["mpi_work_iter"], 0, 1) + idx_work = simulation["mpi_work_iter"][0] if idx_work >= simulation["mpi_work_size"]: return False + work_start = simulation["mpi_work_start"] + generate_source_particle( simulation["mpi_work_start"], nb.uint64(idx_work), simulation["source_seed"], program, + data, ) return True @@ -152,9 +175,10 @@ def finalize(program: nb.uintp): shape = (data_size,) def step(program: nb.uintp, particle_input: particle_gpu): - simulation = simulation_gpu(program) - data_ptr = adapt.mcdc_data(program) + simulation = access_simulation(program) + data_ptr = access_data(program) data = adapt.harm.array_from_ptr(data_ptr, shape, nb.float64) + particle_container = adapt.local_array(1, type_.particle) particle_container[0] = particle_input particle = particle_container[0] diff --git a/mcdc/code_factory/gpu/transport/util.py b/mcdc/code_factory/gpu/transport/util.py index a5f0ed7a..1c795b74 100644 --- a/mcdc/code_factory/gpu/transport/util.py +++ b/mcdc/code_factory/gpu/transport/util.py @@ -1,4 +1,4 @@ -# import harmonize +import harmonize from numba import njit @@ -6,3 +6,162 @@ @njit def atomic_add(array, idx, value): harmonize.array_atomic_add(array, idx, value) + + +# ============================================================================= +# Generic GPU/CPU local array variable constructors +# ============================================================================= + + +def local_array(shape, dtype): + return np.zeros(shape, dtype=dtype) + + 
+@numba.extending.type_callable(local_array) +def type_local_array(context): + + from numba.core.typing.npydecl import parse_dtype, parse_shape + + if isinstance(context, numba.core.typing.context.Context): + + # Function repurposed from Numba's ol_np_empty. + def typer(shape, dtype): + numba.np.arrayobj._check_const_str_dtype("empty", dtype) + + # Only integer literals and tuples of integer literals are valid + # shapes + if isinstance(shape, types.Integer): + if not isinstance(shape, types.IntegerLiteral): + raise numba.core.errors.UnsupportedError( + f"Integer shape type {shape} is not literal." + ) + elif isinstance(shape, (types.Tuple, types.UniTuple)): + if any([not isinstance(s, types.IntegerLiteral) for s in shape]): + raise numba.core.errors.UnsupportedError( + f"At least one element of shape tuple type{shape} is not an integer literal." + ) + else: + raise numba.core.errors.UnsupportedError( + f"Shape is of unsupported type {shape}." + ) + + # No default arguments. + nb_dtype = parse_dtype(dtype) + nb_shape = parse_shape(shape) + + if nb_dtype is not None and nb_shape is not None: + retty = types.Array(dtype=nb_dtype, ndim=nb_shape, layout="C") + # Inlining the signature construction from numpy_empty_nd + sig = retty(shape, dtype) + return sig + else: + msg = f"Cannot parse input types to function np.empty({shape}, {dtype})" + raise numba.errors.TypingError(msg) + + return typer + + elif isinstance(context, numba.cuda.target.CUDATypingContext): + + # Function repurposed from Numba's Cuda_array_decl. 
+ def typer(shape, dtype): + + # Only integer literals and tuples of integer literals are valid + # shapes + if isinstance(shape, types.Integer): + if not isinstance(shape, types.IntegerLiteral): + return None + elif isinstance(shape, (types.Tuple, types.UniTuple)): + if any([not isinstance(s, types.IntegerLiteral) for s in shape]): + return None + else: + return None + + ndim = parse_shape(shape) + nb_dtype = parse_dtype(dtype) + if nb_dtype is not None and ndim is not None: + return types.Array(dtype=nb_dtype, ndim=ndim, layout="C") + + return typer + + elif isinstance(context, numba.hip.target.HIPTypingContext): + + def typer(shape, dtype): + # Only integer literals and tuples of integer literals are valid + # shapes + if isinstance(shape, types.Integer): + if not isinstance(shape, types.IntegerLiteral): + return None + elif isinstance(shape, (types.Tuple, types.UniTuple)): + if any([not isinstance(s, types.IntegerLiteral) for s in shape]): + return None + else: + return None + + ndim = parse_shape(shape) + nb_dtype = parse_dtype(dtype) + if nb_dtype is not None and ndim is not None: + result = types.Array(dtype=nb_dtype, ndim=ndim, layout="C") + return result + + return typer + + else: + raise numba.core.errors.UnsupportedError( + f"Unsupported target context {context}." + ) + + +@numba.extending.lower_builtin(local_array, types.IntegerLiteral, types.Any) +def builtin_local_array(context, builder, sig, args): + + shape, dtype = sig.args + + from numba.core.typing.npydecl import parse_dtype, parse_shape + import numba.np.arrayobj as arrayobj + + if isinstance(context, numba.core.cpu.CPUContext): + + # No default arguments. + nb_dtype = parse_dtype(dtype) + nb_shape = parse_shape(shape) + + retty = types.Array(dtype=nb_dtype, ndim=nb_shape, layout="C") + + # In ol_np_empty, the reference type of the array is fed into the + # signatrue as a third argument. This third argument is not used by + # _parse_empty_args. 
+ sig = retty(shape, dtype) + + arrtype, shapes = arrayobj._parse_empty_args(context, builder, sig, args) + ary = arrayobj._empty_nd_impl(context, builder, arrtype, shapes) + + return ary._getvalue() + elif isinstance(context, numba.cuda.target.CUDATargetContext): + length = sig.args[0].literal_value + dtype = parse_dtype(sig.args[1]) + return numba.cuda.cudaimpl._generic_array( + context, + builder, + shape=(length,), + dtype=dtype, + symbol_name="_cudapy_harm_lmem", + addrspace=numba.cuda.cudadrv.nvvm.ADDRSPACE_LOCAL, + can_dynsized=False, + ) + elif isinstance(context, numba.hip.target.HIPTargetContext): + length = sig.args[0].literal_value + dtype = parse_dtype(sig.args[1]) + result = numba.hip.typing_lowering.hip.lowering._generic_array( + context, + builder, + shape=(length,), + dtype=dtype, + symbol_name="_HIPpy_lmem", + addrspace=numba.hip.amdgcn.ADDRSPACE_LOCAL, + can_dynsized=False, + ) + return result + else: + raise numba.core.errors.UnsupportedError( + f"Unsupported target context {context}." 
+ ) diff --git a/mcdc/object_/simulation.py b/mcdc/object_/simulation.py index f7a1156e..a07958dd 100644 --- a/mcdc/object_/simulation.py +++ b/mcdc/object_/simulation.py @@ -126,6 +126,7 @@ class Simulation(ObjectSingleton): # GPU metadata gpu_meta: GPUMeta + source_seed: int def __init__(self): super().__init__() @@ -223,6 +224,7 @@ def __init__(self): # GPU metadata self.gpu_meta = GPUMeta() + self.source_seed = 0 def set_root_universe(self, cells=[]): self.universes[0].cells = cells diff --git a/mcdc/transport/particle_bank.py b/mcdc/transport/particle_bank.py index 29deeb6a..7a02d9ae 100644 --- a/mcdc/transport/particle_bank.py +++ b/mcdc/transport/particle_bank.py @@ -16,7 +16,7 @@ from mcdc.constant import * from mcdc.print_ import print_error -from mcdc.transport.util import atomic_add +from mcdc.transport.util import atomic_add, local_array # ============================================================================= # Bank size @@ -126,7 +126,7 @@ def promote_future_particles(simulation, data): next_census_time = mcdc_get.settings.census_time(idx, simulation["settings"], data) # Particle container - particle_container = np.zeros(1, type_.particle_data) + particle_container = local_array(1, type_.particle_data) particle = particle_container[0] # Loop over all particles in future bank diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 1e1bbecb..9737b9ec 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -23,6 +23,7 @@ print_progress_eigenvalue, ) from mcdc.transport.source import source_particle +from mcdc.transport.util import local_array # ====================================================================================== # Main simulations @@ -185,7 +186,7 @@ def generate_source_particle(work_start, idx_work, seed, simulation, data): """Get a source particle and put into one of the banks""" settings = simulation["settings"] - particle_container = np.zeros(1, type_.particle_data) + 
particle_container = local_array(1, type_.particle_data) particle = particle_container[0] # Get from fixed-source? @@ -232,7 +233,7 @@ def generate_source_particle(work_start, idx_work, seed, simulation, data): @njit def exhaust_active_bank(simulation, data): - particle_container = np.zeros(1, type_.particle) + particle_container = local_array(1, type_.particle) particle = particle_container[0] # Loop until active bank is exhausted diff --git a/mcdc/transport/util.py b/mcdc/transport/util.py index a82609ef..9d127ab4 100644 --- a/mcdc/transport/util.py +++ b/mcdc/transport/util.py @@ -1,4 +1,5 @@ import math +import numpy as np from numba import njit from typing import Sequence @@ -100,6 +101,11 @@ def atomic_add(array, idx, value): array[idx] += value +@njit +def local_array(shape, dtype): + return np.zeros(shape, dtype=dtype) + + # ====================================================================================== # Interpolation # ====================================================================================== From 6f07c989f64d963c882f6f3202acf3a12d591b24 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 10 Mar 2026 00:38:47 -0700 Subject: [PATCH 21/50] refactor find bin to be more explicit --- mcdc/code_factory/gpu/program_builder.py | 34 ++++++++------- mcdc/code_factory/gpu/transport/util.py | 45 +++++++++----------- mcdc/code_factory/numba_objects_generator.py | 10 ++++- mcdc/transport/distribution.py | 12 +++++- mcdc/transport/geometry/surface/interface.py | 5 ++- mcdc/transport/mesh/structured.py | 16 +++++-- mcdc/transport/particle_bank.py | 6 +-- mcdc/transport/simulation.py | 16 +++---- mcdc/transport/source.py | 5 ++- mcdc/transport/tally/filter.py | 28 +++++++++--- mcdc/transport/tally/score.py | 10 ++--- mcdc/transport/util.py | 35 ++++++++++++--- 12 files changed, 139 insertions(+), 83 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index a61a977c..08749b20 100644 --- 
a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -25,6 +25,12 @@ def adapt_transport_functions(): transport.util.local_array = gpu_transport.util.local_array +def adapt_transport_functions_post_declare(): + import mcdc.transport as transport + + transport.util.access_simulation = access_simulation + + # ====================================================================================== # Forward declaration # ====================================================================================== @@ -126,15 +132,14 @@ def find_cell(program: nb.uintp, particle: particle_gpu): def build_gpu_program(data_size): import harmonize + import mcdc.numba_types as type_ + import mcdc.transport.util as util - from mcdc.transport.util import atomic_add - from mcdc.transport.simulation import generate_source_particle + from mcdc.transport.simulation import generate_source_particle, step_particle global src_free_program, free_state - # ================================================================================== - # "gpu_sources_spec" - # ================================================================================== + shape = eval(f"{(data_size,)}") # ============== # Base functions @@ -143,9 +148,9 @@ def build_gpu_program(data_size): def make_work(program: nb.uintp) -> nb.boolean: simulation = access_simulation(program) data_ptr = access_data_ptr(program) - data = adapt.harm.array_from_ptr(data_ptr, shape, nb.float64) + data = harmonize.array_from_ptr(data_ptr, shape, nb.float64) - atomic_add(simulation["mpi_work_iter"], 0, 1) + util.atomic_add(simulation["mpi_work_iter"], 0, 1) idx_work = simulation["mpi_work_iter"][0] if idx_work >= simulation["mpi_work_size"]: @@ -172,27 +177,26 @@ def finalize(program: nb.uintp): # Async. 
functions # ================ - shape = (data_size,) - def step(program: nb.uintp, particle_input: particle_gpu): simulation = access_simulation(program) - data_ptr = access_data(program) - data = adapt.harm.array_from_ptr(data_ptr, shape, nb.float64) + data_ptr = access_data_ptr(program) + data = harmonize.array_from_ptr(data_ptr, shape, nb.float64) - particle_container = adapt.local_array(1, type_.particle) + particle_container = util.local_array(1, type_.particle) particle_container[0] = particle_input particle = particle_container[0] - if particle["fresh"]: - prep_particle(particle_container, program) particle["fresh"] = False step_particle(particle_container, data, program) if particle["alive"]: - adapt.step_async(program, particle) + step_async(program, particle) # Bind them all base_fns = (initialize, finalize, make_work) async_fns = [step] + async_fns = [] src_spec = harmonize.RuntimeSpec("mcdc_source", state_spec, base_fns, async_fns) + print("PASS") + exit() harmonize.RuntimeSpec.bind_specs() # ================================================================================== diff --git a/mcdc/code_factory/gpu/transport/util.py b/mcdc/code_factory/gpu/transport/util.py index 1c795b74..184dc624 100644 --- a/mcdc/code_factory/gpu/transport/util.py +++ b/mcdc/code_factory/gpu/transport/util.py @@ -1,6 +1,7 @@ import harmonize +import numba as nb -from numba import njit +from numba import njit, types @njit @@ -17,31 +18,31 @@ def local_array(shape, dtype): return np.zeros(shape, dtype=dtype) -@numba.extending.type_callable(local_array) +@nb.extending.type_callable(local_array) def type_local_array(context): from numba.core.typing.npydecl import parse_dtype, parse_shape - if isinstance(context, numba.core.typing.context.Context): + if isinstance(context, nb.core.typing.context.Context): # Function repurposed from Numba's ol_np_empty. 
def typer(shape, dtype): - numba.np.arrayobj._check_const_str_dtype("empty", dtype) + nb.np.arrayobj._check_const_str_dtype("empty", dtype) # Only integer literals and tuples of integer literals are valid # shapes if isinstance(shape, types.Integer): if not isinstance(shape, types.IntegerLiteral): - raise numba.core.errors.UnsupportedError( + raise nb.core.errors.UnsupportedError( f"Integer shape type {shape} is not literal." ) elif isinstance(shape, (types.Tuple, types.UniTuple)): if any([not isinstance(s, types.IntegerLiteral) for s in shape]): - raise numba.core.errors.UnsupportedError( + raise nb.core.errors.UnsupportedError( f"At least one element of shape tuple type{shape} is not an integer literal." ) else: - raise numba.core.errors.UnsupportedError( + raise nb.core.errors.UnsupportedError( f"Shape is of unsupported type {shape}." ) @@ -56,11 +57,11 @@ def typer(shape, dtype): return sig else: msg = f"Cannot parse input types to function np.empty({shape}, {dtype})" - raise numba.errors.TypingError(msg) + raise nb.errors.TypingError(msg) return typer - elif isinstance(context, numba.cuda.target.CUDATypingContext): + elif isinstance(context, nb.cuda.target.CUDATypingContext): # Function repurposed from Numba's Cuda_array_decl. def typer(shape, dtype): @@ -83,7 +84,7 @@ def typer(shape, dtype): return typer - elif isinstance(context, numba.hip.target.HIPTypingContext): + elif isinstance(context, nb.hip.target.HIPTypingContext): def typer(shape, dtype): # Only integer literals and tuples of integer literals are valid @@ -106,12 +107,10 @@ def typer(shape, dtype): return typer else: - raise numba.core.errors.UnsupportedError( - f"Unsupported target context {context}." 
- ) + raise nb.core.errors.UnsupportedError(f"Unsupported target context {context}.") -@numba.extending.lower_builtin(local_array, types.IntegerLiteral, types.Any) +@nb.extending.lower_builtin(local_array, types.IntegerLiteral, types.Any) def builtin_local_array(context, builder, sig, args): shape, dtype = sig.args @@ -119,7 +118,7 @@ def builtin_local_array(context, builder, sig, args): from numba.core.typing.npydecl import parse_dtype, parse_shape import numba.np.arrayobj as arrayobj - if isinstance(context, numba.core.cpu.CPUContext): + if isinstance(context, nb.core.cpu.CPUContext): # No default arguments. nb_dtype = parse_dtype(dtype) @@ -136,32 +135,30 @@ def builtin_local_array(context, builder, sig, args): ary = arrayobj._empty_nd_impl(context, builder, arrtype, shapes) return ary._getvalue() - elif isinstance(context, numba.cuda.target.CUDATargetContext): + elif isinstance(context, nb.cuda.target.CUDATargetContext): length = sig.args[0].literal_value dtype = parse_dtype(sig.args[1]) - return numba.cuda.cudaimpl._generic_array( + return nb.cuda.cudaimpl._generic_array( context, builder, shape=(length,), dtype=dtype, symbol_name="_cudapy_harm_lmem", - addrspace=numba.cuda.cudadrv.nvvm.ADDRSPACE_LOCAL, + addrspace=nb.cuda.cudadrv.nvvm.ADDRSPACE_LOCAL, can_dynsized=False, ) - elif isinstance(context, numba.hip.target.HIPTargetContext): + elif isinstance(context, nb.hip.target.HIPTargetContext): length = sig.args[0].literal_value dtype = parse_dtype(sig.args[1]) - result = numba.hip.typing_lowering.hip.lowering._generic_array( + result = nb.hip.typing_lowering.hip.lowering._generic_array( context, builder, shape=(length,), dtype=dtype, symbol_name="_HIPpy_lmem", - addrspace=numba.hip.amdgcn.ADDRSPACE_LOCAL, + addrspace=nb.hip.amdgcn.ADDRSPACE_LOCAL, can_dynsized=False, ) return result else: - raise numba.core.errors.UnsupportedError( - f"Unsupported target context {context}." 
- ) + raise nb.core.errors.UnsupportedError(f"Unsupported target context {context}.") diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index a48d0cf2..598d5556 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -338,11 +338,19 @@ def generate_numba_objects(simulation): from mcdc.code_factory.gpu.program_builder import ( adapt_transport_functions, forward_declare_gpu_program, - build_gpu_program, ) adapt_transport_functions() forward_declare_gpu_program() + + from mcdc.code_factory.gpu.program_builder import ( + adapt_transport_functions_post_declare, + ) + + adapt_transport_functions_post_declare() + + from mcdc.code_factory.gpu.program_builder import build_gpu_program + build_gpu_program(data["size"]) # ================================================================================== diff --git a/mcdc/transport/distribution.py b/mcdc/transport/distribution.py index 3024e350..69446b16 100644 --- a/mcdc/transport/distribution.py +++ b/mcdc/transport/distribution.py @@ -150,7 +150,11 @@ def sample_direction(polar_cosine, azimuthal, polar_coordinate, rng_state): @njit def sample_tabulated(table, rng_state, data): xi = rng.lcg(rng_state) - idx = find_bin(xi, mcdc_get.tabulated_distribution.cdf_all(table, data)) + + cdf = data[table["cdf_offset"] : (table["cdf_offset"] + table["cdf_length"])] + # Above is equivalent to: cmf = mcdc_get.tabulated_distribution.cdf_all(table, data) + + idx = find_bin(xi, cdf) cdf_low = mcdc_get.tabulated_distribution.cdf(idx, table, data) cdf_high = mcdc_get.tabulated_distribution.cdf(idx + 1, table, data) value_low = mcdc_get.tabulated_distribution.value(idx, table, data) @@ -161,7 +165,11 @@ def sample_tabulated(table, rng_state, data): @njit def sample_pmf(pmf, rng_state, data): xi = rng.lcg(rng_state) - idx = find_bin(xi, mcdc_get.pmf_distribution.cmf_all(pmf, data)) + + cmf = data[pmf["cmf_offset"] : (pmf["cmf_offset"] 
+ pmf["cmf_length"])] + # Above is equivalent to: cmf = mcdc_get.pmf_distribution.cmf_all(pmf, data) + + idx = find_bin(xi, cmf) return mcdc_get.pmf_distribution.value(idx, pmf, data) diff --git a/mcdc/transport/geometry/surface/interface.py b/mcdc/transport/geometry/surface/interface.py index 9e3427b9..98568fb7 100644 --- a/mcdc/transport/geometry/surface/interface.py +++ b/mcdc/transport/geometry/surface/interface.py @@ -36,7 +36,7 @@ SURFACE_CONE_Y, SURFACE_CONE_Z, ) -from mcdc.transport.util import find_bin +from mcdc.transport.util import find_bin_with_rules @njit @@ -351,7 +351,8 @@ def _get_move_idx(t, surface, data): Get moving interval index wrt the given time """ time_grid = mcdc_get.surface.move_time_grid_all(surface, data) - idx = find_bin(t, time_grid, epsilon=COINCIDENCE_TOLERANCE_TIME, go_lower=False) + tolerance = COINCIDENCE_TOLERANCE_TIME + idx = find_bin_with_rules(t, time_grid, tolerance, go_lower=False) # Coinciding cases if abs(time_grid[idx + 1] - t) < COINCIDENCE_TOLERANCE: diff --git a/mcdc/transport/mesh/structured.py b/mcdc/transport/mesh/structured.py index 7ea77863..b6376062 100644 --- a/mcdc/transport/mesh/structured.py +++ b/mcdc/transport/mesh/structured.py @@ -5,7 +5,7 @@ import mcdc.mcdc_get as mcdc_get from mcdc.constant import COINCIDENCE_TOLERANCE, COINCIDENCE_TOLERANCE_TIME, INF -from mcdc.transport.util import find_bin +from mcdc.transport.util import find_bin_with_rules @njit @@ -24,9 +24,17 @@ def get_indices(particle_container, mesh, data): uz = particle["uz"] tolerance = COINCIDENCE_TOLERANCE - ix = find_bin(x, mcdc_get.structured_mesh.x_all(mesh, data), tolerance, ux < 0.0) - iy = find_bin(y, mcdc_get.structured_mesh.y_all(mesh, data), tolerance, uy < 0.0) - iz = find_bin(z, mcdc_get.structured_mesh.z_all(mesh, data), tolerance, uz < 0.0) + + grid_x = data[mesh["x_offset"] : (mesh["x_offset"] + mesh["x_length"])] + # Above is equivalent to: grid_x = mcdc_get.structured_mesh.x_all(mesh, data) + grid_y = 
data[mesh["y_offset"] : (mesh["y_offset"] + mesh["y_length"])] + # Above is equivalent to: grid_y = mcdc_get.structured_mesh.y_all(mesh, data) + grid_z = data[mesh["z_offset"] : (mesh["z_offset"] + mesh["z_length"])] + # Above is equivalent to: grid_z = mcdc_get.structured_mesh.z_all(mesh, data) + + ix = find_bin_with_rules(x, grid_x, tolerance, go_lower=ux < 0.0) + iy = find_bin_with_rules(y, grid_y, tolerance, go_lower=uy < 0.0) + iz = find_bin_with_rules(z, grid_z, tolerance, go_lower=uz < 0.0) return ix, iy, iz diff --git a/mcdc/transport/particle_bank.py b/mcdc/transport/particle_bank.py index 7a02d9ae..975f0fb4 100644 --- a/mcdc/transport/particle_bank.py +++ b/mcdc/transport/particle_bank.py @@ -13,10 +13,10 @@ import mcdc.transport.mpi as mpi import mcdc.transport.particle as particle_module import mcdc.transport.technique as technique +import mcdc.transport.util as util from mcdc.constant import * from mcdc.print_ import print_error -from mcdc.transport.util import atomic_add, local_array # ============================================================================= # Bank size @@ -35,7 +35,7 @@ def set_bank_size(bank, value): @njit def add_bank_size(bank, value): - atomic_add(bank["size"], 0, value) + util.atomic_add(bank["size"], 0, value) # ============================================================================= @@ -126,7 +126,7 @@ def promote_future_particles(simulation, data): next_census_time = mcdc_get.settings.census_time(idx, simulation["settings"], data) # Particle container - particle_container = local_array(1, type_.particle_data) + particle_container = util.local_array(1, type_.particle_data) particle = particle_container[0] # Loop over all particles in future bank diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 9737b9ec..6de5938b 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -15,6 +15,7 @@ import mcdc.transport.rng as rng import mcdc.transport.tally as tally_module import 
mcdc.transport.technique as technique +import mcdc.transport.util as util from mcdc.constant import * from mcdc.print_ import ( @@ -23,7 +24,6 @@ print_progress_eigenvalue, ) from mcdc.transport.source import source_particle -from mcdc.transport.util import local_array # ====================================================================================== # Main simulations @@ -182,11 +182,12 @@ def source_loop(seed, simulation, data): @njit -def generate_source_particle(work_start, idx_work, seed, simulation, data): +def generate_source_particle(work_start, idx_work, seed, program, data): """Get a source particle and put into one of the banks""" + simulation = util.access_simulation(program) settings = simulation["settings"] - particle_container = local_array(1, type_.particle_data) + particle_container = util.local_array(1, type_.particle_data) particle = particle_container[0] # Get from fixed-source? @@ -233,7 +234,7 @@ def generate_source_particle(work_start, idx_work, seed, simulation, data): @njit def exhaust_active_bank(simulation, data): - particle_container = local_array(1, type_.particle) + particle_container = util.local_array(1, type_.particle) particle = particle_container[0] # Loop until active bank is exhausted @@ -241,17 +242,10 @@ def exhaust_active_bank(simulation, data): # Get particle from active bank particle_bank_module.pop_particle(particle_container, simulation["bank_active"]) - prep_particle(particle_container, simulation) - # Particle loop particle_loop(particle_container, simulation, data) -@njit -def prep_particle(particle_container, simulation): - particle = particle_container[0] - - @njit def source_closeout(simulation, idx_work, N_prog, data): # Tally history closeout for one-batch fixed-source simulation diff --git a/mcdc/transport/source.py b/mcdc/transport/source.py index f8f57145..7c4f8d30 100644 --- a/mcdc/transport/source.py +++ b/mcdc/transport/source.py @@ -14,7 +14,7 @@ sample_isotropic_direction, sample_direction, ) -from 
mcdc.transport.util import find_bin +from mcdc.transport.util import find_bin_with_rules @njit @@ -86,7 +86,8 @@ def source_particle(P_rec_arr, seed, simulation, data): if source["moving"]: # Get moving interval index wrt the given time time_grid = mcdc_get.source.move_time_grid_all(source, data) - idx = find_bin(t, time_grid, epsilon=COINCIDENCE_TOLERANCE_TIME, go_lower=False) + tolerance = COINCIDENCE_TOLERANCE_TIME + idx = find_bin_with_rules(t, time_grid, tolerance, go_lower=False) # Coinciding cases if abs(time_grid[idx + 1] - t) < COINCIDENCE_TOLERANCE: diff --git a/mcdc/transport/tally/filter.py b/mcdc/transport/tally/filter.py index 3fe90f5b..d3a3420d 100644 --- a/mcdc/transport/tally/filter.py +++ b/mcdc/transport/tally/filter.py @@ -12,7 +12,7 @@ COINCIDENCE_TOLERANCE_ENERGY, COINCIDENCE_TOLERANCE_TIME, ) -from mcdc.transport.util import find_bin +from mcdc.transport.util import find_bin_with_tolerance, find_bin_with_rules @njit @@ -55,8 +55,14 @@ def get_direction_index(particle_container, tally, data): azi *= -1 tolerance = COINCIDENCE_TOLERANCE_DIRECTION - i_mu = find_bin(mu, mcdc_get.tally.mu_all(tally, data), tolerance) - i_azi = find_bin(azi, mcdc_get.tally.azi_all(tally, data), tolerance) + + grid_mu = data[tally["mu_offset"] : (tally["mu_offset"] + tally["mu_length"])] + # Above is equivalent to: grid_mu = mcdc_get.tally.mu_all(tally, data) + grid_azi = data[tally["azi_offset"] : (tally["azi_offset"] + tally["azi_length"])] + # Above is equivalent to: grid_azi = mcdc_get.tally.azi_all(tally, data) + + i_mu = find_bin_with_tolerance(mu, grid_mu, tolerance) + i_azi = find_bin_with_tolerance(azi, grid_azi, tolerance) return i_mu, i_azi @@ -70,7 +76,12 @@ def get_energy_index(particle_container, tally, data, multigroup_mode): E = particle["E"] tolerance = COINCIDENCE_TOLERANCE_ENERGY - return find_bin(E, mcdc_get.tally.energy_all(tally, data), tolerance) + grid_energy = data[ + tally["energy_offset"] : (tally["energy_offset"] + tally["energy_length"]) 
+ ] + # Above is equivalent to: grid_energy = mcdc_get.tally.energy_all(tally, data) + + return find_bin_with_tolerance(E, grid_energy, tolerance) @njit @@ -81,9 +92,12 @@ def get_time_index(particle_container, tally, data): time = particle["t"] tolerance = COINCIDENCE_TOLERANCE_TIME - return find_bin( - time, mcdc_get.tally.time_all(tally, data), tolerance, go_lower=False - ) + grid_time = data[ + tally["time_offset"] : (tally["time_offset"] + tally["time_length"]) + ] + # Above is equivalent to: grid_time = mcdc_get.tally.time_all(tally, data) + + return find_bin_with_rules(time, grid_time, tolerance, go_lower=False) @njit diff --git a/mcdc/transport/tally/score.py b/mcdc/transport/tally/score.py index bc39b6dd..10722863 100644 --- a/mcdc/transport/tally/score.py +++ b/mcdc/transport/tally/score.py @@ -5,6 +5,7 @@ import mcdc.mcdc_get as mcdc_get import mcdc.transport.mesh as mesh_module import mcdc.transport.physics as physics +import mcdc.transport.util as util from mcdc.constant import ( AXIS_T, @@ -27,7 +28,6 @@ ) from mcdc.transport.geometry.surface import get_normal_component from mcdc.transport.tally.filter import get_filter_indices -from mcdc.transport.util import atomic_add @njit @@ -58,7 +58,7 @@ def make_scores(particle_container, flux, tally, idx_base, simulation, data): surface = simulation["surfaces"][particle["surface_ID"]] mu = get_normal_component(particle_container, speed, surface, data) score = flux * mu - atomic_add(data, idx_base + i_score, score) + util.atomic_add(data, idx_base + i_score, score) # ====================================================================================== @@ -378,7 +378,7 @@ def eigenvalue_tally(particle_container, distance, simulation, data): ) # Fission production (needed even during inactive cycle) - atomic_add(simulation["eigenvalue_tally_nuSigmaF"], 0, flux * nuSigmaF) + util.atomic_add(simulation["eigenvalue_tally_nuSigmaF"], 0, flux * nuSigmaF) # Done, if inactive if not simulation["cycle_active"]: @@ 
-390,7 +390,7 @@ def eigenvalue_tally(particle_container, distance, simulation, data): v = physics.particle_speed(particle_container, simulation, data) n_density = flux / v - atomic_add(simulation["eigenvalue_tally_n"], 0, n_density) + util.atomic_add(simulation["eigenvalue_tally_n"], 0, n_density) # Maximum neutron density if simulation["n_max"] < n_density: @@ -424,7 +424,7 @@ def eigenvalue_tally(particle_container, distance, simulation, data): NEUTRON_REACTION_FISSION, particle_container, simulation, data ) C_density = flux * total * SigmaF / simulation["k_eff"] - atomic_add(simulation["eigenvalue_tally_C"], 0, C_density) + util.atomic_add(simulation["eigenvalue_tally_C"], 0, C_density) # Maximum precursor density if simulation["C_max"] < C_density: diff --git a/mcdc/transport/util.py b/mcdc/transport/util.py index 9d127ab4..9178c16d 100644 --- a/mcdc/transport/util.py +++ b/mcdc/transport/util.py @@ -6,9 +6,7 @@ @njit -def find_bin( - value: float, grid: Sequence[float], epsilon: float = 0.0, go_lower: bool = True -) -> int: +def find_bin_with_rules(value, grid, epsilon=0.0, go_lower=True): """ Return the bin index i for which grid[i] <= value < grid[i+1], with optional epsilon tolerance and tie-breaking toward the lower/upper bin. 
@@ -97,13 +95,16 @@ def find_bin( @njit -def atomic_add(array, idx, value): - array[idx] += value +def find_bin(value, grid): + tolerance = 0.0 + go_lower = True + return find_bin_with_rules(value, grid, tolerance, go_lower) @njit -def local_array(shape, dtype): - return np.zeros(shape, dtype=dtype) +def find_bin_with_tolerance(value, grid, tolerance): + go_lower = True + return find_bin_with_rules(value, grid, tolerance, go_lower) # ====================================================================================== @@ -129,3 +130,23 @@ def log_interpolation(x, x1, x2, y1, y2): ly = ly1 + m * (math.log(x) - lx1) return math.exp(ly) + + +# ====================================================================================== +# Framework utilities +# ====================================================================================== + + +@njit +def atomic_add(array, idx, value): + array[idx] += value + + +@njit +def local_array(shape, dtype): + return np.zeros(shape, dtype=dtype) + + +@njit +def access_simulation(program): + return program From 303f7286a851b1c9501cdfa4680b8cec16cf9fd7 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 10 Mar 2026 00:58:28 -0700 Subject: [PATCH 22/50] remove old find_bin call with default values --- mcdc/transport/geometry/surface/interface.py | 3 ++- mcdc/transport/mesh/structured.py | 13 ++++++++----- mcdc/transport/simulation.py | 1 + mcdc/transport/source.py | 11 +++++++++-- mcdc/transport/tally/filter.py | 5 +++-- mcdc/transport/util.py | 6 +++--- 6 files changed, 26 insertions(+), 13 deletions(-) diff --git a/mcdc/transport/geometry/surface/interface.py b/mcdc/transport/geometry/surface/interface.py index 98568fb7..beb1b762 100644 --- a/mcdc/transport/geometry/surface/interface.py +++ b/mcdc/transport/geometry/surface/interface.py @@ -352,7 +352,8 @@ def _get_move_idx(t, surface, data): """ time_grid = mcdc_get.surface.move_time_grid_all(surface, data) tolerance = COINCIDENCE_TOLERANCE_TIME - idx = find_bin_with_rules(t, 
time_grid, tolerance, go_lower=False) + go_lower = False + idx = find_bin_with_rules(t, time_grid, tolerance, go_lower) # Coinciding cases if abs(time_grid[idx + 1] - t) < COINCIDENCE_TOLERANCE: diff --git a/mcdc/transport/mesh/structured.py b/mcdc/transport/mesh/structured.py index b6376062..9afe50a8 100644 --- a/mcdc/transport/mesh/structured.py +++ b/mcdc/transport/mesh/structured.py @@ -23,8 +23,6 @@ def get_indices(particle_container, mesh, data): uy = particle["uy"] uz = particle["uz"] - tolerance = COINCIDENCE_TOLERANCE - grid_x = data[mesh["x_offset"] : (mesh["x_offset"] + mesh["x_length"])] # Above is equivalent to: grid_x = mcdc_get.structured_mesh.x_all(mesh, data) grid_y = data[mesh["y_offset"] : (mesh["y_offset"] + mesh["y_length"])] @@ -32,9 +30,14 @@ def get_indices(particle_container, mesh, data): grid_z = data[mesh["z_offset"] : (mesh["z_offset"] + mesh["z_length"])] # Above is equivalent to: grid_z = mcdc_get.structured_mesh.z_all(mesh, data) - ix = find_bin_with_rules(x, grid_x, tolerance, go_lower=ux < 0.0) - iy = find_bin_with_rules(y, grid_y, tolerance, go_lower=uy < 0.0) - iz = find_bin_with_rules(z, grid_z, tolerance, go_lower=uz < 0.0) + tolerance = COINCIDENCE_TOLERANCE + ux_go_lower = ux < 0.0 + uy_go_lower = uy < 0.0 + uz_go_lower = uz < 0.0 + + ix = find_bin_with_rules(x, grid_x, tolerance, ux_go_lower) + iy = find_bin_with_rules(y, grid_y, tolerance, uy_go_lower) + iz = find_bin_with_rules(z, grid_z, tolerance, uz_go_lower) return ix, iy, iz diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 6de5938b..74e75d6d 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -203,6 +203,7 @@ def generate_source_particle(work_start, idx_work, seed, program, data): ] particle = particle_container[0] + return # Skip if beyond time boundary if particle["t"] > settings["time_boundary"]: return diff --git a/mcdc/transport/source.py b/mcdc/transport/source.py index 7c4f8d30..4f6fba5a 100644 --- 
a/mcdc/transport/source.py +++ b/mcdc/transport/source.py @@ -85,9 +85,16 @@ def source_particle(P_rec_arr, seed, simulation, data): # Motion translation if source["moving"]: # Get moving interval index wrt the given time - time_grid = mcdc_get.source.move_time_grid_all(source, data) + time_grid = data[ + source["move_time_grid_offset"] : ( + source["move_time_grid_offset"] + source["N_move_grid"] + ) + ] + # Above is equivalent to: time_grid = mcdc_get.source.move_time_grid_all(source, data) + tolerance = COINCIDENCE_TOLERANCE_TIME - idx = find_bin_with_rules(t, time_grid, tolerance, go_lower=False) + go_lower = False + idx = find_bin_with_rules(t, time_grid, tolerance, go_lower) # Coinciding cases if abs(time_grid[idx + 1] - t) < COINCIDENCE_TOLERANCE: diff --git a/mcdc/transport/tally/filter.py b/mcdc/transport/tally/filter.py index d3a3420d..038adf04 100644 --- a/mcdc/transport/tally/filter.py +++ b/mcdc/transport/tally/filter.py @@ -91,13 +91,14 @@ def get_time_index(particle_container, tally, data): # Particle properties time = particle["t"] - tolerance = COINCIDENCE_TOLERANCE_TIME grid_time = data[ tally["time_offset"] : (tally["time_offset"] + tally["time_length"]) ] # Above is equivalent to: grid_time = mcdc_get.tally.time_all(tally, data) - return find_bin_with_rules(time, grid_time, tolerance, go_lower=False) + tolerance = COINCIDENCE_TOLERANCE_TIME + go_lower = False + return find_bin_with_rules(time, grid_time, tolerance, go_lower) @njit diff --git a/mcdc/transport/util.py b/mcdc/transport/util.py index 9178c16d..4627841d 100644 --- a/mcdc/transport/util.py +++ b/mcdc/transport/util.py @@ -6,7 +6,7 @@ @njit -def find_bin_with_rules(value, grid, epsilon=0.0, go_lower=True): +def find_bin_with_rules(value, grid, epsilon, go_lower): """ Return the bin index i for which grid[i] <= value < grid[i+1], with optional epsilon tolerance and tie-breaking toward the lower/upper bin. 
@@ -17,10 +17,10 @@ def find_bin_with_rules(value, grid, epsilon=0.0, go_lower=True): Query point. grid : Sequence[float] Monotonically increasing bin edges of length N_grid = N_bin + 1. - epsilon : float, optional (default: 0.0) + epsilon : float Tolerance to treat values as being exactly on a grid edge if |value - grid[k]| <= epsilon. - go_lower : bool, optional (default: True) + go_lower : bool Tie-breaking rule when value is at/within epsilon of a grid edge: - True -> tie to the lower/left bin - False -> tie to the upper/right bin From 9634f20f582442d5594b32a3308200f631c016ca Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 10 Mar 2026 17:31:40 -0700 Subject: [PATCH 23/50] replace array function return with direct array slicing --- mcdc/transport/source.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/mcdc/transport/source.py b/mcdc/transport/source.py index 4f6fba5a..fb35f446 100644 --- a/mcdc/transport/source.py +++ b/mcdc/transport/source.py @@ -100,10 +100,18 @@ def source_particle(P_rec_arr, seed, simulation, data): if abs(time_grid[idx + 1] - t) < COINCIDENCE_TOLERANCE: idx += 1 - # Surface move translations, velocities, and time grid - trans_0 = mcdc_get.surface.move_translations_vector(idx, source, data) - time_0 = mcdc_get.surface.move_time_grid(idx, source, data) - V = mcdc_get.surface.move_velocities_vector(idx, source, data) + # Source move translations + start = source["move_translations_offset"] + idx * 3 + trans_0 = data[start : start + 3] + # Above is equivalent to: trans_0 = mcdc_get.source.move_translations_vector(idx, source, data) + + # Source move velocities + start = source["move_velocities_offset"] + idx * 3 + V = data[start : start + 3] + # Above is equivalent to: V = mcdc_get.source.move_velocities_vector(idx, source, data) + + # Source move time grid + time_0 = mcdc_get.source.move_time_grid(idx, source, data) # Translate the particle t_local = t - time_0 From 
5ab53425400e5ec4f8a955c9ec4b528df66a0c21 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 10 Mar 2026 17:32:40 -0700 Subject: [PATCH 24/50] minor clean up --- mcdc/code_factory/gpu/transport/util.py | 1 + mcdc/transport/simulation.py | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mcdc/code_factory/gpu/transport/util.py b/mcdc/code_factory/gpu/transport/util.py index 184dc624..2acb013c 100644 --- a/mcdc/code_factory/gpu/transport/util.py +++ b/mcdc/code_factory/gpu/transport/util.py @@ -1,5 +1,6 @@ import harmonize import numba as nb +import numpy as np from numba import njit, types diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 74e75d6d..263d6fb0 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -187,11 +187,11 @@ def generate_source_particle(work_start, idx_work, seed, program, data): simulation = util.access_simulation(program) settings = simulation["settings"] - particle_container = util.local_array(1, type_.particle_data) - particle = particle_container[0] - # Get from fixed-source? 
if particle_bank_module.get_bank_size(simulation["bank_source"]) == 0: + particle_container = util.local_array(1, type_.particle_data) + particle = particle_container[0] + # Sample source seed_work = rng.split_seed(work_start + idx_work, seed) source_particle(particle_container, seed_work, simulation, data) @@ -203,7 +203,6 @@ def generate_source_particle(work_start, idx_work, seed, program, data): ] particle = particle_container[0] - return # Skip if beyond time boundary if particle["t"] > settings["time_boundary"]: return From 444a57256e5c708724346ea6f56001dcfc1ddf93 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 10 Mar 2026 18:26:39 -0700 Subject: [PATCH 25/50] replace particles with particle_data in particle banks --- mcdc/code_factory/numba_objects_generator.py | 4 +-- mcdc/transport/particle_bank.py | 28 ++++++++++---------- mcdc/transport/simulation.py | 2 +- mcdc/transport/tally/closeout.py | 16 +++++------ mcdc/transport/technique.py | 6 ++--- 5 files changed, 28 insertions(+), 28 deletions(-) diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 598d5556..115f015f 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -214,12 +214,12 @@ def generate_numba_objects(simulation): else: structures[class_.label].append(("parent_ID", "i8")) - # Add particles to particle banks and add particle banks to the simulation + # Add particle data to particle banks and add particle banks to the simulation for name in bank_names: bank = getattr(simulation, name) size = int(bank.size[0]) structures[name] += [ - ("particles", into_dtype(structures["particle_data"]), (size,)) + ("particle_data", into_dtype(structures["particle_data"]), (size,)) ] # structures["simulation"] = [(name, into_dtype(structures[name]))] + structures[ diff --git a/mcdc/transport/particle_bank.py b/mcdc/transport/particle_bank.py index 975f0fb4..5c1e029c 100644 --- 
a/mcdc/transport/particle_bank.py +++ b/mcdc/transport/particle_bank.py @@ -46,12 +46,12 @@ def add_bank_size(bank, value): @njit def _bank_particle(particle_container, bank): # Check if bank is full - if get_bank_size(bank) == bank["particles"].shape[0]: + if get_bank_size(bank) == bank["particle_data"].shape[0]: report_full_bank(bank) # Set particle data idx = get_bank_size(bank) - particle_module.copy(bank["particles"][idx : idx + 1], particle_container) + particle_module.copy(bank["particle_data"][idx : idx + 1], particle_container) # Increment bank size add_bank_size(bank, 1) @@ -85,7 +85,7 @@ def pop_particle(particle_container, bank): # Set particle data idx = get_bank_size(bank) - 1 - particle_module.copy(particle_container, bank["particles"][idx : idx + 1]) + particle_module.copy(particle_container, bank["particle_data"][idx : idx + 1]) # Decrement bank size add_bank_size(bank, -1) @@ -136,7 +136,7 @@ def promote_future_particles(simulation, data): # NOTE: future bank size decreases as particles are promoted to census bank idx = i - (initial_size - get_bank_size(future_bank)) particle_module.copy( - particle_container, future_bank["particles"][idx : idx + 1] + particle_container, future_bank["particle_data"][idx : idx + 1] ) # Promote the future particle to census bank @@ -147,8 +147,8 @@ def promote_future_particles(simulation, data): # Consolidate the emptied space in the future bank j = get_bank_size(future_bank) particle_module.copy( - future_bank["particles"][idx : idx + 1], - future_bank["particles"][j : j + 1], + future_bank["particle_data"][idx : idx + 1], + future_bank["particle_data"][j : j + 1], ) @@ -185,11 +185,11 @@ def manage_particle_banks(simulation): census_bank = simulation["bank_census"] size = get_bank_size(census_bank) - if size >= source_bank["particles"].shape[0]: + if size >= source_bank["particle_data"].shape[0]: report_full_bank(source_bank) # TODO: better alternative? 
- source_bank["particles"][:size] = census_bank["particles"][:size] + source_bank["particle_data"][:size] = census_bank["particle_data"][:size] set_bank_size(source_bank, size) # Redistribute work and rebalance bank size across MPI ranks @@ -249,13 +249,13 @@ def bank_rebalance(simulation): # MPI nearest-neighbor send/receive buff = np.zeros( - simulation["bank_source"]["particles"].shape[0], dtype=type_.particle_data + simulation["bank_source"]["particle_data"].shape[0], dtype=type_.particle_data ) with objmode(size="int64"): # Create MPI-supported numpy object size = get_bank_size(simulation["bank_source"]) - bank = np.array(simulation["bank_source"]["particles"][:size]) + bank = np.array(simulation["bank_source"]["particle_data"][:size]) if receive_first: if receive_from_left: @@ -293,7 +293,7 @@ def bank_rebalance(simulation): # Set source bank from buffer set_bank_size(simulation["bank_source"], size) for i in range(size): - simulation["bank_source"]["particles"][i] = buff[i] + simulation["bank_source"]["particle_data"][i] = buff[i] # ====================================================================================== @@ -326,7 +326,7 @@ def bank_scanning_weight(bank, simulation): N_local = get_bank_size(bank) w_cdf = np.zeros(N_local + 1) for i in range(N_local): - w_cdf[i + 1] = w_cdf[i] + bank["particles"][i]["w"] + w_cdf[i + 1] = w_cdf[i] + bank["particle_data"][i]["w"] W_local = w_cdf[-1] # Starting weight @@ -352,7 +352,7 @@ def normalize_weight(bank, norm): # Normalize weight for i in range(get_bank_size(bank)): - bank["particles"][i]["w"] *= norm / W + bank["particle_data"][i]["w"] *= norm / W @njit @@ -360,7 +360,7 @@ def total_weight(bank): # Local total weight W_local = np.zeros(1) for i in range(get_bank_size(bank)): - W_local[0] += bank["particles"][i]["w"] + W_local[0] += bank["particle_data"][i]["w"] # MPI Allreduce buff = np.zeros(1, np.float64) diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 263d6fb0..43e40ec7 
100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -198,7 +198,7 @@ def generate_source_particle(work_start, idx_work, seed, program, data): # Get from source bank else: - particle_container = simulation["bank_source"]["particles"][ + particle_container = simulation["bank_source"]["particle_data"][ idx_work : (idx_work + 1) ] particle = particle_container[0] diff --git a/mcdc/transport/tally/closeout.py b/mcdc/transport/tally/closeout.py index aa60a7fe..ec88ed7e 100644 --- a/mcdc/transport/tally/closeout.py +++ b/mcdc/transport/tally/closeout.py @@ -242,7 +242,7 @@ def eigenvalue_cycle(simulation, data): total_local = np.zeros(4, np.float64) # [x,y,z,W] total = np.zeros(4, np.float64) for i in range(N_local): - P = simulation["bank_census"]["particles"][i] + P = simulation["bank_census"]["particle_data"][i] total_local[0] += P["x"] * P["w"] total_local[1] += P["y"] * P["w"] total_local[2] += P["z"] * P["w"] @@ -262,7 +262,7 @@ def eigenvalue_cycle(simulation, data): gr_type = simulation["settings"]["gyration_radius_type"] if gr_type == GYRATION_RADIUS_ALL: for i in range(N_local): - P = simulation["bank_census"]["particles"][i] + P = simulation["bank_census"]["particle_data"][i] rms_local[0] += ( (P["x"] - com_x) ** 2 + (P["y"] - com_y) ** 2 @@ -270,27 +270,27 @@ def eigenvalue_cycle(simulation, data): ) * P["w"] elif gr_type == GYRATION_RADIUS_INFINITE_X: for i in range(N_local): - P = simulation["bank_census"]["particles"][i] + P = simulation["bank_census"]["particle_data"][i] rms_local[0] += ((P["y"] - com_y) ** 2 + (P["z"] - com_z) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_INFINITE_Y: for i in range(N_local): - P = simulation["bank_census"]["particles"][i] + P = simulation["bank_census"]["particle_data"][i] rms_local[0] += ((P["x"] - com_x) ** 2 + (P["z"] - com_z) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_INFINITE_Z: for i in range(N_local): - P = simulation["bank_census"]["particles"][i] + P = 
simulation["bank_census"]["particle_data"][i] rms_local[0] += ((P["x"] - com_x) ** 2 + (P["y"] - com_y) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_ONLY_X: for i in range(N_local): - P = simulation["bank_census"]["particles"][i] + P = simulation["bank_census"]["particle_data"][i] rms_local[0] += ((P["x"] - com_x) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_ONLY_Y: for i in range(N_local): - P = simulation["bank_census"]["particles"][i] + P = simulation["bank_census"]["particle_data"][i] rms_local[0] += ((P["y"] - com_y) ** 2) * P["w"] elif gr_type == GYRATION_RADIUS_ONLY_Z: for i in range(N_local): - P = simulation["bank_census"]["particles"][i] + P = simulation["bank_census"]["particle_data"][i] rms_local[0] += ((P["z"] - com_z) ** 2) * P["w"] # MPI Allreduce diff --git a/mcdc/transport/technique.py b/mcdc/transport/technique.py index f7b15875..dd01216b 100644 --- a/mcdc/transport/technique.py +++ b/mcdc/transport/technique.py @@ -61,21 +61,21 @@ def population_control(simulation): particle_bank_module.set_bank_size(bank_source, 0) for idx in range(N_local): # Weight of the surviving particles - w = bank_census["particles"][idx]["w"] + w = bank_census["particle_data"][idx]["w"] w_survive = w * ws # Determine number of guaranteed splits N_split = math.floor(sn) # Survive the russian roulette? 
- xi = rng.lcg(bank_census["particles"][idx : idx + 1]) + xi = rng.lcg(bank_census["particle_data"][idx : idx + 1]) if xi < sn - N_split: N_split += 1 # Split the particle for i in range(N_split): particle_module.copy_as_child( - P_rec_arr, bank_census["particles"][idx : idx + 1] + P_rec_arr, bank_census["particle_data"][idx : idx + 1] ) # Set weight P_rec["w"] = w_survive From efc63a9c944cec8b29becaa757afd44ad67c516d Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 10 Mar 2026 18:42:00 -0700 Subject: [PATCH 26/50] fix transport function adapt --- mcdc/code_factory/gpu/program_builder.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 08749b20..2851439b 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -18,8 +18,18 @@ def adapt_transport_functions(): import mcdc.transport as transport # TODO: Make the following automatic - transport.geometry.interface = gpu_transport.geometry.interface - transport.particle_bank = gpu_transport.particle_bank + transport.geometry.interface.report_lost_particle = ( + gpu_transport.geometry.interface.report_lost_particle + ) + transport.particle_bank.bank_active_particle = ( + gpu_transport.particle_bank.bank_active_particle + ) + transport.particle_bank.report_full_bank = ( + gpu_transport.particle_bank.report_full_bank + ) + transport.particle_bank.report_empty_bank = ( + gpu_transport.particle_bank.report_empty_bank + ) # transport.simulation = gpu_transport.simulation transport.util.atomic_add = gpu_transport.util.atomic_add transport.util.local_array = gpu_transport.util.local_array From 60031b546ab2ed9adb7204f4e38d361e27369d31 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 10 Mar 2026 19:04:56 -0700 Subject: [PATCH 27/50] add gpu_strategy to settings --- .../gpu/transport/particle_bank.py | 23 ++++++++++++++----- mcdc/constant.py | 3 +++ 
mcdc/object_/settings.py | 3 +++ mcdc/transport/simulation.py | 2 +- 4 files changed, 24 insertions(+), 7 deletions(-) diff --git a/mcdc/code_factory/gpu/transport/particle_bank.py b/mcdc/code_factory/gpu/transport/particle_bank.py index c8133a89..f1f2af61 100644 --- a/mcdc/code_factory/gpu/transport/particle_bank.py +++ b/mcdc/code_factory/gpu/transport/particle_bank.py @@ -1,18 +1,29 @@ from numba import njit +### + +import mcdc.numba_types as type_ +import mcdc.transport.particle as particle_module +import mcdc.transport.util as util +import mcdc.code_factory.gpu.program_builder as gpu_program + +from mcdc.constant import GPU_STRATEGY_SIMPLE_ASYNC + # ============================================================================= # Bank and pop particle # ============================================================================= @njit -def bank_active_particle(P_rec_arr, mcdc): - particle_container = local_array(1, type_.particle) - kernel.recordlike_to_particle(particle_container, P_rec_arr) - if SIMPLE_ASYNC: - step_async(prog, particle_container[0]) +def bank_active_particle(particle_container, program): + simulation = util.access_simulation(program) + + active_particle_container = util.local_array(1, type_.particle) + particle_module.copy(active_particle_container, particle_container) + if simulation["settings"]["gpu_strategy"] == GPU_STRATEGY_SIMPLE_ASYNC: + gpu_program.step_async(program, active_particle_container[0]) else: - find_cell_async(prog, particle_container[0]) + gpu_program.find_cell_async(program, active_particle_container[0]) @njit diff --git a/mcdc/constant.py b/mcdc/constant.py index f4998a3b..ebd99b42 100644 --- a/mcdc/constant.py +++ b/mcdc/constant.py @@ -183,3 +183,6 @@ # Weight Windows Modifications WW_MIN = 0 WW_WOLLABER = 1 + +# GPU strategies +GPU_STRATEGY_SIMPLE_ASYNC = 0 diff --git a/mcdc/object_/settings.py b/mcdc/object_/settings.py index 5e4cd501..fb149332 100644 --- a/mcdc/object_/settings.py +++ b/mcdc/object_/settings.py @@ 
-66,6 +66,9 @@ class Settings(ObjectSingleton): electron_transport: bool = False proton_transport: bool = False + # GPU mode + gpu_strategy: int = GPU_STRATEGY_SIMPLE_ASYNC + def __post_init__(self): super().__init__() diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 43e40ec7..39cd34f4 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -223,7 +223,7 @@ def generate_source_particle(work_start, idx_work, seed, program, data): # Put into the right bank if not hit_census: - particle_bank_module.bank_active_particle(particle_container, simulation) + particle_bank_module.bank_active_particle(particle_container, program) elif not hit_next_census: # Particle will participate after the current census particle_bank_module.bank_census_particle(particle_container, simulation) From f788e21e2578fec330d9b6f2accc6fb577b16186 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Wed, 11 Mar 2026 03:12:19 -0700 Subject: [PATCH 28/50] redesign literals to work on gpu mode --- mcdc/code_factory/gpu/program_builder.py | 3 +- mcdc/code_factory/literals_generator.py | 26 +++++++++++ mcdc/code_factory/numba_objects_generator.py | 46 -------------------- mcdc/main.py | 2 +- mcdc/transport/.gitignore | 1 - mcdc/transport/__init__.py | 1 + mcdc/transport/geometry/interface.py | 3 +- mcdc/transport/literals.py | 6 +++ mcdc/transport/simulation.py | 3 +- 9 files changed, 39 insertions(+), 52 deletions(-) create mode 100644 mcdc/code_factory/literals_generator.py delete mode 100644 mcdc/transport/.gitignore create mode 100644 mcdc/transport/literals.py diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 2851439b..f294be95 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -196,14 +196,13 @@ def step(program: nb.uintp, particle_input: particle_gpu): particle_container[0] = particle_input particle = particle_container[0] particle["fresh"] = False 
- step_particle(particle_container, data, program) + step_particle(particle_container, program, data) if particle["alive"]: step_async(program, particle) # Bind them all base_fns = (initialize, finalize, make_work) async_fns = [step] - async_fns = [] src_spec = harmonize.RuntimeSpec("mcdc_source", state_spec, base_fns, async_fns) print("PASS") exit() diff --git a/mcdc/code_factory/literals_generator.py b/mcdc/code_factory/literals_generator.py new file mode 100644 index 00000000..83cbb5d1 --- /dev/null +++ b/mcdc/code_factory/literals_generator.py @@ -0,0 +1,26 @@ +import numpy as np + +from numba import njit + + +def _literalize(value): + jit_str = f"@njit\ndef impl():\n return {value}\n" + exec(jit_str, globals(), locals()) + return eval("impl") + + +def make_literals(simulation): + import mcdc.transport as transport + + # RPN evaluation buffer size + if len(simulation.cells) == 0: + rpn_evaluation_buffer_size = 1 + else: + rpn_evaluation_buffer_size = int( + max( + [np.sum(np.array(x.region_RPN_tokens) >= 0.0) for x in simulation.cells] + ) + ) + transport.literals.rpn_evaluation_buffer_size = _literalize( + rpn_evaluation_buffer_size + ) diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 115f015f..9622424e 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -1187,49 +1187,3 @@ def singular_to_plural(word: str) -> str: parts[-1] = w + "s" return "_".join(parts) - - -# ============================================================================== -# MC/DC Member Array Sizes -# ============================================================================== - - -def literalize(value): - jit_str = f"@njit\ndef impl():\n return {value}\n" - exec(jit_str, globals(), locals()) - return eval("impl") - - -def rpn_buffer_size(): - pass - - -def make_size_rpn(cells): - global rpn_buffer_size - size = max([np.sum(np.array(x.region_RPN_tokens) >= 0.0) for x 
in cells]) - rpn_buffer_size = literalize(size) - - -# ====================================================================================== -# Make literals -# ====================================================================================== - - -def make_literals(simulation): - # Sizes - if len(simulation.cells) == 0: - rpn_evaluation_buffer_size = 1 - else: - rpn_evaluation_buffer_size = int( - max( - [np.sum(np.array(x.region_RPN_tokens) >= 0.0) for x in simulation.cells] - ) - ) - - path = f"{Path(mcdc.__file__).parent}" - with open(f"{path}/transport/literals.py", "w") as f: - text = "# The following is automatically generated by code_factory.py\n\n" - - text += f"rpn_evaluation_buffer_size = {rpn_evaluation_buffer_size}\n" - - f.write(text) diff --git a/mcdc/main.py b/mcdc/main.py index d338b608..6ef360c5 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -227,7 +227,7 @@ def preparation(): from mcdc.code_factory.numba_objects_generator import generate_numba_objects if MPI.COMM_WORLD.Get_rank() == 0: - from mcdc.code_factory.numba_objects_generator import make_literals + from mcdc.code_factory.literals_generator import make_literals make_literals(simulationPy) simulation_container, data = generate_numba_objects(simulationPy) diff --git a/mcdc/transport/.gitignore b/mcdc/transport/.gitignore deleted file mode 100644 index b9792f1c..00000000 --- a/mcdc/transport/.gitignore +++ /dev/null @@ -1 +0,0 @@ -literals.py diff --git a/mcdc/transport/__init__.py b/mcdc/transport/__init__.py index c2e7b125..de5264ac 100644 --- a/mcdc/transport/__init__.py +++ b/mcdc/transport/__init__.py @@ -1,4 +1,5 @@ import mcdc.transport.geometry as geometry +import mcdc.transport.literals as literals import mcdc.transport.particle_bank as particle_bank import mcdc.transport.simulation as simulation import mcdc.transport.util as util diff --git a/mcdc/transport/geometry/interface.py b/mcdc/transport/geometry/interface.py index 23c50b72..5c396308 100644 --- 
a/mcdc/transport/geometry/interface.py +++ b/mcdc/transport/geometry/interface.py @@ -10,6 +10,7 @@ import mcdc.transport.mesh as mesh import mcdc.transport.physics as physics import mcdc.transport.tally as tally_module +import mcdc.transport.util as util from mcdc.constant import * from mcdc.transport.geometry.surface import get_distance, check_sense, reflect @@ -361,7 +362,7 @@ def check_cell(particle_container, cell, simulation, data): return True # Create local value array - value = np.zeros(literals.rpn_evaluation_buffer_size, np.bool_) + value = util.local_array(literals.rpn_evaluation_buffer_size(), np.bool_) N_value = 0 # Particle parameters diff --git a/mcdc/transport/literals.py b/mcdc/transport/literals.py new file mode 100644 index 00000000..3cec7f37 --- /dev/null +++ b/mcdc/transport/literals.py @@ -0,0 +1,6 @@ +# The following will be replaced with their respective literals by +# code_factory/literals_generator.py + + +def rpn_evaluation_buffer_size(): + pass diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 39cd34f4..5aeadea4 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -278,7 +278,8 @@ def particle_loop(particle_container, simulation, data): @njit -def step_particle(particle_container, simulation, data): +def step_particle(particle_container, program, data): + simulation = util.access_simulation(program) particle = particle_container[0] # Determine and move to event From 2c0825befc78b45ed85653e53b44409f60bc626c Mon Sep 17 00:00:00 2001 From: ilhamv Date: Wed, 11 Mar 2026 08:06:30 -0700 Subject: [PATCH 29/50] make surface move gpu-compatible --- mcdc/transport/geometry/surface/interface.py | 27 +++++++++++++++----- 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/mcdc/transport/geometry/surface/interface.py b/mcdc/transport/geometry/surface/interface.py index beb1b762..ecc10301 100644 --- a/mcdc/transport/geometry/surface/interface.py +++ 
b/mcdc/transport/geometry/surface/interface.py @@ -350,7 +350,12 @@ def _get_move_idx(t, surface, data): """ Get moving interval index wrt the given time """ - time_grid = mcdc_get.surface.move_time_grid_all(surface, data) + time_grid = data[ + surface["move_time_grid_offset"] : ( + surface["move_time_grid_offset"] + surface["N_move_grid"] + ) + ] + # Above is equivalent to: time_grid = mcdc_get.surface.move_time_grid_all(surface, data) tolerance = COINCIDENCE_TOLERANCE_TIME go_lower = False idx = find_bin_with_rules(t, time_grid, tolerance, go_lower) @@ -369,10 +374,18 @@ def _translate_particle_position(particle_container, surface, idx, data): """ particle = particle_container[0] - # Surface move translations, velocities, and time grid - trans_0 = mcdc_get.surface.move_translations_vector(idx, surface, data) + # Surface move translations + start = surface["move_translations_offset"] + idx * 3 + trans_0 = data[start : start + 3] + # Above is equivalent to: trans_0 = mcdc_get.surface.move_translations_vector(idx, surface, data) + + # Surface move velocities + start = surface["move_velocities_offset"] + idx * 3 + V = data[start : start + 3] + # Above is equivalent to: V = mcdc_get.surface.move_velocities_vector(idx, surface, data) + + # Surface move time grid time_0 = mcdc_get.surface.move_time_grid(idx, surface, data) - V = mcdc_get.surface.move_velocities_vector(idx, surface, data) # Translate the particle t_local = particle["t"] - time_0 @@ -388,8 +401,10 @@ def _translate_particle_direction(particle_container, speed, surface, idx, data) """ particle = particle_container[0] - # Surface move translations, velocities, and time grid - V = mcdc_get.surface.move_velocities_vector(idx, surface, data) + # Surface move velocities + start = surface["move_velocities_offset"] + idx * 3 + V = data[start : start + 3] + # Above is equivalent to: V = mcdc_get.surface.move_velocities_vector(idx, surface, data) # Translate the particle particle["ux"] -= V[0] / speed From 
bdce631d42b4f2f1d348a398a3a3708db6306815 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Wed, 11 Mar 2026 18:01:24 -0700 Subject: [PATCH 30/50] redesign neutron physics model selection --- mcdc/main.py | 18 +++++++------ mcdc/object_/settings.py | 10 ++++---- mcdc/output.py | 2 +- mcdc/print_.py | 2 +- mcdc/transport/particle_bank.py | 2 +- mcdc/transport/physics/neutron/interface.py | 27 +++++++++++++++----- mcdc/transport/physics/neutron/multigroup.py | 2 +- mcdc/transport/physics/neutron/native.py | 2 +- mcdc/transport/simulation.py | 6 ++--- mcdc/transport/source.py | 2 +- mcdc/transport/tally/closeout.py | 2 +- mcdc/transport/tally/filter.py | 4 +-- mcdc/transport/tally/score.py | 6 ++--- 13 files changed, 51 insertions(+), 34 deletions(-) diff --git a/mcdc/main.py b/mcdc/main.py index 6ef360c5..43b1a128 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -61,7 +61,7 @@ def run(): print_module.print_banner() print_module.print_configuration() print(" Now running the particle transport...") - if settings.eigenvalue_mode: + if settings.neutron_eigenvalue_mode: print_module.print_eigenvalue_header(simulation) # TIMER: preparation @@ -77,7 +77,7 @@ def run(): # Run simulation import mcdc.transport.simulation as simulation_module - if settings.eigenvalue_mode: + if settings.neutron_eigenvalue_mode: simulation_module.eigenvalue_simulation(simulation_container, data) else: simulation_module.fixed_source_simulation(simulation_container, data) @@ -154,9 +154,11 @@ def preparation(): # Set physics mode if len(simulationPy.materials) == 0: # Default physics in dummy mode - settings.multigroup_mode = True + settings.neutron_multigroup_mode = True else: - settings.multigroup_mode = isinstance(simulationPy.materials[0], MaterialMG) + settings.neutron_multigroup_mode = isinstance( + simulationPy.materials[0], MaterialMG + ) # Set appropriate time boundary settings.time_boundary = min( @@ -188,7 +190,7 @@ def preparation(): simulationPy.k_eff = settings.k_init # Activate tally 
scoring for fixed-source - if not settings.eigenvalue_mode: + if not settings.neutron_eigenvalue_mode: simulationPy.cycle_active = True # All active eigenvalue cycle? elif settings.N_inactive == 0: @@ -204,9 +206,9 @@ def preparation(): N_census = settings.N_census # Determine bank size - if settings.eigenvalue_mode or N_census == 1: + if settings.neutron_eigenvalue_mode or N_census == 1: settings.future_bank_buffer_ratio = 0.0 - if not settings.eigenvalue_mode and N_census == 1: + if not settings.neutron_eigenvalue_mode and N_census == 1: settings.census_bank_buffer_ratio = 0.0 settings.source_bank_buffer_ratio = 0.0 size_active = settings.active_bank_buffer @@ -248,7 +250,7 @@ def preparation(): # Pick physics model import mcdc.transport.physics as physics - if settings.multigroup_mode: + if settings.neutron_multigroup_mode: physics.neutron.particle_speed = physics.neutron.multigroup.particle_speed physics.neutron.macro_xs = physics.neutron.multigroup.macro_xs physics.neutron.neutron_production_xs = ( diff --git a/mcdc/object_/settings.py b/mcdc/object_/settings.py index fb149332..dc819bcb 100644 --- a/mcdc/object_/settings.py +++ b/mcdc/object_/settings.py @@ -27,10 +27,6 @@ class Settings(ObjectSingleton): N_batch: int = 1 rng_seed: int = 1 - # Simulation mode - multigroup_mode: bool = False - eigenvalue_mode: bool = False - # k-eigenvalue N_inactive: int = 0 N_active: int = 0 @@ -66,6 +62,10 @@ class Settings(ObjectSingleton): electron_transport: bool = False proton_transport: bool = False + # Neutron transport modes + neutron_multigroup_mode: bool = False + neutron_eigenvalue_mode: bool = False + # GPU mode gpu_strategy: int = GPU_STRATEGY_SIMPLE_ASYNC @@ -106,7 +106,7 @@ def set_eigenmode( self.N_inactive = N_inactive self.N_active = N_active self.N_cycle = self.N_inactive + self.N_active - self.eigenvalue_mode = True + self.neutron_eigenvalue_mode = True self.k_init = k_init self.save_particle = save_particle diff --git a/mcdc/output.py b/mcdc/output.py 
index 39f36d6d..7a3e13f6 100644 --- a/mcdc/output.py +++ b/mcdc/output.py @@ -48,7 +48,7 @@ def generate_output(mcdc, data): create_tally_dataset(file, mcdc, data) # Eigenvalues - if mcdc["settings"]["eigenvalue_mode"]: + if mcdc["settings"]["neutron_eigenvalue_mode"]: N_cycle = mcdc["settings"]["N_cycle"] file.create_dataset( "k_cycle", data=mcdc_get.simulation.k_cycle_chunk(0, N_cycle, mcdc, data) diff --git a/mcdc/print_.py b/mcdc/print_.py index 4da5824e..56823f68 100644 --- a/mcdc/print_.py +++ b/mcdc/print_.py @@ -140,7 +140,7 @@ def print_warning(msg): def print_progress(percent, mcdc): if master: sys.stdout.write("\r") - if not mcdc["settings"]["eigenvalue_mode"]: + if not mcdc["settings"]["neutron_eigenvalue_mode"]: if mcdc["settings"]["N_census"] == 1: sys.stdout.write( " [%-28s] %d%%" % ("=" * int(percent * 28), percent * 100.0) diff --git a/mcdc/transport/particle_bank.py b/mcdc/transport/particle_bank.py index 5c1e029c..e4c764b7 100644 --- a/mcdc/transport/particle_bank.py +++ b/mcdc/transport/particle_bank.py @@ -171,7 +171,7 @@ def manage_particle_banks(simulation): set_bank_size(simulation["bank_source"], 0) # Normalize weight - if simulation["settings"]["eigenvalue_mode"]: + if simulation["settings"]["neutron_eigenvalue_mode"]: normalize_weight( simulation["bank_census"], simulation["settings"]["N_particle"] ) diff --git a/mcdc/transport/physics/neutron/interface.py b/mcdc/transport/physics/neutron/interface.py index 3309c087..883ad35a 100644 --- a/mcdc/transport/physics/neutron/interface.py +++ b/mcdc/transport/physics/neutron/interface.py @@ -2,6 +2,7 @@ #### +import mcdc.transport.physics.neutron.multigroup as multigroup import mcdc.transport.physics.neutron.native as native # ====================================================================================== @@ -11,7 +12,10 @@ @njit def particle_speed(particle_container, simulation, data): - return native.particle_speed(particle_container) + if 
simulation["settings"]["neutron_multigroup_mode"]: + return multigroup.particle_speed(particle_container) + else: + return native.particle_speed(particle_container) # ====================================================================================== @@ -21,14 +25,22 @@ def particle_speed(particle_container, simulation, data): @njit def macro_xs(reaction_type, particle_container, simulation, data): - return native.macro_xs(reaction_type, particle_container, simulation, data) + if simulation["settings"]["neutron_multigroup_mode"]: + return multigroup.macro_xs(reaction_type, particle_container, simulation, data) + else: + return native.macro_xs(reaction_type, particle_container, simulation, data) @njit def neutron_production_xs(reaction_type, particle_container, simulation, data): - return native.neutron_production_xs( - reaction_type, particle_container, simulation, data - ) + if simulation["settings"]["neutron_multigroup_mode"]: + return multigroup.neutron_production_xs( + reaction_type, particle_container, simulation, data + ) + else: + return native.neutron_production_xs( + reaction_type, particle_container, simulation, data + ) # ====================================================================================== @@ -38,4 +50,7 @@ def neutron_production_xs(reaction_type, particle_container, simulation, data): @njit def collision(particle_container, simulation, data): - native.collision(particle_container, simulation, data) + if simulation["settings"]["neutron_multigroup_mode"]: + multigroup.collision(particle_container, simulation, data) + else: + native.collision(particle_container, simulation, data) diff --git a/mcdc/transport/physics/neutron/multigroup.py b/mcdc/transport/physics/neutron/multigroup.py index ffa4ea16..770f9839 100644 --- a/mcdc/transport/physics/neutron/multigroup.py +++ b/mcdc/transport/physics/neutron/multigroup.py @@ -303,7 +303,7 @@ def fission(particle_container, simulation, data): particle_new["t"] -= math.log(xi) / decay # 
Eigenvalue mode: bank right away - if settings["eigenvalue_mode"]: + if settings["neutron_eigenvalue_mode"]: particle_bank_module.bank_census_particle( particle_container_new, simulation ) diff --git a/mcdc/transport/physics/neutron/native.py b/mcdc/transport/physics/neutron/native.py index da1b39e5..6f866ff5 100644 --- a/mcdc/transport/physics/neutron/native.py +++ b/mcdc/transport/physics/neutron/native.py @@ -761,7 +761,7 @@ def fission(reaction, particle_container, nuclide, simulation, data): particle_new["t"] -= math.log(xi) / decay_rate # Eigenvalue mode: bank right away - if settings["eigenvalue_mode"]: + if settings["neutron_eigenvalue_mode"]: particle_bank_module.bank_census_particle( particle_container_new, simulation ) diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 5aeadea4..68340ffc 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -250,7 +250,7 @@ def exhaust_active_bank(simulation, data): def source_closeout(simulation, idx_work, N_prog, data): # Tally history closeout for one-batch fixed-source simulation if ( - not simulation["settings"]["eigenvalue_mode"] + not simulation["settings"]["neutron_eigenvalue_mode"] and simulation["settings"]["N_batch"] == 1 ): if not simulation["settings"]["use_census_based_tally"]: @@ -322,7 +322,7 @@ def move_to_event(particle_container, simulation, data): # Multigroup preparation # In MG mode, particle speed is material-dependent. 
- if settings["multigroup_mode"]: + if settings["neutron_multigroup_mode"]: # If material is not identified yet, locate the particle if particle["material_ID"] == -1: if not geometry.locate_particle(particle_container, simulation, data): @@ -419,7 +419,7 @@ def move_to_event(particle_container, simulation, data): particle_container, distance, tally, simulation, data ) - if settings["eigenvalue_mode"]: + if settings["neutron_eigenvalue_mode"]: tally_module.score.eigenvalue_tally( particle_container, distance, simulation, data ) diff --git a/mcdc/transport/source.py b/mcdc/transport/source.py index fb35f446..f56144f2 100644 --- a/mcdc/transport/source.py +++ b/mcdc/transport/source.py @@ -59,7 +59,7 @@ def source_particle(P_rec_arr, seed, simulation, data): ) # Energy - if simulation["settings"]["multigroup_mode"]: + if simulation["settings"]["neutron_multigroup_mode"]: E = 0.0 if source["mono_energetic"]: g = source["energy_group"] diff --git a/mcdc/transport/tally/closeout.py b/mcdc/transport/tally/closeout.py index ec88ed7e..96f2cd66 100644 --- a/mcdc/transport/tally/closeout.py +++ b/mcdc/transport/tally/closeout.py @@ -109,7 +109,7 @@ def _finalize(tally, simulation, data): if N_batch > 1: N_history = N_batch - elif simulation["settings"]["eigenvalue_mode"]: + elif simulation["settings"]["neutron_eigenvalue_mode"]: N_history = simulation["settings"]["N_active"] else: diff --git a/mcdc/transport/tally/filter.py b/mcdc/transport/tally/filter.py index 038adf04..a743f785 100644 --- a/mcdc/transport/tally/filter.py +++ b/mcdc/transport/tally/filter.py @@ -67,10 +67,10 @@ def get_direction_index(particle_container, tally, data): @njit -def get_energy_index(particle_container, tally, data, multigroup_mode): +def get_energy_index(particle_container, tally, data, neutron_multigroup_mode): particle = particle_container[0] - if multigroup_mode: + if neutron_multigroup_mode: E = particle["g"] else: E = particle["E"] diff --git a/mcdc/transport/tally/score.py 
b/mcdc/transport/tally/score.py index 10722863..b4abf654 100644 --- a/mcdc/transport/tally/score.py +++ b/mcdc/transport/tally/score.py @@ -72,7 +72,7 @@ def tracklength_tally(particle_container, distance, tally, simulation, data): tally_base = simulation["tallies"][tally["parent_ID"]] # Get filter indices - MG_mode = simulation["settings"]["multigroup_mode"] + MG_mode = simulation["settings"]["neutron_multigroup_mode"] i_mu, i_azi, i_energy, i_time = get_filter_indices( particle_container, tally_base, data, MG_mode ) @@ -335,7 +335,7 @@ def surface_tally(particle_container, surface, tally, simulation, data): tally_base = simulation["tallies"][tally["parent_ID"]] # Get filter indices - MG_mode = simulation["settings"]["multigroup_mode"] + MG_mode = simulation["settings"]["neutron_multigroup_mode"] i_mu, i_azi, i_energy, i_time = get_filter_indices( particle_container, tally_base, data, MG_mode ) @@ -402,7 +402,7 @@ def eigenvalue_tally(particle_container, distance, simulation, data): return # Get the decay-wighted multiplicity total = 0.0 - if simulation["settings"]["multigroup_mode"]: + if simulation["settings"]["neutron_multigroup_mode"]: g = particle["g"] for j in range(J): nu_d = mcdc_get.material.mgxs_nu_d(g, j, material, data) From 1c0d1f88eb67084b7c69f4600b5d68e3d012d056 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Wed, 11 Mar 2026 18:59:03 -0700 Subject: [PATCH 31/50] replace mcdc_get*_all and *_vector with explicit slicing for GPU compatibility --- mcdc/transport/data.py | 12 ++++++++-- mcdc/transport/distribution.py | 25 +++++++++++++++----- mcdc/transport/physics/neutron/interface.py | 2 +- mcdc/transport/physics/neutron/multigroup.py | 25 +++++++++++++++----- mcdc/transport/physics/neutron/native.py | 10 +++++--- mcdc/transport/physics/util.py | 6 ++++- 6 files changed, 61 insertions(+), 19 deletions(-) diff --git a/mcdc/transport/data.py b/mcdc/transport/data.py index c8e7a575..443dc629 100644 --- a/mcdc/transport/data.py +++ b/mcdc/transport/data.py @@ 
-29,7 +29,11 @@ def evaluate_data(x, data_base, simulation, data): @njit def evaluate_table(x, table, data): - grid = mcdc_get.table_data.x_all(table, data) + offset = table["x_offset"] + length = table["x_length"] + grid = data[offset : offset + length] + # Above is equivalent to: grid = mcdc_get.table_data.x_all(table, data) + idx = find_bin(x, grid) x1 = grid[idx] x2 = grid[idx + 1] @@ -44,7 +48,11 @@ def evaluate_table(x, table, data): @njit def evaluate_polynomial(x, polynomial, data): - coeffs = mcdc_get.polynomial_data.coefficients_all(polynomial, data) + offset = polynomial["coefficients_offset"] + length = polynomial["coefficients_length"] + coeffs = data[offset : offset + length] + # Above is equivalent to: coeffs = mcdc_get.polynomial_data.coefficients_all(polynomial, data) + total = 0.0 for i in range(len(coeffs)): total += coeffs[i] * x**i diff --git a/mcdc/transport/distribution.py b/mcdc/transport/distribution.py index 69446b16..90c22e1f 100644 --- a/mcdc/transport/distribution.py +++ b/mcdc/transport/distribution.py @@ -151,8 +151,10 @@ def sample_direction(polar_cosine, azimuthal, polar_coordinate, rng_state): def sample_tabulated(table, rng_state, data): xi = rng.lcg(rng_state) - cdf = data[table["cdf_offset"] : (table["cdf_offset"] + table["cdf_length"])] - # Above is equivalent to: cmf = mcdc_get.tabulated_distribution.cdf_all(table, data) + offset = table["cdf_offset"] + length = table["cdf_length"] + cdf = data[offset : offset + length] + # Above is equivalent to: cdf = mcdc_get.tabulated_distribution.cdf_all(table, data) idx = find_bin(xi, cdf) cdf_low = mcdc_get.tabulated_distribution.cdf(idx, table, data) @@ -166,7 +168,9 @@ def sample_tabulated(table, rng_state, data): def sample_pmf(pmf, rng_state, data): xi = rng.lcg(rng_state) - cmf = data[pmf["cmf_offset"] : (pmf["cmf_offset"] + pmf["cmf_length"])] + offset = pmf["cmf_offset"] + length = pmf["cmf_length"] + cmf = data[offset : offset + length] # Above is equivalent to: cmf = 
mcdc_get.pmf_distribution.cmf_all(pmf, data) idx = find_bin(xi, cmf) @@ -205,7 +209,10 @@ def sample_white_direction(nx, ny, nz, rng_state): @njit def sample_multi_table(E, rng_state, multi_table, data, scale=False): - grid = mcdc_get.multi_table_distribution.grid_all(multi_table, data) + offset = multi_table["grid_offset"] + length = multi_table["grid_length"] + grid = data[offset : offset + length] + # Above is equivalent to: grid = mcdc_get.multi_table_distribution.grid_all(multi_table, data) # Edge cases if E < grid[0]: @@ -353,7 +360,10 @@ def sample_evaporation(E, rng_state, evaporation, simulation, data): @njit def sample_kalbach_mann(E, rng_state, kalbach_mann, data): - grid = mcdc_get.kalbach_mann_distribution.energy_all(kalbach_mann, data) + offset = kalbach_mann["energy_offset"] + length = kalbach_mann["energy_length"] + grid = data[offset : offset + length] + # Above is equivalent to: grid = mcdc_get.kalbach_mann_distribution.energy_all(kalbach_mann, data) # Random numbers xi1 = rng.lcg(rng_state) @@ -456,7 +466,10 @@ def sample_kalbach_mann(E, rng_state, kalbach_mann, data): @njit def sample_tabulated_energy_angle(E, rng_state, table, data): - grid = mcdc_get.tabulated_energy_angle_distribution.energy_all(table, data) + offset = kalbach_mann["energy_offset"] + length = kalbach_mann["energy_length"] + grid = data[offset : offset + length] + # Above is equivalent to: grid = mcdc_get.tabulated_energy_angle_distribution.energy_all(table, data) # Random numbers xi1 = rng.lcg(rng_state) diff --git a/mcdc/transport/physics/neutron/interface.py b/mcdc/transport/physics/neutron/interface.py index 883ad35a..c358e83a 100644 --- a/mcdc/transport/physics/neutron/interface.py +++ b/mcdc/transport/physics/neutron/interface.py @@ -13,7 +13,7 @@ @njit def particle_speed(particle_container, simulation, data): if simulation["settings"]["neutron_multigroup_mode"]: - return multigroup.particle_speed(particle_container) + return multigroup.particle_speed(particle_container, 
simulation, data) else: return native.particle_speed(particle_container) diff --git a/mcdc/transport/physics/neutron/multigroup.py b/mcdc/transport/physics/neutron/multigroup.py index 770f9839..5de37cc0 100644 --- a/mcdc/transport/physics/neutron/multigroup.py +++ b/mcdc/transport/physics/neutron/multigroup.py @@ -186,7 +186,10 @@ def scattering(particle_container, simulation, data): particle_new["uz"] = uz_new # Get outgoing spectrum - chi_s = mcdc_get.multigroup_material.mgxs_chi_s_vector(g, material, data) + stride = material["G"] + start = material["mgxs_chi_s_offset"] + g * stride + chi_s = data[start : start + stride] + # Above is equivalent to: chi_s = mcdc_get.multigroup_material.mgxs_chi_s_vector(g, material, data) # Sample outgoing energy xi = rng.lcg(particle_container_new) @@ -240,7 +243,10 @@ def fission(particle_container, simulation, data): nu = mcdc_get.multigroup_material.mgxs_nu_f(g, material, data) nu_p = mcdc_get.multigroup_material.mgxs_nu_p(g, material, data) if J > 0: - nu_d = mcdc_get.multigroup_material.mgxs_nu_d_vector(g, material, data) + stride = material["G"] + start = material["mgxs_nu_d_offset"] + g * stride + nu_d = data[start : start + stride] + # Above is equivalent to: nu_d = mcdc_get.multigroup_material.mgxs_nu_d_vector(g, material, data) # Get number of secondaries N = int( @@ -272,7 +278,10 @@ def fission(particle_container, simulation, data): total = nu_p if xi < total: prompt = True - spectrum = mcdc_get.multigroup_material.mgxs_chi_p_vector(g, material, data) + stride = material["G"] + start = material["mgxs_chi_p"] + g * stride + spectrum = data[start : start + stride] + # Above is equivalent to: spectrum = mcdc_get.multigroup_material.mgxs_chi_p_vector(g, material, data) else: prompt = False @@ -280,9 +289,13 @@ def fission(particle_container, simulation, data): for j in range(J): total += nu_d[j] if xi < total: - spectrum = mcdc_get.multigroup_material.mgxs_chi_d_vector( - j, material, data - ) + stride = material["G"] + 
start = material["mgxs_chi_d_offset"] + j * stride + spectrum = data[start : start + stride] + # Above is equivalent to: + # spectrum = mcdc_get.multigroup_material.mgxs_chi_d_vector( + # j, material, data + # ) decay = mcdc_get.multigroup_material.mgxs_decay_rate( j, material, data ) diff --git a/mcdc/transport/physics/neutron/native.py b/mcdc/transport/physics/neutron/native.py index 6f866ff5..75a4632d 100644 --- a/mcdc/transport/physics/neutron/native.py +++ b/mcdc/transport/physics/neutron/native.py @@ -539,9 +539,13 @@ def inelastic_scattering(reaction, particle_container, nuclide, simulation, data ) spectrum_base = simulation["distributions"][ID] else: - probability_grid = mcdc_get.neutron_inelastic_scattering_reaction.spectrum_probability_grid_all( - reaction, data - ) + offset = reaction["spectrum_probability_grid_offset"] + length = reaction["spectrum_probability_grid_length"] + probability_grid = data[offset : offset + length] + # Above is equivalent to: + # probability_grid = mcdc_get.neutron_inelastic_scattering_reaction.spectrum_probability_grid_all( + # reaction, data + # ) probability_idx = find_bin(E, probability_grid) xi = rng.lcg(particle_container_new) total = 0.0 diff --git a/mcdc/transport/physics/util.py b/mcdc/transport/physics/util.py index d43b60ca..7b2c722d 100644 --- a/mcdc/transport/physics/util.py +++ b/mcdc/transport/physics/util.py @@ -11,7 +11,11 @@ @njit def evaluate_neutron_xs_energy_grid(e, nuclide, data): - energy_grid = mcdc_get.nuclide.neutron_xs_energy_grid_all(nuclide, data) + offset = nuclide["neutron_xs_energy_grid_offset"] + length = nuclide["neutron_xs_energy_grid_length"] + energy_grid = data[offset : offset + length] + # Above is equivalent to: energy_grid = mcdc_get.nuclide.neutron_xs_energy_grid_all(nuclide, data) + idx = find_bin(e, energy_grid) e0 = energy_grid[idx] e1 = energy_grid[idx + 1] From c59d01e1255a9fde13b01d15544389776341e5df Mon Sep 17 00:00:00 2001 From: ilhamv Date: Thu, 12 Mar 2026 20:55:12 -0700 
Subject: [PATCH 32/50] good progress. identified an issue in find_cell_async --- mcdc/code_factory/gpu/program_builder.py | 2 +- mcdc/transport/distribution.py | 71 +++++-- mcdc/transport/physics/interface.py | 4 +- mcdc/transport/physics/neutron/interface.py | 9 +- mcdc/transport/physics/neutron/multigroup.py | 61 ++++-- mcdc/transport/physics/neutron/native.py | 192 ++++++++++--------- mcdc/transport/simulation.py | 2 +- mcdc/transport/tally/score.py | 1 + mcdc/transport/technique.py | 3 +- 9 files changed, 214 insertions(+), 131 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index f294be95..d789bdea 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -204,9 +204,9 @@ def step(program: nb.uintp, particle_input: particle_gpu): base_fns = (initialize, finalize, make_work) async_fns = [step] src_spec = harmonize.RuntimeSpec("mcdc_source", state_spec, base_fns, async_fns) + harmonize.RuntimeSpec.bind_specs() print("PASS") exit() - harmonize.RuntimeSpec.bind_specs() # ================================================================================== # diff --git a/mcdc/transport/distribution.py b/mcdc/transport/distribution.py index 90c22e1f..a2bd93e8 100644 --- a/mcdc/transport/distribution.py +++ b/mcdc/transport/distribution.py @@ -27,7 +27,17 @@ @njit -def sample_distribution(E, distribution, rng_state, simulation, data, scale=False): +def sample_distribution(E, distribution, rng_state, simulation, data): + return _sample_distribution(E, distribution, rng_state, simulation, data, False) + + +@njit +def sample_distribution_with_scale(E, distribution, rng_state, simulation, data): + return _sample_distribution(E, distribution, rng_state, simulation, data, True) + + +@njit +def _sample_distribution(E, distribution, rng_state, simulation, data, scale): distribution_type = distribution["child_type"] ID = distribution["child_ID"] @@ -37,7 +47,7 @@ def 
sample_distribution(E, distribution, rng_state, simulation, data, scale=Fals elif distribution_type == DISTRIBUTION_MULTITABLE: multi_table = simulation["multi_table_distributions"][ID] - return sample_multi_table(E, rng_state, multi_table, data, scale) + return _sample_multi_table(E, rng_state, multi_table, data, scale) elif distribution_type == DISTRIBUTION_LEVEL_SCATTERING: level_scattering = simulation["level_scattering_distributions"][ID] @@ -57,8 +67,24 @@ def sample_distribution(E, distribution, rng_state, simulation, data, scale=Fals @njit -def sample_correlated_distribution( - E, distribution, rng_state, simulation, data, scale=False +def sample_correlated_distribution(E, distribution, rng_state, simulation, data): + return _sample_correlated_distribution( + E, distribution, rng_state, simulation, data, False + ) + + +@njit +def sample_correlated_distribution_with_scale( + E, distribution, rng_state, simulation, data +): + return _sample_correlated_distribution( + E, distribution, rng_state, simulation, data, True + ) + + +@njit +def _sample_correlated_distribution( + E, distribution, rng_state, simulation, data, scale ): distribution_type = distribution["child_type"] ID = distribution["child_ID"] @@ -208,7 +234,12 @@ def sample_white_direction(nx, ny, nz, rng_state): @njit -def sample_multi_table(E, rng_state, multi_table, data, scale=False): +def sample_multi_table(E, rng_state, multi_table, data): + return _sample_multi_table(E, rng_state, multi_table, data, False) + + +@njit +def _sample_multi_table(E, rng_state, multi_table, data, scale): offset = multi_table["grid_offset"] length = multi_table["grid_length"] grid = data[offset : offset + length] @@ -274,7 +305,9 @@ def sample_multi_table(E, rng_state, multi_table, data, scale=False): size = end - start # The CDF - cdf = mcdc_get.multi_table_distribution.cdf_chunk(start, size, multi_table, data) + offset = multi_table["cdf_offset"] + cdf = data[start + offset : start + offset + size] + # Above is 
equivalent to: cdf = mcdc_get.multi_table_distribution.cdf_chunk(start, size, multi_table, data) # Generate random numbers xi = rng.lcg(rng_state) @@ -417,7 +450,9 @@ def sample_kalbach_mann(E, rng_state, kalbach_mann, data): size = end - start # The CDF - cdf = mcdc_get.kalbach_mann_distribution.cdf_chunk(start, size, kalbach_mann, data) + offset = kalbach_mann["cdf_offset"] + cdf = data[start + offset : start + offset + size] + # Above is equivalent to: cdf = mcdc_get.kalbach_mann_distribution.cdf_chunk(start, size, kalbach_mann, data) # Sample bin index idx = find_bin(xi2, cdf) @@ -466,8 +501,8 @@ def sample_kalbach_mann(E, rng_state, kalbach_mann, data): @njit def sample_tabulated_energy_angle(E, rng_state, table, data): - offset = kalbach_mann["energy_offset"] - length = kalbach_mann["energy_length"] + offset = table["energy_offset"] + length = table["energy_length"] grid = data[offset : offset + length] # Above is equivalent to: grid = mcdc_get.tabulated_energy_angle_distribution.energy_all(table, data) @@ -526,9 +561,12 @@ def sample_tabulated_energy_angle(E, rng_state, table, data): size = end - start # The CDF - cdf = mcdc_get.tabulated_energy_angle_distribution.cdf_chunk( - start, size, table, data - ) + offset = table["cdf_offset"] + cdf = data[start + offset : start + offset + size] + # Above is equivalent to: + # cdf = mcdc_get.tabulated_energy_angle_distribution.cdf_chunk( + # start, size, table, data + # ) # Sample bin index idx = find_bin(xi2, cdf) @@ -578,9 +616,12 @@ def sample_tabulated_energy_angle(E, rng_state, table, data): size = end - start # The CDF - cdf = mcdc_get.tabulated_energy_angle_distribution.cosine_cdf_chunk( - start, size, table, data - ) + offset = table["cosine_cdf_offset"] + cdf = data[start + offset : start + offset + size] + # Above is equivalent to: + # cdf = mcdc_get.tabulated_energy_angle_distribution.cosine_cdf_chunk( + # start, size, table, data + # ) # Sample bin index idx = find_bin(xi3, cdf) diff --git 
a/mcdc/transport/physics/interface.py b/mcdc/transport/physics/interface.py index b72af5d7..13b007ef 100644 --- a/mcdc/transport/physics/interface.py +++ b/mcdc/transport/physics/interface.py @@ -66,7 +66,7 @@ def collision_distance(particle_container, simulation, data): @njit -def collision(particle_container, simulation, data): +def collision(particle_container, program, data): particle = particle_container[0] if particle["particle_type"] == PARTICLE_NEUTRON: - neutron.collision(particle_container, simulation, data) + neutron.collision(particle_container, program, data) diff --git a/mcdc/transport/physics/neutron/interface.py b/mcdc/transport/physics/neutron/interface.py index c358e83a..670f05d2 100644 --- a/mcdc/transport/physics/neutron/interface.py +++ b/mcdc/transport/physics/neutron/interface.py @@ -4,6 +4,7 @@ import mcdc.transport.physics.neutron.multigroup as multigroup import mcdc.transport.physics.neutron.native as native +import mcdc.transport.util as util # ====================================================================================== # Particle attributes @@ -49,8 +50,10 @@ def neutron_production_xs(reaction_type, particle_container, simulation, data): @njit -def collision(particle_container, simulation, data): +def collision(particle_container, program, data): + simulation = util.access_simulation(program) + if simulation["settings"]["neutron_multigroup_mode"]: - multigroup.collision(particle_container, simulation, data) + multigroup.collision(particle_container, program, data) else: - native.collision(particle_container, simulation, data) + native.collision(particle_container, program, data) diff --git a/mcdc/transport/physics/neutron/multigroup.py b/mcdc/transport/physics/neutron/multigroup.py index 5de37cc0..a1f51494 100644 --- a/mcdc/transport/physics/neutron/multigroup.py +++ b/mcdc/transport/physics/neutron/multigroup.py @@ -10,6 +10,7 @@ import mcdc.transport.particle as particle_module import mcdc.transport.particle_bank as 
particle_bank_module import mcdc.transport.rng as rng +import mcdc.transport.util as util from mcdc.constant import ( PI, @@ -61,39 +62,54 @@ def macro_xs(reaction_type, particle_container, simulation, data): def neutron_production_xs(reaction_type, particle_container, simulation, data): particle = particle_container[0] material = simulation["multigroup_materials"][particle["material_ID"]] - g = particle["g"] + + # Total production if reaction_type == NEUTRON_REACTION_TOTAL: total = 0.0 - total += neutron_production_xs( - NEUTRON_REACTION_ELASTIC_SCATTERING, - particle_container, - simulation, - data, - ) - total += neutron_production_xs( - NEUTRON_REACTION_FISSION, particle_container, simulation, data - ) + + # Scattering production + nu = mcdc_get.multigroup_material.mgxs_nu_s(g, material, data) + xs = mcdc_get.multigroup_material.mgxs_scatter(g, material, data) + total += nu * xs + + # Fission production + nu = mcdc_get.multigroup_material.mgxs_nu_f(g, material, data) + xs = mcdc_get.multigroup_material.mgxs_fission(g, material, data) + total += nu * xs return total + + # Capture production (none) elif reaction_type == NEUTRON_REACTION_CAPTURE: return 0.0 + + # Scattering production elif reaction_type == NEUTRON_REACTION_ELASTIC_SCATTERING: nu = mcdc_get.multigroup_material.mgxs_nu_s(g, material, data) xs = mcdc_get.multigroup_material.mgxs_scatter(g, material, data) return nu * xs + + # Fission production elif reaction_type == NEUTRON_REACTION_FISSION: nu = mcdc_get.multigroup_material.mgxs_nu_f(g, material, data) xs = mcdc_get.multigroup_material.mgxs_fission(g, material, data) return nu * xs + + # Prompt fission production elif reaction_type == NEUTRON_REACTION_FISSION_PROMPT: nu = mcdc_get.multigroup_material.mgxs_nu_p(g, material, data) xs = mcdc_get.multigroup_material.mgxs_fission(g, material, data) return nu * xs + + # Delayed neutron production elif reaction_type == NEUTRON_REACTION_FISSION_DELAYED: nu = mcdc_get.multigroup_material.mgxs_nu_d_total(g, 
material, data) xs = mcdc_get.multigroup_material.mgxs_fission(g, material, data) return nu * xs + # Unsupported default + return 0.0 + # ====================================================================================== # Collision @@ -101,7 +117,8 @@ def neutron_production_xs(reaction_type, particle_container, simulation, data): @njit -def collision(particle_container, simulation, data): +def collision(particle_container, program, data): + simulation = util.access_simulation(program) particle = particle_container[0] # Get the reaction cross-sections @@ -121,11 +138,12 @@ def collision(particle_container, simulation, data): xi = rng.lcg(particle_container) * SigmaT total = SigmaS if total > xi: - scattering(particle_container, simulation, data) + scattering(particle_container, program, data) else: total += SigmaF if total > xi: - fission(particle_container, simulation, data) + return + fission(particle_container, program, data) else: particle["alive"] = False @@ -136,7 +154,9 @@ def collision(particle_container, simulation, data): @njit -def scattering(particle_container, simulation, data): +def scattering(particle_container, program, data): + simulation = util.access_simulation(program) + # Particle attributes particle = particle_container[0] g = particle["g"] @@ -164,7 +184,7 @@ def scattering(particle_container, simulation, data): N = int(math.floor(weight_production * nu_s + rng.lcg(particle_container))) # Set up secondary partice container - particle_container_new = np.zeros(1, type_.particle_data) + particle_container_new = util.local_array(1, type_.particle_data) particle_new = particle_container_new[0] # Create the secondaries @@ -210,13 +230,12 @@ def scattering(particle_container, simulation, data): particle["E"] = particle_new["E"] particle["w"] = particle_new["w"] else: - particle_bank_module.bank_active_particle( - particle_container_new, simulation - ) + particle_bank_module.bank_active_particle(particle_container_new, program) @njit -def 
fission(particle_container, simulation, data): +def fission(particle_container, program, data): + simulation = util.access_simulation(program) settings = simulation["settings"] # Particle properties @@ -256,7 +275,7 @@ def fission(particle_container, simulation, data): ) # Set up secondary partice container - particle_container_new = np.zeros(1, type_.particle_data) + particle_container_new = util.local_array(1, type_.particle_data) particle_new = particle_container_new[0] # Create the secondaries @@ -355,7 +374,7 @@ def fission(particle_container, simulation, data): particle["w"] = particle_new["w"] else: particle_bank_module.bank_active_particle( - particle_container_new, simulation + particle_container_new, program ) # Hit future census --> add to future bank diff --git a/mcdc/transport/physics/neutron/native.py b/mcdc/transport/physics/neutron/native.py index 75a4632d..43e6aaf1 100644 --- a/mcdc/transport/physics/neutron/native.py +++ b/mcdc/transport/physics/neutron/native.py @@ -10,6 +10,7 @@ import mcdc.transport.particle as particle_module import mcdc.transport.particle_bank as particle_bank_module import mcdc.transport.rng as rng +import mcdc.transport.util as util from mcdc.constant import ( ANGLE_DISTRIBUTED, @@ -31,8 +32,8 @@ ) from mcdc.transport.data import evaluate_data from mcdc.transport.distribution import ( - sample_correlated_distribution, - sample_distribution, + sample_correlated_distribution_with_scale, + sample_distribution_with_scale, sample_isotropic_cosine, sample_isotropic_direction, sample_multi_table, @@ -128,93 +129,109 @@ def reaction_micro_xs(E, reaction_base, nuclide, data): @njit def neutron_production_xs(reaction_type, particle_container, simulation, data): - particle = particle_container[0] - material_base = simulation["materials"][particle["material_ID"]] - material = simulation["native_materials"][material_base["child_ID"]] - + # Total production if reaction_type == NEUTRON_REACTION_TOTAL: - elastic_type = 
NEUTRON_REACTION_ELASTIC_SCATTERING - inelastic_type = NEUTRON_REACTION_INELASTIC_SCATTERING - fission_type = NEUTRON_REACTION_FISSION - elastic_xs = neutron_production_xs( - elastic_type, particle_container, simulation, data + elastic_xs = macro_xs( + NEUTRON_REACTION_ELASTIC_SCATTERING, particle_container, simulation, data ) - inelastic_xs = neutron_production_xs( - inelastic_type, particle_container, simulation, data + inelastic_xs = _neutron_inelastic_scattering_production_xs( + particle_container, simulation, data + ) + fission_xs = _neutron_fission_production_xs( + particle_container, simulation, data ) return elastic_xs + inelastic_xs + fission_xs + # Elastic scattering production elif reaction_type == NEUTRON_REACTION_ELASTIC_SCATTERING: return macro_xs(reaction_type, particle_container, simulation, data) + # Capture production (none) elif reaction_type == NEUTRON_REACTION_CAPTURE: return 0.0 + # Inelastic scattering production elif reaction_type == NEUTRON_REACTION_INELASTIC_SCATTERING: - total = 0.0 - for i in range(material["N_nuclide"]): - nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) - nuclide = simulation["nuclides"][nuclide_ID] - - E = particle["E"] - nuclide_density = mcdc_get.native_material.nuclide_densities( - i, material, data - ) + return _neutron_inelastic_scattering_production_xs( + particle_container, simulation, data + ) - for j in range(nuclide["N_neutron_inelastic_scattering_reaction"]): - reaction_ID = int( - mcdc_get.nuclide.neutron_inelastic_scattering_reaction_IDs( - j, nuclide, data - ) - ) - reaction_base = simulation["neutron_reactions"][reaction_ID] - reaction = simulation["neutron_inelastic_scattering_reactions"][ - reaction_base["child_ID"] - ] + # Fission production + elif reaction_type == NEUTRON_REACTION_FISSION: + return _neutron_fission_production_xs(particle_container, simulation, data) - xs = reaction_micro_xs(E, reaction_base, nuclide, data) - nu = reaction["multiplicity"] - total += 
nuclide_density * nu * xs + # Unsupported default + else: + return 0.0 - return total - elif reaction_type == NEUTRON_REACTION_FISSION: - if not material_base["fissionable"]: - return 0.0 - - total = 0.0 - for i in range(material["N_nuclide"]): - nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) - nuclide = simulation["nuclides"][nuclide_ID] - if not nuclide["fissionable"]: - continue - - E = particle["E"] - nuclide_density = mcdc_get.native_material.nuclide_densities( - i, material, data - ) +@njit +def _neutron_inelastic_scattering_production_xs(particle_container, simulation, data): + particle = particle_container[0] + material_base = simulation["materials"][particle["material_ID"]] + material = simulation["native_materials"][material_base["child_ID"]] - for j in range(nuclide["N_neutron_fission_reaction"]): - reaction_ID = int( - mcdc_get.nuclide.neutron_fission_reaction_IDs(j, nuclide, data) - ) - reaction_base = simulation["neutron_reactions"][reaction_ID] - reaction = simulation["neutron_fission_reactions"][ - reaction_base["child_ID"] - ] + total = 0.0 + for i in range(material["N_nuclide"]): + nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) + nuclide = simulation["nuclides"][nuclide_ID] + + E = particle["E"] + nuclide_density = mcdc_get.native_material.nuclide_densities(i, material, data) - xs = reaction_micro_xs(E, reaction_base, nuclide, data) - nu_p = neutron_fission_prompt_multiplicity(E, nuclide, simulation, data) - nu_d = neutron_fission_delayed_multiplicity( - E, nuclide, simulation, data + for j in range(nuclide["N_neutron_inelastic_scattering_reaction"]): + reaction_ID = int( + mcdc_get.nuclide.neutron_inelastic_scattering_reaction_IDs( + j, nuclide, data ) - nu = nu_d + nu_p - total += nuclide_density * nu * xs + ) + reaction_base = simulation["neutron_reactions"][reaction_ID] + reaction = simulation["neutron_inelastic_scattering_reactions"][ + reaction_base["child_ID"] + ] - return total + xs = 
reaction_micro_xs(E, reaction_base, nuclide, data) + nu = reaction["multiplicity"] + total += nuclide_density * nu * xs - else: - return -1.0 + return total + + +@njit +def _neutron_fission_production_xs(particle_container, simulation, data): + particle = particle_container[0] + material_base = simulation["materials"][particle["material_ID"]] + material = simulation["native_materials"][material_base["child_ID"]] + + if not material_base["fissionable"]: + return 0.0 + + total = 0.0 + for i in range(material["N_nuclide"]): + nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) + nuclide = simulation["nuclides"][nuclide_ID] + if not nuclide["fissionable"]: + continue + + E = particle["E"] + nuclide_density = mcdc_get.native_material.nuclide_densities(i, material, data) + + for j in range(nuclide["N_neutron_fission_reaction"]): + reaction_ID = int( + mcdc_get.nuclide.neutron_fission_reaction_IDs(j, nuclide, data) + ) + reaction_base = simulation["neutron_reactions"][reaction_ID] + reaction = simulation["neutron_fission_reactions"][ + reaction_base["child_ID"] + ] + + xs = reaction_micro_xs(E, reaction_base, nuclide, data) + nu_p = neutron_fission_prompt_multiplicity(E, nuclide, simulation, data) + nu_d = neutron_fission_delayed_multiplicity(E, nuclide, simulation, data) + nu = nu_d + nu_p + total += nuclide_density * nu * xs + + return total # ====================================================================================== @@ -223,7 +240,8 @@ def neutron_production_xs(reaction_type, particle_container, simulation, data): @njit -def collision(particle_container, simulation, data): +def collision(particle_container, program, data): + simulation = util.access_simulation(program) particle = particle_container[0] material = simulation["native_materials"][particle["material_ID"]] @@ -324,7 +342,7 @@ def collision(particle_container, simulation, data): total += xs if xi < total: inelastic_scattering( - reaction, particle_container, nuclide, 
simulation, data + reaction, particle_container, nuclide, program, data ) return @@ -341,7 +359,7 @@ def collision(particle_container, simulation, data): reaction_base = simulation["neutron_reactions"][reaction_base_ID] total += reaction_micro_xs(E, reaction_base, nuclide, data) if xi < total: - fission(reaction, particle_container, nuclide, simulation, data) + fission(reaction, particle_container, nuclide, program, data) return @@ -402,6 +420,7 @@ def elastic_scattering(reaction, particle_container, nuclide, simulation, data): # Sample the scattering cosine from the multi-PDF distribution multi_table = simulation["multi_table_distributions"][reaction["mu_table_ID"]] mu0 = sample_multi_table(E, particle_container, multi_table, data) + return # Scatter the direction in COM azi = 2.0 * PI * rng.lcg(particle_container) @@ -485,7 +504,9 @@ def sample_nucleus_velocity(A, particle_container): @njit -def inelastic_scattering(reaction, particle_container, nuclide, simulation, data): +def inelastic_scattering(reaction, particle_container, nuclide, program, data): + simulation = util.access_simulation(program) + # Particle attributes particle = particle_container[0] E = particle["E"] @@ -502,7 +523,7 @@ def inelastic_scattering(reaction, particle_container, nuclide, simulation, data use_all_spectrum = N == N_spectrum # Set up secondary partice container - particle_container_new = np.zeros(1, type_.particle_data) + particle_container_new = util.local_array(1, type_.particle_data) particle_new = particle_container_new[0] # Create the secondaries @@ -567,12 +588,12 @@ def inelastic_scattering(reaction, particle_container, nuclide, simulation, data # Sample energy if not angle_type == ANGLE_ENERGY_CORRELATED: - E_new = sample_distribution( - E, spectrum_base, particle_container_new, simulation, data, scale=True + E_new = sample_distribution_with_scale( + E, spectrum_base, particle_container_new, simulation, data ) else: - E_new, mu = sample_correlated_distribution( - E, 
spectrum_base, particle_container_new, simulation, data, scale=True + E_new, mu = sample_correlated_distribution_with_scale( + E, spectrum_base, particle_container_new, simulation, data ) # ============================================================================== @@ -612,9 +633,7 @@ def inelastic_scattering(reaction, particle_container, nuclide, simulation, data particle["uz"] = particle_new["uz"] particle["E"] = particle_new["E"] else: - particle_bank_module.bank_active_particle( - particle_container_new, simulation - ) + particle_bank_module.bank_active_particle(particle_container_new, program) # ====================================================================================== @@ -623,7 +642,8 @@ def inelastic_scattering(reaction, particle_container, nuclide, simulation, data @njit -def fission(reaction, particle_container, nuclide, simulation, data): +def fission(reaction, particle_container, nuclide, program, data): + simulation = util.access_simulation(program) settings = simulation["settings"] # Particle properties @@ -658,7 +678,7 @@ def fission(reaction, particle_container, nuclide, simulation, data): ) # Set up secondary partice container - particle_container_new = np.zeros(1, type_.particle_data) + particle_container_new = util.local_array(1, type_.particle_data) particle_new = particle_container_new[0] # Create the secondaries @@ -707,22 +727,20 @@ def fission(reaction, particle_container, nuclide, simulation, data): # Sample energy (also angle if correlated) spectrum_base = simulation["distributions"][reaction["spectrum_ID"]] if not angle_type == ANGLE_ENERGY_CORRELATED: - E_new = sample_distribution( + E_new = sample_distribution_with_scale( E, spectrum_base, particle_container_new, simulation, data, - scale=True, ) else: - E_new, mu = sample_correlated_distribution( + E_new, mu = sample_correlated_distribution_with_scale( E, spectrum_base, particle_container_new, simulation, data, - scale=True, ) # Frame transformation @@ -804,7 +822,7 @@ 
def fission(reaction, particle_container, nuclide, simulation, data): particle["w"] = particle_new["w"] else: particle_bank_module.bank_active_particle( - particle_container_new, simulation + particle_container_new, program ) # Hit future census --> add to future bank diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 68340ffc..cd092888 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -291,7 +291,7 @@ def step_particle(particle_container, program, data): # Collision if particle["event"] & EVENT_COLLISION: - physics.collision(particle_container, simulation, data) + physics.collision(particle_container, program, data) # Surface and domain crossing if particle["event"] & EVENT_SURFACE_CROSSING: diff --git a/mcdc/transport/tally/score.py b/mcdc/transport/tally/score.py index b4abf654..19e43970 100644 --- a/mcdc/transport/tally/score.py +++ b/mcdc/transport/tally/score.py @@ -378,6 +378,7 @@ def eigenvalue_tally(particle_container, distance, simulation, data): ) # Fission production (needed even during inactive cycle) + return util.atomic_add(simulation["eigenvalue_tally_nuSigmaF"], 0, flux * nuSigmaF) # Done, if inactive diff --git a/mcdc/transport/technique.py b/mcdc/transport/technique.py index dd01216b..fe629af3 100644 --- a/mcdc/transport/technique.py +++ b/mcdc/transport/technique.py @@ -9,6 +9,7 @@ import mcdc.transport.particle as particle_module import mcdc.transport.particle_bank as particle_bank_module import mcdc.transport.rng as rng +import mcdc.transport.util as util # ====================================================================================== # Weight Roulette @@ -54,7 +55,7 @@ def population_control(simulation): # Splitting Number sn = 1.0 / ws - P_rec_arr = np.zeros(1, type_.particle_data) + P_rec_arr = util.local_array(1, type_.particle_data) P_rec = P_rec_arr[0] # Perform split-roulette to all particles in local bank From 1c99e317bc45a761bc5131f2b9c9698f02258dff Mon Sep 17 00:00:00 
2001 From: ilhamv Date: Sun, 15 Mar 2026 22:39:49 -0700 Subject: [PATCH 33/50] gpu setup done --- mcdc/code_factory/gpu/program_builder.py | 148 ++++++---- .../gpu/transport/particle_bank.py | 2 + mcdc/code_factory/gpu/transport/simulation.py | 267 ++---------------- mcdc/code_factory/literals_generator.py | 6 +- mcdc/code_factory/numba_objects_generator.py | 46 ++- mcdc/{transport => }/literals.py | 0 mcdc/main.py | 10 - mcdc/object_/gpu_tools.py | 5 +- mcdc/transport/__init__.py | 1 - mcdc/transport/geometry/interface.py | 2 +- 10 files changed, 136 insertions(+), 351 deletions(-) rename mcdc/{transport => }/literals.py (100%) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index d789bdea..83f57197 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -136,8 +136,28 @@ def find_cell(program: nb.uintp, particle: particle_gpu): # Program builder # ====================================================================================== -src_free_program = lambda pointer: None -free_state = lambda pointer: None +alloc_state = None +free_state = None + +alloc_program = None +free_program = None + +load_state_device_simulation = None +store_state_device_simulation = None +store_pointer_state_device_simulation = None + +load_state_device_data = None +store_state_device_data = None +store_pointer_state_device_data = None + +init_program = None +exec_program = None +complete = None +clear_flags = None +set_device = None + +ARENA_SIZE = 0 +BLOCK_COUNT = 0 def build_gpu_program(data_size): @@ -147,7 +167,16 @@ def build_gpu_program(data_size): from mcdc.transport.simulation import generate_source_particle, step_particle - global src_free_program, free_state + global alloc_state, free_state + + global alloc_program, free_program + + global load_state_device_simulation, store_state_device_simulation, store_pointer_state_device_simulation + + global load_state_device_data, 
store_state_device_data, store_pointer_state_device_data + + global init_program, exec_program, complete, clear_flags, set_device + global ARENA_SIZE, BLOCK_COUNT shape = eval(f"{(data_size,)}") @@ -205,17 +234,8 @@ def step(program: nb.uintp, particle_input: particle_gpu): async_fns = [step] src_spec = harmonize.RuntimeSpec("mcdc_source", state_spec, base_fns, async_fns) harmonize.RuntimeSpec.bind_specs() - print("PASS") - exit() - - # ================================================================================== - # - # ================================================================================== - - rank = MPI.COMM_WORLD.Get_rank() - - MPI.COMM_WORLD.Barrier() + # Load the specs harmonize.RuntimeSpec.load_specs() if config.args.gpu_strategy == "async": @@ -224,68 +244,72 @@ def step(program: nb.uintp, particle_input: particle_gpu): else: src_fns = src_spec.event_functions() - arena_size = config.args.gpu_arena_size - block_count = config.args.gpu_block_count + ARENA_SIZE = config.args.gpu_arena_size + BLOCK_COUNT = config.args.gpu_block_count alloc_state = src_fns["alloc_state"] free_state = src_fns["free_state"] - src_alloc_program = src_fns["alloc_program"] - src_free_program = src_fns["free_program"] - src_load_global = src_fns["load_state_device_global"] - src_store_global = src_fns["store_state_device_global"] - src_store_pointer_global = src_fns["store_pointer_state_device_global"] - src_load_data = src_fns["load_state_device_data"] - src_store_data = src_fns["store_state_device_data"] - src_store_pointer_data = src_fns["store_pointer_state_device_data"] - src_init_program = src_fns["init_program"] - src_exec_program = src_fns["exec_program"] - src_complete = src_fns["complete"] - src_clear_flags = src_fns["clear_flags"] - src_set_device = src_fns["set_device"] - - # ================================================================================== - # - # ================================================================================== - - """ - 
global loop_source - loop_source = gpu_loop_source - # - # Overwrite function - for impl in target_rosters["cpu"].values(): - overwrite_func(impl, impl) - """ - - # ================================================================================== - # Setup - # ================================================================================== - - device_id = rank % config.args.gpu_share_stride - - mcdc = mcdc_container[0] - src_set_device(device_id) - mcdc["gpu_meta"]["state_pointer"] = cast_voidptr_to_uintp(alloc_state()) + alloc_program = src_fns["alloc_program"] + free_program = src_fns["free_program"] + + load_state_device_simulation = src_fns["load_state_device_simulation"] + store_state_device_simulation = src_fns["store_state_device_simulation"] + store_pointer_state_device_simulation = src_fns[ + "store_pointer_state_device_simulation" + ] + + load_state_device_data = src_fns["load_state_device_data"] + store_state_device_data = src_fns["store_state_device_data"] + store_pointer_state_device_data = src_fns["store_pointer_state_device_data"] + + init_program = src_fns["init_program"] + exec_program = src_fns["exec_program"] + complete = src_fns["complete"] + clear_flags = src_fns["clear_flags"] + set_device = src_fns["set_device"] + + +# ====================================================================================== +# Setup GPU +# ====================================================================================== + +from numba import njit + +rank = MPI.COMM_WORLD.Get_rank() +device_id = rank % config.args.gpu_share_stride + + +@njit +def setup_gpu_program(simulation_container, data): + simulation = simulation_container[0] + + set_device(device_id) + simulation["gpu_meta"]["state_pointer"] = cast_voidptr_to_uintp(alloc_state()) + if config.gpu_state_storage == "separate": - src_store_pointer_global( - mcdc["gpu_meta"]["state_pointer"], mcdc["gpu_meta"]["global_pointer"] + store_pointer_state_device_simulation( + 
simulation["gpu_meta"]["state_pointer"], + simulation["gpu_meta"]["simulation_pointer"], ) - src_store_pointer_data( - mcdc["gpu_meta"]["state_pointer"], mcdc["gpu_meta"]["tally_pointer"] + store_pointer_state_device_data( + simulation["gpu_meta"]["state_pointer"], + simulation["gpu_meta"]["data_pointer"], ) else: - src_store_pointer_global(mcdc["gpu_meta"]["state_pointer"], mcdc_container) - src_store_pointer_data(mcdc["gpu_meta"]["state_pointer"], data) + store_pointer_state_device_simulation( + simulation["gpu_meta"]["state_pointer"], simulation_container + ) + store_pointer_state_device_data(simulation["gpu_meta"]["state_pointer"], data) - mcdc["gpu_meta"]["source_program_pointer"] = cast_voidptr_to_uintp( - src_alloc_program(mcdc["gpu_meta"]["state_pointer"], arena_size) + simulation["gpu_meta"]["program_pointer"] = cast_voidptr_to_uintp( + alloc_program(simulation["gpu_meta"]["state_pointer"], ARENA_SIZE) ) - src_init_program(mcdc["gpu_meta"]["source_program_pointer"], block_count) - return + init_program(simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT) def teardown_gpu_program(mcdc): - src_free_program(cast_uintp_to_voidptr(mcdc["gpu_meta"]["source_program_pointer"])) + free_program(cast_uintp_to_voidptr(mcdc["gpu_meta"]["program_pointer"])) free_state(cast_uintp_to_voidptr(mcdc["gpu_meta"]["state_pointer"])) diff --git a/mcdc/code_factory/gpu/transport/particle_bank.py b/mcdc/code_factory/gpu/transport/particle_bank.py index f1f2af61..8cdc46a4 100644 --- a/mcdc/code_factory/gpu/transport/particle_bank.py +++ b/mcdc/code_factory/gpu/transport/particle_bank.py @@ -22,8 +22,10 @@ def bank_active_particle(particle_container, program): particle_module.copy(active_particle_container, particle_container) if simulation["settings"]["gpu_strategy"] == GPU_STRATEGY_SIMPLE_ASYNC: gpu_program.step_async(program, active_particle_container[0]) + """ else: gpu_program.find_cell_async(program, active_particle_container[0]) + """ @njit diff --git 
a/mcdc/code_factory/gpu/transport/simulation.py b/mcdc/code_factory/gpu/transport/simulation.py index 0ce2a722..42084bea 100644 --- a/mcdc/code_factory/gpu/transport/simulation.py +++ b/mcdc/code_factory/gpu/transport/simulation.py @@ -1,98 +1,13 @@ -from mpi4py import MPI -import mcdc.code_factory.gpu.adapt as adapt +import mcdc.config as config caching = config.caching -# ============================================================================= -# Functions for GPU Interop -# ============================================================================= - -# The symbols declared below will be overwritten to reference external code that -# manages GPU execution (if GPU execution is supported and selected) -alloc_state, free_state = [None] * 2 - -src_alloc_program, src_free_program = [None] * 2 -( - src_load_global, - src_load_constant, - src_store_global, - src_store_data, - src_store_pointer_data, -) = [None] * 5 -src_init_program, src_exec_program, src_complete, src_clear_flags = [None] * 4 - -pre_alloc_program, pre_free_program = [None] * 2 -pre_load_global, pre_load_data, pre_store_global, pre_store_data = [None] * 4 -pre_init_program, pre_exec_program, pre_complete, pre_clear_flags = [None] * 4 - - -# If GPU execution is supported and selected, the functions shown below will -# be redefined to overwrite the above symbols and perform initialization/ -# finalization of GPU state -@njit -def setup_gpu(mcdc, data_tally): - pass - - -@njit -def teardown_gpu(mcdc): - pass - - -def gpu_sources_spec(): - def make_work(prog: nb.uintp) -> nb.boolean: - mcdc = adapt.mcdc_global(prog) - - atomic_add(mcdc["mpi_work_iter"], 0, 1) - idx_work = mcdc["mpi_work_iter"][0] - - if idx_work >= mcdc["mpi_work_size"]: - return False - - generate_source_particle( - mcdc["mpi_work_start"], nb.uint64(idx_work), mcdc["source_seed"], prog - ) - return True - - def initialize(prog: nb.uintp): - pass - - def finalize(prog: nb.uintp): - pass - - base_fns = (initialize, finalize, 
make_work) - - def step(prog: nb.uintp, P_input: adapt.particle_gpu): - mcdc = adapt.mcdc_global(prog) - data = adapt.mcdc_data(prog) - particle_container = np.zeros(1, type_.particle) - particle_container[0] = P_input - particle = particle_container[0] - if particle["fresh"]: - prep_particle(particle_container, prog) - particle["fresh"] = False - step_particle(particle_container, data, prog) - if particle["alive"]: - adapt.step_async(prog, P) - - async_fns = [step] - return adapt.harm.RuntimeSpec("mcdc_source", adapt.state_spec, base_fns, async_fns) - - -BLOCK_COUNT = config.args.gpu_block_count - -ASYNC_EXECUTION = config.args.gpu_strategy == "async" - @njit(cache=caching) -def gpu_loop_source(seed, data, mcdc): - +def source_loop(seed, data, mcdc): # Progress bar indicator N_prog = 0 - if mcdc["technique"]["domain_decomposition"]: - particle_bank_module.dd_check_in(mcdc) - # ===================================================================== # GPU Interop # ===================================================================== @@ -100,7 +15,7 @@ def gpu_loop_source(seed, data, mcdc): # For async execution iter_count = 655360000 # For event-based execution - batch_size = 1 + batch_size = 64 full_work_size = mcdc["mpi_work_size"] if ASYNC_EXECUTION: @@ -116,172 +31,40 @@ def gpu_loop_source(seed, data, mcdc): mcdc["source_seed"] = seed # Store the global state to the GPU - src_store_constant(mcdc["gpu_state_pointer"], mcdc) - src_store_data(mcdc["gpu_state_pointer"], data) + if config.gpu_state_storage == "separate": + adapt.harm.memcpy_host_to_device(mcdc["gpu_meta"]["state_pointer"], mcdc) + adapt.harm.memcpy_host_to_device(mcdc["gpu_meta"]["state_pointer"], data) # Execute the program, and continue to do so until it is done if ASYNC_EXECUTION: - src_exec_program(mcdc["source_program_pointer"], BLOCK_COUNT, iter_count) - while not src_complete(mcdc["source_program_pointer"]): - particle_bank_module.dd_particle_send(mcdc) + src_exec_program( + 
mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT, iter_count + ) + while not src_complete(mcdc["gpu_meta"]["source_program_pointer"]): + kernel.dd_particle_send(mcdc) src_exec_program( - mcdc["source_program_pointer"], BLOCK_COUNT, iter_count + mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT, iter_count ) else: - src_exec_program(mcdc["source_program_pointer"], BLOCK_COUNT, batch_size) - while not src_complete(mcdc["source_program_pointer"]): - particle_bank_module.dd_particle_send(mcdc) + src_exec_program( + mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT, batch_size + ) + while not src_complete(mcdc["gpu_meta"]["source_program_pointer"]): + kernel.dd_particle_send(mcdc) src_exec_program( - mcdc["source_program_pointer"], BLOCK_COUNT, batch_size + mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT, batch_size ) - + src_clear_flags(mcdc["gpu_meta"]["source_program_pointer"]) # Recover the original program state - src_load_constant(mcdc, mcdc["gpu_state_pointer"]) - src_load_data(data, mcdc["gpu_state_pointer"]) - src_clear_flags(mcdc["source_program_pointer"]) - - mcdc["mpi_work_size"] = full_work_size - - particle_bank_module.set_bank_size(mcdc["bank_active"], 0) - - # ===================================================================== - # Closeout (Moved out of the typical particle loop) - # ===================================================================== - - source_closeout(mcdc, 1, 1, data) - - if mcdc["technique"]["domain_decomposition"]: - source_dd_resolution(data, mcdc) - - -def build_gpu_progs(input_deck, args): - - STRAT = args.gpu_strategy - - src_spec = gpu_sources_spec() - - adapt.harm.RuntimeSpec.bind_specs() - - rank = MPI.COMM_WORLD.Get_rank() - device_id = rank % args.gpu_share_stride - - if MPI.COMM_WORLD.Get_size() > 1: - MPI.COMM_WORLD.Barrier() - - adapt.harm.RuntimeSpec.load_specs() - - if STRAT == "async": - args.gpu_arena_size = args.gpu_arena_size // 32 - src_fns = src_spec.async_functions() - pre_fns = 
pre_spec.async_functions() - else: - src_fns = src_spec.event_functions() - pre_fns = pre_spec.event_functions() - - ARENA_SIZE = args.gpu_arena_size - BLOCK_COUNT = args.gpu_block_count - - global alloc_state, free_state - alloc_state = src_fns["alloc_state"] - free_state = src_fns["free_state"] - global src_alloc_program, src_free_program - global src_load_global, src_store_global, src_load_data, src_store_data, src_store_pointer_data - global src_init_program, src_exec_program, src_complete, src_clear_flags - src_alloc_program = src_fns["alloc_program"] - src_free_program = src_fns["free_program"] - src_load_global = src_fns["load_state_device_global"] - src_store_global = src_fns["store_state_device_global"] - src_store_pointer_global = src_fns["store_pointer_state_device_global"] - src_load_data = src_fns["load_state_device_data"] - src_store_data = src_fns["store_state_device_data"] - src_store_pointer_data = src_fns["store_pointer_state_device_data"] - src_init_program = src_fns["init_program"] - src_exec_program = src_fns["exec_program"] - src_complete = src_fns["complete"] - src_clear_flags = src_fns["clear_flags"] - src_set_device = src_fns["set_device"] - - global pre_alloc_program, pre_free_program - global pre_load_global, pre_store_global, pre_load_data, pre_store_data - global pre_init_program, pre_exec_program, pre_complete, pre_clear_flags - pre_alloc_state = pre_fns["alloc_state"] - pre_free_state = pre_fns["free_state"] - pre_alloc_program = pre_fns["alloc_program"] - pre_free_program = pre_fns["free_program"] - pre_load_global = pre_fns["load_state_device_global"] - pre_store_global = pre_fns["store_state_device_global"] - pre_load_data = pre_fns["load_state_device_data"] - pre_store_data = pre_fns["store_state_device_data"] - pre_init_program = pre_fns["init_program"] - pre_exec_program = pre_fns["exec_program"] - pre_complete = pre_fns["complete"] - pre_clear_flags = pre_fns["clear_flags"] - - @njit - def real_setup_gpu(mcdc_array, 
data_tally): - mcdc = mcdc_array[0] - src_set_device(device_id) - arena_size = ARENA_SIZE - mcdc["gpu_meta"]["state_pointer"] = adapt.cast_voidptr_to_uintp(alloc_state()) - # src_store_global(mcdc["gpu_meta"]["state_pointer"], mcdc_array[0]) if config.gpu_state_storage == "separate": - src_store_pointer_global( - mcdc["gpu_meta"]["state_pointer"], mcdc["gpu_meta"]["global_pointer"] - ) - src_store_pointer_data( - mcdc["gpu_meta"]["state_pointer"], mcdc["gpu_meta"]["tally_pointer"] - ) - else: - src_store_pointer_global(mcdc["gpu_meta"]["state_pointer"], mcdc_array) - src_store_pointer_data(mcdc["gpu_meta"]["state_pointer"], data_tally) - - mcdc["gpu_meta"]["source_program_pointer"] = adapt.cast_voidptr_to_uintp( - src_alloc_program(mcdc["gpu_meta"]["state_pointer"], ARENA_SIZE) - ) - src_init_program(mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT) - return - - @njit - def real_teardown_gpu(mcdc): - src_free_program( - adapt.cast_uintp_to_voidptr(mcdc["gpu_meta"]["source_program_pointer"]) - ) - free_state(adapt.cast_uintp_to_voidptr(mcdc["gpu_meta"]["state_pointer"])) - - global setup_gpu, teardown_gpu - setup_gpu = real_setup_gpu - teardown_gpu = real_teardown_gpu + adapt.harm.memcpy_device_to_host(mcdc, mcdc["gpu_meta"]["state_pointer"]) + adapt.harm.memcpy_device_to_host(data, mcdc["gpu_meta"]["state_pointer"]) - global loop_source - loop_source = gpu_loop_source + src_clear_flags(mcdc["gpu_meta"]["source_program_pointer"]) + mcdc["mpi_work_size"] = full_work_size -# ============================================================================= -# Functions for GPU Interop -# ============================================================================= - -# The symbols declared below will be overwritten to reference external code that -# manages GPU execution (if GPU execution is supported and selected) -alloc_state, free_state = [None] * 2 - -src_alloc_program, src_free_program = [None] * 2 -src_load_constant, src_load_constant, src_store_constant, 
src_store_data = [None] * 4 -src_init_program, src_exec_program, src_complete, src_clear_flags = [None] * 4 - -pre_alloc_program, pre_free_program = [None] * 2 -pre_load_constant, pre_load_data, pre_store_constant, pre_store_data = [None] * 4 -pre_init_program, pre_exec_program, pre_complete, pre_clear_flags = [None] * 4 - - -# If GPU execution is supported and selected, the functions shown below will -# be redefined to overwrite the above symbols and perform initialization/ -# finalization of GPU state -@njit -def setup_gpu(mcdc): - pass - + kernel.set_bank_size(mcdc["bank_active"], 0) -@njit -def teardown_gpu(mcdc): - pass + source_closeout(simulation, 1, 1, data) diff --git a/mcdc/code_factory/literals_generator.py b/mcdc/code_factory/literals_generator.py index 83cbb5d1..563609a1 100644 --- a/mcdc/code_factory/literals_generator.py +++ b/mcdc/code_factory/literals_generator.py @@ -10,7 +10,7 @@ def _literalize(value): def make_literals(simulation): - import mcdc.transport as transport + import mcdc.literals as literals # RPN evaluation buffer size if len(simulation.cells) == 0: @@ -21,6 +21,4 @@ def make_literals(simulation): [np.sum(np.array(x.region_RPN_tokens) >= 0.0) for x in simulation.cells] ) ) - transport.literals.rpn_evaluation_buffer_size = _literalize( - rpn_evaluation_buffer_size - ) + literals.rpn_evaluation_buffer_size = _literalize(rpn_evaluation_buffer_size) diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 9622424e..7fc65260 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -331,33 +331,20 @@ def generate_numba_objects(simulation): f.write(text) # ================================================================================== - # GPU setup: Adapt transport functions, forward declare, and build program + # GPU preparation: Adapt transport functions, forward declare, and build program # 
================================================================================== if config.target == "gpu": - from mcdc.code_factory.gpu.program_builder import ( - adapt_transport_functions, - forward_declare_gpu_program, - ) - - adapt_transport_functions() - forward_declare_gpu_program() - - from mcdc.code_factory.gpu.program_builder import ( - adapt_transport_functions_post_declare, - ) - - adapt_transport_functions_post_declare() - - from mcdc.code_factory.gpu.program_builder import build_gpu_program - - build_gpu_program(data["size"]) + gpu_builder.adapt_transport_functions() + gpu_builder.forward_declare_gpu_program() + gpu_builder.adapt_transport_functions_post_declare() + gpu_builder.build_gpu_program(data["size"]) # ================================================================================== # Allocate the flattened data and re-set the objects # ================================================================================== - data["array"], data["pointer"] = create_data_array(data["size"], type_map[float]) + data["array"], data["pointer"] = create_data_array(data["size"]) data["size"] = 0 for object_ in objects: @@ -410,6 +397,10 @@ def generate_numba_objects(simulation): for name in bank_names: mcdc_simulation[name]["tag"] = getattr(simulation, name).tag + # GPU program setup + if config.target == "gpu": + gpu_builder.setup_gpu_program(mcdc_simulation_container, data["array"]) + return mcdc_simulation_container, data["array"] @@ -667,22 +658,22 @@ def set_object( # ============================================================================= -def create_data_array(size, dtype): +def create_data_array(size): if not config.target == "gpu": - data = np.zeros(size, dtype=dtype) + data = np.zeros(size, dtype=np.float64) return data, 0 else: - create_data_array_on_gpu(size, dtype) + return create_data_array_on_gpu(size) @njit -def create_data_array_on_gpu(size, dtype): +def create_data_array_on_gpu(size): if config.gpu_state_storage == "managed": 
data_ptr = gpu_builder.alloc_managed_bytes(size) else: data_ptr = gpu_builder.alloc_device_bytes(size) data_uint = voidptr_to_uintp(data_ptr) - data = nb.carray(data_ptr, (size,), dtype) + data = nb.carray(data_ptr, (size,), dtype=np.float64) return data, data_uint @@ -691,18 +682,17 @@ def create_simulation_container(dtype): simulation_container = np.zeros((1,), dtype=dtype) return simulation_container, 0 else: - create_simulation_container_on_gpu(dtype) + return create_simulation_container_on_gpu(dtype, dtype.itemsize) @njit -def create_simulation_container_on_gpu(dtype): - size = dtype.itemsize +def create_simulation_container_on_gpu(dtype, size): if config.gpu_state_storage == "managed": simulation_ptr = gpu_builder.alloc_managed_bytes(size) else: simulation_ptr = gpu_builder.alloc_device_bytes(size) simulation_uint = voidptr_to_uintp(simulation_ptr) - simulation = nb.carray(simulation_ptr, (size,), dtype) + simulation = nb.carray(simulation_ptr, (1,), dtype) return simulation, simulation_uint diff --git a/mcdc/transport/literals.py b/mcdc/literals.py similarity index 100% rename from mcdc/transport/literals.py rename to mcdc/literals.py diff --git a/mcdc/main.py b/mcdc/main.py index 43b1a128..53572706 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -297,16 +297,6 @@ def preparation(): simulation["bank_source"]["size"] = N_local MPI.COMM_WORLD.Barrier() - # ================================================================================== - # Platform targeting, adapters, and toggles for portability - # ================================================================================== - - # Set up GPU if needed - if config.target == "gpu": - from mcdc.code_factory.gpu.program_builder import setup_gpu - - setup_gpu(mcdc_container, data) - # ================================================================================== # Finalize # ================================================================================== diff --git a/mcdc/object_/gpu_tools.py 
b/mcdc/object_/gpu_tools.py index a7ad7d3c..a8ef70c7 100644 --- a/mcdc/object_/gpu_tools.py +++ b/mcdc/object_/gpu_tools.py @@ -12,7 +12,6 @@ class GPUMeta(ObjectSingleton): label: str = "gpu_meta" # state_pointer: uint64 = uint64(0) - source_program_pointer: uint64 = uint64(0) - precursor_program_pointer: uint64 = uint64(0) - structure_pointer: uint64 = uint64(0) + program_pointer: uint64 = uint64(0) + simulation_pointer: uint64 = uint64(0) data_pointer: uint64 = uint64(0) diff --git a/mcdc/transport/__init__.py b/mcdc/transport/__init__.py index de5264ac..c2e7b125 100644 --- a/mcdc/transport/__init__.py +++ b/mcdc/transport/__init__.py @@ -1,5 +1,4 @@ import mcdc.transport.geometry as geometry -import mcdc.transport.literals as literals import mcdc.transport.particle_bank as particle_bank import mcdc.transport.simulation as simulation import mcdc.transport.util as util diff --git a/mcdc/transport/geometry/interface.py b/mcdc/transport/geometry/interface.py index 5c396308..0de3f79a 100644 --- a/mcdc/transport/geometry/interface.py +++ b/mcdc/transport/geometry/interface.py @@ -6,7 +6,7 @@ #### import mcdc.mcdc_get as mcdc_get -import mcdc.transport.literals as literals +import mcdc.literals as literals import mcdc.transport.mesh as mesh import mcdc.transport.physics as physics import mcdc.transport.tally as tally_module From c2057725e91df3151568b6b05b45ea8071088bbd Mon Sep 17 00:00:00 2001 From: ilhamv Date: Mon, 16 Mar 2026 05:59:25 -0700 Subject: [PATCH 34/50] rename variables in transport/source --- mcdc/main.py | 2 ++ mcdc/transport/source.py | 53 ++++++++++++++++++++++------------------ 2 files changed, 31 insertions(+), 24 deletions(-) diff --git a/mcdc/main.py b/mcdc/main.py index 53572706..6a569f82 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -77,6 +77,8 @@ def run(): # Run simulation import mcdc.transport.simulation as simulation_module + print("PASS") + exit() if settings.neutron_eigenvalue_mode: 
simulation_module.eigenvalue_simulation(simulation_container, data) else: diff --git a/mcdc/transport/source.py b/mcdc/transport/source.py index f56144f2..36291b67 100644 --- a/mcdc/transport/source.py +++ b/mcdc/transport/source.py @@ -18,13 +18,13 @@ @njit -def source_particle(P_rec_arr, seed, simulation, data): - P_rec = P_rec_arr[0] - P_rec["rng_seed"] = seed +def source_particle(particle_container, seed, simulation, data): + particle = particle_container[0] + particle["rng_seed"] = seed # Sample source # TODO: use cdf and binary search instead - xi = rng.lcg(P_rec_arr) + xi = rng.lcg(particle_container) tot = 0.0 for source in simulation["sources"]: tot += source["probability"] @@ -37,25 +37,28 @@ def source_particle(P_rec_arr, seed, simulation, data): y = source["point"][1] z = source["point"][2] else: - x = sample_uniform(source["x"][0], source["x"][1], P_rec_arr) - y = sample_uniform(source["y"][0], source["y"][1], P_rec_arr) - z = sample_uniform(source["z"][0], source["z"][1], P_rec_arr) + x = sample_uniform(source["x"][0], source["x"][1], particle_container) + y = sample_uniform(source["y"][0], source["y"][1], particle_container) + z = sample_uniform(source["z"][0], source["z"][1], particle_container) # Direction if source["isotropic_direction"]: - ux, uy, uz = sample_isotropic_direction(P_rec_arr) + ux, uy, uz = sample_isotropic_direction(particle_container) elif source["white_direction"]: rx = source["direction"][0] ry = source["direction"][1] rz = source["direction"][2] - ux, uy, uz = sample_white_direction(rx, ry, rz, P_rec_arr) + ux, uy, uz = sample_white_direction(rx, ry, rz, particle_container) elif source["mono_direction"]: ux = source["direction"][0] uy = source["direction"][1] uz = source["direction"][2] else: ux, uy, uz = sample_direction( - source["polar_cosine"], source["azimuthal"], source["direction"], P_rec_arr + source["polar_cosine"], + source["azimuthal"], + source["direction"], + particle_container, ) # Energy @@ -66,7 +69,7 @@ def 
source_particle(P_rec_arr, seed, simulation, data): else: ID = source["energy_group_pmf_ID"] pmf = simulation["pmf_distributions"][ID] - g = sample_pmf(pmf, P_rec_arr, data) + g = sample_pmf(pmf, particle_container, data) else: g = 0 if source["mono_energetic"]: @@ -74,13 +77,15 @@ def source_particle(P_rec_arr, seed, simulation, data): else: ID = source["energy_pdf_ID"] table = simulation["tabulated_distributions"][ID] - E = sample_tabulated(table, P_rec_arr, data) + E = sample_tabulated(table, particle_container, data) # Time if source["discrete_time"]: t = source["time"] else: - t = sample_uniform(source["time_range"][0], source["time_range"][1], P_rec_arr) + t = sample_uniform( + source["time_range"][0], source["time_range"][1], particle_container + ) # Motion translation if source["moving"]: @@ -120,14 +125,14 @@ def source_particle(P_rec_arr, seed, simulation, data): z += trans_0[2] + V[2] * t_local # Make and return particle - P_rec["x"] = x - P_rec["y"] = y - P_rec["z"] = z - P_rec["t"] = t - P_rec["ux"] = ux - P_rec["uy"] = uy - P_rec["uz"] = uz - P_rec["g"] = g - P_rec["E"] = E - P_rec["w"] = 1.0 - P_rec["particle_type"] = source["particle_type"] + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["t"] = t + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + particle["g"] = g + particle["E"] = E + particle["w"] = 1.0 + particle["particle_type"] = source["particle_type"] From 73cc22770e40e4dd9e80bf36ae33a095321e5d08 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Mon, 16 Mar 2026 06:37:11 -0700 Subject: [PATCH 35/50] reorganize gpu mode parameters --- .../gpu/transport/particle_bank.py | 4 +- mcdc/constant.py | 14 ++++ mcdc/main.py | 67 ++++++++++++++----- mcdc/object_/settings.py | 4 +- 4 files changed, 69 insertions(+), 20 deletions(-) diff --git a/mcdc/code_factory/gpu/transport/particle_bank.py b/mcdc/code_factory/gpu/transport/particle_bank.py index 8cdc46a4..aee6c20b 100644 --- 
a/mcdc/code_factory/gpu/transport/particle_bank.py +++ b/mcdc/code_factory/gpu/transport/particle_bank.py @@ -7,7 +7,7 @@ import mcdc.transport.util as util import mcdc.code_factory.gpu.program_builder as gpu_program -from mcdc.constant import GPU_STRATEGY_SIMPLE_ASYNC +from mcdc.constant import GPU_ASYNC_SIMPLE # ============================================================================= # Bank and pop particle @@ -20,7 +20,7 @@ def bank_active_particle(particle_container, program): active_particle_container = util.local_array(1, type_.particle) particle_module.copy(active_particle_container, particle_container) - if simulation["settings"]["gpu_strategy"] == GPU_STRATEGY_SIMPLE_ASYNC: + if simulation["settings"]["gpu_async_type"] == GPU_ASYNC_SIMPLE: gpu_program.step_async(program, active_particle_container[0]) """ else: diff --git a/mcdc/constant.py b/mcdc/constant.py index ebd99b42..ee235838 100644 --- a/mcdc/constant.py +++ b/mcdc/constant.py @@ -184,5 +184,19 @@ WW_MIN = 0 WW_WOLLABER = 1 +# ====================================================================================== +# GPU settings +# ====================================================================================== + # GPU strategies GPU_STRATEGY_SIMPLE_ASYNC = 0 +GPU_STRATEGY_ASYNC = 0 +GPU_STRATEGY_EVENT = 1 + +# GPU async. 
types +GPU_ASYNC_SIMPLE = 0 + +# GPU storage types +GPU_STORAGE_SEPARATE = 0 +GPU_STORAGE_MANAGED = 1 +GPU_STORAGE_UNITED = 2 diff --git a/mcdc/main.py b/mcdc/main.py index 6a569f82..c217183c 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -26,18 +26,6 @@ def run(): settings = simulationPy.settings master = MPI.COMM_WORLD.Get_rank() == 0 - # Override settings with command-line arguments - import mcdc.config as config - - if config.args.N_particle is not None: - settings.N_particle = config.args.N_particle - if config.args.N_batch is not None: - settings.N_batch = config.args.N_batch - if config.args.output is not None: - settings.output_name = config.args.output - if config.args.progress_bar is not None: - settings.use_progress_bar = config.args.progress_bar - # ================================================================================== # Preparation # ================================================================================== @@ -45,6 +33,9 @@ def run(): # TIMER: preparation time_prep_start = MPI.Wtime() + # Override settings with command-line arguments + override_settings() + # Generate the program state: # - `simulation`: the simulation, storing fixed side data and meta data that # describes arbitrarily-sized data @@ -121,11 +112,7 @@ def run(): # Finalizing # ================================================================================== - # GPU teardowns if needed - if config.target == "gpu": - from mcdc.code_factory.gpu.program_builder import teardown_gpu_program - - teardown_gpu_program(simulation) + finalize(simulation) # ====================================================================================== @@ -304,3 +291,49 @@ def preparation(): # ================================================================================== return simulation_container, data + + +# ====================================================================================== +# Misc. 
+# ====================================================================================== + + +def override_settings(): + import mcdc.config as config + from mcdc.object_.simulation import simulation as simulationPy + + settings = simulationPy.settings + + if config.args.N_particle is not None: + settings.N_particle = config.args.N_particle + if config.args.N_batch is not None: + settings.N_batch = config.args.N_batch + if config.args.output is not None: + settings.output_name = config.args.output + if config.args.progress_bar is not None: + settings.use_progress_bar = config.args.progress_bar + + # GPU settings + if config.target == "gpu": + from mcdc.constant import ( + GPU_STRATEGY_ASYNC, + GPU_STRATEGY_EVENT, + GPU_STORAGE_SEPARATE, + GPU_STORAGE_MANAGED, + GPU_STORAGE_UNITED, + ) + + if config.args.gpu_strategy == "async": + settings.gpu_strategy = GPU_STRATEGY_ASYNC + elif config.args.gpu_strategy == "event": + settings.gpu_strategy = GPU_STRATEGY_EVENT + + +def finalize(simulation): + import mcdc.config as config + + # GPU teardowns if needed + if config.target == "gpu": + from mcdc.code_factory.gpu.program_builder import teardown_gpu_program + + teardown_gpu_program(simulation) diff --git a/mcdc/object_/settings.py b/mcdc/object_/settings.py index dc819bcb..d08f743b 100644 --- a/mcdc/object_/settings.py +++ b/mcdc/object_/settings.py @@ -67,7 +67,9 @@ class Settings(ObjectSingleton): neutron_eigenvalue_mode: bool = False # GPU mode - gpu_strategy: int = GPU_STRATEGY_SIMPLE_ASYNC + gpu_strategy: int = GPU_STRATEGY_ASYNC + gpu_async_type: int = GPU_ASYNC_SIMPLE + gpu_storage: int = GPU_STORAGE_SEPARATE def __post_init__(self): super().__init__() From e58f28568250c6efacd17061ec87cafa495482ac Mon Sep 17 00:00:00 2001 From: ilhamv Date: Mon, 16 Mar 2026 13:48:13 -0700 Subject: [PATCH 36/50] update gpu transport source loop --- mcdc/code_factory/gpu/program_builder.py | 14 ++- mcdc/code_factory/gpu/transport/__init__.py | 3 +- 
mcdc/code_factory/gpu/transport/simulation.py | 90 +++++++++++-------- mcdc/code_factory/numba_objects_generator.py | 1 + 4 files changed, 66 insertions(+), 42 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 83f57197..99a2b875 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -30,7 +30,6 @@ def adapt_transport_functions(): transport.particle_bank.report_empty_bank = ( gpu_transport.particle_bank.report_empty_bank ) - # transport.simulation = gpu_transport.simulation transport.util.atomic_add = gpu_transport.util.atomic_add transport.util.local_array = gpu_transport.util.local_array @@ -41,6 +40,13 @@ def adapt_transport_functions_post_declare(): transport.util.access_simulation = access_simulation +def adapt_transport_functions_post_setup(): + import mcdc.code_factory.gpu.transport as gpu_transport + import mcdc.transport as transport + + transport.simulation.source_loop = gpu_transport.simulation.source_loop + + # ====================================================================================== # Forward declaration # ====================================================================================== @@ -308,9 +314,9 @@ def setup_gpu_program(simulation_container, data): init_program(simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT) -def teardown_gpu_program(mcdc): - free_program(cast_uintp_to_voidptr(mcdc["gpu_meta"]["program_pointer"])) - free_state(cast_uintp_to_voidptr(mcdc["gpu_meta"]["state_pointer"])) +def teardown_gpu_program(simulation): + free_program(cast_uintp_to_voidptr(simulation["gpu_meta"]["program_pointer"])) + free_state(cast_uintp_to_voidptr(simulation["gpu_meta"]["state_pointer"])) # ====================================================================================== diff --git a/mcdc/code_factory/gpu/transport/__init__.py b/mcdc/code_factory/gpu/transport/__init__.py index 97d7eb91..6dfd8b41 100644 --- 
a/mcdc/code_factory/gpu/transport/__init__.py +++ b/mcdc/code_factory/gpu/transport/__init__.py @@ -1,5 +1,4 @@ import mcdc.code_factory.gpu.transport.geometry as geometry import mcdc.code_factory.gpu.transport.particle_bank as particle_bank - -# import mcdc.code_factory.gpu.transport.simulation as simulation +import mcdc.code_factory.gpu.transport.simulation as simulation import mcdc.code_factory.gpu.transport.util as util diff --git a/mcdc/code_factory/gpu/transport/simulation.py b/mcdc/code_factory/gpu/transport/simulation.py index 42084bea..b35a2114 100644 --- a/mcdc/code_factory/gpu/transport/simulation.py +++ b/mcdc/code_factory/gpu/transport/simulation.py @@ -1,24 +1,36 @@ +import harmonize + +from numba import njit + +### + import mcdc.config as config +import mcdc.transport.particle_bank as particle_bank_module + +from mcdc.constant import GPU_STORAGE_SEPARATE, GPU_STRATEGY_ASYNC +from mcdc.code_factory.gpu.program_builder import ( + BLOCK_COUNT, + clear_flags, + complete, + exec_program, +) +from mcdc.transport.simulation import source_closeout caching = config.caching @njit(cache=caching) -def source_loop(seed, data, mcdc): - # Progress bar indicator - N_prog = 0 - - # ===================================================================== - # GPU Interop - # ===================================================================== - +def source_loop(seed, simulation, data): # For async execution iter_count = 655360000 # For event-based execution batch_size = 64 - full_work_size = mcdc["mpi_work_size"] - if ASYNC_EXECUTION: + settings = simulation["settings"] + + full_work_size = simulation["mpi_work_size"] + + if settings["gpu_strategy"] == GPU_STRATEGY_ASYNC: phase_size = 1000000000 else: phase_size = 1000000 @@ -26,45 +38,51 @@ def source_loop(seed, data, mcdc): for phase in range(phase_count): - mcdc["mpi_work_iter"][0] = phase_size * phase - mcdc["mpi_work_size"] = min(phase_size * (phase + 1), full_work_size) - mcdc["source_seed"] = seed + 
simulation["mpi_work_iter"][0] = phase_size * phase + simulation["mpi_work_size"] = min(phase_size * (phase + 1), full_work_size) + simulation["source_seed"] = seed # Store the global state to the GPU - if config.gpu_state_storage == "separate": - adapt.harm.memcpy_host_to_device(mcdc["gpu_meta"]["state_pointer"], mcdc) - adapt.harm.memcpy_host_to_device(mcdc["gpu_meta"]["state_pointer"], data) + if settings["gpu_storage"] == GPU_STORAGE_SEPARATE: + harmonize.memcpy_host_to_device( + simulation["gpu_meta"]["state_pointer"], simulation + ) + harmonize.memcpy_host_to_device( + simulation["gpu_meta"]["state_pointer"], data + ) # Execute the program, and continue to do so until it is done - if ASYNC_EXECUTION: - src_exec_program( - mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT, iter_count + if settings["gpu_strategy"] == GPU_STRATEGY_ASYNC: + exec_program( + simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT, iter_count ) - while not src_complete(mcdc["gpu_meta"]["source_program_pointer"]): - kernel.dd_particle_send(mcdc) - src_exec_program( - mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT, iter_count + while not complete(simulation["gpu_meta"]["program_pointer"]): + exec_program( + simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT, iter_count ) else: - src_exec_program( - mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT, batch_size + exec_program( + simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT, batch_size ) - while not src_complete(mcdc["gpu_meta"]["source_program_pointer"]): - kernel.dd_particle_send(mcdc) - src_exec_program( - mcdc["gpu_meta"]["source_program_pointer"], BLOCK_COUNT, batch_size + while not complete(simulation["gpu_meta"]["program_pointer"]): + exec_program( + simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT, batch_size ) - src_clear_flags(mcdc["gpu_meta"]["source_program_pointer"]) - # Recover the original program state + clear_flags(simulation["gpu_meta"]["program_pointer"]) + # Recover the original 
program state if config.gpu_state_storage == "separate": - adapt.harm.memcpy_device_to_host(mcdc, mcdc["gpu_meta"]["state_pointer"]) - adapt.harm.memcpy_device_to_host(data, mcdc["gpu_meta"]["state_pointer"]) + harmonize.memcpy_device_to_host( + simulation, simulation["gpu_meta"]["state_pointer"] + ) + harmonize.memcpy_device_to_host( + data, simulation["gpu_meta"]["state_pointer"] + ) - src_clear_flags(mcdc["gpu_meta"]["source_program_pointer"]) + clear_flags(simulation["gpu_meta"]["program_pointer"]) - mcdc["mpi_work_size"] = full_work_size + simulation["mpi_work_size"] = full_work_size - kernel.set_bank_size(mcdc["bank_active"], 0) + particle_bank_module.set_bank_size(simulation["bank_active"], 0) source_closeout(simulation, 1, 1, data) diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 7fc65260..06c5f47b 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -400,6 +400,7 @@ def generate_numba_objects(simulation): # GPU program setup if config.target == "gpu": gpu_builder.setup_gpu_program(mcdc_simulation_container, data["array"]) + gpu_builder.adapt_transport_functions_post_setup() return mcdc_simulation_container, data["array"] From aef48b498e597283c7b498beef27270b2466f8bd Mon Sep 17 00:00:00 2001 From: ilhamv Date: Mon, 16 Mar 2026 22:12:16 -0700 Subject: [PATCH 37/50] working implementation --- mcdc/code_factory/gpu/program_builder.py | 1 + mcdc/code_factory/gpu/transport/simulation.py | 33 +++++---- mcdc/code_factory/numba_objects_generator.py | 67 +++++++++++++------ mcdc/main.py | 9 ++- mcdc/object_/gpu_tools.py | 12 ++-- mcdc/object_/particle.py | 4 +- 6 files changed, 80 insertions(+), 46 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index 99a2b875..a90e893e 100644 --- a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -314,6 +314,7 
@@ def setup_gpu_program(simulation_container, data): init_program(simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT) +@njit def teardown_gpu_program(simulation): free_program(cast_uintp_to_voidptr(simulation["gpu_meta"]["program_pointer"])) free_state(cast_uintp_to_voidptr(simulation["gpu_meta"]["state_pointer"])) diff --git a/mcdc/code_factory/gpu/transport/simulation.py b/mcdc/code_factory/gpu/transport/simulation.py index b35a2114..591ec618 100644 --- a/mcdc/code_factory/gpu/transport/simulation.py +++ b/mcdc/code_factory/gpu/transport/simulation.py @@ -4,16 +4,11 @@ ### +import mcdc.code_factory.gpu.program_builder as gpu_module import mcdc.config as config import mcdc.transport.particle_bank as particle_bank_module from mcdc.constant import GPU_STORAGE_SEPARATE, GPU_STRATEGY_ASYNC -from mcdc.code_factory.gpu.program_builder import ( - BLOCK_COUNT, - clear_flags, - complete, - exec_program, -) from mcdc.transport.simulation import source_closeout caching = config.caching @@ -52,23 +47,25 @@ def source_loop(seed, simulation, data): ) # Execute the program, and continue to do so until it is done + block_count = gpu_module.BLOCK_COUNT + if settings["gpu_strategy"] == GPU_STRATEGY_ASYNC: - exec_program( - simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT, iter_count + gpu_module.exec_program( + simulation["gpu_meta"]["program_pointer"], block_count, iter_count ) - while not complete(simulation["gpu_meta"]["program_pointer"]): - exec_program( - simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT, iter_count + while not gpu_module.complete(simulation["gpu_meta"]["program_pointer"]): + gpu_module.exec_program( + simulation["gpu_meta"]["program_pointer"], block_count, iter_count ) else: - exec_program( - simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT, batch_size + gpu_module.exec_program( + simulation["gpu_meta"]["program_pointer"], block_count, batch_size ) - while not complete(simulation["gpu_meta"]["program_pointer"]): - exec_program( - 
simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT, batch_size + while not gpu_module.complete(simulation["gpu_meta"]["program_pointer"]): + gpu_module.exec_program( + simulation["gpu_meta"]["program_pointer"], block_count, batch_size ) - clear_flags(simulation["gpu_meta"]["program_pointer"]) + gpu_module.clear_flags(simulation["gpu_meta"]["program_pointer"]) # Recover the original program state if config.gpu_state_storage == "separate": @@ -79,7 +76,7 @@ def source_loop(seed, simulation, data): data, simulation["gpu_meta"]["state_pointer"] ) - clear_flags(simulation["gpu_meta"]["program_pointer"]) + gpu_module.clear_flags(simulation["gpu_meta"]["program_pointer"]) simulation["mpi_work_size"] = full_work_size diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 06c5f47b..2f47f8b3 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -31,15 +31,16 @@ from mcdc.util import flatten type_map = { - bool: "?", - float: "f8", - int: "i8", + bool: np.bool_, + float: np.float64, + int: np.int64, str: "U32", - np.bool_: "?", - np.float64: "f8", - np.int64: "i8", - np.uint64: "u8", + np.bool_: np.bool_, + np.float64: np.float64, + np.int64: np.int64, + np.uint64: np.uint64, np.str_: "U32", + np.uintp: np.uintp, } bank_names = ["bank_active", "bank_census", "bank_source", "bank_future"] @@ -205,14 +206,14 @@ def generate_numba_objects(simulation): # Add ID for non-singleton for class_ in mcdc_classes: if issubclass(class_, ObjectNonSingleton): - structures[class_.label].append(("ID", "i8")) + structures[class_.label].append(("ID", type_map[int])) # Set parent and child ID and type if polymorphic if issubclass(class_, ObjectPolymorphic): if class_.__name__[-4:] == "Base" or class_.__name__ == "Tally": - structures[class_.label].append(("child_type", "i8")) - structures[class_.label].append(("child_ID", "i8")) + structures[class_.label].append(("child_type", 
type_map[int])) + structures[class_.label].append(("child_ID", type_map[int])) else: - structures[class_.label].append(("parent_ID", "i8")) + structures[class_.label].append(("parent_ID", type_map[int])) # Add particle data to particle banks and add particle banks to the simulation for name in bank_names: @@ -280,7 +281,7 @@ def generate_numba_objects(simulation): new_structure.append( (field, into_dtype(structures[item[2].label]), (N,)) ) - new_structure.append((f"N_{plural_to_singular(field)}", "i8")) + new_structure.append((f"N_{plural_to_singular(field)}", type_map[int])) record[f"N_{plural_to_singular(field)}"] = N # List of polymorphics @@ -295,7 +296,7 @@ def generate_numba_objects(simulation): (N,), ) ) - new_structure.append((f"N_{class_.label}", "i8")) + new_structure.append((f"N_{class_.label}", type_map[int])) record[f"N_{class_.label}"] = N # Singleton @@ -311,6 +312,12 @@ def generate_numba_objects(simulation): if MPI.COMM_WORLD.Get_rank() == 0: with open(f"{Path(mcdc.__file__).parent}/numba_types.py", "w") as f: text = "# The following is automatically generated by code_factory.py\n\n" + text += "from numpy import bool_\n" + text += "from numpy import float64\n" + text += "from numpy import int64\n" + text += "from numpy import uint64\n" + text += "from numpy import uintp\n" + text += "\n###\n\n" text += ( "from mcdc.code_factory.numba_objects_generator import into_dtype\n\n" ) @@ -318,9 +325,31 @@ def generate_numba_objects(simulation): for label in structures.keys(): text += f"{label} = into_dtype([\n" structure = structures[label] + + # GPU meta override + if label == "gpu_meta": + for item in structure: + if type(item[1]) != np.dtypes.VoidDType: + if isinstance(item[1], str): + dtype = f"'{item[1]}'" + elif item[1].__name__ == "uint64": + dtype = "uintp" + else: + dtype = item[1].__name__ + text += f" ('{item[0]}', {dtype}),\n" + text += "])\n\n" + continue + for item in structure: if type(item[1]) != np.dtypes.VoidDType: - text += f" {item},\n" 
+ if isinstance(item[1], str): + dtype = f"'{item[1]}'" + else: + dtype = item[1].__name__ + if len(item) == 3: + text += f" ('{item[0]}', {dtype}, {item[2]}),\n" + else: + text += f" ('{item[0]}', {dtype}),\n" else: if len(item) == 3: text += f" ('{item[0]}', {plural_to_singular(item[0])}, {item[2]}),\n" @@ -478,8 +507,8 @@ def polymorphic_base(x): elif simple_scalar: structure.append((field, type_map[hint])) elif simple_list or numpy_array: - structure.append((f"{field}_offset", "i8")) - structure.append((f"{field}_length", "i8")) + structure.append((f"{field}_offset", type_map[int])) + structure.append((f"{field}_length", type_map[int])) if hint_origin_shape is not None: accessor_target.append((f"{field}", hint_origin_shape)) else: @@ -487,13 +516,13 @@ def polymorphic_base(x): # MC/DC classes elif non_polymorphic(hint) or polymorphic_base(hint): - structure.append((f"{field}_ID", "i8")) + structure.append((f"{field}_ID", type_map[int])) # List of MC/DC classes elif list_of_non_polymorphics or list_of_polymorphic_bases: singular = plural_to_singular(field) - structure.append((f"N_{singular}", "i8")) - structure.append((f"{singular}_IDs_offset", "i8")) + structure.append((f"N_{singular}", type_map[int])) + structure.append((f"{singular}_IDs_offset", type_map[int])) if hint_origin_shape is not None: accessor_target.append((f"{singular}_IDs", hint_origin_shape)) else: diff --git a/mcdc/main.py b/mcdc/main.py index c217183c..952cbb70 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -68,8 +68,6 @@ def run(): # Run simulation import mcdc.transport.simulation as simulation_module - print("PASS") - exit() if settings.neutron_eigenvalue_mode: simulation_module.eigenvalue_simulation(simulation_container, data) else: @@ -328,6 +326,13 @@ def override_settings(): elif config.args.gpu_strategy == "event": settings.gpu_strategy = GPU_STRATEGY_EVENT + if config.args.gpu_state_storage == "separate": + settings.gpu_storage = GPU_STORAGE_SEPARATE + elif 
config.args.gpu_state_storage == "managed": + settings.gpu_storage = GPU_STORAGE_MANAGED + elif config.args.gpu_state_storage == "united": + settings.gpu_storage = GPU_STORAGE_UNITED + def finalize(simulation): import mcdc.config as config diff --git a/mcdc/object_/gpu_tools.py b/mcdc/object_/gpu_tools.py index a8ef70c7..b4d4965a 100644 --- a/mcdc/object_/gpu_tools.py +++ b/mcdc/object_/gpu_tools.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from numpy import uint64 +from numpy import uintp #### @@ -11,7 +11,9 @@ class GPUMeta(ObjectSingleton): # Annotations for Numba mode label: str = "gpu_meta" # - state_pointer: uint64 = uint64(0) - program_pointer: uint64 = uint64(0) - simulation_pointer: uint64 = uint64(0) - data_pointer: uint64 = uint64(0) + state_pointer: uintp = uintp(0) + program_pointer: uintp = uintp(0) + simulation_pointer: uintp = uintp(0) + data_pointer: uintp = uintp(0) + + # Note that the uintp is manually overriden in code_factory. diff --git a/mcdc/object_/particle.py b/mcdc/object_/particle.py index 93c486b3..2e5b0b51 100644 --- a/mcdc/object_/particle.py +++ b/mcdc/object_/particle.py @@ -2,7 +2,7 @@ from dataclasses import dataclass, field from typing import Annotated -from numpy import int64, uint +from numpy import int64, uint64 from numpy.typing import NDArray #### @@ -25,7 +25,7 @@ class ParticleData(ObjectBase): E: float = 0.0 w: float = 0.0 particle_type: int = PARTICLE_NEUTRON - rng_seed: uint = uint(1) + rng_seed: uint64 = uint64(1) @dataclass From f1515056ca55f2b4eacee56136edac472193696d Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Tue, 17 Mar 2026 17:18:35 +0700 Subject: [PATCH 38/50] improve literals --- mcdc/code_factory/literals_generator.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mcdc/code_factory/literals_generator.py b/mcdc/code_factory/literals_generator.py index 563609a1..3e5329a5 100644 --- a/mcdc/code_factory/literals_generator.py +++ b/mcdc/code_factory/literals_generator.py 
@@ -4,9 +4,10 @@ def _literalize(value): + namespace = {} jit_str = f"@njit\ndef impl():\n return {value}\n" - exec(jit_str, globals(), locals()) - return eval("impl") + exec(jit_str, globals(), namespace) + return namespace["impl"] def make_literals(simulation): From 054b1dba6db1e8f4fe399e5d968320570463e4db Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Tue, 17 Mar 2026 20:05:16 +0700 Subject: [PATCH 39/50] cleanup debug --- mcdc/transport/physics/neutron/multigroup.py | 5 ++--- mcdc/transport/physics/neutron/native.py | 1 - mcdc/transport/tally/score.py | 1 - 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/mcdc/transport/physics/neutron/multigroup.py b/mcdc/transport/physics/neutron/multigroup.py index a1f51494..56b6616f 100644 --- a/mcdc/transport/physics/neutron/multigroup.py +++ b/mcdc/transport/physics/neutron/multigroup.py @@ -142,7 +142,6 @@ def collision(particle_container, program, data): else: total += SigmaF if total > xi: - return fission(particle_container, program, data) else: particle["alive"] = False @@ -262,7 +261,7 @@ def fission(particle_container, program, data): nu = mcdc_get.multigroup_material.mgxs_nu_f(g, material, data) nu_p = mcdc_get.multigroup_material.mgxs_nu_p(g, material, data) if J > 0: - stride = material["G"] + stride = material["J"] start = material["mgxs_nu_d_offset"] + g * stride nu_d = data[start : start + stride] # Above is equivalent to: nu_d = mcdc_get.multigroup_material.mgxs_nu_d_vector(g, material, data) @@ -298,7 +297,7 @@ def fission(particle_container, program, data): if xi < total: prompt = True stride = material["G"] - start = material["mgxs_chi_p"] + g * stride + start = material["mgxs_chi_p_offset"] + g * stride spectrum = data[start : start + stride] # Above is equivalent to: spectrum = mcdc_get.multigroup_material.mgxs_chi_p_vector(g, material, data) else: diff --git a/mcdc/transport/physics/neutron/native.py b/mcdc/transport/physics/neutron/native.py index 43e6aaf1..d1229976 100644 --- 
a/mcdc/transport/physics/neutron/native.py +++ b/mcdc/transport/physics/neutron/native.py @@ -420,7 +420,6 @@ def elastic_scattering(reaction, particle_container, nuclide, simulation, data): # Sample the scattering cosine from the multi-PDF distribution multi_table = simulation["multi_table_distributions"][reaction["mu_table_ID"]] mu0 = sample_multi_table(E, particle_container, multi_table, data) - return # Scatter the direction in COM azi = 2.0 * PI * rng.lcg(particle_container) diff --git a/mcdc/transport/tally/score.py b/mcdc/transport/tally/score.py index 19e43970..b4abf654 100644 --- a/mcdc/transport/tally/score.py +++ b/mcdc/transport/tally/score.py @@ -378,7 +378,6 @@ def eigenvalue_tally(particle_container, distance, simulation, data): ) # Fission production (needed even during inactive cycle) - return util.atomic_add(simulation["eigenvalue_tally_nuSigmaF"], 0, flux * nuSigmaF) # Done, if inactive From b7971d87018b75e7061527cb799d8e7c6babbe1d Mon Sep 17 00:00:00 2001 From: ilhamv Date: Wed, 18 Mar 2026 13:29:11 -0700 Subject: [PATCH 40/50] in the middle of debugging bank management --- mcdc/main.py | 1 + mcdc/transport/particle_bank.py | 8 +++++++- mcdc/transport/simulation.py | 1 + mcdc/transport/technique.py | 1 + 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/mcdc/main.py b/mcdc/main.py index 952cbb70..3cd1b4ad 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -72,6 +72,7 @@ def run(): simulation_module.eigenvalue_simulation(simulation_container, data) else: simulation_module.fixed_source_simulation(simulation_container, data) + return # TIMER: simulation time_simulation_end = MPI.Wtime() diff --git a/mcdc/transport/particle_bank.py b/mcdc/transport/particle_bank.py index e4c764b7..40049fdc 100644 --- a/mcdc/transport/particle_bank.py +++ b/mcdc/transport/particle_bank.py @@ -163,9 +163,11 @@ def manage_particle_banks(simulation): serial = simulation["mpi_size"] == 1 # TIMER: bank management + time_start = 0.0 if master: with 
objmode(time_start="float64"): time_start = MPI.Wtime() + time_spent = -time_start # Reset source bank set_bank_size(simulation["bank_source"], 0) @@ -191,6 +193,7 @@ def manage_particle_banks(simulation): # TODO: better alternative? source_bank["particle_data"][:size] = census_bank["particle_data"][:size] set_bank_size(source_bank, size) + return # Redistribute work and rebalance bank size across MPI ranks if serial: @@ -202,10 +205,13 @@ def manage_particle_banks(simulation): set_bank_size(simulation["bank_census"], 0) # TIMER: bank management + time_end = 0.0 if master: with objmode(time_end="float64"): time_end = MPI.Wtime() - simulation["runtime_bank_management"] += time_end - time_start + time_spent += time_end + if master: + simulation["runtime_bank_management"] += time_spent # ====================================================================================== diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index cd092888..5263967d 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -74,6 +74,7 @@ def fixed_source_simulation(simulation_container, data): # Manage particle banks: population control and work rebalance particle_bank_module.manage_particle_banks(simulation) + return # Time census-based tally closeout if use_census_based_tally: diff --git a/mcdc/transport/technique.py b/mcdc/transport/technique.py index fe629af3..9c0ffdf1 100644 --- a/mcdc/transport/technique.py +++ b/mcdc/transport/technique.py @@ -41,6 +41,7 @@ def population_control(simulation): M = simulation["settings"]["N_particle"] bank_source = simulation["bank_source"] + return # Scan the bank idx_start, N_local, N = particle_bank_module.bank_scanning(bank_census, simulation) idx_end = idx_start + N_local From 06d2a0c115b76cb83f1e53f4a9cfb2d4f9130459 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Sun, 5 Apr 2026 21:43:21 -0700 Subject: [PATCH 41/50] replace simulation with program in particle bank adds --- mcdc/main.py | 1 - 
mcdc/transport/particle_bank.py | 20 +++++++++++++------- mcdc/transport/physics/neutron/multigroup.py | 12 +++--------- mcdc/transport/physics/neutron/native.py | 12 +++--------- mcdc/transport/simulation.py | 7 +++---- mcdc/transport/technique.py | 7 ++++--- 6 files changed, 26 insertions(+), 33 deletions(-) diff --git a/mcdc/main.py b/mcdc/main.py index 3cd1b4ad..952cbb70 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -72,7 +72,6 @@ def run(): simulation_module.eigenvalue_simulation(simulation_container, data) else: simulation_module.fixed_source_simulation(simulation_container, data) - return # TIMER: simulation time_simulation_end = MPI.Wtime() diff --git a/mcdc/transport/particle_bank.py b/mcdc/transport/particle_bank.py index 40049fdc..8dccb25f 100644 --- a/mcdc/transport/particle_bank.py +++ b/mcdc/transport/particle_bank.py @@ -58,22 +58,26 @@ def _bank_particle(particle_container, bank): @njit -def bank_active_particle(particle_container, simulation): +def bank_active_particle(particle_container, program): + simulation = util.access_simulation(program) _bank_particle(particle_container, simulation["bank_active"]) @njit -def bank_source_particle(particle_container, simulation): +def bank_source_particle(particle_container, program): + simulation = util.access_simulation(program) _bank_particle(particle_container, simulation["bank_source"]) @njit -def bank_census_particle(particle_container, simulation): +def bank_census_particle(particle_container, program): + simulation = util.access_simulation(program) _bank_particle(particle_container, simulation["bank_census"]) @njit -def bank_future_particle(particle_container, simulation): +def bank_future_particle(particle_container, program): + simulation = util.access_simulation(program) _bank_particle(particle_container, simulation["bank_future"]) @@ -117,7 +121,9 @@ def report_empty_bank(bank): @njit -def promote_future_particles(simulation, data): +def promote_future_particles(program, data): + simulation = 
util.access_simulation(program) + # Get the banks future_bank = simulation["bank_future"] @@ -141,7 +147,8 @@ def promote_future_particles(simulation, data): # Promote the future particle to census bank if particle["t"] < next_census_time: - bank_census_particle(particle_container, simulation) + + bank_census_particle(particle_container, program) add_bank_size(future_bank, -1) # Consolidate the emptied space in the future bank @@ -193,7 +200,6 @@ def manage_particle_banks(simulation): # TODO: better alternative? source_bank["particle_data"][:size] = census_bank["particle_data"][:size] set_bank_size(source_bank, size) - return # Redistribute work and rebalance bank size across MPI ranks if serial: diff --git a/mcdc/transport/physics/neutron/multigroup.py b/mcdc/transport/physics/neutron/multigroup.py index 56b6616f..bab999cb 100644 --- a/mcdc/transport/physics/neutron/multigroup.py +++ b/mcdc/transport/physics/neutron/multigroup.py @@ -335,9 +335,7 @@ def fission(particle_container, program, data): # Eigenvalue mode: bank right away if settings["neutron_eigenvalue_mode"]: - particle_bank_module.bank_census_particle( - particle_container_new, simulation - ) + particle_bank_module.bank_census_particle(particle_container_new, program) continue # Below is only relevant for fixed-source problem @@ -379,13 +377,9 @@ def fission(particle_container, program, data): # Hit future census --> add to future bank elif hit_future_census: # Particle will participate in the future - particle_bank_module.bank_future_particle( - particle_container_new, simulation - ) + particle_bank_module.bank_future_particle(particle_container_new, program) # Hit current census --> add to census bank else: # Particle will participate after the current census is completed - particle_bank_module.bank_census_particle( - particle_container_new, simulation - ) + particle_bank_module.bank_census_particle(particle_container_new, program) diff --git a/mcdc/transport/physics/neutron/native.py 
b/mcdc/transport/physics/neutron/native.py index d1229976..b24f61bb 100644 --- a/mcdc/transport/physics/neutron/native.py +++ b/mcdc/transport/physics/neutron/native.py @@ -783,9 +783,7 @@ def fission(reaction, particle_container, nuclide, program, data): # Eigenvalue mode: bank right away if settings["neutron_eigenvalue_mode"]: - particle_bank_module.bank_census_particle( - particle_container_new, simulation - ) + particle_bank_module.bank_census_particle(particle_container_new, program) continue # Below is only relevant for fixed-source problem @@ -827,16 +825,12 @@ def fission(reaction, particle_container, nuclide, program, data): # Hit future census --> add to future bank elif hit_future_census: # Particle will participate in the future - particle_bank_module.bank_future_particle( - particle_container_new, simulation - ) + particle_bank_module.bank_future_particle(particle_container_new, program) # Hit current census --> add to census bank else: # Particle will participate after the current census is completed - particle_bank_module.bank_census_particle( - particle_container_new, simulation - ) + particle_bank_module.bank_census_particle(particle_container_new, program) @njit diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 5263967d..61bafb68 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -74,7 +74,6 @@ def fixed_source_simulation(simulation_container, data): # Manage particle banks: population control and work rebalance particle_bank_module.manage_particle_banks(simulation) - return # Time census-based tally closeout if use_census_based_tally: @@ -227,10 +226,10 @@ def generate_source_particle(work_start, idx_work, seed, program, data): particle_bank_module.bank_active_particle(particle_container, program) elif not hit_next_census: # Particle will participate after the current census - particle_bank_module.bank_census_particle(particle_container, simulation) + 
particle_bank_module.bank_census_particle(particle_container, program) else: # Particle will participate in the future - particle_bank_module.bank_future_particle(particle_container, simulation) + particle_bank_module.bank_future_particle(particle_container, program) @njit @@ -300,7 +299,7 @@ def step_particle(particle_container, program, data): # Census time crossing if particle["event"] & EVENT_TIME_CENSUS: - particle_bank_module.bank_census_particle(particle_container, simulation) + particle_bank_module.bank_census_particle(particle_container, program) particle["alive"] = False # Time boundary crossing diff --git a/mcdc/transport/technique.py b/mcdc/transport/technique.py index 9c0ffdf1..f58dc0b0 100644 --- a/mcdc/transport/technique.py +++ b/mcdc/transport/technique.py @@ -34,14 +34,15 @@ def weight_roulette(particle_container, simulation): @njit -def population_control(simulation): +def population_control(program): """Uniform Splitting-Roulette technique""" + simulation = util.access_simulation(program) + bank_census = simulation["bank_census"] M = simulation["settings"]["N_particle"] bank_source = simulation["bank_source"] - return # Scan the bank idx_start, N_local, N = particle_bank_module.bank_scanning(bank_census, simulation) idx_end = idx_start + N_local @@ -81,4 +82,4 @@ def population_control(simulation): ) # Set weight P_rec["w"] = w_survive - particle_bank_module.bank_source_particle(P_rec_arr, simulation) + particle_bank_module.bank_source_particle(P_rec_arr, program) From 0b79a1b6a334f106cd9b8f5b3a63994868f63d0e Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 7 Apr 2026 21:47:55 -0700 Subject: [PATCH 42/50] combine transport module adaptors --- mcdc/code_factory/gpu/program_builder.py | 10 ++++------ mcdc/code_factory/numba_objects_generator.py | 3 +-- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/mcdc/code_factory/gpu/program_builder.py b/mcdc/code_factory/gpu/program_builder.py index a90e893e..96170404 100644 --- 
a/mcdc/code_factory/gpu/program_builder.py +++ b/mcdc/code_factory/gpu/program_builder.py @@ -14,9 +14,13 @@ def adapt_transport_functions(): + global access_simulation + import mcdc.code_factory.gpu.transport as gpu_transport import mcdc.transport as transport + transport.util.access_simulation = access_simulation + # TODO: Make the following automatic transport.geometry.interface.report_lost_particle = ( gpu_transport.geometry.interface.report_lost_particle @@ -34,12 +38,6 @@ def adapt_transport_functions(): transport.util.local_array = gpu_transport.util.local_array -def adapt_transport_functions_post_declare(): - import mcdc.transport as transport - - transport.util.access_simulation = access_simulation - - def adapt_transport_functions_post_setup(): import mcdc.code_factory.gpu.transport as gpu_transport import mcdc.transport as transport diff --git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 2f47f8b3..95303642 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -364,9 +364,8 @@ def generate_numba_objects(simulation): # ================================================================================== if config.target == "gpu": - gpu_builder.adapt_transport_functions() gpu_builder.forward_declare_gpu_program() - gpu_builder.adapt_transport_functions_post_declare() + gpu_builder.adapt_transport_functions() gpu_builder.build_gpu_program(data["size"]) # ================================================================================== From 7b08ebc8bacdd2dcfa0b04f7b7f4a796ed5d866a Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 7 Apr 2026 21:49:52 -0700 Subject: [PATCH 43/50] separate bank size increment from bank-particle-adding function. Source bank size increment does not need atomic add. 
--- mcdc/transport/particle_bank.py | 36 +++++++++++++++++++++++---------- mcdc/transport/technique.py | 6 ++---- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/mcdc/transport/particle_bank.py b/mcdc/transport/particle_bank.py index 8dccb25f..9b32f9cd 100644 --- a/mcdc/transport/particle_bank.py +++ b/mcdc/transport/particle_bank.py @@ -53,32 +53,46 @@ def _bank_particle(particle_container, bank): idx = get_bank_size(bank) particle_module.copy(bank["particle_data"][idx : idx + 1], particle_container) - # Increment bank size - add_bank_size(bank, 1) - @njit def bank_active_particle(particle_container, program): simulation = util.access_simulation(program) - _bank_particle(particle_container, simulation["bank_active"]) + bank = simulation["bank_active"] + _bank_particle(particle_container, bank) - -@njit -def bank_source_particle(particle_container, program): - simulation = util.access_simulation(program) - _bank_particle(particle_container, simulation["bank_source"]) + # Increment bank size + add_bank_size(bank, 1) @njit def bank_census_particle(particle_container, program): simulation = util.access_simulation(program) - _bank_particle(particle_container, simulation["bank_census"]) + bank = simulation["bank_census"] + _bank_particle(particle_container, bank) + + # Increment bank size + add_bank_size(bank, 1) @njit def bank_future_particle(particle_container, program): simulation = util.access_simulation(program) - _bank_particle(particle_container, simulation["bank_future"]) + bank = simulation["bank_future"] + _bank_particle(particle_container, bank) + + # Increment bank size + add_bank_size(bank, 1) + + +@njit +def bank_source_particle(particle_container, simulation): + bank = simulation["bank_source"] + _bank_particle(particle_container, bank) + + # Increment bank size + # Note that we don't use the atomic operation in add_bank_size function + # as source particle banking is not thread-parallelized + bank["size"][0] += 1 @njit diff --git 
a/mcdc/transport/technique.py b/mcdc/transport/technique.py index f58dc0b0..fe629af3 100644 --- a/mcdc/transport/technique.py +++ b/mcdc/transport/technique.py @@ -34,11 +34,9 @@ def weight_roulette(particle_container, simulation): @njit -def population_control(program): +def population_control(simulation): """Uniform Splitting-Roulette technique""" - simulation = util.access_simulation(program) - bank_census = simulation["bank_census"] M = simulation["settings"]["N_particle"] bank_source = simulation["bank_source"] @@ -82,4 +80,4 @@ def population_control(program): ) # Set weight P_rec["w"] = w_survive - particle_bank_module.bank_source_particle(P_rec_arr, program) + particle_bank_module.bank_source_particle(P_rec_arr, simulation) From f4dfed171518abd42dec716ad8fb9d668dfaa2d9 Mon Sep 17 00:00:00 2001 From: ilhamv Date: Tue, 7 Apr 2026 21:50:16 -0700 Subject: [PATCH 44/50] debugging --- mcdc/transport/simulation.py | 8 ++++++++ mcdc/transport/tally/closeout.py | 12 ++++++++++++ 2 files changed, 20 insertions(+) diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 61bafb68..130712fb 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -44,11 +44,13 @@ def fixed_source_simulation(simulation_container, data): # Loop over batches for idx_batch in range(N_batch): + print("A", simulation["mpi_size"]) simulation["idx_batch"] = idx_batch seed_batch = rng.split_seed(uint64(idx_batch), settings["rng_seed"]) # Distribute work mpi.distribute_work(N_particle, simulation) + print("B", simulation["mpi_size"]) # Print multi-batch header if N_batch > 1: @@ -93,6 +95,7 @@ def fixed_source_simulation(simulation_container, data): ): break + print("C", simulation["mpi_size"]) # Multi-batch closeout if N_batch > 1: # Reset banks @@ -100,12 +103,17 @@ def fixed_source_simulation(simulation_container, data): particle_bank_module.set_bank_size(simulation["bank_census"], 0) particle_bank_module.set_bank_size(simulation["bank_source"], 0) 
particle_bank_module.set_bank_size(simulation["bank_future"], 0) + print("D", simulation["mpi_size"]) if not use_census_based_tally: # Tally history closeout tally_module.closeout.reduce(simulation, data) + print("E", simulation["mpi_size"]) tally_module.closeout.accumulate(simulation, data) + print("F", simulation["mpi_size"]) + print("G", simulation["mpi_size"]) + exit() # Tally closeout if not use_census_based_tally: tally_module.closeout.finalize(simulation, data) diff --git a/mcdc/transport/tally/closeout.py b/mcdc/transport/tally/closeout.py index 96f2cd66..c494f300 100644 --- a/mcdc/transport/tally/closeout.py +++ b/mcdc/transport/tally/closeout.py @@ -27,26 +27,38 @@ @njit def reduce(simulation, data): + print(" #A", simulation["mpi_size"]) for tally in simulation["tallies"]: + print(" #B", simulation["mpi_size"]) _reduce(tally, simulation, data) + print(" #C", simulation["mpi_size"]) @njit def _reduce(tally, simulation, data): + print(" *#A", simulation["mpi_size"]) N = tally["bin_length"] start = tally["bin_offset"] end = start + N + print(" *#B", simulation["mpi_size"]) # Normalize N_particle = simulation["settings"]["N_particle"] + print(" *#C", simulation["mpi_size"]) for i in range(N): + i = 3751 + print(" *#C-", i, "before", data[start + i], simulation["mpi_size"]) data[start + i] /= N_particle + print(" *#C-", i, "after", data[start + i], simulation["mpi_size"]) + return + print(" *#D", simulation["mpi_size"]) # MPI Reduce buff = np.zeros(N) with objmode(): MPI.COMM_WORLD.Reduce(data[start:end], buff, MPI.SUM, 0) data[start:end] = buff + print(" *#E", simulation["mpi_size"]) # ====================================================================================== From a30f0d88d2d938ee3cde8bf86c962d673086d0cb Mon Sep 17 00:00:00 2001 From: ilhamv Date: Fri, 10 Apr 2026 14:08:02 -0700 Subject: [PATCH 45/50] fix byte size assignment in code factory --- mcdc/code_factory/numba_objects_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/mcdc/code_factory/numba_objects_generator.py b/mcdc/code_factory/numba_objects_generator.py index 95303642..25653afb 100644 --- a/mcdc/code_factory/numba_objects_generator.py +++ b/mcdc/code_factory/numba_objects_generator.py @@ -692,7 +692,7 @@ def create_data_array(size): data = np.zeros(size, dtype=np.float64) return data, 0 else: - return create_data_array_on_gpu(size) + return create_data_array_on_gpu(size * 8) @njit From d23292fecacb4717e0de46fcb0d6364ac9e9420f Mon Sep 17 00:00:00 2001 From: ilhamv Date: Fri, 10 Apr 2026 14:31:53 -0700 Subject: [PATCH 46/50] remove debug prints --- mcdc/transport/simulation.py | 8 -------- mcdc/transport/tally/closeout.py | 12 ------------ 2 files changed, 20 deletions(-) diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index 130712fb..61bafb68 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -44,13 +44,11 @@ def fixed_source_simulation(simulation_container, data): # Loop over batches for idx_batch in range(N_batch): - print("A", simulation["mpi_size"]) simulation["idx_batch"] = idx_batch seed_batch = rng.split_seed(uint64(idx_batch), settings["rng_seed"]) # Distribute work mpi.distribute_work(N_particle, simulation) - print("B", simulation["mpi_size"]) # Print multi-batch header if N_batch > 1: @@ -95,7 +93,6 @@ def fixed_source_simulation(simulation_container, data): ): break - print("C", simulation["mpi_size"]) # Multi-batch closeout if N_batch > 1: # Reset banks @@ -103,17 +100,12 @@ def fixed_source_simulation(simulation_container, data): particle_bank_module.set_bank_size(simulation["bank_census"], 0) particle_bank_module.set_bank_size(simulation["bank_source"], 0) particle_bank_module.set_bank_size(simulation["bank_future"], 0) - print("D", simulation["mpi_size"]) if not use_census_based_tally: # Tally history closeout tally_module.closeout.reduce(simulation, data) - print("E", simulation["mpi_size"]) tally_module.closeout.accumulate(simulation, data) - 
print("F", simulation["mpi_size"]) - print("G", simulation["mpi_size"]) - exit() # Tally closeout if not use_census_based_tally: tally_module.closeout.finalize(simulation, data) diff --git a/mcdc/transport/tally/closeout.py b/mcdc/transport/tally/closeout.py index c494f300..96f2cd66 100644 --- a/mcdc/transport/tally/closeout.py +++ b/mcdc/transport/tally/closeout.py @@ -27,38 +27,26 @@ @njit def reduce(simulation, data): - print(" #A", simulation["mpi_size"]) for tally in simulation["tallies"]: - print(" #B", simulation["mpi_size"]) _reduce(tally, simulation, data) - print(" #C", simulation["mpi_size"]) @njit def _reduce(tally, simulation, data): - print(" *#A", simulation["mpi_size"]) N = tally["bin_length"] start = tally["bin_offset"] end = start + N - print(" *#B", simulation["mpi_size"]) # Normalize N_particle = simulation["settings"]["N_particle"] - print(" *#C", simulation["mpi_size"]) for i in range(N): - i = 3751 - print(" *#C-", i, "before", data[start + i], simulation["mpi_size"]) data[start + i] /= N_particle - print(" *#C-", i, "after", data[start + i], simulation["mpi_size"]) - return - print(" *#D", simulation["mpi_size"]) # MPI Reduce buff = np.zeros(N) with objmode(): MPI.COMM_WORLD.Reduce(data[start:end], buff, MPI.SUM, 0) data[start:end] = buff - print(" *#E", simulation["mpi_size"]) # ====================================================================================== From 0a0c89e3bd571e76183b9aa86eff2bb85f3ccc75 Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Sat, 11 Apr 2026 05:44:00 +0700 Subject: [PATCH 47/50] back in black --- mcdc/transport/physics/neutron/interface.py | 4 +++- mcdc/transport/physics/neutron/native.py | 4 +++- mcdc/transport/simulation.py | 12 ++++++++++-- mcdc/transport/tally/score.py | 8 ++++++-- 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/mcdc/transport/physics/neutron/interface.py b/mcdc/transport/physics/neutron/interface.py index 324e7acd..29f78602 100644 --- 
a/mcdc/transport/physics/neutron/interface.py +++ b/mcdc/transport/physics/neutron/interface.py @@ -54,6 +54,8 @@ def collision(particle_container, collision_data_container, program, data): simulation = util.access_simulation(program) if simulation["settings"]["neutron_multigroup_mode"]: - multigroup.collision(particle_container, collision_data_container, program, data) + multigroup.collision( + particle_container, collision_data_container, program, data + ) else: native.collision(particle_container, collision_data_container, program, data) diff --git a/mcdc/transport/physics/neutron/native.py b/mcdc/transport/physics/neutron/native.py index e99030ca..b02d98e8 100644 --- a/mcdc/transport/physics/neutron/native.py +++ b/mcdc/transport/physics/neutron/native.py @@ -258,7 +258,9 @@ def collision(particle_container, collision_data_container, program, data): # Implicit capture if simulation["implicit_capture"]["active"]: # Calculate capture fraction - SigmaC = macro_xs(NEUTRON_REACTION_CAPTURE, particle_container, simulation, data) + SigmaC = macro_xs( + NEUTRON_REACTION_CAPTURE, particle_container, simulation, data + ) capture_fraction = SigmaC / SigmaT # Deposit energy captured diff --git a/mcdc/transport/simulation.py b/mcdc/transport/simulation.py index b8d9924b..6f2e3f06 100644 --- a/mcdc/transport/simulation.py +++ b/mcdc/transport/simulation.py @@ -310,7 +310,11 @@ def step_particle(particle_container, program, data): tally = simulation["collision_tallies"][tally_base["child_ID"]] tally_module.score.collision_tally( - particle_container, collision_data_container, tally, simulation, data + particle_container, + collision_data_container, + tally, + simulation, + data, ) # Other collision tallies @@ -322,7 +326,11 @@ def step_particle(particle_container, program, data): continue tally_module.score.collision_tally( - particle_container, collision_data_container, tally, simulation, data + particle_container, + collision_data_container, + tally, + simulation, + data, ) 
# Surface and domain crossing diff --git a/mcdc/transport/tally/score.py b/mcdc/transport/tally/score.py index aa730710..853bba5d 100644 --- a/mcdc/transport/tally/score.py +++ b/mcdc/transport/tally/score.py @@ -81,7 +81,9 @@ def surface_tally(particle_container, surface, tally, simulation, data): @njit -def collision_tally(particle_container, collision_data_container, tally, simulation, data): +def collision_tally( + particle_container, collision_data_container, tally, simulation, data +): particle = particle_container[0] collision_data = collision_data_container[0] tally_base = simulation["tallies"][tally["parent_ID"]] @@ -101,7 +103,9 @@ def collision_tally(particle_container, collision_data_container, tally, simulat mesh_tally = tally["spatial_filter_type"] == SPATIAL_FILTER_MESH if mesh_tally: mesh = simulation["meshes"][tally["spatial_filter_ID"]] - i_x, i_y, i_z = mesh_module.get_indices(particle_container, mesh, simulation, data) + i_x, i_y, i_z = mesh_module.get_indices( + particle_container, mesh, simulation, data + ) # No score outside mesh bins if i_x == -1 or i_y == -1 or i_z == -1: From 2dde7782fac0790eb71e0897f1a0115ff561b186 Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Sat, 11 Apr 2026 06:42:43 +0700 Subject: [PATCH 48/50] fix minor bugs --- mcdc/print_.py | 16 ++++++++-------- mcdc/transport/tally/score.py | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/mcdc/print_.py b/mcdc/print_.py index 8aa6f350..7a381d0f 100644 --- a/mcdc/print_.py +++ b/mcdc/print_.py @@ -208,16 +208,16 @@ def print_progress_eigenvalue(simulation, data): def print_runtime(simulation): - total = simulation["runtime_total"] - preparation = simulation["runtime_preparation"] - simulation = simulation["runtime_simulation"] - output = simulation["runtime_output"] + t_total = simulation["runtime_total"] + t_preparation = simulation["runtime_preparation"] + t_simulation = simulation["runtime_simulation"] + t_output = simulation["runtime_output"] if 
master: print("\n Runtime report:") - print_time("Total ", total, 100) - print_time("Preparation", preparation, preparation / total * 100) - print_time("Simulation ", simulation, simulation / total * 100) - print_time("Output ", output, output / total * 100) + print_time("Total ", t_total, 100) + print_time("Preparation", t_preparation, t_preparation / t_total * 100) + print_time("Simulation ", t_simulation, t_simulation / t_total * 100) + print_time("Output ", t_output, t_output / t_total * 100) print("\n") sys.stdout.flush() diff --git a/mcdc/transport/tally/score.py b/mcdc/transport/tally/score.py index 853bba5d..924c0338 100644 --- a/mcdc/transport/tally/score.py +++ b/mcdc/transport/tally/score.py @@ -41,7 +41,7 @@ def surface_tally(particle_container, surface, tally, simulation, data): tally_base = simulation["tallies"][tally["parent_ID"]] # Get filter indices - MG_mode = simulation["settings"]["multigroup_mode"] + MG_mode = simulation["settings"]["neutron_multigroup_mode"] i_mu, i_azi, i_energy, i_time = get_filter_indices( particle_container, tally_base, data, MG_mode ) @@ -89,7 +89,7 @@ def collision_tally( tally_base = simulation["tallies"][tally["parent_ID"]] # Get filter indices - MG_mode = simulation["settings"]["multigroup_mode"] + MG_mode = simulation["settings"]["neutron_multigroup_mode"] i_mu, i_azi, i_energy, i_time = get_filter_indices( particle_container, tally_base, data, MG_mode ) From 6f17476cf15c5ab853737578c01e9a00b01dd43f Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Sat, 11 Apr 2026 06:59:19 +0700 Subject: [PATCH 49/50] fix misplaced scaling --- mcdc/transport/literals.py | 3 +++ mcdc/transport/physics/electron/native.py | 6 ++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 mcdc/transport/literals.py diff --git a/mcdc/transport/literals.py b/mcdc/transport/literals.py new file mode 100644 index 00000000..bfca9dd7 --- /dev/null +++ b/mcdc/transport/literals.py @@ -0,0 +1,3 @@ +# The following is
automatically generated by code_factory.py + +rpn_evaluation_buffer_size = 2 diff --git a/mcdc/transport/physics/electron/native.py b/mcdc/transport/physics/electron/native.py index 08e1ecff..4c296ceb 100644 --- a/mcdc/transport/physics/electron/native.py +++ b/mcdc/transport/physics/electron/native.py @@ -25,7 +25,7 @@ ) from mcdc.transport.data import evaluate_data from mcdc.transport.distribution import ( - sample_distribution_with_scale, + sample_distribution, sample_multi_table, ) from mcdc.transport.physics.util import ( @@ -477,9 +477,7 @@ def ionization( ) ) dist_base = simulation["distributions"][dist_ID] - T_delta = sample_distribution_with_scale( - E, dist_base, particle_container, simulation, data - ) + T_delta = sample_distribution(E, dist_base, particle_container, simulation, data) # Primary outgoing energy E_out = E - B - T_delta From 7678a8f4374300586716c221f1b9960d08d4b212 Mon Sep 17 00:00:00 2001 From: Ilham Variansyah Date: Sat, 11 Apr 2026 14:42:26 +0700 Subject: [PATCH 50/50] get all ranks to generate literals --- mcdc/main.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mcdc/main.py b/mcdc/main.py index 305f62e6..1e9ff34a 100644 --- a/mcdc/main.py +++ b/mcdc/main.py @@ -243,11 +243,10 @@ def preparation(): # ================================================================================== from mcdc.code_factory.numba_objects_generator import generate_numba_objects + from mcdc.code_factory.literals_generator import make_literals - if MPI.COMM_WORLD.Get_rank() == 0: - from mcdc.code_factory.literals_generator import make_literals + make_literals(simulationPy) - make_literals(simulationPy) simulation_container, data = generate_numba_objects(simulationPy) simulation = simulation_container[0]