From b2fb2a6c801592c58ace880e380d9e22d8b0207b Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 13:54:51 +0100 Subject: [PATCH 01/17] moved first 3 numba functions --- .../imaging/inversion_imaging_numba_util.py | 272 ++++++++++++++++++ .../imaging/inversion_imaging_util.py | 270 ----------------- autoarray/structures/arrays/array_2d_util.py | 5 - autoarray/structures/grids/grid_2d_util.py | 3 +- 4 files changed, 273 insertions(+), 277 deletions(-) create mode 100644 autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py new file mode 100644 index 000000000..f49ef6637 --- /dev/null +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -0,0 +1,272 @@ +import numpy as np +from typing import Tuple + +from autoarray import numba_util + +import numpy as np + +@numba_util.jit() +def w_tilde_curvature_imaging_from( + noise_map_native: np.ndarray, kernel_native: np.ndarray, native_index_for_slim_index +) -> np.ndarray: + """ + The matrix `w_tilde_curvature` is a matrix of dimensions [image_pixels, image_pixels] that encodes the PSF + convolution of every pair of image pixels given the noise map. This can be used to efficiently compute the + curvature matrix via the mappings between image and source pixels, in a way that omits having to perform the + PSF convolution on every individual source pixel. This provides a significant speed up for inversions of imaging + datasets. + + The limitation of this matrix is that the dimensions of [image_pixels, image_pixels] can exceed many 10s of GB's, + making it impossible to store in memory and its use in linear algebra calculations extremely. The method + `w_tilde_curvature_preload_imaging_from` describes a compressed representation that overcomes this hurdles. It is + advised `w_tilde` and this method are only used for testing. 
+ + Parameters + ---------- + noise_map_native + The two dimensional masked noise-map of values which w_tilde is computed from. + kernel_native + The two dimensional PSF kernel that w_tilde encodes the convolution of. + native_index_for_slim_index + An array of shape [total_x_pixels*sub_size] that maps pixels from the slimmed array to the native array. + + Returns + ------- + ndarray + A matrix that encodes the PSF convolution values between the noise map that enables efficient calculation of + the curvature matrix. + """ + image_pixels = len(native_index_for_slim_index) + + w_tilde_curvature = np.zeros((image_pixels, image_pixels)) + + for ip0 in range(w_tilde_curvature.shape[0]): + ip0_y, ip0_x = native_index_for_slim_index[ip0] + + for ip1 in range(ip0, w_tilde_curvature.shape[1]): + ip1_y, ip1_x = native_index_for_slim_index[ip1] + + w_tilde_curvature[ip0, ip1] += w_tilde_curvature_value_from( + value_native=noise_map_native, + kernel_native=kernel_native, + ip0_y=ip0_y, + ip0_x=ip0_x, + ip1_y=ip1_y, + ip1_x=ip1_x, + ) + + for ip0 in range(w_tilde_curvature.shape[0]): + for ip1 in range(ip0, w_tilde_curvature.shape[1]): + w_tilde_curvature[ip1, ip0] = w_tilde_curvature[ip0, ip1] + + return w_tilde_curvature + + +@numba_util.jit() +def w_tilde_curvature_preload_imaging_from( + noise_map_native: np.ndarray, kernel_native: np.ndarray, native_index_for_slim_index +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + The matrix `w_tilde_curvature` is a matrix of dimensions [image_pixels, image_pixels] that encodes the PSF + convolution of every pair of image pixels on the noise map. This can be used to efficiently compute the + curvature matrix via the mappings between image and source pixels, in a way that omits having to repeat the PSF + convolution on every individual source pixel. This provides a significant speed up for inversions of imaging + datasets. 
+ + The limitation of this matrix is that the dimensions of [image_pixels, image_pixels] can exceed many 10s of GB's, + making it impossible to store in memory and its use in linear algebra calculations slow. This methods creates + a sparse matrix that can compute the matrix `w_tilde_curvature` efficiently, albeit the linear algebra calculations + in PyAutoArray bypass this matrix entirely to go straight to the curvature matrix. + + for dataset data, w_tilde is a sparse matrix, whereby non-zero entries are only contained for pairs of image pixels + where the two pixels overlap due to the kernel size. For example, if the kernel size is (11, 11) and two image + pixels are separated by more than 20 pixels, the kernel will never convolve flux between the two pixels. Two image + pixels will only share a convolution if they are within `kernel_overlap_size = 2 * kernel_shape - 1` pixels within + one another. + + Thus, a `w_tilde_curvature_preload` matrix of dimensions [image_pixels, kernel_overlap_size ** 2] can be computed + which significantly reduces the memory consumption by removing the sparsity. Because the dimensions of the second + axes is no longer `image_pixels`, a second matrix `w_tilde_indexes` must also be computed containing the slim image + pixel indexes of every entry of `w_tilde_preload`. + + In order for the preload to store half the number of values, owing to the symmetry of the `w_tilde_curvature` + matrix, the image pixel pairs corresponding to the same image pixel are divided by two. This ensures that when the + curvature matrix is computed these pixels are not double-counted. + + The values stored in `w_tilde_curvature_preload` represent the convolution of overlapping noise-maps given the + PSF kernel. It is common for many values to be neglibly small. Removing these values can speed up the inversion + and reduce memory at the expense of a numerically irrelevent change of solution. 
+ + This matrix can then be used to compute the `curvature_matrix` in a memory efficient way that exploits the sparsity + of the linear algebra. + + Parameters + ---------- + noise_map_native + The two dimensional masked noise-map of values which `w_tilde_curvature` is computed from. + signal_to_noise_map_native + The two dimensional masked signal-to-noise-map from which the threshold discarding low S/N image pixel + pairs is used. + kernel_native + The two dimensional PSF kernel that `w_tilde_curvature` encodes the convolution of. + native_index_for_slim_index + An array of shape [total_x_pixels*sub_size] that maps pixels from the slimmed array to the native array. + + Returns + ------- + ndarray + A matrix that encodes the PSF convolution values between the noise map that enables efficient calculation of + the curvature matrix, where the dimensions are reduced to save memory. + """ + + image_pixels = len(native_index_for_slim_index) + + kernel_overlap_size = (2 * kernel_native.shape[0] - 1) * ( + 2 * kernel_native.shape[1] - 1 + ) + + curvature_preload_tmp = np.zeros((image_pixels, kernel_overlap_size)) + curvature_indexes_tmp = np.zeros((image_pixels, kernel_overlap_size)) + curvature_lengths = np.zeros(image_pixels) + + for ip0 in range(image_pixels): + ip0_y, ip0_x = native_index_for_slim_index[ip0] + + kernel_index = 0 + + for ip1 in range(ip0, curvature_preload_tmp.shape[0]): + ip1_y, ip1_x = native_index_for_slim_index[ip1] + + noise_value = w_tilde_curvature_value_from( + value_native=noise_map_native, + kernel_native=kernel_native, + ip0_y=ip0_y, + ip0_x=ip0_x, + ip1_y=ip1_y, + ip1_x=ip1_x, + ) + + if ip0 == ip1: + noise_value /= 2.0 + + if noise_value > 0.0: + curvature_preload_tmp[ip0, kernel_index] = noise_value + curvature_indexes_tmp[ip0, kernel_index] = ip1 + kernel_index += 1 + + curvature_lengths[ip0] = kernel_index + + curvature_total_pairs = int(np.sum(curvature_lengths)) + + curvature_preload = np.zeros((curvature_total_pairs)) + 
curvature_indexes = np.zeros((curvature_total_pairs)) + + index = 0 + + for i in range(image_pixels): + for data_index in range(int(curvature_lengths[i])): + curvature_preload[index] = curvature_preload_tmp[i, data_index] + curvature_indexes[index] = curvature_indexes_tmp[i, data_index] + + index += 1 + + return (curvature_preload, curvature_indexes, curvature_lengths) + + +@numba_util.jit() +def w_tilde_curvature_value_from( + value_native: np.ndarray, + kernel_native: np.ndarray, + ip0_y, + ip0_x, + ip1_y, + ip1_x, + renormalize=False, +) -> float: + """ + Compute the value of an entry of the `w_tilde_curvature` matrix, where this entry encodes the PSF convolution of + the noise-map between two image pixels. + + The calculation is performed by over-laying the PSF kernel over two noise-map pixels in 2D. For all pixels where + the two overlaid PSF kernels overlap, the following calculation is performed for every noise map value: + + `value = kernel_value_0 * kernel_value_1 * (1.0 / noise_value) ** 2.0` + + This calculation infers the fraction of flux that every PSF convolution will move between each pair of noise-map + pixels and can therefore be used to efficiently calculate the curvature_matrix that is used in the linear algebra + calculation of an inversion. + + The sum of all values where kernel pixels overlap is returned to give the `w_tilde` value. + + Parameters + ---------- + value_native + A two dimensional masked array of values (e.g. a noise-map, signal to noise map) which the w_tilde curvature + values are computed from. + kernel_native + The two dimensional PSF kernel that w_tilde encodes the convolution of. + ip0_y + The y index of the first image pixel in the image pixel pair. + ip0_x + The x index of the first image pixel in the image pixel pair. + ip1_y + The y index of the second image pixel in the image pixel pair. + ip1_x + The x index of the second image pixel in the image pixel pair. 
+ + Returns + ------- + float + The w_tilde value that encodes the value of PSF convolution between a pair of image pixels. + + """ + + curvature_value = 0.0 + + kernel_shift_y = -(kernel_native.shape[1] // 2) + kernel_shift_x = -(kernel_native.shape[0] // 2) + + ip_y_offset = ip0_y - ip1_y + ip_x_offset = ip0_x - ip1_x + + if ( + ip_y_offset < 2 * kernel_shift_y + or ip_y_offset > -2 * kernel_shift_y + or ip_x_offset < 2 * kernel_shift_x + or ip_x_offset > -2 * kernel_shift_x + ): + return curvature_value + + kernel_pixels = kernel_native.shape[0] * kernel_native.shape[1] + kernel_count = 0 + + for k0_y in range(kernel_native.shape[0]): + for k0_x in range(kernel_native.shape[1]): + value = value_native[ + ip0_y + k0_y + kernel_shift_y, ip0_x + k0_x + kernel_shift_x + ] + + if value > 0.0: + k1_y = k0_y + ip_y_offset + k1_x = k0_x + ip_x_offset + + if ( + k1_y >= 0 + and k1_x >= 0 + and k1_y < kernel_native.shape[0] + and k1_x < kernel_native.shape[1] + ): + kernel_count += 1 + + kernel_value_0 = kernel_native[k0_y, k0_x] + kernel_value_1 = kernel_native[k1_y, k1_x] + + curvature_value += ( + kernel_value_0 * kernel_value_1 * (1.0 / value) ** 2.0 + ) + + if renormalize: + if kernel_count > 0: + curvature_value *= kernel_pixels / kernel_count + + return curvature_value diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py index 285fb5eeb..d26b9c47e 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py @@ -1,10 +1,7 @@ -from scipy.signal import convolve2d import jax.numpy as jnp -import numpy as np from typing import Tuple from autoarray import numba_util -from scipy.signal import correlate2d import numpy as np @@ -134,273 +131,6 @@ def w_tilde_data_imaging_from( return jnp.sum(patches * kernel_native[None, :, :], axis=(1, 2)) # (N,) -@numba_util.jit() -def 
w_tilde_curvature_imaging_from( - noise_map_native: np.ndarray, kernel_native: np.ndarray, native_index_for_slim_index -) -> np.ndarray: - """ - The matrix `w_tilde_curvature` is a matrix of dimensions [image_pixels, image_pixels] that encodes the PSF - convolution of every pair of image pixels given the noise map. This can be used to efficiently compute the - curvature matrix via the mappings between image and source pixels, in a way that omits having to perform the - PSF convolution on every individual source pixel. This provides a significant speed up for inversions of imaging - datasets. - - The limitation of this matrix is that the dimensions of [image_pixels, image_pixels] can exceed many 10s of GB's, - making it impossible to store in memory and its use in linear algebra calculations extremely. The method - `w_tilde_curvature_preload_imaging_from` describes a compressed representation that overcomes this hurdles. It is - advised `w_tilde` and this method are only used for testing. - - Parameters - ---------- - noise_map_native - The two dimensional masked noise-map of values which w_tilde is computed from. - kernel_native - The two dimensional PSF kernel that w_tilde encodes the convolution of. - native_index_for_slim_index - An array of shape [total_x_pixels*sub_size] that maps pixels from the slimmed array to the native array. - - Returns - ------- - ndarray - A matrix that encodes the PSF convolution values between the noise map that enables efficient calculation of - the curvature matrix. 
- """ - image_pixels = len(native_index_for_slim_index) - - w_tilde_curvature = np.zeros((image_pixels, image_pixels)) - - for ip0 in range(w_tilde_curvature.shape[0]): - ip0_y, ip0_x = native_index_for_slim_index[ip0] - - for ip1 in range(ip0, w_tilde_curvature.shape[1]): - ip1_y, ip1_x = native_index_for_slim_index[ip1] - - w_tilde_curvature[ip0, ip1] += w_tilde_curvature_value_from( - value_native=noise_map_native, - kernel_native=kernel_native, - ip0_y=ip0_y, - ip0_x=ip0_x, - ip1_y=ip1_y, - ip1_x=ip1_x, - ) - - for ip0 in range(w_tilde_curvature.shape[0]): - for ip1 in range(ip0, w_tilde_curvature.shape[1]): - w_tilde_curvature[ip1, ip0] = w_tilde_curvature[ip0, ip1] - - return w_tilde_curvature - - -@numba_util.jit() -def w_tilde_curvature_preload_imaging_from( - noise_map_native: np.ndarray, kernel_native: np.ndarray, native_index_for_slim_index -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - The matrix `w_tilde_curvature` is a matrix of dimensions [image_pixels, image_pixels] that encodes the PSF - convolution of every pair of image pixels on the noise map. This can be used to efficiently compute the - curvature matrix via the mappings between image and source pixels, in a way that omits having to repeat the PSF - convolution on every individual source pixel. This provides a significant speed up for inversions of imaging - datasets. - - The limitation of this matrix is that the dimensions of [image_pixels, image_pixels] can exceed many 10s of GB's, - making it impossible to store in memory and its use in linear algebra calculations slow. This methods creates - a sparse matrix that can compute the matrix `w_tilde_curvature` efficiently, albeit the linear algebra calculations - in PyAutoArray bypass this matrix entirely to go straight to the curvature matrix. - - for dataset data, w_tilde is a sparse matrix, whereby non-zero entries are only contained for pairs of image pixels - where the two pixels overlap due to the kernel size. 
For example, if the kernel size is (11, 11) and two image - pixels are separated by more than 20 pixels, the kernel will never convolve flux between the two pixels. Two image - pixels will only share a convolution if they are within `kernel_overlap_size = 2 * kernel_shape - 1` pixels within - one another. - - Thus, a `w_tilde_curvature_preload` matrix of dimensions [image_pixels, kernel_overlap_size ** 2] can be computed - which significantly reduces the memory consumption by removing the sparsity. Because the dimensions of the second - axes is no longer `image_pixels`, a second matrix `w_tilde_indexes` must also be computed containing the slim image - pixel indexes of every entry of `w_tilde_preload`. - - In order for the preload to store half the number of values, owing to the symmetry of the `w_tilde_curvature` - matrix, the image pixel pairs corresponding to the same image pixel are divided by two. This ensures that when the - curvature matrix is computed these pixels are not double-counted. - - The values stored in `w_tilde_curvature_preload` represent the convolution of overlapping noise-maps given the - PSF kernel. It is common for many values to be neglibly small. Removing these values can speed up the inversion - and reduce memory at the expense of a numerically irrelevent change of solution. - - This matrix can then be used to compute the `curvature_matrix` in a memory efficient way that exploits the sparsity - of the linear algebra. - - Parameters - ---------- - noise_map_native - The two dimensional masked noise-map of values which `w_tilde_curvature` is computed from. - signal_to_noise_map_native - The two dimensional masked signal-to-noise-map from which the threshold discarding low S/N image pixel - pairs is used. - kernel_native - The two dimensional PSF kernel that `w_tilde_curvature` encodes the convolution of. - native_index_for_slim_index - An array of shape [total_x_pixels*sub_size] that maps pixels from the slimmed array to the native array. 
- - Returns - ------- - ndarray - A matrix that encodes the PSF convolution values between the noise map that enables efficient calculation of - the curvature matrix, where the dimensions are reduced to save memory. - """ - - image_pixels = len(native_index_for_slim_index) - - kernel_overlap_size = (2 * kernel_native.shape[0] - 1) * ( - 2 * kernel_native.shape[1] - 1 - ) - - curvature_preload_tmp = np.zeros((image_pixels, kernel_overlap_size)) - curvature_indexes_tmp = np.zeros((image_pixels, kernel_overlap_size)) - curvature_lengths = np.zeros(image_pixels) - - for ip0 in range(image_pixels): - ip0_y, ip0_x = native_index_for_slim_index[ip0] - - kernel_index = 0 - - for ip1 in range(ip0, curvature_preload_tmp.shape[0]): - ip1_y, ip1_x = native_index_for_slim_index[ip1] - - noise_value = w_tilde_curvature_value_from( - value_native=noise_map_native, - kernel_native=kernel_native, - ip0_y=ip0_y, - ip0_x=ip0_x, - ip1_y=ip1_y, - ip1_x=ip1_x, - ) - - if ip0 == ip1: - noise_value /= 2.0 - - if noise_value > 0.0: - curvature_preload_tmp[ip0, kernel_index] = noise_value - curvature_indexes_tmp[ip0, kernel_index] = ip1 - kernel_index += 1 - - curvature_lengths[ip0] = kernel_index - - curvature_total_pairs = int(np.sum(curvature_lengths)) - - curvature_preload = np.zeros((curvature_total_pairs)) - curvature_indexes = np.zeros((curvature_total_pairs)) - - index = 0 - - for i in range(image_pixels): - for data_index in range(int(curvature_lengths[i])): - curvature_preload[index] = curvature_preload_tmp[i, data_index] - curvature_indexes[index] = curvature_indexes_tmp[i, data_index] - - index += 1 - - return (curvature_preload, curvature_indexes, curvature_lengths) - - -@numba_util.jit() -def w_tilde_curvature_value_from( - value_native: np.ndarray, - kernel_native: np.ndarray, - ip0_y, - ip0_x, - ip1_y, - ip1_x, - renormalize=False, -) -> float: - """ - Compute the value of an entry of the `w_tilde_curvature` matrix, where this entry encodes the PSF convolution of - the 
noise-map between two image pixels. - - The calculation is performed by over-laying the PSF kernel over two noise-map pixels in 2D. For all pixels where - the two overlaid PSF kernels overlap, the following calculation is performed for every noise map value: - - `value = kernel_value_0 * kernel_value_1 * (1.0 / noise_value) ** 2.0` - - This calculation infers the fraction of flux that every PSF convolution will move between each pair of noise-map - pixels and can therefore be used to efficiently calculate the curvature_matrix that is used in the linear algebra - calculation of an inversion. - - The sum of all values where kernel pixels overlap is returned to give the `w_tilde` value. - - Parameters - ---------- - value_native - A two dimensional masked array of values (e.g. a noise-map, signal to noise map) which the w_tilde curvature - values are computed from. - kernel_native - The two dimensional PSF kernel that w_tilde encodes the convolution of. - ip0_y - The y index of the first image pixel in the image pixel pair. - ip0_x - The x index of the first image pixel in the image pixel pair. - ip1_y - The y index of the second image pixel in the image pixel pair. - ip1_x - The x index of the second image pixel in the image pixel pair. - - Returns - ------- - float - The w_tilde value that encodes the value of PSF convolution between a pair of image pixels. 
- - """ - - curvature_value = 0.0 - - kernel_shift_y = -(kernel_native.shape[1] // 2) - kernel_shift_x = -(kernel_native.shape[0] // 2) - - ip_y_offset = ip0_y - ip1_y - ip_x_offset = ip0_x - ip1_x - - if ( - ip_y_offset < 2 * kernel_shift_y - or ip_y_offset > -2 * kernel_shift_y - or ip_x_offset < 2 * kernel_shift_x - or ip_x_offset > -2 * kernel_shift_x - ): - return curvature_value - - kernel_pixels = kernel_native.shape[0] * kernel_native.shape[1] - kernel_count = 0 - - for k0_y in range(kernel_native.shape[0]): - for k0_x in range(kernel_native.shape[1]): - value = value_native[ - ip0_y + k0_y + kernel_shift_y, ip0_x + k0_x + kernel_shift_x - ] - - if value > 0.0: - k1_y = k0_y + ip_y_offset - k1_x = k0_x + ip_x_offset - - if ( - k1_y >= 0 - and k1_x >= 0 - and k1_y < kernel_native.shape[0] - and k1_x < kernel_native.shape[1] - ): - kernel_count += 1 - - kernel_value_0 = kernel_native[k0_y, k0_x] - kernel_value_1 = kernel_native[k1_y, k1_x] - - curvature_value += ( - kernel_value_0 * kernel_value_1 * (1.0 / value) ** 2.0 - ) - - if renormalize: - if kernel_count > 0: - curvature_value *= kernel_pixels / kernel_count - - return curvature_value - - @numba_util.jit() def data_vector_via_w_tilde_data_imaging_from( w_tilde_data: np.ndarray, diff --git a/autoarray/structures/arrays/array_2d_util.py b/autoarray/structures/arrays/array_2d_util.py index e3700a7e8..a1534e480 100644 --- a/autoarray/structures/arrays/array_2d_util.py +++ b/autoarray/structures/arrays/array_2d_util.py @@ -224,8 +224,6 @@ def extracted_array_2d_from( In the example below, an array of size (5,5) is extracted using the coordinates y0=1, y1=4, x0=1, x1=4. This extracts an array of dimensions (3,3) and is equivalent to array_2d[1:4, 1:4]. - This function is necessary work with numba jit tags and is why a standard Numpy array extraction is not used. - Parameters ---------- array_2d @@ -284,9 +282,6 @@ def resized_array_2d_from( calculated automatically. 
For example, a (5,5) array's central pixel is (2,2). For even dimensions the central pixel is assumed to be the lower indexed value, e.g. a (6,4) array's central pixel is calculated as (2,1). - The default origin is (-1, -1) because numba requires that the function input is the same type throughout the - function, thus a default 'None' value cannot be used. - Parameters ---------- array_2d diff --git a/autoarray/structures/grids/grid_2d_util.py b/autoarray/structures/grids/grid_2d_util.py index 75993ba90..db8f92fe0 100644 --- a/autoarray/structures/grids/grid_2d_util.py +++ b/autoarray/structures/grids/grid_2d_util.py @@ -10,7 +10,6 @@ from autoarray import exc from autoarray.structures.arrays import array_2d_util from autoarray.geometry import geometry_util -from autoarray import numba_util from autoarray import type as ty @@ -535,7 +534,7 @@ def grid_scaled_2d_slim_radial_projected_from( The (y,x) scaled units to pixel units conversion factor of the 2D mask array. shape_slim Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is - used (due to numba None cannot be used as a default value). + used. 
Returns ------- From d9a1582a4b8a607a89573fa33d55269fb10f8047 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 13:56:06 +0100 Subject: [PATCH 02/17] moved more functions --- .../imaging/inversion_imaging_numba_util.py | 377 +++++++++++++++- .../imaging/inversion_imaging_util.py | 425 ------------------ 2 files changed, 376 insertions(+), 426 deletions(-) diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py index f49ef6637..1a9460e98 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -1,4 +1,3 @@ -import numpy as np from typing import Tuple from autoarray import numba_util @@ -270,3 +269,379 @@ def w_tilde_curvature_value_from( curvature_value *= kernel_pixels / kernel_count return curvature_value + + +@numba_util.jit() +def curvature_matrix_via_w_tilde_curvature_preload_imaging_from( + curvature_preload: np.ndarray, + curvature_indexes: np.ndarray, + curvature_lengths: np.ndarray, + data_to_pix_unique: np.ndarray, + data_weights: np.ndarray, + pix_lengths: np.ndarray, + pix_pixels: int, +) -> np.ndarray: + """ + Returns the curvature matrix `F` (see Warren & Dye 2003) by computing it using `w_tilde_preload` + (see `w_tilde_preload_interferometer_from`) for an imaging inversion. + + To compute the curvature matrix via w_tilde the following matrix multiplication is normally performed: + + curvature_matrix = mapping_matrix.T * w_tilde * mapping matrix + + This function speeds this calculation up in two ways: + + 1) Instead of using `w_tilde` (dimensions [image_pixels, image_pixels] it uses `w_tilde_preload` (dimensions + [image_pixels, kernel_overlap]). The massive reduction in the size of this matrix in memory allows for much fast + computation. 
+ + 2) It omits the `mapping_matrix` and instead uses directly the 1D vector that maps every image pixel to a source + pixel `native_index_for_slim_index`. This exploits the sparsity in the `mapping_matrix` to directly + compute the `curvature_matrix` (e.g. it condenses the triple matrix multiplication into a double for loop!). + + Parameters + ---------- + curvature_preload + A matrix that precomputes the values for fast computation of the curvature matrix in a memory efficient way. + curvature_indexes + The image-pixel indexes of the values stored in the w tilde preload matrix, which are used to compute + the weights of the data values when computing the curvature matrix. + curvature_lengths + The number of image pixels in every row of `w_tilde_curvature`, which is iterated over when computing the + curvature matrix. + data_to_pix_unique + An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of + pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). + data_weights + For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping + based on the number of sub-pixels that map to pixelization pixel. + pix_lengths + A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over + `data_to_pix_unique` and `data_weights`. + pix_pixels + The total number of pixels in the pixelization that reconstructs the data. + + Returns + ------- + ndarray + The curvature matrix `F` (see Warren & Dye 2003). 
+ """ + + data_pixels = curvature_lengths.shape[0] + + curvature_matrix = np.zeros((pix_pixels, pix_pixels)) + + curvature_index = 0 + + for data_0 in range(data_pixels): + for data_1_index in range(curvature_lengths[data_0]): + data_1 = curvature_indexes[curvature_index] + w_tilde_value = curvature_preload[curvature_index] + + for pix_0_index in range(pix_lengths[data_0]): + data_0_weight = data_weights[data_0, pix_0_index] + pix_0 = data_to_pix_unique[data_0, pix_0_index] + + for pix_1_index in range(pix_lengths[data_1]): + data_1_weight = data_weights[data_1, pix_1_index] + pix_1 = data_to_pix_unique[data_1, pix_1_index] + + curvature_matrix[pix_0, pix_1] += ( + data_0_weight * data_1_weight * w_tilde_value + ) + + curvature_index += 1 + + for i in range(pix_pixels): + for j in range(i, pix_pixels): + curvature_matrix[i, j] += curvature_matrix[j, i] + + for i in range(pix_pixels): + for j in range(i, pix_pixels): + curvature_matrix[j, i] = curvature_matrix[i, j] + + return curvature_matrix + + +@numba_util.jit() +def curvature_matrix_off_diags_via_w_tilde_curvature_preload_imaging_from( + curvature_preload: np.ndarray, + curvature_indexes: np.ndarray, + curvature_lengths: np.ndarray, + data_to_pix_unique_0: np.ndarray, + data_weights_0: np.ndarray, + pix_lengths_0: np.ndarray, + pix_pixels_0: int, + data_to_pix_unique_1: np.ndarray, + data_weights_1: np.ndarray, + pix_lengths_1: np.ndarray, + pix_pixels_1: int, +) -> np.ndarray: + """ + Returns the off diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) by computing them + using `w_tilde_preload` (see `w_tilde_preload_interferometer_from`) for an imaging inversion. + + When there is more than one mapper in the inversion, its `mapping_matrix` is extended to have dimensions + [data_pixels, sum(source_pixels_in_each_mapper)]. The curvature matrix therefore will have dimensions + [sum(source_pixels_in_each_mapper), sum(source_pixels_in_each_mapper)]. 
+ + To compute the curvature matrix via w_tilde the following matrix multiplication is normally performed: + + curvature_matrix = mapping_matrix.T * w_tilde * mapping matrix + + When the `mapping_matrix` consists of multiple mappers from different planes, this means that shared data mappings + between source-pixels in different mappers must be accounted for when computing the `curvature_matrix`. These + appear as off-diagonal terms in the overall curvature matrix. + + This function evaluates these off-diagonal terms, by using the w-tilde curvature preloads and the unique + data-to-pixelization mappings of each mapper. It behaves analogous to the + function `curvature_matrix_via_w_tilde_curvature_preload_imaging_from`. + + Parameters + ---------- + curvature_preload + A matrix that precomputes the values for fast computation of the curvature matrix in a memory efficient way. + curvature_indexes + The image-pixel indexes of the values stored in the w tilde preload matrix, which are used to compute + the weights of the data values when computing the curvature matrix. + curvature_lengths + The number of image pixels in every row of `w_tilde_curvature`, which is iterated over when computing the + curvature matrix. + data_to_pix_unique + An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of + pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). + data_weights + For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping + based on the number of sub-pixels that map to pixelization pixel. + pix_lengths + A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over + `data_to_pix_unique` and `data_weights`. + pix_pixels + The total number of pixels in the pixelization that reconstructs the data. + + Returns + ------- + ndarray + The curvature matrix `F` (see Warren & Dye 2003). 
+ """ + + data_pixels = curvature_lengths.shape[0] + + curvature_matrix = np.zeros((pix_pixels_0, pix_pixels_1)) + + curvature_index = 0 + + for data_0 in range(data_pixels): + for data_1_index in range(curvature_lengths[data_0]): + data_1 = curvature_indexes[curvature_index] + w_tilde_value = curvature_preload[curvature_index] + + for pix_0_index in range(pix_lengths_0[data_0]): + data_0_weight = data_weights_0[data_0, pix_0_index] + pix_0 = data_to_pix_unique_0[data_0, pix_0_index] + + for pix_1_index in range(pix_lengths_1[data_1]): + data_1_weight = data_weights_1[data_1, pix_1_index] + pix_1 = data_to_pix_unique_1[data_1, pix_1_index] + + curvature_matrix[pix_0, pix_1] += ( + data_0_weight * data_1_weight * w_tilde_value + ) + + curvature_index += 1 + + return curvature_matrix + +@numba_util.jit() +def data_linear_func_matrix_from( + curvature_weights_matrix: np.ndarray, + image_frame_1d_lengths: np.ndarray, + image_frame_1d_indexes: np.ndarray, + image_frame_1d_kernels: np.ndarray, +) -> np.ndarray: + """ + Returns a matrix that for each data pixel, maps it to the sum of the values of a linear object function convolved + with the PSF kernel at the data pixel. + + If a linear function in an inversion is fixed, its values can be evaluated and preloaded beforehand. For every + data pixel, the PSF convolution with this preloaded linear function can also be preloaded, in a matrix of + shape [data_pixels, 1]. + + Given that multiple linear functions can be used and fixed in an inversion, this matrix is extended to have + dimensions [data_pixels, total_fixed_linear_functions]. + + When mapper objects and linear functions are used simultaneously in an inversion, this preloaded matrix + significantly speed up the computation of their off-diagonal terms in the curvature matrix. + + This is similar to the preloading performed via the w-tilde formalism, except that there it is the PSF convolved + values of each noise-map value pair that are preloaded. 
+ + In **PyAutoGalaxy** and **PyAutoLens**, this preload is used when linear light profiles are fixed in the model. + For example, when using a multi Gaussian expansion, the values defining how those Gaussians are evaluated + (e.g. `centre`, `ell_comps` and `sigma`) are often fixed in a model, meaning this matrix can be preloaded and + used for speed up. + + Parameters + ---------- + curvature_weights_matrix + The operated values of each linear function divided by the noise-map squared, in a matrix of shape + [data_pixels, total_fixed_linear_functions]. + image_frame_indexes + The indexes of all masked pixels that the PSF blurs light into (see the `Convolver` object). + image_frame_kernels + The kernel values of all masked pixels that the PSF blurs light into (see the `Convolver` object). + image_frame_length + The number of masked pixels it will blur light into (unmasked pixels are excluded, see the `Convolver` object). + + Returns + ------- + ndarray + A matrix of shape [data_pixels, total_fixed_linear_functions] that for each data pixel, maps it to the sum of + the values of a linear object function convolved with the PSF kernel at the data pixel. 
+ """ + data_pixels = curvature_weights_matrix.shape[0] + linear_func_pixels = curvature_weights_matrix.shape[1] + + data_linear_func_matrix_dict = np.zeros(shape=(data_pixels, linear_func_pixels)) + + for data_0 in range(data_pixels): + for psf_index in range(image_frame_1d_lengths[data_0]): + data_index = image_frame_1d_indexes[data_0, psf_index] + kernel_value = image_frame_1d_kernels[data_0, psf_index] + + for linear_index in range(linear_func_pixels): + data_linear_func_matrix_dict[data_0, linear_index] += ( + kernel_value * curvature_weights_matrix[data_index, linear_index] + ) + + return data_linear_func_matrix_dict + + +@numba_util.jit() +def curvature_matrix_off_diags_via_data_linear_func_matrix_from( + data_linear_func_matrix: np.ndarray, + data_to_pix_unique: np.ndarray, + data_weights: np.ndarray, + pix_lengths: np.ndarray, + pix_pixels: int, +): + """ + Returns the off diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) between a mapper object + and a linear func object, using the preloaded `data_linear_func_matrix` of the values of the linear functions. + + + If a linear function in an inversion is fixed, its values can be evaluated and preloaded beforehand. For every + data pixel, the PSF convolution with this preloaded linear function can also be preloaded, in a matrix of + shape [data_pixels, 1]. + + When mapper objects and linear functions are used simultaneously in an inversion, this preloaded matrix + significantly speed up the computation of their off-diagonal terms in the curvature matrix. + + This function performs this efficient calcluation via the preloaded `data_linear_func_matrix`. + + Parameters + ---------- + data_linear_func_matrix + A matrix of shape [data_pixels, total_fixed_linear_functions] that for each data pixel, maps it to the sum of + the values of a linear object function convolved with the PSF kernel at the data pixel. 
+ data_to_pix_unique + The indexes of all pixels that each data pixel maps to (see the `Mapper` object). + data_weights + The weights of all pixels that each data pixel maps to (see the `Mapper` object). + pix_lengths + The number of pixelization pixels that each data pixel maps to (see the `Mapper` object). + pix_pixels + The number of pixelization pixels in the pixelization (see the `Mapper` object). + """ + + linear_func_pixels = data_linear_func_matrix.shape[1] + + off_diag = np.zeros((pix_pixels, linear_func_pixels)) + + data_pixels = data_weights.shape[0] + + for data_0 in range(data_pixels): + for pix_0_index in range(pix_lengths[data_0]): + data_0_weight = data_weights[data_0, pix_0_index] + pix_0 = data_to_pix_unique[data_0, pix_0_index] + + for linear_index in range(linear_func_pixels): + off_diag[pix_0, linear_index] += ( + data_linear_func_matrix[data_0, linear_index] * data_0_weight + ) + + return off_diag + + +@numba_util.jit() +def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( + data_to_pix_unique: np.ndarray, + data_weights: np.ndarray, + pix_lengths: np.ndarray, + pix_pixels: int, + curvature_weights: np.ndarray, + image_frame_1d_lengths: np.ndarray, + image_frame_1d_indexes: np.ndarray, + image_frame_1d_kernels: np.ndarray, +) -> np.ndarray: + """ + Returns the off diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) between a mapper object + and a linear func object, using the unique mappings between data pixels and pixelization pixels. + + This takes as input the curvature weights of the linear function object, which are the values of the linear + function convolved with the PSF and divided by the noise-map squared. + + For each unique mapping between a data pixel and a pixelization pixel, the pixels which that pixel convolves + light into are computed, multiplied by their corresponding curvature weights and summed. 
This process also + accounts the sub-pixel mapping of each data pixel to the pixelization pixel + + This is done for every unique mapping of a data pixel to a pixelization pixel, giving the off-diagonal terms in + the curvature matrix. + + Parameters + ---------- + data_to_pix_unique + An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of + pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). + data_weights + For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping + based on the number of sub-pixels that map to pixelization pixel. + pix_lengths + A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over + `data_to_pix_unique` and `data_weights`. + pix_pixels + The total number of pixels in the pixelization that reconstructs the data. + curvature_weights + The operated values of the linear func divided by the noise-map squared. + image_frame_indexes + The indexes of all masked pixels that the PSF blurs light into (see the `Convolver` object). + image_frame_kernels + The kernel values of all masked pixels that the PSF blurs light into (see the `Convolver` object). + image_frame_length + The number of masked pixels it will blur light into (unmasked pixels are excluded, see the `Convolver` object). + + Returns + ------- + ndarray + The curvature matrix `F` (see Warren & Dye 2003). 
+ """ + + data_pixels = data_weights.shape[0] + linear_func_pixels = curvature_weights.shape[1] + + off_diag = np.zeros((pix_pixels, linear_func_pixels)) + + for data_0 in range(data_pixels): + for pix_0_index in range(pix_lengths[data_0]): + data_0_weight = data_weights[data_0, pix_0_index] + pix_0 = data_to_pix_unique[data_0, pix_0_index] + + for psf_index in range(image_frame_1d_lengths[data_0]): + data_index = image_frame_1d_indexes[data_0, psf_index] + kernel_value = image_frame_1d_kernels[data_0, psf_index] + + off_diag[pix_0, :] += ( + data_0_weight * curvature_weights[data_index, :] * kernel_value + ) + + return off_diag \ No newline at end of file diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py index d26b9c47e..5ea0b2574 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py @@ -1,5 +1,4 @@ import jax.numpy as jnp -from typing import Tuple from autoarray import numba_util @@ -131,54 +130,6 @@ def w_tilde_data_imaging_from( return jnp.sum(patches * kernel_native[None, :, :], axis=(1, 2)) # (N,) -@numba_util.jit() -def data_vector_via_w_tilde_data_imaging_from( - w_tilde_data: np.ndarray, - data_to_pix_unique: np.ndarray, - data_weights: np.ndarray, - pix_lengths: np.ndarray, - pix_pixels: int, -) -> np.ndarray: - """ - Returns the data vector `D` from the `w_tilde_data` matrix (see `w_tilde_data_imaging_from`), which encodes the - the 1D image `d` and 1D noise-map values `\sigma` (see Warren & Dye 2003). - - This uses the array `data_to_pix_unique`, which describes the unique mappings of every set of image sub-pixels to - pixelization pixels and `data_weights`, which describes how many sub-pixels uniquely map to each pixelization - pixels (see `data_slim_to_pixelization_unique_from`). 
- - Parameters - ---------- - w_tilde_data - A matrix that encodes the PSF convolution values between the imaging divided by the noise map**2 that enables - efficient calculation of the data vector. - data_to_pix_unique - An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of - pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). - data_weights - For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping - based on the number of sub-pixels that map to pixelization pixel. - pix_lengths - A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over - `data_to_pix_unique` and `data_weights`. - pix_pixels - The total number of pixels in the pixelization that reconstructs the data. - """ - - data_pixels = w_tilde_data.shape[0] - - data_vector = np.zeros(pix_pixels) - - for data_0 in range(data_pixels): - for pix_0_index in range(pix_lengths[data_0]): - data_0_weight = data_weights[data_0, pix_0_index] - pix_0 = data_to_pix_unique[data_0, pix_0_index] - - data_vector[pix_0] += data_0_weight * w_tilde_data[data_0] - - return data_vector - - def data_vector_via_blurred_mapping_matrix_from( blurred_mapping_matrix: np.ndarray, image: np.ndarray, noise_map: np.ndarray ) -> np.ndarray: @@ -197,379 +148,3 @@ def data_vector_via_blurred_mapping_matrix_from( """ return (image / noise_map**2.0) @ blurred_mapping_matrix - -@numba_util.jit() -def curvature_matrix_via_w_tilde_curvature_preload_imaging_from( - curvature_preload: np.ndarray, - curvature_indexes: np.ndarray, - curvature_lengths: np.ndarray, - data_to_pix_unique: np.ndarray, - data_weights: np.ndarray, - pix_lengths: np.ndarray, - pix_pixels: int, -) -> np.ndarray: - """ - Returns the curvature matrix `F` (see Warren & Dye 2003) by computing it using `w_tilde_preload` - (see `w_tilde_preload_interferometer_from`) for an imaging inversion. 
- - To compute the curvature matrix via w_tilde the following matrix multiplication is normally performed: - - curvature_matrix = mapping_matrix.T * w_tilde * mapping matrix - - This function speeds this calculation up in two ways: - - 1) Instead of using `w_tilde` (dimensions [image_pixels, image_pixels] it uses `w_tilde_preload` (dimensions - [image_pixels, kernel_overlap]). The massive reduction in the size of this matrix in memory allows for much fast - computation. - - 2) It omits the `mapping_matrix` and instead uses directly the 1D vector that maps every image pixel to a source - pixel `native_index_for_slim_index`. This exploits the sparsity in the `mapping_matrix` to directly - compute the `curvature_matrix` (e.g. it condenses the triple matrix multiplication into a double for loop!). - - Parameters - ---------- - curvature_preload - A matrix that precomputes the values for fast computation of the curvature matrix in a memory efficient way. - curvature_indexes - The image-pixel indexes of the values stored in the w tilde preload matrix, which are used to compute - the weights of the data values when computing the curvature matrix. - curvature_lengths - The number of image pixels in every row of `w_tilde_curvature`, which is iterated over when computing the - curvature matrix. - data_to_pix_unique - An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of - pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). - data_weights - For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping - based on the number of sub-pixels that map to pixelization pixel. - pix_lengths - A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over - `data_to_pix_unique` and `data_weights`. - pix_pixels - The total number of pixels in the pixelization that reconstructs the data. 
- - Returns - ------- - ndarray - The curvature matrix `F` (see Warren & Dye 2003). - """ - - data_pixels = curvature_lengths.shape[0] - - curvature_matrix = np.zeros((pix_pixels, pix_pixels)) - - curvature_index = 0 - - for data_0 in range(data_pixels): - for data_1_index in range(curvature_lengths[data_0]): - data_1 = curvature_indexes[curvature_index] - w_tilde_value = curvature_preload[curvature_index] - - for pix_0_index in range(pix_lengths[data_0]): - data_0_weight = data_weights[data_0, pix_0_index] - pix_0 = data_to_pix_unique[data_0, pix_0_index] - - for pix_1_index in range(pix_lengths[data_1]): - data_1_weight = data_weights[data_1, pix_1_index] - pix_1 = data_to_pix_unique[data_1, pix_1_index] - - curvature_matrix[pix_0, pix_1] += ( - data_0_weight * data_1_weight * w_tilde_value - ) - - curvature_index += 1 - - for i in range(pix_pixels): - for j in range(i, pix_pixels): - curvature_matrix[i, j] += curvature_matrix[j, i] - - for i in range(pix_pixels): - for j in range(i, pix_pixels): - curvature_matrix[j, i] = curvature_matrix[i, j] - - return curvature_matrix - - -@numba_util.jit() -def curvature_matrix_off_diags_via_w_tilde_curvature_preload_imaging_from( - curvature_preload: np.ndarray, - curvature_indexes: np.ndarray, - curvature_lengths: np.ndarray, - data_to_pix_unique_0: np.ndarray, - data_weights_0: np.ndarray, - pix_lengths_0: np.ndarray, - pix_pixels_0: int, - data_to_pix_unique_1: np.ndarray, - data_weights_1: np.ndarray, - pix_lengths_1: np.ndarray, - pix_pixels_1: int, -) -> np.ndarray: - """ - Returns the off diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) by computing them - using `w_tilde_preload` (see `w_tilde_preload_interferometer_from`) for an imaging inversion. - - When there is more than one mapper in the inversion, its `mapping_matrix` is extended to have dimensions - [data_pixels, sum(source_pixels_in_each_mapper)]. 
The curvature matrix therefore will have dimensions - [sum(source_pixels_in_each_mapper), sum(source_pixels_in_each_mapper)]. - - To compute the curvature matrix via w_tilde the following matrix multiplication is normally performed: - - curvature_matrix = mapping_matrix.T * w_tilde * mapping matrix - - When the `mapping_matrix` consists of multiple mappers from different planes, this means that shared data mappings - between source-pixels in different mappers must be accounted for when computing the `curvature_matrix`. These - appear as off-diagonal terms in the overall curvature matrix. - - This function evaluates these off-diagonal terms, by using the w-tilde curvature preloads and the unique - data-to-pixelization mappings of each mapper. It behaves analogous to the - function `curvature_matrix_via_w_tilde_curvature_preload_imaging_from`. - - Parameters - ---------- - curvature_preload - A matrix that precomputes the values for fast computation of the curvature matrix in a memory efficient way. - curvature_indexes - The image-pixel indexes of the values stored in the w tilde preload matrix, which are used to compute - the weights of the data values when computing the curvature matrix. - curvature_lengths - The number of image pixels in every row of `w_tilde_curvature`, which is iterated over when computing the - curvature matrix. - data_to_pix_unique - An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of - pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). - data_weights - For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping - based on the number of sub-pixels that map to pixelization pixel. - pix_lengths - A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over - `data_to_pix_unique` and `data_weights`. 
- pix_pixels - The total number of pixels in the pixelization that reconstructs the data. - - Returns - ------- - ndarray - The curvature matrix `F` (see Warren & Dye 2003). - """ - - data_pixels = curvature_lengths.shape[0] - - curvature_matrix = np.zeros((pix_pixels_0, pix_pixels_1)) - - curvature_index = 0 - - for data_0 in range(data_pixels): - for data_1_index in range(curvature_lengths[data_0]): - data_1 = curvature_indexes[curvature_index] - w_tilde_value = curvature_preload[curvature_index] - - for pix_0_index in range(pix_lengths_0[data_0]): - data_0_weight = data_weights_0[data_0, pix_0_index] - pix_0 = data_to_pix_unique_0[data_0, pix_0_index] - - for pix_1_index in range(pix_lengths_1[data_1]): - data_1_weight = data_weights_1[data_1, pix_1_index] - pix_1 = data_to_pix_unique_1[data_1, pix_1_index] - - curvature_matrix[pix_0, pix_1] += ( - data_0_weight * data_1_weight * w_tilde_value - ) - - curvature_index += 1 - - return curvature_matrix - - -@numba_util.jit() -def data_linear_func_matrix_from( - curvature_weights_matrix: np.ndarray, - image_frame_1d_lengths: np.ndarray, - image_frame_1d_indexes: np.ndarray, - image_frame_1d_kernels: np.ndarray, -) -> np.ndarray: - """ - Returns a matrix that for each data pixel, maps it to the sum of the values of a linear object function convolved - with the PSF kernel at the data pixel. - - If a linear function in an inversion is fixed, its values can be evaluated and preloaded beforehand. For every - data pixel, the PSF convolution with this preloaded linear function can also be preloaded, in a matrix of - shape [data_pixels, 1]. - - Given that multiple linear functions can be used and fixed in an inversion, this matrix is extended to have - dimensions [data_pixels, total_fixed_linear_functions]. - - When mapper objects and linear functions are used simultaneously in an inversion, this preloaded matrix - significantly speed up the computation of their off-diagonal terms in the curvature matrix. 
- - This is similar to the preloading performed via the w-tilde formalism, except that there it is the PSF convolved - values of each noise-map value pair that are preloaded. - - In **PyAutoGalaxy** and **PyAutoLens**, this preload is used when linear light profiles are fixed in the model. - For example, when using a multi Gaussian expansion, the values defining how those Gaussians are evaluated - (e.g. `centre`, `ell_comps` and `sigma`) are often fixed in a model, meaning this matrix can be preloaded and - used for speed up. - - Parameters - ---------- - curvature_weights_matrix - The operated values of each linear function divided by the noise-map squared, in a matrix of shape - [data_pixels, total_fixed_linear_functions]. - image_frame_indexes - The indexes of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_kernels - The kernel values of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_length - The number of masked pixels it will blur light into (unmasked pixels are excluded, see the `Convolver` object). - - Returns - ------- - ndarray - A matrix of shape [data_pixels, total_fixed_linear_functions] that for each data pixel, maps it to the sum of - the values of a linear object function convolved with the PSF kernel at the data pixel. 
- """ - data_pixels = curvature_weights_matrix.shape[0] - linear_func_pixels = curvature_weights_matrix.shape[1] - - data_linear_func_matrix_dict = np.zeros(shape=(data_pixels, linear_func_pixels)) - - for data_0 in range(data_pixels): - for psf_index in range(image_frame_1d_lengths[data_0]): - data_index = image_frame_1d_indexes[data_0, psf_index] - kernel_value = image_frame_1d_kernels[data_0, psf_index] - - for linear_index in range(linear_func_pixels): - data_linear_func_matrix_dict[data_0, linear_index] += ( - kernel_value * curvature_weights_matrix[data_index, linear_index] - ) - - return data_linear_func_matrix_dict - - -@numba_util.jit() -def curvature_matrix_off_diags_via_data_linear_func_matrix_from( - data_linear_func_matrix: np.ndarray, - data_to_pix_unique: np.ndarray, - data_weights: np.ndarray, - pix_lengths: np.ndarray, - pix_pixels: int, -): - """ - Returns the off diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) between a mapper object - and a linear func object, using the preloaded `data_linear_func_matrix` of the values of the linear functions. - - - If a linear function in an inversion is fixed, its values can be evaluated and preloaded beforehand. For every - data pixel, the PSF convolution with this preloaded linear function can also be preloaded, in a matrix of - shape [data_pixels, 1]. - - When mapper objects and linear functions are used simultaneously in an inversion, this preloaded matrix - significantly speed up the computation of their off-diagonal terms in the curvature matrix. - - This function performs this efficient calcluation via the preloaded `data_linear_func_matrix`. - - Parameters - ---------- - data_linear_func_matrix - A matrix of shape [data_pixels, total_fixed_linear_functions] that for each data pixel, maps it to the sum of - the values of a linear object function convolved with the PSF kernel at the data pixel. 
- data_to_pix_unique - The indexes of all pixels that each data pixel maps to (see the `Mapper` object). - data_weights - The weights of all pixels that each data pixel maps to (see the `Mapper` object). - pix_lengths - The number of pixelization pixels that each data pixel maps to (see the `Mapper` object). - pix_pixels - The number of pixelization pixels in the pixelization (see the `Mapper` object). - """ - - linear_func_pixels = data_linear_func_matrix.shape[1] - - off_diag = np.zeros((pix_pixels, linear_func_pixels)) - - data_pixels = data_weights.shape[0] - - for data_0 in range(data_pixels): - for pix_0_index in range(pix_lengths[data_0]): - data_0_weight = data_weights[data_0, pix_0_index] - pix_0 = data_to_pix_unique[data_0, pix_0_index] - - for linear_index in range(linear_func_pixels): - off_diag[pix_0, linear_index] += ( - data_linear_func_matrix[data_0, linear_index] * data_0_weight - ) - - return off_diag - - -@numba_util.jit() -def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( - data_to_pix_unique: np.ndarray, - data_weights: np.ndarray, - pix_lengths: np.ndarray, - pix_pixels: int, - curvature_weights: np.ndarray, - image_frame_1d_lengths: np.ndarray, - image_frame_1d_indexes: np.ndarray, - image_frame_1d_kernels: np.ndarray, -) -> np.ndarray: - """ - Returns the off diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) between a mapper object - and a linear func object, using the unique mappings between data pixels and pixelization pixels. - - This takes as input the curvature weights of the linear function object, which are the values of the linear - function convolved with the PSF and divided by the noise-map squared. - - For each unique mapping between a data pixel and a pixelization pixel, the pixels which that pixel convolves - light into are computed, multiplied by their corresponding curvature weights and summed. 
This process also - accounts the sub-pixel mapping of each data pixel to the pixelization pixel - - This is done for every unique mapping of a data pixel to a pixelization pixel, giving the off-diagonal terms in - the curvature matrix. - - Parameters - ---------- - data_to_pix_unique - An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of - pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). - data_weights - For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping - based on the number of sub-pixels that map to pixelization pixel. - pix_lengths - A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over - `data_to_pix_unique` and `data_weights`. - pix_pixels - The total number of pixels in the pixelization that reconstructs the data. - curvature_weights - The operated values of the linear func divided by the noise-map squared. - image_frame_indexes - The indexes of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_kernels - The kernel values of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_length - The number of masked pixels it will blur light into (unmasked pixels are excluded, see the `Convolver` object). - - Returns - ------- - ndarray - The curvature matrix `F` (see Warren & Dye 2003). 
- """ - - data_pixels = data_weights.shape[0] - linear_func_pixels = curvature_weights.shape[1] - - off_diag = np.zeros((pix_pixels, linear_func_pixels)) - - for data_0 in range(data_pixels): - for pix_0_index in range(pix_lengths[data_0]): - data_0_weight = data_weights[data_0, pix_0_index] - pix_0 = data_to_pix_unique[data_0, pix_0_index] - - for psf_index in range(image_frame_1d_lengths[data_0]): - data_index = image_frame_1d_indexes[data_0, psf_index] - kernel_value = image_frame_1d_kernels[data_0, psf_index] - - off_diag[pix_0, :] += ( - data_0_weight * curvature_weights[data_index, :] * kernel_value - ) - - return off_diag From e5ee4ddd2becc315845dc2543370e8d1041186c7 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 13:57:18 +0100 Subject: [PATCH 03/17] update dataset w tilde to use numba util --- autoarray/dataset/imaging/w_tilde.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/autoarray/dataset/imaging/w_tilde.py b/autoarray/dataset/imaging/w_tilde.py index 2cb5a5778..b239bd457 100644 --- a/autoarray/dataset/imaging/w_tilde.py +++ b/autoarray/dataset/imaging/w_tilde.py @@ -1,4 +1,3 @@ -import copy import logging import numpy as np @@ -6,7 +5,7 @@ from autoarray.dataset.abstract.w_tilde import AbstractWTilde -from autoarray.inversion.inversion.imaging import inversion_imaging_util +from autoarray.inversion.inversion.imaging import inversion_imaging_numba_util logger = logging.getLogger(__name__) @@ -85,7 +84,7 @@ def w_matrix(self): the curvature matrix. 
""" - return inversion_imaging_util.w_tilde_curvature_imaging_from( + return inversion_imaging_numba_util.w_tilde_curvature_imaging_from( noise_map_native=np.array(self.noise_map.native.array).astype("float64"), kernel_native=np.array(self.psf.native.array).astype("float64"), native_index_for_slim_index=np.array( From 88a8d93b9b6d1e46e3ec4cd7fd58d5bf8fae3794 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 17:20:42 +0100 Subject: [PATCH 04/17] update w_tilde to use numba utils --- autoarray/dataset/imaging/dataset.py | 4 +- autoarray/dataset/imaging/w_tilde.py | 1 + .../imaging/inversion_imaging_numba_util.py | 114 ++++++++---------- .../imaging/inversion_imaging_util.py | 64 +++++++++- .../inversion/inversion/imaging/w_tilde.py | 91 +++++++------- autoarray/util/__init__.py | 3 + 6 files changed, 159 insertions(+), 118 deletions(-) diff --git a/autoarray/dataset/imaging/dataset.py b/autoarray/dataset/imaging/dataset.py index d5d146a46..7130f9151 100644 --- a/autoarray/dataset/imaging/dataset.py +++ b/autoarray/dataset/imaging/dataset.py @@ -15,7 +15,7 @@ from autoarray import exc from autoarray.operators.over_sampling import over_sample_util -from autoarray.inversion.inversion.imaging import inversion_imaging_util +from autoarray.inversion.inversion.imaging import inversion_imaging_numba_util logger = logging.getLogger(__name__) @@ -239,7 +239,7 @@ def w_tilde(self): curvature_preload, indexes, lengths, - ) = inversion_imaging_util.w_tilde_curvature_preload_imaging_from( + ) = inversion_imaging_numba_util.w_tilde_curvature_preload_imaging_from( noise_map_native=np.array(self.noise_map.native.array).astype("float64"), kernel_native=np.array(self.psf.native.array).astype("float64"), native_index_for_slim_index=np.array( diff --git a/autoarray/dataset/imaging/w_tilde.py b/autoarray/dataset/imaging/w_tilde.py index b239bd457..95ebe5291 100644 --- a/autoarray/dataset/imaging/w_tilde.py +++ b/autoarray/dataset/imaging/w_tilde.py @@ -5,6 +5,7 @@ from 
autoarray.dataset.abstract.w_tilde import AbstractWTilde +from autoarray.inversion.inversion.imaging import inversion_imaging_util from autoarray.inversion.inversion.imaging import inversion_imaging_numba_util logger = logging.getLogger(__name__) diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py index 1a9460e98..2ddf8503b 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -271,6 +271,55 @@ def w_tilde_curvature_value_from( return curvature_value + +@numba_util.jit() +def data_vector_via_w_tilde_data_imaging_from( + w_tilde_data: np.ndarray, + data_to_pix_unique: np.ndarray, + data_weights: np.ndarray, + pix_lengths: np.ndarray, + pix_pixels: int, +) -> np.ndarray: + """ + Returns the data vector `D` from the `w_tilde_data` matrix (see `w_tilde_data_imaging_from`), which encodes the + the 1D image `d` and 1D noise-map values `\sigma` (see Warren & Dye 2003). + + This uses the array `data_to_pix_unique`, which describes the unique mappings of every set of image sub-pixels to + pixelization pixels and `data_weights`, which describes how many sub-pixels uniquely map to each pixelization + pixels (see `data_slim_to_pixelization_unique_from`). + + Parameters + ---------- + w_tilde_data + A matrix that encodes the PSF convolution values between the imaging divided by the noise map**2 that enables + efficient calculation of the data vector. + data_to_pix_unique + An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of + pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). + data_weights + For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping + based on the number of sub-pixels that map to pixelization pixel. 
+ pix_lengths + A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over + `data_to_pix_unique` and `data_weights`. + pix_pixels + The total number of pixels in the pixelization that reconstructs the data. + """ + + data_pixels = w_tilde_data.shape[0] + + data_vector = np.zeros(pix_pixels) + + for data_0 in range(data_pixels): + for pix_0_index in range(pix_lengths[data_0]): + data_0_weight = data_weights[data_0, pix_0_index] + pix_0 = data_to_pix_unique[data_0, pix_0_index] + + data_vector[pix_0] += data_0_weight * w_tilde_data[data_0] + + return data_vector + + @numba_util.jit() def curvature_matrix_via_w_tilde_curvature_preload_imaging_from( curvature_preload: np.ndarray, @@ -452,71 +501,6 @@ def curvature_matrix_off_diags_via_w_tilde_curvature_preload_imaging_from( return curvature_matrix -@numba_util.jit() -def data_linear_func_matrix_from( - curvature_weights_matrix: np.ndarray, - image_frame_1d_lengths: np.ndarray, - image_frame_1d_indexes: np.ndarray, - image_frame_1d_kernels: np.ndarray, -) -> np.ndarray: - """ - Returns a matrix that for each data pixel, maps it to the sum of the values of a linear object function convolved - with the PSF kernel at the data pixel. - - If a linear function in an inversion is fixed, its values can be evaluated and preloaded beforehand. For every - data pixel, the PSF convolution with this preloaded linear function can also be preloaded, in a matrix of - shape [data_pixels, 1]. - - Given that multiple linear functions can be used and fixed in an inversion, this matrix is extended to have - dimensions [data_pixels, total_fixed_linear_functions]. - - When mapper objects and linear functions are used simultaneously in an inversion, this preloaded matrix - significantly speed up the computation of their off-diagonal terms in the curvature matrix. 
- - This is similar to the preloading performed via the w-tilde formalism, except that there it is the PSF convolved - values of each noise-map value pair that are preloaded. - - In **PyAutoGalaxy** and **PyAutoLens**, this preload is used when linear light profiles are fixed in the model. - For example, when using a multi Gaussian expansion, the values defining how those Gaussians are evaluated - (e.g. `centre`, `ell_comps` and `sigma`) are often fixed in a model, meaning this matrix can be preloaded and - used for speed up. - - Parameters - ---------- - curvature_weights_matrix - The operated values of each linear function divided by the noise-map squared, in a matrix of shape - [data_pixels, total_fixed_linear_functions]. - image_frame_indexes - The indexes of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_kernels - The kernel values of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_length - The number of masked pixels it will blur light into (unmasked pixels are excluded, see the `Convolver` object). - - Returns - ------- - ndarray - A matrix of shape [data_pixels, total_fixed_linear_functions] that for each data pixel, maps it to the sum of - the values of a linear object function convolved with the PSF kernel at the data pixel. 
- """ - data_pixels = curvature_weights_matrix.shape[0] - linear_func_pixels = curvature_weights_matrix.shape[1] - - data_linear_func_matrix_dict = np.zeros(shape=(data_pixels, linear_func_pixels)) - - for data_0 in range(data_pixels): - for psf_index in range(image_frame_1d_lengths[data_0]): - data_index = image_frame_1d_indexes[data_0, psf_index] - kernel_value = image_frame_1d_kernels[data_0, psf_index] - - for linear_index in range(linear_func_pixels): - data_linear_func_matrix_dict[data_0, linear_index] += ( - kernel_value * curvature_weights_matrix[data_index, linear_index] - ) - - return data_linear_func_matrix_dict - - @numba_util.jit() def curvature_matrix_off_diags_via_data_linear_func_matrix_from( data_linear_func_matrix: np.ndarray, diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py index 5ea0b2574..36467f804 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py @@ -1,7 +1,5 @@ import jax.numpy as jnp -from autoarray import numba_util - import numpy as np @@ -148,3 +146,65 @@ def data_vector_via_blurred_mapping_matrix_from( """ return (image / noise_map**2.0) @ blurred_mapping_matrix +def data_linear_func_matrix_from( + curvature_weights_matrix: np.ndarray, + image_frame_1d_lengths: np.ndarray, + image_frame_1d_indexes: np.ndarray, + image_frame_1d_kernels: np.ndarray, +) -> np.ndarray: + """ + Returns a matrix that for each data pixel, maps it to the sum of the values of a linear object function convolved + with the PSF kernel at the data pixel. + + If a linear function in an inversion is fixed, its values can be evaluated and preloaded beforehand. For every + data pixel, the PSF convolution with this preloaded linear function can also be preloaded, in a matrix of + shape [data_pixels, 1]. 
+ + Given that multiple linear functions can be used and fixed in an inversion, this matrix is extended to have + dimensions [data_pixels, total_fixed_linear_functions]. + + When mapper objects and linear functions are used simultaneously in an inversion, this preloaded matrix + significantly speed up the computation of their off-diagonal terms in the curvature matrix. + + This is similar to the preloading performed via the w-tilde formalism, except that there it is the PSF convolved + values of each noise-map value pair that are preloaded. + + In **PyAutoGalaxy** and **PyAutoLens**, this preload is used when linear light profiles are fixed in the model. + For example, when using a multi Gaussian expansion, the values defining how those Gaussians are evaluated + (e.g. `centre`, `ell_comps` and `sigma`) are often fixed in a model, meaning this matrix can be preloaded and + used for speed up. + + Parameters + ---------- + curvature_weights_matrix + The operated values of each linear function divided by the noise-map squared, in a matrix of shape + [data_pixels, total_fixed_linear_functions]. + image_frame_indexes + The indexes of all masked pixels that the PSF blurs light into (see the `Convolver` object). + image_frame_kernels + The kernel values of all masked pixels that the PSF blurs light into (see the `Convolver` object). + image_frame_length + The number of masked pixels it will blur light into (unmasked pixels are excluded, see the `Convolver` object). + + Returns + ------- + ndarray + A matrix of shape [data_pixels, total_fixed_linear_functions] that for each data pixel, maps it to the sum of + the values of a linear object function convolved with the PSF kernel at the data pixel. 
+ """ + data_pixels = curvature_weights_matrix.shape[0] + linear_func_pixels = curvature_weights_matrix.shape[1] + + data_linear_func_matrix_dict = np.zeros(shape=(data_pixels, linear_func_pixels)) + + for data_0 in range(data_pixels): + for psf_index in range(image_frame_1d_lengths[data_0]): + data_index = image_frame_1d_indexes[data_0, psf_index] + kernel_value = image_frame_1d_kernels[data_0, psf_index] + + for linear_index in range(linear_func_pixels): + data_linear_func_matrix_dict[data_0, linear_index] += ( + kernel_value * curvature_weights_matrix[data_index, linear_index] + ) + + return data_linear_func_matrix_dict \ No newline at end of file diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index 1cb7a2f6d..3d772716f 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -17,6 +17,7 @@ from autoarray import exc from autoarray.inversion.inversion import inversion_util from autoarray.inversion.inversion.imaging import inversion_imaging_util +from autoarray.inversion.inversion.imaging import inversion_imaging_numba_util class InversionImagingWTilde(AbstractInversionImaging): @@ -91,33 +92,32 @@ def _data_vector_mapper(self) -> np.ndarray: This method is used to compute part of the `data_vector` if there are also linear function list objects in the inversion, and is separated into a separate method to enable preloading of the mapper `data_vector`. 
""" - return jnp.dot(self.mapping_matrix.T, self.w_tilde_data) - - # if not self.has(cls=AbstractMapper): - # return None - # - # data_vector = np.zeros(self.total_params) - # - # mapper_list = self.cls_list_from(cls=AbstractMapper) - # mapper_param_range = self.param_range_list_from(cls=AbstractMapper) - # - # for mapper_index, mapper in enumerate(mapper_list): - # data_vector_mapper = ( - # inversion_imaging_util.data_vector_via_w_tilde_data_imaging_from( - # w_tilde_data=np.array(self.w_tilde_data), - # data_to_pix_unique=np.array( - # mapper.unique_mappings.data_to_pix_unique - # ), - # data_weights=np.array(mapper.unique_mappings.data_weights), - # pix_lengths=np.array(mapper.unique_mappings.pix_lengths), - # pix_pixels=mapper.params, - # ) - # ) - # param_range = mapper_param_range[mapper_index] - # - # data_vector[param_range[0] : param_range[1],] = data_vector_mapper - # - # return data_vector + + if not self.has(cls=AbstractMapper): + return None + + data_vector = np.zeros(self.total_params) + + mapper_list = self.cls_list_from(cls=AbstractMapper) + mapper_param_range = self.param_range_list_from(cls=AbstractMapper) + + for mapper_index, mapper in enumerate(mapper_list): + data_vector_mapper = ( + inversion_imaging_numba_util.data_vector_via_w_tilde_data_imaging_from( + w_tilde_data=np.array(self.w_tilde_data), + data_to_pix_unique=np.array( + mapper.unique_mappings.data_to_pix_unique + ), + data_weights=np.array(mapper.unique_mappings.data_weights), + pix_lengths=np.array(mapper.unique_mappings.pix_lengths), + pix_pixels=mapper.params, + ) + ) + param_range = mapper_param_range[mapper_index] + + data_vector[param_range[0] : param_range[1],] = data_vector_mapper + + return data_vector @cached_property def data_vector(self) -> np.ndarray: @@ -148,17 +148,15 @@ def _data_vector_x1_mapper(self) -> np.ndarray: This method computes the `data_vector` whenthere is a single mapper object in the `Inversion`, which circumvents `np.concatenate` for speed up. 
""" - return self._data_vector_mapper - - # linear_obj = self.linear_obj_list[0] - # - # return inversion_imaging_util.data_vector_via_w_tilde_data_imaging_from( - # w_tilde_data=self.w_tilde_data, - # data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, - # data_weights=linear_obj.unique_mappings.data_weights, - # pix_lengths=linear_obj.unique_mappings.pix_lengths, - # pix_pixels=linear_obj.params, - # ) + linear_obj = self.linear_obj_list[0] + + return inversion_imaging_numba_util.data_vector_via_w_tilde_data_imaging_from( + w_tilde_data=self.w_tilde_data, + data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, + data_weights=linear_obj.unique_mappings.data_weights, + pix_lengths=linear_obj.unique_mappings.pix_lengths, + pix_pixels=linear_obj.params, + ) @property def _data_vector_multi_mapper(self) -> np.ndarray: @@ -172,7 +170,7 @@ def _data_vector_multi_mapper(self) -> np.ndarray: return np.concatenate( [ - inversion_imaging_util.data_vector_via_w_tilde_data_imaging_from( + inversion_imaging_numba_util.data_vector_via_w_tilde_data_imaging_from( w_tilde_data=np.array(self.w_tilde_data), data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, data_weights=linear_obj.unique_mappings.data_weights, @@ -286,7 +284,7 @@ def _curvature_matrix_mapper_diag(self) -> Optional[np.ndarray]: mapper_i = mapper_list[i] mapper_param_range_i = mapper_param_range_list[i] - diag = inversion_imaging_util.curvature_matrix_via_w_tilde_curvature_preload_imaging_from( + diag = inversion_imaging_numba_util.curvature_matrix_via_w_tilde_curvature_preload_imaging_from( curvature_preload=self.w_tilde.curvature_preload, curvature_indexes=self.w_tilde.indexes, curvature_lengths=self.w_tilde.lengths, @@ -321,7 +319,7 @@ def _curvature_matrix_off_diag_from( This function computes the off-diagonal terms of F using the w_tilde formalism. 
""" - curvature_matrix_off_diag_0 = inversion_imaging_util.curvature_matrix_off_diags_via_w_tilde_curvature_preload_imaging_from( + curvature_matrix_off_diag_0 = inversion_imaging_numba_util.curvature_matrix_off_diags_via_w_tilde_curvature_preload_imaging_from( curvature_preload=self.w_tilde.curvature_preload, curvature_indexes=self.w_tilde.indexes, curvature_lengths=self.w_tilde.lengths, @@ -335,7 +333,7 @@ def _curvature_matrix_off_diag_from( pix_pixels_1=mapper_1.params, ) - curvature_matrix_off_diag_1 = inversion_imaging_util.curvature_matrix_off_diags_via_w_tilde_curvature_preload_imaging_from( + curvature_matrix_off_diag_1 = inversion_imaging_numba_util.curvature_matrix_off_diags_via_w_tilde_curvature_preload_imaging_from( curvature_preload=self.w_tilde.curvature_preload, curvature_indexes=self.w_tilde.indexes, curvature_lengths=self.w_tilde.lengths, @@ -360,12 +358,7 @@ def _curvature_matrix_x1_mapper(self) -> np.ndarray: This method computes the `curvature_matrix` when there is a single mapper object in the `Inversion`, which circumvents `block_diag` for speed up. 
""" - - return inversion_util.curvature_matrix_via_w_tilde_from( - w_tilde=self.w_tilde.w_matrix, mapping_matrix=self.mapping_matrix - ) - - # return self._curvature_matrix_mapper_diag + return self._curvature_matrix_mapper_diag @property def _curvature_matrix_multi_mapper(self) -> np.ndarray: @@ -438,7 +431,7 @@ def _curvature_matrix_func_list_and_mapper(self) -> np.ndarray: / self.noise_map[:, None] ** 2 ) - off_diag = inversion_imaging_util.curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( + off_diag = inversion_imaging_numba_util.curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( data_to_pix_unique=mapper.unique_mappings.data_to_pix_unique, data_weights=mapper.unique_mappings.data_weights, pix_lengths=mapper.unique_mappings.pix_lengths, diff --git a/autoarray/util/__init__.py b/autoarray/util/__init__.py index 46b5afb5b..43801e059 100644 --- a/autoarray/util/__init__.py +++ b/autoarray/util/__init__.py @@ -17,6 +17,9 @@ from autoarray.inversion.inversion.imaging import ( inversion_imaging_util as inversion_imaging, ) +from autoarray.inversion.inversion.imaging import ( + inversion_imaging_numba_util as inversion_imaging, +) from autoarray.inversion.inversion.interferometer import ( inversion_interferometer_util as inversion_interferometer, ) From 6c8a3258f58ea476c95b010abd5e1680d2351ff0 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 17:26:19 +0100 Subject: [PATCH 05/17] fix _data_vector_x1_mapper --- autoarray/inversion/inversion/imaging/w_tilde.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index 3d772716f..821825d1e 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -151,7 +151,7 @@ def _data_vector_x1_mapper(self) -> np.ndarray: linear_obj = self.linear_obj_list[0] return 
inversion_imaging_numba_util.data_vector_via_w_tilde_data_imaging_from( - w_tilde_data=self.w_tilde_data, + w_tilde_data=np.array(self.w_tilde_data), data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, data_weights=linear_obj.unique_mappings.data_weights, pix_lengths=linear_obj.unique_mappings.pix_lengths, From 3126e930f75b8354a8f1b1b2102cb443d4abe4ba Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 17:35:52 +0100 Subject: [PATCH 06/17] more direct split of numba code --- .../imaging/inversion_imaging_numba_util.py | 97 +++++++++++++++++++ .../imaging/inversion_imaging_util.py | 2 + .../inversion/inversion/imaging/w_tilde.py | 11 +-- 3 files changed, 104 insertions(+), 6 deletions(-) diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py index 2ddf8503b..f0154550f 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -4,6 +4,73 @@ import numpy as np +@numba_util.jit() +def w_tilde_data_imaging_from( + image_native: np.ndarray, + noise_map_native: np.ndarray, + kernel_native: np.ndarray, + native_index_for_slim_index, +) -> np.ndarray: + """ + The matrix w_tilde is a matrix of dimensions [image_pixels, image_pixels] that encodes the PSF convolution of + every pair of image pixels given the noise map. This can be used to efficiently compute the curvature matrix via + the mappings between image and source pixels, in a way that omits having to perform the PSF convolution on every + individual source pixel. This provides a significant speed up for inversions of imaging datasets. + + When w_tilde is used to perform an inversion, the mapping matrices are not computed, meaning that they cannot be + used to compute the data vector. 
This method creates the vector `w_tilde_data` which allows for the data + vector to be computed efficiently without the mapping matrix. + + The matrix w_tilde_data is dimensions [image_pixels] and encodes the PSF convolution with the `weight_map`, + where the weights are the image-pixel values divided by the noise-map values squared: + + weight = image / noise**2.0 + + Parameters + ---------- + image_native + The two dimensional masked image of values which `w_tilde_data` is computed from. + noise_map_native + The two dimensional masked noise-map of values which `w_tilde_data` is computed from. + kernel_native + The two dimensional PSF kernel that `w_tilde_data` encodes the convolution of. + native_index_for_slim_index + An array of shape [total_x_pixels*sub_size] that maps pixels from the slimmed array to the native array. + + Returns + ------- + ndarray + A matrix that encodes the PSF convolution values between the imaging divided by the noise map**2 that enables + efficient calculation of the data vector. 
+ """ + + kernel_shift_y = -(kernel_native.shape[1] // 2) + kernel_shift_x = -(kernel_native.shape[0] // 2) + + image_pixels = len(native_index_for_slim_index) + + w_tilde_data = np.zeros((image_pixels,)) + + weight_map_native = image_native / noise_map_native**2.0 + + for ip0 in range(image_pixels): + ip0_y, ip0_x = native_index_for_slim_index[ip0] + + value = 0.0 + + for k0_y in range(kernel_native.shape[0]): + for k0_x in range(kernel_native.shape[1]): + weight_value = weight_map_native[ + ip0_y + k0_y + kernel_shift_y, ip0_x + k0_x + kernel_shift_x + ] + + if not np.isnan(weight_value): + value += kernel_native[k0_y, k0_x] * weight_value + + w_tilde_data[ip0] = value + + return w_tilde_data + @numba_util.jit() def w_tilde_curvature_imaging_from( noise_map_native: np.ndarray, kernel_native: np.ndarray, native_index_for_slim_index @@ -270,7 +337,37 @@ def w_tilde_curvature_value_from( return curvature_value +@numba_util.jit() +def data_vector_via_blurred_mapping_matrix_from( + blurred_mapping_matrix: np.ndarray, image: np.ndarray, noise_map: np.ndarray +) -> np.ndarray: + """ + Returns the data vector `D` from a blurred mapping matrix `f` and the 1D image `d` and 1D noise-map $\sigma$` + (see Warren & Dye 2003). + + Parameters + ---------- + blurred_mapping_matrix + The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels. + image + Flattened 1D array of the observed image the inversion is fitting. + noise_map + Flattened 1D array of the noise-map used by the inversion during the fit. 
+ """ + + data_shape = blurred_mapping_matrix.shape + + data_vector = np.zeros(data_shape[1]) + for data_index in range(data_shape[0]): + for pix_index in range(data_shape[1]): + data_vector[pix_index] += ( + image[data_index] + * blurred_mapping_matrix[data_index, pix_index] + / (noise_map[data_index] ** 2.0) + ) + + return data_vector @numba_util.jit() def data_vector_via_w_tilde_data_imaging_from( diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py index 36467f804..c355bf680 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py @@ -192,6 +192,8 @@ def data_linear_func_matrix_from( A matrix of shape [data_pixels, total_fixed_linear_functions] that for each data pixel, maps it to the sum of the values of a linear object function convolved with the PSF kernel at the data pixel. """ + ggg + data_pixels = curvature_weights_matrix.shape[0] linear_func_pixels = curvature_weights_matrix.shape[1] diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index 821825d1e..807f03bb4 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -16,7 +16,6 @@ from autoarray import exc from autoarray.inversion.inversion import inversion_util -from autoarray.inversion.inversion.imaging import inversion_imaging_util from autoarray.inversion.inversion.imaging import inversion_imaging_numba_util @@ -76,7 +75,7 @@ def __init__( @cached_property def w_tilde_data(self): - return inversion_imaging_util.w_tilde_data_imaging_from( + return inversion_imaging_numba_util.w_tilde_data_imaging_from( image_native=self.data.native.array, noise_map_native=self.noise_map.native.array, kernel_native=self.psf.native.array, @@ -104,7 +103,7 @@ def _data_vector_mapper(self) -> np.ndarray: for mapper_index, 
mapper in enumerate(mapper_list): data_vector_mapper = ( inversion_imaging_numba_util.data_vector_via_w_tilde_data_imaging_from( - w_tilde_data=np.array(self.w_tilde_data), + w_tilde_data=self.w_tilde_data, data_to_pix_unique=np.array( mapper.unique_mappings.data_to_pix_unique ), @@ -151,7 +150,7 @@ def _data_vector_x1_mapper(self) -> np.ndarray: linear_obj = self.linear_obj_list[0] return inversion_imaging_numba_util.data_vector_via_w_tilde_data_imaging_from( - w_tilde_data=np.array(self.w_tilde_data), + w_tilde_data=self.w_tilde_data, data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, data_weights=linear_obj.unique_mappings.data_weights, pix_lengths=linear_obj.unique_mappings.pix_lengths, @@ -171,7 +170,7 @@ def _data_vector_multi_mapper(self) -> np.ndarray: return np.concatenate( [ inversion_imaging_numba_util.data_vector_via_w_tilde_data_imaging_from( - w_tilde_data=np.array(self.w_tilde_data), + w_tilde_data=self.w_tilde_data, data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, data_weights=linear_obj.unique_mappings.data_weights, pix_lengths=linear_obj.unique_mappings.pix_lengths, @@ -207,7 +206,7 @@ def _data_vector_func_list_and_mapper(self) -> np.ndarray: linear_func ] - diag = inversion_imaging_util.data_vector_via_blurred_mapping_matrix_from( + diag = inversion_imaging_numba_util.data_vector_via_blurred_mapping_matrix_from( blurred_mapping_matrix=operated_mapping_matrix, image=self.data.array, noise_map=self.noise_map.array, From 673f0757810ca85b6c1c3ff6bcb8bd85b223ad8f Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 17:42:03 +0100 Subject: [PATCH 07/17] fix unitt est due to jax operated mapping matrix --- .../inversion/inversion/imaging/w_tilde.py | 2 +- .../inversion/inversion/test_factory.py | 28 +++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index 807f03bb4..2f8c0e91d 100644 
--- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -207,7 +207,7 @@ def _data_vector_func_list_and_mapper(self) -> np.ndarray: ] diag = inversion_imaging_numba_util.data_vector_via_blurred_mapping_matrix_from( - blurred_mapping_matrix=operated_mapping_matrix, + blurred_mapping_matrix=np.array(operated_mapping_matrix), image=self.data.array, noise_map=self.noise_map.array, ) diff --git a/test_autoarray/inversion/inversion/test_factory.py b/test_autoarray/inversion/inversion/test_factory.py index b8cdaf31f..1aaaa8cf9 100644 --- a/test_autoarray/inversion/inversion/test_factory.py +++ b/test_autoarray/inversion/inversion/test_factory.py @@ -612,3 +612,31 @@ def test__inversion_imaging__positive_only_solver(masked_imaging_7x7_no_blur): assert isinstance(inversion, aa.InversionImagingMapping) assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) assert inversion.reconstruction == pytest.approx(np.array([2.0]), 1.0e-4) + +# def test__data_linear_func_matrix_dict(masked_imaging_7x7, +# rectangular_mapper_7x7_3x3, +# ): +# +# mask = masked_imaging_7x7.mask +# +# grid = aa.Grid2D.from_mask(mask=mask) +# +# mapping_matrix = np.full(fill_value=0.5, shape=(9, 2)) +# mapping_matrix[0, 0] = 0.8 +# mapping_matrix[1, 1] = 0.4 +# +# linear_obj = aa.m.MockLinearObjFuncList( +# parameters=2, grid=grid, mapping_matrix=mapping_matrix +# ) +# +# inversion_mapping = aa.Inversion( +# dataset=masked_imaging_7x7, +# linear_obj_list=[linear_obj, rectangular_mapper_7x7_3x3], +# settings=aa.SettingsInversion(use_w_tilde=False, use_positive_only_solver=True), +# ) +# +# print(inversion_mapping.data_linear_func_matrix_dict) +# +# assert inversion_mapping.data_linear_func_matrix_dict == pytest.approx( +# None, 1.0e-4 +# ) \ No newline at end of file From 724af264734b55778bd5dc7ef5a79c895da5557f Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 17:45:02 +0100 Subject: [PATCH 08/17] 
test__data_linear_func_matrix_dict --- .../imaging/inversion_imaging_util.py | 1 - .../inversion/inversion/test_factory.py | 58 ++++++++++--------- 2 files changed, 31 insertions(+), 28 deletions(-) diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py index c355bf680..9c5a4e5b8 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py @@ -192,7 +192,6 @@ def data_linear_func_matrix_from( A matrix of shape [data_pixels, total_fixed_linear_functions] that for each data pixel, maps it to the sum of the values of a linear object function convolved with the PSF kernel at the data pixel. """ - ggg data_pixels = curvature_weights_matrix.shape[0] linear_func_pixels = curvature_weights_matrix.shape[1] diff --git a/test_autoarray/inversion/inversion/test_factory.py b/test_autoarray/inversion/inversion/test_factory.py index 1aaaa8cf9..f5a8b852a 100644 --- a/test_autoarray/inversion/inversion/test_factory.py +++ b/test_autoarray/inversion/inversion/test_factory.py @@ -613,30 +613,34 @@ def test__inversion_imaging__positive_only_solver(masked_imaging_7x7_no_blur): assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) assert inversion.reconstruction == pytest.approx(np.array([2.0]), 1.0e-4) -# def test__data_linear_func_matrix_dict(masked_imaging_7x7, -# rectangular_mapper_7x7_3x3, -# ): -# -# mask = masked_imaging_7x7.mask -# -# grid = aa.Grid2D.from_mask(mask=mask) -# -# mapping_matrix = np.full(fill_value=0.5, shape=(9, 2)) -# mapping_matrix[0, 0] = 0.8 -# mapping_matrix[1, 1] = 0.4 -# -# linear_obj = aa.m.MockLinearObjFuncList( -# parameters=2, grid=grid, mapping_matrix=mapping_matrix -# ) -# -# inversion_mapping = aa.Inversion( -# dataset=masked_imaging_7x7, -# linear_obj_list=[linear_obj, rectangular_mapper_7x7_3x3], -# settings=aa.SettingsInversion(use_w_tilde=False, 
use_positive_only_solver=True), -# ) -# -# print(inversion_mapping.data_linear_func_matrix_dict) -# -# assert inversion_mapping.data_linear_func_matrix_dict == pytest.approx( -# None, 1.0e-4 -# ) \ No newline at end of file +def test__data_linear_func_matrix_dict(masked_imaging_7x7, + rectangular_mapper_7x7_3x3, +): + + mask = masked_imaging_7x7.mask + + grid = aa.Grid2D.from_mask(mask=mask) + + mapping_matrix = np.full(fill_value=0.5, shape=(9, 2)) + mapping_matrix[0, 0] = 0.8 + mapping_matrix[1, 1] = 0.4 + + linear_obj = aa.m.MockLinearObjFuncList( + parameters=2, grid=grid, mapping_matrix=mapping_matrix + ) + + inversion_mapping = aa.Inversion( + dataset=masked_imaging_7x7, + linear_obj_list=[linear_obj, rectangular_mapper_7x7_3x3], + settings=aa.SettingsInversion(use_w_tilde=False, use_positive_only_solver=True), + ) + + assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][0] == pytest.approx( + [0.075, 0.05972222], 1.0e-4 + ) + assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][1] == pytest.approx( + [0.09166667, 0.07847222], 1.0e-4 + ) + assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][2] == pytest.approx( + [0.06458333, 0.05972222], 1.0e-4 + ) \ No newline at end of file From 6f969668cc6d3b10dc2239bdb6205312196a9c71 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 17:58:18 +0100 Subject: [PATCH 09/17] update data_linear_func_matrix_from to not use numba --- .../inversion/inversion/imaging/abstract.py | 5 ++- .../imaging/inversion_imaging_util.py | 33 +++++++++---------- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/autoarray/inversion/inversion/imaging/abstract.py b/autoarray/inversion/inversion/imaging/abstract.py index 8d85a5db1..9167af6f9 100644 --- a/autoarray/inversion/inversion/imaging/abstract.py +++ b/autoarray/inversion/inversion/imaging/abstract.py @@ -188,9 +188,8 @@ def data_linear_func_matrix_dict(self): data_linear_func_matrix = ( 
inversion_imaging_util.data_linear_func_matrix_from( curvature_weights_matrix=curvature_weights, - image_frame_1d_lengths=self.convolver.image_frame_1d_lengths, - image_frame_1d_indexes=self.convolver.image_frame_1d_indexes, - image_frame_1d_kernels=self.convolver.image_frame_1d_kernels, + kernel_native=self.psf.native, + mask=self.mask, ) ) diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py index 9c5a4e5b8..78eed58cf 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py @@ -1,7 +1,7 @@ import jax.numpy as jnp import numpy as np - +from scipy.signal import fftconvolve def psf_operator_matrix_dense_from( kernel_native: np.ndarray, @@ -147,10 +147,7 @@ def data_vector_via_blurred_mapping_matrix_from( return (image / noise_map**2.0) @ blurred_mapping_matrix def data_linear_func_matrix_from( - curvature_weights_matrix: np.ndarray, - image_frame_1d_lengths: np.ndarray, - image_frame_1d_indexes: np.ndarray, - image_frame_1d_kernels: np.ndarray, + curvature_weights_matrix: np.ndarray, kernel_native, mask ) -> np.ndarray: """ Returns a matrix that for each data pixel, maps it to the sum of the values of a linear object function convolved @@ -193,19 +190,19 @@ def data_linear_func_matrix_from( the values of a linear object function convolved with the PSF kernel at the data pixel. 
""" - data_pixels = curvature_weights_matrix.shape[0] - linear_func_pixels = curvature_weights_matrix.shape[1] - - data_linear_func_matrix_dict = np.zeros(shape=(data_pixels, linear_func_pixels)) + ny, nx = mask.shape_native + n_unmasked, n_funcs = curvature_weights_matrix.shape - for data_0 in range(data_pixels): - for psf_index in range(image_frame_1d_lengths[data_0]): - data_index = image_frame_1d_indexes[data_0, psf_index] - kernel_value = image_frame_1d_kernels[data_0, psf_index] + # Expand masked -> native grid + native = np.zeros((ny, nx, n_funcs)) + native[~mask] = curvature_weights_matrix # put values into unmasked positions - for linear_index in range(linear_func_pixels): - data_linear_func_matrix_dict[data_0, linear_index] += ( - kernel_value * curvature_weights_matrix[data_index, linear_index] - ) + # Convolve each function with PSF kernel + from scipy.signal import fftconvolve + blurred_list = [] + for i in range(n_funcs): + blurred = fftconvolve(native[..., i], kernel_native, mode="same") + # Re-mask: only keep unmasked pixels + blurred_list.append(blurred[~mask]) - return data_linear_func_matrix_dict \ No newline at end of file + return np.stack(blurred_list, axis=1) # shape (n_unmasked, n_funcs) \ No newline at end of file From 6877618d88183dae86598c65e50d1c55b4975ec9 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 18:07:11 +0100 Subject: [PATCH 10/17] remove use of frames in off diag --- .../imaging/inversion_imaging_numba_util.py | 117 ++++++++++-------- .../inversion/inversion/imaging/w_tilde.py | 5 +- 2 files changed, 64 insertions(+), 58 deletions(-) diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py index f0154550f..3adca27aa 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -655,74 +655,81 @@ def 
curvature_matrix_off_diags_via_data_linear_func_matrix_from( @numba_util.jit() -def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( - data_to_pix_unique: np.ndarray, - data_weights: np.ndarray, - pix_lengths: np.ndarray, - pix_pixels: int, - curvature_weights: np.ndarray, - image_frame_1d_lengths: np.ndarray, - image_frame_1d_indexes: np.ndarray, - image_frame_1d_kernels: np.ndarray, -) -> np.ndarray: +def convolve_with_kernel_native(curvature_native, psf_kernel): """ - Returns the off diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) between a mapper object - and a linear func object, using the unique mappings between data pixels and pixelization pixels. - - This takes as input the curvature weights of the linear function object, which are the values of the linear - function convolved with the PSF and divided by the noise-map squared. - - For each unique mapping between a data pixel and a pixelization pixel, the pixels which that pixel convolves - light into are computed, multiplied by their corresponding curvature weights and summed. This process also - accounts the sub-pixel mapping of each data pixel to the pixelization pixel - - This is done for every unique mapping of a data pixel to a pixelization pixel, giving the off-diagonal terms in - the curvature matrix. + Convolve each function slice of curvature_native with psf_kernel using direct sliding window. Parameters ---------- - data_to_pix_unique - An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of - pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). - data_weights - For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of these mapping - based on the number of sub-pixels that map to pixelization pixel. 
- pix_lengths - A 1D array describing how many unique pixels each data pixel maps too, which is used to iterate over - `data_to_pix_unique` and `data_weights`. - pix_pixels - The total number of pixels in the pixelization that reconstructs the data. - curvature_weights - The operated values of the linear func divided by the noise-map squared. - image_frame_indexes - The indexes of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_kernels - The kernel values of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_length - The number of masked pixels it will blur light into (unmasked pixels are excluded, see the `Convolver` object). + curvature_native : ndarray (ny, nx, n_funcs) + Curvature weights expanded to the native grid, 0 in masked regions. + psf_kernel : ndarray (ky, kx) + The PSF kernel. Returns ------- - ndarray - The curvature matrix `F` (see Warren & Dye 2003). + blurred_native : ndarray (ny, nx, n_funcs) + The curvature weights convolved with the PSF. 
""" + ny, nx, n_funcs = curvature_native.shape + ky, kx = psf_kernel.shape + cy, cx = ky // 2, kx // 2 # kernel center + + blurred_native = np.zeros_like(curvature_native) + + for f in range(n_funcs): # parallelize over functions + for y in range(ny): + for x in range(nx): + acc = 0.0 + for dy in range(ky): + for dx in range(kx): + yy = y + dy - cy + xx = x + dx - cx + if 0 <= yy < ny and 0 <= xx < nx: + acc += psf_kernel[dy, dx] * curvature_native[yy, xx, f] + blurred_native[y, x, f] = acc + return blurred_native +@numba_util.jit() +def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( + data_to_pix_unique: np.ndarray, + data_weights: np.ndarray, + pix_lengths: np.ndarray, + pix_pixels: int, + curvature_weights: np.ndarray, # shape (n_unmasked, n_funcs) + mask: np.ndarray, # shape (ny, nx), bool + psf_kernel: np.ndarray # shape (ky, kx) +) -> np.ndarray: + """ + Compute the off-diagonal curvature terms using the PSF kernel directly (Numba version). + """ data_pixels = data_weights.shape[0] - linear_func_pixels = curvature_weights.shape[1] - - off_diag = np.zeros((pix_pixels, linear_func_pixels)) - + n_funcs = curvature_weights.shape[1] + ny, nx = mask.shape + + # Expand curvature weights into native grid + curvature_native = np.zeros((ny, nx, n_funcs)) + unmasked_coords = np.argwhere(~mask) + for i, (y, x) in enumerate(unmasked_coords): + for f in range(n_funcs): + curvature_native[y, x, f] = curvature_weights[i, f] + + # Convolve in native space + blurred_native = convolve_with_kernel_native(curvature_native, psf_kernel) + + # Map back to slim representation + blurred_slim = np.zeros((data_pixels, n_funcs)) + for i, (y, x) in enumerate(unmasked_coords): + for f in range(n_funcs): + blurred_slim[i, f] = blurred_native[y, x, f] + + # Accumulate into off_diag + off_diag = np.zeros((pix_pixels, n_funcs)) for data_0 in range(data_pixels): for pix_0_index in range(pix_lengths[data_0]): data_0_weight = data_weights[data_0, pix_0_index] pix_0 = 
data_to_pix_unique[data_0, pix_0_index] - - for psf_index in range(image_frame_1d_lengths[data_0]): - data_index = image_frame_1d_indexes[data_0, psf_index] - kernel_value = image_frame_1d_kernels[data_0, psf_index] - - off_diag[pix_0, :] += ( - data_0_weight * curvature_weights[data_index, :] * kernel_value - ) + for f in range(n_funcs): + off_diag[pix_0, f] += data_0_weight * blurred_slim[data_0, f] return off_diag \ No newline at end of file diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index 2f8c0e91d..1f526d7f6 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -436,9 +436,8 @@ def _curvature_matrix_func_list_and_mapper(self) -> np.ndarray: pix_lengths=mapper.unique_mappings.pix_lengths, pix_pixels=mapper.params, curvature_weights=np.array(curvature_weights), - image_frame_1d_lengths=self.convolver.image_frame_1d_lengths, - image_frame_1d_indexes=self.convolver.image_frame_1d_indexes, - image_frame_1d_kernels=self.convolver.image_frame_1d_kernels, + mask=self.mask.array, + psf_kernel=self.psf.native.array ) curvature_matrix[ From a3ab883c9f7d14644dac1daf13159cfe932db6d8 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 18:09:04 +0100 Subject: [PATCH 11/17] docstring --- .../imaging/inversion_imaging_numba_util.py | 41 ++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py index 3adca27aa..4058e671f 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -701,7 +701,46 @@ def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( psf_kernel: np.ndarray # shape (ky, kx) ) -> np.ndarray: """ - Compute the off-diagonal 
curvature terms using the PSF kernel directly (Numba version). + Returns the off-diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) + between a mapper object and a linear func object, using the unique mappings between + data pixels and pixelization pixels. + + This version applies the PSF directly as a 2D convolution kernel. The curvature + weights of the linear function object (values of the linear function divided by the + noise-map squared) are expanded into the native 2D image grid, convolved with the PSF + kernel, and then remapped back to the 1D slim representation. + + For each unique mapping between a data pixel and a pixelization pixel, the convolved + curvature weights at that data pixel are multiplied by the mapping weights and + accumulated into the off-diagonal block of the curvature matrix. This accounts for + sub-pixel mappings between data pixels and pixelization pixels. + + Parameters + ---------- + data_to_pix_unique : ndarray + An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) + to its unique set of pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). + data_weights : ndarray + For every unique mapping between a set of data sub-pixels and a pixelization pixel, + the weight of this mapping based on the number of sub-pixels that map to the pixelization pixel. + pix_lengths : ndarray + A 1D array describing how many unique pixels each data pixel maps to. Used to iterate over + `data_to_pix_unique` and `data_weights`. + pix_pixels : int + The total number of pixels in the pixelization that reconstructs the data. + curvature_weights : ndarray + The operated values of the linear function divided by the noise-map squared, with shape + [n_unmasked_data_pixels, n_linear_func_pixels]. + mask : ndarray + A 2D boolean mask of shape (ny, nx) indicating which pixels are in the data region. 
+ psf_kernel : ndarray + The PSF kernel in its native 2D form, centered (odd dimensions recommended). + + Returns + ------- + ndarray + The off-diagonal block of the curvature matrix `F` (see Warren & Dye 2003), + with shape [pix_pixels, n_linear_func_pixels]. """ data_pixels = data_weights.shape[0] n_funcs = curvature_weights.shape[1] From aa71fb3e3ec572468d2ebedbf44ca77e98959777 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 18:12:02 +0100 Subject: [PATCH 12/17] remove convolver --- autoarray/__init__.py | 1 - autoarray/dataset/imaging/dataset.py | 21 - autoarray/inversion/convolver.py | 544 ------------------ autoarray/inversion/inversion/abstract.py | 4 - .../inversion/inversion/dataset_interface.py | 2 - .../imaging/inversion_imaging_numba_util.py | 28 +- .../imaging/inversion_imaging_util.py | 13 +- .../inversion/inversion/imaging/w_tilde.py | 2 +- .../inversion/mock/mock_inversion_imaging.py | 2 - autoarray/mock.py | 1 - autoarray/operators/mock/mock_psf.py | 8 - .../inversion/inversion/test_factory.py | 22 +- 12 files changed, 36 insertions(+), 612 deletions(-) delete mode 100644 autoarray/inversion/convolver.py diff --git a/autoarray/__init__.py b/autoarray/__init__.py index 54dc7a84f..3aa146ef7 100644 --- a/autoarray/__init__.py +++ b/autoarray/__init__.py @@ -24,7 +24,6 @@ from .fit.fit_imaging import FitImaging from .fit.fit_interferometer import FitInterferometer from .geometry.geometry_2d import Geometry2D -from .inversion.convolver import Convolver from .inversion.pixelization.mappers.abstract import AbstractMapper from .inversion.pixelization import mesh from .inversion.pixelization import image_mesh diff --git a/autoarray/dataset/imaging/dataset.py b/autoarray/dataset/imaging/dataset.py index 7130f9151..b69e7dd6d 100644 --- a/autoarray/dataset/imaging/dataset.py +++ b/autoarray/dataset/imaging/dataset.py @@ -194,27 +194,6 @@ def __init__( psf=self.psf, ) - @cached_property - def convolver(self): - """ - Returns a `Convolver` from a 
mask and 2D PSF kernel. - - The `Convolver` stores in memory the array indexing between the mask and PSF, enabling efficient 2D PSF - convolution of images and matrices used for linear algebra calculations (see `operators.convolver`). - - This uses lazy allocation such that the calculation is only performed when the convolver is used, ensuring - efficient set up of the `Imaging` class. - - Returns - ------- - Convolver - The convolver given the masked imaging data's mask and PSF. - """ - - from autoarray.inversion.convolver import Convolver - - return Convolver(mask=self.mask, kernel=self.psf) - @cached_property def w_tilde(self): """ diff --git a/autoarray/inversion/convolver.py b/autoarray/inversion/convolver.py deleted file mode 100644 index 70eb6efdf..000000000 --- a/autoarray/inversion/convolver.py +++ /dev/null @@ -1,544 +0,0 @@ -from autoarray import numba_util -import numpy as np - -from autoarray.structures.arrays.uniform_2d import Array2D - -from autoarray import exc -from autoarray.mask import mask_2d_util - - -class Convolver: - def __init__(self, mask, kernel): - """ - Class to setup the 1D convolution of an / mapping matrix. - - Take a simple 3x3 and masks: - - [[2, 8, 2], - [5, 7, 5], - [3, 1, 4]] - - [[True, False, True], (True means that the value is masked) - [False, False, False], - [True, False, True]] - - A set of values in a corresponding 1d array of this might be represented as: - - [2, 8, 2, 5, 7, 5, 3, 1, 4] - - and after masking as: - - [8, 5, 7, 5, 1] - - Setup is required to perform 2D real-space convolution on the masked array. This module finds the \ - relationship between the unmasked 2D data, masked data and kernel, so that 2D real-space convolutions \ - can be efficiently applied to reduced 1D masked structures. - - This calculation also accounts for the blurring of light outside of the masked regions which blurs into \ - the masked region. 
- - - **IMAGE FRAMES:** - - For a masked in 2D, one can compute for every pixel all of the unmasked pixels it will blur light into for \ - a given PSF kernel size, e.g.: - - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI This is an imaging.Mask2D, where: - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI x = `True` (Pixel is masked and excluded from lens) - IxIxIxIoIoIoIxIxIxIxI o = `False` (Pixel is not masked and included in lens) - IxIxIxIoIoIoIxIxIxIxI - IxIxIxIoIoIoIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - - Here, there are 9 unmasked pixels. Indexing of each unmasked pixel goes from the top-left corner right and \ - downwards, therefore: - - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - IxIxIxI0I1I2IxIxIxIxI - IxIxIxI3I4I5IxIxIxIxI - IxIxIxI6I7I8IxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - - For every unmasked pixel, the Convolver over-lays the PSF and computes three quantities; - - image_frame_indexes - The indexes of all masked pixels it will blur light into. - image_frame_kernels - The kernel values that overlap each masked pixel it will blur light into. - image_frame_length - The number of masked pixels it will blur light into (unmasked pixels are excluded) - - For example, if we had the following 3x3 kernel: - - I0.1I0.2I0.3I - I0.4I0.5I0.6I - I0.7I0.8I0.9I - - For pixel 0 above, when we overlap the kernel 4 unmasked pixels overlap this kernel, such that: - - image_frame_indexes = [0, 1, 3, 4] - image_frame_kernels = [0.5, 0.6, 0.8, 0.9] - image_frame_length = 4 - - Noting that the other 5 kernel values (0.1, 0.2, 0.3, 0.4, 0.7) overlap masked pixels and are thus discarded. - - For pixel 1, we get the following results: - - image_frame_indexes = [0, 1, 2, 3, 4, 5] - image_frame_kernels = [0.4, 0.5, 0.6, 0.7, 0.8, 0.9] - image_frame_lengths = 6 - - In the majority of cases, the kernel will overlap only unmasked pixels. 
This is the case above for \ - central pixel 4, where: - - image_frame_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8] - image_frame_kernels = [0,1, 0.2, 0,3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] - image_frame_lengths = 9 - - Once we have set up all these quantities, the convolution routine simply uses them to convolve a 1D array of a - masked or the masked of a util in the inversion module. - - - **BLURRING FRAMES:** - - Whilst the scheme above accounts for all blurred light within the masks, it does not account for the fact that \ - pixels outside of the masks will also blur light into it. This effect is accounted for using blurring frames. - - It is omitted for mapping matrix blurring, as an inversion does not fit data outside of the masks. - - First, a blurring masks is computed from a masks, which describes all pixels which are close enough to the masks \ - to blur light into it for a given kernel size. Following the example above, the following blurring masks is \ - computed: - - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI This is an example grid.Mask2D, where: - IxIxIxIxIxIxIxIxIxIxI - IxIxIoIoIoIoIoIxIxIxI x = `True` (Pixel is masked and excluded from lens) - IxIxIoIxIxIxIoIxIxIxI o = `False` (Pixel is not masked and included in lens) - IxIxIoIxIxIxIoIxIxIxI - IxIxIoIxIxIxIoIxIxIxI - IxIxIoIoIoIoIoIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - IxIxIxIxIxIxIxIxIxIxI - - Indexing again goes from the top-left corner right and downwards: - - IxIxI xI xI xI xI xIxIxIxI - IxIxI xI xI xI xI xIxIxIxI - IxIxI xI xI xI xI xIxIxIxI - IxIxI 0I 1I 2I 3I 4IxIxIxI - IxIxI 5I xI xI xI 6IxIxIxI - IxIxI 7I xI xI xI 8IxIxIxI - IxIxI 9I xI xI xI10IxIxIxI - IxIxI11I12I13I14I15IxIxIxI - IxIxI xI xI xI xI xIxIxIxI - IxIxI xI xI xI xI xIxIxIxI - - For every unmasked blurring-pixel, the Convolver over-lays the PSF kernel and computes three quantities; - - blurring_frame_indexes - The indexes of all unmasked pixels (not unmasked blurring pixels) it will \ - blur light into. 
- bluring_frame_kernels - The kernel values that overlap each pixel it will blur light into. - blurring_frame_length - The number of pixels it will blur light into. - - The blurring frame therefore does not perform any blurring which blurs light into other blurring pixels. \ - It only performs computations which add light inside of the masks. - - For pixel 0 above, when we overlap the 3x3 kernel above only 1 unmasked pixels overlaps the kernel, such that: - - blurring_frame_indexes = [0] (This 0 refers to pixel 0 within the masks, not blurring_frame_pixel 0) - blurring_frame_kernels = [0.9] - blurring_frame_length = 1 - - For pixel 1 above, when we overlap the 3x3 kernel above 2 unmasked pixels overlap the kernel, such that: - - blurring_frame_indexes = [0, 1] (This 0 and 1 refer to pixels 0 and 1 within the masks) - blurring_frame_kernels = [0.8, 0.9] - blurring_frame_length = 2 - - For pixel 3 above, when we overlap the 3x3 kernel above 3 unmasked pixels overlap the kernel, such that: - - blurring_frame_indexes = [0, 1, 2] (Again, these are pixels 0, 1 and 2) - blurring_frame_kernels = [0.7, 0.8, 0.9] - blurring_frame_length = 3 - - Parameters - ---------- - mask - The mask within which the convolved signal is calculated. - blurring_mask - A masks of pixels outside the masks but whose light blurs into it after PSF convolution. - kernel : grid.PSF or ndarray - An array representing a PSF. 
- """ - if kernel.shape_native[0] % 2 == 0 or kernel.shape_native[1] % 2 == 0: - raise exc.KernelException("PSF kernel must be odd") - - self.mask = mask - - self.mask_index_array = np.full(mask.shape, -1) - self.pixels_in_mask = int(np.size(mask) - np.sum(mask)) - - count = 0 - for x in range(mask.shape[0]): - for y in range(mask.shape[1]): - if not mask[x, y]: - self.mask_index_array[x, y] = count - count += 1 - - self.kernel = kernel - self.kernel_max_size = self.kernel.shape_native[0] * self.kernel.shape_native[1] - - mask_1d_index = 0 - self.image_frame_1d_indexes = np.zeros( - (self.pixels_in_mask, self.kernel_max_size), dtype="int" - ) - self.image_frame_1d_kernels = np.zeros( - (self.pixels_in_mask, self.kernel_max_size) - ) - self.image_frame_1d_lengths = np.zeros((self.pixels_in_mask), dtype="int") - for x in range(self.mask_index_array.shape[0]): - for y in range(self.mask_index_array.shape[1]): - if not mask[x][y]: - ( - image_frame_1d_indexes, - image_frame_1d_kernels, - ) = self.frame_at_coordinates_jit( - coordinates=(x, y), - mask=np.array(mask), - mask_index_array=self.mask_index_array, - kernel_2d=np.array(self.kernel.native.array), - ) - self.image_frame_1d_indexes[mask_1d_index, :] = ( - image_frame_1d_indexes - ) - self.image_frame_1d_kernels[mask_1d_index, :] = ( - image_frame_1d_kernels - ) - self.image_frame_1d_lengths[mask_1d_index] = image_frame_1d_indexes[ - image_frame_1d_indexes >= 0 - ].shape[0] - mask_1d_index += 1 - - self.blurring_mask = mask_2d_util.blurring_mask_2d_from( - mask_2d=np.array(mask), - kernel_shape_native=kernel.shape_native, - ) - - self.pixels_in_blurring_mask = int( - np.size(self.blurring_mask) - np.sum(self.blurring_mask) - ) - - mask_1d_index = 0 - self.blurring_frame_1d_indexes = np.zeros( - (self.pixels_in_blurring_mask, self.kernel_max_size), dtype="int" - ) - self.blurring_frame_1d_kernels = np.zeros( - (self.pixels_in_blurring_mask, self.kernel_max_size) - ) - self.blurring_frame_1d_lengths = np.zeros( - 
(self.pixels_in_blurring_mask), dtype="int" - ) - for x in range(mask.shape[0]): - for y in range(mask.shape[1]): - if mask[x][y] and not self.blurring_mask[x, y]: - ( - image_frame_1d_indexes, - image_frame_1d_kernels, - ) = self.frame_at_coordinates_jit( - coordinates=(x, y), - mask=np.array(mask), - mask_index_array=np.array(self.mask_index_array), - kernel_2d=np.array(self.kernel.native.array), - ) - self.blurring_frame_1d_indexes[mask_1d_index, :] = ( - image_frame_1d_indexes - ) - self.blurring_frame_1d_kernels[mask_1d_index, :] = ( - image_frame_1d_kernels - ) - self.blurring_frame_1d_lengths[mask_1d_index] = ( - image_frame_1d_indexes[image_frame_1d_indexes >= 0].shape[0] - ) - mask_1d_index += 1 - - @staticmethod - @numba_util.jit() - def frame_at_coordinates_jit(coordinates, mask, mask_index_array, kernel_2d): - """ - Returns the frame (indexes of pixels light is blurred into) and kernel_frame (kernel kernel values of those \ - pixels) for a given coordinate in a masks and its PSF. 
- - Parameters - ---------- - coordinates: Tuple[int, int] - The coordinates of mask_index_array on which the frame should be centred - kernel_shape_native: Tuple[int, int] - The shape of the kernel for which this frame will be used - """ - - kernel_shape_native = kernel_2d.shape - kernel_max_size = kernel_shape_native[0] * kernel_shape_native[1] - - half_x = int(kernel_shape_native[0] / 2) - half_y = int(kernel_shape_native[1] / 2) - - frame = -1 * np.ones((kernel_max_size)) - kernel_frame = -1.0 * np.ones((kernel_max_size)) - - count = 0 - for i in range(kernel_shape_native[0]): - for j in range(kernel_shape_native[1]): - x = coordinates[0] - half_x + i - y = coordinates[1] - half_y + j - if ( - 0 <= x < mask_index_array.shape[0] - and 0 <= y < mask_index_array.shape[1] - ): - value = mask_index_array[x, y] - if value >= 0 and not mask[x, y]: - frame[count] = value - kernel_frame[count] = kernel_2d[i, j] - count += 1 - - return frame, kernel_frame - - def convolve_image(self, image, blurring_image): - """ - For a given 1D array and blurring array, convolve the two using this convolver. - - Parameters - ---------- - image - 1D array of the values which are to be blurred with the convolver's PSF. - blurring_image - 1D array of the blurring values which blur into the array after PSF convolution. - """ - - if self.blurring_mask is None: - raise exc.KernelException( - "You cannot use the convolve_image function of a Convolver if the Convolver was" - "not created with a blurring_mask." 
- ) - - convolved_image = self.convolve_jit( - image_1d_array=np.array(image.slim.array), - image_frame_1d_indexes=self.image_frame_1d_indexes, - image_frame_1d_kernels=self.image_frame_1d_kernels, - image_frame_1d_lengths=self.image_frame_1d_lengths, - blurring_1d_array=np.array(blurring_image.slim.array), - blurring_frame_1d_indexes=self.blurring_frame_1d_indexes, - blurring_frame_1d_kernels=self.blurring_frame_1d_kernels, - blurring_frame_1d_lengths=self.blurring_frame_1d_lengths, - ) - - return Array2D(values=convolved_image, mask=self.mask) - - @staticmethod - @numba_util.jit() - def convolve_jit( - image_1d_array, - image_frame_1d_indexes, - image_frame_1d_kernels, - image_frame_1d_lengths, - blurring_1d_array, - blurring_frame_1d_indexes, - blurring_frame_1d_kernels, - blurring_frame_1d_lengths, - ): - blurred_image_1d = np.zeros(image_1d_array.shape) - - for image_1d_index in range(len(image_1d_array)): - frame_1d_indexes = image_frame_1d_indexes[image_1d_index] - frame_1d_kernel = image_frame_1d_kernels[image_1d_index] - frame_1d_length = image_frame_1d_lengths[image_1d_index] - image_value = image_1d_array[image_1d_index] - - for kernel_1d_index in range(frame_1d_length): - vector_index = frame_1d_indexes[kernel_1d_index] - kernel_value = frame_1d_kernel[kernel_1d_index] - blurred_image_1d[vector_index] += image_value * kernel_value - - for blurring_1d_index in range(len(blurring_1d_array)): - frame_1d_indexes = blurring_frame_1d_indexes[blurring_1d_index] - frame_1d_kernel = blurring_frame_1d_kernels[blurring_1d_index] - frame_1d_length = blurring_frame_1d_lengths[blurring_1d_index] - image_value = blurring_1d_array[blurring_1d_index] - - for kernel_1d_index in range(frame_1d_length): - vector_index = frame_1d_indexes[kernel_1d_index] - kernel_value = frame_1d_kernel[kernel_1d_index] - blurred_image_1d[vector_index] += image_value * kernel_value - - return blurred_image_1d - - def convolve_image_no_blurring(self, image): - """For a given 1D array and 
blurring array, convolve the two using this convolver. - - Parameters - ---------- - image - 1D array of the values which are to be blurred with the convolver's PSF. - """ - - convolved_image = self.convolve_no_blurring_jit( - image_1d_array=np.array(image.slim), - image_frame_1d_indexes=self.image_frame_1d_indexes, - image_frame_1d_kernels=self.image_frame_1d_kernels, - image_frame_1d_lengths=self.image_frame_1d_lengths, - ) - - return Array2D(values=convolved_image, mask=self.mask) - - def convolve_image_no_blurring_interpolation(self, image): - """For a given 1D array and blurring array, convolve the two using this convolver. - - Parameters - ---------- - image - 1D array of the values which are to be blurred with the convolver's PSF. - """ - - convolved_image = self.convolve_no_blurring_jit( - image_1d_array=image, - image_frame_1d_indexes=self.image_frame_1d_indexes, - image_frame_1d_kernels=self.image_frame_1d_kernels, - image_frame_1d_lengths=self.image_frame_1d_lengths, - ) - - return Array2D(values=convolved_image, mask=self.mask) - - @staticmethod - @numba_util.jit() - def convolve_no_blurring_jit( - image_1d_array, - image_frame_1d_indexes, - image_frame_1d_kernels, - image_frame_1d_lengths, - ): - blurred_image_1d = np.zeros(image_1d_array.shape) - - for image_1d_index in range(len(image_1d_array)): - frame_1d_indexes = image_frame_1d_indexes[image_1d_index] - frame_1d_kernel = image_frame_1d_kernels[image_1d_index] - frame_1d_length = image_frame_1d_lengths[image_1d_index] - image_value = image_1d_array[image_1d_index] - - for kernel_1d_index in range(frame_1d_length): - vector_index = frame_1d_indexes[kernel_1d_index] - kernel_value = frame_1d_kernel[kernel_1d_index] - blurred_image_1d[vector_index] += image_value * kernel_value - - return blurred_image_1d - - def convolve_mapping_matrix(self, mapping_matrix): - """For a given inversion mapping matrix, convolve every pixel's mapped with the PSF kernel. 
- - A mapping matrix provides non-zero entries in all elements which map two pixels to one another - (see *inversions.mappers*). - - For example, lets take an which is masked using a 'cross' of 5 pixels: - - [[ True, False, True]], - [[False, False, False]], - [[ True, False, True]] - - As example mapping matrix of this cross is as follows (5 pixels x 3 source pixels): - - [1, 0, 0] [0->0] - [1, 0, 0] [1->0] - [0, 1, 0] [2->1] - [0, 1, 0] [3->1] - [0, 0, 1] [4->2] - - For each source-pixel, we can create an of its unit-surface brightnesses by util the non-zero - entries back to masks. For example, doing this for source pixel 1 gives: - - [[0.0, 1.0, 0.0]], - [[1.0, 0.0, 0.0]] - [[0.0, 0.0, 0.0]] - - And source pixel 2: - - [[0.0, 0.0, 0.0]], - [[0.0, 1.0, 1.0]] - [[0.0, 0.0, 0.0]] - - We then convolve each of these with our PSF kernel, in 2 dimensions, like we would a grid. For - example, using the kernel below: - - kernel: - - [[0.0, 0.1, 0.0]] - [[0.1, 0.6, 0.1]] - [[0.0, 0.1, 0.0]] - - Blurred Source Pixel 1 (we don't need to perform the convolution into masked pixels): - - [[0.0, 0.6, 0.0]], - [[0.6, 0.0, 0.0]], - [[0.0, 0.0, 0.0]] - - Blurred Source pixel 2: - - [[0.0, 0.0, 0.0]], - [[0.0, 0.7, 0.7]], - [[0.0, 0.0, 0.0]] - - Finally, we map each of these blurred back to a blurred mapping matrix, which is analogous to the - mapping matrix. - - [0.6, 0.0, 0.0] [0->0] - [0.6, 0.0, 0.0] [1->0] - [0.0, 0.7, 0.0] [2->1] - [0.0, 0.7, 0.0] [3->1] - [0.0, 0.0, 0.6] [4->2] - - If the mapping matrix is sub-gridded, we perform the convolution on the fractional surface brightnesses in an - identical fashion to above. - - Parameters - ---------- - mapping_matrix - The 2D mapping matrix describing how every inversion pixel maps to a pixel on the data pixel. 
- """ - return self.convolve_matrix_jit( - mapping_matrix=mapping_matrix, - image_frame_1d_indexes=self.image_frame_1d_indexes, - image_frame_1d_kernels=self.image_frame_1d_kernels, - image_frame_1d_lengths=self.image_frame_1d_lengths, - ) - - @staticmethod - @numba_util.jit() - def convolve_matrix_jit( - mapping_matrix, - image_frame_1d_indexes, - image_frame_1d_kernels, - image_frame_1d_lengths, - ): - blurred_mapping_matrix = np.zeros(mapping_matrix.shape) - - for pixel_1d_index in range(mapping_matrix.shape[1]): - for image_1d_index in range(mapping_matrix.shape[0]): - value = mapping_matrix[image_1d_index, pixel_1d_index] - - if value > 0: - frame_1d_indexes = image_frame_1d_indexes[image_1d_index] - frame_1d_kernel = image_frame_1d_kernels[image_1d_index] - frame_1d_length = image_frame_1d_lengths[image_1d_index] - - for kernel_1d_index in range(frame_1d_length): - vector_index = frame_1d_indexes[kernel_1d_index] - kernel_value = frame_1d_kernel[kernel_1d_index] - blurred_mapping_matrix[vector_index, pixel_1d_index] += ( - value * kernel_value - ) - - return blurred_mapping_matrix diff --git a/autoarray/inversion/inversion/abstract.py b/autoarray/inversion/inversion/abstract.py index 2e3d240cc..fe9071277 100644 --- a/autoarray/inversion/inversion/abstract.py +++ b/autoarray/inversion/inversion/abstract.py @@ -85,10 +85,6 @@ def data(self): def noise_map(self): return self.dataset.noise_map - @property - def convolver(self): - return self.dataset.convolver - def has(self, cls: Type) -> bool: """ Does this `Inversion` have an attribute which is of type `cls`? 
diff --git a/autoarray/inversion/inversion/dataset_interface.py b/autoarray/inversion/inversion/dataset_interface.py index f37efef36..cf5960bf8 100644 --- a/autoarray/inversion/inversion/dataset_interface.py +++ b/autoarray/inversion/inversion/dataset_interface.py @@ -5,7 +5,6 @@ def __init__( noise_map, grids=None, psf=None, - convolver=None, transformer=None, w_tilde=None, noise_covariance_matrix=None, @@ -61,7 +60,6 @@ def __init__( self.noise_map = noise_map self.grids = grids self.psf = psf - self.convolver = convolver self.transformer = transformer self.w_tilde = w_tilde self.noise_covariance_matrix = noise_covariance_matrix diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py index 4058e671f..6cb517415 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -4,6 +4,7 @@ import numpy as np + @numba_util.jit() def w_tilde_data_imaging_from( image_native: np.ndarray, @@ -71,6 +72,7 @@ def w_tilde_data_imaging_from( return w_tilde_data + @numba_util.jit() def w_tilde_curvature_imaging_from( noise_map_native: np.ndarray, kernel_native: np.ndarray, native_index_for_slim_index @@ -337,6 +339,7 @@ def w_tilde_curvature_value_from( return curvature_value + @numba_util.jit() def data_vector_via_blurred_mapping_matrix_from( blurred_mapping_matrix: np.ndarray, image: np.ndarray, noise_map: np.ndarray @@ -369,6 +372,7 @@ def data_vector_via_blurred_mapping_matrix_from( return data_vector + @numba_util.jit() def data_vector_via_w_tilde_data_imaging_from( w_tilde_data: np.ndarray, @@ -598,6 +602,7 @@ def curvature_matrix_off_diags_via_w_tilde_curvature_preload_imaging_from( return curvature_matrix + @numba_util.jit() def curvature_matrix_off_diags_via_data_linear_func_matrix_from( data_linear_func_matrix: np.ndarray, @@ -690,15 +695,16 @@ def 
convolve_with_kernel_native(curvature_native, psf_kernel): blurred_native[y, x, f] = acc return blurred_native + @numba_util.jit() def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( data_to_pix_unique: np.ndarray, data_weights: np.ndarray, pix_lengths: np.ndarray, pix_pixels: int, - curvature_weights: np.ndarray, # shape (n_unmasked, n_funcs) - mask: np.ndarray, # shape (ny, nx), bool - psf_kernel: np.ndarray # shape (ky, kx) + curvature_weights: np.ndarray, # shape (n_unmasked, n_funcs) + mask: np.ndarray, # shape (ny, nx), bool + psf_kernel: np.ndarray, # shape (ky, kx) ) -> np.ndarray: """ Returns the off-diagonal terms in the curvature matrix `F` (see Warren & Dye 2003) @@ -717,23 +723,23 @@ def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( Parameters ---------- - data_to_pix_unique : ndarray + data_to_pix_unique An array that maps every data pixel index (e.g. the masked image pixel indexes in 1D) to its unique set of pixelization pixel indexes (see `data_slim_to_pixelization_unique_from`). - data_weights : ndarray + data_weights For every unique mapping between a set of data sub-pixels and a pixelization pixel, the weight of this mapping based on the number of sub-pixels that map to the pixelization pixel. - pix_lengths : ndarray + pix_lengths A 1D array describing how many unique pixels each data pixel maps to. Used to iterate over `data_to_pix_unique` and `data_weights`. - pix_pixels : int + pix_pixels The total number of pixels in the pixelization that reconstructs the data. - curvature_weights : ndarray + curvature_weights The operated values of the linear function divided by the noise-map squared, with shape [n_unmasked_data_pixels, n_linear_func_pixels]. - mask : ndarray + mask A 2D boolean mask of shape (ny, nx) indicating which pixels are in the data region. - psf_kernel : ndarray + psf_kernel The PSF kernel in its native 2D form, centered (odd dimensions recommended). 
 Returns @@ -771,4 +777,4 @@ def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( for f in range(n_funcs): off_diag[pix_0, f] += data_0_weight * blurred_slim[data_0, f] - return off_diag \ No newline at end of file + return off_diag diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py index 78eed58cf..fe82ce398 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_util.py @@ -3,6 +3,7 @@ import numpy as np from scipy.signal import fftconvolve + def psf_operator_matrix_dense_from( kernel_native: np.ndarray, native_index_for_slim_index: np.ndarray, # shape (N_pix, 2), native (y,x) coords of masked pixels @@ -146,6 +147,7 @@ def data_vector_via_blurred_mapping_matrix_from( """ return (image / noise_map**2.0) @ blurred_mapping_matrix + def data_linear_func_matrix_from( curvature_weights_matrix: np.ndarray, kernel_native, mask ) -> np.ndarray: @@ -176,12 +178,8 @@ def data_linear_func_matrix_from( curvature_weights_matrix The operated values of each linear function divided by the noise-map squared, in a matrix of shape [data_pixels, total_fixed_linear_functions]. - image_frame_indexes - The indexes of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_kernels - The kernel values of all masked pixels that the PSF blurs light into (see the `Convolver` object). - image_frame_length - The number of masked pixels it will blur light into (unmasked pixels are excluded, see the `Convolver` object). + kernel_native + The 2D PSF kernel. 
Returns ------- @@ -199,10 +197,11 @@ def data_linear_func_matrix_from( # Convolve each function with PSF kernel from scipy.signal import fftconvolve + blurred_list = [] for i in range(n_funcs): blurred = fftconvolve(native[..., i], kernel_native, mode="same") # Re-mask: only keep unmasked pixels blurred_list.append(blurred[~mask]) - return np.stack(blurred_list, axis=1) # shape (n_unmasked, n_funcs) \ No newline at end of file + return np.stack(blurred_list, axis=1) # shape (n_unmasked, n_funcs) diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index 1f526d7f6..e49bfd636 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -437,7 +437,7 @@ def _curvature_matrix_func_list_and_mapper(self) -> np.ndarray: pix_pixels=mapper.params, curvature_weights=np.array(curvature_weights), mask=self.mask.array, - psf_kernel=self.psf.native.array + psf_kernel=self.psf.native.array, ) curvature_matrix[ diff --git a/autoarray/inversion/mock/mock_inversion_imaging.py b/autoarray/inversion/mock/mock_inversion_imaging.py index 83e18356f..673f33566 100644 --- a/autoarray/inversion/mock/mock_inversion_imaging.py +++ b/autoarray/inversion/mock/mock_inversion_imaging.py @@ -14,7 +14,6 @@ def __init__( data=None, noise_map=None, psf=None, - convolver=None, linear_obj_list=None, operated_mapping_matrix=None, linear_func_operated_mapping_matrix_dict=None, @@ -28,7 +27,6 @@ def __init__( data=data, noise_map=noise_map, psf=psf, - convolver=convolver, ) super().__init__( diff --git a/autoarray/mock.py b/autoarray/mock.py index 55e1431d5..d6dbe40b0 100644 --- a/autoarray/mock.py +++ b/autoarray/mock.py @@ -15,7 +15,6 @@ from autoarray.fit.mock.mock_fit_imaging import MockFitImaging from autoarray.fit.mock.mock_fit_interferometer import MockFitInterferometer from autoarray.mask.mock.mock_mask import MockMask -from autoarray.operators.mock.mock_psf import MockConvolver 
from autoarray.operators.mock.mock_psf import MockPSF from autoarray.structures.mock.mock_grid import MockGrid2DMesh from autoarray.structures.mock.mock_grid import MockMeshGrid diff --git a/autoarray/operators/mock/mock_psf.py b/autoarray/operators/mock/mock_psf.py index b3db57b36..e89d2b732 100644 --- a/autoarray/operators/mock/mock_psf.py +++ b/autoarray/operators/mock/mock_psf.py @@ -4,11 +4,3 @@ def __init__(self, operated_mapping_matrix=None): def convolve_mapping_matrix(self, mapping_matrix, mask): return self.operated_mapping_matrix - - -class MockConvolver: - def __init__(self, operated_mapping_matrix=None): - self.operated_mapping_matrix = operated_mapping_matrix - - def convolve_mapping_matrix(self, mapping_matrix): - return self.operated_mapping_matrix diff --git a/test_autoarray/inversion/inversion/test_factory.py b/test_autoarray/inversion/inversion/test_factory.py index f5a8b852a..5c0b11b88 100644 --- a/test_autoarray/inversion/inversion/test_factory.py +++ b/test_autoarray/inversion/inversion/test_factory.py @@ -613,7 +613,9 @@ def test__inversion_imaging__positive_only_solver(masked_imaging_7x7_no_blur): assert inversion.mapped_reconstructed_image == pytest.approx(np.ones(9), 1.0e-4) assert inversion.reconstruction == pytest.approx(np.array([2.0]), 1.0e-4) -def test__data_linear_func_matrix_dict(masked_imaging_7x7, + +def test__data_linear_func_matrix_dict( + masked_imaging_7x7, rectangular_mapper_7x7_3x3, ): @@ -635,12 +637,12 @@ def test__data_linear_func_matrix_dict(masked_imaging_7x7, settings=aa.SettingsInversion(use_w_tilde=False, use_positive_only_solver=True), ) - assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][0] == pytest.approx( - [0.075, 0.05972222], 1.0e-4 - ) - assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][1] == pytest.approx( - [0.09166667, 0.07847222], 1.0e-4 - ) - assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][2] == pytest.approx( - [0.06458333, 0.05972222], 1.0e-4 - ) \ No 
newline at end of file + assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][ + 0 + ] == pytest.approx([0.075, 0.05972222], 1.0e-4) + assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][ + 1 + ] == pytest.approx([0.09166667, 0.07847222], 1.0e-4) + assert inversion_mapping.data_linear_func_matrix_dict[linear_obj][ + 2 + ] == pytest.approx([0.06458333, 0.05972222], 1.0e-4) From 179ef6954db1f10ca31baf51f40733a56138a4e0 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 18:54:53 +0100 Subject: [PATCH 13/17] split mappeR_util with numba --- .../imaging/inversion_imaging_numba_util.py | 81 +++++- .../inversion/inversion/imaging/w_tilde.py | 23 +- .../inversion/inversion/inversion_util.py | 60 ----- .../pixelization/mappers/abstract.py | 4 +- .../pixelization/mappers/delaunay.py | 12 +- .../pixelization/mappers/mapper_numba_util.py | 247 +++++++++++++++++ .../pixelization/mappers/mapper_util.py | 250 +----------------- .../pixelization/mappers/rectangular.py | 2 - .../inversion/pixelization/mappers/voronoi.py | 6 +- autoarray/structures/mesh/rectangular_2d.py | 2 - autoarray/util/__init__.py | 6 +- 11 files changed, 348 insertions(+), 345 deletions(-) create mode 100644 autoarray/inversion/pixelization/mappers/mapper_numba_util.py diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py index 6cb517415..7fe5c55d0 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -1,4 +1,4 @@ -from typing import Tuple +from typing import List, Optional, Tuple from autoarray import numba_util @@ -421,6 +421,53 @@ def data_vector_via_w_tilde_data_imaging_from( return data_vector +@numba_util.jit() +def curvature_matrix_with_added_to_diag_from( + curvature_matrix: np.ndarray, + value: float, + no_regularization_index_list: Optional[List] = None, 
+) -> np.ndarray: + """ + It is common for the `curvature_matrix` computed to not be positive-definite, leading for the inversion + via `np.linalg.solve` to fail and raise a `LinAlgError`. + + In many circumstances, adding a small numerical value of `1.0e-8` to the diagonal of the `curvature_matrix` + makes it positive definite, such that the inversion is performed without raising an error. + + This function adds this numerical value to the diagonal of the curvature matrix. + + Parameters + ---------- + curvature_matrix + The curvature matrix which is being constructed in order to solve a linear system of equations. + """ + + for i in no_regularization_index_list: + curvature_matrix[i, i] += value + + return curvature_matrix + + +@numba_util.jit() +def curvature_matrix_mirrored_from( + curvature_matrix: np.ndarray, +) -> np.ndarray: + curvature_matrix_mirrored = np.zeros( + (curvature_matrix.shape[0], curvature_matrix.shape[1]) + ) + + for i in range(curvature_matrix.shape[0]): + for j in range(curvature_matrix.shape[1]): + if curvature_matrix[i, j] != 0: + curvature_matrix_mirrored[i, j] = curvature_matrix[i, j] + curvature_matrix_mirrored[j, i] = curvature_matrix[i, j] + if curvature_matrix[j, i] != 0: + curvature_matrix_mirrored[i, j] = curvature_matrix[j, i] + curvature_matrix_mirrored[j, i] = curvature_matrix[j, i] + + return curvature_matrix_mirrored + + @numba_util.jit() def curvature_matrix_via_w_tilde_curvature_preload_imaging_from( curvature_preload: np.ndarray, @@ -778,3 +825,35 @@ def curvature_matrix_off_diags_via_mapper_and_linear_func_curvature_vector_from( off_diag[pix_0, f] += data_0_weight * blurred_slim[data_0, f] return off_diag + + +@numba_util.jit() +def mapped_reconstructed_data_via_image_to_pix_unique_from( + data_to_pix_unique: np.ndarray, + data_weights: np.ndarray, + pix_lengths: np.ndarray, + reconstruction: np.ndarray, +) -> np.ndarray: + """ + Returns the reconstructed data vector from the blurred mapping matrix `f` and solution vector 
*S*. + + Parameters + ---------- + mapping_matrix + The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels. + + """ + + data_pixels = data_to_pix_unique.shape[0] + + mapped_reconstructed_data = np.zeros(data_pixels) + + for data_0 in range(data_pixels): + for pix_0 in range(pix_lengths[data_0]): + pix_for_data = data_to_pix_unique[data_0, pix_0] + + mapped_reconstructed_data[data_0] += ( + data_weights[data_0, pix_0] * reconstruction[pix_for_data] + ) + + return mapped_reconstructed_data \ No newline at end of file diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index e49bfd636..0e32796a3 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -15,7 +15,6 @@ from autoarray.structures.arrays.uniform_2d import Array2D from autoarray import exc -from autoarray.inversion.inversion import inversion_util from autoarray.inversion.inversion.imaging import inversion_imaging_numba_util @@ -247,12 +246,12 @@ def curvature_matrix(self) -> np.ndarray: else: curvature_matrix = self._curvature_matrix_multi_mapper - curvature_matrix = inversion_util.curvature_matrix_mirrored_from( + curvature_matrix = inversion_imaging_numba_util.curvature_matrix_mirrored_from( curvature_matrix=curvature_matrix ) if len(self.no_regularization_index_list) > 0: - curvature_matrix = inversion_util.curvature_matrix_with_added_to_diag_from( + curvature_matrix = inversion_imaging_numba_util.curvature_matrix_with_added_to_diag_from( curvature_matrix=curvature_matrix, value=self.settings.no_regularization_add_to_curvature_diag_value, no_regularization_index_list=self.no_regularization_index_list, @@ -512,7 +511,7 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: cls=AbstractLinearObjFuncList ): - mapped_reconstructed_image = inversion_util.mapped_reconstructed_data_via_image_to_pix_unique_from( + 
mapped_reconstructed_image = inversion_imaging_numba_util.mapped_reconstructed_data_via_image_to_pix_unique_from( data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, data_weights=linear_obj.unique_mappings.data_weights, pix_lengths=linear_obj.unique_mappings.pix_lengths, @@ -527,22 +526,6 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: values=mapped_reconstructed_image, mask=self.mask ) - elif isinstance(linear_obj, AbstractMapper) and not self.has( - cls=AbstractLinearObjFuncList - ): - - mapped_reconstructed_image = ( - inversion_util.mapped_reconstructed_data_via_w_tilde_from( - w_tilde=self.w_tilde.psf_operator_matrix_dense, - mapping_matrix=self.mapping_matrix, - reconstruction=reconstruction, - ) - ) - - mapped_reconstructed_image = Array2D( - values=mapped_reconstructed_image, mask=self.mask - ) - else: operated_mapping_matrix = self.linear_func_operated_mapping_matrix_dict[ diff --git a/autoarray/inversion/inversion/inversion_util.py b/autoarray/inversion/inversion/inversion_util.py index 18b6d0cb0..737a7e779 100644 --- a/autoarray/inversion/inversion/inversion_util.py +++ b/autoarray/inversion/inversion/inversion_util.py @@ -60,34 +60,6 @@ def curvature_matrix_with_added_to_diag_from( no_regularization_index_list, no_regularization_index_list ].add(value) - -@numba_util.jit() -def curvature_matrix_with_added_to_diag_from_numba( - curvature_matrix: np.ndarray, - value: float, - no_regularization_index_list: Optional[List] = None, -) -> np.ndarray: - """ - It is common for the `curvature_matrix` computed to not be positive-definite, leading for the inversion - via `np.linalg.solve` to fail and raise a `LinAlgError`. - - In many circumstances, adding a small numerical value of `1.0e-8` to the diagonal of the `curvature_matrix` - makes it positive definite, such that the inversion is performed without raising an error. - - This function adds this numerical value to the diagonal of the curvature matrix. 
- - Parameters - ---------- - curvature_matrix - The curvature matrix which is being constructed in order to solve a linear system of equations. - """ - - for i in no_regularization_index_list: - curvature_matrix[i, i] += value - - return curvature_matrix - - def curvature_matrix_mirrored_from( curvature_matrix: np.ndarray, ) -> np.ndarray: @@ -133,38 +105,6 @@ def curvature_matrix_via_mapping_matrix_from( return curvature_matrix -@numba_util.jit() -def mapped_reconstructed_data_via_image_to_pix_unique_from( - data_to_pix_unique: np.ndarray, - data_weights: np.ndarray, - pix_lengths: np.ndarray, - reconstruction: np.ndarray, -) -> np.ndarray: - """ - Returns the reconstructed data vector from the blurred mapping matrix `f` and solution vector *S*. - - Parameters - ---------- - mapping_matrix - The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels. - - """ - - data_pixels = data_to_pix_unique.shape[0] - - mapped_reconstructed_data = np.zeros(data_pixels) - - for data_0 in range(data_pixels): - for pix_0 in range(pix_lengths[data_0]): - pix_for_data = data_to_pix_unique[data_0, pix_0] - - mapped_reconstructed_data[data_0] += ( - data_weights[data_0, pix_0] * reconstruction[pix_for_data] - ) - - return mapped_reconstructed_data - - def mapped_reconstructed_data_via_mapping_matrix_from( mapping_matrix: np.ndarray, reconstruction: np.ndarray ) -> np.ndarray: diff --git a/autoarray/inversion/pixelization/mappers/abstract.py b/autoarray/inversion/pixelization/mappers/abstract.py index 8d0fad744..4357896f0 100644 --- a/autoarray/inversion/pixelization/mappers/abstract.py +++ b/autoarray/inversion/pixelization/mappers/abstract.py @@ -16,7 +16,7 @@ from autoarray.structures.mesh.abstract_2d import Abstract2DMesh from autoarray.inversion.pixelization.mappers import mapper_util - +from autoarray.inversion.pixelization.mappers import mapper_numba_util class AbstractMapper(LinearObj): def __init__( @@ -225,7 +225,7 @@ def 
unique_mappings(self) -> UniqueMappings: data_to_pix_unique, data_weights, pix_lengths, - ) = mapper_util.data_slim_to_pixelization_unique_from( + ) = mapper_numba_util.data_slim_to_pixelization_unique_from( data_pixels=self.over_sampler.mask.pixels_in_mask, pix_indexes_for_sub_slim_index=np.array( self.pix_indexes_for_sub_slim_index diff --git a/autoarray/inversion/pixelization/mappers/delaunay.py b/autoarray/inversion/pixelization/mappers/delaunay.py index 30c9dc7b2..050c22199 100644 --- a/autoarray/inversion/pixelization/mappers/delaunay.py +++ b/autoarray/inversion/pixelization/mappers/delaunay.py @@ -6,7 +6,7 @@ from autoarray.inversion.pixelization.mappers.abstract import PixSubWeights from autoarray.inversion.pixelization.mappers import mapper_util - +from autoarray.inversion.pixelization.mappers import mapper_numba_util class MapperDelaunay(AbstractMapper): """ @@ -103,7 +103,7 @@ def pix_sub_weights(self) -> PixSubWeights: The interpolation weights of these multiple mappings are stored in the array `pix_weights_for_sub_slim_index`. For the Delaunay pixelization these mappings are calculated using the Scipy spatial library - (see `mapper_util.pix_indexes_for_sub_slim_index_delaunay_from`). + (see `mapper_numba_util.pix_indexes_for_sub_slim_index_delaunay_from`). 
""" delaunay = self.delaunay @@ -112,7 +112,7 @@ def pix_sub_weights(self) -> PixSubWeights: ) pix_indexes_for_simplex_index = delaunay.simplices - mappings, sizes = mapper_util.pix_indexes_for_sub_slim_index_delaunay_from( + mappings, sizes = mapper_numba_util.pix_indexes_for_sub_slim_index_delaunay_from( source_plane_data_grid=np.array(self.source_plane_data_grid.over_sampled), simplex_index_for_sub_slim_index=simplex_index_for_sub_slim_index, pix_indexes_for_simplex_index=pix_indexes_for_simplex_index, @@ -122,7 +122,7 @@ def pix_sub_weights(self) -> PixSubWeights: mappings = mappings.astype("int") sizes = sizes.astype("int") - weights = mapper_util.pixel_weights_delaunay_from( + weights = mapper_numba_util.pixel_weights_delaunay_from( source_plane_data_grid=np.array(self.source_plane_data_grid.over_sampled), source_plane_mesh_grid=np.array(self.source_plane_mesh_grid), slim_index_for_sub_slim_index=self.slim_index_for_sub_slim_index, @@ -154,14 +154,14 @@ def pix_sub_weights_split_cross(self) -> PixSubWeights: ( splitted_mappings, splitted_sizes, - ) = mapper_util.pix_indexes_for_sub_slim_index_delaunay_from( + ) = mapper_numba_util.pix_indexes_for_sub_slim_index_delaunay_from( source_plane_data_grid=self.source_plane_mesh_grid.split_cross, simplex_index_for_sub_slim_index=splitted_simplex_index_for_sub_slim_index, pix_indexes_for_simplex_index=pix_indexes_for_simplex_index, delaunay_points=delaunay.points, ) - splitted_weights = mapper_util.pixel_weights_delaunay_from( + splitted_weights = mapper_numba_util.pixel_weights_delaunay_from( source_plane_data_grid=self.source_plane_mesh_grid.split_cross, source_plane_mesh_grid=np.array(self.source_plane_mesh_grid), slim_index_for_sub_slim_index=self.source_plane_mesh_grid.split_cross, diff --git a/autoarray/inversion/pixelization/mappers/mapper_numba_util.py b/autoarray/inversion/pixelization/mappers/mapper_numba_util.py new file mode 100644 index 000000000..cc3ee3056 --- /dev/null +++ 
b/autoarray/inversion/pixelization/mappers/mapper_numba_util.py @@ -0,0 +1,247 @@ +import numpy as np +from typing import Tuple + +from autoarray import numba_util +from autoarray.inversion.pixelization.mesh import mesh_util + + +@numba_util.jit() +def data_slim_to_pixelization_unique_from( + data_pixels, + pix_indexes_for_sub_slim_index: np.ndarray, + pix_sizes_for_sub_slim_index: np.ndarray, + pix_weights_for_sub_slim_index, + pix_pixels: int, + sub_size: np.ndarray, +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Create an array describing the unique mappings between the sub-pixels of every slim data pixel and the pixelization + pixels, which is used to perform efficiently linear algebra calculations. + + For example, assuming `sub_size=2`: + + - If 3 sub-pixels in image pixel 0 map to pixelization pixel 2 then `data_pix_to_unique[0, 0] = 2`. + - If the fourth sub-pixel maps to pixelization pixel 4, then `data_to_pix_unique[0, 1] = 4`. + + The size of the second index depends on the number of unique sub-pixel to pixelization pixels mappings in a given + data pixel. In the example above, there were only two unique sets of mapping, but for high levels of sub-gridding + there could be many more unique mappings all of which must be stored. + + The array `data_to_pix_unique` does not describe how many sub-pixels uniquely map to each pixelization pixel for + a given data pixel. This information is contained in the array `data_weights`. For the example above, + where `sub_size=2` and therefore `sub_fraction=0.25`: + + - `data_weights[0, 0] = 0.75` (because 3 sub-pixels mapped to this pixelization pixel). + - `data_weights[0, 1] = 0.25` (because 1 sub-pixel mapped to this pixelization pixel). + + The `sub_fractions` are stored as opposed to the number of sub-pixels, because these values are used directly + when performing the linear algebra calculation. 
+ + The array `pix_lengths` in a 1D array of dimensions [data_pixels] describing how many unique pixelization pixels + each data pixel's set of sub-pixels maps too. + + Parameters + ---------- + data_pixels + The total number of data pixels in the dataset. + pix_indexes_for_sub_slim_index + Maps an unmasked data sub pixel to its corresponding pixelization pixel. + sub_size + The size of the sub-grid defining the number of sub-pixels in every data pixel. + + Returns + ------- + ndarray + The unique mappings between the sub-pixels of every data pixel and the pixelization pixels, alongside arrays + that give the weights and total number of mappings. + """ + + sub_fraction = 1.0 / (sub_size**2.0) + + max_pix_mappings = int(np.max(pix_sizes_for_sub_slim_index)) + + # TODO : Work out if we can reduce size from np.max(sub_size) using sub_size of max_pix_mappings. + + data_to_pix_unique = -1 * np.ones( + (data_pixels, max_pix_mappings * np.max(sub_size) ** 2) + ) + data_weights = np.zeros((data_pixels, max_pix_mappings * np.max(sub_size) ** 2)) + pix_lengths = np.zeros(data_pixels) + pix_check = -1 * np.ones(shape=pix_pixels) + + ip_sub_start = 0 + + for ip in range(data_pixels): + pix_check[:] = -1 + + pix_size = 0 + + ip_sub_end = ip_sub_start + sub_size[ip] ** 2 + + for ip_sub in range(ip_sub_start, ip_sub_end): + for pix_interp_index in range(pix_sizes_for_sub_slim_index[ip_sub]): + pix = pix_indexes_for_sub_slim_index[ip_sub, pix_interp_index] + pixel_weight = pix_weights_for_sub_slim_index[ip_sub, pix_interp_index] + + if pix_check[pix] > -0.5: + data_weights[ip, int(pix_check[pix])] += ( + sub_fraction[ip] * pixel_weight + ) + + else: + data_to_pix_unique[ip, pix_size] = pix + data_weights[ip, pix_size] += sub_fraction[ip] * pixel_weight + pix_check[pix] = pix_size + pix_size += 1 + + ip_sub_start = ip_sub_end + + pix_lengths[ip] = pix_size + + return data_to_pix_unique, data_weights, pix_lengths + + +@numba_util.jit() +def 
pix_indexes_for_sub_slim_index_delaunay_from( + source_plane_data_grid, + simplex_index_for_sub_slim_index, + pix_indexes_for_simplex_index, + delaunay_points, +) -> Tuple[np.ndarray, np.ndarray]: + """ + The indexes mappings between the sub pixels and Voronoi mesh pixels. + For Delaunay tessellation, most sub pixels should have contribution of 3 pixelization pixels. However, + for those ones not belonging to any triangle, we link its value to its closest point. + + The returning result is a matrix of (len(sub_pixels, 3)) where the entries mark the relevant source pixel indexes. + A row like [A, -1, -1] means that sub pixel only links to source pixel A. + """ + + pix_indexes_for_sub_slim_index = -1 * np.ones( + shape=(source_plane_data_grid.shape[0], 3) + ) + + for i in range(len(source_plane_data_grid)): + simplex_index = simplex_index_for_sub_slim_index[i] + if simplex_index != -1: + pix_indexes_for_sub_slim_index[i] = pix_indexes_for_simplex_index[ + simplex_index_for_sub_slim_index[i] + ] + else: + pix_indexes_for_sub_slim_index[i][0] = np.argmin( + np.sum((delaunay_points - source_plane_data_grid[i]) ** 2.0, axis=1) + ) + + pix_indexes_for_sub_slim_index_sizes = np.sum( + pix_indexes_for_sub_slim_index >= 0, axis=1 + ) + + return pix_indexes_for_sub_slim_index, pix_indexes_for_sub_slim_index_sizes + + +@numba_util.jit() +def pixel_weights_delaunay_from( + source_plane_data_grid, + source_plane_mesh_grid, + slim_index_for_sub_slim_index: np.ndarray, + pix_indexes_for_sub_slim_index, +) -> np.ndarray: + """ + Returns the weights of the mappings between the masked sub-pixels and the Delaunay pixelization. + + Weights are determiend via a nearest neighbor interpolation scheme, whereby every data-sub pixel maps to three + Delaunay pixel vertexes (in the source frame). 
The weights of these 3 mappings depends on the distance of the + coordinate to each vertex, with the highest weight being its closest neighbor, + + Parameters + ---------- + source_plane_data_grid + A 2D grid of (y,x) coordinates associated with the unmasked 2D data after it has been transformed to the + `source` reference frame. + source_plane_mesh_grid + The 2D grid of (y,x) centres of every pixelization pixel in the `source` frame. + slim_index_for_sub_slim_index + The mappings between the data's sub slimmed indexes and the slimmed indexes on the non sub-sized indexes. + pix_indexes_for_sub_slim_index + The mappings from a data sub-pixel index to a pixelization pixel index. + """ + + pixel_weights = np.zeros(pix_indexes_for_sub_slim_index.shape) + + for sub_slim_index in range(slim_index_for_sub_slim_index.shape[0]): + pix_indexes = pix_indexes_for_sub_slim_index[sub_slim_index] + + if pix_indexes[1] != -1: + vertices_of_the_simplex = source_plane_mesh_grid[pix_indexes] + + sub_gird_coordinate_on_source_place = source_plane_data_grid[sub_slim_index] + + area_0 = mesh_util.delaunay_triangle_area_from( + corner_0=vertices_of_the_simplex[1], + corner_1=vertices_of_the_simplex[2], + corner_2=sub_gird_coordinate_on_source_place, + ) + area_1 = mesh_util.delaunay_triangle_area_from( + corner_0=vertices_of_the_simplex[0], + corner_1=vertices_of_the_simplex[2], + corner_2=sub_gird_coordinate_on_source_place, + ) + area_2 = mesh_util.delaunay_triangle_area_from( + corner_0=vertices_of_the_simplex[0], + corner_1=vertices_of_the_simplex[1], + corner_2=sub_gird_coordinate_on_source_place, + ) + + norm = area_0 + area_1 + area_2 + + weight_abc = np.array([area_0, area_1, area_2]) / norm + + pixel_weights[sub_slim_index] = weight_abc + + else: + pixel_weights[sub_slim_index][0] = 1.0 + + return pixel_weights + + +@numba_util.jit() +def remove_bad_entries_voronoi_nn( + bad_indexes, + pix_weights_for_sub_slim_index, + pix_indexes_for_sub_slim_index, + grid, + mesh_grid, +): + 
""" + The nearest neighbor interpolation can return invalid or bad entries which are removed from the mapping arrays. The + current circumstances this arises are: + + 1) If a point is outside the whole Voronoi region, some weights have negative values. In this case, we reset its + neighbor to its closest neighbor. + + 2) The nearest neighbor interpolation code may not return even a single neighbor. We mark these as a bad grid by + settings their neighbors to the closest ones. + + Parameters + ---------- + bad_indexes + pix_weights_for_sub_slim_index + pix_indexes_for_sub_slim_index + grid + mesh_grid + + Returns + ------- + + """ + + for item in bad_indexes: + ind = item[0] + pix_indexes_for_sub_slim_index[ind] = -1 + pix_indexes_for_sub_slim_index[ind][0] = np.argmin( + np.sum((grid[ind] - mesh_grid) ** 2.0, axis=1) + ) + pix_weights_for_sub_slim_index[ind] = 0.0 + pix_weights_for_sub_slim_index[ind][0] = 1.0 + + return pix_weights_for_sub_slim_index, pix_indexes_for_sub_slim_index diff --git a/autoarray/inversion/pixelization/mappers/mapper_util.py b/autoarray/inversion/pixelization/mappers/mapper_util.py index 0293a880e..b32027e21 100644 --- a/autoarray/inversion/pixelization/mappers/mapper_util.py +++ b/autoarray/inversion/pixelization/mappers/mapper_util.py @@ -1,3 +1,5 @@ +from functools import partial +import jax import jax.numpy as jnp import numpy as np from typing import Tuple @@ -9,108 +11,6 @@ from autoarray.inversion.pixelization.mesh import mesh_util -@numba_util.jit() -def data_slim_to_pixelization_unique_from( - data_pixels, - pix_indexes_for_sub_slim_index: np.ndarray, - pix_sizes_for_sub_slim_index: np.ndarray, - pix_weights_for_sub_slim_index, - pix_pixels: int, - sub_size: np.ndarray, -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - Create an array describing the unique mappings between the sub-pixels of every slim data pixel and the pixelization - pixels, which is used to perform efficiently linear algebra calculations. 
- - For example, assuming `sub_size=2`: - - - If 3 sub-pixels in image pixel 0 map to pixelization pixel 2 then `data_pix_to_unique[0, 0] = 2`. - - If the fourth sub-pixel maps to pixelization pixel 4, then `data_to_pix_unique[0, 1] = 4`. - - The size of the second index depends on the number of unique sub-pixel to pixelization pixels mappings in a given - data pixel. In the example above, there were only two unique sets of mapping, but for high levels of sub-gridding - there could be many more unique mappings all of which must be stored. - - The array `data_to_pix_unique` does not describe how many sub-pixels uniquely map to each pixelization pixel for - a given data pixel. This information is contained in the array `data_weights`. For the example above, - where `sub_size=2` and therefore `sub_fraction=0.25`: - - - `data_weights[0, 0] = 0.75` (because 3 sub-pixels mapped to this pixelization pixel). - - `data_weights[0, 1] = 0.25` (because 1 sub-pixel mapped to this pixelization pixel). - - The `sub_fractions` are stored as opposed to the number of sub-pixels, because these values are used directly - when performing the linear algebra calculation. - - The array `pix_lengths` in a 1D array of dimensions [data_pixels] describing how many unique pixelization pixels - each data pixel's set of sub-pixels maps too. - - Parameters - ---------- - data_pixels - The total number of data pixels in the dataset. - pix_indexes_for_sub_slim_index - Maps an unmasked data sub pixel to its corresponding pixelization pixel. - sub_size - The size of the sub-grid defining the number of sub-pixels in every data pixel. - - Returns - ------- - ndarray - The unique mappings between the sub-pixels of every data pixel and the pixelization pixels, alongside arrays - that give the weights and total number of mappings. 
- """ - - sub_fraction = 1.0 / (sub_size**2.0) - - max_pix_mappings = int(np.max(pix_sizes_for_sub_slim_index)) - - # TODO : Work out if we can reduce size from np.max(sub_size) using sub_size of max_pix_mappings. - - data_to_pix_unique = -1 * np.ones( - (data_pixels, max_pix_mappings * np.max(sub_size) ** 2) - ) - data_weights = np.zeros((data_pixels, max_pix_mappings * np.max(sub_size) ** 2)) - pix_lengths = np.zeros(data_pixels) - pix_check = -1 * np.ones(shape=pix_pixels) - - ip_sub_start = 0 - - for ip in range(data_pixels): - pix_check[:] = -1 - - pix_size = 0 - - ip_sub_end = ip_sub_start + sub_size[ip] ** 2 - - for ip_sub in range(ip_sub_start, ip_sub_end): - for pix_interp_index in range(pix_sizes_for_sub_slim_index[ip_sub]): - pix = pix_indexes_for_sub_slim_index[ip_sub, pix_interp_index] - pixel_weight = pix_weights_for_sub_slim_index[ip_sub, pix_interp_index] - - if pix_check[pix] > -0.5: - data_weights[ip, int(pix_check[pix])] += ( - sub_fraction[ip] * pixel_weight - ) - - else: - data_to_pix_unique[ip, pix_size] = pix - data_weights[ip, pix_size] += sub_fraction[ip] * pixel_weight - pix_check[pix] = pix_size - pix_size += 1 - - ip_sub_start = ip_sub_end - - pix_lengths[ip] = pix_size - - return data_to_pix_unique, data_weights, pix_lengths - - -import jax -import jax.numpy as jnp - -from functools import partial - - def forward_interp(xp, yp, x): return jax.vmap(jnp.interp, in_axes=(1, 1, None, None, None))(x, xp, yp, 0, 1).T @@ -370,44 +270,6 @@ def rectangular_mappings_weights_via_interpolation_from( return mappings, weights -@numba_util.jit() -def pix_indexes_for_sub_slim_index_delaunay_from( - source_plane_data_grid, - simplex_index_for_sub_slim_index, - pix_indexes_for_simplex_index, - delaunay_points, -) -> Tuple[np.ndarray, np.ndarray]: - """ - The indexes mappings between the sub pixels and Voronoi mesh pixels. - For Delaunay tessellation, most sub pixels should have contribution of 3 pixelization pixels. 
However, - for those ones not belonging to any triangle, we link its value to its closest point. - - The returning result is a matrix of (len(sub_pixels, 3)) where the entries mark the relevant source pixel indexes. - A row like [A, -1, -1] means that sub pixel only links to source pixel A. - """ - - pix_indexes_for_sub_slim_index = -1 * np.ones( - shape=(source_plane_data_grid.shape[0], 3) - ) - - for i in range(len(source_plane_data_grid)): - simplex_index = simplex_index_for_sub_slim_index[i] - if simplex_index != -1: - pix_indexes_for_sub_slim_index[i] = pix_indexes_for_simplex_index[ - simplex_index_for_sub_slim_index[i] - ] - else: - pix_indexes_for_sub_slim_index[i][0] = np.argmin( - np.sum((delaunay_points - source_plane_data_grid[i]) ** 2.0, axis=1) - ) - - pix_indexes_for_sub_slim_index_sizes = np.sum( - pix_indexes_for_sub_slim_index >= 0, axis=1 - ) - - return pix_indexes_for_sub_slim_index, pix_indexes_for_sub_slim_index_sizes - - def nearest_pixelization_index_for_slim_index_from_kdtree(grid, mesh_grid): from scipy.spatial import cKDTree @@ -423,71 +285,6 @@ def nearest_pixelization_index_for_slim_index_from_kdtree(grid, mesh_grid): return sparse_index_for_slim_index -@numba_util.jit() -def pixel_weights_delaunay_from( - source_plane_data_grid, - source_plane_mesh_grid, - slim_index_for_sub_slim_index: np.ndarray, - pix_indexes_for_sub_slim_index, -) -> np.ndarray: - """ - Returns the weights of the mappings between the masked sub-pixels and the Delaunay pixelization. - - Weights are determiend via a nearest neighbor interpolation scheme, whereby every data-sub pixel maps to three - Delaunay pixel vertexes (in the source frame). 
The weights of these 3 mappings depends on the distance of the - coordinate to each vertex, with the highest weight being its closest neighbor, - - Parameters - ---------- - source_plane_data_grid - A 2D grid of (y,x) coordinates associated with the unmasked 2D data after it has been transformed to the - `source` reference frame. - source_plane_mesh_grid - The 2D grid of (y,x) centres of every pixelization pixel in the `source` frame. - slim_index_for_sub_slim_index - The mappings between the data's sub slimmed indexes and the slimmed indexes on the non sub-sized indexes. - pix_indexes_for_sub_slim_index - The mappings from a data sub-pixel index to a pixelization pixel index. - """ - - pixel_weights = np.zeros(pix_indexes_for_sub_slim_index.shape) - - for sub_slim_index in range(slim_index_for_sub_slim_index.shape[0]): - pix_indexes = pix_indexes_for_sub_slim_index[sub_slim_index] - - if pix_indexes[1] != -1: - vertices_of_the_simplex = source_plane_mesh_grid[pix_indexes] - - sub_gird_coordinate_on_source_place = source_plane_data_grid[sub_slim_index] - - area_0 = mesh_util.delaunay_triangle_area_from( - corner_0=vertices_of_the_simplex[1], - corner_1=vertices_of_the_simplex[2], - corner_2=sub_gird_coordinate_on_source_place, - ) - area_1 = mesh_util.delaunay_triangle_area_from( - corner_0=vertices_of_the_simplex[0], - corner_1=vertices_of_the_simplex[2], - corner_2=sub_gird_coordinate_on_source_place, - ) - area_2 = mesh_util.delaunay_triangle_area_from( - corner_0=vertices_of_the_simplex[0], - corner_1=vertices_of_the_simplex[1], - corner_2=sub_gird_coordinate_on_source_place, - ) - - norm = area_0 + area_1 + area_2 - - weight_abc = np.array([area_0, area_1, area_2]) / norm - - pixel_weights[sub_slim_index] = weight_abc - - else: - pixel_weights[sub_slim_index][0] = 1.0 - - return pixel_weights - - def pix_size_weights_voronoi_nn_from( grid: np.ndarray, mesh_grid: np.ndarray ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: @@ -590,49 +387,6 @@ def 
pix_size_weights_voronoi_nn_from( ) -@numba_util.jit() -def remove_bad_entries_voronoi_nn( - bad_indexes, - pix_weights_for_sub_slim_index, - pix_indexes_for_sub_slim_index, - grid, - mesh_grid, -): - """ - The nearest neighbor interpolation can return invalid or bad entries which are removed from the mapping arrays. The - current circumstances this arises are: - - 1) If a point is outside the whole Voronoi region, some weights have negative values. In this case, we reset its - neighbor to its closest neighbor. - - 2) The nearest neighbor interpolation code may not return even a single neighbor. We mark these as a bad grid by - settings their neighbors to the closest ones. - - Parameters - ---------- - bad_indexes - pix_weights_for_sub_slim_index - pix_indexes_for_sub_slim_index - grid - mesh_grid - - Returns - ------- - - """ - - for item in bad_indexes: - ind = item[0] - pix_indexes_for_sub_slim_index[ind] = -1 - pix_indexes_for_sub_slim_index[ind][0] = np.argmin( - np.sum((grid[ind] - mesh_grid) ** 2.0, axis=1) - ) - pix_weights_for_sub_slim_index[ind] = 0.0 - pix_weights_for_sub_slim_index[ind][0] = 1.0 - - return pix_weights_for_sub_slim_index, pix_indexes_for_sub_slim_index - - def adaptive_pixel_signals_from( pixels: int, pixel_weights: np.ndarray, diff --git a/autoarray/inversion/pixelization/mappers/rectangular.py b/autoarray/inversion/pixelization/mappers/rectangular.py index 83b000f6d..14fd3fd9f 100644 --- a/autoarray/inversion/pixelization/mappers/rectangular.py +++ b/autoarray/inversion/pixelization/mappers/rectangular.py @@ -1,10 +1,8 @@ import jax.numpy as jnp -import numpy as np from typing import Tuple from autoconf import cached_property -from autoarray.structures.grids.irregular_2d import Grid2DIrregular from autoarray.inversion.pixelization.mappers.abstract import AbstractMapper from autoarray.inversion.pixelization.mappers.abstract import PixSubWeights diff --git a/autoarray/inversion/pixelization/mappers/voronoi.py 
b/autoarray/inversion/pixelization/mappers/voronoi.py index 56921b2b2..db364319e 100644 --- a/autoarray/inversion/pixelization/mappers/voronoi.py +++ b/autoarray/inversion/pixelization/mappers/voronoi.py @@ -7,7 +7,7 @@ from autoarray.inversion.pixelization.mappers.abstract import PixSubWeights from autoarray.structures.arrays.uniform_2d import Array2D -from autoarray.inversion.pixelization.mappers import mapper_util +from autoarray.inversion.pixelization.mappers import mapper_numba_util class MapperVoronoi(AbstractMapper): @@ -75,7 +75,7 @@ def pix_sub_weights_split_cross(self) -> PixSubWeights: This property returns a unique set of `PixSubWeights` used for these regularization schemes which compute mappings and weights at each point on the split cross. """ - (mappings, sizes, weights) = mapper_util.pix_size_weights_voronoi_nn_from( + (mappings, sizes, weights) = mapper_numba_util.pix_size_weights_voronoi_nn_from( grid=self.source_plane_mesh_grid.split_cross, mesh_grid=self.source_plane_mesh_grid, ) @@ -125,7 +125,7 @@ def pix_sub_weights(self) -> PixSubWeights: The interpolation weights of these multiple mappings are stored in the array `pix_weights_for_sub_slim_index`. 
""" - mappings, sizes, weights = mapper_util.pix_size_weights_voronoi_nn_from( + mappings, sizes, weights = mapper_numba_util.pix_size_weights_voronoi_nn_from( grid=self.source_plane_data_grid.over_sampled, mesh_grid=self.source_plane_mesh_grid, ) diff --git a/autoarray/structures/mesh/rectangular_2d.py b/autoarray/structures/mesh/rectangular_2d.py index 55ea54859..8e447b74b 100644 --- a/autoarray/structures/mesh/rectangular_2d.py +++ b/autoarray/structures/mesh/rectangular_2d.py @@ -8,12 +8,10 @@ from autoarray import type as ty from autoarray.inversion.linear_obj.neighbors import Neighbors from autoarray.mask.mask_2d import Mask2D -from autoarray.structures.abstract_structure import Structure from autoarray.structures.arrays.uniform_2d import Array2D from autoarray.structures.mesh.abstract_2d import Abstract2DMesh -from autoarray.inversion.pixelization.mappers import mapper_util from autoarray.inversion.pixelization.mesh import mesh_util from autoarray.structures.grids import grid_2d_util diff --git a/autoarray/util/__init__.py b/autoarray/util/__init__.py index 43801e059..95a911a23 100644 --- a/autoarray/util/__init__.py +++ b/autoarray/util/__init__.py @@ -12,13 +12,17 @@ from autoarray.fit import fit_util as fit from autoarray.inversion.pixelization.mesh import mesh_util as mesh from autoarray.inversion.pixelization.mappers import mapper_util as mapper +from autoarray.inversion.pixelization.mappers import mapper_numba_util as mapper_numba from autoarray.inversion.regularization import regularization_util as regularization from autoarray.inversion.inversion import inversion_util as inversion from autoarray.inversion.inversion.imaging import ( inversion_imaging_util as inversion_imaging, ) from autoarray.inversion.inversion.imaging import ( - inversion_imaging_numba_util as inversion_imaging, + inversion_imaging_util as inversion_imaging, +) +from autoarray.inversion.inversion.imaging import ( + inversion_imaging_numba_util as inversion_imaging_numba, ) from 
autoarray.inversion.inversion.interferometer import ( inversion_interferometer_util as inversion_interferometer, From e3a0c7f66a83651277cb4e7269e63bb328247f98 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 18:58:55 +0100 Subject: [PATCH 14/17] split mesh_util into numba --- .../inversion/inversion/imaging/w_tilde.py | 15 +- .../pixelization/mappers/mapper_numba_util.py | 8 +- .../pixelization/mappers/mapper_util.py | 2 - .../pixelization/mesh/mesh_numba_util.py | 301 ++++++++++++++++++ .../inversion/pixelization/mesh/mesh_util.py | 300 +---------------- autoarray/plot/wrap/two_d/voronoi_drawer.py | 4 +- autoarray/structures/mesh/triangulation_2d.py | 5 +- autoarray/structures/mesh/voronoi_2d.py | 8 +- autoarray/util/__init__.py | 1 + .../inversion/test_inversion_util.py | 4 +- 10 files changed, 323 insertions(+), 325 deletions(-) create mode 100644 autoarray/inversion/pixelization/mesh/mesh_numba_util.py diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index 0e32796a3..05bc7a9a8 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -507,10 +507,7 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: for linear_obj in self.linear_obj_list: reconstruction = reconstruction_dict[linear_obj] - if isinstance(linear_obj, AbstractMapper) and self.has( - cls=AbstractLinearObjFuncList - ): - + if isinstance(linear_obj, AbstractMapper): mapped_reconstructed_image = inversion_imaging_numba_util.mapped_reconstructed_data_via_image_to_pix_unique_from( data_to_pix_unique=linear_obj.unique_mappings.data_to_pix_unique, data_weights=linear_obj.unique_mappings.data_weights, @@ -518,16 +515,15 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: reconstruction=reconstruction, ) - mapped_reconstructed_image = self.psf.convolve_image_no_blurring( - image=mapped_reconstructed_image, mask=self.mask - 
).array - mapped_reconstructed_image = Array2D( values=mapped_reconstructed_image, mask=self.mask ) - else: + mapped_reconstructed_image = self.convolver.convolve_image_no_blurring( + image=mapped_reconstructed_image + ) + else: operated_mapping_matrix = self.linear_func_operated_mapping_matrix_dict[ linear_obj ] @@ -540,6 +536,7 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: values=mapped_reconstructed_image, mask=self.mask ) + mapped_reconstructed_data_dict[linear_obj] = mapped_reconstructed_image return mapped_reconstructed_data_dict diff --git a/autoarray/inversion/pixelization/mappers/mapper_numba_util.py b/autoarray/inversion/pixelization/mappers/mapper_numba_util.py index cc3ee3056..e5476e142 100644 --- a/autoarray/inversion/pixelization/mappers/mapper_numba_util.py +++ b/autoarray/inversion/pixelization/mappers/mapper_numba_util.py @@ -2,7 +2,7 @@ from typing import Tuple from autoarray import numba_util -from autoarray.inversion.pixelization.mesh import mesh_util +from autoarray.inversion.pixelization.mesh import mesh_numba_util @numba_util.jit() @@ -176,17 +176,17 @@ def pixel_weights_delaunay_from( sub_gird_coordinate_on_source_place = source_plane_data_grid[sub_slim_index] - area_0 = mesh_util.delaunay_triangle_area_from( + area_0 = mesh_numba_util.delaunay_triangle_area_from( corner_0=vertices_of_the_simplex[1], corner_1=vertices_of_the_simplex[2], corner_2=sub_gird_coordinate_on_source_place, ) - area_1 = mesh_util.delaunay_triangle_area_from( + area_1 = mesh_numba_util.delaunay_triangle_area_from( corner_0=vertices_of_the_simplex[0], corner_1=vertices_of_the_simplex[2], corner_2=sub_gird_coordinate_on_source_place, ) - area_2 = mesh_util.delaunay_triangle_area_from( + area_2 = mesh_numba_util.delaunay_triangle_area_from( corner_0=vertices_of_the_simplex[0], corner_1=vertices_of_the_simplex[1], corner_2=sub_gird_coordinate_on_source_place, diff --git a/autoarray/inversion/pixelization/mappers/mapper_util.py 
b/autoarray/inversion/pixelization/mappers/mapper_util.py index b32027e21..012ef27a7 100644 --- a/autoarray/inversion/pixelization/mappers/mapper_util.py +++ b/autoarray/inversion/pixelization/mappers/mapper_util.py @@ -6,9 +6,7 @@ from autoconf import conf -from autoarray import numba_util from autoarray import exc -from autoarray.inversion.pixelization.mesh import mesh_util def forward_interp(xp, yp, x): diff --git a/autoarray/inversion/pixelization/mesh/mesh_numba_util.py b/autoarray/inversion/pixelization/mesh/mesh_numba_util.py new file mode 100644 index 000000000..e3fc2232f --- /dev/null +++ b/autoarray/inversion/pixelization/mesh/mesh_numba_util.py @@ -0,0 +1,301 @@ +import numpy as np + +from typing import List, Tuple, Union + +from autoarray import numba_util + + +@numba_util.jit() +def delaunay_triangle_area_from( + corner_0: Tuple[float, float], + corner_1: Tuple[float, float], + corner_2: Tuple[float, float], +) -> float: + """ + Returns the area within a Delaunay triangle where the three corners are located at the (x,y) coordinates given by + the inputs `corner_a` `corner_b` and `corner_c`. + + This function actually returns the area of any triangle, but the term `delaunay` is included in the title to + separate it from the `rectangular` and `voronoi` methods in `mesh_util.py`. + + Parameters + ---------- + corner_0 + The (x,y) coordinates of the triangle's first corner. + corner_1 + The (x,y) coordinates of the triangle's second corner. + corner_2 + The (x,y) coordinates of the triangle's third corner. + + Returns + ------- + The area of the triangle given the input (x,y) corners. 
+ """ + + x1 = corner_0[0] + y1 = corner_0[1] + x2 = corner_1[0] + y2 = corner_1[1] + x3 = corner_2[0] + y3 = corner_2[1] + + return 0.5 * np.abs(x1 * y2 + x2 * y3 + x3 * y1 - x2 * y1 - x3 * y2 - x1 * y3) + + +def delaunay_interpolated_array_from( + shape_native: Tuple[int, int], + interpolation_grid_slim: np.ndarray, + pixel_values: np.ndarray, + delaunay: "scipy.spatial.Delaunay", +) -> np.ndarray: + """ + Given a Delaunay triangulation and 1D values at the node of each Delaunay pixel (e.g. the connecting points where + triangles meet), interpolate these values to a uniform 2D (y,x) grid. + + By mapping the delaunay's value to a regular grid this enables a source reconstruction of an inversion to be + output to a .fits file. + + The `grid_interpolate_slim`, which gives the (y,x) coordinates the values are evaluated at for interpolation, + need not be regular and can have undergone coordinate transforms (e.g. it can be the `source_plane_mesh_grid`) + of a `Mapper`. + + The shape of `grid_interpolate_slim` therefore must be equal to `shape_native[0] * shape_native[1]`, but the (y,x) + coordinates themselves do not need to be uniform. + + Parameters + ---------- + shape_native + The 2D (y,x) shape of the uniform grid the values are interpolated on too. + interpolation_grid_slim + A 1D grid of (y,x) coordinates where each interpolation is evaluated. The shape of this grid must be equal to + shape_native[0] * shape_native[1], but it does not need to be uniform itself. + pixel_values + The values of the Delaunay nodes (e.g. the connecting points where triangles meet) which are interpolated + to compute the value in each pixel on the `interpolated_grid`. + delaunay + A `scipy.spatial.Delaunay` object which contains all functionality describing the Delaunay triangulation. + + Returns + ------- + The input values interpolated to the `grid_interpolate_slim` (y,x) coordintes given the Delaunay triangulation. 
+ + """ + simplex_index_for_interpolate_index = delaunay.find_simplex(interpolation_grid_slim) + + simplices = delaunay.simplices + pixel_points = delaunay.points + + interpolated_array = np.zeros(len(interpolation_grid_slim)) + + for slim_index in range(len(interpolation_grid_slim)): + simplex_index = simplex_index_for_interpolate_index[slim_index] + interpolating_point = tuple(interpolation_grid_slim[slim_index]) + + if simplex_index == -1: + cloest_pixel_index = np.argmin( + np.sum((pixel_points - interpolating_point) ** 2.0, axis=1) + ) + interpolated_array[slim_index] = pixel_values[cloest_pixel_index] + else: + triangle_points = pixel_points[simplices[simplex_index]] + triangle_values = pixel_values[simplices[simplex_index]] + + area_0 = delaunay_triangle_area_from( + corner_0=triangle_points[1], + corner_1=triangle_points[2], + corner_2=interpolating_point, + ) + area_1 = delaunay_triangle_area_from( + corner_0=triangle_points[0], + corner_1=triangle_points[2], + corner_2=interpolating_point, + ) + area_2 = delaunay_triangle_area_from( + corner_0=triangle_points[0], + corner_1=triangle_points[1], + corner_2=interpolating_point, + ) + norm = area_0 + area_1 + area_2 + + weight_abc = np.array([area_0, area_1, area_2]) / norm + + interpolated_array[slim_index] = np.sum(weight_abc * triangle_values) + + return interpolated_array.reshape(shape_native) + + +@numba_util.jit() +def voronoi_neighbors_from( + pixels: int, ridge_points: np.ndarray +) -> Tuple[np.ndarray, np.ndarray]: + """ + Returns the adjacent neighbors of every pixel on a Voronoi mesh as an ndarray of shape + [total_pixels, voronoi_pixel_with_max_neighbors], using the `ridge_points` output from the `scipy.spatial.Voronoi()` + object. + + Entries with values of `-1` signify edge pixels which do not have neighbors. This function therefore also returns + an ndarray with the number of neighbors of every pixel, `neighbors_sizes`, which is iterated over when using + the `neighbors` ndarray. 
+ + Indexing is defined in an arbritrary manner due to the irregular nature of a Voronoi mesh. + + For example, if `neighbors[0,:] = [1, 5, 36, 2, -1, -1]`, this informs us that the first Voronoi pixel has + 4 neighbors which have indexes 1, 5, 36, 2. Correspondingly `neighbors_sizes[0] = 4`. + + Parameters + ---------- + pixels + The number of pixels on the Voronoi mesh. + ridge_points + Contains the information on every Voronoi source pixel and its neighbors. + + Returns + ------- + The arrays containing the 1D index of every pixel's neighbors and the number of neighbors that each pixel has. + """ + neighbors_sizes = np.zeros(shape=(pixels)) + + for ridge_index in range(ridge_points.shape[0]): + pair0 = ridge_points[ridge_index, 0] + pair1 = ridge_points[ridge_index, 1] + neighbors_sizes[pair0] += 1 + neighbors_sizes[pair1] += 1 + + neighbors_index = np.zeros(shape=(pixels)) + neighbors = -1 * np.ones(shape=(pixels, int(np.max(neighbors_sizes)))) + + for ridge_index in range(ridge_points.shape[0]): + pair0 = ridge_points[ridge_index, 0] + pair1 = ridge_points[ridge_index, 1] + neighbors[pair0, int(neighbors_index[pair0])] = pair1 + neighbors[pair1, int(neighbors_index[pair1])] = pair0 + neighbors_index[pair0] += 1 + neighbors_index[pair1] += 1 + + return neighbors, neighbors_sizes + + +def voronoi_edge_pixels_from(regions: np.ndarray, point_region: np.ndarray) -> List: + """ + Returns the edge pixels of a Voronoi mesh, where the edge pixels are defined as those pixels which are on the + edge of the Voronoi diagram. + + Parameters + ---------- + regions + Indices of the Voronoi vertices forming each Voronoi region, where -1 indicates vertex outside the Voronoi + diagram. 
+ """ + + voronoi_edge_pixel_list = [] + + for index, i in enumerate(point_region): + if -1 in regions[i]: + voronoi_edge_pixel_list.append(index) + + return voronoi_edge_pixel_list + + +def voronoi_revised_from( + voronoi: "scipy.spatial.Voronoi", +) -> Union[List[Tuple], np.ndarray]: + """ + To plot a Voronoi mesh using the `matplotlib.fill()` function a revised Voronoi mesh must be + computed, where 2D infinite voronoi regions are converted to finite 2D regions. + + This function returns a list of tuples containing the indices of the vertices of each revised Voronoi cell and + a list of tuples containing the revised Voronoi vertex vertices. + + Parameters + ---------- + voronoi + The input Voronoi diagram that is being plotted. + """ + + if voronoi.points.shape[1] != 2: + raise ValueError("Requires 2D input") + + region_list = [] + vertex_list = voronoi.vertices.tolist() + + center = voronoi.points.mean(axis=0) + radius = np.ptp(voronoi.points).max() * 2 + + # Construct a map containing all ridges for a given point + all_ridges = {} + for (p1, p2), (v1, v2) in zip(voronoi.ridge_points, voronoi.ridge_vertices): + all_ridges.setdefault(p1, []).append((p2, v1, v2)) + all_ridges.setdefault(p2, []).append((p1, v1, v2)) + + # Reconstruct infinite regions + for p1, region in enumerate(voronoi.point_region): + vertices = voronoi.regions[region] + + if all(v >= 0 for v in vertices): + # finite region + region_list.append(vertices) + continue + + # reconstruct a non-finite region + ridges = all_ridges[p1] + region = [v for v in vertices if v >= 0] + + for p2, v1, v2 in ridges: + if v2 < 0: + v1, v2 = v2, v1 + if v1 >= 0: + # finite ridge: already in the region + continue + + # Compute the missing endpoint of an infinite ridge + + t = voronoi.points[p2] - voronoi.points[p1] # tangent + t /= np.linalg.norm(t) + n = np.array([-t[1], t[0]]) + + midpoint = voronoi.points[[p1, p2]].mean(axis=0) + direction = np.sign(np.dot(midpoint - center, n)) * n + far_point = 
voronoi.vertices[v2] + direction * radius + + region.append(len(vertex_list)) + vertex_list.append(far_point.tolist()) + + # sort region counterclockwise + vs = np.asarray([vertex_list[v] for v in region]) + c = vs.mean(axis=0) + angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0]) + region = np.array(region)[np.argsort(angles)] + + # finish + region_list.append(region.tolist()) + + return region_list, np.asarray(vertex_list) + + +def voronoi_nn_interpolated_array_from( + shape_native: Tuple[int, int], + interpolation_grid_slim: np.ndarray, + pixel_values: np.ndarray, + voronoi: "scipy.spatial.Voronoi", +) -> np.ndarray: + try: + from autoarray.util.nn import nn_py + except ImportError as e: + raise ImportError( + "In order to use the Voronoi pixelization you must install the " + "Natural Neighbor Interpolation c package.\n\n" + "" + "See: https://github.com/Jammy2211/PyAutoArray/tree/main/autoarray/util/nn" + ) from e + + pixel_points = voronoi.points + + interpolated_array = nn_py.natural_interpolation( + pixel_points[:, 0], + pixel_points[:, 1], + pixel_values, + interpolation_grid_slim[:, 1], + interpolation_grid_slim[:, 0], + ) + + return interpolated_array.reshape(shape_native) diff --git a/autoarray/inversion/pixelization/mesh/mesh_util.py b/autoarray/inversion/pixelization/mesh/mesh_util.py index 514fb5e1d..86e2c4110 100644 --- a/autoarray/inversion/pixelization/mesh/mesh_util.py +++ b/autoarray/inversion/pixelization/mesh/mesh_util.py @@ -1,9 +1,7 @@ import jax.numpy as jnp import numpy as np -from typing import List, Tuple, Union - -from autoarray import numba_util +from typing import List, Tuple def rectangular_neighbors_from( @@ -387,299 +385,3 @@ def rectangular_edge_pixel_list_from(shape_native: Tuple[int, int]) -> List[int] # Sort and return return np.sort(edge_pixel_indices).tolist() - - -@numba_util.jit() -def delaunay_triangle_area_from( - corner_0: Tuple[float, float], - corner_1: Tuple[float, float], - corner_2: Tuple[float, float], -) -> 
float: - """ - Returns the area within a Delaunay triangle where the three corners are located at the (x,y) coordinates given by - the inputs `corner_a` `corner_b` and `corner_c`. - - This function actually returns the area of any triangle, but the term `delaunay` is included in the title to - separate it from the `rectangular` and `voronoi` methods in `mesh_util.py`. - - Parameters - ---------- - corner_0 - The (x,y) coordinates of the triangle's first corner. - corner_1 - The (x,y) coordinates of the triangle's second corner. - corner_2 - The (x,y) coordinates of the triangle's third corner. - - Returns - ------- - The area of the triangle given the input (x,y) corners. - """ - - x1 = corner_0[0] - y1 = corner_0[1] - x2 = corner_1[0] - y2 = corner_1[1] - x3 = corner_2[0] - y3 = corner_2[1] - - return 0.5 * np.abs(x1 * y2 + x2 * y3 + x3 * y1 - x2 * y1 - x3 * y2 - x1 * y3) - - -def delaunay_interpolated_array_from( - shape_native: Tuple[int, int], - interpolation_grid_slim: np.ndarray, - pixel_values: np.ndarray, - delaunay: "scipy.spatial.Delaunay", -) -> np.ndarray: - """ - Given a Delaunay triangulation and 1D values at the node of each Delaunay pixel (e.g. the connecting points where - triangles meet), interpolate these values to a uniform 2D (y,x) grid. - - By mapping the delaunay's value to a regular grid this enables a source reconstruction of an inversion to be - output to a .fits file. - - The `grid_interpolate_slim`, which gives the (y,x) coordinates the values are evaluated at for interpolation, - need not be regular and can have undergone coordinate transforms (e.g. it can be the `source_plane_mesh_grid`) - of a `Mapper`. - - The shape of `grid_interpolate_slim` therefore must be equal to `shape_native[0] * shape_native[1]`, but the (y,x) - coordinates themselves do not need to be uniform. - - Parameters - ---------- - shape_native - The 2D (y,x) shape of the uniform grid the values are interpolated on too. 
- interpolation_grid_slim - A 1D grid of (y,x) coordinates where each interpolation is evaluated. The shape of this grid must be equal to - shape_native[0] * shape_native[1], but it does not need to be uniform itself. - pixel_values - The values of the Delaunay nodes (e.g. the connecting points where triangles meet) which are interpolated - to compute the value in each pixel on the `interpolated_grid`. - delaunay - A `scipy.spatial.Delaunay` object which contains all functionality describing the Delaunay triangulation. - - Returns - ------- - The input values interpolated to the `grid_interpolate_slim` (y,x) coordintes given the Delaunay triangulation. - - """ - simplex_index_for_interpolate_index = delaunay.find_simplex(interpolation_grid_slim) - - simplices = delaunay.simplices - pixel_points = delaunay.points - - interpolated_array = np.zeros(len(interpolation_grid_slim)) - - for slim_index in range(len(interpolation_grid_slim)): - simplex_index = simplex_index_for_interpolate_index[slim_index] - interpolating_point = tuple(interpolation_grid_slim[slim_index]) - - if simplex_index == -1: - cloest_pixel_index = np.argmin( - np.sum((pixel_points - interpolating_point) ** 2.0, axis=1) - ) - interpolated_array[slim_index] = pixel_values[cloest_pixel_index] - else: - triangle_points = pixel_points[simplices[simplex_index]] - triangle_values = pixel_values[simplices[simplex_index]] - - area_0 = delaunay_triangle_area_from( - corner_0=triangle_points[1], - corner_1=triangle_points[2], - corner_2=interpolating_point, - ) - area_1 = delaunay_triangle_area_from( - corner_0=triangle_points[0], - corner_1=triangle_points[2], - corner_2=interpolating_point, - ) - area_2 = delaunay_triangle_area_from( - corner_0=triangle_points[0], - corner_1=triangle_points[1], - corner_2=interpolating_point, - ) - norm = area_0 + area_1 + area_2 - - weight_abc = np.array([area_0, area_1, area_2]) / norm - - interpolated_array[slim_index] = np.sum(weight_abc * triangle_values) - - return 
interpolated_array.reshape(shape_native) - - -@numba_util.jit() -def voronoi_neighbors_from( - pixels: int, ridge_points: np.ndarray -) -> Tuple[np.ndarray, np.ndarray]: - """ - Returns the adjacent neighbors of every pixel on a Voronoi mesh as an ndarray of shape - [total_pixels, voronoi_pixel_with_max_neighbors], using the `ridge_points` output from the `scipy.spatial.Voronoi()` - object. - - Entries with values of `-1` signify edge pixels which do not have neighbors. This function therefore also returns - an ndarray with the number of neighbors of every pixel, `neighbors_sizes`, which is iterated over when using - the `neighbors` ndarray. - - Indexing is defined in an arbritrary manner due to the irregular nature of a Voronoi mesh. - - For example, if `neighbors[0,:] = [1, 5, 36, 2, -1, -1]`, this informs us that the first Voronoi pixel has - 4 neighbors which have indexes 1, 5, 36, 2. Correspondingly `neighbors_sizes[0] = 4`. - - Parameters - ---------- - pixels - The number of pixels on the Voronoi mesh. - ridge_points - Contains the information on every Voronoi source pixel and its neighbors. - - Returns - ------- - The arrays containing the 1D index of every pixel's neighbors and the number of neighbors that each pixel has. 
- """ - neighbors_sizes = np.zeros(shape=(pixels)) - - for ridge_index in range(ridge_points.shape[0]): - pair0 = ridge_points[ridge_index, 0] - pair1 = ridge_points[ridge_index, 1] - neighbors_sizes[pair0] += 1 - neighbors_sizes[pair1] += 1 - - neighbors_index = np.zeros(shape=(pixels)) - neighbors = -1 * np.ones(shape=(pixels, int(np.max(neighbors_sizes)))) - - for ridge_index in range(ridge_points.shape[0]): - pair0 = ridge_points[ridge_index, 0] - pair1 = ridge_points[ridge_index, 1] - neighbors[pair0, int(neighbors_index[pair0])] = pair1 - neighbors[pair1, int(neighbors_index[pair1])] = pair0 - neighbors_index[pair0] += 1 - neighbors_index[pair1] += 1 - - return neighbors, neighbors_sizes - - -def voronoi_edge_pixels_from(regions: np.ndarray, point_region: np.ndarray) -> List: - """ - Returns the edge pixels of a Voronoi mesh, where the edge pixels are defined as those pixels which are on the - edge of the Voronoi diagram. - - Parameters - ---------- - regions - Indices of the Voronoi vertices forming each Voronoi region, where -1 indicates vertex outside the Voronoi - diagram. - """ - - voronoi_edge_pixel_list = [] - - for index, i in enumerate(point_region): - if -1 in regions[i]: - voronoi_edge_pixel_list.append(index) - - return voronoi_edge_pixel_list - - -def voronoi_revised_from( - voronoi: "scipy.spatial.Voronoi", -) -> Union[List[Tuple], np.ndarray]: - """ - To plot a Voronoi mesh using the `matplotlib.fill()` function a revised Voronoi mesh must be - computed, where 2D infinite voronoi regions are converted to finite 2D regions. - - This function returns a list of tuples containing the indices of the vertices of each revised Voronoi cell and - a list of tuples containing the revised Voronoi vertex vertices. - - Parameters - ---------- - voronoi - The input Voronoi diagram that is being plotted. 
- """ - - if voronoi.points.shape[1] != 2: - raise ValueError("Requires 2D input") - - region_list = [] - vertex_list = voronoi.vertices.tolist() - - center = voronoi.points.mean(axis=0) - radius = np.ptp(voronoi.points).max() * 2 - - # Construct a map containing all ridges for a given point - all_ridges = {} - for (p1, p2), (v1, v2) in zip(voronoi.ridge_points, voronoi.ridge_vertices): - all_ridges.setdefault(p1, []).append((p2, v1, v2)) - all_ridges.setdefault(p2, []).append((p1, v1, v2)) - - # Reconstruct infinite regions - for p1, region in enumerate(voronoi.point_region): - vertices = voronoi.regions[region] - - if all(v >= 0 for v in vertices): - # finite region - region_list.append(vertices) - continue - - # reconstruct a non-finite region - ridges = all_ridges[p1] - region = [v for v in vertices if v >= 0] - - for p2, v1, v2 in ridges: - if v2 < 0: - v1, v2 = v2, v1 - if v1 >= 0: - # finite ridge: already in the region - continue - - # Compute the missing endpoint of an infinite ridge - - t = voronoi.points[p2] - voronoi.points[p1] # tangent - t /= np.linalg.norm(t) - n = np.array([-t[1], t[0]]) - - midpoint = voronoi.points[[p1, p2]].mean(axis=0) - direction = np.sign(np.dot(midpoint - center, n)) * n - far_point = voronoi.vertices[v2] + direction * radius - - region.append(len(vertex_list)) - vertex_list.append(far_point.tolist()) - - # sort region counterclockwise - vs = np.asarray([vertex_list[v] for v in region]) - c = vs.mean(axis=0) - angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0]) - region = np.array(region)[np.argsort(angles)] - - # finish - region_list.append(region.tolist()) - - return region_list, np.asarray(vertex_list) - - -def voronoi_nn_interpolated_array_from( - shape_native: Tuple[int, int], - interpolation_grid_slim: np.ndarray, - pixel_values: np.ndarray, - voronoi: "scipy.spatial.Voronoi", -) -> np.ndarray: - try: - from autoarray.util.nn import nn_py - except ImportError as e: - raise ImportError( - "In order to use the Voronoi 
pixelization you must install the " - "Natural Neighbor Interpolation c package.\n\n" - "" - "See: https://github.com/Jammy2211/PyAutoArray/tree/main/autoarray/util/nn" - ) from e - - pixel_points = voronoi.points - - interpolated_array = nn_py.natural_interpolation( - pixel_points[:, 0], - pixel_points[:, 1], - pixel_values, - interpolation_grid_slim[:, 1], - interpolation_grid_slim[:, 0], - ) - - return interpolated_array.reshape(shape_native) diff --git a/autoarray/plot/wrap/two_d/voronoi_drawer.py b/autoarray/plot/wrap/two_d/voronoi_drawer.py index aec6661cc..46a8a2a13 100644 --- a/autoarray/plot/wrap/two_d/voronoi_drawer.py +++ b/autoarray/plot/wrap/two_d/voronoi_drawer.py @@ -6,7 +6,7 @@ from autoarray.plot.wrap.base.units import Units from autoarray.inversion.pixelization.mappers.voronoi import MapperVoronoi -from autoarray.inversion.pixelization.mesh import mesh_util +from autoarray.inversion.pixelization.mesh import mesh_numba_util from autoarray.plot.wrap import base as wb @@ -59,7 +59,7 @@ def draw_voronoi_pixels( if ax is None: ax = plt.gca() - regions, vertices = mesh_util.voronoi_revised_from(voronoi=mapper.voronoi) + regions, vertices = mesh_numba_util.voronoi_revised_from(voronoi=mapper.voronoi) if pixel_values is not None: norm = cmap.norm_from(array=pixel_values, use_log10=use_log10) diff --git a/autoarray/structures/mesh/triangulation_2d.py b/autoarray/structures/mesh/triangulation_2d.py index 3eeaa249d..1ffc915ce 100644 --- a/autoarray/structures/mesh/triangulation_2d.py +++ b/autoarray/structures/mesh/triangulation_2d.py @@ -2,14 +2,13 @@ from typing import List, Union, Tuple -from autoarray.structures.abstract_structure import Structure from autoconf import cached_property from autoarray.geometry.geometry_2d_irregular import Geometry2DIrregular from autoarray.structures.mesh.abstract_2d import Abstract2DMesh from autoarray import exc -from autoarray.inversion.pixelization.mesh import mesh_util +from autoarray.inversion.pixelization.mesh import 
mesh_numba_util from autoarray.structures.grids import grid_2d_util @@ -126,7 +125,7 @@ def edge_pixel_list(self) -> List: Returns a list of the Voronoi pixel indexes that are on the edge of the mesh. """ - return mesh_util.voronoi_edge_pixels_from( + return mesh_numba_util.voronoi_edge_pixels_from( regions=self.voronoi.regions, point_region=self.voronoi.point_region ) diff --git a/autoarray/structures/mesh/voronoi_2d.py b/autoarray/structures/mesh/voronoi_2d.py index 38b7a123c..b4135610d 100644 --- a/autoarray/structures/mesh/voronoi_2d.py +++ b/autoarray/structures/mesh/voronoi_2d.py @@ -8,7 +8,7 @@ from autoarray.structures.arrays.uniform_2d import Array2D from autoarray.structures.mesh.triangulation_2d import Abstract2DMeshTriangulation -from autoarray.inversion.pixelization.mesh import mesh_util +from autoarray.inversion.pixelization.mesh import mesh_numba_util class Mesh2DVoronoi(Abstract2DMeshTriangulation): @@ -36,10 +36,10 @@ def neighbors(self) -> Neighbors: see `Neighbors` for a complete description of the neighboring scheme. The neighbors of a Voronoi mesh are computed using the `ridge_points` attribute of the scipy `Voronoi` - object, as described in the method `mesh_util.voronoi_neighbors_from`. + object, as described in the method `mesh_numba_util.voronoi_neighbors_from`. 
""" - neighbors, sizes = mesh_util.voronoi_neighbors_from( + neighbors, sizes = mesh_numba_util.voronoi_neighbors_from( pixels=self.pixels, ridge_points=np.asarray(self.voronoi.ridge_points) ) @@ -83,7 +83,7 @@ def interpolated_array_from( ) if use_nn: - interpolated_array = mesh_util.voronoi_nn_interpolated_array_from( + interpolated_array = mesh_numba_util.voronoi_nn_interpolated_array_from( shape_native=shape_native, interpolation_grid_slim=interpolation_grid.slim, pixel_values=values, diff --git a/autoarray/util/__init__.py b/autoarray/util/__init__.py index 95a911a23..a9ba1dfd3 100644 --- a/autoarray/util/__init__.py +++ b/autoarray/util/__init__.py @@ -11,6 +11,7 @@ from autoarray.layout import layout_util as layout from autoarray.fit import fit_util as fit from autoarray.inversion.pixelization.mesh import mesh_util as mesh +from autoarray.inversion.pixelization.mesh import mesh_numba_util as mesh_numba from autoarray.inversion.pixelization.mappers import mapper_util as mapper from autoarray.inversion.pixelization.mappers import mapper_numba_util as mapper_numba from autoarray.inversion.regularization import regularization_util as regularization diff --git a/test_autoarray/inversion/inversion/test_inversion_util.py b/test_autoarray/inversion/inversion/test_inversion_util.py index 625ff3dcd..70c4017f8 100644 --- a/test_autoarray/inversion/inversion/test_inversion_util.py +++ b/test_autoarray/inversion/inversion/test_inversion_util.py @@ -133,7 +133,7 @@ def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): reconstruction = np.array([1.0, 1.0, 2.0]) mapped_reconstructed_data = ( - aa.util.inversion.mapped_reconstructed_data_via_image_to_pix_unique_from( + aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( data_to_pix_unique=data_to_pix_unique.astype("int"), data_weights=data_weights, pix_lengths=pix_lengths.astype("int"), @@ -165,7 +165,7 @@ def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): 
reconstruction = np.array([1.0, 1.0, 2.0]) mapped_reconstructed_data = ( - aa.util.inversion.mapped_reconstructed_data_via_image_to_pix_unique_from( + aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( data_to_pix_unique=data_to_pix_unique.astype("int"), data_weights=data_weights, pix_lengths=pix_lengths.astype("int"), From 6d6a3e70e8e589388b16f470149162e5327ea7f4 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 19:08:43 +0100 Subject: [PATCH 15/17] replace more unit tests so they use numba util --- .../imaging/inversion_imaging_numba_util.py | 2 +- .../inversion/inversion/imaging/w_tilde.py | 20 ++-- .../inversion/inversion/inversion_util.py | 1 + .../pixelization/mappers/abstract.py | 1 + .../pixelization/mappers/delaunay.py | 15 ++- .../pixelization/mappers/mapper_numba_util.py | 106 ++++++++++++++++++ .../pixelization/mappers/mapper_util.py | 102 ----------------- autoarray/structures/mesh/delaunay_2d.py | 2 +- .../imaging/test_inversion_imaging_util.py | 16 +-- .../inversion/test_inversion_util.py | 28 ++--- .../pixelization/mappers/test_delaunay.py | 2 +- .../pixelization/mappers/test_mapper_util.py | 6 +- .../pixelization/mappers/test_voronoi.py | 2 +- .../pixelization/mesh/test_mesh_util.py | 6 +- test_autoarray/inversion/test_linear_obj.py | 2 +- .../structures/mesh/test_voronoi.py | 2 +- 16 files changed, 161 insertions(+), 152 deletions(-) diff --git a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py index 7fe5c55d0..0eb95d26d 100644 --- a/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging/inversion_imaging_numba_util.py @@ -856,4 +856,4 @@ def mapped_reconstructed_data_via_image_to_pix_unique_from( data_weights[data_0, pix_0] * reconstruction[pix_for_data] ) - return mapped_reconstructed_data \ No newline at end of file + return 
mapped_reconstructed_data diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index 05bc7a9a8..ad7d4c7e1 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -251,10 +251,12 @@ def curvature_matrix(self) -> np.ndarray: ) if len(self.no_regularization_index_list) > 0: - curvature_matrix = inversion_imaging_numba_util.curvature_matrix_with_added_to_diag_from( - curvature_matrix=curvature_matrix, - value=self.settings.no_regularization_add_to_curvature_diag_value, - no_regularization_index_list=self.no_regularization_index_list, + curvature_matrix = ( + inversion_imaging_numba_util.curvature_matrix_with_added_to_diag_from( + curvature_matrix=curvature_matrix, + value=self.settings.no_regularization_add_to_curvature_diag_value, + no_regularization_index_list=self.no_regularization_index_list, + ) ) return curvature_matrix @@ -515,15 +517,16 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: reconstruction=reconstruction, ) + mapped_reconstructed_image = self.psf.convolve_image_no_blurring( + image=mapped_reconstructed_image, mask=self.mask + ).array + mapped_reconstructed_image = Array2D( values=mapped_reconstructed_image, mask=self.mask ) - mapped_reconstructed_image = self.convolver.convolve_image_no_blurring( - image=mapped_reconstructed_image - ) - else: + operated_mapping_matrix = self.linear_func_operated_mapping_matrix_dict[ linear_obj ] @@ -536,7 +539,6 @@ def mapped_reconstructed_data_dict(self) -> Dict[LinearObj, Array2D]: values=mapped_reconstructed_image, mask=self.mask ) - mapped_reconstructed_data_dict[linear_obj] = mapped_reconstructed_image return mapped_reconstructed_data_dict diff --git a/autoarray/inversion/inversion/inversion_util.py b/autoarray/inversion/inversion/inversion_util.py index 737a7e779..95e216c9e 100644 --- a/autoarray/inversion/inversion/inversion_util.py +++ 
b/autoarray/inversion/inversion/inversion_util.py @@ -60,6 +60,7 @@ def curvature_matrix_with_added_to_diag_from( no_regularization_index_list, no_regularization_index_list ].add(value) + def curvature_matrix_mirrored_from( curvature_matrix: np.ndarray, ) -> np.ndarray: diff --git a/autoarray/inversion/pixelization/mappers/abstract.py b/autoarray/inversion/pixelization/mappers/abstract.py index 4357896f0..480650ee0 100644 --- a/autoarray/inversion/pixelization/mappers/abstract.py +++ b/autoarray/inversion/pixelization/mappers/abstract.py @@ -18,6 +18,7 @@ from autoarray.inversion.pixelization.mappers import mapper_util from autoarray.inversion.pixelization.mappers import mapper_numba_util + class AbstractMapper(LinearObj): def __init__( self, diff --git a/autoarray/inversion/pixelization/mappers/delaunay.py b/autoarray/inversion/pixelization/mappers/delaunay.py index 050c22199..1b4226771 100644 --- a/autoarray/inversion/pixelization/mappers/delaunay.py +++ b/autoarray/inversion/pixelization/mappers/delaunay.py @@ -8,6 +8,7 @@ from autoarray.inversion.pixelization.mappers import mapper_util from autoarray.inversion.pixelization.mappers import mapper_numba_util + class MapperDelaunay(AbstractMapper): """ To understand a `Mapper` one must be familiar `Mesh` objects and the `mesh` and `pixelization` packages, where @@ -112,11 +113,15 @@ def pix_sub_weights(self) -> PixSubWeights: ) pix_indexes_for_simplex_index = delaunay.simplices - mappings, sizes = mapper_numba_util.pix_indexes_for_sub_slim_index_delaunay_from( - source_plane_data_grid=np.array(self.source_plane_data_grid.over_sampled), - simplex_index_for_sub_slim_index=simplex_index_for_sub_slim_index, - pix_indexes_for_simplex_index=pix_indexes_for_simplex_index, - delaunay_points=delaunay.points, + mappings, sizes = ( + mapper_numba_util.pix_indexes_for_sub_slim_index_delaunay_from( + source_plane_data_grid=np.array( + self.source_plane_data_grid.over_sampled + ), + 
simplex_index_for_sub_slim_index=simplex_index_for_sub_slim_index, + pix_indexes_for_simplex_index=pix_indexes_for_simplex_index, + delaunay_points=delaunay.points, + ) ) mappings = mappings.astype("int") diff --git a/autoarray/inversion/pixelization/mappers/mapper_numba_util.py b/autoarray/inversion/pixelization/mappers/mapper_numba_util.py index e5476e142..916c813dd 100644 --- a/autoarray/inversion/pixelization/mappers/mapper_numba_util.py +++ b/autoarray/inversion/pixelization/mappers/mapper_numba_util.py @@ -1,9 +1,13 @@ import numpy as np from typing import Tuple +from autoconf import conf + from autoarray import numba_util from autoarray.inversion.pixelization.mesh import mesh_numba_util +from autoarray import exc + @numba_util.jit() def data_slim_to_pixelization_unique_from( @@ -245,3 +249,105 @@ def remove_bad_entries_voronoi_nn( pix_weights_for_sub_slim_index[ind][0] = 1.0 return pix_weights_for_sub_slim_index, pix_indexes_for_sub_slim_index + + +def pix_size_weights_voronoi_nn_from( + grid: np.ndarray, mesh_grid: np.ndarray +) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """ + Returns the mappings between a set of slimmed sub-grid pixels and pixelization pixels, using information on + how the pixels hosting each sub-pixel map to their closest pixelization pixel on the slim grid in the data-plane + and the pixelization's pixel centres. + + To determine the complete set of slim sub-pixel to pixelization pixel mappings, we must pair every sub-pixel to + its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via + the Voronoi grid) are used to localize each nearest neighbor search by using a graph search. + + Parameters + ---------- + grid + The grid of (y,x) scaled coordinates at the centre of every unmasked pixel, which has been traced to + to an irgrid via lens. + slim_index_for_sub_slim_index + The mappings between the data slimmed sub-pixels and their regular pixels. 
+ mesh_grid + The (y,x) centre of every Voronoi pixel in arc-seconds. + neighbors + An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in + the Voronoi grid (entries of -1 correspond to no neighbor). + neighbors_sizes + An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the + Voronoi grid. + """ + + try: + from autoarray.util.nn import nn_py + except ImportError as e: + raise ImportError( + "In order to use the Voronoi pixelization you must install the " + "Natural Neighbor Interpolation c package.\n\n" + "" + "See: https://github.com/Jammy2211/PyAutoArray/tree/main/autoarray/util/nn" + ) from e + + max_nneighbours = conf.instance["general"]["pixelization"][ + "voronoi_nn_max_interpolation_neighbors" + ] + + ( + pix_weights_for_sub_slim_index, + pix_indexes_for_sub_slim_index, + ) = nn_py.natural_interpolation_weights( + x_in=mesh_grid[:, 1], + y_in=mesh_grid[:, 0], + x_target=grid[:, 1], + y_target=grid[:, 0], + max_nneighbours=max_nneighbours, + ) + + bad_indexes = np.argwhere(np.sum(pix_weights_for_sub_slim_index < 0.0, axis=1) > 0) + + ( + pix_weights_for_sub_slim_index, + pix_indexes_for_sub_slim_index, + ) = remove_bad_entries_voronoi_nn( + bad_indexes=bad_indexes, + pix_weights_for_sub_slim_index=pix_weights_for_sub_slim_index, + pix_indexes_for_sub_slim_index=pix_indexes_for_sub_slim_index, + grid=np.array(grid), + mesh_grid=np.array(mesh_grid), + ) + + bad_indexes = np.argwhere(pix_indexes_for_sub_slim_index[:, 0] == -1) + + ( + pix_weights_for_sub_slim_index, + pix_indexes_for_sub_slim_index, + ) = remove_bad_entries_voronoi_nn( + bad_indexes=bad_indexes, + pix_weights_for_sub_slim_index=pix_weights_for_sub_slim_index, + pix_indexes_for_sub_slim_index=pix_indexes_for_sub_slim_index, + grid=np.array(grid), + mesh_grid=np.array(mesh_grid), + ) + + pix_indexes_for_sub_slim_index_sizes = np.sum( + pix_indexes_for_sub_slim_index != -1, axis=1 + ) + + if 
np.max(pix_indexes_for_sub_slim_index_sizes) > max_nneighbours: + raise exc.MeshException( + f""" + The number of Voronoi natural neighbours interpolations in one or more pixelization pixel's + exceeds the maximum allowed: max_nneighbors = {max_nneighbours}. + + To fix this, increase the value of `voronoi_nn_max_interpolation_neighbors` in the [pixelization] + section of the `general.ini` config file. + """ + ) + + return ( + pix_indexes_for_sub_slim_index, + pix_indexes_for_sub_slim_index_sizes, + pix_weights_for_sub_slim_index, + ) diff --git a/autoarray/inversion/pixelization/mappers/mapper_util.py b/autoarray/inversion/pixelization/mappers/mapper_util.py index 012ef27a7..2fac0a81c 100644 --- a/autoarray/inversion/pixelization/mappers/mapper_util.py +++ b/autoarray/inversion/pixelization/mappers/mapper_util.py @@ -283,108 +283,6 @@ def nearest_pixelization_index_for_slim_index_from_kdtree(grid, mesh_grid): return sparse_index_for_slim_index -def pix_size_weights_voronoi_nn_from( - grid: np.ndarray, mesh_grid: np.ndarray -) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - Returns the mappings between a set of slimmed sub-grid pixels and pixelization pixels, using information on - how the pixels hosting each sub-pixel map to their closest pixelization pixel on the slim grid in the data-plane - and the pixelization's pixel centres. - - To determine the complete set of slim sub-pixel to pixelization pixel mappings, we must pair every sub-pixel to - its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via - the Voronoi grid) are used to localize each nearest neighbor search by using a graph search. - - Parameters - ---------- - grid - The grid of (y,x) scaled coordinates at the centre of every unmasked pixel, which has been traced to - to an irgrid via lens. - slim_index_for_sub_slim_index - The mappings between the data slimmed sub-pixels and their regular pixels. 
- mesh_grid - The (y,x) centre of every Voronoi pixel in arc-seconds. - neighbors - An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in - the Voronoi grid (entries of -1 correspond to no neighbor). - neighbors_sizes - An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the - Voronoi grid. - """ - - try: - from autoarray.util.nn import nn_py - except ImportError as e: - raise ImportError( - "In order to use the Voronoi pixelization you must install the " - "Natural Neighbor Interpolation c package.\n\n" - "" - "See: https://github.com/Jammy2211/PyAutoArray/tree/main/autoarray/util/nn" - ) from e - - max_nneighbours = conf.instance["general"]["pixelization"][ - "voronoi_nn_max_interpolation_neighbors" - ] - - ( - pix_weights_for_sub_slim_index, - pix_indexes_for_sub_slim_index, - ) = nn_py.natural_interpolation_weights( - x_in=mesh_grid[:, 1], - y_in=mesh_grid[:, 0], - x_target=grid[:, 1], - y_target=grid[:, 0], - max_nneighbours=max_nneighbours, - ) - - bad_indexes = np.argwhere(np.sum(pix_weights_for_sub_slim_index < 0.0, axis=1) > 0) - - ( - pix_weights_for_sub_slim_index, - pix_indexes_for_sub_slim_index, - ) = remove_bad_entries_voronoi_nn( - bad_indexes=bad_indexes, - pix_weights_for_sub_slim_index=pix_weights_for_sub_slim_index, - pix_indexes_for_sub_slim_index=pix_indexes_for_sub_slim_index, - grid=np.array(grid), - mesh_grid=np.array(mesh_grid), - ) - - bad_indexes = np.argwhere(pix_indexes_for_sub_slim_index[:, 0] == -1) - - ( - pix_weights_for_sub_slim_index, - pix_indexes_for_sub_slim_index, - ) = remove_bad_entries_voronoi_nn( - bad_indexes=bad_indexes, - pix_weights_for_sub_slim_index=pix_weights_for_sub_slim_index, - pix_indexes_for_sub_slim_index=pix_indexes_for_sub_slim_index, - grid=np.array(grid), - mesh_grid=np.array(mesh_grid), - ) - - pix_indexes_for_sub_slim_index_sizes = np.sum( - pix_indexes_for_sub_slim_index != -1, axis=1 - ) - - if 
np.max(pix_indexes_for_sub_slim_index_sizes) > max_nneighbours: - raise exc.MeshException( - f""" - The number of Voronoi natural neighbours interpolations in one or more pixelization pixel's - exceeds the maximum allowed: max_nneighbors = {max_nneighbours}. - - To fix this, increase the value of `voronoi_nn_max_interpolation_neighbors` in the [pixelization] - section of the `general.ini` config file. - """ - ) - - return ( - pix_indexes_for_sub_slim_index, - pix_indexes_for_sub_slim_index_sizes, - pix_weights_for_sub_slim_index, - ) - - def adaptive_pixel_signals_from( pixels: int, pixel_weights: np.ndarray, diff --git a/autoarray/structures/mesh/delaunay_2d.py b/autoarray/structures/mesh/delaunay_2d.py index 61cc5e88a..e12f99b28 100644 --- a/autoarray/structures/mesh/delaunay_2d.py +++ b/autoarray/structures/mesh/delaunay_2d.py @@ -69,7 +69,7 @@ def interpolated_array_from( shape_native=shape_native, extent=extent ) - interpolated_array = mesh_util.delaunay_interpolated_array_from( + interpolated_array = mesh_numba_util.delaunay_interpolated_array_from( shape_native=shape_native, interpolation_grid_slim=np.array(interpolation_grid.slim.array), delaunay=self.delaunay, diff --git a/test_autoarray/inversion/inversion/imaging/test_inversion_imaging_util.py b/test_autoarray/inversion/inversion/imaging/test_inversion_imaging_util.py index 120a1da9c..cafb8722b 100644 --- a/test_autoarray/inversion/inversion/imaging/test_inversion_imaging_util.py +++ b/test_autoarray/inversion/inversion/imaging/test_inversion_imaging_util.py @@ -17,7 +17,7 @@ def test__w_tilde_imaging_from(): native_index_for_slim_index = np.array([[1, 1], [1, 2], [2, 1], [2, 2]]) - w_tilde = aa.util.inversion_imaging.w_tilde_curvature_imaging_from( + w_tilde = aa.util.inversion_imaging_numba.w_tilde_curvature_imaging_from( noise_map_native=noise_map_2d, kernel_native=kernel, native_index_for_slim_index=native_index_for_slim_index, @@ -87,7 +87,7 @@ def test__w_tilde_curvature_preload_imaging_from(): 
w_tilde_preload, w_tilde_indexes, w_tilde_lengths, - ) = aa.util.inversion_imaging.w_tilde_curvature_preload_imaging_from( + ) = aa.util.inversion_imaging_numba.w_tilde_curvature_preload_imaging_from( noise_map_native=noise_map_2d, kernel_native=kernel, native_index_for_slim_index=native_index_for_slim_index, @@ -228,7 +228,7 @@ def test__data_vector_via_w_tilde_data_two_methods_agree(): data_to_pix_unique, data_weights, pix_lengths, - ) = aa.util.mapper.data_slim_to_pixelization_unique_from( + ) = aa.util.mapper_numba.data_slim_to_pixelization_unique_from( data_pixels=w_tilde_data.shape[0], pix_indexes_for_sub_slim_index=np.array( mapper.pix_indexes_for_sub_slim_index @@ -242,7 +242,7 @@ def test__data_vector_via_w_tilde_data_two_methods_agree(): ) data_vector_via_w_tilde = ( - aa.util.inversion_imaging.data_vector_via_w_tilde_data_imaging_from( + aa.util.inversion_imaging_numba.data_vector_via_w_tilde_data_imaging_from( w_tilde_data=np.array(w_tilde_data), data_to_pix_unique=data_to_pix_unique.astype("int"), data_weights=data_weights, @@ -278,7 +278,7 @@ def test__curvature_matrix_via_w_tilde_two_methods_agree(): mapping_matrix = mapper.mapping_matrix - w_tilde = aa.util.inversion_imaging.w_tilde_curvature_imaging_from( + w_tilde = aa.util.inversion_imaging_numba.w_tilde_curvature_imaging_from( noise_map_native=np.array(noise_map.native.array), kernel_native=np.array(kernel.native.array), native_index_for_slim_index=np.array( @@ -335,7 +335,7 @@ def test__curvature_matrix_via_w_tilde_preload_two_methods_agree(): w_tilde_preload, w_tilde_indexes, w_tilde_lengths, - ) = aa.util.inversion_imaging.w_tilde_curvature_preload_imaging_from( + ) = aa.util.inversion_imaging_numba.w_tilde_curvature_preload_imaging_from( noise_map_native=np.array(noise_map.native.array), kernel_native=np.array(kernel.native.array), native_index_for_slim_index=np.array( @@ -347,7 +347,7 @@ def test__curvature_matrix_via_w_tilde_preload_two_methods_agree(): data_to_pix_unique, data_weights, 
pix_lengths, - ) = aa.util.mapper.data_slim_to_pixelization_unique_from( + ) = aa.util.mapper_numba.data_slim_to_pixelization_unique_from( data_pixels=w_tilde_lengths.shape[0], pix_indexes_for_sub_slim_index=np.array( mapper.pix_indexes_for_sub_slim_index @@ -360,7 +360,7 @@ def test__curvature_matrix_via_w_tilde_preload_two_methods_agree(): sub_size=np.array(grid.over_sample_size), ) - curvature_matrix_via_w_tilde = aa.util.inversion_imaging.curvature_matrix_via_w_tilde_curvature_preload_imaging_from( + curvature_matrix_via_w_tilde = aa.util.inversion_imaging_numba.curvature_matrix_via_w_tilde_curvature_preload_imaging_from( curvature_preload=w_tilde_preload, curvature_indexes=w_tilde_indexes.astype("int"), curvature_lengths=w_tilde_lengths.astype("int"), diff --git a/test_autoarray/inversion/inversion/test_inversion_util.py b/test_autoarray/inversion/inversion/test_inversion_util.py index 70c4017f8..0b1661f2e 100644 --- a/test_autoarray/inversion/inversion/test_inversion_util.py +++ b/test_autoarray/inversion/inversion/test_inversion_util.py @@ -121,7 +121,7 @@ def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): data_to_pix_unique, data_weights, pix_lengths, - ) = aa.util.mapper.data_slim_to_pixelization_unique_from( + ) = aa.util.mapper_numba.data_slim_to_pixelization_unique_from( data_pixels=3, pix_indexes_for_sub_slim_index=pix_indexes_for_sub_slim_index, pix_sizes_for_sub_slim_index=pix_indexes_for_sub_slim_index_sizes, @@ -132,13 +132,11 @@ def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): reconstruction = np.array([1.0, 1.0, 2.0]) - mapped_reconstructed_data = ( - aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( - data_to_pix_unique=data_to_pix_unique.astype("int"), - data_weights=data_weights, - pix_lengths=pix_lengths.astype("int"), - reconstruction=reconstruction, - ) + mapped_reconstructed_data = 
aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( + data_to_pix_unique=data_to_pix_unique.astype("int"), + data_weights=data_weights, + pix_lengths=pix_lengths.astype("int"), + reconstruction=reconstruction, ) assert (mapped_reconstructed_data == np.array([1.0, 1.0, 2.0])).all() @@ -153,7 +151,7 @@ def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): data_to_pix_unique, data_weights, pix_lengths, - ) = aa.util.mapper.data_slim_to_pixelization_unique_from( + ) = aa.util.mapper_numba.data_slim_to_pixelization_unique_from( data_pixels=3, pix_indexes_for_sub_slim_index=pix_indexes_for_sub_slim_index, pix_sizes_for_sub_slim_index=pix_indexes_for_sub_slim_index_sizes, @@ -164,13 +162,11 @@ def test__mapped_reconstructed_data_via_image_to_pix_unique_from(): reconstruction = np.array([1.0, 1.0, 2.0]) - mapped_reconstructed_data = ( - aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( - data_to_pix_unique=data_to_pix_unique.astype("int"), - data_weights=data_weights, - pix_lengths=pix_lengths.astype("int"), - reconstruction=reconstruction, - ) + mapped_reconstructed_data = aa.util.inversion_imaging_numba.mapped_reconstructed_data_via_image_to_pix_unique_from( + data_to_pix_unique=data_to_pix_unique.astype("int"), + data_weights=data_weights, + pix_lengths=pix_lengths.astype("int"), + reconstruction=reconstruction, ) assert (mapped_reconstructed_data == np.array([1.25, 1.0, 1.75])).all() diff --git a/test_autoarray/inversion/pixelization/mappers/test_delaunay.py b/test_autoarray/inversion/pixelization/mappers/test_delaunay.py index 33b03fbb4..070d47598 100644 --- a/test_autoarray/inversion/pixelization/mappers/test_delaunay.py +++ b/test_autoarray/inversion/pixelization/mappers/test_delaunay.py @@ -28,7 +28,7 @@ def test__pix_indexes_for_sub_slim_index__matches_util(grid_2d_sub_1_7x7): ( pix_indexes_for_sub_slim_index_util, sizes, - ) = 
aa.util.mapper.pix_indexes_for_sub_slim_index_delaunay_from( + ) = aa.util.mapper_numba.pix_indexes_for_sub_slim_index_delaunay_from( source_plane_data_grid=np.array(mapper.source_plane_data_grid), simplex_index_for_sub_slim_index=simplex_index_for_sub_slim_index, pix_indexes_for_simplex_index=pix_indexes_for_simplex_index, diff --git a/test_autoarray/inversion/pixelization/mappers/test_mapper_util.py b/test_autoarray/inversion/pixelization/mappers/test_mapper_util.py index fdbaabe88..2a862ea63 100644 --- a/test_autoarray/inversion/pixelization/mappers/test_mapper_util.py +++ b/test_autoarray/inversion/pixelization/mappers/test_mapper_util.py @@ -278,7 +278,7 @@ def test__data_to_pix_unique_from(): data_to_pix_unique, data_weights, pix_lengths, - ) = aa.util.mapper.data_slim_to_pixelization_unique_from( + ) = aa.util.mapper_numba.data_slim_to_pixelization_unique_from( data_pixels=image_pixels, pix_indexes_for_sub_slim_index=pix_indexes_for_sub_slim_index, pix_sizes_for_sub_slim_index=pix_size_for_sub_slim_index, @@ -314,7 +314,7 @@ def test__data_to_pix_unique_from(): data_to_pix_unique, data_weights, pix_lengths, - ) = aa.util.mapper.data_slim_to_pixelization_unique_from( + ) = aa.util.mapper_numba.data_slim_to_pixelization_unique_from( data_pixels=image_pixels, pix_indexes_for_sub_slim_index=pix_indexes_for_sub_slim_index, pix_sizes_for_sub_slim_index=pix_size_for_sub_slim_index, @@ -343,7 +343,7 @@ def test__weights(): pix_indexes_for_sub_slim_index = np.array([[0, 1, 2], [2, -1, -1]]) - pixel_weights = aa.util.mapper.pixel_weights_delaunay_from( + pixel_weights = aa.util.mapper_numba.pixel_weights_delaunay_from( source_plane_data_grid=source_plane_data_grid, source_plane_mesh_grid=source_plane_mesh_grid, slim_index_for_sub_slim_index=slim_index_for_sub_slim_index, diff --git a/test_autoarray/inversion/pixelization/mappers/test_voronoi.py b/test_autoarray/inversion/pixelization/mappers/test_voronoi.py index 57e971aae..92bc5995a 100644 --- 
a/test_autoarray/inversion/pixelization/mappers/test_voronoi.py +++ b/test_autoarray/inversion/pixelization/mappers/test_voronoi.py @@ -35,7 +35,7 @@ def test__pix_indexes_for_sub_slim_index__matches_util(grid_2d_sub_1_7x7): pix_indexes_for_sub_slim_index_util, sizes, weights, - ) = aa.util.mapper.pix_size_weights_voronoi_nn_from( + ) = aa.util.mapper_numba.pix_size_weights_voronoi_nn_from( grid=grid_2d_sub_1_7x7, mesh_grid=source_plane_mesh_grid ) diff --git a/test_autoarray/inversion/pixelization/mesh/test_mesh_util.py b/test_autoarray/inversion/pixelization/mesh/test_mesh_util.py index 25cca246c..b9058dede 100644 --- a/test_autoarray/inversion/pixelization/mesh/test_mesh_util.py +++ b/test_autoarray/inversion/pixelization/mesh/test_mesh_util.py @@ -110,7 +110,7 @@ def test__voronoi_neighbors_from(): points = np.array([[1.0, -1.0], [1.0, 1.0], [0.0, 0.0], [-1.0, -1.0], [-1.0, 1.0]]) voronoi = scipy.spatial.Voronoi(points, qhull_options="Qbb Qc Qx Qm") - (neighbors, neighbors_sizes) = aa.util.mesh.voronoi_neighbors_from( + (neighbors, neighbors_sizes) = aa.util.mesh_numba.voronoi_neighbors_from( pixels=5, ridge_points=np.array(voronoi.ridge_points) ) @@ -139,7 +139,7 @@ def test__voronoi_neighbors_from(): ) voronoi = scipy.spatial.Voronoi(points, qhull_options="Qbb Qc Qx Qm") - (neighbors, neighbors_sizes) = aa.util.mesh.voronoi_neighbors_from( + (neighbors, neighbors_sizes) = aa.util.mesh_numba.voronoi_neighbors_from( pixels=9, ridge_points=np.array(voronoi.ridge_points) ) @@ -171,7 +171,7 @@ def test__delaunay_interpolated_grid_from(): values = np.array([1.0, 2.0, 3.0, 4.0, 5.0]) - interpolated_grid = aa.util.mesh.delaunay_interpolated_array_from( + interpolated_grid = aa.util.mesh_numba.delaunay_interpolated_array_from( shape_native=shape_native, interpolation_grid_slim=grid_interpolate_slim, pixel_values=values, diff --git a/test_autoarray/inversion/test_linear_obj.py b/test_autoarray/inversion/test_linear_obj.py index c54204350..f906c2989 100644 --- 
a/test_autoarray/inversion/test_linear_obj.py +++ b/test_autoarray/inversion/test_linear_obj.py @@ -27,7 +27,7 @@ def test__data_to_pix_unique_from(): data_to_pix_unique, data_weights, pix_lengths, - ) = aa.util.mapper.data_slim_to_pixelization_unique_from( + ) = aa.util.mapper_numba.data_slim_to_pixelization_unique_from( data_pixels=image_pixels, pix_indexes_for_sub_slim_index=pix_index_for_sub_slim_index, pix_sizes_for_sub_slim_index=pix_sizes_for_sub_slim_index, diff --git a/test_autoarray/structures/mesh/test_voronoi.py b/test_autoarray/structures/mesh/test_voronoi.py index 12ef7bc30..6e3fa5887 100644 --- a/test_autoarray/structures/mesh/test_voronoi.py +++ b/test_autoarray/structures/mesh/test_voronoi.py @@ -29,7 +29,7 @@ def test__neighbors__compare_to_mesh_util(): np.asarray([grid[:, 1], grid[:, 0]]).T, qhull_options="Qbb Qc Qx Qm" ) - (neighbors_util, neighbors_sizes_util) = aa.util.mesh.voronoi_neighbors_from( + (neighbors_util, neighbors_sizes_util) = aa.util.mesh_numba.voronoi_neighbors_from( pixels=9, ridge_points=np.array(voronoi.ridge_points) ) From 5525b9cf80f55bccbabcf26a3d872cb24da56e15 Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Wed, 1 Oct 2025 19:12:37 +0100 Subject: [PATCH 16/17] fix mesh numab improt --- autoarray/structures/mesh/delaunay_2d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/autoarray/structures/mesh/delaunay_2d.py b/autoarray/structures/mesh/delaunay_2d.py index e12f99b28..11c1707ae 100644 --- a/autoarray/structures/mesh/delaunay_2d.py +++ b/autoarray/structures/mesh/delaunay_2d.py @@ -6,7 +6,7 @@ from autoarray.inversion.linear_obj.neighbors import Neighbors from autoarray.structures.arrays.uniform_2d import Array2D from autoarray.structures.mesh.triangulation_2d import Abstract2DMeshTriangulation -from autoarray.inversion.pixelization.mesh import mesh_util +from autoarray.inversion.pixelization.mesh import mesh_numba_util class Mesh2DDelaunay(Abstract2DMeshTriangulation): From 
ea3067cc4127dd7c26971b9a38b15e9b8fba573a Mon Sep 17 00:00:00 2001 From: Jammy2211 Date: Mon, 6 Oct 2025 12:27:56 +0100 Subject: [PATCH 17/17] JAX complet --- autoarray/inversion/inversion/imaging/w_tilde.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autoarray/inversion/inversion/imaging/w_tilde.py b/autoarray/inversion/inversion/imaging/w_tilde.py index ad7d4c7e1..1e67270a0 100644 --- a/autoarray/inversion/inversion/imaging/w_tilde.py +++ b/autoarray/inversion/inversion/imaging/w_tilde.py @@ -74,8 +74,9 @@ def __init__( @cached_property def w_tilde_data(self): + return inversion_imaging_numba_util.w_tilde_data_imaging_from( - image_native=self.data.native.array, + image_native=np.array(self.data.native.array), noise_map_native=self.noise_map.native.array, kernel_native=self.psf.native.array, native_index_for_slim_index=self.data.mask.derive_indexes.native_for_slim,