diff --git a/autoarray/dataset/interferometer/dataset.py b/autoarray/dataset/interferometer/dataset.py index 01b5d84bd..0a2d5bbdb 100644 --- a/autoarray/dataset/interferometer/dataset.py +++ b/autoarray/dataset/interferometer/dataset.py @@ -10,7 +10,7 @@ from autoarray.dataset.interferometer.w_tilde import WTildeInterferometer from autoarray.dataset.grids import GridsDataset from autoarray.operators.transformer import TransformerNUFFT - +from autoarray.mask.mask_2d import Mask2D from autoarray.structures.visibilities import Visibilities from autoarray.structures.visibilities import VisibilitiesNoiseMap @@ -25,8 +25,9 @@ def __init__( data: Visibilities, noise_map: VisibilitiesNoiseMap, uv_wavelengths: np.ndarray, - real_space_mask, + real_space_mask: Mask2D, transformer_class=TransformerNUFFT, + dft_preload_transform: bool = True, preprocessing_directory=None, ): """ @@ -73,6 +74,9 @@ def __init__( transformer_class The class of the Fourier Transform which maps images from real space to Fourier space visibilities and the uv-plane. + dft_preload_transform + If True, precomputes and stores the cosine and sine terms for the Fourier transform. + This accelerates repeated transforms but consumes additional memory (~1GB+ for large datasets). 
""" self.real_space_mask = real_space_mask @@ -86,7 +90,9 @@ def __init__( self.uv_wavelengths = uv_wavelengths self.transformer = transformer_class( - uv_wavelengths=uv_wavelengths, real_space_mask=real_space_mask + uv_wavelengths=uv_wavelengths, + real_space_mask=real_space_mask, + preload_transform=dft_preload_transform, ) self.preprocessing_directory = ( @@ -114,6 +120,7 @@ def from_fits( noise_map_hdu=0, uv_wavelengths_hdu=0, transformer_class=TransformerNUFFT, + dft_preload_transform: bool = True, ): """ Factory for loading the interferometer data_type from .fits files, as well as computing properties like the @@ -139,6 +146,7 @@ def from_fits( noise_map=noise_map, uv_wavelengths=uv_wavelengths, transformer_class=transformer_class, + dft_preload_transform=dft_preload_transform, ) def w_tilde_preprocessing(self): diff --git a/autoarray/fit/fit_interferometer.py b/autoarray/fit/fit_interferometer.py index ec4c1d99d..40a713bc4 100644 --- a/autoarray/fit/fit_interferometer.py +++ b/autoarray/fit/fit_interferometer.py @@ -113,7 +113,7 @@ def chi_squared(self) -> float: Returns the chi-squared terms of the model data's fit to an dataset, by summing the chi-squared-map. """ return fit_util.chi_squared_complex_from( - chi_squared_map=self.chi_squared_map, + chi_squared_map=self.chi_squared_map.array, ) @property diff --git a/autoarray/fit/fit_util.py b/autoarray/fit/fit_util.py index c61262c1a..7da5cd310 100644 --- a/autoarray/fit/fit_util.py +++ b/autoarray/fit/fit_util.py @@ -158,8 +158,8 @@ def chi_squared_complex_from(*, chi_squared_map: jnp.ndarray) -> float: chi_squared_map The chi-squared-map of values of the model-data fit to the dataset. 
""" - chi_squared_real = jnp.sum(np.array(chi_squared_map.real)) - chi_squared_imag = jnp.sum(np.array(chi_squared_map.imag)) + chi_squared_real = jnp.sum(chi_squared_map.real) + chi_squared_imag = jnp.sum(chi_squared_map.imag) return chi_squared_real + chi_squared_imag diff --git a/autoarray/fit/plot/fit_interferometer_plotters.py b/autoarray/fit/plot/fit_interferometer_plotters.py index 93e7b212e..5247d67e5 100644 --- a/autoarray/fit/plot/fit_interferometer_plotters.py +++ b/autoarray/fit/plot/fit_interferometer_plotters.py @@ -183,7 +183,7 @@ def figures_2d( auto_labels=AutoLabels( title="Model Visibilities", filename="model_data" ), - color_array=np.real(self.fit.model_data), + color_array=np.real(self.fit.model_data.array), ) if residual_map_real: diff --git a/autoarray/inversion/inversion/abstract.py b/autoarray/inversion/inversion/abstract.py index ebbb9927f..bec0b1ce2 100644 --- a/autoarray/inversion/inversion/abstract.py +++ b/autoarray/inversion/inversion/abstract.py @@ -287,7 +287,7 @@ def mapping_matrix(self) -> np.ndarray: If there are multiple linear objects, the mapping matrices are stacked such that their simultaneous linear equations are solved simultaneously. This property returns the stacked mapping matrix. """ - return np.hstack( + return jnp.hstack( [linear_obj.mapping_matrix for linear_obj in self.linear_obj_list] ) diff --git a/autoarray/inversion/inversion/dataset_interface.py b/autoarray/inversion/inversion/dataset_interface.py index c1cf2db9a..f37efef36 100644 --- a/autoarray/inversion/inversion/dataset_interface.py +++ b/autoarray/inversion/inversion/dataset_interface.py @@ -36,6 +36,9 @@ def __init__( noise_map An array describing the RMS standard deviation error in each pixel used for computing quantities like the chi-squared in a fit (in PyAutoGalaxy and PyAutoLens the recommended units are electrons per second). 
+ grids + The grids of (y,x) Cartesian coordinates that the image data is paired with, which are used for evaluting + light profiles and calculations associated with a pixelization. over_sampler Performs over-sampling whereby the masked image pixels are split into sub-pixels, which are all mapped via the mapper with sub-fractional values of flux. @@ -50,9 +53,6 @@ def __init__( w_tilde The w_tilde matrix used by the w-tilde formalism to construct the data vector and curvature matrix during an inversion efficiently.. - grids - The grids of (y,x) Cartesian coordinates that the image data is paired with, which are used for evaluting - light profiles and calculations associated with a pixelization. noise_covariance_matrix A noise-map covariance matrix representing the covariance between noise in every `data` value, which can be used via a bespoke fit to account for correlated noise in the data. diff --git a/autoarray/inversion/inversion/interferometer/inversion_interferometer_util.py b/autoarray/inversion/inversion/interferometer/inversion_interferometer_util.py index 29580c06d..1a3e647dc 100644 --- a/autoarray/inversion/inversion/interferometer/inversion_interferometer_util.py +++ b/autoarray/inversion/inversion/interferometer/inversion_interferometer_util.py @@ -387,7 +387,6 @@ def w_tilde_via_preload_from(w_tilde_preload, native_index_for_slim_index): return w_tilde_via_preload -@numba_util.jit() def data_vector_via_transformed_mapping_matrix_from( transformed_mapping_matrix: np.ndarray, visibilities: np.ndarray, @@ -406,31 +405,24 @@ def data_vector_via_transformed_mapping_matrix_from( noise_map Flattened 1D array of the noise-map used by the inversion during the fit. 
""" + # Extract components + vis_real = visibilities.real + vis_imag = visibilities.imag + f_real = transformed_mapping_matrix.real + f_imag = transformed_mapping_matrix.imag + noise_real = noise_map.real + noise_imag = noise_map.imag - data_vector = np.zeros(transformed_mapping_matrix.shape[1]) - - visibilities_real = visibilities.real - visibilities_imag = visibilities.imag - transformed_mapping_matrix_real = transformed_mapping_matrix.real - transformed_mapping_matrix_imag = transformed_mapping_matrix.imag - noise_map_real = noise_map.real - noise_map_imag = noise_map.imag - - for vis_1d_index in range(transformed_mapping_matrix.shape[0]): - for pix_1d_index in range(transformed_mapping_matrix.shape[1]): - real_value = ( - visibilities_real[vis_1d_index] - * transformed_mapping_matrix_real[vis_1d_index, pix_1d_index] - / (noise_map_real[vis_1d_index] ** 2.0) - ) - imag_value = ( - visibilities_imag[vis_1d_index] - * transformed_mapping_matrix_imag[vis_1d_index, pix_1d_index] - / (noise_map_imag[vis_1d_index] ** 2.0) - ) - data_vector[pix_1d_index] += real_value + imag_value + # Square noise components + inv_var_real = 1.0 / (noise_real**2) + inv_var_imag = 1.0 / (noise_imag**2) - return data_vector + # Real and imaginary contributions + weighted_real = (vis_real * inv_var_real)[:, None] * f_real + weighted_imag = (vis_imag * inv_var_imag)[:, None] * f_imag + + # Sum over visibilities + return np.sum(weighted_real + weighted_imag, axis=0) @numba_util.jit() @@ -512,7 +504,6 @@ def curvature_matrix_via_w_tilde_curvature_preload_interferometer_from( return curvature_matrix -@numba_util.jit() def mapped_reconstructed_visibilities_from( transformed_mapping_matrix: np.ndarray, reconstruction: np.ndarray ) -> np.ndarray: @@ -525,20 +516,7 @@ def mapped_reconstructed_visibilities_from( The matrix representing the blurred mappings between sub-grid pixels and pixelization pixels. 
""" - mapped_reconstructed_visibilities = (0.0 + 0.0j) * np.zeros( - transformed_mapping_matrix.shape[0] - ) - - transformed_mapping_matrix_real = transformed_mapping_matrix.real - transformed_mapping_matrix_imag = transformed_mapping_matrix.imag - - for i in range(transformed_mapping_matrix.shape[0]): - for j in range(reconstruction.shape[0]): - mapped_reconstructed_visibilities[i] += ( - reconstruction[j] * transformed_mapping_matrix_real[i, j] - ) + 1.0j * (reconstruction[j] * transformed_mapping_matrix_imag[i, j]) - - return mapped_reconstructed_visibilities + return transformed_mapping_matrix @ reconstruction """ diff --git a/autoarray/inversion/inversion/interferometer/mapping.py b/autoarray/inversion/inversion/interferometer/mapping.py index 2b3219a2e..77ec2576c 100644 --- a/autoarray/inversion/inversion/interferometer/mapping.py +++ b/autoarray/inversion/inversion/interferometer/mapping.py @@ -1,3 +1,4 @@ +import jax.numpy as jnp import numpy as np from typing import Dict, List, Optional, Union @@ -76,8 +77,8 @@ def data_vector(self) -> np.ndarray: """ return inversion_interferometer_util.data_vector_via_transformed_mapping_matrix_from( - transformed_mapping_matrix=np.array(self.operated_mapping_matrix), - visibilities=np.array(self.data), + transformed_mapping_matrix=self.operated_mapping_matrix, + visibilities=self.data, noise_map=np.array(self.noise_map), ) @@ -106,13 +107,13 @@ def curvature_matrix(self) -> np.ndarray: noise_map=self.noise_map.imag, ) - curvature_matrix = np.add(real_curvature_matrix, imag_curvature_matrix) + curvature_matrix = jnp.add(real_curvature_matrix, imag_curvature_matrix) if len(self.no_regularization_index_list) > 0: curvature_matrix = inversion_util.curvature_matrix_with_added_to_diag_from( curvature_matrix=curvature_matrix, - no_regularization_index_list=self.no_regularization_index_list, value=self.settings.no_regularization_add_to_curvature_diag_value, + no_regularization_index_list=self.no_regularization_index_list, ) 
return curvature_matrix @@ -152,10 +153,8 @@ def mapped_reconstructed_data_dict( visibilities = ( inversion_interferometer_util.mapped_reconstructed_visibilities_from( - transformed_mapping_matrix=np.array( - operated_mapping_matrix_list[index] - ), - reconstruction=np.array(reconstruction), + transformed_mapping_matrix=operated_mapping_matrix_list[index], + reconstruction=reconstruction, ) ) diff --git a/autoarray/inversion/inversion/interferometer/w_tilde.py b/autoarray/inversion/inversion/interferometer/w_tilde.py index 83febb864..e538e8779 100644 --- a/autoarray/inversion/inversion/interferometer/w_tilde.py +++ b/autoarray/inversion/inversion/interferometer/w_tilde.py @@ -85,9 +85,7 @@ def data_vector(self) -> np.ndarray: The calculation is described in more detail in `inversion_util.w_tilde_data_interferometer_from`. """ - return np.dot( - self.linear_obj_list[0].mapping_matrix.T, self.w_tilde.dirty_image - ) + return np.dot(self.mapping_matrix.T, self.w_tilde.dirty_image) @cached_property @profile_func diff --git a/autoarray/operators/transformer.py b/autoarray/operators/transformer.py index c5b1d3a50..c3d94f686 100644 --- a/autoarray/operators/transformer.py +++ b/autoarray/operators/transformer.py @@ -1,7 +1,9 @@ from astropy import units import copy +import jax.numpy as jnp import numpy as np import warnings +from typing import Tuple class NUFFTPlaceholder: @@ -14,6 +16,7 @@ class NUFFTPlaceholder: NUFFT_cpu = NUFFTPlaceholder +from autoarray.mask.mask_2d import Mask2D from autoarray.structures.arrays.uniform_2d import Array2D from autoarray.structures.grids.uniform_2d import Grid2D from autoarray.structures.visibilities import Visibilities @@ -33,8 +36,49 @@ def pynufft_exception(): class TransformerDFT: - def __init__(self, uv_wavelengths, real_space_mask, preload_transform=True): - + def __init__( + self, + uv_wavelengths: np.ndarray, + real_space_mask: Mask2D, + preload_transform: bool = True, + ): + """ + A direct Fourier transform (DFT) operator for 
radio interferometric imaging. + + This class performs the forward and inverse mapping between real-space images and + complex visibilities measured by an interferometer. It uses a direct implementation + of the Fourier transform (not FFT-based), making it suitable for irregular uv-coverage. + + Optionally, it precomputes and stores the sine and cosine terms used in the transform, + which can significantly improve performance for repeated operations but at the cost of memory. + + Parameters + ---------- + uv_wavelengths + The (u, v) coordinates in wavelengths of the measured visibilities. + real_space_mask + The real-space mask that defines the image grid and which pixels are valid. + preload_transform + If True, precomputes and stores the cosine and sine terms for the Fourier transform. + This accelerates repeated transforms but consumes additional memory (~1GB+ for large datasets). + + Attributes + ---------- + grid : ndarray + The unmasked real-space grid in radians. + total_visibilities : int + The number of measured visibilities. + total_image_pixels : int + The number of unmasked pixels in the real-space image grid. + preload_real_transforms : ndarray, optional + The precomputed cosine terms used in the real part of the DFT. + preload_imag_transforms : ndarray, optional + The precomputed sine terms used in the imaginary part of the DFT. + real_space_pixels : int + Alias for `total_image_pixels`. + adjoint_scaling : float + Scaling factor applied to the adjoint operator to normalize the inverse transform. 
+ """ super().__init__() self.uv_wavelengths = uv_wavelengths.astype("float") @@ -47,58 +91,88 @@ def __init__(self, uv_wavelengths, real_space_mask, preload_transform=True): self.preload_transform = preload_transform if preload_transform: - self.preload_real_transforms = transformer_util.preload_real_transforms( - grid_radians=np.array(self.grid.array), - uv_wavelengths=self.uv_wavelengths, + + self.preload_real_transforms = ( + transformer_util.preload_real_transforms_from( + grid_radians=np.array(self.grid.array), + uv_wavelengths=self.uv_wavelengths, + ) ) - self.preload_imag_transforms = transformer_util.preload_imag_transforms( - grid_radians=np.array(self.grid.array), - uv_wavelengths=self.uv_wavelengths, + self.preload_imag_transforms = ( + transformer_util.preload_imag_transforms_from( + grid_radians=np.array(self.grid.array), + uv_wavelengths=self.uv_wavelengths, + ) ) self.real_space_pixels = self.real_space_mask.pixels_in_mask - self.shape = ( - int(np.prod(self.total_visibilities)), - int(np.prod(self.real_space_pixels)), - ) - self.dtype = "complex128" - self.explicit = False - # NOTE: This is the scaling factor that needs to be applied to the adjoint operator self.adjoint_scaling = (2.0 * self.grid.shape_native[0]) * ( 2.0 * self.grid.shape_native[1] ) - self.matvec_count = 0 - self.rmatvec_count = 0 - self.matmat_count = 0 - self.rmatmat_count = 0 + def visibilities_from(self, image: Array2D) -> Visibilities: + """ + Computes the visibilities from a real-space image using the direct Fourier transform (DFT). + + This method transforms the input image into the uv-plane (Fourier space), simulating the + measurements made by an interferometer at specified uv-wavelengths. + + If `preload_transform` is True, it uses precomputed sine and cosine terms to accelerate the computation. + + Parameters + ---------- + image + The real-space image to be transformed to the uv-plane. Must be defined on the + same grid and mask as this transformer's `real_space_mask`. 
- def visibilities_from(self, image): + Returns + ------- + The complex visibilities resulting from the Fourier transform of the input image. + """ if self.preload_transform: - visibilities = transformer_util.visibilities_via_preload_jit_from( - image_1d=np.array(image.array), + visibilities = transformer_util.visibilities_via_preload_from( + image_1d=image.array, preloaded_reals=self.preload_real_transforms, preloaded_imags=self.preload_imag_transforms, ) - else: - visibilities = transformer_util.visibilities_jit( - image_1d=np.array(image.slim.array), + visibilities = transformer_util.visibilities_from( + image_1d=image.slim.array, grid_radians=np.array(self.grid), uv_wavelengths=self.uv_wavelengths, ) - return Visibilities(visibilities=visibilities) + return Visibilities(visibilities=jnp.array(visibilities)) - def image_from(self, visibilities, use_adjoint_scaling: bool = False): - image_slim = transformer_util.image_via_jit_from( - n_pixels=self.grid.shape[0], + def image_from( + self, visibilities: Visibilities, use_adjoint_scaling: bool = False + ) -> Array2D: + """ + Computes the real-space image from a set of visibilities using the adjoint of the DFT. + + This is not a true inverse Fourier transform, but rather the adjoint operation, which maps + complex visibilities back into image space. This is typically used as the first step + in inverse imaging algorithms like CLEAN or regularized reconstruction. + + Parameters + ---------- + visibilities + The complex visibilities to be transformed into a real-space image. + use_adjoint_scaling + If True, the result is scaled by a normalization factor. Currently unused. + + Returns + ------- + The real-space image resulting from the adjoint DFT operation, defined on the same + mask as this transformer's `real_space_mask`. 
+ """ + image_slim = transformer_util.image_direct_from( + visibilities=visibilities.in_array, grid_radians=np.array(self.grid.array), uv_wavelengths=self.uv_wavelengths, - visibilities=visibilities.in_array, ) image_native = array_2d_util.array_2d_native_from( @@ -108,24 +182,86 @@ def image_from(self, visibilities, use_adjoint_scaling: bool = False): return Array2D(values=image_native, mask=self.real_space_mask) - def transform_mapping_matrix(self, mapping_matrix): + def transform_mapping_matrix(self, mapping_matrix: np.ndarray) -> np.ndarray: + """ + Applies the DFT to a mapping matrix that maps source pixels to image pixels. + + This is used in linear inversion frameworks, where the transform of each source basis function + (represented by a column of the mapping matrix) is computed individually. The result is a matrix + mapping source pixels directly to visibilities. + + If `preload_transform` is True, the computation is accelerated using precomputed sine and cosine terms. + + Parameters + ---------- + mapping_matrix + A 2D array of shape (n_image_pixels, n_source_pixels) that maps source pixels to image-plane pixels. + + Returns + ------- + A 2D complex-valued array of shape (n_visibilities, n_source_pixels) that maps source-plane basis + functions directly to the visibilities. 
+ """ if self.preload_transform: - return transformer_util.transformed_mapping_matrix_via_preload_jit_from( + return transformer_util.transformed_mapping_matrix_via_preload_from( mapping_matrix=mapping_matrix, preloaded_reals=self.preload_real_transforms, preloaded_imags=self.preload_imag_transforms, ) - else: - return transformer_util.transformed_mapping_matrix_jit( - mapping_matrix=mapping_matrix, - grid_radians=np.array(self.grid), - uv_wavelengths=self.uv_wavelengths, - ) + return transformer_util.transformed_mapping_matrix_from( + mapping_matrix=mapping_matrix, + grid_radians=np.array(self.grid), + uv_wavelengths=self.uv_wavelengths, + ) class TransformerNUFFT(NUFFT_cpu): - def __init__(self, uv_wavelengths, real_space_mask): + def __init__(self, uv_wavelengths: np.ndarray, real_space_mask: Mask2D, **kwargs): + """ + Performs the Non-Uniform Fast Fourier Transform (NUFFT) for interferometric image reconstruction. + + This transformer uses the PyNUFFT library to efficiently compute the Fourier transform + of an image defined on a regular real-space grid to a set of non-uniform uv-plane (Fourier space) + coordinates, as is typical in radio interferometry. + + It is initialized with the interferometer uv-wavelengths and a real-space mask, which defines + the pixelized image domain. + + Parameters + ---------- + uv_wavelengths + The uv-coordinates (Fourier-space sampling points) corresponding to the measured visibilities. + Should be an array of shape (n_vis, 2), where the two columns represent u and v coordinates in wavelengths. + + real_space_mask + The 2D mask defining the real-space pixel grid on which the image is defined. Used to create the + unmasked grid required for NUFFT planning. + + Notes + ----- + - The `initialize_plan()` method builds the internal NUFFT plan based on the input grid and uv sampling. 
+ - A complex exponential `shift` factor is applied to align the center of the Fourier transform correctly, + accounting for the pixel-center offset in the real-space grid. + - The adjoint operation (used in inverse imaging) must be scaled by `adjoint_scaling` to normalize its output. + - This transformer inherits directly from PyNUFFT's `NUFFT_cpu` base class. + - If `NUFFTPlaceholder` is detected (indicating PyNUFFT is not available), an exception is raised. + + Attributes + ---------- + grid : Grid2D + The real-space pixel grid derived from the mask, in radians. + native_index_for_slim_index : np.ndarray + Index map converting from slim (1D) grid to native (2D) indexing, for image reshaping. + shift : np.ndarray + Complex exponential phase shift applied to account for real-space pixel centering. + real_space_pixels : int + Total number of valid real-space pixels defined by the mask. + total_visibilities : int + Total number of visibilities across all uv-wavelength components. + adjoint_scaling : float + Scaling factor for adjoint operations to normalize reconstructed images. + """ if isinstance(self, NUFFTPlaceholder): pynufft_exception() @@ -164,27 +300,40 @@ def __init__(self, uv_wavelengths, real_space_mask): # NOTE: If reshaped the shape of the operator is (2 x Nvis, Np) else it is (Nvis, Np) self.total_visibilities = int(uv_wavelengths.shape[0] * uv_wavelengths.shape[1]) - self.shape = ( - int(np.prod(self.total_visibilities)), - int(np.prod(self.real_space_pixels)), - ) - - # NOTE: If the operator is reshaped then the output is real. 
- self.dtype = "float64" - - self.explicit = False - # NOTE: This is the scaling factor that needs to be applied to the adjoint operator self.adjoint_scaling = (2.0 * self.grid.shape_native[0]) * ( 2.0 * self.grid.shape_native[1] ) - self.matvec_count = 0 - self.rmatvec_count = 0 - self.matmat_count = 0 - self.rmatmat_count = 0 - - def initialize_plan(self, ratio=2, interp_kernel=(6, 6)): + def initialize_plan(self, ratio: int = 2, interp_kernel: Tuple[int, int] = (6, 6)): + """ + Initializes the PyNUFFT plan for performing the NUFFT operation. + + This method precomputes the interpolation structure and gridding + needed by the NUFFT algorithm to map between the regular real-space + image grid and the non-uniform uv-plane sampling defined by the + interferometric visibilities. + + Parameters + ---------- + ratio + The oversampling ratio used to pad the Fourier grid before interpolation. + A higher value improves accuracy at the cost of increased memory and computation. + Default is 2 (i.e., the Fourier grid is twice the size of the image grid). + + interp_kernel + The interpolation kernel size along each axis, given as (Jy, Jx). + This determines how many neighboring Fourier grid points are used + to interpolate each uv-point. + Default is (6, 6), a good trade-off between accuracy and performance. + + Notes + ----- + - The uv-coordinates are normalized and rescaled into the range expected by PyNUFFT + using the real-space grid’s pixel scale and the Nyquist frequency limit. + - The plan must be initialized before performing any NUFFT operations (e.g., forward or adjoint). + - This method modifies the internal state of the NUFFT object by calling `self.plan(...)`. + """ if not isinstance(ratio, int): ratio = int(ratio) @@ -208,9 +357,23 @@ def initialize_plan(self, ratio=2, interp_kernel=(6, 6)): Jd=interp_kernel, ) - def visibilities_from(self, image): + def visibilities_from(self, image: Array2D) -> Visibilities: """ - ... 
+ Computes visibilities from a real-space image using the NUFFT forward transform. + + Parameters + ---------- + image + The input image in real space, represented as a 2D array object. + + Returns + ------- + The complex visibilities in the uv-plane computed via the NUFFT forward operation. + + Notes + ----- + - The image is flipped vertically before transformation to account for PyNUFFT’s internal data layout. + - Warnings during the NUFFT computation are suppressed for cleaner output. """ warnings.filterwarnings("ignore") @@ -221,7 +384,29 @@ def visibilities_from(self, image): ) # flip due to PyNUFFT internal flip ) - def image_from(self, visibilities, use_adjoint_scaling: bool = False): + def image_from( + self, visibilities: Visibilities, use_adjoint_scaling: bool = False + ) -> Array2D: + """ + Reconstructs a real-space image from visibilities using the NUFFT adjoint transform. + + Parameters + ---------- + visibilities + The complex visibilities in the uv-plane to be inverted. + use_adjoint_scaling + If True, apply a scaling factor to the adjoint result to improve accuracy. + Default is False. + + Returns + ------- + The reconstructed real-space image after applying the NUFFT adjoint transform. + + Notes + ----- + - The output image is flipped vertically to align with the input image orientation. + - Warnings during the adjoint operation are suppressed. + """ with warnings.catch_warnings(): warnings.simplefilter("ignore") image = np.real(self.adjoint(visibilities))[::-1, :] @@ -231,7 +416,25 @@ def image_from(self, visibilities, use_adjoint_scaling: bool = False): return Array2D(values=image, mask=self.real_space_mask) - def transform_mapping_matrix(self, mapping_matrix): + def transform_mapping_matrix(self, mapping_matrix: np.ndarray) -> np.ndarray: + """ + Applies the NUFFT forward transform to each column of a mapping matrix, producing transformed visibilities. 
+ + Parameters + ---------- + mapping_matrix : np.ndarray + A 2D array where each column corresponds to a source-plane pixel intensity distribution flattened into image space. + + Returns + ------- + np.ndarray + A complex-valued 2D array where each column contains the visibilities corresponding to the respective column in the input mapping matrix. + + Notes + ----- + - Each column of the input mapping matrix is reshaped into the native 2D image grid before transformation. + - This method repeatedly calls `visibilities_from` for each column, which may be computationally intensive. + """ transformed_mapping_matrix = 0 + 0j * np.zeros( (self.uv_wavelengths.shape[0], mapping_matrix.shape[1]) ) @@ -249,61 +452,3 @@ def transform_mapping_matrix(self, mapping_matrix): transformed_mapping_matrix[:, source_pixel_1d_index] = visibilities return transformed_mapping_matrix - - def forward_lop(self, x): - """ - Forward NUFFT on CPU - :param x: The input numpy array, with the size of Nd or Nd + (batch,) - :type: numpy array with the dtype of numpy.complex64 - :return: y: The output numpy array, with the size of (M,) or (M, batch) - :rtype: numpy array with the dtype of numpy.complex64 - """ - - warnings.filterwarnings("ignore") - - x2d = array_2d_util.array_2d_native_complex_via_indexes_from( - array_2d_slim=x, - shape_native=self.real_space_mask.shape_native, - native_index_for_slim_index_2d=self.native_index_for_slim_index, - )[::-1, :] - - y = self.k2y(self.xx2k(self.x2xx(x2d))) - return np.concatenate((y.real, y.imag), axis=0) - - def adjoint_lop(self, y): - """ - Adjoint NUFFT on CPU - :param y: The input numpy array, with the size of (M,) or (M, batch) - :type: numpy array with the dtype of numpy.complex64 - :return: x: The output numpy array, - with the size of Nd or Nd + (batch, ) - :rtype: numpy array with the dtype of numpy.complex64 - """ - - warnings.filterwarnings("ignore") - - def a_complex_from(a_real, a_imag): - return a_real + 1j * a_imag - - y = 
a_complex_from( - a_real=y[: int(self.shape[0] / 2.0)], a_imag=y[int(self.shape[0] / 2.0) :] - ) - - x2d = np.real(self.xx2x(self.k2xx(self.y2k(y)))) - - x = array_2d_util.array_2d_slim_complex_from( - array_2d_native=x2d[::-1, :], - mask=np.array(self.real_space_mask), - ) - x = x.real # NOTE: - - # NOTE: - x *= self.adjoint_scaling - - return x - - def _matvec(self, x): - return self.forward_lop(x) - - def _rmatvec(self, x): - return self.adjoint_lop(x) diff --git a/autoarray/operators/transformer_util.py b/autoarray/operators/transformer_util.py index 395034794..34659510a 100644 --- a/autoarray/operators/transformer_util.py +++ b/autoarray/operators/transformer_util.py @@ -1,20 +1,18 @@ +import jax.numpy as jnp import numpy as np -from autoarray import numba_util - -@numba_util.jit() -def preload_real_transforms( +def preload_real_transforms_from( grid_radians: np.ndarray, uv_wavelengths: np.ndarray ) -> np.ndarray: """ - Sets up the real preloaded values used by the direct fourier transform (`TransformerDFT`) to speed up + Sets up the real preloaded values used by the direct Fourier transform (`TransformerDFT`) to speed up the Fourier transform calculations. The preloaded values are the cosine terms of every (y,x) radian coordinate on the real-space grid multiplied by - everu `uv_wavelength` value. + every `uv_wavelength` value. - For large numbers of visibilities (> 100000) this array requires large amounts of memory ( > 1 GB) and it is + For large numbers of visibilities (> 100000) this array requires large amounts of memory (> 1 GB) and it is recommended this preloading is not used. Parameters @@ -28,179 +26,267 @@ def preload_real_transforms( Returns ------- - np.ndarray - The preloaded values of the cosine terms in the calculation of real entries of the direct Fourier transform. - + The preloaded values of the cosine terms in the calculation of real entries of the direct Fourier transform. 
""" - - preloaded_real_transforms = np.zeros( - shape=(grid_radians.shape[0], uv_wavelengths.shape[0]) + # Compute the phase matrix: shape (n_pixels, n_visibilities) + phase = ( + -2.0 + * np.pi + * ( + np.outer(grid_radians[:, 1], uv_wavelengths[:, 0]) # y * u + + np.outer(grid_radians[:, 0], uv_wavelengths[:, 1]) # x * v + ) ) - for image_1d_index in range(grid_radians.shape[0]): - for vis_1d_index in range(uv_wavelengths.shape[0]): - preloaded_real_transforms[image_1d_index, vis_1d_index] += np.cos( - -2.0 - * np.pi - * ( - grid_radians[image_1d_index, 1] * uv_wavelengths[vis_1d_index, 0] - + grid_radians[image_1d_index, 0] * uv_wavelengths[vis_1d_index, 1] - ) - ) + # Compute cosine of the phase matrix + preloaded_real_transforms = np.cos(phase) return preloaded_real_transforms -@numba_util.jit() -def preload_imag_transforms(grid_radians, uv_wavelengths): - preloaded_imag_transforms = np.zeros( - shape=(grid_radians.shape[0], uv_wavelengths.shape[0]) +def preload_imag_transforms_from( + grid_radians: np.ndarray, uv_wavelengths: np.ndarray +) -> np.ndarray: + """ + Sets up the imaginary preloaded values used by the direct Fourier transform (`TransformerDFT`) to speed up + the Fourier transform calculations in interferometric imaging. + + The preloaded values are the sine terms of every (y,x) radian coordinate on the real-space grid multiplied by + every `uv_wavelength` value. These are used to compute the imaginary components of visibilities. + + For large numbers of visibilities (> 100000), this array can require significant memory (> 1 GB), so preloading + should be used with care. + + Parameters + ---------- + grid_radians + The grid in radians corresponding to the (y,x) coordinates in real space. + uv_wavelengths + The (u,v) coordinates in the Fourier plane (in units of wavelengths). + + Returns + ------- + The sine term preloads used in imaginary-part DFT calculations. 
+ """ + # Compute the phase matrix: shape (n_pixels, n_visibilities) + phase = ( + -2.0 + * np.pi + * ( + np.outer(grid_radians[:, 1], uv_wavelengths[:, 0]) # y * u + + np.outer(grid_radians[:, 0], uv_wavelengths[:, 1]) # x * v + ) ) - for image_1d_index in range(grid_radians.shape[0]): - for vis_1d_index in range(uv_wavelengths.shape[0]): - preloaded_imag_transforms[image_1d_index, vis_1d_index] += np.sin( - -2.0 - * np.pi - * ( - grid_radians[image_1d_index, 1] * uv_wavelengths[vis_1d_index, 0] - + grid_radians[image_1d_index, 0] * uv_wavelengths[vis_1d_index, 1] - ) - ) + # Compute sine of the phase matrix + preloaded_imag_transforms = np.sin(phase) return preloaded_imag_transforms -@numba_util.jit() -def visibilities_via_preload_jit_from(image_1d, preloaded_reals, preloaded_imags): - visibilities = 0 + 0j * np.zeros(shape=(preloaded_reals.shape[1])) +def visibilities_via_preload_from( + image_1d: np.ndarray, preloaded_reals: np.ndarray, preloaded_imags: np.ndarray +) -> np.ndarray: + """ + Computes interferometric visibilities using preloaded real and imaginary DFT transform components. + + This function performs a direct Fourier transform (DFT) using precomputed cosine (real) and sine (imaginary) + terms. It is used in radio astronomy to compute visibilities from an image for a given interferometric + observation setup. + + Parameters + ---------- + image_1d : ndarray of shape (n_pixels,) + The 1D image vector (real-space brightness values). + preloaded_reals : ndarray of shape (n_pixels, n_visibilities) + The preloaded cosine terms (real part of DFT matrix). + preloaded_imags : ndarray of shape (n_pixels, n_visibilities) + The preloaded sine terms (imaginary part of DFT matrix). + + Returns + ------- + visibilities : ndarray of shape (n_visibilities,) + The complex visibilities computed by summing over all pixels. 
+ """ + # Perform the dot product between the image and preloaded transform matrices + vis_real = jnp.dot(image_1d, preloaded_reals) # shape (n_visibilities,) + vis_imag = jnp.dot(image_1d, preloaded_imags) # shape (n_visibilities,) - for image_1d_index in range(image_1d.shape[0]): - for vis_1d_index in range(preloaded_reals.shape[1]): - vis_real = ( - image_1d[image_1d_index] * preloaded_reals[image_1d_index, vis_1d_index] - ) - vis_imag = ( - image_1d[image_1d_index] * preloaded_imags[image_1d_index, vis_1d_index] - ) - visibilities[vis_1d_index] += vis_real + 1j * vis_imag + visibilities = vis_real + 1j * vis_imag return visibilities -@numba_util.jit() -def visibilities_jit(image_1d, grid_radians, uv_wavelengths): - visibilities = 0 + 0j * np.zeros(shape=(uv_wavelengths.shape[0])) - - for image_1d_index in range(image_1d.shape[0]): - for vis_1d_index in range(uv_wavelengths.shape[0]): - vis_real = image_1d[image_1d_index] * np.cos( - -2.0 - * np.pi - * ( - grid_radians[image_1d_index, 1] * uv_wavelengths[vis_1d_index, 0] - + grid_radians[image_1d_index, 0] * uv_wavelengths[vis_1d_index, 1] - ) - ) - vis_imag = image_1d[image_1d_index] * np.sin( - -2.0 - * np.pi - * ( - grid_radians[image_1d_index, 1] * uv_wavelengths[vis_1d_index, 0] - + grid_radians[image_1d_index, 0] * uv_wavelengths[vis_1d_index, 1] - ) - ) - visibilities[vis_1d_index] += vis_real + 1j * vis_imag +def visibilities_from( + image_1d: np.ndarray, grid_radians: np.ndarray, uv_wavelengths: np.ndarray +) -> np.ndarray: + """ + Compute complex visibilities from an input sky image using the Fourier transform, + simulating the response of an astronomical radio interferometer. + + This function converts an image defined on a sky coordinate grid into its + visibility-space representation, given a set of (u,v) spatial frequency + coordinates (in wavelengths), as sampled by a radio interferometer. 
+ + Parameters + ---------- + image_1d + The 1D flattened sky brightness values corresponding to each pixel in the grid. + grid_radians + The angular (y, x) positions of each image pixel in radians, matching image_1d. + uv_wavelengths + The (u, v) spatial frequencies in units of wavelengths, for each baseline + of the interferometer. + + Returns + ------- + visibilities + The complex visibilities (Fourier components) corresponding to each + (u, v) coordinate, representing the interferometer’s measurement. + """ + + # Compute the dot product for each pixel-uv pair + phase = ( + -2.0 + * np.pi + * ( + np.outer(grid_radians[:, 1], uv_wavelengths[:, 0]) + + np.outer(grid_radians[:, 0], uv_wavelengths[:, 1]) + ) + ) # shape (n_pixels, n_vis) + + # Multiply image values with phase terms + vis_real = image_1d[:, None] * np.cos(phase) + vis_imag = image_1d[:, None] * np.sin(phase) + + # Sum over all pixels for each visibility + visibilities = np.sum(vis_real + 1j * vis_imag, axis=0) return visibilities -@numba_util.jit() -def image_via_jit_from(n_pixels, grid_radians, uv_wavelengths, visibilities): - image_1d = np.zeros(n_pixels) - - for image_1d_index in range(image_1d.shape[0]): - for vis_1d_index in range(uv_wavelengths.shape[0]): - image_1d[image_1d_index] += visibilities[vis_1d_index, 0] * np.cos( - 2.0 - * np.pi - * ( - grid_radians[image_1d_index, 1] * uv_wavelengths[vis_1d_index, 0] - + grid_radians[image_1d_index, 0] * uv_wavelengths[vis_1d_index, 1] - ) - ) - - image_1d[image_1d_index] -= visibilities[vis_1d_index, 1] * np.sin( - 2.0 - * np.pi - * ( - grid_radians[image_1d_index, 1] * uv_wavelengths[vis_1d_index, 0] - + grid_radians[image_1d_index, 0] * uv_wavelengths[vis_1d_index, 1] - ) - ) +def image_direct_from( + visibilities: np.ndarray, grid_radians: np.ndarray, uv_wavelengths: np.ndarray +) -> np.ndarray: + """ + Reconstruct a real-valued sky image from complex interferometric visibilities + using an inverse Fourier transform approximation. 
- return image_1d + This function simulates the synthesis imaging equation of a radio interferometer + by summing sinusoidal components across all (u, v) spatial frequencies. + Parameters + ---------- + visibilities + The real and imaginary parts of the complex visibilities for each (u, v) point. -@numba_util.jit() -def transformed_mapping_matrix_via_preload_jit_from( - mapping_matrix, preloaded_reals, preloaded_imags -): - transfomed_mapping_matrix = 0 + 0j * np.zeros( - (preloaded_reals.shape[1], mapping_matrix.shape[1]) + grid_radians + The angular (y, x) coordinates of each pixel in radians. + + uv_wavelengths + The (u, v) spatial frequencies in units of wavelengths for each baseline. + + Returns + ------- + image_1d + The reconstructed real-valued image in sky coordinates. + """ + # Compute the phase term for each (pixel, visibility) pair + phase = ( + 2.0 + * np.pi + * ( + np.outer(grid_radians[:, 1], uv_wavelengths[:, 0]) + + np.outer(grid_radians[:, 0], uv_wavelengths[:, 1]) + ) ) - for pixel_1d_index in range(mapping_matrix.shape[1]): - for image_1d_index in range(mapping_matrix.shape[0]): - value = mapping_matrix[image_1d_index, pixel_1d_index] + real_part = np.dot(np.cos(phase), visibilities[:, 0]) + imag_part = np.dot(np.sin(phase), visibilities[:, 1]) + + image_1d = real_part - imag_part + + return image_1d + + +def transformed_mapping_matrix_via_preload_from( + mapping_matrix: np.ndarray, preloaded_reals: np.ndarray, preloaded_imags: np.ndarray +) -> np.ndarray: + """ + Computes the Fourier-transformed mapping matrix using preloaded sine and cosine terms for efficiency. + + This function transforms each source pixel's mapping to visibilities by using precomputed + real (cosine) and imaginary (sine) terms from the direct Fourier transform. + It is used in radio interferometric imaging where source-to-image mappings are projected + into the visibility space. 
+ + Parameters + ---------- + mapping_matrix + The mapping matrix from image-plane pixels to source-plane pixels. + preloaded_reals + Precomputed cosine terms for each pixel-vis pair: cos(-2π(yu + xv)). + preloaded_imags + Precomputed sine terms for each pixel-vis pair: sin(-2π(yu + xv)). + + Returns + ------- + Complex-valued matrix mapping source pixels to visibilities. + """ - if value > 0: - for vis_1d_index in range(preloaded_reals.shape[1]): - vis_real = value * preloaded_reals[image_1d_index, vis_1d_index] - vis_imag = value * preloaded_imags[image_1d_index, vis_1d_index] - transfomed_mapping_matrix[vis_1d_index, pixel_1d_index] += ( - vis_real + 1j * vis_imag - ) + # Broadcasted multiplication and matrix multiplication over non-zero entries - return transfomed_mapping_matrix + vis_real = preloaded_reals.T @ mapping_matrix # (n_visibilities, n_source_pixels) + vis_imag = preloaded_imags.T @ mapping_matrix + transformed_matrix = vis_real + 1j * vis_imag + + return transformed_matrix + + +def transformed_mapping_matrix_from( + mapping_matrix: np.ndarray, grid_radians: np.ndarray, uv_wavelengths: np.ndarray +) -> np.ndarray: + """ + Computes the Fourier-transformed mapping matrix used in radio interferometric imaging. -@numba_util.jit() -def transformed_mapping_matrix_jit(mapping_matrix, grid_radians, uv_wavelengths): - transfomed_mapping_matrix = 0 + 0j * np.zeros( - (uv_wavelengths.shape[0], mapping_matrix.shape[1]) + This function applies a direct Fourier transform to each pixel column of the mapping matrix using the + uv-wavelength coordinates. The result is a matrix that maps source pixel intensities to complex visibilities, + which represent how a model image would appear to an interferometer. + + Parameters + ---------- + mapping_matrix : ndarray of shape (n_image_pixels, n_source_pixels) + The mapping matrix from image-plane pixels to source-plane pixels. 
+ grid_radians : ndarray of shape (n_image_pixels, 2) + The (y,x) positions of each image pixel in radians. + uv_wavelengths : ndarray of shape (n_visibilities, 2) + The (u,v) coordinates of the sampled Fourier modes in units of wavelength. + + Returns + ------- + transformed_matrix : ndarray of shape (n_visibilities, n_source_pixels) + The transformed mapping matrix in the visibility domain (complex-valued). + """ + # Compute phase term: (n_image_pixels, n_visibilities) + phase = ( + -2.0 + * np.pi + * ( + np.outer(grid_radians[:, 1], uv_wavelengths[:, 0]) # y * u + + np.outer(grid_radians[:, 0], uv_wavelengths[:, 1]) # x * v + ) ) - for pixel_1d_index in range(mapping_matrix.shape[1]): - for image_1d_index in range(mapping_matrix.shape[0]): - value = mapping_matrix[image_1d_index, pixel_1d_index] - - if value > 0: - for vis_1d_index in range(uv_wavelengths.shape[0]): - vis_real = value * np.cos( - -2.0 - * np.pi - * ( - grid_radians[image_1d_index, 1] - * uv_wavelengths[vis_1d_index, 0] - + grid_radians[image_1d_index, 0] - * uv_wavelengths[vis_1d_index, 1] - ) - ) - - vis_imag = value * np.sin( - -2.0 - * np.pi - * ( - grid_radians[image_1d_index, 1] - * uv_wavelengths[vis_1d_index, 0] - + grid_radians[image_1d_index, 0] - * uv_wavelengths[vis_1d_index, 1] - ) - ) - - transfomed_mapping_matrix[vis_1d_index, pixel_1d_index] += ( - vis_real + 1j * vis_imag - ) - - return transfomed_mapping_matrix + # Compute real and imaginary Fourier matrices + fourier_real = np.cos(phase) + fourier_imag = np.sin(phase) + + # Dense products over all mapping entries (equivalent to the old loop that skipped zeros) + # This matrix multiplication is: (n_visibilities x n_image_pixels) dot (n_image_pixels x n_source_pixels) + vis_real = fourier_real.T @ mapping_matrix # (n_vis, n_src) + vis_imag = fourier_imag.T @ mapping_matrix # (n_vis, n_src) + + transformed_matrix = vis_real + 1j * vis_imag + + return transformed_matrix diff --git a/autoarray/structures/visibilities.py b/autoarray/structures/visibilities.py 
index 8cc94dca5..abfb9713e 100644 --- a/autoarray/structures/visibilities.py +++ b/autoarray/structures/visibilities.py @@ -50,16 +50,8 @@ def __init__(self, visibilities: Union[np.ndarray, List[complex]]): .ravel() ) - self.ordered_1d = np.concatenate( - (np.real(visibilities), np.imag(visibilities)), axis=0 - ) - super().__init__(array=visibilities) - def __array_finalize__(self, obj): - if hasattr(obj, "ordered_1d"): - self.ordered_1d = obj.ordered_1d - @property def slim(self) -> "AbstractVisibilities": return self @@ -74,7 +66,7 @@ def in_array(self) -> np.ndarray: Returns the 1D complex NumPy array of values with shape [total_visibilities] as a NumPy float array of shape [total_visibilities, 2]. """ - return np.stack((np.real(self), np.imag(self)), axis=-1) + return np.stack((np.real(self.array), np.imag(self.array)), axis=-1) @property def in_grid(self) -> Grid2DIrregular: @@ -232,20 +224,4 @@ def __init__(self, visibilities: Union[np.ndarray, List[complex]], *args, **kwar .ravel() ) - self.ordered_1d = np.concatenate( - (np.real(visibilities), np.imag(visibilities)), axis=0 - ) super().__init__(visibilities=visibilities) - - weight_list = 1.0 / self.in_array**2.0 - - self.weight_list_ordered_1d = np.concatenate( - (weight_list[:, 0], weight_list[:, 1]), axis=0 - ) - - def __array_finalize__(self, obj): - if hasattr(obj, "ordered_1d"): - self.ordered_1d = obj.ordered_1d - - if hasattr(obj, "weight_list_ordered_1d"): - self.weight_list_ordered_1d = obj.weight_list_ordered_1d diff --git a/test_autoarray/dataset/interferometer/test_simulator.py b/test_autoarray/dataset/interferometer/test_simulator.py index e45908cba..ea434c163 100644 --- a/test_autoarray/dataset/interferometer/test_simulator.py +++ b/test_autoarray/dataset/interferometer/test_simulator.py @@ -30,7 +30,7 @@ def test__from_image__setup_with_all_features_off( visibilities = transformer.visibilities_from(image=image) - assert dataset.data == pytest.approx(visibilities, 1.0e-4) + assert 
dataset.data == pytest.approx(visibilities.array, 1.0e-4) def test__setup_with_noise(uv_wavelengths_7x2, transformer_7x7_7): diff --git a/test_autoarray/inversion/inversion/interferometer/test_inversion_interferometer_util.py b/test_autoarray/inversion/inversion/interferometer/test_inversion_interferometer_util.py index c7dc03221..96cad9eed 100644 --- a/test_autoarray/inversion/inversion/interferometer/test_inversion_interferometer_util.py +++ b/test_autoarray/inversion/inversion/interferometer/test_inversion_interferometer_util.py @@ -288,8 +288,8 @@ def test__identical_inversion_values_for_two_methods(): assert inversion_w_tilde.mapped_reconstructed_image.array == pytest.approx( inversion_mapping_matrices.mapped_reconstructed_image.array, abs=1.0e-1 ) - assert inversion_w_tilde.mapped_reconstructed_data == pytest.approx( - inversion_mapping_matrices.mapped_reconstructed_data, abs=1.0e-1 + assert inversion_w_tilde.mapped_reconstructed_data.array == pytest.approx( + inversion_mapping_matrices.mapped_reconstructed_data.array, abs=1.0e-1 ) @@ -387,6 +387,6 @@ def test__identical_inversion_source_and_image_loops(): assert inversion_image_loop.mapped_reconstructed_image.array == pytest.approx( inversion_source_loop.mapped_reconstructed_image.array, 1.0e-2 ) - assert inversion_image_loop.mapped_reconstructed_data == pytest.approx( - inversion_source_loop.mapped_reconstructed_data, 1.0e-2 + assert inversion_image_loop.mapped_reconstructed_data.array == pytest.approx( + inversion_source_loop.mapped_reconstructed_data.array, 1.0e-2 ) diff --git a/test_autoarray/operators/test_transformer.py b/test_autoarray/operators/test_transformer.py index 33ea1ca42..4d432cf6b 100644 --- a/test_autoarray/operators/test_transformer.py +++ b/test_autoarray/operators/test_transformer.py @@ -4,259 +4,133 @@ import pytest -class MockDeriveMask2D: - def __init__(self, grid): - self.mask = grid.derive_mask.all_false - self.grid = grid - - @property - def sub_1(self): - return self - - 
@property - def derive_grid(self): - return MockDeriveGrid2D( - grid=self.grid, - ) - - -class MockDeriveGrid2D: - def __init__(self, grid): - self.unmasked = MockMaskedGrid(grid=grid) - - -class MockRealSpaceMask: - def __init__(self, grid): - self.grid = grid - self.unmasked = MockMaskedGrid(grid=grid) - - @property - def pixels_in_mask(self): - return self.unmasked.slim.in_radians.shape[0] - - @property - def derive_mask(self): - return MockDeriveMask2D( - grid=self.grid, - ) - - @property - def derive_grid(self): - return MockDeriveGrid2D( - grid=self.grid, - ) - - @property - def pixel_scales(self): - return self.grid.pixel_scales - - @property - def origin(self): - return self.grid.origin - - -class MockMaskedGrid: - def __init__(self, grid): - self.in_radians = grid - self.slim = grid - - -def test__dft__visibilities_from(): - uv_wavelengths = np.ones(shape=(4, 2)) - - grid_radians = aa.Grid2D.no_mask(values=[[[1.0, 1.0]]], pixel_scales=1.0) - - real_space_mask = MockRealSpaceMask(grid=grid_radians) +def test__dft__visibilities_from(visibilities_7, uv_wavelengths_7x2, mask_2d_7x7): transformer = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, + uv_wavelengths=uv_wavelengths_7x2, + real_space_mask=mask_2d_7x7, preload_transform=False, ) - image = aa.Array2D.ones(shape_native=(1, 1), pixel_scales=1.0) - - visibilities = transformer.visibilities_from(image=image) - - assert visibilities == pytest.approx( - np.array([1.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j]), 1.0e-4 - ) - - uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) - - grid_radians = aa.Grid2D.no_mask( - values=[[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0 - ) - - real_space_mask = MockRealSpaceMask(grid=grid_radians) - - transformer = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, - preload_transform=False, + image = aa.Array2D( + values=[ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0], + [0.0, 0.0, 0.0, 0.5, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.5, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + mask=mask_2d_7x7, ) - image = aa.Array2D.ones(shape_native=(1, 2), pixel_scales=1.0) - visibilities = transformer.visibilities_from(image=image) - assert visibilities == pytest.approx( + assert visibilities[0:3] == pytest.approx( np.array( - [-0.091544 - 1.45506j, -0.73359736 - 0.781201j, -0.613160 - 0.077460j] + [ + -0.06434514 - 0.61763293j, + 1.71143349 - 1.184022j, + 0.90200541 + 0.03726693j, + ] ), 1.0e-4, ) - uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) - grid_radians = aa.Grid2D.no_mask( - values=[[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0 - ) - real_space_mask = MockRealSpaceMask(grid=grid_radians) +def test__dft__image_from(visibilities_7, uv_wavelengths_7x2, mask_2d_7x7): transformer = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, + uv_wavelengths=uv_wavelengths_7x2, + real_space_mask=mask_2d_7x7, preload_transform=False, ) - image = aa.Array2D.no_mask([[3.0, 6.0]], pixel_scales=1.0) - - visibilities = transformer.visibilities_from(image=image) + image = transformer.image_from(visibilities=visibilities_7) - assert visibilities == pytest.approx( - np.array([-2.46153 - 6.418822j, -5.14765 - 1.78146j, -3.11681 + 2.48210j]), - 1.0e-4, - ) + assert image[0:3] == pytest.approx([-1.49022481, -0.22395855, -0.45588535], 1.0e-4) -def test__dft__visibilities_from__preload_and_non_preload_give_same_answer(): - uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) - grid_radians = aa.Grid2D.no_mask( - values=[[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0 - ) - real_space_mask = MockRealSpaceMask(grid=grid_radians) +def test__dft__visibilities_from__preload_and_non_preload_give_same_answer( + visibilities_7, uv_wavelengths_7x2, mask_2d_7x7 +): transformer_preload = aa.TransformerDFT( - 
uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, + uv_wavelengths=uv_wavelengths_7x2, + real_space_mask=mask_2d_7x7, preload_transform=True, ) transformer = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, + uv_wavelengths=uv_wavelengths_7x2, + real_space_mask=mask_2d_7x7, preload_transform=False, ) - image = aa.Array2D.no_mask([[2.0, 6.0]], pixel_scales=1.0) + image = aa.Array2D( + values=[ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.5, 1.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.5, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + mask=mask_2d_7x7, + ) visibilities_via_preload = transformer_preload.visibilities_from(image=image) visibilities = transformer.visibilities_from(image=image) - assert (visibilities_via_preload == visibilities).all() + assert visibilities_via_preload == pytest.approx(visibilities.array, 1.0e-4) -def test__dft__transform_mapping_matrix(): - uv_wavelengths = np.ones(shape=(4, 2)) - grid_radians = aa.Grid2D.no_mask(values=[[[1.0, 1.0]]], pixel_scales=1.0) - real_space_mask = MockRealSpaceMask(grid=grid_radians) +def test__dft__transform_mapping_matrix( + visibilities_7, uv_wavelengths_7x2, mask_2d_7x7 +): transformer = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, + uv_wavelengths=uv_wavelengths_7x2, + real_space_mask=mask_2d_7x7, preload_transform=False, ) - mapping_matrix = np.ones(shape=(1, 1)) + mapping_matrix = np.ones(shape=(9, 1)) transformed_mapping_matrix = transformer.transform_mapping_matrix( mapping_matrix=mapping_matrix ) - assert transformed_mapping_matrix == pytest.approx( - np.array([[1.0 + 0.0j], [1.0 + 0.0j], [1.0 + 0.0j], [1.0 + 0.0j]]), 1.0e-4 - ) - - uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) - - grid_radians = aa.Grid2D.no_mask( - values=[[[0.1, 0.2], [0.3, 0.4]]], 
pixel_scales=1.0 - ) - real_space_mask = MockRealSpaceMask(grid=grid_radians) - - transformer = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, - preload_transform=False, - ) - - mapping_matrix = np.ones(shape=(2, 2)) - - transformed_mapping_matrix = transformer.transform_mapping_matrix( - mapping_matrix=mapping_matrix - ) - - assert transformed_mapping_matrix == pytest.approx( - np.array( - [ - [-0.091544 - 1.45506j, -0.091544 - 1.45506j], - [-0.733597 - 0.78120j, -0.733597 - 0.78120j], - [-0.61316 - 0.07746j, -0.61316 - 0.07746j], - ] - ), - 1.0e-4, - ) - - grid_radians = aa.Grid2D.no_mask( - [[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]], pixel_scales=1.0 - ) - real_space_mask = MockRealSpaceMask(grid=grid_radians) - - uv_wavelengths = np.array([[0.7, 0.8], [0.9, 1.0]]) - - transformer = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, - preload_transform=False, - ) - - mapping_matrix = np.array([[0.0, 0.5], [0.0, 0.2], [1.0, 0.0]]) - - transformed_mapping_matrix = transformer.transform_mapping_matrix( - mapping_matrix=mapping_matrix - ) - - assert transformed_mapping_matrix == pytest.approx( + assert transformed_mapping_matrix[0:3, :] == pytest.approx( np.array( [ - [0.42577 + 0.90482j, -0.10473 - 0.46607j], - [0.968583 - 0.24868j, -0.20085 - 0.32227j], + [1.48496084 + 0.00000000e00j], + [3.02988906 + 4.44089210e-16], + [0.86395556 + 0.00000000e00], ] ), - 1.0e-4, + abs=1.0e-4, ) -def test__dft__transformed_mapping_matrix__preload_and_non_preload_give_same_answer(): - uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) - grid_radians = aa.Grid2D.no_mask( - values=[[[0.1, 0.2], [0.3, 0.4]]], pixel_scales=1.0 - ) - real_space_mask = MockRealSpaceMask(grid=grid_radians) +def test__dft__transformed_mapping_matrix__preload_and_non_preload_give_same_answer( + visibilities_7, uv_wavelengths_7x2, mask_2d_7x7 +): transformer_preload = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - 
real_space_mask=real_space_mask, + uv_wavelengths=uv_wavelengths_7x2, + real_space_mask=mask_2d_7x7, preload_transform=True, ) transformer = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, + uv_wavelengths=uv_wavelengths_7x2, + real_space_mask=mask_2d_7x7, preload_transform=False, ) - mapping_matrix = np.array([[3.0, 5.0], [1.0, 2.0]]) + mapping_matrix = np.ones(shape=(9, 1)) transformed_mapping_matrix_preload = transformer_preload.transform_mapping_matrix( mapping_matrix=mapping_matrix @@ -270,53 +144,40 @@ def test__dft__transformed_mapping_matrix__preload_and_non_preload_give_same_ans def test__nufft__visibilities_from(): - uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) - grid_radians = aa.Grid2D.uniform(shape_native=(5, 5), pixel_scales=0.005).in_radians - real_space_mask = MockRealSpaceMask(grid=grid_radians) + uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) + real_space_mask = aa.Mask2D.all_false(shape_native=(5, 5), pixel_scales=0.005) image = aa.Array2D.ones( - shape_native=grid_radians.shape_native, - pixel_scales=grid_radians.pixel_scales, + shape_native=(5, 5), + pixel_scales=0.005, ) - transformer_dft = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, - preload_transform=False, - ) - - visibilities_dft = transformer_dft.visibilities_from(image=image.native) - - real_space_mask = aa.Mask2D.all_false(shape_native=(5, 5), pixel_scales=0.005) - transformer_nufft = aa.TransformerNUFFT( uv_wavelengths=uv_wavelengths, real_space_mask=real_space_mask ) visibilities_nufft = transformer_nufft.visibilities_from(image=image.native) - assert visibilities_dft == pytest.approx(visibilities_nufft, 2.0) assert visibilities_nufft[0] == pytest.approx(25.02317617953263 + 0.0j, 1.0e-7) -def test__nufft__transform_mapping_matrix(): - uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) +def test__nufft__image_from(visibilities_7, uv_wavelengths_7x2, 
mask_2d_7x7): - grid_radians = aa.Grid2D.uniform(shape_native=(5, 5), pixel_scales=0.005) - real_space_mask = MockRealSpaceMask(grid=grid_radians) + transformer = aa.TransformerNUFFT( + uv_wavelengths=uv_wavelengths_7x2, + real_space_mask=mask_2d_7x7, + ) - mapping_matrix = np.ones(shape=(25, 3)) + image = transformer.image_from(visibilities=visibilities_7) - transformer_dft = aa.TransformerDFT( - uv_wavelengths=uv_wavelengths, - real_space_mask=real_space_mask, - preload_transform=False, - ) + assert image[0:3] == pytest.approx([0.00726546, 0.01149121, 0.01421022], 1.0e-4) - transformed_mapping_matrix_dft = transformer_dft.transform_mapping_matrix( - mapping_matrix=mapping_matrix - ) + +def test__nufft__transform_mapping_matrix(): + uv_wavelengths = np.array([[0.2, 1.0], [0.5, 1.1], [0.8, 1.2]]) + + mapping_matrix = np.ones(shape=(25, 3)) real_space_mask = aa.Mask2D.all_false(shape_native=(5, 5), pixel_scales=0.005) @@ -328,13 +189,6 @@ def test__nufft__transform_mapping_matrix(): mapping_matrix=mapping_matrix ) - assert transformed_mapping_matrix_dft == pytest.approx( - transformed_mapping_matrix_nufft, 2.0 - ) - assert transformed_mapping_matrix_dft == pytest.approx( - transformed_mapping_matrix_nufft, 2.0 - ) - assert transformed_mapping_matrix_nufft[0, 0] == pytest.approx( 25.02317 + 0.0j, 1.0e-4 ) diff --git a/test_autoarray/structures/test_visibilities.py b/test_autoarray/structures/test_visibilities.py index f029ef3ed..82c6bddf0 100644 --- a/test_autoarray/structures/test_visibilities.py +++ b/test_autoarray/structures/test_visibilities.py @@ -16,7 +16,6 @@ def test__manual__makes_visibilities_without_other_inputs(): assert type(visibilities) == vis.Visibilities assert (visibilities.slim == np.array([1.0 + 2.0j, 3.0 + 4.0j])).all() assert (visibilities.in_array == np.array([[1.0, 2.0], [3.0, 4.0]])).all() - assert (visibilities.ordered_1d == np.array([1.0, 3.0, 2.0, 4.0])).all() assert (visibilities.amplitudes == np.array([np.sqrt(5), 5.0])).all() assert 
visibilities.phases == pytest.approx( np.array([1.10714872, 0.92729522]), 1.0e-4 @@ -29,7 +28,6 @@ def test__manual__makes_visibilities_without_other_inputs(): assert ( visibilities.in_array == np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]) ).all() - assert (visibilities.ordered_1d == np.array([1.0, 3.0, 5.0, 2.0, 4.0, 6.0])).all() def test__manual__makes_visibilities_with_converted_input_as_list(): @@ -121,7 +119,3 @@ def test__visibilities_noise_has_attributes(): assert (noise_map.slim == np.array([1.0 + 2.0j, 3.0 + 4.0j])).all() assert (noise_map.amplitudes == np.array([np.sqrt(5), 5.0])).all() assert noise_map.phases == pytest.approx(np.array([1.10714872, 0.92729522]), 1.0e-4) - assert (noise_map.ordered_1d == np.array([1.0, 3.0, 2.0, 4.0])).all() - assert ( - noise_map.weight_list_ordered_1d == np.array([1.0, 1.0 / 9.0, 0.25, 0.0625]) - ).all()