diff --git a/autoarray/__init__.py b/autoarray/__init__.py index e2595203f..326e4d10d 100644 --- a/autoarray/__init__.py +++ b/autoarray/__init__.py @@ -77,7 +77,7 @@ from .inversion.mesh.mesh_geometry.delaunay import MeshGeometryDelaunay from .inversion.mesh.interpolator.rectangular import InterpolatorRectangular from .inversion.mesh.interpolator.delaunay import InterpolatorDelaunay -from .structures.arrays.kernel_2d import Kernel2D +from .operators.convolver import Convolver from .structures.vectors.uniform import VectorYX2D from .structures.vectors.irregular import VectorYX2DIrregular from .structures.triangles.abstract import AbstractTriangles diff --git a/autoarray/dataset/grids.py b/autoarray/dataset/grids.py index 8aa6626fa..b695ae1bf 100644 --- a/autoarray/dataset/grids.py +++ b/autoarray/dataset/grids.py @@ -2,7 +2,7 @@ from autoarray.mask.mask_2d import Mask2D from autoarray.structures.arrays.uniform_2d import Array2D -from autoarray.structures.arrays.kernel_2d import Kernel2D +from autoarray.operators.convolver import Convolver from autoarray.structures.grids.uniform_2d import Grid2D from autoarray.inversion.mesh.border_relocator import BorderRelocator @@ -16,7 +16,7 @@ def __init__( mask: Mask2D, over_sample_size_lp: Union[int, Array2D], over_sample_size_pixelization: Union[int, Array2D], - psf: Optional[Kernel2D] = None, + psf: Optional[Convolver] = None, ): """ Contains grids of (y,x) Cartesian coordinates at the centre of every pixel in the dataset's image and @@ -99,12 +99,10 @@ def blurring(self): if self.psf is None: self._blurring = None else: - try: - self._blurring = self.lp.blurring_grid_via_kernel_shape_from( - kernel_shape_native=self.psf.shape_native, - ) - except exc.MaskException: - self._blurring = None + self._blurring = Grid2D.from_mask( + mask=self.psf._state.blurring_mask, + over_sample_size=1, + ) return self._blurring diff --git a/autoarray/dataset/imaging/dataset.py b/autoarray/dataset/imaging/dataset.py index 970cae502..a68d0cd32 
100644 --- a/autoarray/dataset/imaging/dataset.py +++ b/autoarray/dataset/imaging/dataset.py @@ -9,7 +9,8 @@ ImagingSparseOperator, ) from autoarray.structures.arrays.uniform_2d import Array2D -from autoarray.structures.arrays.kernel_2d import Kernel2D +from autoarray.operators.convolver import ConvolverState +from autoarray.operators.convolver import Convolver from autoarray.mask.mask_2d import Mask2D from autoarray import type as ty @@ -26,11 +27,11 @@ def __init__( self, data: Array2D, noise_map: Optional[Array2D] = None, - psf: Optional[Kernel2D] = None, + psf: Optional[Convolver] = None, + psf_setup_state: bool = False, noise_covariance_matrix: Optional[np.ndarray] = None, over_sample_size_lp: Union[int, Array2D] = 4, over_sample_size_pixelization: Union[int, Array2D] = 4, - disable_fft_pad: bool = True, use_normalized_psf: Optional[bool] = True, check_noise_map: bool = True, sparse_operator: Optional[ImagingSparseOperator] = None, @@ -78,10 +79,6 @@ def __init__( over_sample_size_pixelization How over sampling is performed for the grid which is associated with a pixelization, which is therefore passed into the calculations performed in the `inversion` module. - disable_fft_pad - The FFT PSF convolution is optimal for a certain 2D FFT padding or trimming, which places the fewest zeros - around the image. If this is set to `True`, this optimal padding is not performed and the image is used - as-is. use_normalized_psf If `True`, the PSF kernel values are rescaled such that they sum to 1.0. This can be important for ensuring the PSF kernel does not change the overall normalization of the image when it is convolved with it. @@ -93,50 +90,6 @@ def __init__( enable this linear algebra formalism for pixelized reconstructions. 
""" - self.disable_fft_pad = disable_fft_pad - - if psf is not None: - - full_shape, fft_shape, mask_shape = psf.fft_shape_from(mask=data.mask) - - if psf is not None and not disable_fft_pad and data.mask.shape != fft_shape: - - # If using real-space convolution instead of FFT, enforce odd-odd shapes - if not psf.use_fft: - fft_shape = tuple(s + 1 if s % 2 == 0 else s for s in fft_shape) - - logger.info( - f"Imaging data has been trimmed or padded for FFT convolution.\n" - f" - Original shape : {data.mask.shape}\n" - f" - FFT shape : {fft_shape}\n" - f"Padding ensures accurate PSF convolution in Fourier space. " - f"Set `disable_fft_pad=True` in Imaging object to turn off automatic padding." - ) - - over_sample_size_lp = ( - over_sample_util.over_sample_size_convert_to_array_2d_from( - over_sample_size=over_sample_size_lp, mask=data.mask - ) - ) - over_sample_size_lp = over_sample_size_lp.resized_from( - new_shape=fft_shape, mask_pad_value=1 - ) - - over_sample_size_pixelization = ( - over_sample_util.over_sample_size_convert_to_array_2d_from( - over_sample_size=over_sample_size_pixelization, mask=data.mask - ) - ) - over_sample_size_pixelization = over_sample_size_pixelization.resized_from( - new_shape=fft_shape, mask_pad_value=1 - ) - - data = data.resized_from(new_shape=fft_shape, mask_pad_value=1) - if noise_map is not None: - noise_map = noise_map.resized_from( - new_shape=fft_shape, mask_pad_value=1 - ) - super().__init__( data=data, noise_map=noise_map, @@ -145,8 +98,6 @@ def __init__( over_sample_size_pixelization=over_sample_size_pixelization, ) - self.use_normalized_psf = use_normalized_psf - if self.noise_map.native is not None and check_noise_map: if ((self.noise_map.native <= 0.0) * np.invert(self.noise_map.mask)).any(): zero_entries = np.argwhere(self.noise_map.native <= 0.0) @@ -163,36 +114,22 @@ def __init__( if psf is not None: - if not data.mask.is_all_false: + if use_normalized_psf: - image_mask = data.mask - blurring_mask = 
data.mask.derive_mask.blurring_from( - kernel_shape_native=psf.shape_native + psf.kernel._array = np.divide( + psf.kernel._array, np.sum(psf.kernel._array) ) - else: + if psf_setup_state: - image_mask = None - blurring_mask = None + state = ConvolverState(kernel=psf.kernel, mask=self.data.mask) - psf = Kernel2D.no_mask( - values=psf.native._array, - pixel_scales=psf.pixel_scales, - normalize=use_normalized_psf, - image_mask=image_mask, - blurring_mask=blurring_mask, - mask_shape=mask_shape, - full_shape=full_shape, - fft_shape=fft_shape, - ) + psf = Convolver( + kernel=psf.kernel, state=state, normalize=use_normalized_psf + ) self.psf = psf - if psf is not None: - if not psf.use_fft: - if psf.mask.shape[0] % 2 == 0 or psf.mask.shape[1] % 2 == 0: - raise exc.KernelException("Kernel2D Kernel2D must be odd") - self.grids = GridsDataset( mask=self.data.mask, over_sample_size_lp=self.over_sample_size_lp, @@ -272,14 +209,17 @@ def from_fits( ) if psf_path is not None: - psf = Kernel2D.from_fits( + kernel = Array2D.from_fits( file_path=psf_path, hdu=psf_hdu, pixel_scales=pixel_scales, - normalize=False, + ) + psf = Convolver( + kernel=kernel, ) else: + kernel = None psf = None return Imaging( @@ -292,7 +232,7 @@ def from_fits( over_sample_size_pixelization=over_sample_size_pixelization, ) - def apply_mask(self, mask: Mask2D, disable_fft_pad: bool = False) -> "Imaging": + def apply_mask(self, mask: Mask2D) -> "Imaging": """ Apply a mask to the imaging dataset, whereby the mask is applied to the image data, noise-map and other quantities one-by-one. 
@@ -340,10 +280,10 @@ def apply_mask(self, mask: Mask2D, disable_fft_pad: bool = False) -> "Imaging": data=data, noise_map=noise_map, psf=self.psf, + psf_setup_state=True, noise_covariance_matrix=noise_covariance_matrix, over_sample_size_lp=over_sample_size_lp, over_sample_size_pixelization=over_sample_size_pixelization, - disable_fft_pad=disable_fft_pad, ) logger.info( @@ -356,7 +296,6 @@ def apply_noise_scaling( self, mask: Mask2D, noise_value: float = 1e8, - disable_fft_pad: bool = False, signal_to_noise_value: Optional[float] = None, should_zero_data: bool = True, ) -> "Imaging": @@ -423,7 +362,6 @@ def apply_noise_scaling( noise_covariance_matrix=self.noise_covariance_matrix, over_sample_size_lp=self.over_sample_size_lp, over_sample_size_pixelization=self.over_sample_size_pixelization, - disable_fft_pad=disable_fft_pad, check_noise_map=False, ) @@ -437,7 +375,6 @@ def apply_over_sampling( self, over_sample_size_lp: Union[int, Array2D] = None, over_sample_size_pixelization: Union[int, Array2D] = None, - disable_fft_pad: bool = False, ) -> "AbstractDataset": """ Apply new over sampling objects to the grid and grid pixelization of the dataset. 
@@ -467,7 +404,6 @@ def apply_over_sampling( over_sample_size_lp=over_sample_size_lp or self.over_sample_size_lp, over_sample_size_pixelization=over_sample_size_pixelization or self.over_sample_size_pixelization, - disable_fft_pad=disable_fft_pad, check_noise_map=False, ) @@ -476,7 +412,6 @@ def apply_over_sampling( def apply_sparse_operator( self, batch_size: int = 128, - disable_fft_pad: bool = False, ): """ The sparse linear algebra formalism precomputes the convolution of every pair of masked @@ -493,11 +428,6 @@ def apply_sparse_operator( batch_size The size of batches used to compute the w-tilde curvature matrix via FFT-based convolution, which can be reduced to produce lower memory usage at the cost of speed - disable_fft_pad - The FFT PSF convolution is optimal for a certain 2D FFT padding or trimming, - which places the fewest zeros around the image. If this is set to `True`, this optimal padding is not - performed and the image is used as-is. This is normally used to avoid repadding data that has already been - padded. use_jax Whether to use JAX to compute W-Tilde. This requires JAX to be installed. 
""" @@ -510,7 +440,7 @@ def apply_sparse_operator( inversion_imaging_util.ImagingSparseOperator.from_noise_map_and_psf( data=self.data, noise_map=self.noise_map, - psf=self.psf.native, + psf=self.psf.kernel.native, batch_size=batch_size, ) ) @@ -522,14 +452,12 @@ def apply_sparse_operator( noise_covariance_matrix=self.noise_covariance_matrix, over_sample_size_lp=self.over_sample_size_lp, over_sample_size_pixelization=self.over_sample_size_pixelization, - disable_fft_pad=disable_fft_pad, check_noise_map=False, sparse_operator=sparse_operator, ) def apply_sparse_operator_cpu( self, - disable_fft_pad: bool = False, ): """ The sparse linear algebra formalism precomputes the convolution of every pair of masked @@ -545,12 +473,7 @@ def apply_sparse_operator_cpu( ------- batch_size The size of batches used to compute the w-tilde curvature matrix via FFT-based convolution, - which can be reduced to produce lower memory usage at the cost of speed - disable_fft_pad - The FFT PSF convolution is optimal for a certain 2D FFT padding or trimming, - which places the fewest zeros around the image. If this is set to `True`, this optimal padding is not - performed and the image is used as-is. This is normally used to avoid repadding data that has already been - padded. + which can be reduced to produce lower memory usage at the cost of speed. use_jax Whether to use JAX to compute W-Tilde. This requires JAX to be installed. 
""" @@ -575,7 +498,7 @@ def apply_sparse_operator_cpu( lengths, ) = inversion_imaging_numba_util.psf_precision_operator_sparse_from( noise_map_native=np.array(self.noise_map.native.array).astype("float64"), - kernel_native=np.array(self.psf.native.array).astype("float64"), + kernel_native=np.array(self.psf.kernel.native.array).astype("float64"), native_index_for_slim_index=np.array( self.mask.derive_indexes.native_for_slim ).astype("int"), @@ -597,7 +520,6 @@ def apply_sparse_operator_cpu( noise_covariance_matrix=self.noise_covariance_matrix, over_sample_size_lp=self.over_sample_size_lp, over_sample_size_pixelization=self.over_sample_size_pixelization, - disable_fft_pad=disable_fft_pad, check_noise_map=False, sparse_operator=sparse_operator, ) @@ -633,7 +555,7 @@ def output_to_fits( self.data.output_to_fits(file_path=data_path, overwrite=overwrite) if self.psf is not None and psf_path is not None: - self.psf.output_to_fits(file_path=psf_path, overwrite=overwrite) + self.psf.kernel.output_to_fits(file_path=psf_path, overwrite=overwrite) if self.noise_map is not None and noise_map_path is not None: self.noise_map.output_to_fits(file_path=noise_map_path, overwrite=overwrite) diff --git a/autoarray/dataset/imaging/simulator.py b/autoarray/dataset/imaging/simulator.py index 1c9b883d4..48b73b6b7 100644 --- a/autoarray/dataset/imaging/simulator.py +++ b/autoarray/dataset/imaging/simulator.py @@ -4,7 +4,7 @@ from autoarray.dataset.imaging.dataset import Imaging from autoarray.structures.arrays.uniform_2d import Array2D -from autoarray.structures.arrays.kernel_2d import Kernel2D +from autoarray.operators.convolver import Convolver from autoarray.mask.mask_2d import Mask2D from autoarray import exc @@ -19,7 +19,7 @@ def __init__( exposure_time: float, background_sky_level: float = 0.0, subtract_background_sky: bool = True, - psf: Kernel2D = None, + psf: Convolver = None, use_real_space_convolution: bool = True, normalize_psf: bool = True, add_poisson_noise_to_data: bool = 
True, @@ -95,7 +95,7 @@ def __init__( psf = psf.normalized self.psf = psf else: - self.psf = Kernel2D.no_blur(pixel_scales=1.0) + self.psf = Convolver.no_blur(pixel_scales=1.0) self.use_real_space_convolution = use_real_space_convolution self.exposure_time = exposure_time @@ -192,13 +192,12 @@ def via_image_from( psf=self.psf, noise_map=noise_map, check_noise_map=False, - disable_fft_pad=True, ) if over_sample_size is not None: dataset = dataset.apply_over_sampling( - over_sample_size_lp=over_sample_size.native, disable_fft_pad=True + over_sample_size_lp=over_sample_size.native, ) return dataset diff --git a/autoarray/dataset/plot/imaging_plotters.py b/autoarray/dataset/plot/imaging_plotters.py index e0c0772e3..b396f1a4d 100644 --- a/autoarray/dataset/plot/imaging_plotters.py +++ b/autoarray/dataset/plot/imaging_plotters.py @@ -93,7 +93,7 @@ def figures_2d( if psf: self.mat_plot_2d.plot_array( - array=self.dataset.psf, + array=self.dataset.psf.kernel, visuals_2d=self.visuals_2d, auto_labels=AutoLabels( title=title_str or f"Point Spread Function", diff --git a/autoarray/dataset/preprocess.py b/autoarray/dataset/preprocess.py index c113c16aa..d3c0de84a 100644 --- a/autoarray/dataset/preprocess.py +++ b/autoarray/dataset/preprocess.py @@ -328,7 +328,9 @@ def background_noise_map_via_edges_from(image, no_edges): ) -def psf_with_odd_dimensions_from(psf): +def kernel_with_odd_dimensions_from( + kernel, rescale_factor: float, normalize: bool = True +): """ If the PSF kernel has one or two even-sized dimensions, return a PSF object where the kernel has odd-sized dimensions (odd-sized dimensions are required for 2D convolution). @@ -337,6 +339,17 @@ def psf_with_odd_dimensions_from(psf): routine. This may lead to loss of accuracy in the PSF kernel and it is advised that users, where possible, create their PSF on an odd-sized array using their data reduction pipelines that remove this approximation. 
+ Odd-sized kernels are often required for real space convolution operations + (e.g. centered PSFs in imaging pipelines). If the kernel has one or two + even-sized dimensions, they are rescaled (via interpolation) and padded + so that both dimensions are odd. + + The kernel can also be scaled larger or smaller by changing + ``rescale_factor``. Rescaling uses ``skimage.transform.rescale`` / + ``resize``, which interpolate pixel values and may introduce small + inaccuracies compared to native instrument PSFs. Where possible, users + should generate odd-sized PSFs directly from data reduction. + Parameters ---------- rescale_factor @@ -345,7 +358,56 @@ def psf_with_odd_dimensions_from(psf): normalize Whether the PSF should be normalized after being rescaled. """ - return psf.rescaled_with_odd_dimensions_from(rescale_factor=1.0) + + from skimage.transform import resize, rescale + + try: + kernel_rescaled = rescale( + kernel, + rescale_factor, + anti_aliasing=False, + mode="constant", + channel_axis=None, + ) + except TypeError: + kernel_rescaled = rescale( + kernel, + rescale_factor, + anti_aliasing=False, + mode="constant", + ) + + if kernel_rescaled.shape[0] % 2 == 0 and kernel_rescaled.shape[1] % 2 == 0: + kernel_rescaled = resize( + kernel_rescaled, + output_shape=( + kernel_rescaled.shape[0] + 1, + kernel_rescaled.shape[1] + 1, + ), + anti_aliasing=False, + mode="constant", + ) + + elif kernel_rescaled.shape[0] % 2 == 0 and kernel_rescaled.shape[1] % 2 != 0: + kernel_rescaled = resize( + kernel_rescaled, + output_shape=(kernel_rescaled.shape[0] + 1, kernel_rescaled.shape[1]), + anti_aliasing=False, + mode="constant", + ) + + elif kernel_rescaled.shape[0] % 2 != 0 and kernel_rescaled.shape[1] % 2 == 0: + kernel_rescaled = resize( + kernel_rescaled, + output_shape=(kernel_rescaled.shape[0], kernel_rescaled.shape[1] + 1), + anti_aliasing=False, + mode="constant", + ) + + if normalize: + kernel_rescaled = np.divide(kernel_rescaled, np.sum(kernel_rescaled)) + + return 
kernel_rescaled def exposure_time_map_via_exposure_time_and_background_noise_map_from( diff --git a/autoarray/exc.py b/autoarray/exc.py index 94c5b8857..456f6f461 100644 --- a/autoarray/exc.py +++ b/autoarray/exc.py @@ -40,7 +40,7 @@ class VectorYXException(Exception): class KernelException(Exception): """ - Raises exceptions associated with the `structures/arrays/kernel_2d.py` module and `Kernel2D` classes. + Raises exceptions associated with the `structures/arrays/kernel_2d.py` module and `Convolver` classes. For example if the kernel has an even-sized number of pixels. """ diff --git a/autoarray/fixtures.py b/autoarray/fixtures.py index d07b981fe..fcba86a48 100644 --- a/autoarray/fixtures.py +++ b/autoarray/fixtures.py @@ -119,13 +119,16 @@ def make_image_7x7(): def make_psf_3x3(): - psf = np.array([[0.0, 0.5, 0.0], [0.5, 1.0, 0.5], [0.0, 0.5, 0.0]]) + psf = aa.Array2D.no_mask( + values=[[0.0, 0.5, 0.0], [0.5, 1.0, 0.5], [0.0, 0.5, 0.0]], + pixel_scales=(1.0, 1.0), + ) - return aa.Kernel2D.no_mask(values=psf, pixel_scales=(1.0, 1.0)) + return aa.Convolver(kernel=psf) def make_psf_3x3_no_blur(): - return aa.Kernel2D.no_blur(pixel_scales=(1.0, 1.0)) + return aa.Convolver.no_blur(pixel_scales=(1.0, 1.0)) def make_noise_map_7x7(): diff --git a/autoarray/inversion/inversion/imaging/abstract.py b/autoarray/inversion/inversion/imaging/abstract.py index 85230ccf6..649382059 100644 --- a/autoarray/inversion/inversion/imaging/abstract.py +++ b/autoarray/inversion/inversion/imaging/abstract.py @@ -229,7 +229,7 @@ def data_linear_func_matrix_dict(self): data_linear_func_matrix = ( inversion_imaging_util.data_linear_func_matrix_from( curvature_weights_matrix=curvature_weights, - kernel_native=self.psf.native, + kernel_native=self.psf.kernel.native, mask=self.mask, ) ) diff --git a/autoarray/inversion/inversion/imaging_numba/inversion_imaging_numba_util.py b/autoarray/inversion/inversion/imaging_numba/inversion_imaging_numba_util.py index 27b680ce5..370bc8f77 100644 --- 
a/autoarray/inversion/inversion/imaging_numba/inversion_imaging_numba_util.py +++ b/autoarray/inversion/inversion/imaging_numba/inversion_imaging_numba_util.py @@ -873,7 +873,7 @@ def psf_precision_operator(self): return psf_precision_operator_from( noise_map_native=self.noise_map.native.array, - kernel_native=self.psf.native.array, + kernel_native=self.psf.kernel.native.array, native_index_for_slim_index=np.array( self.mask.derive_indexes.native_for_slim ).astype("int"), diff --git a/autoarray/inversion/inversion/imaging_numba/sparse.py b/autoarray/inversion/inversion/imaging_numba/sparse.py index ed22dc135..31aabf5ff 100644 --- a/autoarray/inversion/inversion/imaging_numba/sparse.py +++ b/autoarray/inversion/inversion/imaging_numba/sparse.py @@ -62,7 +62,7 @@ def psf_weighted_data(self): return inversion_imaging_numba_util.psf_weighted_data_from( image_native=np.array(self.data.native.array), noise_map_native=self.noise_map.native.array, - kernel_native=self.psf.native.array, + kernel_native=self.psf.kernel.native.array, native_index_for_slim_index=self.data.mask.derive_indexes.native_for_slim, ) @@ -430,7 +430,7 @@ def _curvature_matrix_func_list_and_mapper(self) -> np.ndarray: pix_pixels=mapper.params, curvature_weights=np.array(data_linear_func_matrix), mask=self.mask.array, - psf_kernel=self.psf.native.array, + psf_kernel=self.psf.kernel.native.array, ) curvature_matrix[ diff --git a/autoarray/inversion/plot/inversion_plotters.py b/autoarray/inversion/plot/inversion_plotters.py index b0793e947..b5fb52b9b 100644 --- a/autoarray/inversion/plot/inversion_plotters.py +++ b/autoarray/inversion/plot/inversion_plotters.py @@ -84,7 +84,8 @@ def figures_2d(self, reconstructed_operated_data: bool = False): array=self.inversion.mapped_reconstructed_operated_data, visuals_2d=self.visuals_2d, auto_labels=AutoLabels( - title="Reconstructed Image", filename="reconstructed_operated_data" + title="Reconstructed Image", + filename="reconstructed_operated_data", ), ) except 
AttributeError: diff --git a/autoarray/mask/derive/mask_2d.py b/autoarray/mask/derive/mask_2d.py index c9792436f..0a2197ed8 100644 --- a/autoarray/mask/derive/mask_2d.py +++ b/autoarray/mask/derive/mask_2d.py @@ -171,7 +171,7 @@ def blurring_from(self, kernel_shape_native: Tuple[int, int]) -> Mask2D: Parameters ---------- kernel_shape_native - The 2D shape of the 2D convolution ``Kernel2D`` which defines the blurring region. + The 2D shape of the 2D convolution ``Convolver`` which defines the blurring region. Examples -------- diff --git a/autoarray/mask/mask_2d.py b/autoarray/mask/mask_2d.py index 70ecd7cb1..94c0029c7 100644 --- a/autoarray/mask/mask_2d.py +++ b/autoarray/mask/mask_2d.py @@ -696,7 +696,7 @@ def unmasked_blurred_array_from(self, padded_array, psf, image_shape) -> Array2D Parameters ---------- - psf : aa.Kernel2D + psf : aa.Convolver The PSF of the image used for convolution. unmasked_image_1d The 1D unmasked image which is blurred. diff --git a/autoarray/structures/arrays/kernel_2d.py b/autoarray/operators/convolver.py similarity index 59% rename from autoarray/structures/arrays/kernel_2d.py rename to autoarray/operators/convolver.py index b9e55266f..a8d8758d6 100644 --- a/autoarray/structures/arrays/kernel_2d.py +++ b/autoarray/operators/convolver.py @@ -5,7 +5,6 @@ if TYPE_CHECKING: from autoarray import Mask2D - import numpy as np from pathlib import Path import scipy @@ -20,22 +19,93 @@ from autoarray.structures.grids.uniform_2d import Grid2D from autoarray.structures.header import Header +from autoarray import exc from autoarray import type as ty -class Kernel2D(AbstractArray2D): +class ConvolverState: def __init__( self, - values, - mask, - header=None, + kernel: Array2D, + mask: Mask2D, + ): + """ + Compute the padded shapes required for FFT-based convolution with this kernel. 
+ + FFT convolution requires the input image and kernel to be zero-padded so that + the convolution is equivalent to linear convolution (not circular) and to avoid + wrap-around artefacts. + + This method inspects the mask and the kernel shape to determine three key shapes: + + - ``mask_shape``: the rectangular bounding-box region of the mask that encloses + all unmasked (False) pixels, padded by half the kernel size in each direction. + This is the minimal region that must be retained for convolution. This is + not used or computed outside this function with the two shapes below + used instead. + + - ``full_shape``: the "linear convolution shape", equal to + ``mask_shape + kernel_shape - 1``. This is the minimal padded size required + for an exact linear convolution. This is also not used or computed outside + this function. + + - ``fft_shape``: the FFT-efficient padded shape, obtained by rounding each + dimension of ``full_shape`` up to the next fast length for real FFTs + (via ``scipy.fft.next_fast_len``). Using this ensures efficient FFT execution. + + Parameters + ---------- + mask + A 2D mask where False indicates unmasked pixels (valid data) and True + indicates masked pixels. The bounding-box of the False region is used + to compute the convolution region. + + Returns + ------- + fft_shape + The FFT-friendly padded shape for efficient convolution. 
+ """ + if len(kernel) == 1: + kernel = kernel.resized_from(new_shape=(3, 3)) + + self.kernel = kernel + + ys, xs = np.where(~mask) + y_min, y_max = ys.min(), ys.max() + x_min, x_max = xs.min(), xs.max() + + (pad_y, pad_x) = self.kernel.shape_native + + mask_shape = ( + (y_max + pad_y // 2) - (y_min - pad_y // 2), + (x_max + pad_x // 2) - (x_min - pad_x // 2), + ) + + full_shape = tuple( + s1 + s2 - 1 for s1, s2 in zip(mask_shape, self.kernel.shape_native) + ) + fft_shape = tuple(scipy.fft.next_fast_len(s, real=True) for s in full_shape) + + # make fft_shape odd x odd to avoid wrap-around artefacts with even kernels + # TODO : Fix this so it pads corrrectly internally + fft_shape = tuple(s + 1 if s % 2 == 0 else s for s in fft_shape) + + self.fft_shape = fft_shape + self.mask = mask.resized_from(self.fft_shape, pad_value=1) + self.blurring_mask = self.mask.derive_mask.blurring_from( + kernel_shape_native=self.kernel.shape_native + ) + + self.fft_kernel = np.fft.rfft2(self.kernel.native.array, s=self.fft_shape) + self.fft_kernel_mapping = np.expand_dims(self.fft_kernel, 2) + + +class Convolver: + def __init__( + self, + kernel: Array2D, + state: Optional[ConvolverState] = None, normalize: bool = False, - store_native: bool = False, - image_mask=None, - blurring_mask=None, - mask_shape=None, - full_shape=None, - fft_shape=None, use_fft: Optional[bool] = None, *args, **kwargs, @@ -43,7 +113,7 @@ def __init__( """ A 2D convolution kernel stored as an array of values paired to a uniform 2D mask. - The ``Kernel2D`` is a subclass of ``Array2D`` with additional methods for performing + The ``Convolver`` is a subclass of ``Array2D`` with additional methods for performing point spread function (PSF) convolution of images or mapping matrices. Each entry of the kernel corresponds to a PSF value at the centre of a pixel in the unmasked grid. @@ -63,7 +133,7 @@ def __init__( Parameters ---------- - values + kernel The raw 2D kernel values. 
Can be normalised to sum to unity if ``normalize=True``. mask The 2D mask associated with the kernel, defining the pixels each kernel value is @@ -72,30 +142,10 @@ def __init__( Optional metadata (e.g. FITS header) associated with the kernel. normalize If True, the kernel values are rescaled such that they sum to 1.0. - store_native - If True, the kernel is stored in its full native 2D form as an attribute - ``stored_native`` for re-use (e.g. when convolving repeatedly). - image_mask - Optional mask defining the unmasked image pixels when performing convolution. - If not provided, defaults to the supplied ``mask``. - blurring_mask - Optional mask defining the "blurring region": pixels outside the image mask - into which PSF flux can spread. Used to construct blurring images and - blurring mapping matrices. - mask_shape - The shape of the (unpadded) mask region. Used when cropping back results after - FFT convolution. - full_shape - The unpadded image + kernel shape (``image_shape + kernel_shape - 1``). - fft_shape - The padded shape used in FFT convolution, typically computed via - ``scipy.fft.next_fast_len`` for efficiency. Must be precomputed before calling - FFT convolution methods. use_fft If True, convolution is performed in Fourier space with zero-padding. If False, convolution is performed in real space. - If None, a default choice is made: real space for small kernels, - FFT for large kernels. + If None, the config file default is used. *args, **kwargs Passed to the ``Array2D`` constructor. @@ -109,33 +159,36 @@ def __init__( - For unit tests with tiny kernels, FFT and real-space convolution may differ slightly due to edge and truncation effects. 
""" - - super().__init__( - values=values, - mask=mask, - header=header, - store_native=store_native, - ) + self.kernel = kernel if normalize: - self._array = np.divide(self._array, np.sum(self._array)) + self.kernel._array = np.divide( + self.kernel._array, np.sum(self.kernel._array) + ) - self.stored_native = self.native + self._use_fft = use_fft - self.fft_shape = fft_shape + if not self._use_fft: + if ( + self.kernel.shape_native[0] % 2 == 0 + or self.kernel.shape_native[1] % 2 == 0 + ): + raise exc.KernelException("Convolver Convolver must be odd") - self.mask_shape = None - self.full_shape = None - self.fft_psf = None + self._state = state - if self.fft_shape is not None: + def state_from(self, mask): - self.mask_shape = mask_shape - self.full_shape = full_shape - self.fft_psf = np.fft.rfft2(self.native.array, s=self.fft_shape) - self.fft_psf_mapping = np.expand_dims(self.fft_psf, 2) + if ( + mask.shape_native[0] != self.kernel.shape_native[0] + or mask.shape_native[1] != self.kernel.shape_native[1] + ): + return ConvolverState(kernel=self.kernel, mask=mask) - self._use_fft = use_fft + if self._state is None: + return ConvolverState(kernel=self.kernel, mask=mask) + + return self._state @property def use_fft(self): @@ -144,172 +197,17 @@ def use_fft(self): return self._use_fft - @classmethod - def no_mask( - cls, - values: Union[np.ndarray, List], - pixel_scales: ty.PixelScales, - shape_native: Tuple[int, int] = None, - origin: Tuple[float, float] = (0.0, 0.0), - header: Optional[Header] = None, - normalize: bool = False, - image_mask=None, - blurring_mask=None, - mask_shape=None, - full_shape=None, - fft_shape=None, - use_fft: Optional[bool] = None, - ): - """ - Create a Kernel2D (see *Kernel2D.__new__*) by inputting the kernel values in 1D or 2D, automatically - determining whether to use the 'manual_slim' or 'manual_native' methods. - - See the manual_slim and manual_native methods for examples. 
- Parameters - ---------- - values - The values of the array input as an ndarray of shape [total_unmasked_pixels] or a list of - lists. - shape_native - The 2D shape of the mask the array is paired with. - pixel_scales - The (y,x) arcsecond-to-pixel units conversion factor of every pixel. If this is input as a `float`, - it is converted to a (float, float). - origin - The (y,x) scaled units origin of the mask's coordinate system. - normalize - If True, the Kernel2D's array values are normalized such that they sum to 1.0. - """ - values = Array2D.no_mask( - values=values, - shape_native=shape_native, - pixel_scales=pixel_scales, - origin=origin, - ) - return Kernel2D( - values=values, - mask=values.mask, - header=header, - normalize=normalize, - image_mask=image_mask, - blurring_mask=blurring_mask, - mask_shape=mask_shape, - full_shape=full_shape, - fft_shape=fft_shape, - use_fft=use_fft, - ) - - @classmethod - def full( - cls, - fill_value: float, - shape_native: Tuple[int, int], - pixel_scales: ty.PixelScales, - origin: Tuple[float, float] = (0.0, 0.0), - normalize: bool = False, - ) -> "Kernel2D": - """ - Create a Kernel2D (see *Kernel2D.__new__*) where all values are filled with an input fill value, analogous to - the method numpy ndarray.full. - - From 1D input the method cannot determine the 2D shape of the array and its mask, thus the shape_native must be - input into this method. The mask is setup as a unmasked `Mask2D` of shape_native. - - Parameters - ---------- - fill_value - The value all array elements are filled with. - shape_native - The 2D shape of the mask the array is paired with. - pixel_scales - The (y,x) arcsecond-to-pixel units conversion factor of every pixel. If this is input as a `float`, - it is converted to a (float, float). - origin - The (y,x) scaled units origin of the mask's coordinate system. - normalize - If True, the Kernel2D's array values are normalized such that they sum to 1.0. 
- """ - return Kernel2D.no_mask( - values=np.full(fill_value=fill_value, shape=shape_native), - pixel_scales=pixel_scales, - origin=origin, - normalize=normalize, - ) - - @classmethod - def ones( - cls, - shape_native: Tuple[int, int], - pixel_scales: ty.PixelScales, - origin: Tuple[float, float] = (0.0, 0.0), - normalize: bool = False, - ): - """ - Create an Kernel2D (see *Kernel2D.__new__*) where all values are filled with ones, analogous to the method numpy - ndarray.ones. - - From 1D input the method cannot determine the 2D shape of the array and its mask, thus the shape_native must be - input into this method. The mask is setup as a unmasked `Mask2D` of shape_native. - - Parameters - ---------- - shape_native - The 2D shape of the mask the array is paired with. - pixel_scales - The (y,x) arcsecond-to-pixel units conversion factor of every pixel. If this is input as a `float`, - it is converted to a (float, float). - origin - The (y,x) scaled units origin of the mask's coordinate system. - normalize - If True, the Kernel2D's array values are normalized such that they sum to 1.0. - """ - return cls.full( - fill_value=1.0, - shape_native=shape_native, - pixel_scales=pixel_scales, - origin=origin, - normalize=normalize, - ) - - @classmethod - def zeros( - cls, - shape_native: Tuple[int, int], - pixel_scales: ty.PixelScales, - origin: Tuple[float, float] = (0.0, 0.0), - normalize: bool = False, - ) -> "Kernel2D": + @property + def normalized(self) -> "Kernel2D": """ - Create an Kernel2D (see *Kernel2D.__new__*) where all values are filled with zeros, analogous to the method numpy - ndarray.ones. - - From 1D input the method cannot determine the 2D shape of the array and its mask, thus the shape_native must be - input into this method. The mask is setup as a unmasked `Mask2D` of shape_native. - - Parameters - ---------- - shape_native - The 2D shape of the mask the array is paired with. 
- pixel_scales - The (y,x) arcsecond-to-pixel units conversion factor of every pixel. If this is input as a `float`, - it is converted to a (float, float). - origin - The (y,x) scaled units origin of the mask's coordinate system. - normalize - If True, the Kernel2D's array values are normalized such that they sum to 1.0. + Normalize the Convolver such that its data_vector values sum to unity. """ - return cls.full( - fill_value=0.0, - shape_native=shape_native, - pixel_scales=pixel_scales, - origin=origin, - normalize=normalize, - ) + return Convolver(kernel=self.kernel, state=self._state, normalize=True) @classmethod def no_blur(cls, pixel_scales): """ - Setup the Kernel2D as a kernel which does not convolve any signal, which is simply an array of shape (1, 1) + Setup the Convolver as a kernel which does not convolve any signal, which is simply a (3, 3) array with a single central pixel with value 1. Parameters @@ -319,23 +217,26 @@ def no_blur(cls, pixel_scales): it is converted to a (float, float). """ - array = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]) + kernel = Array2D.no_mask( + values=[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]], + pixel_scales=pixel_scales, + ) - return cls.no_mask(values=array, pixel_scales=pixel_scales) + return cls(kernel=kernel) @classmethod def from_gaussian( cls, shape_native: Tuple[int, int], - pixel_scales: ty.PixelScales, + pixel_scales, sigma: float, centre: Tuple[float, float] = (0.0, 0.0), axis_ratio: float = 1.0, angle: float = 0.0, normalize: bool = False, - ) -> "Kernel2D": + ) -> "Convolver": """ - Setup the Kernel2D as a 2D symmetric elliptical Gaussian profile, according to the equation: + Setup the Convolver as a 2D symmetric elliptical Gaussian profile, according to the equation: (1.0 / (sigma * sqrt(2.0*pi))) * exp(-0.5 * (r/sigma)**2) @@ -356,7 +257,7 @@ def from_gaussian( angle The rotational angle of the Gaussian's ellipse defined counter clockwise from the positive x-axis.
normalize - If True, the Kernel2D's array values are normalized such that they sum to 1.0. + If True, the Convolver's array values are normalized such that they sum to 1.0. """ grid = Grid2D.uniform(shape_native=shape_native, pixel_scales=pixel_scales) @@ -384,43 +285,12 @@ def from_gaussian( np.exp(-0.5 * np.square(np.divide(grid_elliptical_radii, sigma))), ) - return cls.no_mask( - values=gaussian, - shape_native=shape_native, - pixel_scales=pixel_scales, - normalize=normalize, - ) - - @classmethod - def from_as_gaussian_via_alma_fits_header_parameters( - cls, - shape_native: Tuple[int, int], - pixel_scales: ty.PixelScales, - y_stddev: float, - x_stddev: float, - theta: float, - centre: Tuple[float, float] = (0.0, 0.0), - normalize: bool = False, - ) -> "Kernel2D": - - from astropy import units - - x_stddev = ( - x_stddev * (units.deg).to(units.arcsec) / (2.0 * np.sqrt(2.0 * np.log(2.0))) - ) - y_stddev = ( - y_stddev * (units.deg).to(units.arcsec) / (2.0 * np.sqrt(2.0 * np.log(2.0))) + gaussian = Array2D.no_mask( + values=gaussian, pixel_scales=pixel_scales, shape_native=shape_native ) - axis_ratio = x_stddev / y_stddev - - return Kernel2D.from_gaussian( - shape_native=shape_native, - pixel_scales=pixel_scales, - sigma=y_stddev, - axis_ratio=axis_ratio, - angle=90.0 - theta, - centre=centre, + return Convolver( + kernel=gaussian, normalize=normalize, ) @@ -432,9 +302,9 @@ def from_fits( pixel_scales, origin=(0.0, 0.0), normalize: bool = False, - ) -> "Kernel2D": + ) -> "Convolver": """ - Loads the Kernel2D from a .fits file. + Loads the Convolver from a .fits file. Parameters ---------- @@ -449,84 +319,21 @@ def from_fits( origin The (y,x) scaled units origin of the mask's coordinate system. normalize - If True, the Kernel2D's array values are normalized such that they sum to 1.0. + If True, the Convolver's array values are normalized such that they sum to 1.0. 
""" array = Array2D.from_fits( - file_path=file_path, hdu=hdu, pixel_scales=pixel_scales, origin=origin + file_path=file_path, + hdu=hdu, + pixel_scales=pixel_scales, + origin=origin, ) - header_sci_obj = header_obj_from(file_path=file_path, hdu=0) - header_hdu_obj = header_obj_from(file_path=file_path, hdu=hdu) - - return Kernel2D( - values=array[:], - mask=array.mask, + return Convolver( + kernel=array, normalize=normalize, - header=Header(header_sci_obj=header_sci_obj, header_hdu_obj=header_hdu_obj), ) - def fft_shape_from( - self, mask: np.ndarray - ) -> Union[Tuple[int, int], Tuple[int, int], Tuple[int, int]]: - """ - Compute the padded shapes required for FFT-based convolution with this kernel. - - FFT convolution requires the input image and kernel to be zero-padded so that - the convolution is equivalent to linear convolution (not circular) and to avoid - wrap-around artefacts. This method inspects the mask and the kernel shape to - determine three key shapes: - - - ``mask_shape``: the rectangular bounding-box region of the mask that encloses - all unmasked (False) pixels, padded by half the kernel size in each direction. - This is the minimal region that must be retained for convolution. - - ``full_shape``: the "linear convolution shape", equal to - ``mask_shape + kernel_shape - 1``. This is the minimal padded size required - for an exact linear convolution. - - ``fft_shape``: the FFT-efficient padded shape, obtained by rounding each - dimension of ``full_shape`` up to the next fast length for real FFTs - (via ``scipy.fft.next_fast_len``). Using this ensures efficient FFT execution. - - Parameters - ---------- - mask - A 2D mask where False indicates unmasked pixels (valid data) and True - indicates masked pixels. The bounding-box of the False region is used - to compute the convolution region. - - Returns - ------- - full_shape - The unpadded linear convolution shape (mask region + kernel − 1). 
- fft_shape - The FFT-friendly padded shape for efficient convolution. - mask_shape - The rectangular mask region size including kernel padding. - """ - - ys, xs = np.where(~mask) - y_min, y_max = ys.min(), ys.max() - x_min, x_max = xs.min(), xs.max() - - (pad_y, pad_x) = self.shape_native - - mask_shape = ( - (y_max + pad_y // 2) - (y_min - pad_y // 2), - (x_max + pad_x // 2) - (x_min - pad_x // 2), - ) - - full_shape = tuple(s1 + s2 - 1 for s1, s2 in zip(mask_shape, self.shape_native)) - fft_shape = tuple(scipy.fft.next_fast_len(s, real=True) for s in full_shape) - - return full_shape, fft_shape, mask_shape - - @property - def normalized(self) -> "Kernel2D": - """ - Normalize the Kernel2D such that its data_vector values sum to unity. - """ - return Kernel2D(values=self, mask=self.mask, normalize=True) - def mapping_matrix_native_from( self, mapping_matrix: np.ndarray, @@ -620,7 +427,7 @@ def convolved_image_from( This method chooses between an FFT-based convolution (default if ``self.use_fft=True``) or a direct real-space convolution, depending on - how the Kernel2D was configured. + how the Convolver was configured. In the FFT branch: - The input image (and optional blurring image) are resized / padded to @@ -633,10 +440,10 @@ def convolved_image_from( and avoids wrap-around artefacts. The required padding is determined by ``fft_shape_from(mask)``. If no precomputed shapes exist, they are computed on the fly. For reproducible behaviour, precompute and set - ``fft_shape``, ``full_shape``, and ``mask_shape`` on the kernel. + ``fft_shape` on the kernel. If ``use_fft=False``, convolution falls back to - :meth:`Kernel2D.convolved_image_via_real_space_from`. + :meth:`Convolver.convolved_image_via_real_space_from`. 
Parameters ---------- @@ -670,45 +477,18 @@ def convolved_image_from( import jax.numpy as jnp from autoarray.structures.arrays.uniform_2d import Array2D - if self.fft_shape is None: - # Shapes computed on the fly - full_shape, fft_shape, mask_shape = self.fft_shape_from(mask=image.mask) - - # Compute PSF FFT on the fly in the chosen precision - psf_native = jnp.asarray(self.stored_native.array, dtype=jnp.float64) - fft_psf = xp.fft.rfft2(psf_native, s=fft_shape, axes=(0, 1)).astype( - jnp.complex128 - ) - - image_shape_original = image.shape_native - - # Resize/pad the images as before - image = image.resized_from(new_shape=fft_shape, mask_pad_value=1) - if blurring_image is not None: - blurring_image = blurring_image.resized_from( - new_shape=fft_shape, mask_pad_value=1 - ) - else: - # Cached shapes/state - fft_shape = self.fft_shape - full_shape = self.full_shape - mask_shape = self.mask_shape - - # Use cached PSF FFT but ensure it matches chosen precision. - # IMPORTANT: casting here may create an extra buffer if self.fft_psf is complex128. - # Best practice is to cache a complex64 version on the object when MP is enabled. 
- fft_psf = jnp.asarray(self.fft_psf, dtype=jnp.complex128) + state = self.state_from(mask=image.mask) # Build combined native image in the FFT dtype - image_both_native = xp.zeros(image.mask.shape, dtype=jnp.float64) + image_both_native = xp.zeros(state.fft_shape, dtype=jnp.float64) - image_both_native = image_both_native.at[image.mask.slim_to_native_tuple].set( + image_both_native = image_both_native.at[state.mask.slim_to_native_tuple].set( jnp.asarray(image.array, dtype=jnp.float64) ) if blurring_image is not None: image_both_native = image_both_native.at[ - blurring_image.mask.slim_to_native_tuple + state.blurring_mask.slim_to_native_tuple ].set(jnp.asarray(blurring_image.array, dtype=jnp.float64)) else: warnings.warn( @@ -717,13 +497,15 @@ def convolved_image_from( ) # FFT the combined image - fft_image_native = xp.fft.rfft2(image_both_native, s=fft_shape, axes=(0, 1)) + fft_image_native = xp.fft.rfft2( + image_both_native, s=state.fft_shape, axes=(0, 1) + ) # Multiply by PSF in Fourier space and invert blurred_image_full = xp.fft.irfft2( - fft_psf * fft_image_native, s=fft_shape, axes=(0, 1) + state.fft_kernel * fft_image_native, s=state.fft_shape, axes=(0, 1) ) - ky, kx = self.native.array.shape # (21, 21) + ky, kx = self.kernel.shape_native # (21, 21) off_y = (ky - 1) // 2 off_x = (kx - 1) // 2 @@ -734,19 +516,14 @@ def convolved_image_from( start_indices = (off_y, off_x) blurred_image_native = jax.lax.dynamic_slice( - blurred_image_full, start_indices, image.mask.shape + blurred_image_full, start_indices, state.fft_shape ) # Return slim form; optionally cast for downstream stability - blurred_slim = blurred_image_native[image.mask.slim_to_native_tuple] + blurred_slim = blurred_image_native[state.mask.slim_to_native_tuple] blurred_image = Array2D(values=blurred_slim, mask=image.mask) - if self.fft_shape is None: - blurred_image = blurred_image.resized_from( - new_shape=image_shape_original, mask_pad_value=0 - ) - if use_mixed_precision: blurred_image = 
blurred_image.astype(jnp.float32) @@ -778,7 +555,7 @@ def convolved_mapping_matrix_from( - The slim (masked 1D) representation is returned. If ``use_fft=False``, convolution falls back to - :meth:`Kernel2D.convolved_mapping_matrix_via_real_space_from`. + :meth:`Convolver.convolved_mapping_matrix_via_real_space_from`. Notes ----- @@ -842,31 +619,21 @@ def convolved_mapping_matrix_from( import jax import jax.numpy as jnp - # ------------------------------------------------------------------------- - # Cached FFT shapes/state (REQUIRED) - # ------------------------------------------------------------------------- - if self.fft_shape is None: - raise ValueError( - "FFT convolution requires precomputed FFT shapes on the PSF." - ) - - fft_shape = self.fft_shape - fft_psf_mapping = self.fft_psf_mapping + state = self.state_from(mask=mask) # ------------------------------------------------------------------------- # Mixed precision handling # ------------------------------------------------------------------------- fft_complex_dtype = jnp.complex64 if use_mixed_precision else jnp.complex128 - fft_psf_mapping = jnp.asarray(fft_psf_mapping, dtype=fft_complex_dtype) # ------------------------------------------------------------------------- # Build native cube on the *native mask grid* # ------------------------------------------------------------------------- mapping_matrix_native = self.mapping_matrix_native_from( mapping_matrix=mapping_matrix, - mask=mask, + mask=state.mask, blurring_mapping_matrix=blurring_mapping_matrix, - blurring_mask=blurring_mask, + blurring_mask=state.blurring_mask, use_mixed_precision=use_mixed_precision, xp=xp, ) @@ -876,19 +643,19 @@ def convolved_mapping_matrix_from( # FFT convolution # ------------------------------------------------------------------------- fft_mapping_matrix_native = xp.fft.rfft2( - mapping_matrix_native, s=fft_shape, axes=(0, 1) + mapping_matrix_native, s=state.fft_shape, axes=(0, 1) ) blurred_mapping_matrix_full = 
xp.fft.irfft2( - fft_psf_mapping * fft_mapping_matrix_native, - s=fft_shape, + state.fft_kernel_mapping * fft_mapping_matrix_native, + s=state.fft_shape, axes=(0, 1), ) # ------------------------------------------------------------------------- # APPLY SAME FIX AS convolved_image_from # ------------------------------------------------------------------------- - ky, kx = self.native.array.shape + ky, kx = self.kernel.shape_native off_y = (ky - 1) // 2 off_x = (kx - 1) // 2 @@ -901,10 +668,9 @@ def convolved_mapping_matrix_from( # ------------------------------------------------------------------------- # Extract native grid (same as image path) # ------------------------------------------------------------------------- - native_shape = mask.shape start_indices = (off_y, off_x, 0) - out_shape = native_shape + (blurred_mapping_matrix_full.shape[2],) + out_shape = state.mask.shape_native + (blurred_mapping_matrix_full.shape[2],) blurred_mapping_matrix_native = jax.lax.dynamic_slice( blurred_mapping_matrix_full, @@ -915,104 +681,10 @@ def convolved_mapping_matrix_from( # ------------------------------------------------------------------------- # Slim using ORIGINAL mask indices (same grid) # ------------------------------------------------------------------------- - blurred_slim = blurred_mapping_matrix_native[mask.slim_to_native_tuple] + blurred_slim = blurred_mapping_matrix_native[state.mask.slim_to_native_tuple] return blurred_slim - def rescaled_with_odd_dimensions_from( - self, rescale_factor: float, normalize: bool = False - ) -> "Kernel2D": - """ - Return a version of this kernel rescaled so both dimensions are odd-sized. - - Odd-sized kernels are often required for real space convolution operations - (e.g. centered PSFs in imaging pipelines). If the kernel has one or two - even-sized dimensions, they are rescaled (via interpolation) and padded - so that both dimensions are odd. - - The kernel can also be scaled larger or smaller by changing - ``rescale_factor``. 
Rescaling uses ``skimage.transform.rescale`` / - ``resize``, which interpolate pixel values and may introduce small - inaccuracies compared to native instrument PSFs. Where possible, users - should generate odd-sized PSFs directly from data reduction. - - Parameters - ---------- - rescale_factor - Factor by which the kernel is rescaled. If 1.0, only adjusts size to - nearest odd dimensions. Values > 1 enlarge, < 1 shrink the kernel. - normalize - If True, the returned kernel is normalized to sum to 1.0. - - Returns - ------- - Kernel2D - Rescaled kernel with odd-sized dimensions. - """ - - from skimage.transform import resize, rescale - - try: - kernel_rescaled = rescale( - self.native.array, - rescale_factor, - anti_aliasing=False, - mode="constant", - channel_axis=None, - ) - except TypeError: - kernel_rescaled = rescale( - self.native.array, - rescale_factor, - anti_aliasing=False, - mode="constant", - ) - - if kernel_rescaled.shape[0] % 2 == 0 and kernel_rescaled.shape[1] % 2 == 0: - kernel_rescaled = resize( - kernel_rescaled, - output_shape=( - kernel_rescaled.shape[0] + 1, - kernel_rescaled.shape[1] + 1, - ), - anti_aliasing=False, - mode="constant", - ) - - elif kernel_rescaled.shape[0] % 2 == 0 and kernel_rescaled.shape[1] % 2 != 0: - kernel_rescaled = resize( - kernel_rescaled, - output_shape=(kernel_rescaled.shape[0] + 1, kernel_rescaled.shape[1]), - anti_aliasing=False, - mode="constant", - ) - - elif kernel_rescaled.shape[0] % 2 != 0 and kernel_rescaled.shape[1] % 2 == 0: - kernel_rescaled = resize( - kernel_rescaled, - output_shape=(kernel_rescaled.shape[0], kernel_rescaled.shape[1] + 1), - anti_aliasing=False, - mode="constant", - ) - - if self.pixel_scales is not None: - pixel_scale_factors = ( - self.mask.shape[0] / kernel_rescaled.shape[0], - self.mask.shape[1] / kernel_rescaled.shape[1], - ) - - pixel_scales = ( - self.pixel_scales[0] * pixel_scale_factors[0], - self.pixel_scales[1] * pixel_scale_factors[1], - ) - - else: - pixel_scales = None - 
- return Kernel2D.no_mask( - values=kernel_rescaled, pixel_scales=pixel_scales, normalize=normalize - ) - def convolved_image_via_real_space_from( self, image: np.ndarray, @@ -1053,17 +725,19 @@ def convolved_image_via_real_space_from( import jax + state = self.state_from(mask=image.mask) + # start with native array padded with zeros - image_native = xp.zeros(image.mask.shape, dtype=image.array.dtype) + image_native = xp.zeros(state.fft_shape, dtype=image.array.dtype) # set image pixels - image_native = image_native.at[image.mask.slim_to_native_tuple].set(image.array) + image_native = image_native.at[state.mask.slim_to_native_tuple].set(image.array) # add blurring contribution if provided if blurring_image is not None: image_native = image_native.at[ - blurring_image.mask.slim_to_native_tuple + state.blurring_mask.slim_to_native_tuple ].set(blurring_image.array) else: @@ -1072,14 +746,11 @@ def convolved_image_via_real_space_from( "This may change the correctness of the PSF convolution." 
) - # perform real-space convolution - kernel = self.stored_native.array - convolve_native = jax.scipy.signal.convolve( - image_native, kernel, mode="same", method=jax_method + image_native, self.kernel.native.array, mode="same", method=jax_method ) - convolved_array_1d = convolve_native[image.mask.slim_to_native_tuple] + convolved_array_1d = convolve_native[state.mask.slim_to_native_tuple] return Array2D(values=convolved_array_1d, mask=image.mask) @@ -1131,26 +802,25 @@ def convolved_mapping_matrix_via_real_space_from( import jax + state = self.state_from(mask=mask) + mapping_matrix_native = self.mapping_matrix_native_from( mapping_matrix=mapping_matrix, - mask=mask, + mask=state.mask, blurring_mapping_matrix=blurring_mapping_matrix, - blurring_mask=blurring_mask, + blurring_mask=state.blurring_mask, xp=xp, ) - # 6) Real-space convolution, broadcast kernel over source axis - kernel = self.stored_native.array - blurred_mapping_matrix_native = jax.scipy.signal.convolve( mapping_matrix_native, - kernel[..., None], + self.kernel.native.array[..., None], mode="same", method=jax_method, ) # return slim form - return blurred_mapping_matrix_native[mask.slim_to_native_tuple] + return blurred_mapping_matrix_native[state.mask.slim_to_native_tuple] def convolved_image_via_real_space_np_from( self, image: np.ndarray, blurring_image: Optional[np.ndarray] = None, xp=np @@ -1183,16 +853,18 @@ def convolved_image_via_real_space_np_from( from scipy.signal import convolve as scipy_convolve + state = self.state_from(mask=image.mask) + # start with native array padded with zeros - image_native = xp.zeros(image.mask.shape, dtype=image.array.dtype) + image_native = xp.zeros(state.fft_shape) # set image pixels - image_native[image.mask.slim_to_native_tuple] = image.array + image_native[state.mask.slim_to_native_tuple] = image.array # add blurring contribution if provided if blurring_image is not None: - image_native[blurring_image.mask.slim_to_native_tuple] = ( + 
image_native[state.blurring_mask.slim_to_native_tuple] = ( blurring_image.array ) @@ -1202,14 +874,11 @@ def convolved_image_via_real_space_np_from( "This may change the correctness of the PSF convolution." ) - # perform real-space convolution - kernel = self.stored_native.array - convolve_native = scipy_convolve( - image_native, kernel, mode="same", method="auto" + image_native, self.kernel.native.array, mode="same", method="auto" ) - convolved_array_1d = convolve_native[image.mask.slim_to_native_tuple] + convolved_array_1d = convolve_native[state.mask.slim_to_native_tuple] return Array2D(values=convolved_array_1d, mask=image.mask) @@ -1251,21 +920,21 @@ def convolved_mapping_matrix_via_real_space_np_from( from scipy.signal import convolve as scipy_convolve + state = self.state_from(mask=mask) + mapping_matrix_native = self.mapping_matrix_native_from( mapping_matrix=mapping_matrix, - mask=mask, + mask=state.mask, blurring_mapping_matrix=blurring_mapping_matrix, - blurring_mask=blurring_mask, + blurring_mask=state.blurring_mask, xp=xp, ) - # 6) Real-space convolution, broadcast kernel over source axis - kernel = self.stored_native.array blurred_mapping_matrix_native = scipy_convolve( mapping_matrix_native, - kernel[..., None], + self.kernel.native.array[..., None], mode="same", ) # return slim form - return blurred_mapping_matrix_native[mask.slim_to_native_tuple] + return blurred_mapping_matrix_native[state.mask.slim_to_native_tuple] diff --git a/autoarray/plot/mat_plot/two_d.py b/autoarray/plot/mat_plot/two_d.py index f5c086291..55e929869 100644 --- a/autoarray/plot/mat_plot/two_d.py +++ b/autoarray/plot/mat_plot/two_d.py @@ -577,7 +577,9 @@ def _plot_rectangular_mapper( ) # Unpack edges (assuming shape is (N_edges, 2) → (y_edges, x_edges)) - y_edges, x_edges = mapper.mesh_geometry.edges_transformed.T # explicit, safe + y_edges, x_edges = ( + mapper.mesh_geometry.edges_transformed.T + ) # explicit, safe # Build meshes with ij-indexing: (row = y, col = x) Y, X = 
np.meshgrid(y_edges, x_edges, indexing="ij") diff --git a/autoarray/structures/grids/uniform_2d.py b/autoarray/structures/grids/uniform_2d.py index 563329917..975e04ad8 100644 --- a/autoarray/structures/grids/uniform_2d.py +++ b/autoarray/structures/grids/uniform_2d.py @@ -1086,7 +1086,7 @@ def padded_grid_from(self, kernel_shape_native: Tuple[int, int]) -> "Grid2D": The 2D shape of the kernel which convolves signal from masked pixels to unmasked pixels. """ if kernel_shape_native[0] % 2 == 0 or kernel_shape_native[1] % 2 == 0: - raise exc.KernelException("Kernel2D Kernel2D must be odd") + raise exc.KernelException("Convolver kernel must be odd") shape = self.mask.shape diff --git a/test_autoarray/dataset/imaging/test_dataset.py b/test_autoarray/dataset/imaging/test_dataset.py index 663e4ea39..9a3f03e59 100644 --- a/test_autoarray/dataset/imaging/test_dataset.py +++ b/test_autoarray/dataset/imaging/test_dataset.py @@ -125,11 +125,13 @@ def test__from_fits(): ) assert (dataset.data.native == np.ones((3, 3))).all() - assert dataset.psf.native == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) + assert dataset.psf.kernel.native == pytest.approx( + (1.0 / 9.0) * np.ones((3, 3)), 1.0e-4 + ) assert (dataset.noise_map.native == 3.0 * np.ones((3, 3))).all() assert dataset.pixel_scales == (0.1, 0.1) - assert dataset.psf.mask.pixel_scales == (0.1, 0.1) + assert dataset.psf.kernel.mask.pixel_scales == (0.1, 0.1) assert dataset.noise_map.mask.pixel_scales == (0.1, 0.1) dataset = aa.Imaging.from_fits( @@ -143,11 +145,13 @@ def test__from_fits(): ) assert (dataset.data.native == np.ones((3, 3))).all() - assert dataset.psf.native == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) + assert dataset.psf.kernel.native == pytest.approx( + (1.0 / 9.0) * np.ones((3, 3)), 1.0e-4 + ) assert (dataset.noise_map.native == 3.0 * np.ones((3, 3))).all() assert dataset.pixel_scales == (0.1, 0.1) - assert dataset.psf.mask.pixel_scales == (0.1, 0.1) + assert
dataset.psf.kernel.mask.pixel_scales == (0.1, 0.1) assert dataset.noise_map.mask.pixel_scales == (0.1, 0.1) @@ -167,13 +171,13 @@ def test__output_to_fits(imaging_7x7, test_data_path): ) assert (dataset.data.native == np.ones((7, 7))).all() - assert dataset.psf.native[1, 1] == pytest.approx(0.33333, 1.0e-4) + assert dataset.psf.kernel.native[1, 1] == pytest.approx(0.33333, 1.0e-4) assert (dataset.noise_map.native == 2.0 * np.ones((7, 7))).all() assert dataset.pixel_scales == (0.1, 0.1) def test__apply_mask(imaging_7x7, mask_2d_7x7, psf_3x3): - masked_imaging_7x7 = imaging_7x7.apply_mask(mask=mask_2d_7x7, disable_fft_pad=True) + masked_imaging_7x7 = imaging_7x7.apply_mask(mask=mask_2d_7x7) assert (masked_imaging_7x7.data.slim == np.ones(9)).all() @@ -187,11 +191,11 @@ def test__apply_mask(imaging_7x7, mask_2d_7x7, psf_3x3): == 2.0 * np.ones((7, 7)) * np.invert(mask_2d_7x7) ).all() - assert masked_imaging_7x7.psf.slim == pytest.approx( - (1.0 / 3.0) * psf_3x3.slim.array, 1.0e-4 + assert masked_imaging_7x7.psf.kernel.slim == pytest.approx( + (1.0 / 3.0) * psf_3x3.kernel.slim.array, 1.0e-4 ) - assert type(masked_imaging_7x7.psf) == aa.Kernel2D + assert type(masked_imaging_7x7.psf) == aa.Convolver def test__apply_noise_scaling(imaging_7x7, mask_2d_7x7): @@ -260,7 +264,9 @@ def test__apply_mask__noise_covariance_matrix(): def test__different_imaging_without_mock_objects__customize_constructor_inputs(): - psf = aa.Kernel2D.ones(shape_native=(7, 7), pixel_scales=3.0) + + kernel = aa.Array2D.ones(shape_native=(7, 7), pixel_scales=3.0) + psf = aa.Convolver(kernel=kernel) dataset = aa.Imaging( data=aa.Array2D.ones(shape_native=(19, 19), pixel_scales=3.0), @@ -278,7 +284,7 @@ def test__different_imaging_without_mock_objects__customize_constructor_inputs() masked_dataset = dataset.apply_mask(mask=mask) - assert masked_dataset.psf.native == pytest.approx( + assert masked_dataset.psf.kernel.native == pytest.approx( (1.0 / 49.0) * np.ones((7, 7)), 1.0e-4 ) assert 
(masked_dataset.data == np.array([1.0])).all() @@ -304,8 +310,11 @@ def test__psf_not_odd_x_odd_kernel__raises_error(): with pytest.raises(exc.KernelException): image = aa.Array2D.ones(shape_native=(3, 3), pixel_scales=1.0) noise_map = aa.Array2D.ones(shape_native=(3, 3), pixel_scales=1.0) - psf = aa.Kernel2D.no_mask(values=[[0.0, 1.0], [1.0, 2.0]], pixel_scales=1.0) + kernel = aa.Array2D.no_mask(values=[[0.0, 1.0], [1.0, 2.0]], pixel_scales=1.0) + psf = aa.Convolver(kernel=kernel) dataset = aa.Imaging( - data=image, noise_map=noise_map, psf=psf, disable_fft_pad=True + data=image, + noise_map=noise_map, + psf=psf, ) diff --git a/test_autoarray/dataset/imaging/test_simulator.py b/test_autoarray/dataset/imaging/test_simulator.py index 54dc1f6ed..3128b49c5 100644 --- a/test_autoarray/dataset/imaging/test_simulator.py +++ b/test_autoarray/dataset/imaging/test_simulator.py @@ -94,10 +94,11 @@ def test__via_image_from__psf_off__background_sky_on(image_central_delta_3x3): def test__via_image_from__psf_on__psf_blurs_image_with_edge_trimming( image_central_delta_3x3, ): - psf = aa.Kernel2D.no_mask( + kernel = aa.Array2D.no_mask( values=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]]), pixel_scales=1.0, ) + psf = aa.Convolver(kernel=kernel) simulator = aa.SimulatorImaging( exposure_time=1.0, @@ -118,10 +119,11 @@ def test__via_image_from__psf_on__psf_blurs_image_with_edge_trimming( def test__via_image_from__psf_on__disable_poisson_noise_in_data( image_central_delta_3x3, ): - psf = aa.Kernel2D.no_mask( + kernel = aa.Array2D.no_mask( values=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]]), pixel_scales=1.0, ) + psf = aa.Convolver(kernel=kernel) simulator = aa.SimulatorImaging( exposure_time=20.0, @@ -150,10 +152,11 @@ def test__via_image_from__psf_on__disable_poisson_noise_in_data( def test__via_image_from__psf_on__psf_and_noise_both_on(image_central_delta_3x3): image = image_central_delta_3x3 + 1.0 - psf = aa.Kernel2D.no_mask( + kernel = 
aa.Array2D.no_mask( values=np.array([[0.0, 1.0, 0.0], [1.0, 2.0, 1.0], [0.0, 1.0, 0.0]]), pixel_scales=1.0, ) + psf = aa.Convolver(kernel=kernel) simulator = aa.SimulatorImaging( exposure_time=20.0, diff --git a/test_autoarray/dataset/test_preprocess.py b/test_autoarray/dataset/test_preprocess.py index 8e99a74e8..8a1cf053c 100644 --- a/test_autoarray/dataset/test_preprocess.py +++ b/test_autoarray/dataset/test_preprocess.py @@ -491,6 +491,90 @@ def test__background_noise_map_via_edges_of_image_from_5(): ) +def test__rescaled_with_odd_dimensions_from__evens_to_odds(): + kernel = np.ones((6, 6)) + + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.5, normalize=True + ) + assert kernel == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) + + kernel = np.ones((9, 9)) + + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.333333333333333, normalize=True + ) + assert kernel == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) + + kernel = np.ones((18, 6)) + + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.5, normalize=True + ) + assert kernel == pytest.approx((1.0 / 27.0) * np.ones((9, 3)), 1.0e-4) + + kernel = np.ones((6, 18)) + + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.5, normalize=True + ) + assert kernel == pytest.approx((1.0 / 27.0) * np.ones((3, 9)), 1.0e-4) + + +def test__rescaled_with_odd_dimensions_from__different_scalings(): + kernel = np.ones(shape=(2, 2)) + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=2.0, normalize=True + ) + assert kernel == pytest.approx((1.0 / 25.0) * np.ones((5, 5)), 1.0e-4) + + kernel = np.ones(shape=(40, 40)) + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.1, normalize=True + ) + assert kernel == pytest.approx((1.0 / 25.0) * np.ones((5, 5)), 1.0e-4) + + kernel = 
np.ones(shape=(2, 4)) + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=2.0, normalize=True + ) + + assert kernel == pytest.approx((1.0 / 45.0) * np.ones((5, 9)), 1.0e-4) + + kernel = np.ones(shape=(4, 2)) + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=2.0, normalize=True + ) + assert kernel == pytest.approx((1.0 / 45.0) * np.ones((9, 5)), 1.0e-4) + + kernel = np.ones(shape=(6, 4)) + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.5, normalize=True + ) + + assert kernel == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) + + kernel = np.ones(shape=(9, 12)) + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.33333333333, normalize=True + ) + + assert kernel == pytest.approx((1.0 / 15.0) * np.ones((3, 5)), 1.0e-4) + + kernel = np.ones(shape=(4, 6)) + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.5, normalize=True + ) + + assert kernel == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) + + kernel = np.ones(shape=(12, 9)) + kernel = aa.preprocess.kernel_with_odd_dimensions_from( + kernel=kernel, rescale_factor=0.33333333333, normalize=True + ) + assert kernel == pytest.approx((1.0 / 15.0) * np.ones((5, 3)), 1.0e-4) + + def test__exposure_time_map_from_exposure_time_and_inverse_noise_map(): exposure_time = 6.0 diff --git a/test_autoarray/fit/test_fit_dataset.py b/test_autoarray/fit/test_fit_dataset.py index 4363c7707..674f25555 100644 --- a/test_autoarray/fit/test_fit_dataset.py +++ b/test_autoarray/fit/test_fit_dataset.py @@ -66,7 +66,7 @@ def test__figure_of_merit__with_noise_covariance_matrix_in_dataset( def test__grid_offset_via_data_model(imaging_7x7, mask_2d_7x7, model_image_7x7): - masked_imaging_7x7 = imaging_7x7.apply_mask(mask=mask_2d_7x7, disable_fft_pad=True) + masked_imaging_7x7 = imaging_7x7.apply_mask(mask=mask_2d_7x7) fit = 
aa.m.MockFitImaging( dataset=masked_imaging_7x7, diff --git a/test_autoarray/inversion/inversion/imaging/test_inversion_imaging_util.py b/test_autoarray/inversion/inversion/imaging/test_inversion_imaging_util.py index 235318cb3..538248ebb 100644 --- a/test_autoarray/inversion/inversion/imaging/test_inversion_imaging_util.py +++ b/test_autoarray/inversion/inversion/imaging/test_inversion_imaging_util.py @@ -192,11 +192,11 @@ def test__data_vector_via_weighted_data_two_methods_agree(): noise_map = np.random.uniform(size=mask.shape_native) noise_map = aa.Array2D(values=noise_map, mask=mask) - kernel = aa.Kernel2D.from_gaussian( + convolver = aa.Convolver.from_gaussian( shape_native=(7, 7), pixel_scales=mask.pixel_scales, sigma=1.0, normalize=True ) - psf = kernel + psf = convolver mesh = aa.mesh.RectangularUniform(shape=(20, 20)) @@ -239,7 +239,7 @@ def test__data_vector_via_weighted_data_two_methods_agree(): psf_weighted_data = aa.util.inversion_imaging.psf_weighted_data_from( weight_map_native=weight_map.native.array, - kernel_native=kernel.native.array, + kernel_native=convolver.kernel.native.array, native_index_for_slim_index=mask.derive_indexes.native_for_slim.astype( "int" ), @@ -265,7 +265,7 @@ def test__curvature_matrix_via_psf_weighted_noise_two_methods_agree(): noise_map = np.random.uniform(size=mask.shape_native) noise_map = aa.Array2D(values=noise_map, mask=mask) - kernel = aa.Kernel2D.from_gaussian( + kernel = aa.Convolver.from_gaussian( shape_native=(7, 7), pixel_scales=mask.pixel_scales, sigma=1.0, normalize=True ) @@ -274,7 +274,7 @@ def test__curvature_matrix_via_psf_weighted_noise_two_methods_agree(): sparse_operator = aa.ImagingSparseOperator.from_noise_map_and_psf( data=noise_map, noise_map=noise_map, - psf=psf.native, + psf=psf.kernel.native, ) mesh = aa.mesh.RectangularAdaptDensity(shape=(20, 20)) diff --git a/test_autoarray/inversion/inversion/test_abstract.py b/test_autoarray/inversion/inversion/test_abstract.py index 41bee0416..51b3b21b2 
100644 --- a/test_autoarray/inversion/inversion/test_abstract.py +++ b/test_autoarray/inversion/inversion/test_abstract.py @@ -119,7 +119,8 @@ def test__curvature_matrix__via_sparse_operator__identical_to_mapping(): image = aa.Array2D.no_mask(values=np.random.random((7, 7)), pixel_scales=1.0) noise_map = aa.Array2D.no_mask(values=np.random.random((7, 7)), pixel_scales=1.0) kernel = np.array([[0.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0]]) - psf = aa.Kernel2D.no_mask(values=kernel, pixel_scales=1.0) + kernel = aa.Array2D.no_mask(values=kernel, pixel_scales=1.0) + psf = aa.Convolver(kernel=kernel) dataset = aa.Imaging(data=image, noise_map=noise_map, psf=psf) @@ -187,8 +188,11 @@ def test__curvature_matrix_via_sparse_operator__includes_source_interpolation__i image = aa.Array2D.no_mask(values=np.random.random((7, 7)), pixel_scales=1.0) noise_map = aa.Array2D.no_mask(values=np.random.random((7, 7)), pixel_scales=1.0) - kernel = np.array([[0.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0]]) - psf = aa.Kernel2D.no_mask(values=kernel, pixel_scales=1.0) + kernel = aa.Array2D.no_mask( + [[0.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 0.0]], pixel_scales=1.0 + ) + + psf = aa.Convolver(kernel=kernel) dataset = aa.Imaging(data=image, noise_map=noise_map, psf=psf) diff --git a/test_autoarray/operators/test_convolver.py b/test_autoarray/operators/test_convolver.py new file mode 100644 index 000000000..dfd0998be --- /dev/null +++ b/test_autoarray/operators/test_convolver.py @@ -0,0 +1,315 @@ +from astropy import units +from astropy.modeling import functional_models +from astropy.coordinates import Angle +import numpy as np +import pytest +from os import path + +import autoarray as aa + +test_data_path = path.join("{}".format(path.dirname(path.realpath(__file__))), "files") + + +def test__no_blur(): + convolver = aa.Convolver.no_blur(pixel_scales=1.0) + + assert ( + convolver.kernel.native + == np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]) + ).all() + assert 
convolver.kernel.pixel_scales == (1.0, 1.0) + + convolver = aa.Convolver.no_blur(pixel_scales=2.0) + + assert ( + convolver.kernel.native + == np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]) + ).all() + assert convolver.kernel.pixel_scales == (2.0, 2.0) + + +def test__from_gaussian(): + + convolver = aa.Convolver.from_gaussian( + shape_native=(3, 3), + pixel_scales=1.0, + centre=(0.1, 0.1), + axis_ratio=0.9, + angle=45.0, + sigma=1.0, + normalize=True, + ) + + assert convolver.kernel.native == pytest.approx( + np.array( + [ + [0.06281, 0.13647, 0.0970], + [0.11173, 0.21589, 0.136477], + [0.065026, 0.11173, 0.06281], + ] + ), + 1.0e-3, + ) + + +def test__normalize(): + + kernel_data = aa.Array2D.ones(shape_native=(3, 3), pixel_scales=1.0) + + convolver = aa.Convolver(kernel=kernel_data, normalize=True) + + assert convolver.kernel.native == pytest.approx(np.ones((3, 3)) / 9.0, 1e-3) + + +def test__convolved_image_from(): + + mask = aa.Mask2D.circular( + shape_native=(30, 30), pixel_scales=(1.0, 1.0), radius=4.0 + ) + + import scipy.signal + + kernel = aa.Array2D.no_mask( + values=np.arange(49).reshape(7, 7), pixel_scales=mask.pixel_scales + ) + image = aa.Array2D.no_mask( + values=np.arange(900).reshape(30, 30), pixel_scales=mask.pixel_scales + ) + + blurred_image_via_scipy = scipy.signal.convolve2d( + image.native, kernel.native, mode="same" + ) + + blurred_image_via_scipy = aa.Array2D.no_mask( + values=blurred_image_via_scipy, pixel_scales=mask.pixel_scales + ) + blurred_masked_image_via_scipy = aa.Array2D( + values=blurred_image_via_scipy.native, mask=mask + ) + + # Now reproduce this data using the convolved_image_from function + + convolver = aa.Convolver(kernel=kernel) + + masked_image = aa.Array2D(values=image.native, mask=mask) + + blurring_mask = mask.derive_mask.blurring_from( + kernel_shape_native=kernel.shape_native + ) + + blurring_image = aa.Array2D(values=image.native, mask=blurring_mask) + + blurred_masked_im_1 = 
convolver.convolved_image_from( + image=masked_image, blurring_image=blurring_image + ) + + assert blurred_masked_image_via_scipy == pytest.approx( + blurred_masked_im_1.array, 1e-4 + ) + + +def test__convolve_imaged_from__no_blurring(): + # Setup a blurred data, using the PSF to perform the convolution in 2D, then masks it to make a 1d array. + + mask = aa.Mask2D.circular( + shape_native=(30, 30), pixel_scales=(1.0, 1.0), radius=4.0 + ) + + import scipy.signal + + kernel = aa.Array2D.no_mask( + values=np.arange(49).reshape(7, 7), pixel_scales=mask.pixel_scales + ) + image = aa.Array2D.no_mask( + values=np.arange(900).reshape(30, 30), pixel_scales=mask.pixel_scales + ) + + blurring_mask = mask.derive_mask.blurring_from( + kernel_shape_native=kernel.shape_native + ) + + blurred_image_via_scipy = scipy.signal.convolve2d( + image.native * blurring_mask, kernel.native, mode="same" + ) + blurred_image_via_scipy = aa.Array2D.no_mask( + values=blurred_image_via_scipy, pixel_scales=mask.pixel_scales + ) + blurred_masked_image_via_scipy = aa.Array2D( + values=blurred_image_via_scipy.native, mask=mask + ) + + # Now reproduce this data using the frame convolver_image + + masked_image = aa.Array2D(values=image.native, mask=mask) + + convolver = aa.Convolver(kernel=kernel) + + blurred_masked_im_1 = convolver.convolved_image_from( + image=masked_image, blurring_image=None + ) + + assert blurred_masked_image_via_scipy == pytest.approx( + blurred_masked_im_1.array, 1e-4 + ) + + +def test__convolved_mapping_matrix_from(): + mask = aa.Mask2D( + mask=np.array( + [ + [True, True, True, True, True, True], + [True, False, False, False, False, True], + [True, False, False, False, False, True], + [True, False, False, False, False, True], + [True, False, False, False, False, True], + [True, True, True, True, True, True], + ] + ), + pixel_scales=1.0, + ) + + kernel = aa.Array2D.no_mask( + values=[[0, 0.0, 0], [0.4, 0.2, 0.3], [0, 0.1, 0]], + pixel_scales=mask.pixel_scales, + ) + + convolver 
= aa.Convolver(kernel=kernel) + + mapping = np.array( + [ + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [ + 0, + 1, + 0, + ], # The 0.3 should be 'chopped' from this pixel as it is on the right-most edge + [0, 0, 0], + [1, 0, 0], + [0, 0, 1], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + ] + ) + + blurred_mapping = convolver.convolved_mapping_matrix_from(mapping, mask) + + assert ( + blurred_mapping + == pytest.approx( + np.array( + [ + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0.4, 0], + [0, 0.2, 0], + [0.4, 0, 0], + [0.2, 0, 0.4], + [0.3, 0, 0.2], + [0, 0.1, 0.3], + [0, 0, 0], + [0.1, 0, 0], + [0, 0, 0.1], + [0, 0, 0], + ] + ) + ), + 1.0e-4, + ) + + mapping = np.array( + [ + [0, 1, 0], + [0, 1, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [ + 0, + 1, + 0, + ], # The 0.3 should be 'chopped' from this pixel as it is on the right-most edge + [1, 0, 0], + [1, 0, 0], + [0, 0, 1], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 0], + ] + ) + + blurred_mapping = convolver.convolved_mapping_matrix_from(mapping, mask) + + assert blurred_mapping == pytest.approx( + np.array( + [ + [0, 0.6, 0], + [0, 0.9, 0], + [0, 0.5, 0], + [0, 0.3, 0], + [0, 0.1, 0], + [0, 0.1, 0], + [0, 0.5, 0], + [0, 0.2, 0], + [0.6, 0, 0], + [0.5, 0, 0.4], + [0.3, 0, 0.2], + [0, 0.1, 0.3], + [0.1, 0, 0], + [0.1, 0, 0], + [0, 0, 0.1], + [0, 0, 0], + ] + ), + abs=1e-4, + ) + + +def test__convolve_imaged_from__via_fft__sizes_not_precomputed__compare_numerical_value(): + + # ------------------------------- + # Case 1: direct image convolution + # ------------------------------- + mask = aa.Mask2D.circular( + shape_native=(20, 20), pixel_scales=(1.0, 1.0), radius=5.0 + ) + + image = aa.Array2D.no_mask(values=np.arange(400).reshape(20, 20), pixel_scales=1.0) + masked_image = aa.Array2D(values=image.native, mask=mask) + + kernel = aa.Array2D.no_mask( + 
values=np.arange(49).reshape(7, 7), pixel_scales=mask.pixel_scales + ) + + convolver_fft = aa.Convolver( + kernel=kernel, + use_fft=True, + normalize=True, + ) + + blurring_mask = mask.derive_mask.blurring_from( + kernel_shape_native=convolver_fft.kernel.shape_native + ) + blurring_image = aa.Array2D(values=image.native, mask=blurring_mask) + + blurred_fft = convolver_fft.convolved_image_from( + image=masked_image, blurring_image=blurring_image + ) + + assert blurred_fft.native.array[13, 13] == pytest.approx(249.5, abs=1e-6) diff --git a/test_autoarray/structures/arrays/test_kernel_2d.py b/test_autoarray/structures/arrays/test_kernel_2d.py deleted file mode 100644 index ae4beae62..000000000 --- a/test_autoarray/structures/arrays/test_kernel_2d.py +++ /dev/null @@ -1,530 +0,0 @@ -from astropy import units -from astropy.modeling import functional_models -from astropy.coordinates import Angle -import numpy as np -import pytest -from os import path - -import autoarray as aa - -test_data_path = path.join("{}".format(path.dirname(path.realpath(__file__))), "files") - - -def test__full(): - kernel_2d = aa.Kernel2D.full(fill_value=3.0, shape_native=(3, 3), pixel_scales=1.0) - - assert kernel_2d.shape_native == (3, 3) - assert (kernel_2d.native == 3.0 * np.ones((3, 3))).all() - assert kernel_2d.pixel_scales == (1.0, 1.0) - assert kernel_2d.origin == (0.0, 0.0) - - -def test__ones(): - kernel_2d = aa.Kernel2D.ones(shape_native=(3, 3), pixel_scales=1.0, normalize=False) - - assert kernel_2d.shape_native == (3, 3) - assert (kernel_2d.native == np.ones((3, 3))).all() - assert kernel_2d.pixel_scales == (1.0, 1.0) - assert kernel_2d.origin == (0.0, 0.0) - - -def test__zeros(): - kernel_2d = aa.Kernel2D.zeros(shape_native=(3, 3), pixel_scales=1.0) - - assert kernel_2d.shape_native == (3, 3) - assert (kernel_2d.native == np.zeros((3, 3))).all() - assert kernel_2d.pixel_scales == (1.0, 1.0) - assert kernel_2d.origin == (0.0, 0.0) - - -def test__from_fits(): - kernel_2d = 
aa.Kernel2D.from_fits( - file_path=path.join(test_data_path, "3x2_ones.fits"), hdu=0, pixel_scales=1.0 - ) - - assert (kernel_2d.native == np.ones((3, 2))).all() - - kernel_2d = aa.Kernel2D.from_fits( - file_path=path.join(test_data_path, "3x2_twos.fits"), hdu=0, pixel_scales=1.0 - ) - - assert (kernel_2d.native == 2.0 * np.ones((3, 2))).all() - - -def test__from_fits__loads_and_stores_header_info(): - kernel_2d = aa.Kernel2D.from_fits( - file_path=path.join(test_data_path, "3x2_ones.fits"), hdu=0, pixel_scales=1.0 - ) - - assert kernel_2d.header.header_sci_obj["BITPIX"] == -64 - assert kernel_2d.header.header_hdu_obj["BITPIX"] == -64 - - kernel_2d = aa.Kernel2D.from_fits( - file_path=path.join(test_data_path, "3x2_twos.fits"), hdu=0, pixel_scales=1.0 - ) - - assert kernel_2d.header.header_sci_obj["BITPIX"] == -64 - assert kernel_2d.header.header_hdu_obj["BITPIX"] == -64 - - -def test__no_blur(): - kernel_2d = aa.Kernel2D.no_blur(pixel_scales=1.0) - - assert ( - kernel_2d.native - == np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]) - ).all() - assert kernel_2d.pixel_scales == (1.0, 1.0) - - kernel_2d = aa.Kernel2D.no_blur(pixel_scales=2.0) - - assert ( - kernel_2d.native - == np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]) - ).all() - assert kernel_2d.pixel_scales == (2.0, 2.0) - - -def test__from_gaussian(): - kernel_2d = aa.Kernel2D.from_gaussian( - shape_native=(3, 3), - pixel_scales=1.0, - centre=(0.1, 0.1), - axis_ratio=0.9, - angle=45.0, - sigma=1.0, - normalize=True, - ) - - assert kernel_2d.native == pytest.approx( - np.array( - [ - [0.06281, 0.13647, 0.0970], - [0.11173, 0.21589, 0.136477], - [0.065026, 0.11173, 0.06281], - ] - ), - 1.0e-3, - ) - - -def test__manual__normalize(): - kernel_data = np.ones((3, 3)) / 9.0 - kernel_2d = aa.Kernel2D.no_mask( - values=kernel_data, pixel_scales=1.0, normalize=True - ) - - assert kernel_2d.native == pytest.approx(kernel_data, 1e-3) - - kernel_data = np.ones((3, 3)) - - kernel_2d = 
aa.Kernel2D.no_mask( - values=kernel_data, pixel_scales=1.0, normalize=True - ) - - assert kernel_2d.native == pytest.approx(np.ones((3, 3)) / 9.0, 1e-3) - - kernel_2d = aa.Kernel2D.no_mask( - values=kernel_data, pixel_scales=1.0, normalize=False - ) - - kernel_2d = kernel_2d.normalized - - assert kernel_2d.native == pytest.approx(np.ones((3, 3)) / 9.0, 1e-3) - - kernel_data = np.ones((3, 3)) - - kernel_2d = aa.Kernel2D.no_mask( - values=kernel_data, pixel_scales=1.0, normalize=False - ) - - assert kernel_2d.native == pytest.approx(np.ones((3, 3)), 1e-3) - - -def test__rescaled_with_odd_dimensions_from__evens_to_odds(): - array_2d = np.ones((6, 6)) - kernel_2d = aa.Kernel2D.no_mask(values=array_2d, pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.5, normalize=True - ) - assert kernel_2d.pixel_scales == (2.0, 2.0) - assert kernel_2d.native == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) - - array_2d = np.ones((9, 9)) - kernel_2d = aa.Kernel2D.no_mask(values=array_2d, pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.333333333333333, normalize=True - ) - assert kernel_2d.pixel_scales == (3.0, 3.0) - assert kernel_2d.native == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) - - array_2d = np.ones((18, 6)) - kernel_2d = aa.Kernel2D.no_mask(values=array_2d, pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.5, normalize=True - ) - assert kernel_2d.pixel_scales == (2.0, 2.0) - assert kernel_2d.native == pytest.approx((1.0 / 27.0) * np.ones((9, 3)), 1.0e-4) - - array_2d = np.ones((6, 18)) - kernel_2d = aa.Kernel2D.no_mask(values=array_2d, pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.5, normalize=True - ) - assert kernel_2d.pixel_scales == (2.0, 2.0) - assert kernel_2d.native == pytest.approx((1.0 / 27.0) * 
np.ones((3, 9)), 1.0e-4) - - -def test__rescaled_with_odd_dimensions_from__different_scalings(): - kernel_2d = aa.Kernel2D.ones(shape_native=(2, 2), pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=2.0, normalize=True - ) - assert kernel_2d.pixel_scales == (0.4, 0.4) - assert kernel_2d.native == pytest.approx((1.0 / 25.0) * np.ones((5, 5)), 1.0e-4) - - kernel_2d = aa.Kernel2D.ones( - shape_native=(40, 40), pixel_scales=1.0, normalize=False - ) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.1, normalize=True - ) - assert kernel_2d.pixel_scales == (8.0, 8.0) - assert kernel_2d.native == pytest.approx((1.0 / 25.0) * np.ones((5, 5)), 1.0e-4) - - kernel_2d = aa.Kernel2D.ones(shape_native=(2, 4), pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=2.0, normalize=True - ) - - assert kernel_2d.pixel_scales[0] == pytest.approx(0.4, 1.0e-4) - assert kernel_2d.pixel_scales[1] == pytest.approx(0.4444444, 1.0e-4) - assert kernel_2d.native == pytest.approx((1.0 / 45.0) * np.ones((5, 9)), 1.0e-4) - - kernel_2d = aa.Kernel2D.ones(shape_native=(4, 2), pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=2.0, normalize=True - ) - assert kernel_2d.pixel_scales[0] == pytest.approx(0.4444444, 1.0e-4) - assert kernel_2d.pixel_scales[1] == pytest.approx(0.4, 1.0e-4) - assert kernel_2d.native == pytest.approx((1.0 / 45.0) * np.ones((9, 5)), 1.0e-4) - - kernel_2d = aa.Kernel2D.ones(shape_native=(6, 4), pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.5, normalize=True - ) - - assert kernel_2d.pixel_scales == pytest.approx((2.0, 1.3333333333), 1.0e-4) - assert kernel_2d.native == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) - - kernel_2d = aa.Kernel2D.ones( - shape_native=(9, 12), pixel_scales=1.0, normalize=False 
- ) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.33333333333, normalize=True - ) - - assert kernel_2d.pixel_scales == pytest.approx((3.0, 2.4), 1.0e-4) - assert kernel_2d.native == pytest.approx((1.0 / 15.0) * np.ones((3, 5)), 1.0e-4) - - kernel_2d = aa.Kernel2D.ones(shape_native=(4, 6), pixel_scales=1.0, normalize=False) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.5, normalize=True - ) - - assert kernel_2d.pixel_scales == pytest.approx((1.33333333333, 2.0), 1.0e-4) - assert kernel_2d.native == pytest.approx((1.0 / 9.0) * np.ones((3, 3)), 1.0e-4) - - kernel_2d = aa.Kernel2D.ones( - shape_native=(12, 9), pixel_scales=1.0, normalize=False - ) - kernel_2d = kernel_2d.rescaled_with_odd_dimensions_from( - rescale_factor=0.33333333333, normalize=True - ) - assert kernel_2d.pixel_scales == pytest.approx((2.4, 3.0), 1.0e-4) - assert kernel_2d.native == pytest.approx((1.0 / 15.0) * np.ones((5, 3)), 1.0e-4) - - -def test__from_as_gaussian_via_alma_fits_header_parameters__identical_to_astropy_gaussian_model(): - pixel_scales = 0.1 - - x_stddev = ( - 1.0e-5 - * (units.deg).to(units.arcsec) - / pixel_scales - / (2.0 * np.sqrt(2.0 * np.log(2.0))) - ) - y_stddev = ( - 2.0e-5 - * (units.deg).to(units.arcsec) - / pixel_scales - / (2.0 * np.sqrt(2.0 * np.log(2.0))) - ) - - theta_deg = 230.0 - theta = Angle(theta_deg, "deg").radian - - gaussian_astropy = functional_models.Gaussian2D( - amplitude=1.0, - x_mean=1.0, - y_mean=1.0, - x_stddev=x_stddev, - y_stddev=y_stddev, - theta=theta, - ) - - shape = (3, 3) - y, x = np.mgrid[0 : shape[1], 0 : shape[0]] - kernel_astropy = gaussian_astropy(x, y) - kernel_astropy /= np.sum(kernel_astropy) - - kernel_2d = aa.Kernel2D.from_as_gaussian_via_alma_fits_header_parameters( - shape_native=shape, - pixel_scales=pixel_scales, - y_stddev=2.0e-5, - x_stddev=1.0e-5, - theta=theta_deg, - normalize=True, - ) - - assert kernel_astropy == pytest.approx(kernel_2d.native._array, abs=1e-4) - 
- -def test__convolved_image_from(): - - mask = aa.Mask2D.circular( - shape_native=(30, 30), pixel_scales=(1.0, 1.0), radius=4.0 - ) - - import scipy.signal - - kernel = np.arange(49).reshape(7, 7) - image = np.arange(900).reshape(30, 30) - - blurred_image_via_scipy = scipy.signal.convolve2d(image, kernel, mode="same") - blurred_image_via_scipy = aa.Array2D.no_mask( - values=blurred_image_via_scipy, pixel_scales=1.0 - ) - blurred_masked_image_via_scipy = aa.Array2D( - values=blurred_image_via_scipy.native, mask=mask - ) - - # Now reproduce this data using the convolved_image_from function - - image = aa.Array2D.no_mask(values=np.arange(900).reshape(30, 30), pixel_scales=1.0) - kernel = aa.Kernel2D.no_mask(values=np.arange(49).reshape(7, 7), pixel_scales=1.0) - - masked_image = aa.Array2D(values=image.native, mask=mask) - - blurring_mask = mask.derive_mask.blurring_from( - kernel_shape_native=kernel.shape_native - ) - - blurring_image = aa.Array2D(values=image.native, mask=blurring_mask) - - blurred_masked_im_1 = kernel.convolved_image_from( - image=masked_image, blurring_image=blurring_image - ) - - assert blurred_masked_image_via_scipy == pytest.approx( - blurred_masked_im_1.array, 1e-4 - ) - - -def test__convolve_imaged_from__no_blurring(): - # Setup a blurred data, using the PSF to perform the convolution in 2D, then masks it to make a 1d array. 
- - mask = aa.Mask2D.circular( - shape_native=(30, 30), pixel_scales=(1.0, 1.0), radius=4.0 - ) - - import scipy.signal - - kernel = np.arange(49).reshape(7, 7) - image = np.arange(900).reshape(30, 30) - - blurring_mask = mask.derive_mask.blurring_from(kernel_shape_native=kernel.shape) - blurred_image_via_scipy = scipy.signal.convolve2d( - image * blurring_mask, kernel, mode="same" - ) - blurred_image_via_scipy = aa.Array2D.no_mask( - values=blurred_image_via_scipy, pixel_scales=1.0 - ) - blurred_masked_image_via_scipy = aa.Array2D( - values=blurred_image_via_scipy.native, mask=mask - ) - - # Now reproduce this data using the frame convolver_image - - kernel = aa.Kernel2D.no_mask(values=np.arange(49).reshape(7, 7), pixel_scales=1.0) - image = aa.Array2D.no_mask(values=np.arange(900).reshape(30, 30), pixel_scales=1.0) - - masked_image = aa.Array2D(values=image.native, mask=mask) - - blurred_masked_im_1 = kernel.convolved_image_from( - image=masked_image, blurring_image=None - ) - - assert blurred_masked_image_via_scipy == pytest.approx( - blurred_masked_im_1.array, 1e-4 - ) - - -def test__convolved_mapping_matrix_from(): - mask = aa.Mask2D( - mask=np.array( - [ - [True, True, True, True, True, True], - [True, False, False, False, False, True], - [True, False, False, False, False, True], - [True, False, False, False, False, True], - [True, False, False, False, False, True], - [True, True, True, True, True, True], - ] - ), - pixel_scales=1.0, - ) - - kernel = aa.Kernel2D.no_mask( - values=[[0, 0.0, 0], [0.4, 0.2, 0.3], [0, 0.1, 0]], pixel_scales=1.0 - ) - - mapping = np.array( - [ - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [ - 0, - 1, - 0, - ], # The 0.3 should be 'chopped' from this pixel as it is on the right-most edge - [0, 0, 0], - [1, 0, 0], - [0, 0, 1], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - ] - ) - - blurred_mapping = kernel.convolved_mapping_matrix_from(mapping, mask) - - assert ( - 
blurred_mapping - == pytest.approx( - np.array( - [ - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0.4, 0], - [0, 0.2, 0], - [0.4, 0, 0], - [0.2, 0, 0.4], - [0.3, 0, 0.2], - [0, 0.1, 0.3], - [0, 0, 0], - [0.1, 0, 0], - [0, 0, 0.1], - [0, 0, 0], - ] - ) - ), - 1.0e-4, - ) - - kernel = aa.Kernel2D.no_mask( - values=[[0, 0.0, 0], [0.4, 0.2, 0.3], [0, 0.1, 0]], pixel_scales=1.0 - ) - - mapping = np.array( - [ - [0, 1, 0], - [0, 1, 0], - [0, 1, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [ - 0, - 1, - 0, - ], # The 0.3 should be 'chopped' from this pixel as it is on the right-most edge - [1, 0, 0], - [1, 0, 0], - [0, 0, 1], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - [0, 0, 0], - ] - ) - - blurred_mapping = kernel.convolved_mapping_matrix_from(mapping, mask) - - assert blurred_mapping == pytest.approx( - np.array( - [ - [0, 0.6, 0], - [0, 0.9, 0], - [0, 0.5, 0], - [0, 0.3, 0], - [0, 0.1, 0], - [0, 0.1, 0], - [0, 0.5, 0], - [0, 0.2, 0], - [0.6, 0, 0], - [0.5, 0, 0.4], - [0.3, 0, 0.2], - [0, 0.1, 0.3], - [0.1, 0, 0], - [0.1, 0, 0], - [0, 0, 0.1], - [0, 0, 0], - ] - ), - abs=1e-4, - ) - - -def test__convolve_imaged_from__via_fft__sizes_not_precomputed__compare_numerical_value(): - - # ------------------------------- - # Case 1: direct image convolution - # ------------------------------- - mask = aa.Mask2D.circular( - shape_native=(20, 20), pixel_scales=(1.0, 1.0), radius=5.0 - ) - - image = aa.Array2D.no_mask(values=np.arange(400).reshape(20, 20), pixel_scales=1.0) - masked_image = aa.Array2D(values=image.native, mask=mask) - - kernel_fft = aa.Kernel2D.no_mask( - values=np.arange(49).reshape(7, 7), - pixel_scales=1.0, - use_fft=True, - normalize=True, - ) - - blurring_mask = mask.derive_mask.blurring_from( - kernel_shape_native=kernel_fft.shape_native - ) - blurring_image = aa.Array2D(values=image.native, mask=blurring_mask) - - blurred_fft = kernel_fft.convolved_image_from( - image=masked_image, 
blurring_image=blurring_image - ) - - assert blurred_fft.native.array[13, 13] == pytest.approx(249.5, abs=1e-6)